ext | sha | content
---|---|---|
py | 1a314f79c4db047568bbe2e2759554f82631c25a |
import dill
import logging
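# Model-serving transformer: loads a dill-pickled model from a mounted volume at
# start-up and returns class probabilities for each incoming feature batch.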
class Transformer(object):
def __init__(self):
with open('/mnt/lr.model', 'rb') as model_file:
self._lr_model = dill.load(model_file)
def predict(self, X, feature_names):
logging.warning(X)
prediction = self._lr_model.predict_proba(X)
logging.warning(prediction)
return prediction
|
py | 1a314fcb647d9a217a482faf3454dda559164228 | from typing import Optional
from django.db import models, DatabaseError, transaction
from .message import ChatMediaTypes
from ..users import UserUpdater
from ..base import BaseModel
from .entity_types import EntityTypes
from .entity_types import EntitySourceTypes
from core.globals import logger
from pyrogram import types
from telegram import models as tg_models
class EntityQuerySet(models.QuerySet):
def filter_by_id(self, *, id: str) -> "EntityQuerySet":
return self.filter(id=id)
def get_by_id(self, *, id: str) -> Optional["Entity"]:
try:
return self.get(id=id)
except Entity.DoesNotExist as e:
pass
except DatabaseError as e:
logger.exception(e)
except Exception as e:
logger.exception(e)
return None
def update_or_create_entity(self, *, defaults: dict, **kwargs) -> Optional["Entity"]:
try:
return self.update_or_create(
defaults=defaults,
**kwargs
)[0]
except DatabaseError as e:
logger.exception(e)
except Exception as e:
logger.exception(e)
return None
class EntityManager(models.Manager):
def get_queryset(self) -> EntityQuerySet:
return EntityQuerySet(self.model, using=self._db)
def update_or_create_from_raw(
self,
*,
raw_entity: types.MessageEntity,
db_message: "tg_models.Message",
) -> Optional["Entity"]:
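# Parse the raw entity, upsert it atomically under a `message_id:offset` key, and
# attach the mentioned user (only present for `text_mention` entities).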
if raw_entity is None or db_message is None:
return None
parsed_entity = self._parse(
raw_entity=raw_entity,
message__has_media=bool(db_message.media_type != ChatMediaTypes.undefined)
)
if parsed_entity:
with transaction.atomic():
db_entity = self.get_queryset().update_or_create_entity(
id=f'{db_message.id}:{raw_entity.offset}',
defaults={
'message': db_message,
**parsed_entity,
},
)
if db_entity:
db_entity.update_or_create_user_from_raw(
model=db_entity,
field_name='user',
raw_user=raw_entity.user
)
return db_entity
return None
@staticmethod
def _parse(*, raw_entity: types.MessageEntity, message__has_media: bool) -> dict:
if raw_entity is None:
return {}
return {
'type': EntityTypes.get_type(raw_entity.type),
'source': EntitySourceTypes.caption if message__has_media else EntitySourceTypes.text,
'offset': raw_entity.offset,
'length': raw_entity.length,
}
class Entity(BaseModel, UserUpdater):
id = models.CharField(max_length=256, primary_key=True) # `message__id:offset`
type = models.CharField(
choices=EntityTypes.choices,
max_length=20,
null=False,
)
source = models.CharField(
choices=EntitySourceTypes.choices,
max_length=20,
null=False,
)
offset = models.IntegerField()
length = models.IntegerField()
# entities, both from `text` and `caption`
message = models.ForeignKey(
'telegram.Message',
on_delete=models.CASCADE,
null=False,
related_name='entities',
)
# For `text_mention` only, the mentioned user.
user = models.ForeignKey(
'telegram.User',
related_name='mentioned_entities',
null=True, blank=True,
on_delete=models.CASCADE,
)
objects = EntityManager()
class Meta:
verbose_name_plural = 'Entities'
ordering = ('message',)
def __str__(self):
return f"{self.type} of type {self.source} in {self.message}"
|
py | 1a315094956cc61c6f5c7c4285866d74bac10b01 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('esg_leipzig_homepage_2015', '0005_linktoflatpage'),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('title', models.CharField(verbose_name='Titel', max_length=255, help_text="Beispiel: 'Schrank abzugeben'. Änderungen sind immer in den Sprachfeldern vorzunehmen.")),
('title_de', models.CharField(null=True, verbose_name='Titel', max_length=255, help_text="Beispiel: 'Schrank abzugeben'. Änderungen sind immer in den Sprachfeldern vorzunehmen.")),
('title_en', models.CharField(null=True, verbose_name='Titel', max_length=255, help_text="Beispiel: 'Schrank abzugeben'. Änderungen sind immer in den Sprachfeldern vorzunehmen.")),
('content', models.TextField(blank=True, verbose_name='Inhalt (HTML)', help_text='Es können alle HTML-Tags verwendet werden. Änderungen sind immer in den Sprachfeldern vorzunehmen.')),
('content_de', models.TextField(blank=True, null=True, verbose_name='Inhalt (HTML)', help_text='Es können alle HTML-Tags verwendet werden. Änderungen sind immer in den Sprachfeldern vorzunehmen.')),
('content_en', models.TextField(blank=True, null=True, verbose_name='Inhalt (HTML)', help_text='Es können alle HTML-Tags verwendet werden. Änderungen sind immer in den Sprachfeldern vorzunehmen.')),
('author', models.CharField(verbose_name='Autor', max_length=255, help_text="Beispiel: 'Frank Martin'.")),
('weight', models.IntegerField(verbose_name='Platzierung', help_text='Eine höhere Zahl bedeutet, dass der Eintrag auf der Startseite weiter unten steht.', default=100)),
],
options={
'verbose_name_plural': 'Aktuelle Informationen',
'verbose_name': 'Aktuelle Information',
'ordering': ('weight', 'title'),
},
bases=(models.Model,),
),
]
|
py | 1a3151a994ab1549e0ca617a643a30d4382cc236 | from typing import List
from webdnn.backend.code_generator.allocator import MemoryLayout
from webdnn.backend.code_generator.injectors.buffer_injector import BufferInjector
from webdnn.backend.code_generator.injectors.kernel_name_injector import KernelNameInjector
from webdnn.backend.webassembly.generator import WebassemblyDescriptorGenerator
from webdnn.backend.webassembly.kernel import Kernel
from webdnn.graph.axis import Axis
from webdnn.graph.operators.space2depth import Space2Depth
from webdnn.graph.order import OrderNHWC
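# C kernel template: space-to-depth on an NHWC tensor. Each r x r spatial block of
# the input is folded into r*r consecutive channel groups of the (H/r, W/r) output.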
template = """
void %%FUNC_NAME%%(const int * %%META_BUFFER%%)
{
const float *x = %%LOAD_BUFFER(space2depth_x)%%;
float *y = %%LOAD_BUFFER(space2depth_y)%%;
const int r = %%LOAD_BUFFER(space2depth_r)%%;
const int N = %%LOAD_BUFFER(space2depth_N)%%;
const int C1 = %%LOAD_BUFFER(space2depth_C1)%%;
const int C2 = %%LOAD_BUFFER(space2depth_C2)%%;
const int H1 = %%LOAD_BUFFER(space2depth_H1)%%;
const int H2 = %%LOAD_BUFFER(space2depth_H2)%%;
const int W1 = %%LOAD_BUFFER(space2depth_W1)%%;
const int W2 = %%LOAD_BUFFER(space2depth_W2)%%;
for (int gid = 0; gid < N*H1*W1*C1; gid += 1) {
const int c1 = gid % C1;
const int w1 = gid / C1 % W1;
const int h1 = gid / C1 / W1 % H1;
const int n = gid / C1 / W1 / H1;
const int w2 = w1 / r;
const int h2 = h1 / r;
const int c2 = c1 + (w1 % r) * C1 + (h1 % r) * C1 * r;
y[((n*H2+h2)*W2+w2)*C2+c2] = x[gid];
}
}
"""
@WebassemblyDescriptorGenerator.register_handler(Space2Depth)
def space2depth(op: Space2Depth, memory_layout: MemoryLayout) -> List[Kernel]:
x = op.inputs["x"]
y = op.outputs["y"]
r = op.parameters['r']
assert x.order == OrderNHWC
assert y.order == OrderNHWC
buffer_injector = BufferInjector()
buffer_injector.register({
"space2depth_x": memory_layout[x],
"space2depth_y": memory_layout[y],
'space2depth_r': r,
"space2depth_N": x.shape_dict[Axis.N],
"space2depth_C1": x.shape_dict[Axis.C],
"space2depth_C2": y.shape_dict[Axis.C],
"space2depth_H1": x.shape_dict[Axis.H],
"space2depth_H2": y.shape_dict[Axis.H],
"space2depth_W1": x.shape_dict[Axis.W],
"space2depth_W2": y.shape_dict[Axis.W],
})
name_injector = KernelNameInjector(op)
source = template
source = buffer_injector.inject(source)
source = name_injector.inject(source)
kernel = Kernel(
{name_injector.name: source},
name_injector.name,
buffer_injector.buffer,
buffer_injector.unresolved_value_list
)
return [kernel]
|
py | 1a31529bc728caf2d80973da54d993ccb3718435 | import configparser
import json
from pathlib import Path
from transformers import AutoTokenizer, Wav2Vec2ForCTC
import sounddevice as sd
import soundfile as sf
import torch
def record_from_mic(config):
"""Record audio from a microphone.
Args:
config (ConfigParser): Config params.
Returns:
audio (ndarray): Recorded audio.
"""
sample_rate = config.getint('config', 'sample_rate')
duration_secs = config.getint('microphone', 'duration_secs')
channels = config.getint('microphone', 'channels')
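# Note: Wav2Vec2 checkpoints such as facebook/wav2vec2-large-960h expect 16 kHz
# mono audio, so sample_rate and channels in config.ini should be set accordingly.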
print("Start recording . . . ")
audio = sd.rec(int(duration_secs*sample_rate), sample_rate, channels)
sd.wait() # Wait until recording is finished
print("Finish recording")
return audio
def wav2vec2_inference(audio, tokenizer, model):
"""Transcript audio with the Wav2Vec2 model.
Args:
audio (ndarray): Audio of interest.
tokenizer (Wav2Vec2Tokenizer): Wav2Vec2 associated tokenizer.
model (Wav2Vec2ForCTC): Wav2Vec2 to perform the transcription.
Returns:
transcriptions (str): Audio transcript.
"""
input_values = tokenizer(audio.ravel(), return_tensors='pt').input_values
logits = model(input_values).logits
# Store predicted id's
predicted_ids = torch.argmax(logits, dim =-1)
# Decode the audio to generate text
transcriptions = tokenizer.decode(predicted_ids[0])
return transcriptions
def main():
config = configparser.ConfigParser()
config.read('config.ini')
# Initialize tokenizer and model from HuggingFace
tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
if config.getboolean('config', 'from_microphone'):
# Record from microphone and transcript
audio = record_from_mic(config)
transcriptions = wav2vec2_inference(audio, tokenizer, model)
print(f"Transcribed audio: {transcriptions}")
if config.getboolean('config', 'save_transcriptions'):
with open('mic_transcription.txt', 'w') as file:
file.write(transcriptions)
print(f"Transcribed audio stored in mic_transcription.txt")
else:
# Transcript files in configuration file
audio_files = json.loads(config.get('config', 'audio_files'))
for audio_file in audio_files:
audio, _ = sf.read(audio_file, dtype='float32')
transcriptions = wav2vec2_inference(audio, tokenizer, model)
print(f"Transcribed audio: {transcriptions}")
if config.getboolean('config', 'save_transcriptions'):
with open(f'{Path(audio_file).stem}.txt', 'w') as file:
file.write(transcriptions)
print(f"Transcribed audio stored in {Path(audio_file).stem}.txt")
if __name__ == '__main__':
main() |
py | 1a31538a9ef5ba2a7d416ed3471a60b18450fa24 | import argparse
import pandas as pd
label_map = {
'agree': 'agree',
'disagree': 'refute',
'discuss': 'nostance'
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('snopes', help='/path/to/snopes/file')
parser.add_argument('pred', help='/path/to/prediction/file')
parser.add_argument('out', help='/path/to/output/file')
args = parser.parse_args()
snopes = pd.read_csv(args.snopes)
pred = pd.read_csv(args.pred)
pred = pred.rename(index=str, columns={'Stance': 'Predicted Stance', 'Body ID': 'ID', 'Headline': 'Claim'})
# assignment = {
# 'Snippets': lambda x: snopes.loc[snopes['ID'] == x.ID].Snippets,
# 'Gold Stance': lambda x: snopes.loc[snopes['ID'] == x.ID].Stance,
# }
# pred = pred.assign(**assignment)
joined = pred.set_index('ID').join(snopes.set_index('ID'), rsuffix='_right')
joined = joined.rename(index=str, columns={'Stance': 'Gold Stance'})
joined['Predicted Stance'] = joined.apply(lambda row: label_map[row['Predicted Stance']], axis=1)
# pred['Snippets'] = pred.apply(lambda x: snopes.loc[snopes['ID'] == x.ID].Snippets, axis=1)
# pred['Gold Stance'] = pred.apply(lambda x: snopes.loc[snopes['ID'] == x.ID].Stance, axis=1)
joined.to_csv(args.out, columns=['Claim', 'Snippets', 'Gold Stance', 'Predicted Stance'])
|
py | 1a3154e90eef9526d5b4c4233c8d95e38a310379 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Prints Graph
def print_dict(dictionary):
for k, v in {k: v for k, v in dictionary.items() if v[0] != 'out'}.items(): print(k, ':', *v, sep='\t', end='\n')
# Parsing
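# Parses an ISCAS-85 style netlist (tab-separated) into a graph dict mapping
# node id -> [gate type, fan-in node ids, fan-out node ids, stuck-at fault list].
# Fan-out branch lines ('...fan') become explicit 'wire' nodes, and every node with
# a fan-out count of 0 gets a synthetic 'out' node appended after the parsed gates.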
def parse(lines, gates):
data = list()
graph = dict()
for i in range(len(lines)):
line = lines[i].replace('\n', '').split('\t')
if len(line) > 3:
if line[2] in gates: data.append(line + lines[i+1].replace('\n', '').split("\t"))
elif (line[0][:1] != '\t'): data.append(line)
out_count = 1
for i in range(len(data)):
if data[i][2] == 'inpt': graph[data[i][0]] = [data[i][2], [], [], ['sa0', 'sa1']]
elif data[i][2] in gates:
for j in data[i][-1*int(data[i][4]):]:
if data[int(j) - 1][1][-3:] == 'fan':
graph[data[int(j) - 1][0]] = ['wire', [data[int(j) - 1][3][:-3]], [data[i][1][:-3]], ['sa0', 'sa1']]
for k in graph[data[int(j) - 1][0]][1]:
if k not in graph.keys(): graph[k] = [data[int(k) - 1][2], data[int(k) - 1][-1*int(data[int(k) - 1][4]):], [j], ['sa0', 'sa1']]
elif j not in graph[k][2]: graph[k][2].append(j)
else: graph[data[int(j) - 1][0]] = [data[int(j) - 1][2], data[int(j) - 1][-1*int(data[int(j) - 1][4]):], [data[i][1][:-3]], ['sa0', 'sa1']]
if data[i][3] == '0':
graph[data[i][0]] = [data[i][2], data[i][-1*int(data[i][4]):], [str(len(data) + out_count)], ['sa0', 'sa1']]
graph[str(len(data) + out_count)] = ['out', [data[i][0]], [], []]
out_count += 1
return graph
|
py | 1a3154f43ac855f6b7371e818350fdb014563c43 | # -*- coding: utf-8 -*-
import os
import os.path
import re
import sys
import string
from django.apps.registry import apps
from django.core.management.base import BaseCommand, CommandError
from python_translate.extractors import base as extractors
from python_translate import operations
from python_translate.translations import MessageCatalogue
import django_translate
from django_translate.utils import bcolors
from django_translate import services
from django_translate import settings
class AnyFormatSpec:
def __format__(self, fmt):
return ''
class Formatter(string.Formatter):
def __init__(self):
self.used = set()
def get_value(self, key, args, kwargs):
self.used.add(key)
return AnyFormatSpec()
class Command(BaseCommand):
help = """Extract translation strings from templates from a given location. It can display them or merge
the new ones into the translation files. When new translation strings are found it can
automatically add a prefix to the translation message.
Example running against app folder
./manage.py tranzdump -l en --path ./ --output-path ./tranz
./manage.py tranzdump -l fr --force --prefix="new_" --app website --exclude ./website/static
"""
def __init__(self, stdout=None, stderr=None, no_color=False):
self.excluded_paths = None
self.locale = None
self.verbosity = None
super(Command, self).__init__(stdout, stderr, no_color)
def add_arguments(self, parser):
parser.add_argument('--locale', '-l', default='en', dest='locale', action='store',
help='Locale to process')
parser.add_argument('--app', '-a', dest='app', action='store',
help='App to scan.')
parser.add_argument('--path', '-p', dest='path', action='store',
help='Path to scan')
parser.add_argument('--output-dir', dest='output_dir', default=None, action='store',
help='Override the default output dir')
parser.add_argument('--exclude-dir', '-x', default=[], dest='excluded_paths', action='append',
help='Paths to exclude. Default is none. Can be used multiple times. '
'Works only with ChainExtractor.')
parser.add_argument('--prefix', dest='prefix', default="__", action='store',
help='Override the default prefix')
parser.add_argument('--format', dest='format', default="yml", action='store',
help='Override the default output format')
parser.add_argument('--dump-messages', dest='dump_messages', action='store_true',
help='Should the messages be dumped in the console')
parser.add_argument('--force', dest='force', action='store_true',
help='Should the update be done')
parser.add_argument('--no-backup', dest='no_backup', action='store_true',
help='Should backup be disabled')
parser.add_argument('--clean', dest='clean', default=False, action='store_true',
help='Should clean not found messages',)
def handle(self, *args, **options):
if options.get('force') != True and options.get('dump_messages') != True:
print((bcolors.WARNING + 'You must choose at least one of --force or --dump-messages' + bcolors.ENDC))
return
if not (bool(options.get('app')) ^ bool(options.get('path'))):
print((bcolors.WARNING + 'You must choose only one of --app or --path' + bcolors.ENDC))
return
if not options.get('output_dir') and (not options.get('app') or not settings.TRANZ_SEARCH_LOCALE_IN_APPS):
print((bcolors.WARNING + 'You must provide an --output-dir when in --path mode, or when TRANZ_SEARCH_LOCALE_IN_APPS ' \
'settings variable is False.' + bcolors.ENDC))
return
self.excluded_paths = [os.path.abspath(path) for path in options['excluded_paths']]
self.excluded_paths += [os.path.abspath(django_translate.__path__[0])]
self.excluded_paths += settings.TRANZ_EXCLUDED_DIRS
# Find directories to scan
if options.get('app'):
for app in list(apps.app_configs.values()):
if app.name == options.get('app'):
current_name = app.name
root_path = app.path
break
else:
raise ValueError("App {0} not found".format(options.get('app')))
else:
root_path = os.path.abspath(options['path'])
current_name = root_path.split("/")[-1]
output_dir = options.get('output_dir') or os.path.join(root_path, 'tranz')
writer = services.writer
print(('Generating "{0}" translation files for "{1}"'.format(options.get('locale'), current_name)))
print("Loading existing messages")
current_catalogue = MessageCatalogue(options['locale'])
loader = services.loader
loader.load_messages(output_dir, current_catalogue)
if len(current_catalogue.messages) == 0:
print(("No messages were loaded, make sure there actually are " \
"translation file in format {{catalog}}.{{locale}}.{{format}} in {0}".format(output_dir)))
return
print("Extracting messages")
extracted_catalogue = MessageCatalogue(options['locale'])
extractor = services.extractor
extractor.set_prefix(options['prefix'])
self.extract_messages(extractor, root_path, extracted_catalogue)
print("Processing catalogues")
operation_class = operations.DiffOperation if options['clean'] else operations.MergeOperation
operation = operation_class(current_catalogue, extracted_catalogue)
if not len(operation.get_domains()):
print("No translations found")
return
if options["dump_messages"]:
for domain in operation.get_domains():
print(("Displaying messages for domain {0}".format(domain)))
new_keys = list(operation.get_new_messages(domain).keys())
all_keys = list(operation.get_messages(domain).keys())
for id in set(all_keys).difference(new_keys):
print(id)
for id in new_keys:
print((bcolors.OKGREEN + id + bcolors.ENDC))
for id in list(operation.get_obsolete_messages(domain).keys()):
print((bcolors.FAIL + id + bcolors.ENDC))
if options["no_backup"]:
writer.disable_backup()
if options["force"]:
print(("Writing files to {0}".format(output_dir)))
writer.write_translations(operation.get_result(), options['format'], {
"path": output_dir,
"default_locale": options['locale']
})
def extract_messages(self, extractor, root_path, extracted_catalogue):
if isinstance(extractor, extractors.ChainExtractor):
subextractors = list(extractor._extractors.values())
else:
subextractors = [extractor]
for subextractor in subextractors:
if not isinstance(subextractor, extractors.BaseExtractor):
subextractor.extract(root_path, extracted_catalogue)
continue
paths = subextractor.extract_files(root_path)
paths = self.filter_excluded_paths(paths)
for path in paths:
try:
subextractor.extract([path], extracted_catalogue)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = 'There was an exception in extractor {0} when processing ' \
'resource "{1}"'.format(type(subextractor).__name__, path)
msg = msg + "\nOriginal message: {0} {1}".format(exc_type.__name__, exc_value)
raise ValueError(msg).with_traceback(exc_traceback)
def filter_excluded_paths(self, paths):
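# Keep only paths that do not start with any excluded prefix; the for/else keeps a
# path when the inner loop finishes without matching an excluded prefix.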
valid = []
for path in paths:
for excluded in self.excluded_paths:
if path.startswith(excluded):
break
else:
valid.append(path)
return valid
|
py | 1a3156b6cfb2a057cce1d0feefc220375351df1a | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 18 16:55:14 2017
@author: ajaver
"""
import os
import tables
import numpy as np
import warnings
from .getFoodContourNN import get_food_contour_nn
from .getFoodContourMorph import get_food_contour_morph
from tierpsy.helper.misc import TimeCounter, print_flush, get_base_name
def calculate_food_cnt(mask_file, use_nn_food_cnt, model_path, _is_debug=False, solidity_th=0.98):
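# Returns the food contour, either from the neural-network model (an empty array is
# returned when the contour solidity falls below `solidity_th`) or, when
# use_nn_food_cnt is False, from the morphology-based method.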
if use_nn_food_cnt:
if not os.path.exists(model_path):
warnings.warn('The model to obtain the food contour was not found. Nothing to do here...\nIf you do not have a valid model, you could try to set `food_method=MORPH` to use a different algorithm.')
return
food_cnt, food_prob,cnt_solidity = get_food_contour_nn(mask_file, model_path, _is_debug=_is_debug)
if cnt_solidity < solidity_th:
food_cnt = np.zeros(0)
else:
food_cnt = get_food_contour_morph(mask_file, _is_debug=_is_debug)
return food_cnt
def getFoodContour(mask_file,
skeletons_file,
use_nn_food_cnt,
model_path,
solidity_th=0.98,
_is_debug = False
):
base_name = get_base_name(mask_file)
progress_timer = TimeCounter('')
print_flush("{} Calculating food contour {}".format(base_name, progress_timer.get_time_str()))
food_cnt = calculate_food_cnt(mask_file,
use_nn_food_cnt = use_nn_food_cnt,
model_path = model_path,
solidity_th= solidity_th,
_is_debug = _is_debug)
#store the contour coordinates into both the skeletons file and the mask file
for fname in [skeletons_file, mask_file]:
with tables.File(fname, 'r+') as fid:
if '/food_cnt_coord' in fid:
fid.remove_node('/food_cnt_coord')
#if it is a valid contour save it
if food_cnt is not None and \
food_cnt.size >= 2 and \
food_cnt.ndim == 2 and \
food_cnt.shape[1] == 2:
tab = fid.create_array('/',
'food_cnt_coord',
obj=food_cnt)
tab._v_attrs['use_nn_food_cnt'] = int(use_nn_food_cnt)
|
py | 1a3156bb99ae056d6d9c53383820c9d971752ba4 | """
Based on https://github.com/asanakoy/kaggle_carvana_segmentation
"""
import torch
import torch.utils.data as data
from torch.autograd import Variable as V
from PIL import Image
import cv2
import numpy as np
import os
import scipy.misc as misc
import Constants
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
sat_shift_limit=(-255, 255),
val_shift_limit=(-255, 255), u=0.5):
if np.random.random() < u:
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(image)
hue_shift = np.random.randint(hue_shift_limit[0], hue_shift_limit[1]+1)
hue_shift = np.uint8(hue_shift)
h += hue_shift
sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
s = cv2.add(s, sat_shift)
val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
v = cv2.add(v, val_shift)
image = cv2.merge((h, s, v))
#image = cv2.merge((s, v))
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
return image
def randomShiftScaleRotate(image, mask,
shift_limit=(-0.0, 0.0),
scale_limit=(-0.0, 0.0),
rotate_limit=(-0.0, 0.0),
aspect_limit=(-0.0, 0.0),
borderMode=cv2.BORDER_CONSTANT, u=0.5):
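# With probability u, build a random warp: sample a rotation angle, scale, aspect
# ratio and x/y shift, move the image corners through the resulting rotation/scale
# matrix plus translation, and warp both image and mask with the perspective
# transform defined by the original and displaced corner boxes.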
if np.random.random() < u:
height, width, channel = image.shape
angle = np.random.uniform(rotate_limit[0], rotate_limit[1])
scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
sx = scale * aspect / (aspect ** 0.5)
sy = scale / (aspect ** 0.5)
dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)
cc = np.math.cos(angle / 180 * np.math.pi) * sx
ss = np.math.sin(angle / 180 * np.math.pi) * sy
rotate_matrix = np.array([[cc, -ss], [ss, cc]])
box0 = np.array([[0, 0], [width, 0], [width, height], [0, height], ])
box1 = box0 - np.array([width / 2, height / 2])
box1 = np.dot(box1, rotate_matrix.T) + np.array([width / 2 + dx, height / 2 + dy])
box0 = box0.astype(np.float32)
box1 = box1.astype(np.float32)
mat = cv2.getPerspectiveTransform(box0, box1)
image = cv2.warpPerspective(image, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
borderValue=(
0, 0,
0,))
mask = cv2.warpPerspective(mask, mat, (width, height), flags=cv2.INTER_LINEAR, borderMode=borderMode,
borderValue=(
0, 0,
0,))
return image, mask
def randomHorizontalFlip(image, mask, u=0.5):
if np.random.random() < u:
image = cv2.flip(image, 1)
mask = cv2.flip(mask, 1)
return image, mask
def randomVerticleFlip(image, mask, u=0.5):
if np.random.random() < u:
image = cv2.flip(image, 0)
mask = cv2.flip(mask, 0)
return image, mask
def randomRotate90(image, mask, u=0.5):
if np.random.random() < u:
image=np.rot90(image)
mask=np.rot90(mask)
return image, mask
def argument_Drive_loader(img_path, mask_path):
img = cv2.imread(img_path)
img = cv2.resize(img, Constants.Image_size)
mask = np.array(Image.open(mask_path))
mask = cv2.resize(mask, Constants.Image_size)
mask = np.expand_dims(mask, axis=2)
img = np.array(img, np.float32).transpose(2, 0, 1) / 255.0 * 3.2 - 1.6
mask = np.array(mask, np.float32).transpose(2, 0, 1) / 255.0
mask[mask >= 0.5] = 1
mask[mask <= 0.5] = 0
return img, mask
def argument_CHASEDB_loader(img_path, mask_path):
img = cv2.imread(img_path)
img = cv2.resize(img, Constants.Image_size)
mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
mask = cv2.resize(mask, Constants.Image_size)
mask = np.expand_dims(mask, axis=2)
img = np.array(img, np.float32).transpose(2, 0, 1) / 255.0 * 3.2 - 1.6
mask = np.array(mask, np.float32).transpose(2, 0, 1) / 255.0
mask[mask >= 0.5] = 1
mask[mask <= 0.5] = 0
return img, mask
def default_DRIVE_loader(img_path, mask_path):
img = cv2.imread(img_path)
img = cv2.resize(img, Constants.Image_size)
# mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
mask = np.array(Image.open(mask_path))
mask = cv2.resize(mask, Constants.Image_size)
img = randomHueSaturationValue(img,
hue_shift_limit=(-30, 30),
sat_shift_limit=(-5, 5),
val_shift_limit=(-15, 15))
img, mask = randomShiftScaleRotate(img, mask,
shift_limit=(-0.1, 0.1),
scale_limit=(-0.1, 0.1),
aspect_limit=(-0.1, 0.1),
rotate_limit=(-0, 0))
img, mask = randomHorizontalFlip(img, mask)
img, mask = randomVerticleFlip(img, mask)
img, mask = randomRotate90(img, mask)
mask = np.expand_dims(mask, axis=2)
img = np.array(img, np.float32).transpose(2, 0, 1) / 255.0 * 3.2 - 1.6
mask = np.array(mask, np.float32).transpose(2, 0, 1) / 255.0
mask[mask >= 0.5] = 1
mask[mask <= 0.5] = 0
# mask = abs(mask-1)
return img, mask
def default_CHASEDB_loader(img_path, mask_path):
img = cv2.imread(img_path)
img = cv2.resize(img, Constants.Image_size)
mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
# mask = np.array(Image.open(mask_path))
mask = cv2.resize(mask, Constants.Image_size)
img = randomHueSaturationValue(img,
hue_shift_limit=(-30, 30),
sat_shift_limit=(-5, 5),
val_shift_limit=(-15, 15))
img, mask = randomShiftScaleRotate(img, mask,
shift_limit=(-0.1, 0.1),
scale_limit=(-0.1, 0.1),
aspect_limit=(-0.1, 0.1),
rotate_limit=(-0, 0))
img, mask = randomHorizontalFlip(img, mask)
img, mask = randomVerticleFlip(img, mask)
img, mask = randomRotate90(img, mask)
mask = np.expand_dims(mask, axis=2)
img = np.array(img, np.float32).transpose(2, 0, 1) / 255.0 * 3.2 - 1.6
mask = np.array(mask, np.float32).transpose(2, 0, 1) / 255.0
mask[mask >= 0.5] = 1
mask[mask <= 0.5] = 0
# mask = abs(mask-1)
return img, mask
def read_DRIVE_datasets(root_path, mode='train'):
images = []
masks = []
if mode=='Hard':
image_root = os.path.join(root_path, 'argtraining/images')
gt_root = os.path.join(root_path, 'argtraining/1st_manual')
else:
image_root = os.path.join(root_path, 'training/images')
gt_root = os.path.join(root_path, 'training/1st_manual')
for image_name in os.listdir(image_root):
image_path = os.path.join(image_root, image_name.split('.')[0] + '.tif')
if int(image_name.split('_')[0])>20:
label_path = os.path.join(gt_root, image_name.split('_')[0] + '_manual1.gif')
else:
label_path = os.path.join(gt_root, image_name.split('_')[0] + '_manual1.tif')
images.append(image_path)
masks.append(label_path)
# print(images, masks)
return images, masks
def read_CHASEDB_datasets(root_path, mode='train'):
images = []
masks = []
if mode == 'Hard':
image_root = os.path.join(root_path, 'argtraining/images')
gt_root = os.path.join(root_path, 'argtraining/1st_manual')
else:
image_root = os.path.join(root_path, 'training/images')
gt_root = os.path.join(root_path, 'training/1st_manual')
for image_name in os.listdir(image_root):
image_path = os.path.join(image_root, image_name.split('.')[0] + '.jpg')
label_path = os.path.join(gt_root, image_name.split('.')[0] + '_1stHO.png')
images.append(image_path)
masks.append(label_path)
# print(images, masks)
return images, masks
class ImageFolder(data.Dataset):
def __init__(self,root_path, datasets='Messidor', mode='train'):
self.root = root_path
self.mode = mode
self.dataset = datasets
assert self.dataset in ['CHASEDB','DRIVE'], \
"the dataset should be in 'CHASEDB', 'DRIVE'."
if self.dataset == 'DRIVE':
self.images, self.labels = read_DRIVE_datasets(self.root, self.mode)
if self.mode == 'Argument':
self.loader = argument_Drive_loader
else:
self.loader = default_DRIVE_loader
else:
self.images, self.labels = read_CHASEDB_datasets(self.root, self.mode)
if self.mode=='Argument':
self.loader=argument_CHASEDB_loader
else:
self.loader = default_CHASEDB_loader
def __getitem__(self, index):
img, mask = self.loader(self.images[index], self.labels[index])
img = torch.Tensor(img)
mask = torch.Tensor(mask)
if self.mode=='Argument':
return img, mask,self.images[index], self.labels[index]
else:
return img, mask
def __len__(self):
assert len(self.images) == len(self.labels), 'The number of images must be equal to labels'
return len(self.images) |
py | 1a31582d94c9145339b87bf7eaea7c7b170a2261 | from theano import Op, Apply
from theano.tensor import as_tensor_variable
class ScalMul(Op):
__props__ = ('scal',)
def __init__(self, scal):
if not isinstance(scal, int):
raise TypeError('expected an int')
self.scal = scal
def make_node(self, x):
x = as_tensor_variable(x)
return Apply(self, [x], [x.type()])
def perform(self, node, inputs, output_storage):
x = inputs[0]
z = output_storage[0]
z[0] = x * self.scal
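# Minimal usage sketch (added for illustration, assuming a working legacy Theano
# installation): compile a function that applies ScalMul(3) to a float64 vector.
if __name__ == '__main__':
    import numpy
    import theano
    import theano.tensor as tt

    x = tt.dvector('x')
    f = theano.function([x], ScalMul(3)(x))
    print(f(numpy.array([1.0, 2.0])))  # expected output: [3. 6.]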
|
py | 1a3158fa8f5dd7f1097b91a3ba07b22c47cb3ffe | class ScoreCalc:
def __init__(self, slices):
self.score = 0
self.slices = slices
self.calculatescore()
def calculatescore(self):
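# Each slice is (r1, c1, r2, c2) with inclusive corners; the score is the total
# number of covered cells, i.e. the sum of (r2-r1+1)*(c2-c1+1) over all slices.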
for slice in self.slices:
r1 = slice[0]
c1 = slice[1]
r2 = slice[2]
c2 = slice[3]
self.score += abs(r2-r1+1)*abs(c2-c1+1) |
py | 1a31597cfa8080ab0a6a108fdb429dda2e3c3e73 | # qubit number=3
# total number=10
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.z(input_qubit[3]) # number=7
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[3],input_qubit[0]) # number=5
prog.swap(input_qubit[3],input_qubit[0]) # number=6
prog.y(input_qubit[1]) # number=8
prog.y(input_qubit[1]) # number=9
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
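# Grid-search the analytic objective F1 over (gamma, beta) in [0, pi) and keep the
# maximising pair as the angles used when building the circuit.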
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit98.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
py | 1a315a37a5cfc699a1c8fd966d88e69e903b12ed | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
class TestLoop(OnnxRuntimeLayerTest):
@staticmethod
def create_const(name, tensor_type, value):
from onnx import helper
from onnx import TensorProto
if tensor_type == TensorProto.INT64:
np_type = np.int64
elif tensor_type == TensorProto.FLOAT:
np_type = np.float
elif tensor_type == TensorProto.BOOL:
np_type = np.bool
else:
return None
return helper.make_node('Constant', inputs=[], outputs=[name],
value=helper.make_tensor(name='const_tensor',
data_type=tensor_type,
dims=value.shape,
vals=value.flatten().astype(np_type)))
@staticmethod
def create_body_graph(input_nodes, output_nodes, input_names, output_names, input_shape,
graph_name):
# input_nodes - list of input nodes with structure {counter, condition, <other inputs>}
# output_nodes - list of output nodes with structure {condition, <back edges>, <external outputs>}.
# In this function I assume that every <other input> have <back edge> and <external output>
# input_shape - shape of all inputs from <other inputs>
from onnx import helper
from onnx import TensorProto
assert len(input_nodes) > 2
assert len(output_nodes) == (len(input_nodes) - 2) * 2 + 1
assert len(input_nodes) == len(input_names)
assert len(output_nodes) == len(output_names)
other_inputs_count = len(input_nodes) - 2
one_value = np.ones(input_shape, dtype=np.float)
one = TestLoop.create_const('one_' + graph_name, TensorProto.FLOAT, one_value)
one_int = TestLoop.create_const('one_int_' + graph_name, TensorProto.INT64, np.ones([1]))
# add one to all inputs except counter and condition
add_one_nodes = []
for i in range(2, len(input_names)):
add_one_nodes.append(
helper.make_node('Add', inputs=[input_names[i], 'one_' + graph_name],
outputs=[output_names[other_inputs_count + i - 1]]))
# add 1 to counter
add_one_to_m_node = helper.make_node(
'Add',
inputs=[input_names[0], 'one_int_' + graph_name],
outputs=['counter_plus_1_' + graph_name]
)
# map inputs to outputs - back edges
identity_nodes = []
for i in range(1, len(input_nodes)):
identity_nodes.append(helper.make_node('Identity',
inputs=[input_names[i]],
outputs=[output_names[i - 1]]))
body_nodes = [one, one_int]
body_nodes.extend(add_one_nodes)
body_nodes.append(add_one_to_m_node)
body_nodes.extend(identity_nodes)
body_graph = helper.make_graph(
body_nodes,
graph_name,
input_nodes,
output_nodes
)
return body_graph
def create_loop(self):
"""
ONNX net
Input->Loop->Output => Only accuracy check
"""
from onnx import helper
from onnx import TensorProto
# Create ONNX model
# Input ---> Loop ---> Identity ---> Result
input_shape = [1, 4, 64, 54]
in_1 = helper.make_tensor_value_info('IN_1', TensorProto.FLOAT, input_shape)
in_1_int = helper.make_tensor_value_info('in_1_int', TensorProto.FLOAT, input_shape)
in_1_int_out = helper.make_tensor_value_info('in_1_int_out', TensorProto.FLOAT, input_shape)
out_1 = helper.make_tensor_value_info('OUT_1', TensorProto.FLOAT, None)
res = helper.make_tensor_value_info('res', TensorProto.FLOAT, None)
m_1 = helper.make_tensor_value_info('m_1', TensorProto.INT64, [1])
cond_int_1 = helper.make_tensor_value_info('cond_int_1', TensorProto.BOOL, [1])
cond_out_1 = helper.make_tensor_value_info('cond_out_1', TensorProto.BOOL, [1])
m_1_value = np.array([10], dtype=np.int64)
cond_value = np.array([True], np.bool)
M_1 = self.create_const('M_1', TensorProto.INT64, m_1_value)
cond = self.create_const('cond', TensorProto.BOOL, cond_value)
body_graph_1 = self.create_body_graph([m_1, cond_int_1, in_1_int],
[cond_out_1, in_1_int_out, out_1],
['m_1', 'cond_int_1', 'in_1_int'],
['cond_out_1', 'in_1_int_out', 'OUT_1'],
input_shape, 'body_graph_1')
node_loop_1 = helper.make_node(
'Loop',
inputs=['M_1', 'cond', 'IN_1'],
outputs=['cond_out_1', 'OUT_1'],
body=body_graph_1
)
res_node = helper.make_node(
'Identity',
inputs=['OUT_1'],
outputs=['res'],
)
graph_def = helper.make_graph(
[M_1, cond, node_loop_1, res_node],
'graph',
[in_1],
[res]
)
onnx_net = helper.make_model(graph_def, producer_name='test_loop_model')
# We do not create a reference graph, as it is too complicated to construct
# So we return None to skip the IR comparison
return onnx_net, None
def create_loop_in_loop(self):
"""
ONNX net
Input->Loop(Loop)->Output => Only accuracy check
"""
from onnx import helper
from onnx import TensorProto
# Create ONNX model
input_shape = [1, 4, 64, 54]
in_1 = helper.make_tensor_value_info('IN_1', TensorProto.FLOAT, input_shape)
in_1_int = helper.make_tensor_value_info('in_1_int', TensorProto.FLOAT, input_shape)
in_1_int_out = helper.make_tensor_value_info('in_1_int_out', TensorProto.FLOAT, input_shape)
in_2 = helper.make_tensor_value_info('IN_2', TensorProto.FLOAT, input_shape)
in_2_int = helper.make_tensor_value_info('in_2_int', TensorProto.FLOAT, input_shape)
in_2_int_out = helper.make_tensor_value_info('in_2_int_out', TensorProto.FLOAT, input_shape)
out_1 = helper.make_tensor_value_info('OUT_1', TensorProto.FLOAT, None)
out_2 = helper.make_tensor_value_info('OUT_2', TensorProto.FLOAT, None)
res = helper.make_tensor_value_info('res', TensorProto.FLOAT, None)
m_1 = helper.make_tensor_value_info('m_1', TensorProto.INT64, [1])
m_2 = helper.make_tensor_value_info('m_2', TensorProto.INT64, [1])
cond_int_1 = helper.make_tensor_value_info('cond_int_1', TensorProto.BOOL, [1])
cond_out_1 = helper.make_tensor_value_info('cond_out_1', TensorProto.BOOL, [1])
cond_int_2 = helper.make_tensor_value_info('cond_int_2', TensorProto.BOOL, [1])
cond_out_2 = helper.make_tensor_value_info('cond_out_2', TensorProto.BOOL, [1])
m_1_value = np.array([10], dtype=np.int64)
m_2_value = np.array([5], dtype=np.int64)
cond_value = np.array([True], np.bool)
one_value = np.ones(input_shape, dtype=np.float)
M_1 = self.create_const('M_1', TensorProto.INT64, m_1_value)
M_2 = self.create_const('M_2', TensorProto.INT64, m_2_value)
cond = self.create_const('cond', TensorProto.BOOL, cond_value)
one = self.create_const('one', TensorProto.FLOAT, one_value)
one_int = self.create_const('one_int', TensorProto.INT64, one_value)
# create body of external loop
add_one_node = helper.make_node(
'Add',
inputs=['in_1_int', 'one'],
outputs=['in_1_loop_1']
)
add_one_to_m_node = helper.make_node(
'Add',
inputs=['m_1', 'one_int'],
outputs=['m_1_loop_1']
)
cond_2 = self.create_const('cond_2', TensorProto.BOOL, cond_value)
# create body for internal loop
body_graph_2 = self.create_body_graph([m_2, cond_int_2, in_2_int],
[cond_out_2, in_2_int_out, out_2],
['m_2', 'cond_int_2', 'in_2_int'],
['cond_out_2', 'in_2_int_out', 'OUT_2'], input_shape,
'body_graph_2')
node_loop_2 = helper.make_node(
'Loop',
inputs=['M_2', 'cond_2', 'IN_2'],
outputs=['cond_out_2', 'OUT_2'],
body=body_graph_2
)
# internal loop created
out_1_node = helper.make_node(
'Identity',
inputs=['OUT_2'],
outputs=['OUT_1'],
)
cond_1_node = helper.make_node(
'Identity',
inputs=['cond_int_1'],
outputs=['cond_out_1'],
)
in_1_int_node = helper.make_node(
'Identity',
inputs=['in_1_int'],
outputs=['in_1_int_out'],
)
body_graph_1 = helper.make_graph(
[one, add_one_node, one_int, add_one_to_m_node, M_2, cond_2, node_loop_2, out_1_node,
cond_1_node,
in_1_int_node],
'body_graph_1',
[m_1, cond_int_1, in_1_int],
[cond_out_1, in_1_int_out, out_1],
)
node_loop_1 = helper.make_node(
'Loop',
inputs=['M_1', 'cond', 'IN_1'],
outputs=['cond_out_1', 'OUT_1'],
body=body_graph_1
)
# external loop created
res_node = helper.make_node(
'Identity',
inputs=['OUT_1'],
outputs=['res'],
)
graph_def = helper.make_graph(
[M_1, cond, node_loop_1, res_node],
'graph',
[in_1, in_2],
[res],
)
onnx_net = helper.make_model(graph_def, producer_name='test_loop_in_loop_model')
# We do not create a reference graph, as it is too complicated to construct
# So we return None to skip the IR comparison
return onnx_net, None
@pytest.mark.precommit
@pytest.mark.timeout(250)
def test_loop_simple_precommit(self, ie_device, precision, ir_version, temp_dir, api_2):
if ie_device == 'GPU':
pytest.skip('Loop not supported on GPU')
self._test(*self.create_loop(), ie_device, precision, ir_version, temp_dir=temp_dir,
infer_timeout=150, api_2=api_2)
@pytest.mark.precommit
@pytest.mark.timeout(250)
def test_loop_in_loop_simple_precommit(self, ie_device, precision, ir_version, temp_dir, api_2):
if ie_device == 'GPU':
pytest.skip('Loop not supported on GPU')
self._test(*self.create_loop_in_loop(), ie_device, precision, ir_version, temp_dir=temp_dir,
infer_timeout=150, api_2=api_2)
|
py | 1a315bcab2b5f72320f71218851bffb5ddec3b05 | #Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
__doc__=''
__version__='3.3.0'
#REPORTLAB_TEST_SCRIPT
import sys
from reportlab.platypus import *
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
PAGE_HEIGHT=defaultPageSize[1]
styles = getSampleStyleSheet()
Title = "Integrating Diverse Data Sources with Gadfly 2"
Author = "Aaron Watters"
URL = "http://www.chordate.com/"
email = "[email protected]"
Abstract = """This paper describes the primative methods underlying the implementation
of SQL query evaluation in Gadfly 2, a database management system implemented
in Python [Van Rossum]. The major design goals behind
the architecture described here are to simplify the implementation
and to permit flexible and efficient extensions to the gadfly
engine. Using this architecture and its interfaces programmers
can add functionality to the engine such as alternative disk based
indexed table implementations, dynamic interfaces to remote data
bases or other data sources, and user defined computations."""
from reportlab.lib.units import inch
pageinfo = "%s / %s / %s" % (Author, email, Title)
def myFirstPage(canvas, doc):
canvas.saveState()
#canvas.setStrokeColorRGB(1,0,0)
#canvas.setLineWidth(5)
#canvas.line(66,72,66,PAGE_HEIGHT-72)
canvas.setFont('Times-Bold',16)
canvas.drawString(108, PAGE_HEIGHT-108, Title)
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "First Page / %s" % pageinfo)
canvas.restoreState()
def myLaterPages(canvas, doc):
#canvas.drawImage("snkanim.gif", 36, 36)
canvas.saveState()
#canvas.setStrokeColorRGB(1,0,0)
#canvas.setLineWidth(5)
#canvas.line(66,72,66,PAGE_HEIGHT-72)
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "Page %d %s" % (doc.page, pageinfo))
canvas.restoreState()
def go():
Elements.insert(0,Spacer(0,inch))
doc = SimpleDocTemplate('gfe.pdf')
doc.build(Elements,onFirstPage=myFirstPage, onLaterPages=myLaterPages)
Elements = []
HeaderStyle = styles["Heading1"] # XXXX
def header(txt, style=HeaderStyle, klass=Paragraph, sep=0.3):
s = Spacer(0.2*inch, sep*inch)
Elements.append(s)
para = klass(txt, style)
Elements.append(para)
ParaStyle = styles["Normal"]
def p(txt):
return header(txt, style=ParaStyle, sep=0.1)
#pre = p # XXX
PreStyle = styles["Code"]
def pre(txt):
s = Spacer(0.1*inch, 0.1*inch)
Elements.append(s)
p = Preformatted(txt, PreStyle)
Elements.append(p)
#header(Title, sep=0.1. style=ParaStyle)
header(Author, sep=0.1, style=ParaStyle)
header(URL, sep=0.1, style=ParaStyle)
header(email, sep=0.1, style=ParaStyle)
header("ABSTRACT")
p(Abstract)
header("Backgrounder")
p("""\
The term "database" usually refers to a persistent
collection of data. Data is persistent if it continues
to exist whether or not it is associated with a running
process on the computer, or even if the computer is
shut down and restarted at some future time. Database
management systems provide support for constructing databases,
maintaining databases, and extracting information from databases.""")
p("""\
Relational databases manipulate and store persistent
table structures called relations, such as the following
three tables""")
pre("""\
-- drinkers who frequent bars (this is a comment)
select * from frequents
DRINKER | PERWEEK | BAR
============================
adam | 1 | lolas
woody | 5 | cheers
sam | 5 | cheers
norm | 3 | cheers
wilt | 2 | joes
norm | 1 | joes
lola | 6 | lolas
norm | 2 | lolas
woody | 1 | lolas
pierre | 0 | frankies
)
""")
pre("""\
-- drinkers who like beers
select * from likes
DRINKER | PERDAY | BEER
===============================
adam | 2 | bud
wilt | 1 | rollingrock
sam | 2 | bud
norm | 3 | rollingrock
norm | 2 | bud
nan | 1 | sierranevada
woody | 2 | pabst
lola | 5 | mickies
""")
pre("""\
-- beers served from bars
select * from serves
BAR | QUANTITY | BEER
=================================
cheers | 500 | bud
cheers | 255 | samadams
joes | 217 | bud
joes | 13 | samadams
joes | 2222 | mickies
lolas | 1515 | mickies
lolas | 333 | pabst
winkos | 432 | rollingrock
frankies | 5 | snafu
""")
p("""
The relational model for database structures makes
the simplifying assumption that all data in a database
can be represented in simple table structures
such as these. Although this assumption seems extreme
it provides a good foundation for defining solid and
well defined database management systems and some
of the most successful software companies in the
world, such as Oracle, Sybase, IBM, and Microsoft,
have marketed database management systems based on
the relational model quite successfully.
""")
p("""
SQL stands for Structured Query Language.
The SQL language defines industry standard
mechanisms for creating, querying, and modifying
relational tables. Several years ago SQL was one
of many Relational Database Management System
(RDBMS) query languages in use, and many would
argue not the best one. Now, largely due
to standardization efforts and the
backing of IBM, SQL is THE standard way to talk
to database systems.
""")
p("""
There are many advantages SQL offers over other
database query languages and alternative paradigms
at this time (please see [O'Neill] or [Korth and Silberschatz]
for more extensive discussions and comparisons between the
SQL/relational approach and others.)
""")
p("""
The chief advantage over all contenders at this time
is that SQL and the relational model are now widely
used as interfaces and back end data stores to many
different products with different performance characteristics,
user interfaces, and other qualities: Oracle, Sybase,
Ingres, SQL Server, Access, Outlook,
Excel, IBM DB2, Paradox, MySQL, MSQL, POSTgres, and many
others. For this reason a program designed to use
an SQL database as its data storage mechanism can
easily be ported from one SQL data manager to another,
possibly on different platforms. In fact the same
program can seamlessly use several backends and/or
import/export data between different data base platforms
with trivial ease.
No other paradigm offers such flexibility at the moment.
""")
p("""
Another advantage which is not as immediately
obvious is that the relational model and the SQL
query language are easily understood by semi-technical
and non-technical professionals, such as business
people and accountants. Human resources managers
who would be terrified by an object model diagram
or a snippet of code that resembles a conventional
programming language will frequently feel quite at
ease with a relational model which resembles the
sort of tabular data they deal with on paper in
reports and forms on a daily basis. With a little training the
same HR managers may be able to translate the request
"Who are the drinkers who like bud and frequent cheers?"
into the SQL query
""")
pre("""
select drinker
from frequents
where bar='cheers'
and drinker in (
select drinker
from likes
where beer='bud')
""")
p("""
(or at least they have some hope of understanding
the query once it is written by a technical person
or generated by a GUI interface tool). Thus the use
of SQL and the relational model enables communication
between different communities which must understand
and interact with stored information. In contrast
many other approaches cannot be understood easily
by people without extensive programming experience.
""")
p("""
Furthermore the declarative nature of SQL
lends itself to automatic query optimization,
and engines such as Gadfly can automatically translate a user query
into an optimized query plan which takes
advantage of available indices and other data characteristics.
In contrast more navigational techniques require the application
program itself to optimize the accesses to the database and
explicitly make use of indices.
""")
# HACK
Elements.append(PageBreak())
p("""
While it must be admitted that there are application
domains such as computer aided engineering design where
the relational model is unnatural, it is also important
to recognize that for many application domains (such
as scheduling, accounting, inventory, finance, personal
information management, electronic mail) the relational
model is a very natural fit and the SQL query language
make most accesses to the underlying data (even sophisticated
ones) straightforward. """)
p("""For an example of a moderately
sophisticated query using the tables given above,
the following query lists the drinkers who frequent lolas bar
and like at least two beers not served by lolas
""")
if 0:
go()
sys.exit(1)
pre("""
select f.drinker
from frequents f, likes l
where f.drinker=l.drinker and f.bar='lolas'
and l.beer not in
(select beer from serves where bar='lolas')
group by f.drinker
having count(distinct beer)>=2
""")
p("""
yielding the result
""")
pre("""
DRINKER
=======
norm
""")
p("""
Experience shows that queries of this sort are actually
quite common in many applications, and are often much more
difficult to formulate using some navigational database
organizations, such as some "object oriented" database
paradigms.
""")
p("""
Certainly,
SQL does not provide all you need to interact with
databases -- in order to do "real work" with SQL you
need to use SQL and at least one other language
(such as C, Pascal, C++, Perl, Python, TCL, Visual Basic
or others) to do work (such as readable formatting a report
from raw data) that SQL was not designed to do.
""")
header("Why Gadfly 1?")
p("""Gadfly 1.0 is an SQL based relational database implementation
implemented entirely in the Python programming language, with
optional fast data structure accelerators implemented in the
C programming language. Gadfly is relatively small, highly portable,
very easy to use (especially for programmers with previous experience
with SQL databases such as MS Access or Oracle), and reasonably
fast (especially when the kjbuckets C accelerators are used).
For moderate sized problems Gadfly offers a fairly complete
set of features such as transaction semantics, failure recovery,
and a TCP/IP based client/server mode (Please see [Gadfly] for
detailed discussion).""")
header("Why Gadfly 2?")
p("""Gadfly 1.0 also has significant limitations. An active Gadfly
1.0 database keeps all data in (virtual) memory, and hence a Gadfly
1.0 database is limited in size to available virtual memory. Important
features such as date/time/interval operations, regular expression
matching and other standard SQL features are not implemented in
Gadfly 1.0. The optimizer and the query evaluator perform optimizations
using properties of the equality predicate but do not optimize
using properties of inequalities such as BETWEEN or less-than.
It is possible to add "extension views" to a Gadfly
1.0 database, but the mechanism is somewhat clumsy and indices
over extension views are not well supported. The features of Gadfly
2.0 discussed here attempt to address these deficiencies by providing
a uniform extension model that permits addition of alternate table,
function, and predicate implementations.""")
p("""Other deficiencies, such as missing constructs like "ALTER
TABLE" and the lack of outer joins and NULL values are not
addressed here, although they may be addressed in Gadfly 2.0 or
a later release. This paper also does not intend to explain
the complete operations of the internals; it is intended to provide
at least enough information to understand the basic mechanisms
for extending gadfly.""")
p("""Some concepts and definitions provided next help with the description
of the gadfly interfaces. [Note: due to the terseness of this
format the ensuing is not a highly formal presentation, but attempts
to approach precision where precision is important.]""")
header("The semilattice of substitutions")
p("""Underlying the gadfly implementation are the basic concepts
associated with substitutions. A substitution is a mapping
of attribute names to values (implemented in gadfly using kjbuckets.kjDict
objects). Here an attribute refers to some sort of "descriptive
variable", such as NAME and a value is an assignment for that variable,
like "Dave Ascher". In Gadfly a table is implemented as a sequence
of substitutions, and substitutions are used in many other ways as well.
""")
p("""
For example consider the substitutions""")
pre("""
A = [DRINKER=>'sam']
B = [DRINKER=>'sam', BAR=>'cheers']
C = [DRINKER=>'woody', BEER=>'bud']
D = [DRINKER=>'sam', BEER=>'mickies']
E = [DRINKER=>'sam', BAR=>'cheers', BEER=>'mickies']
F = [DRINKER=>'sam', BEER=>'mickies']
G = [BEER=>'bud', BAR=>'lolas']
H = [] # the empty substitution
I = [BAR=>'cheers', CAPACITY=>300]""")
p("""A trivial but important observation is that since substitutions
are mappings, no attribute can assume more than one value in a
substitution. In the operations described below whenever an operator
"tries" to assign more than one value to an attribute
the operator yields an "overdefined" or "inconsistent"
result.""")
header("Information Semi-order:")
p("""Substitution B is said to be
more informative than A because B agrees with all assignments
in A (in addition to providing more information as well). Similarly
we say that E is more informative than A, B, D, F. and H but E
is not more informative than the others since, for example G disagrees
with E on the value assigned to the BEER attribute and I provides
additional CAPACITY information not provided in E.""")
header("Joins and Inconsistency:")
p("""A join of two substitutions
X and Y is the least informative substitution Z such that Z is
more informative (or equally informative) than both X and Y. For
example B is the join of B with A, E is the join of B with D and""")
pre("""
E join I =
[DRINKER=>'sam', BAR=>'cheers', BEER=>'mickies', CAPACITY=>300]""")
p("""For any two substitutions either (1) they disagree on the value
assigned to some attribute and have no join or (2) they agree
on all common attributes (if there are any) and their join is
the union of all (name, value) assignments in both substitutions.
Written in terms of kjbucket.kjDict operations two kjDicts X and
Y have a join Z = (X+Y) if and only if Z.Clean() is not None.
Two substitutions that have no join are said to be inconsistent.
For example I and G are inconsistent since they disagree on
the value assigned to the BAR attribute and therefore have no
join. The algebra of substitutions with joins technically defines
an abstract algebraic structure called a semilattice.""")
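# Illustrative sketch (not part of the original paper text): the join of two
# substitutions, modelled here with plain Python dicts instead of kjbuckets.kjDict.
def _join_substitutions(x, y):
    """Return the join of substitutions x and y, or None if they are inconsistent."""
    z = dict(x)
    for name, value in y.items():
        if name in z and z[name] != value:
            return None  # inconsistent: the attribute would need two different values
        z[name] = value
    return z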
header("Name space remapping")
p("""Another primitive operation over substitutions is the remap
operation S2 = S.remap(R) where S is a substitution and R is a
graph of attribute names and S2 is a substitution. This operation
is defined to produce the substitution S2 such that""")
pre("""
Name=>Value in S2 if and only if
Name1=>Value in S and Name<=Name1 in R
""")
p("""or if there is no such substitution S2 the remap value is said
to be overdefined.""")
p("""For example the remap operation may be used to eliminate attributes
from a substitution. For example""")
pre("""
E.remap([DRINKER<=DRINKER, BAR<=BAR])
= [DRINKER=>'sam', BAR=>'cheers']
""")
p("""Illustrating that remapping using the [DRINKER<=DRINKER,
BAR<=BAR] graph eliminates all attributes except DRINKER and
BAR, such as BEER. More generally remap can be used in this way
to implement the classical relational projection operation. (See [Korth and Silberschatz]
for a detailed discussion of the projection operator and other relational
algebra operators such as selection, rename, difference and joins.)""")
p("""The remap operation can also be used to implement "selection
on attribute equality". For example if we are interested
in the employee names of employees who are their own bosses we
can use the remapping graph""")
pre("""
R1 = [NAME<=NAME, NAME<=BOSS]
""")
p("""and reject substitutions where remapping using R1 is overdefined.
For example""")
pre("""
S1 = [NAME=>'joe', BOSS=>'joe']
S1.remap(R1) = [NAME=>'joe']
S2 = [NAME=>'fred', BOSS=>'joe']
S2.remap(R1) is overdefined.
""")
p("""The last remap is overdefined because the NAME attribute cannot
assume both the values 'fred' and 'joe' in a substitution.""")
p("""Furthermore, of course, the remap operation can be used to
"rename attributes" or "copy attribute values"
in substitutions. Note below that the missing attribute CAPACITY
in B is effectively ignored in the remapping operation.""")
pre("""
B.remap([D<=DRINKER, B<=BAR, B2<=BAR, C<=CAPACITY])
= [D=>'sam', B=>'cheers', B2=>'cheers']
""")
p("""More interestingly, a single remap operation can be used to
perform a combination of renaming, projection, value copying,
and attribute equality selection as one operation. In kjbuckets the remapper
graph is implemented using a kjbuckets.kjGraph and the remap operation
is an intrinsic method of kjbuckets.kjDict objects.""")
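p("""The following sketch shows how the remapping examples above look
in code. The kjGraph pair-list constructor and the convention that an
overdefined remap yields None are assumptions based on the kjbuckets
documentation, not statements about the engine internals.""")
pre("""
    from kjbuckets import kjDict, kjGraph
    B = kjDict([("DRINKER", "sam"), ("BAR", "cheers")])
    R = kjGraph([("D", "DRINKER"), ("B", "BAR"), ("B2", "BAR")])
    print(B.remap(R))    # [D=>'sam', B=>'cheers', B2=>'cheers']
    R1 = kjGraph([("NAME", "NAME"), ("NAME", "BOSS")])
    S2 = kjDict([("NAME", "fred"), ("BOSS", "joe")])
    print(S2.remap(R1))  # None: NAME cannot be both 'fred' and 'joe'
""")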
header("Generalized Table Joins and the Evaluator Mainloop""")
p("""Strictly speaking the Gadfly 2.0 query evaluator only uses
the join and remap operations as its "basic assembly language"
-- all other computations, including inequality comparisons and
arithmetic, are implemented externally to the evaluator as "generalized
table joins." """)
p("""A table is a sequence of substitutions (which in keeping with
SQL semantics may contain redundant entries). The join between
two tables T1 and T2 is the sequence of all possible defined joins
between pairs of elements from the two tables. Procedurally we
might compute the join as""")
pre("""
T1JoinT2 = empty
for t1 in T1:
for t2 in T2:
if t1 join t2 is defined:
add t1 join t2 to T1JoinT2""")
p("""In general circumstances this intuitive implementation is a
very inefficient way to compute the join, and Gadfly almost always
uses other methods, particularly since, as described below, a
"generalized table" can have an "infinite"
number of entries.""")
p("""For an example of a table join consider the EMPLOYEES table
containing""")
pre("""
[NAME=>'john', JOB=>'executive']
[NAME=>'sue', JOB=>'programmer']
[NAME=>'eric', JOB=>'peon']
[NAME=>'bill', JOB=>'peon']
""")
p("""and the ACTIVITIES table containing""")
pre("""
[JOB=>'peon', DOES=>'windows']
[JOB=>'peon', DOES=>'floors']
[JOB=>'programmer', DOES=>'coding']
[JOB=>'secretary', DOES=>'phone']""")
p("""then the join between EMPLOYEES and ACTIVITIES must containining""")
pre("""
[NAME=>'sue', JOB=>'programmer', DOES=>'coding']
[NAME=>'eric', JOB=>'peon', DOES=>'windows']
[NAME=>'bill', JOB=>'peon', DOES=>'windows']
[NAME=>'eric', JOB=>'peon', DOES=>'floors']
[NAME=>'bill', JOB=>'peon', DOES=>'floors']""")
p("""A compiled gadfly subquery ultimately appears to the evaluator
as a sequence of generalized tables that must be joined (in combination
with certain remapping operations that are beyond the scope of
this discussion). The Gadfly mainloop proceeds following the very
loose pseudocode:""")
pre("""
Subs = [ [] ] # the unary sequence containing "true"
While some table hasn't been chosen yet:
Choose an unchosen table with the least cost join estimate.
Subs = Subs joined with the chosen table
return Subs""")
p("""[Note that it is a property of the join operation that the
order in which the joins are carried out will not affect the result,
so the greedy strategy of evaluating the "cheapest join next"
will not affect the result. Also note that the treatment of logical
OR and NOT as well as EXIST, IN, UNION, and aggregation and so
forth are not discussed here, even though they do fit into this
approach.]""")
p("""The actual implementation is a bit more complex than this,
but the above outline may provide some useful intuition. The "cost
estimation" step and the implementation of the join operation
itself are left up to the generalized table object implementation.
A table implementation has the ability to give an "infinite"
cost estimate, which essentially means "don't join me in
yet under any circumstances." """)
header("Implementing Functions")
p("""As mentioned above operations such as arithmetic are implemented
using generalized tables. For example the arithmetic Add operation
is implemented in Gadfly internally as an "infinite generalized
table" containing all possible substitutions""")
pre("""
[ARG0=>a, ARG1=>b, RESULT=>a+b]
""")
p("""Where a and b are all possible values which can be summed.
Clearly, it is not possible to enumerate this table, but given
a sequence of substitutions with defined values for ARG0 and ARG1
such as""")
pre("""
[ARG0=>1, ARG1=>-4]
[ARG0=>2.6, ARG1=>50]
[ARG0=>99, ARG1=>1]
""")
p("""it is possible to implement a "join operation" against
this sequence that performs the same augmentation as a join with
the infinite table defined above:""")
pre("""
[ARG0=>1, ARG1=>-4, RESULT=>-3]
[ARG0=>2.6, ARG1=>50, RESULT=>52.6]
[ARG0=>99, ARG1=>1, RESULT=>100]
""")
p("""Furthermore by giving an "infinite estimate" for
all attempts to evaluate the join where ARG0 and ARG1 are not
available the generalized table implementation for the addition
operation can refuse to compute an "infinite join." """)
p("""More generally all functions f(a,b,c,d) are represented in
gadfly as generalized tables containing all possible relevant
entries""")
pre("""
[ARG0=>a, ARG1=>b, ARG2=>c, ARG3=>d, RESULT=>f(a,b,c,d)]""")
p("""and the join estimation function refuses all attempts to perform
a join unless all the arguments are provided by the input substitution
sequence.""")
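p("""The following is a rough sketch -- not the engine's actual
internals, and the function and variable names are purely illustrative
-- of how a "join" against such an infinite function table can be
realized: each input substitution is simply augmented with the computed
RESULT value.""")
pre("""
    import kjbuckets
    def function_table_join(f, argnames, listofmappings):
        # assumes the cost estimate has already refused any join where
        # some argument attribute is unbound in the input substitutions
        result = []
        for m in listofmappings:
            args = [m[name] for name in argnames]
            augmented = kjbuckets.kjDict(m)
            augmented["RESULT"] = f(*args)
            if augmented.Clean():
                result.append(augmented)
        return result
""")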
header("Implementing Predicates")
p("""Similarly to functions, predicates such as less-than and BETWEEN
and LIKE are implemented using the generalized table mechanism.
For example the "x BETWEEN y AND z" predicate is implemented
as a generalized table "containing" all possible""")
pre("""
[ARG0=>a, ARG1=>b, ARG2=>c]""")
p("""where b<a<c. Furthermore joins with this table are not
permitted unless all three arguments are available in the sequence
of input substitutions.""")
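p("""In the same illustrative spirit (again, not the actual engine
code), the join for such a predicate table acts as a filter: it keeps
only those input substitutions whose arguments satisfy the condition
described above, and it adds no new attributes.""")
pre("""
    def between_table_join(listofmappings):
        # assumes ARG0, ARG1 and ARG2 are bound in every input substitution;
        # keep only substitutions satisfying the stated condition b<a<c
        return [m for m in listofmappings
                if m["ARG1"] < m["ARG0"] < m["ARG2"]]
""")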
header("Some Gadfly extension interfaces")
p("""A gadfly database engine may be extended with user defined
functions, predicates, and alternative table and index implementations.
This section snapshots several Gadfly 2.0 interfaces, currently under
development and likely to change before the package is released.""")
p("""The basic interface for adding functions and predicates (logical tests)
to a gadfly engine are relatively straightforward. For example to add the
ability to match a regular expression within a gadfly query use the
following implementation.""")
pre("""
from re import match
def addrematch(gadflyinstance):
gadflyinstance.add_predicate("rematch", match)
""")
p("""
Then upon connecting to the database execute
""")
pre("""
g = gadfly(...)
...
addrematch(g)
""")
p("""
In this case the "semijoin operation" associated with the new predicate
"rematch" is automatically generated, and after the add_predicate
binding operation the gadfly instance supports queries such as""")
pre("""
select drinker, beer
from likes
where rematch('b.*', beer) and drinker not in
(select drinker from frequents where rematch('c.*', bar))
""")
p("""
By embedding the "rematch" operation within the query the SQL
engine can do "more work" for the programmer and reduce or eliminate the
need to process the query result externally to the engine.
""")
p("""
In a similar manner functions may be added to a gadfly instance,""")
pre("""
def modulo(x,y):
return x % y
def addmodulo(gadflyinstance):
gadflyinstance.add_function("modulo", modulo)
...
g = gadfly(...)
...
addmodulo(g)
""")
p("""
Then after the binding the modulo function can be used whereever
an SQL expression can occur.
""")
p("""
Adding alternative table implementations to a Gadfly instance
is more interesting and more difficult. An "extension table" implementation
must conform to the following interface:""")
pre("""
# get the kjbuckets.kjSet set of attribute names for this table
names = table.attributes()
# estimate the difficulty of evaluating a join given known attributes
# return None for "impossible" or n>=0 otherwise with larger values
# indicating greater difficulty or expense
estimate = table.estimate(known_attributes)
# return the join of the rows of the table with
# the list of kjbuckets.kjDict mappings as a list of mappings.
resultmappings = table.join(listofmappings)
""")
p("""
In this case add the table to a gadfly instance using""")
pre("""
gadflyinstance.add_table("table_name", table)
""")
p("""
For example to add a table which automatically queries filenames
in the filesystems of the host computer a gadfly instance could
be augmented with a GLOB table implemented using the standard
library function glob.glob as follows:""")
pre("""
import kjbuckets
class GlobTable:
def __init__(self): pass
def attributes(self):
return kjbuckets.kjSet("PATTERN", "NAME")
def estimate(self, known_attributes):
if known_attributes.member("PATTERN"):
return 66 # join not too difficult
else:
return None # join is impossible (must have PATTERN)
def join(self, listofmappings):
from glob import glob
result = []
for m in listofmappings:
pattern = m["PATTERN"]
for name in glob(pattern):
newmapping = kjbuckets.kjDict(m)
newmapping["NAME"] = name
if newmapping.Clean():
result.append(newmapping)
return result
...
gadfly_instance.add_table("GLOB", GlobTable())
""")
p("""
Then one could formulate queries such as "list the files in directories
associated with packages installed by guido"
""")
pre("""
select g.name as filename
from packages p, glob g
where p.installer = 'guido' and g.pattern=p.root_directory
""")
p("""
Note that conceptually the GLOB table is an infinite table including
all filenames on the current computer in the "NAME" column, paired with
a potentially infinite number of patterns.
""")
p("""
More interesting examples would allow queries to remotely access
data served by an HTTP server, or from any other resource.
""")
p("""
Furthermore an extension table can be augmented with update methods
""")
pre("""
table.insert_rows(listofmappings)
table.update_rows(oldlist, newlist)
table.delete_rows(oldlist)
""")
p("""
Note: at present the implementation does not enforce recovery or
transaction semantics for updates to extension tables, although this
may change in the final release.
""")
p("""
The table implementation is free to provide its own implementations of
indices which take advantage of data provided by the join argument.
""")
header("Efficiency Notes")
p("""The following thought experiment attempts to explain why the
Gadfly implementation is surprisingly fast considering that it
is almost entirely implemented in Python (an interpreted programming
language which is not especially fast when compared to alternatives).
Although Gadfly is quite complex, at an abstract level the process
of query evaluation boils down to a series of embedded loops.
Consider the following nested loops:""")
pre("""
iterate 1000:
f(...) # fixed cost of outer loop
iterate 10:
g(...) # fixed cost of middle loop
iterate 10:
# the real work (string parse, matrix mul, query eval...)
h(...)""")
p("""In my experience many computations follow this pattern where
f and g are complex, dynamic, special purpose and h is simple, general
purpose, static. Some example computations that follow this pattern
include: file massaging (perl), matrix manipulation (python, tcl),
database/cgi page generation, and vector graphics/imaging.""")
p("""Suppose implementing f, g, h in python is easy but result in
execution times10 times slower than a much harder implementation
in C, choosing arbitrary and debatable numbers assume each function
call consumes 1 tick in C, 5 ticks in java, 10 ticks in python
for a straightforward implementation of each function f, g, and
h. Under these conditions we get the following cost analysis,
eliminating some uninteresting combinations, of implementing the
function f, g, and h in combinations of Python, C and java:""")
pre("""
COST | FLANG | GLANG | HLANG
==================================
111000 | C | C | C
115000 | java | C | C
120000 | python | C | C
155000 | java | java | C
210000 | python | python | C
555000 | java | java | java
560000 | python | java | java
610000 | python | python | java
1110000 | python | python | python
""")
p("""Note that moving only the innermost loop to C (python/python/C)
speeds up the calculation by half an order of magnitude compared
to the python-only implementation and brings the speed to within
a factor of 2 of an implementation done entirely in C.""")
p("""Although this artificial and contrived thought experiment is
far from conclusive, we may be tempted to draw the conclusion
that generally programmers should focus first on obtaining a working
implementation (because as John Ousterhout is reported to have
said "the biggest performance improvement is the transition
from non-working to working") using the methodology that
is most likely to obtain a working solution the quickest (Python). Only then, if the performance
is inadequate, should the programmer focus on optimizing
the innermost loops, perhaps moving them to a very efficient
implementation (C). Optimizing the outer loops will buy little
improvement, and should be done later, if ever.""")
p("""This was precisely the strategy behind the gadfly implementations,
where most of the inner loops are implemented in the kjbuckets
C extension module and the higher level logic is all in Python.
This also explains why gadfly appears to be "slower"
for simple queries over small data sets, but seems to be relatively
"faster" for more complex queries over larger data sets,
since larger queries and data sets take better advantage of the
optimized inner loops.""")
header("A Gadfly variant for OLAP?")
p("""In private correspondence Andy Robinson points out that the
basic logical design underlying Gadfly could be adapted to provide
Online Analytical Processing (OLAP) and other forms of data warehousing
and data mining. Since SQL is not particularly well suited for
the kinds of requests common in these domains the higher level
interfaces would require modification, but the underlying logic
of substitutions and name mappings seems to be appropriate.""")
header("Conclusion")
p("""The revamped query engine design in Gadfly 2 supports
a flexible and general extension methodology that permits programmers
to extend the gadfly engine to include additional computations
and access to remote data sources. Among other possibilities this
will permit the gadfly engine to make use of disk based indexed
tables and to dynamically retrieve information from remote data
sources (such as an Excel spreadsheet or an Oracle database).
These features will make gadfly a very useful tool for data manipulation
and integration.""")
header("References")
p("""[Van Rossum] Van Rossum, Python Reference Manual, Tutorial, and Library Manuals,
please look to http://www.python.org
for the latest versions, downloads and links to printed versions.""")
p("""[O'Neill] O'Neill, P., Data Base Principles, Programming, Performance,
Morgan Kaufmann Publishers, San Francisco, 1994.""")
p("""[Korth and Silberschatz] Korth, H. and Silberschatz, A. and Sudarshan, S.
Data Base System Concepts, McGraw-Hill Series in Computer Science, Boston,
1997""")
p("""[Gadfly]Gadfly: SQL Relational Database in Python,
http://www.chordate.com/kwParsing/gadfly.html""")
go()
|
py | 1a315cbec3771fe0a322a08c77a7128239c1aea9 | """
@ProjectName: DXY-2019-nCov-Crawler
@FileName: crawler.py
@Author: Jiabao Lin
@Date: 2020/1/21
"""
from bs4 import BeautifulSoup
from service.db import DB
from service.nameMap import country_type_map, city_name_map, country_name_map, continent_name_map
import re
import json
import time
import logging
import datetime
import requests
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
logger = logging.getLogger(__name__)
headers = {
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36'
}
class Crawler:
def __init__(self):
self.session = requests.session()
self.session.headers.update(headers)
self.db = DB()
self.crawl_timestamp = int()
def run(self):
while True:
self.crawler()
time.sleep(60)
def crawler(self):
while True:
self.crawl_timestamp = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
try:
r = self.session.get(url='https://3g.dxy.cn/newh5/view/pneumonia')
except requests.exceptions.ChunkedEncodingError:
continue
soup = BeautifulSoup(r.content, 'lxml')
overall_information = re.search(r'\{("id".*?)\]\}', str(soup.find('script', attrs={'id': 'getStatisticsService'})))
province_information = re.search(r'\[(.*?)\]', str(soup.find('script', attrs={'id': 'getListByCountryTypeService1'})))
area_information = re.search(r'\[(.*)\]', str(soup.find('script', attrs={'id': 'getAreaStat'})))
abroad_information = re.search(r'\[(.*)\]', str(soup.find('script', attrs={'id': 'getListByCountryTypeService2'})))
news = re.search(r'\[(.*?)\]', str(soup.find('script', attrs={'id': 'getTimelineService'})))
if not overall_information or not province_information or not area_information or not news:
continue
self.overall_parser(overall_information=overall_information)
self.province_parser(province_information=province_information)
self.area_parser(area_information=area_information)
self.abroad_parser(abroad_information=abroad_information)
self.news_parser(news=news)
break
while True:
self.crawl_timestamp = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
try:
r = self.session.get(url='https://file1.dxycdn.com/2020/0127/797/3393185293879908067-115.json')
except requests.exceptions.ChunkedEncodingError:
continue
# Use try-except to ensure the .json() method will not raise exception.
try:
if r.status_code != 200:
continue
elif r.json().get('code') == 'success':
self.rumor_parser(rumors=r.json().get('data'))
break
else:
continue
except json.decoder.JSONDecodeError:
continue
logger.info('Successfully crawled.')
def overall_parser(self, overall_information):
overall_information = json.loads(overall_information.group(0))
overall_information.pop('id')
overall_information.pop('createTime')
overall_information.pop('modifyTime')
overall_information.pop('imgUrl')
overall_information.pop('deleted')
overall_information['countRemark'] = overall_information['countRemark'].replace(' 疑似', ',疑似').replace(' 治愈', ',治愈').replace(' 死亡', ',死亡').replace(' ', '')
if not self.db.find_one(collection='DXYOverall', data=overall_information):
overall_information['updateTime'] = self.crawl_timestamp
self.db.insert(collection='DXYOverall', data=overall_information)
def province_parser(self, province_information):
provinces = json.loads(province_information.group(0))
for province in provinces:
province.pop('id')
province.pop('tags')
province.pop('sort')
province['comment'] = province['comment'].replace(' ', '')
if self.db.find_one(collection='DXYProvince', data=province):
continue
province['provinceEnglishName'] = city_name_map[province['provinceShortName']]['engName']
province['crawlTime'] = self.crawl_timestamp
province['country'] = country_type_map.get(province['countryType'])
self.db.insert(collection='DXYProvince', data=province)
def area_parser(self, area_information):
area_information = json.loads(area_information.group(0))
for area in area_information:
area['comment'] = area['comment'].replace(' ', '')
# Because the cities are given other attributes,
# this part should not be used when checking the identical document.
cities_backup = area.pop('cities')
if self.db.find_one(collection='DXYArea', data=area):
continue
# If this document is not in current database, insert this attribute back to the document.
area['cities'] = cities_backup
area['countryName'] = '中国'
area['countryEnglishName'] = 'China'
area['continentName'] = '亚洲'
area['continentEnglishName'] = 'Asia'
area['provinceEnglishName'] = city_name_map[area['provinceShortName']]['engName']
for city in area['cities']:
if city['cityName'] != '待明确地区':
try:
city['cityEnglishName'] = city_name_map[area['provinceShortName']]['cities'][city['cityName']]
except KeyError:
print(area['provinceShortName'], city['cityName'])
pass
else:
city['cityEnglishName'] = 'Area not defined'
area['updateTime'] = self.crawl_timestamp
self.db.insert(collection='DXYArea', data=area)
def abroad_parser(self, abroad_information):
countries = json.loads(abroad_information.group(0))
for country in countries:
country.pop('id')
country.pop('tags')
country.pop('countryType')
country.pop('provinceId')
country.pop('cityName')
country.pop('sort')
# The original provinceShortName are blank string
country.pop('provinceShortName')
# Rename the key continents to continentName
country['continentName'] = country.pop('continents')
# Ding Xiang Yuan have a large number of duplicates,
# values are all the same, but the modifyTime are different.
# I suppose the modifyTime is modification time for all documents, other than for only this document.
# So this field will be popped out.
country.pop('modifyTime')
# createTime is also different even if the values are same.
# Originally, the createTime represent the first diagnosis of the virus in this area,
# but it seems different for abroad information.
country.pop('createTime')
country['comment'] = country['comment'].replace(' ', '')
if self.db.find_one(collection='DXYArea', data=country):
continue
country['countryName'] = country.get('provinceName')
country['provinceShortName'] = country.get('provinceName')
country['continentEnglishName'] = continent_name_map.get(country['continentName'])
country['countryEnglishName'] = country_name_map.get(country['countryName'])
country['provinceEnglishName'] = country_name_map.get(country['countryName'])
country['updateTime'] = self.crawl_timestamp
self.db.insert(collection='DXYArea', data=country)
def news_parser(self, news):
news = json.loads(news.group(0))
for _news in news:
_news.pop('pubDateStr')
if self.db.find_one(collection='DXYNews', data=_news):
continue
_news['crawlTime'] = self.crawl_timestamp
self.db.insert(collection='DXYNews', data=_news)
def rumor_parser(self, rumors):
for rumor in rumors:
rumor.pop('score')
rumor['body'] = rumor['body'].replace(' ', '')
if self.db.find_one(collection='DXYRumors', data=rumor):
continue
rumor['crawlTime'] = self.crawl_timestamp
self.db.insert(collection='DXYRumors', data=rumor)
if __name__ == '__main__':
crawler = Crawler()
crawler.run()
|
py | 1a315df1dc16a6978b63da71425ab2cc5ba80b5b | """ This is KAMINARIO-FLOCKER-DRIVER Module docstring """
from flocker import node
from kaminario_flocker_driver.k2_blockdevice_api \
import instantiate_driver_instance
from kaminario_flocker_driver.constants import DRIVER_NAME
def api_factory(cluster_id, **kwargs):
"""Entry point for Flocker to load driver instance."""
kwargs['cluster_id'] = cluster_id
return instantiate_driver_instance(
**kwargs)
FLOCKER_BACKEND = node.BackendDescription(
name=DRIVER_NAME,
needs_reactor=False,
needs_cluster_id=True,
api_factory=api_factory,
deployer_type=node.DeployerType.block)
|
py | 1a315eaa70d275cd9be10274b84a6afcb708c220 | # coding: utf-8
"""
DLRN API
DLRN API client
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
class Params(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, max_age=None, success=None, job_id=None,
sequential_mode=None, previous_job_id=None, component=None):
"""Params - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'max_age': 'int',
'success': 'bool',
'job_id': 'str',
'sequential_mode': 'bool',
'previous_job_id': 'str',
'component': 'str'
}
self.attribute_map = {
'max_age': 'max_age',
'success': 'success',
'job_id': 'job_id',
'sequential_mode': 'sequential_mode',
'previous_job_id': 'previous_job_id',
'component': 'component'
}
self._max_age = max_age
self._success = success
self._job_id = job_id
self._sequential_mode = sequential_mode
self._previous_job_id = previous_job_id
self._component = component
@property
def max_age(self):
"""Gets the max_age of this Params.
Maximum age (in hours) for the repo to be considered.
Any repo tested or being tested after \"now - max_age\" will be taken
into account. If set to 0, all repos will be considered.
:return: The max_age of this Params.
:rtype: int
"""
return self._max_age
@max_age.setter
def max_age(self, max_age):
"""Sets the max_age of this Params.
Maximum age (in hours) for the repo to be considered.
Any repo tested or being tested after \"now - max_age\" will be taken
into account. If set to 0, all repos will be considered.
:param max_age: The max_age of this Params.
:type: int
"""
if max_age is None:
raise ValueError("Invalid value for `max_age`, must not be `None`")
if max_age is not None and max_age < 0:
raise ValueError("Invalid value for `max_age`, must be a value"
" greater than or equal to `0`")
self._max_age = max_age
@property
def success(self):
"""Gets the success of this Params.
If set to a value, find repos with a successful/unsuccessful vote
(as specified). If not set, any tested repo will be considered.
:return: The success of this Params.
:rtype: bool
"""
return self._success
@success.setter
def success(self, success):
"""Sets the success of this Params.
If set to a value, find repos with a successful/unsuccessful vote
(as specified). If not set, any tested repo will be considered.
:param success: The success of this Params.
:type: bool
"""
self._success = success
@property
def job_id(self):
"""Gets the job_id of this Params.
Name of the CI that sent the vote. If not set, no filter will be set
on CI.
:return: The job_id of this Params.
:rtype: str
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this Params.
Name of the CI that sent the vote. If not set, no filter will be set
on CI.
:param job_id: The job_id of this Params.
:type: str
"""
self._job_id = job_id
@property
def sequential_mode(self):
"""Gets the sequential_mode of this Params.
Use the sequential mode algorithm. In this case, return the last tested
repo within that timeframe for the CI job described by previous_job_id.
Defaults to false.
:return: The sequential_mode of this Params.
:rtype: bool
"""
return self._sequential_mode
@sequential_mode.setter
def sequential_mode(self, sequential_mode):
"""Sets the sequential_mode of this Params.
Use the sequential mode algorithm. In this case, return the last tested
repo within that timeframe for the CI job described by previous_job_id.
Defaults to false.
:param sequential_mode: The sequential_mode of this Params.
:type: bool
"""
self._sequential_mode = sequential_mode
@property
def previous_job_id(self):
"""Gets the previous_job_id of this Params.
If sequential_mode is set to true, look for jobs tested by the CI
identified by previous_job_id.
:return: The previous_job_id of this Params.
:rtype: str
"""
return self._previous_job_id
@previous_job_id.setter
def previous_job_id(self, previous_job_id):
"""Sets the previous_job_id of this Params.
If sequential_mode is set to true, look for jobs tested by the CI
identified by previous_job_id.
:param previous_job_id: The previous_job_id of this Params.
:type: str
"""
self._previous_job_id = previous_job_id
@property
def component(self):
"""Gets the component of this Params.
additional notes
:return: The component of this Params.
:rtype: str
"""
return self._component
@component.setter
def component(self, component):
"""Sets the component of this Params.
additional notes
:param component: The component of this Params.
:type: str
"""
self._component = component
def to_dict(self):
"""Returns the model properties as a dict """
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model """
return pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint` """
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal """
if not isinstance(other, Params):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal """
return not self == other
|
py | 1a315ee26acf15e58a7094403a317435421fd56c | # -*- coding: utf-8 -*-
"""
test_invalid_headers.py
~~~~~~~~~~~~~~~~~~~~~~~
This module contains tests that use invalid header blocks, and validates that
they fail appropriately.
"""
import itertools
import pytest
import h2.config
import h2.connection
import h2.errors
import h2.events
import h2.exceptions
import h2.settings
import h2.utilities
import hyperframe.frame
from hypothesis import given
from hypothesis.strategies import binary, lists, tuples
HEADERS_STRATEGY = lists(tuples(binary(min_size=1), binary()))
class TestInvalidFrameSequences(object):
"""
Invalid header sequences cause ProtocolErrors to be thrown when received.
"""
base_request_headers = [
(':authority', 'example.com'),
(':path', '/'),
(':scheme', 'https'),
(':method', 'GET'),
('user-agent', 'someua/0.0.1'),
]
invalid_header_blocks = [
base_request_headers + [('Uppercase', 'name')],
base_request_headers + [(':late', 'pseudo-header')],
[(':path', 'duplicate-pseudo-header')] + base_request_headers,
base_request_headers + [('connection', 'close')],
base_request_headers + [('proxy-connection', 'close')],
base_request_headers + [('keep-alive', 'close')],
base_request_headers + [('transfer-encoding', 'gzip')],
base_request_headers + [('upgrade', 'super-protocol/1.1')],
base_request_headers + [('te', 'chunked')],
base_request_headers + [('host', 'notexample.com')],
base_request_headers + [(' name', 'name with leading space')],
base_request_headers + [('name ', 'name with trailing space')],
base_request_headers + [('name', ' value with leading space')],
base_request_headers + [('name', 'value with trailing space ')],
[header for header in base_request_headers
if header[0] != ':authority'],
]
server_config = h2.config.H2Configuration(
client_side=False, header_encoding='utf-8'
)
@pytest.mark.parametrize('headers', invalid_header_blocks)
def test_headers_event(self, frame_factory, headers):
"""
Test invalid headers are rejected with PROTOCOL_ERROR.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
c.clear_outbound_data_buffer()
f = frame_factory.build_headers_frame(headers)
data = f.serialize()
with pytest.raises(h2.exceptions.ProtocolError):
c.receive_data(data)
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
)
assert c.data_to_send() == expected_frame.serialize()
@pytest.mark.parametrize('headers', invalid_header_blocks)
def test_push_promise_event(self, frame_factory, headers):
"""
If a PUSH_PROMISE header frame is received with an invalid header block
it is rejected with a PROTOCOL_ERROR.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(
stream_id=1, headers=self.base_request_headers, end_stream=True
)
c.clear_outbound_data_buffer()
f = frame_factory.build_push_promise_frame(
stream_id=1,
promised_stream_id=2,
headers=headers
)
data = f.serialize()
with pytest.raises(h2.exceptions.ProtocolError):
c.receive_data(data)
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=0, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
)
assert c.data_to_send() == expected_frame.serialize()
@pytest.mark.parametrize('headers', invalid_header_blocks)
def test_push_promise_skipping_validation(self, frame_factory, headers):
"""
If we have ``validate_inbound_headers`` disabled, then invalid header
blocks in push promise frames are allowed to pass.
"""
config = h2.config.H2Configuration(
client_side=True,
validate_inbound_headers=False,
header_encoding='utf-8'
)
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
c.send_headers(
stream_id=1, headers=self.base_request_headers, end_stream=True
)
c.clear_outbound_data_buffer()
f = frame_factory.build_push_promise_frame(
stream_id=1,
promised_stream_id=2,
headers=headers
)
data = f.serialize()
events = c.receive_data(data)
assert len(events) == 1
pp_event = events[0]
assert pp_event.headers == headers
@pytest.mark.parametrize('headers', invalid_header_blocks)
def test_headers_event_skipping_validation(self, frame_factory, headers):
"""
If we have ``validate_inbound_headers`` disabled, then all of these
invalid header blocks are allowed to pass.
"""
config = h2.config.H2Configuration(
client_side=False,
validate_inbound_headers=False,
header_encoding='utf-8'
)
c = h2.connection.H2Connection(config=config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(headers)
data = f.serialize()
events = c.receive_data(data)
assert len(events) == 1
request_event = events[0]
assert request_event.headers == headers
def test_transfer_encoding_trailers_is_valid(self, frame_factory):
"""
Transfer-Encoding trailers is allowed by the filter.
"""
headers = (
self.base_request_headers + [('te', 'trailers')]
)
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(headers)
data = f.serialize()
events = c.receive_data(data)
assert len(events) == 1
request_event = events[0]
assert request_event.headers == headers
def test_pseudo_headers_rejected_in_trailer(self, frame_factory):
"""
Ensure we reject pseudo headers included in trailers
"""
trailers = [(':path', '/'), ('extra', 'value')]
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
c.clear_outbound_data_buffer()
header_frame = frame_factory.build_headers_frame(
self.base_request_headers
)
trailer_frame = frame_factory.build_headers_frame(
trailers, flags=["END_STREAM"]
)
head = header_frame.serialize()
trailer = trailer_frame.serialize()
c.receive_data(head)
# Raise exception if pseudo header in trailer
with pytest.raises(h2.exceptions.ProtocolError) as e:
c.receive_data(trailer)
assert "pseudo-header in trailer" in str(e)
# Test appropriate response frame is generated
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
)
assert c.data_to_send() == expected_frame.serialize()
class TestSendingInvalidFrameSequences(object):
"""
Trying to send invalid header sequences cause ProtocolErrors to
be thrown.
"""
base_request_headers = [
(':authority', 'example.com'),
(':path', '/'),
(':scheme', 'https'),
(':method', 'GET'),
('user-agent', 'someua/0.0.1'),
]
invalid_header_blocks = [
base_request_headers + [(':late', 'pseudo-header')],
[(':path', 'duplicate-pseudo-header')] + base_request_headers,
base_request_headers + [('te', 'chunked')],
base_request_headers + [('host', 'notexample.com')],
[header for header in base_request_headers
if header[0] != ':authority'],
]
strippable_header_blocks = [
base_request_headers + [('connection', 'close')],
base_request_headers + [('proxy-connection', 'close')],
base_request_headers + [('keep-alive', 'close')],
base_request_headers + [('transfer-encoding', 'gzip')],
base_request_headers + [('upgrade', 'super-protocol/1.1')]
]
all_header_blocks = invalid_header_blocks + strippable_header_blocks
server_config = h2.config.H2Configuration(client_side=False)
@pytest.mark.parametrize('headers', invalid_header_blocks)
def test_headers_event(self, frame_factory, headers):
"""
Test sending invalid headers raise a ProtocolError.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
# Clear the data, then try to send headers.
c.clear_outbound_data_buffer()
with pytest.raises(h2.exceptions.ProtocolError):
c.send_headers(1, headers)
@pytest.mark.parametrize('headers', invalid_header_blocks)
def test_send_push_promise(self, frame_factory, headers):
"""
Sending invalid headers in a push promise raises a ProtocolError.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.initiate_connection()
c.receive_data(frame_factory.preamble())
header_frame = frame_factory.build_headers_frame(
self.base_request_headers
)
c.receive_data(header_frame.serialize())
# Clear the data, then try to send a push promise.
c.clear_outbound_data_buffer()
with pytest.raises(h2.exceptions.ProtocolError):
c.push_stream(
stream_id=1, promised_stream_id=2, request_headers=headers
)
@pytest.mark.parametrize('headers', all_header_blocks)
def test_headers_event_skipping_validation(self, frame_factory, headers):
"""
If we have ``validate_outbound_headers`` disabled, then all of these
invalid header blocks are allowed to pass.
"""
config = h2.config.H2Configuration(
validate_outbound_headers=False
)
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
# Clear the data, then send headers.
c.clear_outbound_data_buffer()
c.send_headers(1, headers)
# Ensure headers are still normalized.
norm_headers = h2.utilities.normalize_outbound_headers(headers, None)
f = frame_factory.build_headers_frame(norm_headers)
assert c.data_to_send() == f.serialize()
@pytest.mark.parametrize('headers', all_header_blocks)
def test_push_promise_skipping_validation(self, frame_factory, headers):
"""
If we have ``validate_outbound_headers`` disabled, then all of these
invalid header blocks are allowed to pass.
"""
config = h2.config.H2Configuration(
client_side=False,
validate_outbound_headers=False,
)
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
c.receive_data(frame_factory.preamble())
header_frame = frame_factory.build_headers_frame(
self.base_request_headers
)
c.receive_data(header_frame.serialize())
# Create push promise frame with normalized headers.
frame_factory.refresh_encoder()
norm_headers = h2.utilities.normalize_outbound_headers(headers, None)
pp_frame = frame_factory.build_push_promise_frame(
stream_id=1, promised_stream_id=2, headers=norm_headers
)
# Clear the data, then send a push promise.
c.clear_outbound_data_buffer()
c.push_stream(
stream_id=1, promised_stream_id=2, request_headers=headers
)
assert c.data_to_send() == pp_frame.serialize()
@pytest.mark.parametrize('headers', all_header_blocks)
def test_headers_event_skip_normalization(self, frame_factory, headers):
"""
If we have ``normalize_outbound_headers`` disabled, then all of these
invalid header blocks are sent through unmodified.
"""
config = h2.config.H2Configuration(
validate_outbound_headers=False,
normalize_outbound_headers=False
)
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
f = frame_factory.build_headers_frame(
headers,
stream_id=1,
)
# Clear the data, then send headers.
c.clear_outbound_data_buffer()
c.send_headers(1, headers)
assert c.data_to_send() == f.serialize()
@pytest.mark.parametrize('headers', all_header_blocks)
def test_push_promise_skip_normalization(self, frame_factory, headers):
"""
If we have ``normalize_outbound_headers`` disabled, then all of these
invalid header blocks are allowed to pass unmodified.
"""
config = h2.config.H2Configuration(
client_side=False,
validate_outbound_headers=False,
normalize_outbound_headers=False,
)
c = h2.connection.H2Connection(config=config)
c.initiate_connection()
c.receive_data(frame_factory.preamble())
header_frame = frame_factory.build_headers_frame(
self.base_request_headers
)
c.receive_data(header_frame.serialize())
frame_factory.refresh_encoder()
pp_frame = frame_factory.build_push_promise_frame(
stream_id=1, promised_stream_id=2, headers=headers
)
# Clear the data, then send a push promise.
c.clear_outbound_data_buffer()
c.push_stream(
stream_id=1, promised_stream_id=2, request_headers=headers
)
assert c.data_to_send() == pp_frame.serialize()
@pytest.mark.parametrize('headers', strippable_header_blocks)
def test_strippable_headers(self, frame_factory, headers):
"""
Test connection related headers are removed before sending.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
# Clear the data, then try to send headers.
c.clear_outbound_data_buffer()
c.send_headers(1, headers)
f = frame_factory.build_headers_frame(self.base_request_headers)
assert c.data_to_send() == f.serialize()
class TestFilter(object):
"""
Test the filter function directly.
These tests exists to confirm the behaviour of the filter function in a
wide range of scenarios. Many of these scenarios may not be legal for
HTTP/2 and so may never hit the function, but it's worth validating that it
behaves as expected anyway.
"""
validation_functions = [
h2.utilities.validate_headers,
h2.utilities.validate_outbound_headers
]
hdr_validation_combos = [
h2.utilities.HeaderValidationFlags(
is_client, is_trailer, is_response_header, is_push_promise
)
for is_client, is_trailer, is_response_header, is_push_promise in (
itertools.product([True, False], repeat=4)
)
]
hdr_validation_response_headers = [
flags for flags in hdr_validation_combos
if flags.is_response_header
]
hdr_validation_request_headers_no_trailer = [
flags for flags in hdr_validation_combos
if not (flags.is_trailer or flags.is_response_header)
]
invalid_request_header_blocks_bytes = (
# First, missing :method
(
(b':authority', b'google.com'),
(b':path', b'/'),
(b':scheme', b'https'),
),
# Next, missing :path
(
(b':authority', b'google.com'),
(b':method', b'GET'),
(b':scheme', b'https'),
),
# Next, missing :scheme
(
(b':authority', b'google.com'),
(b':method', b'GET'),
(b':path', b'/'),
),
# Finally, path present but empty.
(
(b':authority', b'google.com'),
(b':method', b'GET'),
(b':scheme', b'https'),
(b':path', b''),
),
)
invalid_request_header_blocks_unicode = (
# First, missing :method
(
(u':authority', u'google.com'),
(u':path', u'/'),
(u':scheme', u'https'),
),
# Next, missing :path
(
(u':authority', u'google.com'),
(u':method', u'GET'),
(u':scheme', u'https'),
),
# Next, missing :scheme
(
(u':authority', u'google.com'),
(u':method', u'GET'),
(u':path', u'/'),
),
# Finally, path present but empty.
(
(u':authority', u'google.com'),
(u':method', u'GET'),
(u':scheme', u'https'),
(u':path', u''),
),
)
# All headers that are forbidden from either request or response blocks.
forbidden_request_headers_bytes = (b':status',)
forbidden_request_headers_unicode = (u':status',)
forbidden_response_headers_bytes = (
b':path', b':scheme', b':authority', b':method'
)
forbidden_response_headers_unicode = (
u':path', u':scheme', u':authority', u':method'
)
@pytest.mark.parametrize('validation_function', validation_functions)
@pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)
@given(headers=HEADERS_STRATEGY)
def test_range_of_acceptable_outputs(self,
headers,
validation_function,
hdr_validation_flags):
"""
The header validation functions either return the data unchanged
or throw a ProtocolError.
"""
try:
assert headers == list(validation_function(
headers, hdr_validation_flags))
except h2.exceptions.ProtocolError:
assert True
@pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos)
def test_invalid_pseudo_headers(self, hdr_validation_flags):
headers = [(b':custom', b'value')]
with pytest.raises(h2.exceptions.ProtocolError):
list(h2.utilities.validate_headers(headers, hdr_validation_flags))
@pytest.mark.parametrize('validation_function', validation_functions)
@pytest.mark.parametrize(
'hdr_validation_flags', hdr_validation_request_headers_no_trailer
)
def test_matching_authority_host_headers(self,
validation_function,
hdr_validation_flags):
"""
If a header block has :authority and Host headers and they match,
the headers should pass through unchanged.
"""
headers = [
(b':authority', b'example.com'),
(b':path', b'/'),
(b':scheme', b'https'),
(b':method', b'GET'),
(b'host', b'example.com'),
]
assert headers == list(h2.utilities.validate_headers(
headers, hdr_validation_flags
))
@pytest.mark.parametrize(
'hdr_validation_flags', hdr_validation_response_headers
)
def test_response_header_without_status(self, hdr_validation_flags):
headers = [(b'content-length', b'42')]
with pytest.raises(h2.exceptions.ProtocolError):
list(h2.utilities.validate_headers(headers, hdr_validation_flags))
@pytest.mark.parametrize(
'hdr_validation_flags', hdr_validation_request_headers_no_trailer
)
@pytest.mark.parametrize(
'header_block',
(
invalid_request_header_blocks_bytes +
invalid_request_header_blocks_unicode
)
)
def test_outbound_req_header_missing_pseudo_headers(self,
hdr_validation_flags,
header_block):
with pytest.raises(h2.exceptions.ProtocolError):
list(
h2.utilities.validate_outbound_headers(
header_block, hdr_validation_flags
)
)
@pytest.mark.parametrize(
'hdr_validation_flags', hdr_validation_request_headers_no_trailer
)
@pytest.mark.parametrize(
'header_block', invalid_request_header_blocks_bytes
)
def test_inbound_req_header_missing_pseudo_headers(self,
hdr_validation_flags,
header_block):
with pytest.raises(h2.exceptions.ProtocolError):
list(
h2.utilities.validate_headers(
header_block, hdr_validation_flags
)
)
@pytest.mark.parametrize(
'hdr_validation_flags', hdr_validation_request_headers_no_trailer
)
@pytest.mark.parametrize(
'invalid_header',
forbidden_request_headers_bytes + forbidden_request_headers_unicode
)
def test_outbound_req_header_extra_pseudo_headers(self,
hdr_validation_flags,
invalid_header):
"""
Outbound request header blocks containing the forbidden request headers
fail validation.
"""
headers = [
(b':path', b'/'),
(b':scheme', b'https'),
(b':authority', b'google.com'),
(b':method', b'GET'),
]
headers.append((invalid_header, b'some value'))
with pytest.raises(h2.exceptions.ProtocolError):
list(
h2.utilities.validate_outbound_headers(
headers, hdr_validation_flags
)
)
@pytest.mark.parametrize(
'hdr_validation_flags', hdr_validation_request_headers_no_trailer
)
@pytest.mark.parametrize(
'invalid_header',
forbidden_request_headers_bytes
)
def test_inbound_req_header_extra_pseudo_headers(self,
hdr_validation_flags,
invalid_header):
"""
Inbound request header blocks containing the forbidden request headers
fail validation.
"""
headers = [
(b':path', b'/'),
(b':scheme', b'https'),
(b':authority', b'google.com'),
(b':method', b'GET'),
]
headers.append((invalid_header, b'some value'))
with pytest.raises(h2.exceptions.ProtocolError):
list(h2.utilities.validate_headers(headers, hdr_validation_flags))
@pytest.mark.parametrize(
'hdr_validation_flags', hdr_validation_response_headers
)
@pytest.mark.parametrize(
'invalid_header',
forbidden_response_headers_bytes + forbidden_response_headers_unicode
)
def test_outbound_resp_header_extra_pseudo_headers(self,
hdr_validation_flags,
invalid_header):
"""
Outbound response header blocks containing the forbidden response
headers fail validation.
"""
headers = [(b':status', b'200')]
headers.append((invalid_header, b'some value'))
with pytest.raises(h2.exceptions.ProtocolError):
list(
h2.utilities.validate_outbound_headers(
headers, hdr_validation_flags
)
)
@pytest.mark.parametrize(
'hdr_validation_flags', hdr_validation_response_headers
)
@pytest.mark.parametrize(
'invalid_header',
forbidden_response_headers_bytes
)
def test_inbound_resp_header_extra_pseudo_headers(self,
hdr_validation_flags,
invalid_header):
"""
Inbound response header blocks containing the forbidden response
headers fail validation.
"""
headers = [(b':status', b'200')]
headers.append((invalid_header, b'some value'))
with pytest.raises(h2.exceptions.ProtocolError):
list(h2.utilities.validate_headers(headers, hdr_validation_flags))
class TestOversizedHeaders(object):
"""
Tests that oversized header blocks are correctly rejected. This replicates
the "HPACK Bomb" attack, and confirms that we're resistant against it.
"""
request_header_block = [
(b':method', b'GET'),
(b':authority', b'example.com'),
(b':scheme', b'https'),
(b':path', b'/'),
]
response_header_block = [
(b':status', b'200'),
]
# The first header block contains a single header that fills the header
# table. To do that, we'll give it a single-character header name and a
# 4063 byte header value. This will make it exactly the size of the header
# table. It must come last, so that it evicts all other headers.
# This block must be appended to either a request or response block.
first_header_block = [
(b'a', b'a' * 4063),
]
# The second header "block" is actually a custom HEADERS frame body that
# simply repeatedly refers to the first entry for 16kB. Each byte has the
# high bit set (0x80), and then uses the remaining 7 bits to encode the
# number 62 (0x3e), leading to a repeat of the byte 0xbe.
second_header_block = b'\xbe' * 2**14
server_config = h2.config.H2Configuration(client_side=False)
def test_hpack_bomb_request(self, frame_factory):
"""
A HPACK bomb request causes the connection to be torn down with the
error code ENHANCE_YOUR_CALM.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
c.clear_outbound_data_buffer()
f = frame_factory.build_headers_frame(
self.request_header_block + self.first_header_block
)
data = f.serialize()
c.receive_data(data)
# Build the attack payload.
attack_frame = hyperframe.frame.HeadersFrame(stream_id=3)
attack_frame.data = self.second_header_block
attack_frame.flags.add('END_HEADERS')
data = attack_frame.serialize()
with pytest.raises(h2.exceptions.DenialOfServiceError):
c.receive_data(data)
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=1, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
)
assert c.data_to_send() == expected_frame.serialize()
def test_hpack_bomb_response(self, frame_factory):
"""
A HPACK bomb response causes the connection to be torn down with the
error code ENHANCE_YOUR_CALM.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(
stream_id=1, headers=self.request_header_block
)
c.send_headers(
stream_id=3, headers=self.request_header_block
)
c.clear_outbound_data_buffer()
f = frame_factory.build_headers_frame(
self.response_header_block + self.first_header_block
)
data = f.serialize()
c.receive_data(data)
# Build the attack payload.
attack_frame = hyperframe.frame.HeadersFrame(stream_id=3)
attack_frame.data = self.second_header_block
attack_frame.flags.add('END_HEADERS')
data = attack_frame.serialize()
with pytest.raises(h2.exceptions.DenialOfServiceError):
c.receive_data(data)
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=0, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
)
assert c.data_to_send() == expected_frame.serialize()
def test_hpack_bomb_push(self, frame_factory):
"""
A HPACK bomb push causes the connection to be torn down with the
error code ENHANCE_YOUR_CALM.
"""
c = h2.connection.H2Connection()
c.initiate_connection()
c.send_headers(
stream_id=1, headers=self.request_header_block
)
c.clear_outbound_data_buffer()
f = frame_factory.build_headers_frame(
self.response_header_block + self.first_header_block
)
data = f.serialize()
c.receive_data(data)
# Build the attack payload. We need to shrink it by four bytes because
# the promised_stream_id consumes four bytes of body.
attack_frame = hyperframe.frame.PushPromiseFrame(stream_id=3)
attack_frame.promised_stream_id = 2
attack_frame.data = self.second_header_block[:-4]
attack_frame.flags.add('END_HEADERS')
data = attack_frame.serialize()
with pytest.raises(h2.exceptions.DenialOfServiceError):
c.receive_data(data)
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=0, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
)
assert c.data_to_send() == expected_frame.serialize()
def test_reject_headers_when_list_size_shrunk(self, frame_factory):
"""
When we've shrunk the header list size, we reject new header blocks
that violate the new size.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
c.clear_outbound_data_buffer()
# Receive the first request, which causes no problem.
f = frame_factory.build_headers_frame(
stream_id=1,
headers=self.request_header_block
)
data = f.serialize()
c.receive_data(data)
# Now, send a settings change. It's un-ACKed at this time. A new
# request arrives, also without incident.
c.update_settings({h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 50})
c.clear_outbound_data_buffer()
f = frame_factory.build_headers_frame(
stream_id=3,
headers=self.request_header_block
)
data = f.serialize()
c.receive_data(data)
# We get a SETTINGS ACK.
f = frame_factory.build_settings_frame({}, ack=True)
data = f.serialize()
c.receive_data(data)
# Now a third request comes in. This explodes.
f = frame_factory.build_headers_frame(
stream_id=5,
headers=self.request_header_block
)
data = f.serialize()
with pytest.raises(h2.exceptions.DenialOfServiceError):
c.receive_data(data)
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=3, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM
)
assert c.data_to_send() == expected_frame.serialize()
def test_reject_headers_when_table_size_shrunk(self, frame_factory):
"""
When we've shrunk the header table size, we reject header blocks that
do not respect the change.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
c.clear_outbound_data_buffer()
# Receive the first request, which causes no problem.
f = frame_factory.build_headers_frame(
stream_id=1,
headers=self.request_header_block
)
data = f.serialize()
c.receive_data(data)
# Now, send a settings change. It's un-ACKed at this time. A new
# request arrives, also without incident.
c.update_settings({h2.settings.SettingCodes.HEADER_TABLE_SIZE: 128})
c.clear_outbound_data_buffer()
f = frame_factory.build_headers_frame(
stream_id=3,
headers=self.request_header_block
)
data = f.serialize()
c.receive_data(data)
# We get a SETTINGS ACK.
f = frame_factory.build_settings_frame({}, ack=True)
data = f.serialize()
c.receive_data(data)
# Now a third request comes in. This explodes, as it does not contain
# a dynamic table size update.
f = frame_factory.build_headers_frame(
stream_id=5,
headers=self.request_header_block
)
data = f.serialize()
with pytest.raises(h2.exceptions.ProtocolError):
c.receive_data(data)
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=3, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
)
assert c.data_to_send() == expected_frame.serialize()
def test_reject_headers_exceeding_table_size(self, frame_factory):
"""
When the remote peer sends a dynamic table size update that exceeds our
setting, we reject it.
"""
c = h2.connection.H2Connection(config=self.server_config)
c.receive_data(frame_factory.preamble())
c.clear_outbound_data_buffer()
# Receive the first request, which causes no problem.
f = frame_factory.build_headers_frame(
stream_id=1,
headers=self.request_header_block
)
data = f.serialize()
c.receive_data(data)
# Now a second request comes in that sets the table size too high.
# This explodes.
frame_factory.change_table_size(c.local_settings.header_table_size + 1)
f = frame_factory.build_headers_frame(
stream_id=5,
headers=self.request_header_block
)
data = f.serialize()
with pytest.raises(h2.exceptions.ProtocolError):
c.receive_data(data)
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR
)
assert c.data_to_send() == expected_frame.serialize()
|
py | 1a315fa5569934ac89a98118a3529c0e26445245 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : tests\test_model\test_resnet.py
# @Time : 2022-05-03 12:15:10
# @Author : Bingjie Yan
# @Email : [email protected]
# @License : Apache License 2.0
import torch
from torch import optim
from torch.utils.data import DataLoader
from fedhf.api import opts
from fedhf.model import build_model, build_optimizer
from fedhf.dataset import build_dataset
class TestResnet(object):
args = opts().parse([
'--model', 'resnet_mnist', '--num_classes', '10', '--model_pretrained', '--dataset',
'mnist', '--gpus', '-1', '--task', 'classification', '--resize', '--input_c', '1',
'--image_size', '224'
])
def test_resnet(self):
model = build_model(self.args.model)(self.args)
print(model)
assert model.__class__.__name__ == 'ResNetMNIST'
assert model.net.__class__.__name__ == 'ResNet'
assert model.num_classes == 10
assert model.net.fc.out_features == 10
dataset = build_dataset(self.args.dataset)(self.args)
dataloader = DataLoader(dataset.trainset, batch_size=1, shuffle=False)
model = model.to(self.args.device)
model.train()
for data, target in dataloader:
output = model(data)
assert output.shape == (1, 10)
assert output.dtype == torch.float32
assert output.device == torch.device('cpu')
break
model.save() |
py | 1a315ff0ead527ccfbe5e6757a14649d274551ed | import re
from typing import List
import numpy as np
from pandas.util._decorators import Appender, deprecate_kwarg
from pandas.core.dtypes.common import is_extension_array_dtype, is_list_like
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.missing import notna
from pandas.core.arrays import Categorical
import pandas.core.common as com
from pandas.core.frame import DataFrame, _shared_docs
from pandas.core.indexes.api import Index, MultiIndex
from pandas.core.reshape.concat import concat
from pandas.core.tools.numeric import to_numeric
@Appender(
_shared_docs["melt"]
% dict(caller="pd.melt(df, ", versionadded="", other="DataFrame.melt")
)
def melt(
frame: DataFrame,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
) -> DataFrame:
# TODO: what about the existing index?
# If multiindex, gather names of columns on all level for checking presence
# of `id_vars` and `value_vars`
if isinstance(frame.columns, MultiIndex):
cols = [x for c in frame.columns for x in c]
else:
cols = list(frame.columns)
if id_vars is not None:
if not is_list_like(id_vars):
id_vars = [id_vars]
elif isinstance(frame.columns, MultiIndex) and not isinstance(id_vars, list):
raise ValueError(
"id_vars must be a list of tuples when columns are a MultiIndex"
)
else:
# Check that `id_vars` are in frame
id_vars = list(id_vars)
missing = Index(com.flatten(id_vars)).difference(cols)
if not missing.empty:
raise KeyError(
"The following 'id_vars' are not present "
f"in the DataFrame: {list(missing)}"
)
else:
id_vars = []
if value_vars is not None:
if not is_list_like(value_vars):
value_vars = [value_vars]
elif isinstance(frame.columns, MultiIndex) and not isinstance(value_vars, list):
raise ValueError(
"value_vars must be a list of tuples when columns are a MultiIndex"
)
else:
value_vars = list(value_vars)
# Check that `value_vars` are in frame
missing = Index(com.flatten(value_vars)).difference(cols)
if not missing.empty:
raise KeyError(
"The following 'value_vars' are not present in "
f"the DataFrame: {list(missing)}"
)
frame = frame.loc[:, id_vars + value_vars]
else:
frame = frame.copy()
if col_level is not None: # allow list or other?
# frame is a copy
frame.columns = frame.columns.get_level_values(col_level)
if var_name is None:
if isinstance(frame.columns, MultiIndex):
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = [f"variable_{i}" for i in range(len(frame.columns.names))]
else:
var_name = [
frame.columns.name if frame.columns.name is not None else "variable"
]
if isinstance(var_name, str):
var_name = [var_name]
N, K = frame.shape
K -= len(id_vars)
mdata = {}
for col in id_vars:
id_data = frame.pop(col)
if is_extension_array_dtype(id_data):
id_data = concat([id_data] * K, ignore_index=True)
else:
id_data = np.tile(id_data._values, K)
mdata[col] = id_data
mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame._values.ravel("F")
for i, col in enumerate(var_name):
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns._get_level_values(i)).repeat(N)
return frame._constructor(mdata, columns=mcolumns)
@deprecate_kwarg(old_arg_name="label", new_arg_name=None)
def lreshape(data: DataFrame, groups, dropna: bool = True, label=None) -> DataFrame:
"""
    Reshape wide-format data to long. Generalized inverse of DataFrame.pivot.
Parameters
----------
data : DataFrame
groups : dict
{new_name : list_of_columns}
dropna : boolean, default True
Examples
--------
>>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],
... 'team': ['Red Sox', 'Yankees'],
... 'year1': [2007, 2007], 'year2': [2008, 2008]})
>>> data
hr1 hr2 team year1 year2
0 514 545 Red Sox 2007 2008
1 573 526 Yankees 2007 2008
>>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})
team year hr
0 Red Sox 2007 514
1 Yankees 2007 573
2 Red Sox 2008 545
3 Yankees 2008 526
Returns
-------
reshaped : DataFrame
"""
if isinstance(groups, dict):
keys = list(groups.keys())
values = list(groups.values())
else:
keys, values = zip(*groups)
all_cols = list(set.union(*[set(x) for x in values]))
id_cols = list(data.columns.difference(all_cols))
K = len(values[0])
for seq in values:
if len(seq) != K:
raise ValueError("All column lists must be same length")
mdata = {}
pivot_cols = []
for target, names in zip(keys, values):
to_concat = [data[col]._values for col in names]
mdata[target] = concat_compat(to_concat)
pivot_cols.append(target)
for col in id_cols:
mdata[col] = np.tile(data[col]._values, K)
if dropna:
mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)
for c in pivot_cols:
mask &= notna(mdata[c])
if not mask.all():
mdata = {k: v[mask] for k, v in mdata.items()}
return data._constructor(mdata, columns=id_cols + pivot_cols)
def wide_to_long(
df: DataFrame, stubnames, i, j, sep: str = "", suffix: str = r"\d+"
) -> DataFrame:
r"""
Wide panel to long format. Less flexible but more user-friendly than melt.
With stubnames ['A', 'B'], this function expects to find one or more
group of columns with format
A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,...
You specify what you want to call this suffix in the resulting long format
with `j` (for example `j='year'`)
Each row of these wide variables are assumed to be uniquely identified by
`i` (can be a single column name or a list of column names)
All remaining variables in the data frame are left intact.
Parameters
----------
df : DataFrame
The wide-format DataFrame.
stubnames : str or list-like
The stub name(s). The wide format variables are assumed to
start with the stub names.
i : str or list-like
Column(s) to use as id variable(s).
j : str
The name of the sub-observation variable. What you wish to name your
suffix in the long format.
sep : str, default ""
A character indicating the separation of the variable names
in the wide format, to be stripped from the names in the long format.
For example, if your column names are A-suffix1, A-suffix2, you
can strip the hyphen by specifying `sep='-'`.
suffix : str, default '\\d+'
A regular expression capturing the wanted suffixes. '\\d+' captures
numeric suffixes. Suffixes with no numbers could be specified with the
negated character class '\\D+'. You can also further disambiguate
suffixes, for example, if your wide variables are of the form
A-one, B-two,.., and you have an unrelated column A-rating, you can
ignore the last one by specifying `suffix='(!?one|two)'`.
.. versionchanged:: 0.23.0
When all suffixes are numeric, they are cast to int64/float64.
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable, with new index
(i, j).
Notes
-----
All extra variables are left untouched. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
in a typical case.
Examples
--------
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> pd.wide_to_long(df, ["A", "B"], i="id", j="year")
... # doctest: +NORMALIZE_WHITESPACE
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
With multiple id columns
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
famid birth ht1 ht2
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 1 2.8
2 3.4
2 1 2.9
2 3.8
3 1 2.2
2 2.9
2 1 1 2.0
2 3.2
2 1 1.8
2 2.8
3 1 1.9
2 2.4
3 1 1 2.2
2 3.3
2 1 2.3
2 3.4
3 1 2.1
2 2.9
Going from long back to wide just takes some creative use of `unstack`
>>> w = l.unstack()
>>> w.columns = w.columns.map('{0[0]}{0[1]}'.format)
>>> w.reset_index()
famid birth ht1 ht2
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
Less wieldy column names are also handled
>>> np.random.seed(0)
>>> df = pd.DataFrame({'A(weekly)-2010': np.random.rand(3),
... 'A(weekly)-2011': np.random.rand(3),
... 'B(weekly)-2010': np.random.rand(3),
... 'B(weekly)-2011': np.random.rand(3),
... 'X' : np.random.randint(3, size=3)})
>>> df['id'] = df.index
>>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
A(weekly)-2010 A(weekly)-2011 B(weekly)-2010 B(weekly)-2011 X id
0 0.548814 0.544883 0.437587 0.383442 0 0
1 0.715189 0.423655 0.891773 0.791725 1 1
2 0.602763 0.645894 0.963663 0.528895 1 2
>>> pd.wide_to_long(df, ['A(weekly)', 'B(weekly)'], i='id',
... j='year', sep='-')
... # doctest: +NORMALIZE_WHITESPACE
X A(weekly) B(weekly)
id year
0 2010 0 0.548814 0.437587
1 2010 1 0.715189 0.891773
2 2010 1 0.602763 0.963663
0 2011 0 0.544883 0.383442
1 2011 1 0.423655 0.791725
2 2011 1 0.645894 0.528895
If we have many columns, we could also use a regex to find our
stubnames and pass that list on to wide_to_long
>>> stubnames = sorted(
... set([match[0] for match in df.columns.str.findall(
... r'[A-B]\(.*\)').values if match != []])
... )
>>> list(stubnames)
['A(weekly)', 'B(weekly)']
All of the above examples have integers as suffixes. It is possible to
have non-integers as suffixes.
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
famid birth ht_one ht_two
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age',
... sep='_', suffix='\w+')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 one 2.8
two 3.4
2 one 2.9
two 3.8
3 one 2.2
two 2.9
2 1 one 2.0
two 3.2
2 one 1.8
two 2.8
3 one 1.9
two 2.4
3 1 one 2.2
two 3.3
2 one 2.3
two 3.4
3 one 2.1
two 2.9
"""
def get_var_names(df, stub: str, sep: str, suffix: str) -> List[str]:
regex = fr"^{re.escape(stub)}{re.escape(sep)}{suffix}$"
pattern = re.compile(regex)
return [col for col in df.columns if pattern.match(col)]
def melt_stub(df, stub: str, i, j, value_vars, sep: str):
newdf = melt(
df,
id_vars=i,
value_vars=value_vars,
value_name=stub.rstrip(sep),
var_name=j,
)
newdf[j] = Categorical(newdf[j])
newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "")
# GH17627 Cast numerics suffixes to int/float
newdf[j] = to_numeric(newdf[j], errors="ignore")
return newdf.set_index(i + [j])
if not is_list_like(stubnames):
stubnames = [stubnames]
else:
stubnames = list(stubnames)
if any(col in stubnames for col in df.columns):
raise ValueError("stubname can't be identical to a column name")
if not is_list_like(i):
i = [i]
else:
i = list(i)
if df[i].duplicated().any():
raise ValueError("the id variables need to uniquely identify each row")
value_vars = [get_var_names(df, stub, sep, suffix) for stub in stubnames]
value_vars_flattened = [e for sublist in value_vars for e in sublist]
id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened))
_melted = [melt_stub(df, s, i, j, v, sep) for s, v in zip(stubnames, value_vars)]
melted = _melted[0].join(_melted[1:], how="outer")
if len(i) == 1:
new = df[id_vars].set_index(i).join(melted)
return new
new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j])
return new
|
py | 1a31604e6e17b3f8cdde7fdca6c5b0cc040f8685 | #!/usr/bin/env python
import ConfigParser
import logging
import os
import string
import sys
import textwrap
import time
from datetime import datetime, timedelta
from optparse import OptionParser
from time import strftime
import sqlalchemy as sa
from sqlalchemy import and_, distinct, false, not_
sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'lib'))
import galaxy.webapps.tool_shed.config as tool_shed_config
import galaxy.webapps.tool_shed.model.mapping
from galaxy.util import (
build_url,
send_mail as galaxy_send_mail
)
log = logging.getLogger()
log.setLevel(10)
log.addHandler(logging.StreamHandler(sys.stdout))
assert sys.version_info[:2] >= (2, 4)
def build_citable_url(host, repository):
return build_url(host, pathspec=['view', repository.user.username, repository.name])
def main():
'''
Script to deprecate any repositories that are older than n days, and have been empty since creation.
'''
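    # Illustrative example (added comment, not in the original script): a typical
    # invocation, assuming this file is saved as deprecate_repositories.py and a
    # Tool Shed config file named tool_shed.ini exists (both names hypothetical):
    #
    #     python deprecate_repositories.py tool_shed.ini --days 30 --info_only --verbose
    #
    # TOOL_SHED_CANONICAL_URL should also be exported if owner notification
    # emails are expected to be sent (see send_mail_to_owner below).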
parser = OptionParser()
parser.add_option("-d", "--days", dest="days", action="store", type="int", help="number of days (14)", default=14)
parser.add_option("-i", "--info_only", action="store_true", dest="info_only", help="info about the requested action", default=False)
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", help="verbose mode, print the name of each repository", default=False)
(options, args) = parser.parse_args()
try:
ini_file = args[0]
except IndexError:
sys.exit("Usage: python %s <tool shed .ini file> [options]" % sys.argv[0])
config_parser = ConfigParser.ConfigParser({'here': os.getcwd()})
config_parser.read(ini_file)
config_dict = {}
for key, value in config_parser.items("app:main"):
config_dict[key] = value
config = tool_shed_config.Configuration(**config_dict)
app = DeprecateRepositoriesApplication(config)
cutoff_time = datetime.utcnow() - timedelta(days=options.days)
now = strftime("%Y-%m-%d %H:%M:%S")
print "\n####################################################################################"
print "# %s - Handling stuff older than %i days" % (now, options.days)
if options.info_only:
print "# Displaying info only ( --info_only )"
deprecate_repositories(app, cutoff_time, days=options.days, info_only=options.info_only, verbose=options.verbose)
def send_mail_to_owner(app, name, owner, email, repositories_deprecated, days=14):
'''
Sends an email to the owner of the provided repository.
'''
smtp_server = app.config.get('smtp_server', None)
from_address = app.config.get('email_from', None)
# Since there is no way to programmatically determine the URL for the tool shed from the .ini file, this method requires that
# an environment variable named TOOL_SHED_CANONICAL_URL be set, pointing to the tool shed that is being checked.
url = os.environ.get('TOOL_SHED_CANONICAL_URL', None)
if None in [smtp_server, from_address]:
print '# Mail not configured, not sending email to repository owner.'
return
elif url is None:
print '# Environment variable TOOL_SHED_CANONICAL_URL not set, not sending email to repository owner.'
return
subject = "Regarding your tool shed repositories at %s" % url
message_body_template = 'The tool shed automated repository checker has discovered that one or more of your repositories hosted ' + \
'at this tool shed url ${url} have remained empty for over ${days} days, so they have been marked as deprecated. If you have plans ' + \
'for these repositories, you can mark them as un-deprecated at any time.'
message_template = string.Template(message_body_template)
body = '\n'.join(textwrap.wrap(message_template.safe_substitute(days=days, url=url), width=95))
body += '\n\n'
body += 'Repositories that were deprecated:\n'
body += '\n'.join([build_citable_url(url, repository) for repository in repositories_deprecated])
try:
        galaxy_send_mail(from_address, email, subject, body, app.config)
        print "# An email has been sent to %s, the owner of %s." % (owner, ', '.join([repository.name for repository in repositories_deprecated]))
return True
except Exception as e:
print "# An error occurred attempting to send email: %s" % str(e)
return False
def deprecate_repositories(app, cutoff_time, days=14, info_only=False, verbose=False):
# This method will get a list of repositories that were created on or before cutoff_time, but have never
# had any metadata records associated with them. Then it will iterate through that list and deprecate the
# repositories, sending an email to each repository owner.
start = time.time()
repository_ids_to_not_check = []
# Get a unique list of repository ids from the repository_metadata table. Any repository ID found in this table is not
# empty, and will not be checked.
metadata_records = sa.select([distinct(app.model.RepositoryMetadata.table.c.repository_id)],
from_obj=app.model.RepositoryMetadata.table) \
.execute()
for metadata_record in metadata_records:
repository_ids_to_not_check.append(metadata_record.repository_id)
# Get the repositories that are A) not present in the above list, and b) older than the specified time.
# This will yield a list of repositories that have been created more than n days ago, but never populated.
repository_query = sa.select([app.model.Repository.table.c.id],
whereclause=and_(app.model.Repository.table.c.create_time < cutoff_time,
app.model.Repository.table.c.deprecated == false(),
app.model.Repository.table.c.deleted == false(),
not_(app.model.Repository.table.c.id.in_(repository_ids_to_not_check))),
from_obj=[app.model.Repository.table])
query_result = repository_query.execute()
repositories = []
repositories_by_owner = {}
repository_ids = [row.id for row in query_result]
# Iterate through the list of repository ids for empty repositories and deprecate them unless info_only is set.
for repository_id in repository_ids:
repository = app.sa_session.query(app.model.Repository) \
.filter(app.model.Repository.table.c.id == repository_id).one()
owner = repository.user
if info_only:
print '# Repository %s owned by %s would have been deprecated, but info_only was set.' % (repository.name, repository.user.username)
else:
if verbose:
print '# Deprecating repository %s owned by %s.' % (repository.name, owner.username)
if owner.username not in repositories_by_owner:
repositories_by_owner[owner.username] = dict(owner=owner, repositories=[])
repositories_by_owner[owner.username]['repositories'].append(repository)
repositories.append(repository)
# Send an email to each repository owner, listing the repositories that were deprecated.
for repository_owner in repositories_by_owner:
for repository in repositories_by_owner[repository_owner]['repositories']:
repository.deprecated = True
app.sa_session.add(repository)
app.sa_session.flush()
owner = repositories_by_owner[repository_owner]['owner']
send_mail_to_owner(app, repository.name, owner.username, owner.email, repositories_by_owner[repository_owner]['repositories'], days)
stop = time.time()
print '# Deprecated %d repositories.' % len(repositories)
print "# Elapsed time: ", stop - start
print "####################################################################################"
class DeprecateRepositoriesApplication(object):
"""Encapsulates the state of a Universe application"""
def __init__(self, config):
if config.database_connection is False:
config.database_connection = "sqlite:///%s?isolation_level=IMMEDIATE" % config.database
# Setup the database engine and ORM
self.model = galaxy.webapps.tool_shed.model.mapping.init(config.file_path, config.database_connection, engine_options={}, create_tables=False)
self.config = config
@property
def sa_session(self):
"""
Returns a SQLAlchemy session -- currently just gets the current
session from the threadlocal session context, but this is provided
to allow migration toward a more SQLAlchemy 0.4 style of use.
"""
return self.model.context.current
def shutdown(self):
pass
if __name__ == "__main__":
main()
|
py | 1a3160952f966945be8d68741914f321358a5fa0 | """
The main purpose of this module is to expose LinkCollector.collect_sources().
"""
import cgi
import collections
import functools
import itertools
import logging
import os
import re
import urllib.parse
import urllib.request
import xml.etree.ElementTree
from html.parser import HTMLParser
from optparse import Values
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Iterable,
List,
MutableMapping,
NamedTuple,
Optional,
Sequence,
Tuple,
Union,
)
from pipenv.patched.notpip._vendor import html5lib, requests
from pipenv.patched.notpip._vendor.requests import Response
from pipenv.patched.notpip._vendor.requests.exceptions import RetryError, SSLError
from pipenv.patched.notpip._internal.exceptions import NetworkConnectionError
from pipenv.patched.notpip._internal.models.link import Link
from pipenv.patched.notpip._internal.models.search_scope import SearchScope
from pipenv.patched.notpip._internal.network.session import PipSession
from pipenv.patched.notpip._internal.network.utils import raise_for_status
from pipenv.patched.notpip._internal.utils.filetypes import is_archive_file
from pipenv.patched.notpip._internal.utils.misc import pairwise, redact_auth_from_url
from pipenv.patched.notpip._internal.vcs import vcs
from .sources import CandidatesFromPage, LinkSource, build_source
if TYPE_CHECKING:
from typing import Protocol
else:
Protocol = object
logger = logging.getLogger(__name__)
HTMLElement = xml.etree.ElementTree.Element
ResponseHeaders = MutableMapping[str, str]
def _match_vcs_scheme(url: str) -> Optional[str]:
"""Look for VCS schemes in the URL.
Returns the matched VCS scheme, or None if there's no match.
"""
for scheme in vcs.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in "+:":
return scheme
return None
class _NotHTML(Exception):
def __init__(self, content_type: str, request_desc: str) -> None:
super().__init__(content_type, request_desc)
self.content_type = content_type
self.request_desc = request_desc
def _ensure_html_header(response: Response) -> None:
"""Check the Content-Type header to ensure the response contains HTML.
Raises `_NotHTML` if the content type is not text/html.
"""
content_type = response.headers.get("Content-Type", "")
if not content_type.lower().startswith("text/html"):
raise _NotHTML(content_type, response.request.method)
class _NotHTTP(Exception):
pass
def _ensure_html_response(url: str, session: PipSession) -> None:
"""Send a HEAD request to the URL, and ensure the response contains HTML.
Raises `_NotHTTP` if the URL is not available for a HEAD request, or
`_NotHTML` if the content type is not text/html.
"""
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
if scheme not in {"http", "https"}:
raise _NotHTTP()
resp = session.head(url, allow_redirects=True)
raise_for_status(resp)
_ensure_html_header(resp)
def _get_html_response(url: str, session: PipSession) -> Response:
"""Access an HTML page with GET, and return the response.
This consists of three parts:
1. If the URL looks suspiciously like an archive, send a HEAD first to
check the Content-Type is HTML, to avoid downloading a large file.
Raise `_NotHTTP` if the content type cannot be determined, or
`_NotHTML` if it is not HTML.
2. Actually perform the request. Raise HTTP exceptions on network failures.
3. Check the Content-Type header to make sure we got HTML, and raise
`_NotHTML` otherwise.
"""
if is_archive_file(Link(url).filename):
_ensure_html_response(url, session=session)
logger.debug("Getting page %s", redact_auth_from_url(url))
resp = session.get(
url,
headers={
"Accept": "text/html",
            # We don't want to blindly return cached data for
            # /simple/, because authors are generally expecting that
# twine upload && pip install will function, but if
# they've done a pip install in the last ~10 minutes
# it won't. Thus by setting this to zero we will not
# blindly use any cached data, however the benefit of
# using max-age=0 instead of no-cache, is that we will
# still support conditional requests, so we will still
# minimize traffic sent in cases where the page hasn't
# changed at all, we will just always incur the round
# trip for the conditional GET now instead of only
# once per 10 minutes.
# For more information, please see pypa/pip#5670.
"Cache-Control": "max-age=0",
},
)
raise_for_status(resp)
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of an url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
_ensure_html_header(resp)
return resp
def _get_encoding_from_headers(headers: ResponseHeaders) -> Optional[str]:
"""Determine if we have any encoding information in our headers."""
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
return params["charset"]
return None
def _determine_base_url(document: HTMLElement, page_url: str) -> str:
"""Determine the HTML document's base URL.
This looks for a ``<base>`` tag in the HTML document. If present, its href
attribute denotes the base URL of anchor tags in the document. If there is
no such tag (or if it does not have a valid href attribute), the HTML
file's URL is used as the base URL.
:param document: An HTML document representation. The current
implementation expects the result of ``html5lib.parse()``.
:param page_url: The URL of the HTML document.
TODO: Remove when `html5lib` is dropped.
"""
for base in document.findall(".//base"):
href = base.get("href")
if href is not None:
return href
return page_url
def _clean_url_path_part(part: str) -> str:
"""
Clean a "part" of a URL path (i.e. after splitting on "@" characters).
"""
# We unquote prior to quoting to make sure nothing is double quoted.
return urllib.parse.quote(urllib.parse.unquote(part))
def _clean_file_url_path(part: str) -> str:
"""
Clean the first part of a URL path that corresponds to a local
filesystem path (i.e. the first part after splitting on "@" characters).
"""
# We unquote prior to quoting to make sure nothing is double quoted.
# Also, on Windows the path part might contain a drive letter which
# should not be quoted. On Linux where drive letters do not
# exist, the colon should be quoted. We rely on urllib.request
# to do the right thing here.
return urllib.request.pathname2url(urllib.request.url2pathname(part))
# percent-encoded: /
_reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE)
def _clean_url_path(path: str, is_local_path: bool) -> str:
"""
Clean the path portion of a URL.
"""
if is_local_path:
clean_func = _clean_file_url_path
else:
clean_func = _clean_url_path_part
# Split on the reserved characters prior to cleaning so that
# revision strings in VCS URLs are properly preserved.
parts = _reserved_chars_re.split(path)
cleaned_parts = []
for to_clean, reserved in pairwise(itertools.chain(parts, [""])):
cleaned_parts.append(clean_func(to_clean))
# Normalize %xx escapes (e.g. %2f -> %2F)
cleaned_parts.append(reserved.upper())
return "".join(cleaned_parts)
def _clean_link(url: str) -> str:
"""
Make sure a link is fully quoted.
For example, if ' ' occurs in the URL, it will be replaced with "%20",
and without double-quoting other characters.
"""
# Split the URL into parts according to the general structure
# `scheme://netloc/path;parameters?query#fragment`.
result = urllib.parse.urlparse(url)
# If the netloc is empty, then the URL refers to a local filesystem path.
is_local_path = not result.netloc
path = _clean_url_path(result.path, is_local_path=is_local_path)
return urllib.parse.urlunparse(result._replace(path=path))
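# Illustrative example (added comment, not in the original source): with the
# cleaning rules above, a raw space is percent-encoded while already-quoted
# characters are not double-quoted, e.g.
#
#     _clean_link("https://example.com/some path/file%20name")
#     # -> "https://example.com/some%20path/file%20name"
#
# and "@" / "%2F" are preserved so VCS revision markers survive cleaning.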
def _create_link_from_element(
element_attribs: Dict[str, Optional[str]],
page_url: str,
base_url: str,
) -> Optional[Link]:
"""
Convert an anchor element's attributes in a simple repository page to a Link.
"""
href = element_attribs.get("href")
if not href:
return None
url = _clean_link(urllib.parse.urljoin(base_url, href))
pyrequire = element_attribs.get("data-requires-python")
yanked_reason = element_attribs.get("data-yanked")
link = Link(
url,
comes_from=page_url,
requires_python=pyrequire,
yanked_reason=yanked_reason,
)
return link
class CacheablePageContent:
def __init__(self, page: "HTMLPage") -> None:
assert page.cache_link_parsing
self.page = page
def __eq__(self, other: object) -> bool:
return isinstance(other, type(self)) and self.page.url == other.page.url
def __hash__(self) -> int:
return hash(self.page.url)
class ParseLinks(Protocol):
def __call__(
self, page: "HTMLPage", use_deprecated_html5lib: bool
) -> Iterable[Link]:
...
def with_cached_html_pages(fn: ParseLinks) -> ParseLinks:
"""
Given a function that parses an Iterable[Link] from an HTMLPage, cache the
function's result (keyed by CacheablePageContent), unless the HTMLPage
`page` has `page.cache_link_parsing == False`.
"""
@functools.lru_cache(maxsize=None)
def wrapper(
cacheable_page: CacheablePageContent, use_deprecated_html5lib: bool
) -> List[Link]:
return list(fn(cacheable_page.page, use_deprecated_html5lib))
@functools.wraps(fn)
def wrapper_wrapper(page: "HTMLPage", use_deprecated_html5lib: bool) -> List[Link]:
if page.cache_link_parsing:
return wrapper(CacheablePageContent(page), use_deprecated_html5lib)
return list(fn(page, use_deprecated_html5lib))
return wrapper_wrapper
def _parse_links_html5lib(page: "HTMLPage") -> Iterable[Link]:
"""
Parse an HTML document, and yield its anchor elements as Link objects.
TODO: Remove when `html5lib` is dropped.
"""
document = html5lib.parse(
page.content,
transport_encoding=page.encoding,
namespaceHTMLElements=False,
)
url = page.url
base_url = _determine_base_url(document, url)
for anchor in document.findall(".//a"):
link = _create_link_from_element(
anchor.attrib,
page_url=url,
base_url=base_url,
)
if link is None:
continue
yield link
@with_cached_html_pages
def parse_links(page: "HTMLPage", use_deprecated_html5lib: bool) -> Iterable[Link]:
"""
Parse an HTML document, and yield its anchor elements as Link objects.
"""
if use_deprecated_html5lib:
yield from _parse_links_html5lib(page)
return
parser = HTMLLinkParser(page.url)
encoding = page.encoding or "utf-8"
parser.feed(page.content.decode(encoding))
url = page.url
base_url = parser.base_url or url
for anchor in parser.anchors:
link = _create_link_from_element(
anchor,
page_url=url,
base_url=base_url,
)
if link is None:
continue
yield link
class HTMLPage:
"""Represents one page, along with its URL"""
def __init__(
self,
content: bytes,
encoding: Optional[str],
url: str,
cache_link_parsing: bool = True,
) -> None:
"""
:param encoding: the encoding to decode the given content.
:param url: the URL from which the HTML was downloaded.
:param cache_link_parsing: whether links parsed from this page's url
should be cached. PyPI index urls should
have this set to False, for example.
"""
self.content = content
self.encoding = encoding
self.url = url
self.cache_link_parsing = cache_link_parsing
def __str__(self) -> str:
return redact_auth_from_url(self.url)
class HTMLLinkParser(HTMLParser):
"""
HTMLParser that keeps the first base HREF and a list of all anchor
elements' attributes.
"""
def __init__(self, url: str) -> None:
super().__init__(convert_charrefs=True)
self.url: str = url
self.base_url: Optional[str] = None
self.anchors: List[Dict[str, Optional[str]]] = []
def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
if tag == "base" and self.base_url is None:
href = self.get_href(attrs)
if href is not None:
self.base_url = href
elif tag == "a":
self.anchors.append(dict(attrs))
def get_href(self, attrs: List[Tuple[str, Optional[str]]]) -> Optional[str]:
for name, value in attrs:
if name == "href":
return value
return None
def _handle_get_page_fail(
link: Link,
reason: Union[str, Exception],
meth: Optional[Callable[..., None]] = None,
) -> None:
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
def _make_html_page(response: Response, cache_link_parsing: bool = True) -> HTMLPage:
encoding = _get_encoding_from_headers(response.headers)
return HTMLPage(
response.content,
encoding=encoding,
url=response.url,
cache_link_parsing=cache_link_parsing,
)
def _get_html_page(
link: Link, session: Optional[PipSession] = None
) -> Optional["HTMLPage"]:
if session is None:
raise TypeError(
"_get_html_page() missing 1 required keyword argument: 'session'"
)
url = link.url.split("#", 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
vcs_scheme = _match_vcs_scheme(url)
if vcs_scheme:
logger.warning(
"Cannot look at %s URL %s because it does not support lookup as web pages.",
vcs_scheme,
link,
)
return None
# Tack index.html onto file:// URLs that point to directories
scheme, _, path, _, _, _ = urllib.parse.urlparse(url)
if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith("/"):
url += "/"
url = urllib.parse.urljoin(url, "index.html")
logger.debug(" file: URL is directory, getting %s", url)
try:
resp = _get_html_response(url, session=session)
except _NotHTTP:
logger.warning(
"Skipping page %s because it looks like an archive, and cannot "
"be checked by a HTTP HEAD request.",
link,
)
except _NotHTML as exc:
logger.warning(
"Skipping page %s because the %s request got Content-Type: %s."
"The only supported Content-Type is text/html",
link,
exc.request_desc,
exc.content_type,
)
except NetworkConnectionError as exc:
_handle_get_page_fail(link, exc)
except RetryError as exc:
_handle_get_page_fail(link, exc)
except SSLError as exc:
reason = "There was a problem confirming the ssl certificate: "
reason += str(exc)
_handle_get_page_fail(link, reason, meth=logger.info)
except requests.ConnectionError as exc:
_handle_get_page_fail(link, f"connection error: {exc}")
except requests.Timeout:
_handle_get_page_fail(link, "timed out")
else:
return _make_html_page(resp, cache_link_parsing=link.cache_link_parsing)
return None
class CollectedSources(NamedTuple):
find_links: Sequence[Optional[LinkSource]]
index_urls: Sequence[Optional[LinkSource]]
class LinkCollector:
"""
Responsible for collecting Link objects from all configured locations,
making network requests as needed.
The class's main method is its collect_sources() method.
"""
def __init__(
self,
session: PipSession,
search_scope: SearchScope,
index_lookup: Optional[Dict[str, List[str]]] = None,
) -> None:
self.search_scope = search_scope
self.session = session
self.index_lookup = index_lookup if index_lookup else {}
@classmethod
def create(
cls,
session: PipSession,
options: Values,
suppress_no_index: bool = False,
index_lookup: Optional[Dict[str, List[str]]] = None,
) -> "LinkCollector":
"""
:param session: The Session to use to make requests.
:param suppress_no_index: Whether to ignore the --no-index option
when constructing the SearchScope object.
"""
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index and not suppress_no_index:
logger.debug(
"Ignoring indexes: %s",
",".join(redact_auth_from_url(url) for url in index_urls),
)
index_urls = []
# Make sure find_links is a list before passing to create().
find_links = options.find_links or []
search_scope = SearchScope.create(
find_links=find_links, index_urls=index_urls, index_lookup=index_lookup
)
link_collector = LinkCollector(
session=session, search_scope=search_scope, index_lookup=index_lookup
)
return link_collector
@property
def find_links(self) -> List[str]:
return self.search_scope.find_links
def fetch_page(self, location: Link) -> Optional[HTMLPage]:
"""
Fetch an HTML page containing package links.
"""
return _get_html_page(location, session=self.session)
def collect_sources(
self,
project_name: str,
candidates_from_page: CandidatesFromPage,
) -> CollectedSources:
# The OrderedDict calls deduplicate sources by URL.
index_url_sources = collections.OrderedDict(
build_source(
loc,
candidates_from_page=candidates_from_page,
page_validator=self.session.is_secure_origin,
expand_dir=False,
cache_link_parsing=False,
)
for loc in self.search_scope.get_index_urls_locations(project_name)
).values()
find_links_sources = collections.OrderedDict(
build_source(
loc,
candidates_from_page=candidates_from_page,
page_validator=self.session.is_secure_origin,
expand_dir=True,
cache_link_parsing=True,
)
for loc in self.find_links
).values()
if logger.isEnabledFor(logging.DEBUG):
lines = [
f"* {s.link}"
for s in itertools.chain(find_links_sources, index_url_sources)
if s is not None and s.link is not None
]
lines = [
f"{len(lines)} location(s) to search "
f"for versions of {project_name}:"
] + lines
logger.debug("\n".join(lines))
return CollectedSources(
find_links=list(find_links_sources),
index_urls=list(index_url_sources),
)
|
py | 1a3161113421c66731f766b78bae3abed5a994c1 | # coding: utf-8
# 2019/12/30 @ tongshiwei
import pytest
from CangJie.Features import Stroke, character_glyph, CDict
from CangJie import token2stroke, token2radical, char_features
def test_features():
cdict = CDict.from_file()
char_features("一")
assert len(cdict.get_stroke("一s")) == 1
assert len(cdict.get_radical(["一二", "三"])) == 2
cdict = CDict.from_file(allow_missing=False)
with pytest.raises(KeyError):
assert len(cdict.get_stroke("一s")) == 1
with pytest.raises(TypeError):
print(cdict.get_stroke(123))
def test_stroke():
stroke = Stroke.from_file()
assert len(stroke["一"]) == 1
assert len(stroke["一二"]) == 3
assert len(stroke[["一", "二"]]) == 2
with pytest.raises(TypeError):
print(stroke[123])
assert stroke["s"] == ""
stroke = Stroke.from_file(allow_missing=False)
with pytest.raises(KeyError):
assert stroke["s"] == ""
assert len(token2stroke("一s")) == 1
def test_radical():
token2radical("一")
@pytest.mark.skip(reason="require simsun, which are usually unavailable in most testing platform")
def test_glyph():
character_glyph("一")
|
py | 1a31617c125fc64e3614b43946abc6c332e0149a | # -*- coding: utf-8 -*-
# Copyright (c) 2013, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
This module contains manual annotations for the gl backends. Together
with the header files, we can generate the full ES 2.0 API.
Every function annotation consists of sections that apply to one or
more backends. If no backends are specified in the first section, it
applies to all backends.
"""
import ctypes
## bind / gen / delete stuff
def deleteBuffer(buffer):
# --- desktop angle
n = 1
buffers = (ctypes.c_uint*n)(buffer)
()
# --- pyopengl
GL.glDeleteBuffers(1, [buffer])
def deleteFramebuffer(framebuffer):
# --- desktop angle
n = 1
framebuffers = (ctypes.c_uint*n)(framebuffer)
()
# --- pyopengl
FBO.glDeleteFramebuffers(1, [framebuffer])
def deleteRenderbuffer(renderbuffer):
# --- desktop angle
n = 1
renderbuffers = (ctypes.c_uint*n)(renderbuffer)
()
# --- pyopengl
FBO.glDeleteRenderbuffers(1, [renderbuffer])
def deleteTexture(texture):
# --- desktop angle
n = 1
textures = (ctypes.c_uint*n)(texture)
()
# --- pyopengl
GL.glDeleteTextures([texture])
def createBuffer():
# --- desktop angle
n = 1
buffers = (ctypes.c_uint*n)()
()
return buffers[0]
# --- pyopengl
return GL.glGenBuffers(1)
# --- mock
return 1
def createFramebuffer():
# --- desktop angle
n = 1
framebuffers = (ctypes.c_uint*n)()
()
return framebuffers[0]
# --- pyopengl
return FBO.glGenFramebuffers(1)
# --- mock
return 1
def createRenderbuffer():
# --- desktop angle
n = 1
renderbuffers = (ctypes.c_uint*n)()
()
return renderbuffers[0]
# --- pyopengl
return FBO.glGenRenderbuffers(1)
# --- mock
return 1
def createTexture():
# --- desktop angle
n = 1
textures = (ctypes.c_uint*n)()
()
return textures[0]
# --- pyopengl
return GL.glGenTextures(1)
# --- mock
return 1
## Image stuff
def texImage2D(target, level, internalformat, format, type, pixels):
border = 0
# --- desktop angle
if isinstance(pixels, (tuple, list)):
height, width = pixels
pixels = ctypes.c_void_p(0)
pixels = None
else:
if not pixels.flags['C_CONTIGUOUS']:
pixels = pixels.copy('C')
pixels_ = pixels
pixels = pixels_.ctypes.data
height, width = pixels_.shape[:2]
()
# --- pyopengl
if isinstance(pixels, (tuple, list)):
height, width = pixels
pixels = None
else:
height, width = pixels.shape[:2]
GL.glTexImage2D(target, level, internalformat, width, height, border, format, type, pixels)
def texSubImage2D(target, level, xoffset, yoffset, format, type, pixels):
# --- desktop angle
if not pixels.flags['C_CONTIGUOUS']:
pixels = pixels.copy('C')
pixels_ = pixels
pixels = pixels_.ctypes.data
height, width = pixels_.shape[:2]
()
# --- pyopengl
height, width = pixels.shape[:2]
GL.glTexSubImage2D(target, level, xoffset, yoffset, width, height, format, type, pixels)
def readPixels(x, y, width, height, format, type):
# --- desktop angle mock
# GL_ALPHA, GL_RGB, GL_RGBA
t = {6406:1, 6407:3, 6408:4}[format]
# we kind of only support type GL_UNSIGNED_BYTE
size = int(width*height*t)
# --- desktop angle
pixels = ctypes.create_string_buffer(size)
()
return pixels[:]
# --- mock
return size * b'\x00'
def compressedTexImage2D(target, level, internalformat, width, height, border=0, data=None):
# border = 0 # set in args
# --- desktop angle
if not data.flags['C_CONTIGUOUS']:
data = data.copy('C')
data_ = data
size = data_.size
data = data_.ctypes.data
()
# --- pyopengl
size = data.size
GL.glCompressedTexImage2D(target, level, internalformat, width, height, border, size, data)
def compressedTexSubImage2D(target, level, xoffset, yoffset, width, height, format, data):
# --- desktop angle
if not data.flags['C_CONTIGUOUS']:
data = data.copy('C')
data_ = data
size = data_.size
data = data_.ctypes.data
()
# --- pyopengl
size = data.size
GL.glCompressedTexSubImage2D(target, level, xoffset, yoffset, width, height, format, size, data)
## Buffer data
def bufferData(target, data, usage):
""" Data can be numpy array or the size of data to allocate.
"""
# --- desktop angle
if isinstance(data, int):
size = data
data = ctypes.c_voidp(0)
else:
if not data.flags['C_CONTIGUOUS'] or not data.flags['ALIGNED']:
data = data.copy('C')
data_ = data
size = data_.nbytes
data = data_.ctypes.data
()
# --- pyopengl
if isinstance(data, int):
size = data
data = None
else:
size = data.nbytes
GL.glBufferData(target, size, data, usage)
def bufferSubData(target, offset, data):
# --- desktop angle
if not data.flags['C_CONTIGUOUS']:
data = data.copy('C')
data_ = data
size = data_.nbytes
data = data_.ctypes.data
()
# --- pyopengl
size = data.nbytes
GL.glBufferSubData(target, offset, size, data)
def drawElements(mode, count, type, offset):
# --- desktop angle
if offset is None:
offset = ctypes.c_void_p(0)
elif isinstance(offset, ctypes.c_void_p):
pass
elif isinstance(offset, (int, ctypes.c_int)):
offset = ctypes.c_void_p(int(offset))
else:
if not offset.flags['C_CONTIGUOUS']:
offset = offset.copy('C')
offset_ = offset
offset = offset.ctypes.data
indices = offset
()
# --- pyopengl
if offset is None:
offset = ctypes.c_void_p(0)
elif isinstance(offset, (int, ctypes.c_int)):
offset = ctypes.c_void_p(int(offset))
()
def vertexAttribPointer(indx, size, type, normalized, stride, offset):
# --- desktop angle
if offset is None:
offset = ctypes.c_void_p(0)
elif isinstance(offset, ctypes.c_void_p):
pass
elif isinstance(offset, (int, ctypes.c_int)):
offset = ctypes.c_void_p(int(offset))
else:
if not offset.flags['C_CONTIGUOUS']:
offset = offset.copy('C')
offset_ = offset
offset = offset.ctypes.data
# We need to ensure that the data exists at draw time :(
# PyOpenGL does this too
key = '_vert_attr_'+str(indx)
setattr(glVertexAttribPointer, key, offset_)
ptr = offset
()
# --- pyopengl
if offset is None:
offset = ctypes.c_void_p(0)
elif isinstance(offset, (int, ctypes.c_int)):
offset = ctypes.c_void_p(int(offset))
()
def bindAttribLocation(program, index, name):
# --- desktop angle
name = ctypes.c_char_p(name.encode('utf-8'))
()
# --- pyopengl
name = name.encode('utf-8')
()
## Setters
def shaderSource(shader, source):
# Some implementation do not like getting a list of single chars
if isinstance(source, (tuple, list)):
strings = [s for s in source]
else:
strings = [source]
# --- desktop angle
count = len(strings)
string = (ctypes.c_char_p*count)(*[s.encode('utf-8') for s in strings])
length = (ctypes.c_int*count)(*[len(s) for s in strings])
()
# --- pyopengl
GL.glShaderSource(shader, strings)
## Getters
def _getBooleanv(pname):
# --- desktop angle
params = (ctypes.c_bool*1)()
()
return params[0]
def _getIntegerv(pname):
# --- desktop angle
n = 16
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*n)(*[d for i in range(n)])
()
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
def _getFloatv(pname):
# --- desktop angle
n = 16
d = float('Inf')
params = (ctypes.c_float*n)(*[d for i in range(n)])
()
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
# def _getString(pname):
# # --- desktop angle
# ()
# return res.value
# # --- mock
# return ''
def getParameter(pname):
if pname in [33902, 33901, 32773, 3106, 2931, 2928,
2849, 32824, 10752, 32938]:
# GL_ALIASED_LINE_WIDTH_RANGE GL_ALIASED_POINT_SIZE_RANGE
# GL_BLEND_COLOR GL_COLOR_CLEAR_VALUE GL_DEPTH_CLEAR_VALUE
# GL_DEPTH_RANGE GL_LINE_WIDTH GL_POLYGON_OFFSET_FACTOR
# GL_POLYGON_OFFSET_UNITS GL_SAMPLE_COVERAGE_VALUE
return _glGetFloatv(pname)
elif pname in [7936, 7937, 7938, 35724, 7939]:
# GL_VENDOR, GL_RENDERER, GL_VERSION, GL_SHADING_LANGUAGE_VERSION,
# GL_EXTENSIONS are strings
pass # string handled below
else:
return _glGetIntegerv(pname)
name = pname
# --- desktop angle
()
return res.decode('utf-8') if res else ''
# --- pyopengl
res = GL.glGetString(pname)
return res.decode('utf-8')
def getUniform(program, location):
# --- desktop angle
n = 16
d = float('Inf')
params = (ctypes.c_float*n)(*[d for i in range(n)])
()
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
# --- pyopengl
n = 16
d = float('Inf')
params = (ctypes.c_float*n)(*[d for i in range(n)])
GL.glGetUniformfv(program, location, params)
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
def getVertexAttrib(index, pname):
# --- desktop angle
n = 4
d = float('Inf')
params = (ctypes.c_float*n)(*[d for i in range(n)])
()
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
# --- pyopengl
# From PyOpenGL v3.1.0 the glGetVertexAttribfv(index, pname) does
# work, but it always returns 4 values, with zeros in the empty
# spaces. We have no way to tell whether they are empty or genuine
# zeros. Fortunately, pyopengl also supports the old syntax.
n = 4
d = float('Inf')
params = (ctypes.c_float*n)(*[d for i in range(n)])
GL.glGetVertexAttribfv(index, pname, params)
params = [p for p in params if p!=d]
if len(params) == 1:
return params[0]
else:
return tuple(params)
def getTexParameter(target, pname):
# --- desktop angle
d = float('Inf')
params = (ctypes.c_float*1)(d)
()
return params[0]
def getActiveAttrib(program, index):
# --- desktop angle pyopengl
bufsize = 256
length = (ctypes.c_int*1)()
size = (ctypes.c_int*1)()
type = (ctypes.c_uint*1)()
name = ctypes.create_string_buffer(bufsize)
# --- desktop angle
()
name = name[:length[0]].decode('utf-8')
return name, size[0], type[0]
# --- pyopengl
# pyopengl has a bug, this is a patch
GL.glGetActiveAttrib(program, index, bufsize, length, size, type, name)
name = name[:length[0]].decode('utf-8')
return name, size[0], type[0]
# --- mock
return 'mock_val', 1, 5126
def getVertexAttribOffset(index, pname):
# --- desktop angle
pointer = (ctypes.c_void_p*1)()
()
return pointer[0] or 0
# --- pyopengl
try: # maybe the fixed it
()
except TypeError:
pointer = (ctypes.c_void_p*1)()
GL.glGetVertexAttribPointerv(index, pname, pointer)
return pointer[0] or 0
# --- mock
return 0
def getActiveUniform(program, index):
# --- desktop angle
bufsize = 256
length = (ctypes.c_int*1)()
size = (ctypes.c_int*1)()
type = (ctypes.c_uint*1)()
name = ctypes.create_string_buffer(bufsize)
()
name = name[:length[0]].decode('utf-8')
return name, size[0], type[0]
# --- pyopengl
name, size, type = GL.glGetActiveUniform(program, index)
return name.decode('utf-8'), size, type
def getAttachedShaders(program):
# --- desktop angle
maxcount = 256
count = (ctypes.c_int*1)()
shaders = (ctypes.c_uint*maxcount)()
()
return tuple(shaders[:count[0]])
def getAttribLocation(program, name):
# --- desktop angle
name = ctypes.c_char_p(name.encode('utf-8'))
()
return res
# --- pyopengl
name = name.encode('utf-8')
()
def getUniformLocation(program, name):
# --- desktop angle
name = ctypes.c_char_p(name.encode('utf-8'))
()
return res
# --- pyopengl
name = name.encode('utf-8')
()
def getProgramInfoLog(program):
# --- desktop angle
bufsize = 1024
length = (ctypes.c_int*1)()
infolog = ctypes.create_string_buffer(bufsize)
()
return infolog[:length[0]].decode('utf-8')
# --- pyopengl
res = GL.glGetProgramInfoLog(program)
return res.decode('utf-8')
def getShaderInfoLog(shader):
# --- desktop angle
bufsize = 1024
length = (ctypes.c_int*1)()
infolog = ctypes.create_string_buffer(bufsize)
()
return infolog[:length[0]].decode('utf-8')
# --- pyopengl
res = GL.glGetShaderInfoLog(shader)
return res.decode('utf-8')
def getProgramParameter(program, pname):
# --- desktop angle
params = (ctypes.c_int*1)()
()
return params[0]
def getShaderParameter(shader, pname):
# --- desktop angle
params = (ctypes.c_int*1)()
()
return params[0]
def getShaderPrecisionFormat(shadertype, precisiontype):
# --- desktop angle
range = (ctypes.c_int*1)()
precision = (ctypes.c_int*1)()
()
return range[0], precision[0]
def getShaderSource(shader):
# --- desktop angle
bufsize = 1024*1024
length = (ctypes.c_int*1)()
source = (ctypes.c_char*bufsize)()
()
return source.value[:length[0]].decode('utf-8')
# --- pyopengl
res = GL.glGetShaderSource(shader)
return res.decode('utf-8')
def getBufferParameter(target, pname):
# --- desktop angle
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*1)(d)
()
return params[0]
def getFramebufferAttachmentParameter(target, attachment, pname):
# --- desktop angle
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*1)(d)
()
return params[0]
# --- pyopengl
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*1)(d)
FBO.glGetFramebufferAttachmentParameteriv(target, attachment, pname, params)
return params[0]
def getRenderbufferParameter(target, pname):
# --- desktop angle
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*1)(d)
()
return params[0]
# --- pyopengl
d = -2**31 # smallest 32bit integer
params = (ctypes.c_int*1)(d)
FBO.glGetRenderbufferParameteriv(target, pname, params)
return params[0]
## ============================================================================
class FunctionAnnotation:
def __init__(self, name, args, output):
self.name = name
self.args = args
self.output = output
        self.lines = []  # code lines that make up this function's annotation
def __repr__(self):
return '<FunctionAnnotation for %s>' % self.name
def get_lines(self, call, backend):
""" Get the lines for this function based on the given backend.
The given API call is inserted at the correct location.
"""
backend_selector = backend # first lines are for all backends
lines = []
for line in self.lines:
if line.lstrip().startswith('# ---'):
backend_selector = line
continue
if backend in backend_selector:
if line.strip() == '()':
indent = line.split('(')[0][4:]
line = indent + call
lines.append(line)
return lines
def is_arg_set(self, name):
""" Get whether a given variable name is set.
This allows checking whether a variable that is an input to the C
function is not an input for the Python function, and may be an output.
"""
needle = '%s =' % name
        for line in self.lines:
if line.startswith(needle):
return True
else:
return False
def parse_anotations():
""" Parse this annotations file and produce a dictionary of
FunctionAnnotation objects.
"""
functions = {}
function = None
for line in open(__file__, 'rt').readlines():
# Stop?
if '='*40 in line:
break
if line.startswith('def '):
name = line.split(' ')[1].split('(')[0]
args = line.split('(')[1].split(')')[0].split(', ')
args = [arg for arg in args if arg]
out = line.partition('->')[2].strip()
function = FunctionAnnotation(name, args, out)
functions[name] = function
continue
elif not function:
continue
# Add line
line = line.rstrip()
indent = len(line) - len(line.strip())
if line.strip() and indent >=4:
function.lines.append(line)
return functions
if __name__ == '__main__':
print(parse_anotations().keys())
|
py | 1a316288ff0920df3694b3b53b9d92cd4c7fed63 | import json
import logging
import ssl
import requests
import socket
import websocket
import websocket._exceptions
logger = logging.getLogger(__name__)
class MattermostAPI(object):
def __init__(self, url, ssl_verify, token):
self.url = url
self.token = token
self.initial = None
self.default_team_id = None # the first team in API returned value
self.teams_channels_ids = None # struct:{team_id:[channel_id,...],...}
self.ssl_verify = ssl_verify
if not ssl_verify:
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
def _get_headers(self):
return {"Authorization": "Bearer " + self.token}
def channel(self, channel_id):
channel = {'channel': self.get('/channels/{}'.format(channel_id))}
return channel
def create_reaction(self, user_id, post_id, emoji_name):
return self.post(
'/reactions',
{
'user_id': user_id,
'post_id': post_id,
'emoji_name': emoji_name,
})
def create_post(self, user_id, channel_id, message, files=None, pid="", props={}):
# create_at = int(time.time() * 1000)
return self.post(
'/posts',
{
'channel_id': channel_id,
'message': message,
'file_ids': files or [],
'root_id': pid,
'props': props
})
@staticmethod
    def create_user_dict(v4_dict):
new_dict = {}
new_dict[v4_dict['id']] = v4_dict
return new_dict
def get(self, request):
return json.loads(
requests.get(
self.url + request,
headers=self._get_headers(),
verify=self.ssl_verify
).text)
def get_channel_by_name(self, channel_name, team_id=None):
return self.get('/teams/{}/channels/name/{}'.format(
team_id, channel_name))
def get_channels(self, team_id=None):
if team_id is None:
team_id = self.default_team_id
return self.get('/users/me/teams/{}/channels'.format(team_id))
def get_file_link(self, file_id):
return self.get('/files/{}/link'.format(file_id))
def get_team_by_name(self, team_name):
return self.get('/teams/name/{}'.format(team_name))
def get_team_id(self, channel_id):
for team_id, channels in self.teams_channels_ids.items():
if channel_id in channels:
return team_id
return None
def get_user_info(self, user_id):
return self.get('/users/{}'.format(user_id))
def hooks_create(self, **kwargs):
return self.post(
'/hooks/incoming', kwargs)
def hooks_get(self, webhook_id):
return self.get(
'/hooks/incoming/{}'.format(webhook_id))
def hooks_list(self):
return self.get('/hooks/incoming')
@staticmethod
def in_webhook(url, channel, text, username=None, as_user=None,
parse=None, link_names=None, attachments=None,
unfurl_links=None, unfurl_media=None, icon_url=None,
icon_emoji=None, ssl_verify=True, **kwargs):
return requests.post(
url, data={
'payload': json.dumps({
'channel': channel,
'text': text,
'username': username,
'as_user': as_user,
'parse': parse,
'link_names': link_names,
'attachments': attachments,
'unfurl_links': unfurl_links,
'unfurl_media': unfurl_media,
'icon_url': icon_url,
'icon_emoji': icon_emoji})
}, verify=ssl_verify)
def login(self, team, account, password):
props = {'login_id': account, 'password': password}
response = self._login(props)
if response.status_code in [301, 302, 307]:
# reset self.url to the new URL
self.url = response.headers['Location'].replace(
'/users/login', '')
# re-try login if redirected
response = self._login(props)
if response.status_code == 200:
self.token = response.headers["Token"]
self.load_initial_data()
user = json.loads(response.text)
return user
else:
response.raise_for_status()
def _login(self, props):
return requests.post(
self.url + '/users/login',
data=json.dumps(props),
verify=self.ssl_verify,
allow_redirects=False)
def load_initial_data(self):
self.teams = self.get('/users/me/teams')
if len(self.teams) == 0:
raise AssertionError(
'User account of this bot does not join any team yet.')
self.default_team_id = self.teams[0]['id']
self.teams_channels_ids = {}
for team in self.teams:
self.teams_channels_ids[team['id']] = []
# get all channels belonging to each team
for channel in self.get_channels(team['id']):
self.teams_channels_ids[team['id']].append(channel['id'])
def me(self):
return self.get('/users/me')
def post(self, request, data):
return json.loads(requests.post(
self.url + request,
headers=self._get_headers(),
data=json.dumps(data),
verify=self.ssl_verify
).text)
def update_post(self, message_id, user_id, channel_id,
message, files=None, pid=""):
return self.post(
'/posts/%s' % message_id,
{
'message': message,
})
def user(self, user_id):
return self.get_user_info(user_id)
def upload_file(self, file, channel_id):
files = {
'files': file,
'channel_id': (None, channel_id)
}
return json.loads(requests.post(
self.url + '/files',
headers=self._get_headers(),
files=files,
verify=self.ssl_verify
).text)
class MattermostClient(object):
def __init__(self, url, team, email, password, ssl_verify=True,
token=None, ws_origin=None):
self.users = {}
self.channels = {}
self.mentions = {}
self.api = MattermostAPI(url, ssl_verify, token)
self.user = None
self.websocket = None
self.email = None
self.team = team
self.email = email
self.password = password
self.ws_origin = ws_origin
if token:
self.user = self.api.me()
else:
self.login(team, email, password)
def login(self, team, email, password):
self.email = email
self.user = self.api.login(team, email, password)
return self.user
def react_msg(self, post_id, emoji_name):
return self.api.create_reaction(self.user["id"],
post_id, emoji_name)
def channel_msg(self, channel, message, files=None, pid="", props={}):
c_id = self.channels.get(channel, {}).get("id") or channel
return self.api.create_post(self.user["id"], c_id, "{}".format(message),
files, pid, props=props)
def update_msg(self, message_id, channel, message, pid=""):
c_id = self.channels.get(channel, {}).get("id") or channel
return self.api.update_post(message_id, self.user["id"],
c_id, message, pid=pid)
def connect_websocket(self):
host = self.api.url.replace('http', 'ws').replace('https', 'wss')
url = host + '/websocket'
self._connect_websocket(url, cookie_name='MMAUTHTOKEN')
return self.websocket.getstatus() == 101
def _connect_websocket(self, url, cookie_name):
self.websocket = websocket.create_connection(
url, header=["Cookie: %s=%s" % (cookie_name, self.api.token)],
origin=self.ws_origin,
sslopt={
"cert_reqs": ssl.CERT_REQUIRED if self.api.ssl_verify
else ssl.CERT_NONE})
def messages(self, ignore_own_msg=False, filter_actions=None):
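        # Stream events from the Mattermost websocket, reconnecting on websocket errors;
        # only events whose type appears in filter_actions are yielded, and the bot's own
        # posts are skipped when ignore_own_msg is True.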
filter_actions = filter_actions or []
if not self.connect_websocket():
return
while True:
try:
data = self.websocket.recv()
except websocket._exceptions.WebSocketException:
if not self.connect_websocket():
raise
continue
if data:
try:
post = json.loads(data)
event_action = post.get('event')
if event_action not in filter_actions:
continue
if event_action == 'posted':
if post.get('data', {}).get('post'):
dp = json.loads(post['data']['post'])
if ignore_own_msg is True and dp.get("user_id"):
if self.user["id"] == dp["user_id"]:
continue
yield post
elif event_action in ['added_to_team', 'leave_team',
'user_added', 'user_removed']:
self.api.load_initial_data() # reload teams & channels
except ValueError:
pass
def ping(self):
try:
self.websocket.ping()
except socket.error:
logger.error('\n'.join([
'socket.error while pinging the mattermost server',
'possible causes: expired cookie or broken socket pipe'
]))
if not self.connect_websocket(): # try to re-connect
logger.info('reconnecting websocket ... failed')
else:
logger.info('reconnecting websocket ... succeeded')
|
py | 1a316314458a38a967e4635ef1a0e4b7c94f20ac | '''
This module is for DiffChecker class.
'''
import sys
import os
import logging
from importlib import reload
import pickle
import pandas as pd
import numpy as np
sys.path.append('../')
from mlqa import checkers as ch
class DiffChecker():
'''Integrated QA performer on pd.DataFrame with logging functionality.
It only works in numerical columns.
Args:
qa_level (str): quick set for QA level, can be one of ['loose', 'mid', 'strict']
logger (str or logging.Logger): 'print' for print only, every other
str creates a file for logging. using external logging.Logger object
is highly recommended, i.e. logger=<mylogger>.
qa_log_level (int): qa message logging level
log_info (bool): `True` if method calls or arguments also need to be
logged
Notes:
Although `DiffChecker <identifiers.html#identifiers.DiffChecker>`_ is
able to create a `Logger <https://docs.python.org/3/library/logging.html#logging.Logger>`_
object by just passing a file name (i.e. `logger='mylog.log'`), creating
the `Logger <https://docs.python.org/3/library/logging.html#logging.Logger>`_
object externally then passing accordingly (i.e. `logger=<mylogger>`)
is highly recommended.
Example:
Basic usage:
>>> dc = DiffChecker()
>>> dc.fit(pd.DataFrame({'mean_col':[1, 2]*50, 'na_col':[None]*50+[1]*50}))
>>> dc.check(pd.DataFrame({'mean_col':[.99, 2.1]*50, 'na_col':[None]*70+[1]*30}))
True
>>> dc.set_threshold(0.1)
>>> dc.check(pd.DataFrame({'mean_col':[.99, 2.1]*50, 'na_col':[None]*70+[1]*30}))
False
Quick set for `qa_level`:
>>> dc = DiffChecker()
>>> dc.threshold
0.5
>>> dc = DiffChecker(qa_level='mid')
>>> dc.threshold
0.2
>>> dc = DiffChecker(qa_level='strict')
>>> dc.threshold
0.1
Logger can also be initiated:
>>> dc = DiffChecker(logger='mylog.log')
>>> dc.fit(pd.DataFrame({'mean_col':[1, 2]*50, 'na_col':[None]*50+[1]*50}))
>>> dc.set_threshold(0.1)
>>> dc.check(pd.DataFrame({'mean_col':[1, 1.5]*50, 'na_col':[None]*70+[1]*30}))
False
'''
stats = []
threshold = 0.0
threshold_df = pd.DataFrame()
df_fit_stats = pd.DataFrame()
def __init__(
self,
qa_level='loose',
logger=None,
qa_log_level=None,
log_info=False
):
# Class logger reloads logging module in each call not to create
# conflict, this is okay as long as this is the only logger in the
# environment. Having external logger is highly recommended in all
# other cases.
if logger == 'print':
logging.shutdown()
reload(logging)
logging.basicConfig(
format='%(asctime)-15s %(message)s',
level='DEBUG')
self.logger = logging.getLogger('DiffCheckerLogIdToPrint')
elif isinstance(logger, str):
logging.shutdown()
reload(logging)
handler = logging.FileHandler(logger, mode='w+')
handler.setFormatter(logging.Formatter(
fmt='%(levelname)s|%(asctime)s|%(message)s'))
self.logger = logging.getLogger('DiffCheckerLogIdToDump')
self.logger.setLevel(logging.DEBUG)
self.logger.addHandler(handler)
else:
# if external logger provided
self.logger = logger
self.log_level = qa_log_level or 30
self.log_info = log_info
qa_levels = {
'loose':{
'stats':['mean', ch.na_rate],
'threshold':.5
},
'mid':{
'stats':['mean', 'std', ch.na_rate],
'threshold':.2
},
'strict':{
'stats':['mean', 'std', 'count', 'min', 'max', ch.na_rate],
'threshold':.1
}
}
if qa_level not in qa_levels.keys():
raise ValueError('`qa_level` not right, choose one of {}'\
.format(qa_levels.keys()))
self.set_stats(qa_levels[qa_level]['stats'])
self.set_threshold(qa_levels[qa_level]['threshold'])
def set_stats(self, funcs):
'''Sets statistic functions list to check by.
Args:
funcs (list): list of functions and/or function names,
e.g. [np.sum, 'mean']
See Also:
`add_stat <#identifiers.DiffChecker.add_stat>`_: just to add one
'''
if not self.df_fit_stats.empty:
raise ValueError('self.stats cannot be altered after `fit()` call')
if not isinstance(funcs, list):
raise TypeError('`funcs` must be a list')
self._method_init_logger(locals())
self.stats = funcs
def add_stat(self, func):
'''Appends a statistic function into the existing list (i.e. `stats <#identifiers.DiffChecker.stats>`_).
Args:
func (func): function name (e.g. np.sum or 'mean')
See Also:
`set_stats <#identifiers.DiffChecker.set_stats>`_: to reset all
'''
if not self.df_fit_stats.empty:
raise ValueError('self.stats cannot be altered after `fit()` call')
if not (isinstance(func, str) or callable(func)):
raise TypeError('`func` must be str or callable')
if func in self.stats:
raise ValueError('`func` is already in `self.stats`')
self._method_init_logger(locals())
self.stats.append(func)
def set_threshold(self, threshold):
'''Sets threshold for statistic-column pairs.
Args:
threshold (float or dict): can be used to set for all or column
statistic pairs.
Example:
>>> dc = DiffChecker()
>>> dc.set_stats(['mean', 'max'])
>>> dc.set_threshold(0.1) # to reset all thresholds
>>> print(dc.threshold)
0.1
>>> dc.fit(pd.DataFrame({'col1':[1, 2, 3, 4], 'col2':[0]*4}))
>>> dc.set_threshold({'col1':0.2, 'col2':0.1}) # to set in column level
>>> print(dc.threshold_df)
col1 col2
mean 0.2 0.1
max 0.2 0.1
>>> dc.set_threshold({'col1':{'mean':0.3}}) # to set in column-stat level
>>> print(dc.threshold_df)
col1 col2
mean 0.3 0.1
max 0.2 0.1
'''
self._method_init_logger(locals())
if isinstance(threshold, dict):
if self.df_fit_stats.empty:
raise ValueError('call `fit()` first for column level threshold')
for col, v1 in threshold.items():
if col not in self.df_fit_stats.columns:
raise ValueError('{} not found in fitted DataFrame'\
.format(col))
if isinstance(v1, dict):
for stat, v2 in v1.items():
if stat not in self.df_fit_stats.index:
raise ValueError(
"'{0}' not set as stat, available stats are {1}"\
.format(stat, self.df_fit_stats.index.tolist()))
th = float(v2)
assert th >= 0
self.threshold_df.loc[stat, col] = th
else:
th = float(v1)
assert th >= 0
self.threshold_df.loc[:, col] = th
else:
th = float(threshold)
assert th >= 0
self.threshold = th
def fit(self, df):
'''Fits given `df`.
Based on given `df` and `stats <#identifiers.DiffChecker.stats>`_ attribute, this method constructs
`df_fit_stats <#identifiers.DiffChecker.df_fit_stats>`_ attribute to store column statistics. This is later to
be used by `check <#identifiers.DiffChecker.check>`_ method. Only works
in numerical columns.
Args:
df (pd.DataFrame): data to be fit
Example:
>>> dc = DiffChecker()
>>> dc.set_stats(['mean', 'max'])
>>> dc.fit(pd.DataFrame({'col1':[1, 2, 3, 4], 'col2':[0]*4}))
>>> print(dc.df_fit_stats)
col1 col2
mean 2.5 0.0
max 4.0 0.0
'''
assert isinstance(self.stats, list) and len(self.stats) >= 1
if not isinstance(df, pd.DataFrame):
raise TypeError('`df` must be a pd.DataFrame')
self._method_init_logger(locals())
self.df_fit_stats = pd.DataFrame()
for col in df.columns:
if pd.api.types.is_numeric_dtype(df[col]):
for stat in self.stats:
if isinstance(stat, str):
stat_name = stat
else:
stat_name = stat.__name__
self.df_fit_stats.loc[stat_name, col] = df[col].agg(stat)
self.threshold_df = self.df_fit_stats.copy()
self.threshold_df.loc[:, :] = np.NaN
def check(self, df_to_check, columns=None, columns_to_exclude=None):
'''Checks given `df_to_check` based on fitted `df` stats.
For each column stat pairs, it checks if stat is in given threshold by
utilizing `qa_array_statistics <checkers.html#checkers.qa_array_statistics>`_.
If any stat qa fails, returns `False`, `True otherwise`.
Args:
df_to_check (pd.DataFrame): data to check
columns (None or list): if given, only these columns will be
considered for qa
columns_to_exclude (None or list): columns to exclude from qa
Returns:
bool: is QA passed or not
Example:
>>> dc = DiffChecker()
>>> dc.set_threshold(0.2)
>>> dc.set_stats(['mean', 'max', np.sum])
>>> dc.fit(pd.DataFrame({'col1':[1, 2, 3, 4], 'col2':[1]*4}))
>>> dc.check(pd.DataFrame({'col1':[1, 2, 3, 4], 'col2':[0]*4}))
False
>>> dc.check(pd.DataFrame({'col1':[1, 2.1, 3.2, 4.2], 'col2':[1.1]*4}))
True
'''
assert isinstance(self.stats, list) and len(self.stats) >= 1
if not isinstance(df_to_check, pd.DataFrame):
raise TypeError('`df_to_check` must be a pd.DataFrame')
if columns is not None and columns_to_exclude is not None:
raise ValueError('only one must be given, '
'`columns` or `columns_to_exclude`')
if columns is not None:
if not isinstance(columns, list):
raise TypeError('`columns` must be a list')
if columns_to_exclude is not None:
if not isinstance(columns_to_exclude, list):
raise TypeError('`columns_to_exclude` must be a list')
self._method_init_logger(locals())
cols_to_check = self.df_fit_stats.columns.tolist()
if columns:
cols_to_check = list(set(cols_to_check) & set(columns))
if columns_to_exclude:
cols_to_check = [c for c in cols_to_check if c not \
in columns_to_exclude]
qa_results = []
for col in cols_to_check:
for stat in self.stats:
if isinstance(stat, str):
stat_name = stat
else:
stat_name = stat.__name__
th = self.threshold_df.loc[stat_name, col]
th = self.threshold if pd.isna(th) else th
val = self.df_fit_stats.loc[stat_name, col]
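                # accept values inside a relative band of ±threshold around the fitted statistic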
tol = abs(val)*th
ll, ul = val-tol, val+tol
result = ch.qa_array_statistics(
df_to_check[col],
{stat:[ll, ul]},
logger=self.logger,
log_level=self.log_level,
name=col)
qa_results.append(result)
return all(qa_results)
def to_pickle(self, path='DiffChecker.pkl'):
'''Pickle (serialize) object to a file.
Args:
path (str): file path where the pickled object will be stored
Example:
To save a `*.pkl` file:
>>> dc1 = DiffChecker()
>>> dc1.fit(pd.DataFrame({'col1':[1, 2, 3, 4], 'col2':[0]*4}))
>>> dc1.to_pickle(path='DiffChecker.pkl')
To load the same object later:
>>> import pickle
>>> pkl_file = open('DiffChecker.pkl', 'rb')
>>> dc2 = pickle.load(pkl_file)
>>> pkl_file.close()
>>> os.remove('DiffChecker.pkl')
'''
self._method_init_logger(locals())
self.logger = None
output = open(path, 'wb')
pickle.dump(self, output, -1)
output.close()
def _method_init_logger(self, args, exclude=['self']):
'''Logs method initiation with given arguments.
Args:
args (dict): local arguments, i.e. `locals()`
exclude (list): arguments to exclude, e.g. `self`
'''
if self.logger and self.log_info:
method_name = sys._getframe(1).f_code.co_name
self.logger.info("{} initiated.".format(method_name))
for k, v in args.items():
if k not in exclude:
self.logger.info(method_name+' locals: '+k+'='+str(v)[:100])
if __name__ == "__main__":
import doctest
doctest.testmod()
|
py | 1a316333940ef90c618d631e5324c322853f9d2c | from pymoo.algorithms.nsga2 import NSGA2
from pymoo.optimize import minimize
from pymoo.problems.multi.srn import SRN
from pymoo.visualization.scatter import Scatter
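# NSGA-II on the constrained bi-objective SRN test problem; the two plots below compare
# the obtained solutions against the analytical Pareto set (design space) and Pareto front.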
problem = SRN()
algorithm = NSGA2(pop_size=100)
res = minimize(problem,
algorithm,
# ('n_gen', 1000),
seed=1,
verbose=True)
plot = Scatter()
plot.add(problem.pareto_set(), plot_type="line", color="black", alpha=0.7)
plot.add(res.X, color="red")
plot.show()
plot = Scatter()
plot.add(problem.pareto_front(), plot_type="line", color="black", alpha=0.7)
plot.add(res.F, color="red")
plot.show()
|
py | 1a3163938d29cc8641e415f0219ecbcd4ea79c84 | import sys
sys.path += ['../utils']
import csv
from tqdm import tqdm
import collections
import gzip
import pickle
import numpy as np
import faiss
import os
import pytrec_eval
import json
from msmarco_eval import quality_checks_qids, compute_metrics, load_reference
# location for dumpped query and passage/document embeddings which is output_dir
#checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_12_02_04/ann_data/'
# checkpoint = 150000 # embedding from which checkpoint(ie: 200000)
# data_type = 0 # 0 for document, 1 for passage
# test_set = 1 # 0 for dev_set, 1 for eval_set
# raw_data_dir = '/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/'
# processed_data_dir = '/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/ann_data_roberta-base-fast-doc_512'
# checkpoint_path ='/home/dihe/Projects/data/raw_data/exp_12_02_04/ann_data/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-doc_512'
# checkpoint = 0
# data_type = 0
# test_set = 1
# checkpoint_path ='/home/dihe/Projects/data/raw_data/test_roberta_decode_doc/ann_data/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-doc_512'
#--------------------------------------------------------------------------------------
# checkpoint = 0
# data_type = 0
# test_set = 0
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_12_19_01/ann_data2/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-docdev_512'
# checkpoint = 0
# data_type = 0
# test_set =0
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_12_23_02/ann_data400000/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-docdev_512'
# checkpoint = 0
# data_type = 0
# test_set = 0
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_12_23_02/ann_data4/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-docdev_512'
# checkpoint = 0
# data_type = 0
# test_set = 1
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_01_05_09/ann_data820000/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-docdev2_512'
# # processed_data_dir2 = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-docdev2_512'
# # checkpoint_path2 ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_01_05_09/ann_data820000/'
# processed_data_dir2 = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-doceval_512'
# checkpoint_path2 ='/home/dihe/Projects/data/raw_data/eval_exp_01_05_09_820000/ann_data/'
# query_emb_num=4
checkpoint = 0
data_type = 0
test_set = 1
checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_01_05_09/ann_data820000/'
raw_data_dir = '/home/dihe/Projects/data/raw_data/'
processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-docdev2_512'
# processed_data_dir2 = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-docdev2_512'
# checkpoint_path2 ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_01_05_09/ann_data820000/'
processed_data_dir2 = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-doceval_512'
checkpoint_path2 ='/home/dihe/Projects/data/raw_data/eval_exp_01_05_09_820000/ann_data/'
query_emb_num=4
# processed_data_dir2 = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-doceval_dev_512'
# checkpoint_path2 ='/home/dihe/Projects/data/raw_data/eval_exp_01_05_09_820000_dev/ann_data/'
# query_emb_num=4
# checkpoint = 0
# data_type = 1
# test_set = 0
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_12_21_05/ann_data2/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast_512'
# checkpoint = 0
# data_type = 1
# test_set = 0
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_02_03_02/ann_data/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-passsmall5_512'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir2 = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-passsmall5_512'
# checkpoint_path2 ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_02_03_02/ann_data/'
# checkpoint = 0
# data_type = 0
# test_set = 0
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_12_23_02/ann_data400000/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-docdev_512'
# processed_data_dir2 = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-doceval_dev_512'
# checkpoint_path2 ='/home/dihe/Projects/data/raw_data/eval_exp_12_23_02_400000_dev/ann_data/'
#820
# checkpoint = 0
# data_type = 1
# test_set = 0
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_12_23_08/ann_data2/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast_512'
# checkpoint = 0
# data_type = 1
# test_set = 0
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_12_20_03/ann_data2/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast_512'
# checkpoint = 0
# data_type = 1
# test_set = 0
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/exp_12_02_14_01/save/ann_data/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast_512'
# checkpoint = 0
# data_type = 1
# test_set = 0
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/exp_01_07_09/save/ann_data/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast_512'
# checkpoint = 0
# data_type = 1
# test_set = 0
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_12_23_08/ann_data/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast_512'
# checkpoint = 0
# data_type = 1
# test_set = 1
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_11_11_01/ann_data3/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast-passtest_512'
#--------------------------------------------------------------------------------------------
# checkpoint = 180000
# data_type = 1
# test_set = 1
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_12_02_02/ann_data/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast_512'
# checkpoint = 210000
# data_type = 1
# test_set = 1
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_12_02_01/ann_data/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast_512'
# checkpoint = 0
# data_type = 1
# test_set = 0
# checkpoint_path ='/home/dihe/cudnn_file/recommender_shuqi/MIND_data/raw_data/exp_12_11_03/ann_data2/'
# raw_data_dir = '/home/dihe/Projects/data/raw_data/'
# processed_data_dir = '/home/dihe/Projects/data/raw_data/ann_data_roberta-base-fast_512'
if data_type == 0:
topN = 100
else:
topN = 1000
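# MS MARCO convention: rank the top 100 candidates for the document task and the top
# 1000 for the passage task.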
# dev_query_positive_id = {}
# query_positive_id_path = os.path.join(processed_data_dir, "dev-qrel.tsv")
# with open(query_positive_id_path, 'r', encoding='utf8') as f:
# tsvreader = csv.reader(f, delimiter="\t")
# for [topicid, docid, rel] in tsvreader:
# topicid = int(topicid)
# docid = int(docid)
# if topicid not in dev_query_positive_id:
# dev_query_positive_id[topicid] = {}
# dev_query_positive_id[topicid][docid] = int(rel)
qidmap_path = processed_data_dir2+"/qid2offset.pickle"
pidmap_path = processed_data_dir+"/pid2offset.pickle"
if data_type == 0:
if test_set == 1:
query_path = raw_data_dir+"/docleaderboard-queries.tsv"
passage_path = raw_data_dir+"/docleaderboard-top100.tsv"
else:
query_path = raw_data_dir+"/msmarco-docdev-queries.tsv"
passage_path = raw_data_dir+"/msmarco-docdev-top100"
else:
if test_set == 1:
query_path = raw_data_dir+"/msmarco-test2019-queries.tsv"
passage_path = raw_data_dir+"/msmarco-passagetest2019-top1000.tsv"
else:
query_path = raw_data_dir+"/queries.dev.small.tsv"
passage_path = raw_data_dir+"/top1000.dev.tsv"
with open(qidmap_path, 'rb') as handle:
qidmap = pickle.load(handle)
with open(pidmap_path, 'rb') as handle:
pidmap = pickle.load(handle)
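# Invert the offset maps so embedding ids can be traced back to the original query ids
# and document ids (the 'D' prefix of MS MARCO document ids is restored below).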
qidmap_re={}
for item in qidmap:
assert qidmap[item] not in qidmap_re
qidmap_re[qidmap[item]]=item
pidmap_re={}
for item in pidmap:
assert pidmap[item] not in pidmap_re
pidmap_re[pidmap[item]]='D'+str(item)
qset = set()
with gzip.open(query_path, 'rt', encoding='utf-8') if query_path[-2:] == "gz" else open(query_path, 'rt', encoding='utf-8') as f:
tsvreader = csv.reader(f, delimiter="\t")
for [qid, query] in tsvreader:
qset.add(qid)
bm25 = collections.defaultdict(set)
with gzip.open(passage_path, 'rt', encoding='utf-8') if passage_path[-2:] == "gz" else open(passage_path, 'rt', encoding='utf-8') as f:
for line in tqdm(f):
if data_type == 0:
[qid, Q0, pid, rank, score, runstring] = line.split(' ')
pid = pid[1:]
else:
[qid, pid, query, passage] = line.split("\t")
#print('???',qid)
if qid in qset and int(qid) in qidmap:
bm25[qidmap[int(qid)]].add(pidmap[int(pid)])
# else:
# print('???',qid,qid in qset)
#assert 1==0
print("number of queries with " +str(topN) + " BM25 passages:", len(bm25))
def convert_to_string_id(result_dict):
string_id_dict = {}
# format [string, dict[string, val]]
for k, v in result_dict.items():
_temp_v = {}
for inner_k, inner_v in v.items():
_temp_v[str(inner_k)] = inner_v
string_id_dict[str(k)] = _temp_v
return string_id_dict
def EvalDevQuery(query_embedding2id, passage_embedding2id, qidmap_re,pidmap_re, I_nearest_neighbor,topN):
prediction = {} #[qid][docid] = docscore, here we use -rank as score, so the higher the rank (1 > 2), the higher the score (-1 > -2)
w=open('result_eval.txt','w')
total = 0
labeled = 0
Atotal = 0
Alabeled = 0
qids_to_ranked_candidate_passages = {}
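    # result_eval.txt is written as one "query_id <tab> doc_id <tab> rank" line per
    # retrieved candidate, i.e. a plain ranking run file.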
for query_idx in range(len(I_nearest_neighbor)):
seen_pid = set()
query_id = qidmap_re[query_embedding2id[query_idx]]
prediction[query_id] = {}
top_ann_pid = I_nearest_neighbor[query_idx].copy()
selected_ann_idx = top_ann_pid[:topN]
rank = 0
if query_id in qids_to_ranked_candidate_passages:
pass
else:
# By default, all PIDs in the list of 1000 are 0. Only override those that are given
tmp = [0] * 1000
qids_to_ranked_candidate_passages[query_id] = tmp
for idx in selected_ann_idx:
pred_pid = pidmap_re[passage_embedding2id[idx]]
if not pred_pid in seen_pid:
# this check handles multiple vector per document
qids_to_ranked_candidate_passages[query_id][rank]=pred_pid
w.write(str(query_id)+'\t'+str(pred_pid)+'\t'+str(rank+1)+'\n')
# Atotal += 1
# if pred_pid not in dev_query_positive_id[query_id]:
# Alabeled += 1
# if rank < 10:
# total += 1
# if pred_pid not in dev_query_positive_id[query_id]:
# labeled += 1
rank += 1
#prediction[query_id][pred_pid] = -rank
seen_pid.add(pred_pid)
w.close()
dev_query_embedding = []
dev_query_embedding2id = []
passage_embedding = []
passage_embedding2id = []
for i in range(query_emb_num):
#try:
print('???',checkpoint_path2 + "dev_query_"+str(checkpoint)+"__emb_p__data_obj_"+str(i)+".pb")
with open(checkpoint_path2 + "dev_query_"+str(checkpoint)+"__emb_p__data_obj_"+str(i)+".pb", 'rb') as handle:
dev_query_embedding.append(pickle.load(handle))
print('ok1???')
with open(checkpoint_path2 + "dev_query_"+str(checkpoint)+"__embid_p__data_obj_"+str(i)+".pb", 'rb') as handle:
dev_query_embedding2id.append(pickle.load(handle))
print('ok???',2)
for i in range(8):
#try:
# print('???',checkpoint_path2 + "dev_query_"+str(checkpoint)+"__emb_p__data_obj_"+str(i)+".pb")
# with open(checkpoint_path2 + "dev_query_"+str(checkpoint)+"__emb_p__data_obj_"+str(i)+".pb", 'rb') as handle:
# dev_query_embedding.append(pickle.load(handle))
# print('ok1???')
# with open(checkpoint_path2 + "dev_query_"+str(checkpoint)+"__embid_p__data_obj_"+str(i)+".pb", 'rb') as handle:
# dev_query_embedding2id.append(pickle.load(handle))
# print('ok???',2)
with open(checkpoint_path + "passage_"+str(checkpoint)+"__emb_p__data_obj_"+str(i)+".pb", 'rb') as handle:
passage_embedding.append(pickle.load(handle))
print('ok???',3)
with open(checkpoint_path + "passage_"+str(checkpoint)+"__embid_p__data_obj_"+str(i)+".pb", 'rb') as handle:
passage_embedding2id.append(pickle.load(handle))
print('ok???',4)
# except:
# break
if (not dev_query_embedding) or (not dev_query_embedding2id) or (not passage_embedding) or not (passage_embedding2id):
print("No data found for checkpoint: ",checkpoint)
dev_query_embedding = np.concatenate(dev_query_embedding, axis=0)
dev_query_embedding2id = np.concatenate(dev_query_embedding2id, axis=0)
passage_embedding = np.concatenate(passage_embedding, axis=0)
passage_embedding2id = np.concatenate(passage_embedding2id, axis=0)
#full ranking
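# Brute-force (exact) inner-product search with a flat FAISS index; dev_I holds the
# indices of the topN highest-scoring passage embeddings for each query embedding.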
dim = passage_embedding.shape[1]
faiss.omp_set_num_threads(16)
cpu_index = faiss.IndexFlatIP(dim)
cpu_index.add(passage_embedding)
_, dev_I = cpu_index.search(dev_query_embedding, topN)
EvalDevQuery(dev_query_embedding2id, passage_embedding2id, qidmap_re,pidmap_re , dev_I, topN)
|
py | 1a3163a14e6f20b4b315cd22499a2545f6628879 | import binascii
import hashlib
import hmac
import json
import time
from datetime import datetime, timedelta
from itertools import chain
import pytz
from django.contrib.auth.models import User
from django.db.models import Prefetch
from django.forms import ChoiceField, Form, IntegerField, ModelForm, Select
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from c3nav.control.models import UserPermissions, UserSpaceAccess
from c3nav.mapdata.forms import I18nModelFormMixin
from c3nav.mapdata.models import MapUpdate, Space
from c3nav.mapdata.models.access import (AccessPermission, AccessPermissionToken, AccessPermissionTokenItem,
AccessRestriction, AccessRestrictionGroup)
from c3nav.site.models import Announcement
class UserPermissionsForm(ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['review_group_reports'].label_from_instance = lambda obj: obj.title
class Meta:
model = UserPermissions
exclude = ('user', 'max_changeset_changes', 'api_secret')
class AccessPermissionForm(Form):
def __init__(self, request=None, author=None, expire_date=None, *args, **kwargs):
super().__init__(*args, **kwargs)
# remember author if this form is saved
self.author = author or request.user
author_permissions = request.user_permissions if request else author.permissions
self.expire_date = expire_date
# determine which access permissions the author can grant
self.author_access_permissions = AccessPermission.get_for_request_with_expire_date(request, can_grant=True)
access_restrictions = AccessRestriction.objects.filter(
pk__in=self.author_access_permissions.keys()
)
self.access_restrictions = {
access_restriction.pk: access_restriction
for access_restriction in access_restrictions
}
access_restrictions_ids = set(self.access_restrictions.keys())
self.access_restriction_choices = {
'all': self.access_restrictions.values(),
**{str(pk): (access_restriction, ) for pk, access_restriction in self.access_restrictions.items()}
}
# get access permission groups
groups = AccessRestrictionGroup.qs_for_request(request).prefetch_related(
Prefetch('accessrestrictions', AccessRestriction.objects.only('pk'))
)
group_contents = {
group.pk: set(r.pk for r in group.accessrestrictions.all())
for group in groups
}
group_contents = {
pk: restrictions for pk, restrictions in group_contents.items()
if not (restrictions - access_restrictions_ids)
}
self.access_restriction_choices.update({
('g%d' % pk): tuple(
self.access_restrictions[restriction] for restriction in restrictions
) for pk, restrictions in group_contents.items()
})
# construct choice field for access permissions
choices = [('', _('choose permissions…')),
('all', ungettext_lazy('everything possible (%d permission)',
'everything possible (%d permissions)',
len(access_restrictions)) % len(access_restrictions))]
choices.append((_('Access Permission Groups'), tuple(
('g%d' % group.pk, group.title)
for group in groups
)))
choices.append((_('Access Permissions'), tuple(
(str(pk), access_restriction.title)
for pk, access_restriction in self.access_restrictions.items()
)))
self.fields['access_restrictions'] = ChoiceField(choices=choices, required=True)
# construct choices for the expire field
expire_choices = [
('', _('never')),
]
for minutes in range(15, 60, 15):
expire_choices.append(
(str(minutes), ungettext_lazy('in %d minute', 'in %d minutes', minutes) % minutes))
for hours in chain(range(1, 6), range(6, 24, 6)):
expire_choices.append(
(str(hours*60), ungettext_lazy('in %d hour', 'in %d hours', hours) % hours)
)
expire_choices.insert(
5, (str(90), _('in 1½ hour'))
)
for days in range(1, 14):
expire_choices.append(
(str(days*24*60), ungettext_lazy('in %d day', 'in %d days', days) % days)
)
self.fields['expires'] = ChoiceField(required=False, initial='60', choices=expire_choices)
# if applicable, add field to grant pass on permissions
if author_permissions.grant_all_access:
choices = [('0', '---')]*6 + [('1', _('can pass on'))] + [('0', '---')]*3
self.fields['can_grant'] = ChoiceField(required=False, initial='60', choices=choices)
def clean_access_restrictions(self):
data = self.cleaned_data['access_restrictions']
return self.access_restriction_choices[data]
def clean_expires(self):
data = self.cleaned_data['expires']
if data == '':
return None
return timezone.now()+timedelta(minutes=int(data))
def save(self, user):
self._save_code(self._create_code(), user)
def get_token(self, unique_key=None):
# create an AccessPermissionToken from this form and return it
restrictions = []
default_expire_date = self.expire_date or self.cleaned_data['expires']
for restriction in self.cleaned_data['access_restrictions']:
expire_date = default_expire_date
author_expire_date = self.author_access_permissions.get(restriction.pk)
# make sure that each permission is not granted for a longer time than the author has it
if author_expire_date is not None:
expire_date = author_expire_date if expire_date is None else min(expire_date, author_expire_date)
restrictions.append(AccessPermissionTokenItem(pk=restriction.pk, expire_date=expire_date,
title=restriction.title))
return AccessPermissionToken(author=self.author,
can_grant=self.cleaned_data.get('can_grant', '0') == '1',
restrictions=tuple(restrictions),
unique_key=unique_key)
def get_signed_data(self, key=None):
if not self.author.permissions.api_secret:
raise ValueError('Author has no api secret.')
data = {
'id': self.data['access_restrictions'],
'time': int(time.time()),
'valid_until': int(self.cleaned_data['expires'].strftime('%s')),
'author': self.author.pk,
}
if key is not None:
data['key'] = key
data = json.dumps(data, separators=(',', ':'))
signature = hmac.new(self.author.permissions.api_secret.encode(),
msg=data.encode(), digestmod=hashlib.sha256).digest()
return '%s:%s' % (data, binascii.b2a_base64(signature).strip().decode())
@classmethod
def load_signed_data(cls, signed_data: str):
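        # signed_data is '<json payload>:<base64 HMAC-SHA256 signature>' produced by
        # get_signed_data() above and keyed by the author's api_secret; parse it, verify
        # the signature and re-validate every field before building the token.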
if ':' not in signed_data:
raise SignedPermissionDataError('Invalid data.')
raw_data, signature = signed_data.rsplit(':', 1)
try:
data = json.loads(raw_data)
except json.JSONDecodeError:
raise SignedPermissionDataError('Invalid JSON.')
try:
restrictions = data.pop('id')
author_id = data.pop('author')
issue_time = data.pop('time')
valid_until = data.pop('valid_until')
unique_key = data.pop('key', None)
except KeyError as e:
raise SignedPermissionDataError('Missing %s.' % str(e))
for unknown_key in data:
raise SignedPermissionDataError('Unknown value: %s' % unknown_key)
try:
issue_time = int(issue_time)
except ValueError:
raise SignedPermissionDataError('Invalid time.')
try:
valid_until = int(valid_until) if valid_until is not None else None
except ValueError:
raise SignedPermissionDataError('Invalid valid_until.')
else:
valid_until = valid_until and datetime.utcfromtimestamp(valid_until).replace(tzinfo=pytz.utc)
try:
author_id = int(author_id)
except ValueError:
raise SignedPermissionDataError('Invalid author.')
if unique_key is not None and not isinstance(unique_key, str):
raise SignedPermissionDataError('key has to be null or a string.')
if issue_time > time.time()+5:
raise SignedPermissionDataError('time cannot be in the future.')
if issue_time < time.time()-60:
raise SignedPermissionDataError('token has expired.')
if unique_key is not None and not (1 <= len(unique_key) <= 32):
raise SignedPermissionDataError('key has to be 1-32 characters')
try:
author = User.objects.select_related('permissions').get(pk=author_id)
except User.DoesNotExist:
raise SignedPermissionDataError('Author does not exist.')
try:
api_secret = author.permissions.api_secret
except AttributeError:
raise SignedPermissionDataError('Author has no API secret.')
verify_signature = binascii.b2a_base64(hmac.new(api_secret.encode(),
msg=raw_data.encode(), digestmod=hashlib.sha256).digest())
print(verify_signature, signature)
if signature != verify_signature.strip().decode():
raise SignedPermissionDataError('Invalid signature.')
form = cls(author=author, expire_date=valid_until, data={
'access_restrictions': str(restrictions),
})
if not form.is_valid():
raise SignedPermissionDataError(' '.join(form.errors))
return form.get_token(unique_key=unique_key)
class UserSpaceAccessForm(ModelForm):
class Meta:
model = UserSpaceAccess
fields = ('space', 'can_edit')
def __init__(self, *args, request=None, **kwargs):
super().__init__(*args, **kwargs)
self.fields['space'].label_from_instance = lambda obj: obj.title
self.fields['space'].queryset = Space.qs_for_request(request).order_by('slug')
choices = [('0', _('no'))] * 6 + [('1', _('yes'))] + [('0', _('no'))] * 3
self.fields['can_edit'].widget = Select(choices=choices)
class SignedPermissionDataError(Exception):
pass
class AnnouncementForm(I18nModelFormMixin, ModelForm):
class Meta:
model = Announcement
fields = ('text', 'active', 'active_until')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['active_until'].initial = timezone.now()
class MapUpdateFilterForm(Form):
type = ChoiceField(
choices=(('', _('any type')), ) + MapUpdate.TYPES,
required=False
)
geometries_changed = ChoiceField(
choices=(('', _('any')), ('1', _('geometries changed')), ('0', _('no geometries changed'))),
required=False
)
processed = ChoiceField(
choices=(('', _('any')), ('1', _('processed')), ('0', _('not processed'))),
required=False
)
user_id = IntegerField(min_value=1, required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['user_id'].widget.attrs['placeholder'] = _('user id')
class MapUpdateForm(ModelForm):
class Meta:
model = MapUpdate
fields = ('geometries_changed', )
|
py | 1a3163b340adf68f96374a1c46213143dc7fdad8 | from output.models.nist_data.atomic.int_pkg.schema_instance.nistschema_sv_iv_atomic_int_min_exclusive_3_xsd.nistschema_sv_iv_atomic_int_min_exclusive_3 import NistschemaSvIvAtomicIntMinExclusive3
__all__ = [
"NistschemaSvIvAtomicIntMinExclusive3",
]
|
py | 1a31649bd3d3b87ca558d84490ad3e396e723138 | """
MIT License
Copyright (c) 2020 Airbyte
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import operator
from functools import partial, reduce
from json.decoder import JSONDecodeError
from typing import Mapping, Tuple
import requests
from base_python import BaseClient
from requests.auth import HTTPBasicAuth
from requests.exceptions import ConnectionError
class Client(BaseClient):
"""
Jira API Reference: https://developer.atlassian.com/cloud/jira/platform/rest/v3/intro/
"""
API_VERSION = 3
DEFAULT_ITEMS_PER_PAGE = 100
PARAMS = {"maxResults": DEFAULT_ITEMS_PER_PAGE, "startAt": 0}
ENTITIES_MAP = {
"projects": {"url": "/project/search", "func": lambda v: v["values"], "params": PARAMS},
"issues": {"url": "/search", "func": lambda v: v["issues"], "params": PARAMS},
"issue_comments": {
"url": "/search",
"func": lambda v: reduce(operator.iadd, [obj["fields"]["comment"]["comments"] for obj in v["issues"]], []),
"params": {**PARAMS, **{"fields": ["comment"]}},
},
"users": {"url": "/users/search", "func": lambda v: v, "params": PARAMS},
"resolutions": {"url": "/resolution", "func": lambda v: v, "params": {}},
}
def __init__(self, api_token, domain, email):
self.auth = HTTPBasicAuth(email, api_token)
self.base_api_url = f"https://{domain}/rest/api/{self.API_VERSION}"
super().__init__()
def lists(self, name, url, params, func, **kwargs):
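        # Paginated GET: keep fetching pages of maxResults items until a short page is
        # returned (the unpaginated 'resolutions' endpoint stops after the first call).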
while True:
response = requests.get(f"{self.base_api_url}{url}", params=params, auth=self.auth)
data = func(response.json())
yield from data
if name == "resolutions" or len(data) < self.DEFAULT_ITEMS_PER_PAGE:
break
params["startAt"] += self.DEFAULT_ITEMS_PER_PAGE
def _enumerate_methods(self) -> Mapping[str, callable]:
return {entity: partial(self.lists, name=entity, **value) for entity, value in self.ENTITIES_MAP.items()}
def health_check(self) -> Tuple[bool, str]:
alive = True
error_msg = None
try:
next(self.lists(name="resolutions", **self.ENTITIES_MAP["resolutions"]))
except ConnectionError as error:
alive, error_msg = False, str(error)
# If the input domain is incorrect or doesn't exist, then the response would be empty, resulting in a JSONDecodeError
except JSONDecodeError:
alive, error_msg = (
False,
"Unable to connect to the Jira API with the provided credentials. Please make sure the input credentials and environment are correct.",
)
return alive, error_msg
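# Example usage (sketch; the domain, e-mail and token below are placeholders):
#   client = Client(api_token="<token>", domain="example.atlassian.net", email="me@example.com")
#   ok, error = client.health_check()
#   for project in client.lists(name="projects", **Client.ENTITIES_MAP["projects"]):
#       print(project["key"])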
|
py | 1a3164dc51b7b11eb4ca634199a0850e14b12996 | #!/usr/bin/python3
"""Generate a DOT graph showing load module dependencies.
Runs objdump to gather load module dependency relationships.
"""
from collections import defaultdict
import getopt
import locale
import os
import re
import shlex
import subprocess
import sys
import script_utils as u
# Output DOT file
flag_outfile = None
# List of input files collected from command line
flag_input_files = []
input_sonames = {}
# Selection for -r arg (either 32 or 64)
flag_restrict_elf = None
# Objdump cmd, determined on the fly
objdump_cmd = None
# Target or host mode
flag_filemode = "target"
# Complain if input files are not in .../symbols directory
flag_check_in_symbols = True
# Prune out common *.so files
flag_prune = False
# Include backward slice of depth N
flag_backward_slice = 0
# Setting of $ANDROID_BUILD_TOP
abt = ""
# Setting of $ANDROID_PRODUCT_OUT
apo = ""
# Setting of $ANDROID_HOST_OUT
aho = ""
# Load module dependency table. Key is load module name (not path),
# value is dict of dependency names. rdepends is reverse graph
depends = defaultdict(lambda: defaultdict(str))
rdepends = defaultdict(lambda: defaultdict(str))
# Populated with all possible load modules of interest. Key
# is load module path, value is 0 (unvisited) or 1 (visited).
all_loadmodules = {}
# Maps load module base name to dict of paths
base_to_paths = defaultdict(lambda: defaultdict(str))
# Things to omit for -p option
toprune = {"libm.so": 1, "libc.so": 1, "libdl.so": 1, "libc++.so": 1}
# Node colors (key is soname)
nodecolor = {}
def in_symbols_dir(filename):
"""Make sure input file is part of $ANROID_PRODUCT_OUT/symbols."""
if flag_filemode == "host" or not flag_check_in_symbols:
return True
u.verbose(2, "in_symbols_dir(%s)" % filename)
smatch = re.compile(r"^(\S+)\/symbols\/\S+$")
sm = smatch.match(filename)
if sm is None:
u.verbose(2, "/symbols/ match failed for %s" % filename)
return False
pre = sm.group(1)
u.verbose(2, "pre=%s apo=%s abt=%s" % (pre, apo, abt))
if pre == apo:
return True
fp = "%s/%s" % (abt, pre)
return fp == apo
def determine_objdump(filename):
"""Figure out what flavor of object dumper we should use."""
global objdump_cmd
lines = u.docmdlines("file %s" % filename)
matchers = [(re.compile(r".*ELF.+ARM aarch64"),
"aarch64-linux-android-objdump"),
(re.compile(r".*ELF.+ARM"),
"arm-linux-androideabi-objdump"),
(re.compile(r".*ELF.+x86\-64"),
"objdump"),
(re.compile(r".*ELF.+Intel"),
"objdump")]
for l in lines:
for tup in matchers:
m = tup[0]
res = m.match(l)
if res is None:
continue
objdump_cmd = tup[1]
return
u.error("unable to determine objdump flavor to use on %s" % filename)
def run_objdump_cmd(cargs, filename):
"""Run objdump with specified args, returning list of lines."""
if not objdump_cmd:
determine_objdump(filename)
cmd = "%s %s %s" % (objdump_cmd, cargs, filename)
u.verbose(2, "objdump cmd: %s" % cmd)
splargs = shlex.split(cmd)
mypipe = subprocess.Popen(splargs, stdout=subprocess.PIPE)
pout, _ = mypipe.communicate()
if mypipe.returncode != 0:
u.error("command failed (rc=%d): cmd was %s" % (mypipe.returncode, cmd))
encoding = locale.getdefaultlocale()[1]
decoded = pout.decode(encoding)
return decoded.strip().split("\n")
def skip_this_elf(filename, lines, eflav):
"""Return whether we should skip this elf."""
matcher = re.compile(r"^\S+:\s+file format elf(\d\d)\-")
for line in lines:
if not line:
continue
m = matcher.match(line)
if m:
dd = int(m.group(1))
if dd != 32 and dd != 64:
u.error("internal error: bad elf %s flavor %d "
"(line %s)" % (filename, dd, line))
if dd != eflav:
# Not correct flavor
return True
else:
return False
u.error("internal error: could not find file format line")
def examine_deps(filename):
"""Run objdump to collect depends info."""
u.verbose(2, "examine_deps(%s)" % filename)
objdump_args = "-p"
lines = run_objdump_cmd(objdump_args, filename)
if flag_restrict_elf and skip_this_elf(filename, lines, flag_restrict_elf):
u.verbose(1, "skipping file %s, wrong elf flavor" % filename)
return None
bn = os.path.basename(filename)
u.verbose(2, "examining objdump output for %s (%s)" % (bn, filename))
# Pattern we're looking for in the objdump output
matcher = re.compile(r"^\s+(\S+)\s+(\S+)\s*$")
deps = {}
soname = None
# Run through all of the lines:
for line in lines:
if not line:
continue
# Match
m = matcher.match(line)
if m:
which = m.group(1)
if which == "NEEDED":
lm = m.group(2)
deps[lm] = 1
u.verbose(3, "file %s has dep %s" % (filename, lm))
elif which == "SONAME":
soname = m.group(2)
if soname:
if soname != bn:
u.verbose(1, "soname %s disagrees with "
"basename for file %s" % (soname, filename))
else:
soname = bn
if deps:
ddict = depends[soname]
for d in deps:
u.verbose(2, "processing dep %s -> %s" % (soname, d))
ddict[d] = 1
rdict = rdepends[d]
rdict[soname] = 1
return soname
def examinefile(filename):
"""Perform symbol analysis on specified file."""
u.verbose(2, "examinefile(%s)" % filename)
if filename not in all_loadmodules:
fullpath = os.path.join(os.getcwd(), filename)
if fullpath in all_loadmodules:
filename = fullpath
else:
u.warning("unable to visit %s (not "
"in %s out)" % (filename, flag_filemode))
return
if all_loadmodules[filename] == 1:
return
if not in_symbols_dir(filename):
u.warning("%s: does not appear to be in "
"%s/symbols directory? skipping" % (filename, apo))
return
soname = examine_deps(filename)
if not soname:
all_loadmodules[filename] = 1
return
worklist = []
ddict = depends[soname]
for dep in ddict:
pdict = base_to_paths[dep]
for path in pdict:
if path in all_loadmodules and all_loadmodules[path] == 0:
all_loadmodules[path] = 1
worklist.append(path)
for item in worklist:
examine_deps(item)
def collect_all_loadmodules():
"""Collect names of all interesting loadmodules."""
locations = None
if flag_filemode == "target":
locations = "%s/symbols/system" % apo
else:
locations = "%s/bin %s/lib64" % (aho, aho)
u.verbose(1, "collecting loadmodules from %s" % locations)
cmd = "find %s -type f -print" % locations
u.verbose(1, "find cmd: %s" % cmd)
cargs = shlex.split(cmd)
mypipe = subprocess.Popen(cargs, stdout=subprocess.PIPE)
pout, _ = mypipe.communicate()
if mypipe.returncode != 0:
u.error("command failed (rc=%d): cmd was %s" % (mypipe.returncode, cmd))
encoding = locale.getdefaultlocale()[1]
decoded = pout.decode(encoding)
lines = decoded.strip().split("\n")
u.verbose(1, "found a total of %d load modules" % len(lines))
for line in lines:
path = line.strip()
u.verbose(2, "adding LM %s" % path)
all_loadmodules[path] = 0
bn = os.path.basename(path)
pdict = base_to_paths[bn]
pdict[path] = 1
if flag_backward_slice:
for filearg in flag_input_files:
bn = os.path.basename(filearg)
if bn not in all_loadmodules:
u.warning("argument %s not found in all_loadmodules "
"-- unable to compute slice" % filearg)
def get_nodename(soname, nodenames):
"""Generate DOT nodename."""
if soname in nodenames:
return nodenames[soname]
nn = len(nodenames)
seed = "".join([x if x.isalnum() else "_" for x in soname])
nn = "%s_%d" % (seed, nn)
nodenames[soname] = nn
return nn
def emit_helper(fh, soname, mode, emitted, nodenames, restrictnodes):
"""Emit dot node."""
if soname in emitted:
return
emitted[soname] = 1
this_nn = get_nodename(soname, nodenames)
if mode == "node":
if not flag_prune or soname not in toprune:
shape = "record"
if soname in input_sonames:
shape = "box3d"
color = "lightblue"
if soname in nodecolor:
color = nodecolor[soname]
fh.write(" %s [shape=%s,style=filled,"
"fillcolor=%s,"
"label=\"%s\"];\n" % (this_nn, shape, color, soname))
ddict = depends[soname]
for dep in ddict:
res = False
if restrictnodes and dep not in restrictnodes:
res = True
if flag_prune and dep in toprune:
res = True
if res:
continue
if mode == "edge":
dep_nn = get_nodename(dep, nodenames)
fh.write(" %s -> %s [style=\"solid,bold\","
"color=black,weight=10,constraint=true];\n" % (this_nn, dep_nn))
emit_helper(fh, dep, mode, emitted, nodenames, restrictnodes)
def collect_slice_nodes(seednode, depth):
"""Collect nodes in backward slice."""
if depth == 0:
return
rval = {}
rdict = rdepends[seednode]
for rdep in rdict:
rval[rdep] = 1
nodecolor[rdep] = "lightyellow"
    # fold transitive predecessors into the slice, not just the direct ones
    rval.update(collect_slice_nodes(rdep, depth-1) or {})
return rval
def emit_to_file(fh):
"""Emit output DOT to file or stdout."""
fh.write("digraph \"graph\" {\n")
fh.write(" overlap=false;\n")
# Nodes
nodes_emitted = {}
nodenames = {}
slicenodes = {}
empty = {}
restrictnodes = {}
for filename in flag_input_files:
bn = os.path.basename(filename)
nodecolor[bn] = "red"
input_sonames[bn] = 1
u.verbose(1, "input sonames: %s" % " ".join(list(input_sonames.keys())))
for filename in flag_input_files:
bn = os.path.basename(filename)
emit_helper(fh, bn, "node", nodes_emitted, nodenames, empty)
preds = collect_slice_nodes(bn, flag_backward_slice)
if preds:
slicenodes.update(preds)
restrictnodes.update(nodes_emitted)
if slicenodes:
u.verbose(1, "slice nodes: %s" % " ".join(list(slicenodes.keys())))
for slicenode in slicenodes:
restrictnodes[slicenode] = 1
emit_helper(fh, slicenode, "node", nodes_emitted, nodenames, nodes_emitted)
u.verbose(1, "restrictnodes: %s" % " ".join(list(restrictnodes.keys())))
# Edges
edges_emitted = {}
for filename in flag_input_files:
bn = os.path.basename(filename)
emit_helper(fh, bn, "edge", edges_emitted, nodenames, empty)
for slicenode in slicenodes:
emit_helper(fh, slicenode, "edge", edges_emitted, nodenames, restrictnodes)
fh.write("}\n")
def emit():
"""Emit output DOT."""
if flag_outfile:
u.verbose(1, "opening %s" % flag_outfile)
fh = open(flag_outfile, "w")
else:
fh = sys.stdout
emit_to_file(fh)
def usage(msgarg):
"""Print usage and exit."""
if msgarg:
sys.stderr.write("error: %s\n" % msgarg)
print("""\
usage: %s [options] <ELF files>
options:
-d increase debug msg verbosity level
-H image of interest is host and not target (testing/debugging)
-X skip check to to make sure lib is in symbols dir
-r {32,64} restrict analysis to just ELF-32 or just ELF-64 files
-o F write output DOT to file F (default is stdout)
-p omit nodes for common "base" libraries, including
libc.so, libdl.so, libm.so, libc++.so
-B N include backward slice of depth N from input load modules
Notes:
- arguments are expected to be linked (.so or .exe) but unstripped
""" % os.path.basename(sys.argv[0]))
sys.exit(1)
def parse_args():
"""Command line argument parsing."""
global flag_check_in_symbols, flag_filemode
global flag_input_files, apo, abt, aho
global flag_restrict_elf, flag_outfile, flag_prune
global flag_backward_slice
try:
optlist, args = getopt.getopt(sys.argv[1:], "dpHB:Xr:o:")
except getopt.GetoptError as err:
# unrecognized option
usage(str(err))
for opt, arg in optlist:
if opt == "-d":
u.increment_verbosity()
elif opt == "-p":
flag_prune = True
elif opt == "-H":
flag_filemode = "host"
elif opt == "-B":
backdepth = int(arg)
if backdepth < 1:
u.usage("use positive arg for -B")
flag_backward_slice = backdepth
elif opt == "-r":
if arg == "32":
flag_restrict_elf = 32
elif arg == "64":
flag_restrict_elf = 64
else:
usage("argument to -r option must be either 32 or 64")
elif opt == "-X":
flag_check_in_symbols = False
elif opt == "-o":
flag_outfile = arg
if not args:
usage("specify at least one input file")
for a in args:
if not os.path.exists(a):
usage("unable to read/access input arg %s" % a)
flag_input_files = args
abt = os.getenv("ANDROID_BUILD_TOP")
if abt is None:
u.error("ANDROID_BUILD_TOP not set (did you run lunch?)")
apo = os.getenv("ANDROID_PRODUCT_OUT")
if apo is None:
u.error("ANDROID_PRODUCT_OUT not set (did you run lunch?)")
aho = os.getenv("ANDROID_HOST_OUT")
if aho is None:
u.error("ANDROID_HOST_OUT not set (did you run lunch?)")
#----------------------------------------------------------------------
# Main portion of script
#
parse_args()
u.setdeflanglocale()
collect_all_loadmodules()
if flag_backward_slice:
for f in all_loadmodules:
examinefile(f)
else:
for filearg in flag_input_files:
examinefile(filearg)
emit()
|
py | 1a3164dd19482101f73e3e45051c1370a7ab6287 | import numpy as np
""" This file creates a grid of stars for the HRD plot """
metallicities = [0.0001, 0.001, 0.01, 0.01416]
# make a small grid for all metallicities
with open("grid.txt", "w") as f:
masses = np.round(np.logspace(np.log10(0.1), np.log10(150.0), 500), 3)
grid_lines = ["--initial-mass {} --metallicity {} \n".format(masses[i], metallicities[j]) for j in range(len(metallicities)) for i in range(len(masses))]
f.writelines(grid_lines)
# make a small grid just for solar
with open("rapid_grid.txt", "w") as f:
masses = np.round(np.logspace(np.log10(0.1), np.log10(150.0), 500), 3)
grid_lines = ["--initial-mass {} --metallicity {} \n".format(masses[i], 0.0001) for i in range(len(masses))]
f.writelines(grid_lines)
# make a dense grid of solar (mostly dense for NSs and low mass BHs)
with open("MM20_grid.txt", "w") as f:
low_masses = np.round(np.logspace(np.log10(0.1), np.log10(8.0), 200), 4)
med_masses = np.round(np.logspace(np.log10(8.0), np.log10(50.0), 4600), 4)
high_masses = np.round(np.logspace(np.log10(50.0), np.log10(150.0), 200), 4)
masses = np.concatenate((low_masses, med_masses, high_masses))
grid_lines = ["--initial-mass {} --metallicity {} \n".format(masses[i], 0.01416) for i in range(len(masses))]
f.writelines(grid_lines) |
py | 1a316538f87fd2df45edc1191891ac2c4f221063 | import statsapi
import pandas as pd
# logging
import logging
logger = logging.getLogger('statsapi')
logger.setLevel(logging.DEBUG)
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s - %(levelname)8s - %(name)s(%(thread)s) - %(message)s")
ch.setFormatter(formatter)
rootLogger.addHandler(ch)
# from ptimeit import timethis, Timer
# @timethis()
# def get_rookie_hr_leader():
# rookie_hr_leaders = statsapi.league_leaders('homeRuns', season=2021, playerPool = 'rookies', limit=15)
# print(rookie_hr_leaders)
# Timer.run(1)
from functools import wraps
from time import time
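# Minimal wall-clock timing decorator (a stand-in for the commented-out ptimeit approach above).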
def timing(f):
@wraps(f)
def wrap(*args, **kw):
ts = time()
result = f(*args, **kw)
te = time()
print(f"func: {f.__name__} args:{args},{kw} took{te-ts:2.4f} secs")
# print 'func:%r args:[%r, %r] took: %2.4f sec' % \
# (f.__name__, args, kw, te-ts)
return result
return wrap
@timing
def get_rookie_hr_leader():
rookie_hr_leaders = statsapi.league_leaders('homeRuns', season=2021, playerPool = 'rookies', limit=15)
print(rookie_hr_leaders)
get_rookie_hr_leader() |
py | 1a31661d6e41d863898adefd18b3f0dc9e40c83b |
class MatcherType(object):
"""
    Interface for matcher functions. All matcher types must implement the get_ratio_match
    method, which compares two objects and returns a ratio value.
"""
def get_ratio_match(self, object_a, object_b):
pass
|
py | 1a31664b6f0244c0dc018b34cf56e1aad7543a26 | #! /usr/bin/python
# coding: utf-8
# Copyright 2018 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import matplotlib
matplotlib.use('Agg') # Generate images without having a window appear
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from argparse import ArgumentParser
from itertools import cycle
import pandas as pd
import csv
from __init__ import INTENT_JUDGE_COLUMN, UTF_8, CONFIDENCE_COLUMN, \
PREDICTED_INTENT_COLUMN, GOLDEN_INTENT_COLUMN, \
INTENT_COLUMN, POPULATION_WEIGHT_MODE, \
EQUAL_WEIGHT_MODE, DEFAULT_CONF_THRES, SCORE_COLUMN
# total different number of line style len(line_styles) * len(line_color) = 12
line_styles = ['-', '--', '-.', ':']
line_color = ['b', 'g', 'r']
LEGEND_AXIS_FONT_SIZE = 14
TITLE_FONT_SIZE = 16
WEIGHT_COLUMN = 'weight'
def func(args):
    """ Read classifier results and draw the curves on one canvas for comparison
Input Schema:
| predicted intent | confidence | does intent match |
| intent 0 | confidence score | yes/no value |
"""
classifier_stat_list = []
cf_frames = []
confidences_list = []
intents_in_results = pd.Series()
classifier_num = len(args.classifiers_results)
# Prepare labels for each curve
labels = [os.path.splitext(os.path.basename(file_path))[0]
for file_path in args.classifiers_results]
    # Only customize the labels if the numbers match
if args.classifier_names and \
(len(args.classifier_names) == classifier_num):
labels = args.classifier_names
# Initialization
for i in range(classifier_num):
file_path = args.classifiers_results[i]
frame = pd.read_csv(file_path, encoding=UTF_8, quoting=csv.QUOTE_ALL)
if INTENT_JUDGE_COLUMN not in frame.columns: # Column validation
raise ValueError("'{}' column not in {}".format(
INTENT_JUDGE_COLUMN, file_path))
# Read the cf files into list
cf_frames.append(frame)
# Collect all intents from the classification results
intents_in_results = pd.concat([intents_in_results,
frame[PREDICTED_INTENT_COLUMN]])
        # Convert NaN to zero values to avoid using zero as a divisor
confidences_list.append(frame[CONFIDENCE_COLUMN].fillna(0)
.drop_duplicates().sort_values().tolist())
intents_in_results = intents_in_results.drop_duplicates()
# Read weight
weights_df = None
weight_mode = args.weight.lower()
# Read the intent weights pairs from file
if weight_mode != POPULATION_WEIGHT_MODE and \
weight_mode != EQUAL_WEIGHT_MODE:
try:
weights_df = pd.read_csv(args.weight, encoding=UTF_8,
quoting=csv.QUOTE_ALL)
# Validate the completeness
for _, intent in intents_in_results.iteritems():
if not any(weights_df[INTENT_COLUMN] == intent):
raise ValueError("'{}' intent not in {}".format(
intent, args.weight))
except Exception as e:
print(e)
weight_mode = POPULATION_WEIGHT_MODE # default population mode
print('Fall back to {} mode'.format(POPULATION_WEIGHT_MODE))
# Init the classifier_stat_list:
for i in range(classifier_num):
# array of zeros to hold precision values
classifier_stat_list.append(np.zeros([len(confidences_list[i]), 3]))
for j in range(classifier_num):
confidences = confidences_list[j]
for i in range(len(confidences)):
conf = confidences[i]
cf_frame = cf_frames[j]
precision = 0
answered = \
cf_frame[cf_frame[CONFIDENCE_COLUMN] >= conf].shape[0]
if weight_mode == POPULATION_WEIGHT_MODE:
correct = cf_frame[
cf_frame[CONFIDENCE_COLUMN] >= conf][SCORE_COLUMN].sum()
precision = correct / answered
# print(precision)
else:
intent_uttr_num_map = \
cf_frame[cf_frame[CONFIDENCE_COLUMN] >= conf] \
.groupby(PREDICTED_INTENT_COLUMN)[PREDICTED_INTENT_COLUMN] \
.count().to_dict()
                    # Calculate precision using equal weights
uttr_correct_intent = \
cf_frame[cf_frame[CONFIDENCE_COLUMN] >= conf] \
.groupby(GOLDEN_INTENT_COLUMN)[SCORE_COLUMN] \
.sum()
intent_weights = None
weight_coeff = 1 / len(intent_uttr_num_map)
if weight_mode != EQUAL_WEIGHT_MODE:
required_weights_df = \
weights_df[
weights_df[INTENT_COLUMN]
.isin(uttr_correct_intent.index)]
weight_sum = required_weights_df[WEIGHT_COLUMN].sum()
# Normalize weights
weights_df[WEIGHT_COLUMN] = \
weights_df[WEIGHT_COLUMN] / weight_sum
intent_weights = \
weights_df.set_index(INTENT_COLUMN)[WEIGHT_COLUMN] \
.to_dict()
for intent, correct_intent_num in \
uttr_correct_intent.iteritems():
if weight_mode != EQUAL_WEIGHT_MODE:
weight_coeff = intent_weights[intent]
precision += \
weight_coeff * correct_intent_num \
/ intent_uttr_num_map[intent]
classifier_stat_list[j][i, 0] = precision
classifier_stat_list[j][i, 1] = 100 * answered / len(cf_frame)
classifier_stat_list[j][i, 2] = conf
for idx in range(len(classifier_stat_list)):
# reversing order for helpful plotting
classifier_stat_list[idx] = classifier_stat_list[idx][::-1]
# plotting
fig = plt.figure()
ax = fig.gca()
    ax.set_ylim([0, 1.0])  # Hard-code the y-axis to a consistent 0-1.0 to ease historical comparisons
ax.grid(color='b', linestyle='--', alpha=0.3)
ax.set_xlabel('Percentage of Questions Answered',
fontsize=LEGEND_AXIS_FONT_SIZE)
ax.set_ylabel('Precision', fontsize=LEGEND_AXIS_FONT_SIZE)
line_style_cycler = cycle(line_styles)
line_color_cycler = cycle(line_color)
lines = [] # reference to lines
# plot the curve and save the figure
for i in range(len(classifier_stat_list)):
classifier_stat = classifier_stat_list[i]
# Default to the idx of lowest conf
tau_idx = len(classifier_stat[:, 2]) - 1
indices_gtr_tau, = np.where(classifier_stat[:, 2] <= args.tau)
if len(indices_gtr_tau) > 0:
tau_idx = indices_gtr_tau[0]
color = next(line_color_cycler)
line, = plt.plot(classifier_stat[:, 1], classifier_stat[:, 0],
color=color, label=labels[i],
linestyle=next(line_style_cycler))
plt.plot(classifier_stat[tau_idx, 1], classifier_stat[tau_idx, 0],
'{}o'.format(color), markerfacecolor='None')
lines.append(line)
tau_desc = mlines.Line2D([], [], markeredgecolor='black', marker='o',
linestyle='None', markerfacecolor='None',
markersize=10,
label='tau = {}'.format(args.tau))
ax.legend(handles=lines + [tau_desc], loc='lower left', shadow=False,
prop={'size': LEGEND_AXIS_FONT_SIZE})
ax.set_title(args.figure_title,
fontsize=TITLE_FONT_SIZE)
if args.ymin != 0.0:
plt.ylim(args.ymin, 1.0)
# Save figure as file
plt.savefig(args.outfile)
print("Wrote precision curve to {}".format(args.outfile))
def create_parser():
parser = ArgumentParser(description="Draw precision curves on a single canvas \
from multiple classifiers' classification results")
parser.add_argument('-i', '--classifiers_results', nargs='+',
required=True,
help='Files of results from individual classifiers')
parser.add_argument('-n', '--classifier_names', nargs='*',
help='Names of each classifier')
parser.add_argument('-t', '--figure_title', required=True, type=str,
help='Title of output figure')
parser.add_argument('-o', '--outfile', help='File of the output figure',
default='figure.png', type=str)
parser.add_argument('-w', '--weight', default='population', type=str,
help='Weight configuration for each intent')
parser.add_argument('--tau', default=DEFAULT_CONF_THRES, type=float,
help='Confidence threshold for curve marker')
parser.add_argument('--ymin', default=0.0, type=float,
help='Minimum for Y axis')
return parser
if __name__ == '__main__':
ARGS = create_parser().parse_args()
func(ARGS)
|
py | 1a316710c2553473f4cd3acb4b8136f3958af237 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
#
# PYTHON_ARGCOMPLETE_OK (Must be in first 1024 bytes, so if tab completion
# is failing, move this above the license)
import argcomplete
import argparse
import importlib
import logging
import os
import pdb
import sys
import traceback
from datetime import datetime
from dateutil.parser import parse as date_parse
try:
from setproctitle import setproctitle
except ImportError:
def setproctitle(t):
return None
from c7n import deprecated
from c7n.config import Config
DEFAULT_REGION = 'us-east-1'
log = logging.getLogger('custodian.cli')
def _default_options(p, exclude=[]):
    """ Add basic options to the subparser.
`exclude` is a list of options to exclude from the default set.
e.g.: ['region', 'log-group']
"""
provider = p.add_argument_group(
"provider", "AWS account information, defaults per the aws cli")
if 'region' not in exclude:
provider.add_argument(
"-r", "--region", action='append', default=[],
dest='regions', metavar='REGION',
help="AWS Region to target. Can be used multiple times")
provider.add_argument(
"--profile",
help="AWS Account Config File Profile to utilize")
provider.add_argument("--assume", default=None, dest="assume_role",
help="Role to assume")
provider.add_argument("--external-id", default=None, dest="external_id",
help="External Id to provide when assuming a role")
config = p.add_argument_group(
"config", "Policy config file(s) and policy selectors")
# -c is deprecated. Supported for legacy reasons
config.add_argument("-c", "--config", help=argparse.SUPPRESS)
config.add_argument("configs", nargs='*',
help="Policy configuration file(s)")
config.add_argument("-p", "--policies", default=[], dest='policy_filters',
action='append', help="Only use named/matched policies")
config.add_argument("-t", "--resource", default=[], dest='resource_types',
action='append',
help="Only use policies with the given resource type")
output = p.add_argument_group("output", "Output control")
output.add_argument("-v", "--verbose", action="count", help="Verbose logging")
if 'quiet' not in exclude:
output.add_argument("-q", "--quiet", action="count",
help="Less logging (repeatable, -qqq for no output)")
else:
output.add_argument("-q", "--quiet", action="count", help=argparse.SUPPRESS)
output.add_argument("--debug", default=False, help=argparse.SUPPRESS,
action="store_true")
if 'vars' not in exclude:
# p.add_argument('--vars', default=None,
# help='Vars file to substitute into policy')
p.set_defaults(vars=None)
if 'log-group' not in exclude:
p.add_argument(
"-l", "--log-group", default=None,
help="Location to send policy logs (Ex: AWS CloudWatch Log Group)")
else:
p.add_argument("--log-group", default=None, help=argparse.SUPPRESS)
if 'output-dir' not in exclude:
p.add_argument("-s", "--output-dir", required=True,
help="[REQUIRED] Directory or S3 URL For policy output")
if 'cache' not in exclude:
p.add_argument(
"-f", "--cache", default="~/.cache/cloud-custodian.cache",
help="Cache file (default %(default)s)")
p.add_argument(
"--cache-period", default=15, type=int,
help="Cache validity in minutes (default %(default)i)")
else:
p.add_argument("--cache", default=None, help=argparse.SUPPRESS)
def _report_options(p):
""" Add options specific to the report subcommand. """
_default_options(p, exclude=['cache', 'log-group', 'quiet'])
p.add_argument(
'--days', type=float, default=1,
help="Number of days of history to consider")
p.add_argument(
'--raw', type=argparse.FileType('w'),
help="Store raw json of collected records to given file path")
p.add_argument(
'--field', action='append', default=[], type=_key_val_pair,
metavar='HEADER=FIELD',
help='Repeatable. JMESPath of field to include in the output OR '
'for a tag use prefix `tag:`. Special case fields `region` and'
'`policy` are available')
p.add_argument(
'--no-default-fields', action="store_true",
help='Exclude default fields for report.')
p.add_argument(
'--format', default='csv', choices=['csv', 'grid', 'simple', 'json'],
help="Format to output data in (default: %(default)s). "
"Options include simple, grid, csv, json")
p.add_argument(
'--all-findings', default=False, action="store_true",
help="Outputs all findings per resource. Defaults to a single finding per resource. ")
def _metrics_options(p):
""" Add options specific to metrics subcommand. """
_default_options(p, exclude=['log-group', 'output-dir', 'cache', 'quiet'])
p.add_argument(
'--start', type=date_parse,
help='Start date (requires --end, overrides --days)')
p.add_argument(
'--end', type=date_parse, help='End date')
p.add_argument(
'--days', type=int, default=14,
help='Number of days of history to consider (default: %(default)i)')
p.add_argument('--period', type=int, default=60 * 24 * 24)
def _logs_options(p):
""" Add options specific to logs subcommand. """
_default_options(p, exclude=['cache', 'quiet'])
# default time range is 0 to "now" (to include all log entries)
p.add_argument(
'--start',
default='the beginning', # invalid, will result in 0
help='Start date and/or time',
)
p.add_argument(
'--end',
default=datetime.now().strftime('%c'),
help='End date and/or time',
)
def _schema_options(p):
""" Add options specific to schema subcommand. """
p.add_argument(
'resource', metavar='selector', nargs='?', default=None)
p.add_argument(
'--summary', action="store_true",
help="Summarize counts of available resources, actions and filters")
p.add_argument('--json', action="store_true",
help="Export custodian's jsonschema")
p.add_argument('--outline', action="store_true",
help="Print outline of all resources and their actions and filters")
p.add_argument("-v", "--verbose", action="count", help="Verbose logging")
p.add_argument("-q", "--quiet", action="count", help=argparse.SUPPRESS)
p.add_argument("--debug", default=False, help=argparse.SUPPRESS)
def _dryrun_option(p):
p.add_argument(
"-d", "--dryrun", "--dry-run", action="store_true",
help="Don't execute actions but filter resources")
def _key_val_pair(value):
"""
Type checker to ensure that --field values are of the format key=val
"""
if '=' not in value:
msg = 'values must be of the form `header=field`'
raise argparse.ArgumentTypeError(msg)
return value
def setup_parser():
c7n_desc = "Cloud Custodian - Cloud fleet management"
parser = argparse.ArgumentParser(description=c7n_desc)
# Setting `dest` means we capture which subparser was used.
subs = parser.add_subparsers(
title='commands',
dest='subparser')
run_desc = "\n".join((
"Execute the policies in a config file.",
"",
"Multiple regions can be passed in, as can the symbolic region 'all'. ",
"",
"When running across multiple regions, policies targeting resources in ",
"regions where they do not exist will not be run. The output directory ",
"when passing multiple regions is suffixed with the region. Resources ",
"with global endpoints are run just once and are suffixed with the first ",
"region passed in or us-east-1 if running against 'all' regions.",
""
))
run = subs.add_parser(
"run", description=run_desc,
help="Execute the policies in a config file",
formatter_class=argparse.RawDescriptionHelpFormatter)
run.set_defaults(command="c7n.commands.run")
_default_options(run)
_dryrun_option(run)
run.add_argument(
"--skip-validation",
action="store_true",
        help="Skips validation of policies (assumes you've run the validate command separately).")
metrics_help = ("Emit metrics to provider metrics. Specify 'aws', 'gcp', or 'azure'. "
"For more details on aws metrics options, see: "
"https://cloudcustodian.io/docs/aws/usage.html#metrics")
run.add_argument(
"-m", "--metrics-enabled", metavar="PROVIDER",
default=None, nargs="?", const="aws",
help=metrics_help)
run.add_argument(
"--trace",
dest="tracer",
help="Tracing integration",
default=None, nargs="?", const="default")
schema_desc = ("Browse the available vocabularies (resources, filters, modes, and "
"actions) for policy construction. The selector "
"is specified with RESOURCE[.CATEGORY[.ITEM]] "
"examples: s3, ebs.actions, or ec2.filters.instance-age")
schema = subs.add_parser(
'schema', description=schema_desc,
help="Interactive cli docs for policy authors")
schema.set_defaults(command="c7n.commands.schema_cmd")
_schema_options(schema)
report_desc = ("Report of resources that a policy matched/ran on. "
"The default output format is csv, but other formats "
"are available.")
report = subs.add_parser(
"report", description=report_desc,
help="Tabular report on policy matched resources")
report.set_defaults(command="c7n.commands.report")
_report_options(report)
logs = subs.add_parser(
'logs')
logs.set_defaults(command="c7n.commands.logs")
_logs_options(logs)
metrics = subs.add_parser('metrics')
metrics.set_defaults(command="c7n.commands.metrics_cmd")
_metrics_options(metrics)
version = subs.add_parser(
'version', help="Display installed version of custodian")
version.set_defaults(command='c7n.commands.version_cmd')
version.add_argument('-v', '--verbose', action="count", help="Verbose logging")
version.add_argument("-q", "--quiet", action="count", help=argparse.SUPPRESS)
version.add_argument(
"--debug", action="store_true",
help="Print info for bug reports")
validate_desc = (
"Validate config files against the json schema")
validate = subs.add_parser(
'validate', description=validate_desc, help=validate_desc)
validate.set_defaults(command="c7n.commands.validate", check_deprecations="yes")
validate.add_argument(
"-c", "--config", help=argparse.SUPPRESS)
validate.add_argument("configs", nargs='*',
help="Policy Configuration File(s)")
validate.add_argument("-v", "--verbose", action="count", help="Verbose Logging")
validate.add_argument("-q", "--quiet", action="count", help="Less logging (repeatable)")
validate.add_argument("--debug", default=False, help=argparse.SUPPRESS)
deprecations = validate.add_mutually_exclusive_group(required=False)
deprecations.add_argument("--no-deps", dest="check_deprecations",
action='store_const', const=deprecated.SKIP,
help="Do not check for deprecations")
deprecations.add_argument("--strict", dest="check_deprecations",
action='store_const', const=deprecated.STRICT,
help="Any deprecations will cause a non-zero exit code")
return parser
def _setup_logger(options):
level = 3 + (options.verbose or 0) - (options.quiet or 0)
if level <= 0:
# print nothing
log_level = logging.CRITICAL + 1
elif level == 1:
log_level = logging.ERROR
elif level == 2:
log_level = logging.WARNING
elif level == 3:
# default
log_level = logging.INFO
else:
log_level = logging.DEBUG
logging.basicConfig(
level=log_level,
format="%(asctime)s: %(name)s:%(levelname)s %(message)s")
external_log_level = logging.ERROR
if level <= 0:
external_log_level = logging.CRITICAL + 1
elif level >= 5:
external_log_level = logging.INFO
logging.getLogger('botocore').setLevel(external_log_level)
logging.getLogger('urllib3').setLevel(external_log_level)
logging.getLogger('s3transfer').setLevel(external_log_level)
logging.getLogger('urllib3').setLevel(logging.ERROR)
def main():
parser = setup_parser()
argcomplete.autocomplete(parser)
options = parser.parse_args()
if options.subparser is None:
parser.print_help(file=sys.stderr)
return sys.exit(2)
_setup_logger(options)
# Support the deprecated -c option
if getattr(options, 'config', None) is not None:
options.configs.append(options.config)
config = Config.empty(**vars(options))
try:
command = options.command
if not callable(command):
command = getattr(
importlib.import_module(command.rsplit('.', 1)[0]),
command.rsplit('.', 1)[-1])
# Set the process name to something cleaner
process_name = [os.path.basename(sys.argv[0])]
process_name.extend(sys.argv[1:])
setproctitle(' '.join(process_name))
command(config)
except Exception:
if not options.debug:
raise
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
if __name__ == '__main__':
main()
|
py | 1a3167229e4ec1c58cee9dc1915fd0550b9b7330 | #!/usr/bin/env python
'''
cchecker_web.reverse_proxy
Ruthlessly stolen from:
http://flask.pocoo.org/snippets/35/
'''
class ReverseProxied(object):
'''Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
:param app: the WSGI application
'''
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
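# Usage sketch (illustrative, not part of the original snippet): wrap a Flask
# app's WSGI callable so URL building honours the X-Script-Name / X-Scheme
# headers set by the front-end proxy. The Flask app below is an assumption for
# demonstration only.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.wsgi_app = ReverseProxied(app.wsgi_app)
    @app.route('/')
    def index():
        return 'Hello from behind the proxy'
    app.run(port=5001)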
|
py | 1a31681e06abf86489e3e859457b89a94277097a | __usage__ = """
To run tests locally:
python tests/test_arpack.py [-l<int>] [-v<int>]
"""
import threading
import itertools
import sys
import platform
import numpy as np
from numpy.testing import (assert_allclose, assert_array_almost_equal_nulp,
assert_equal, assert_array_equal, suppress_warnings)
from pytest import raises as assert_raises
import pytest
from numpy import dot, conj, random
from scipy.linalg import eig, eigh, hilbert, svd
from scipy.sparse import csc_matrix, csr_matrix, isspmatrix, diags, rand
from scipy.sparse.linalg import LinearOperator, aslinearoperator
from scipy.sparse.linalg.eigen.arpack import eigs, eigsh, svds, \
ArpackNoConvergence, arpack
from scipy._lib._gcutils import assert_deallocated, IS_PYPY
IS_MACOS_ARM64 = sys.platform == 'darwin' and platform.machine() == 'arm64'
# precision for tests
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
def _get_test_tolerance(type_char, mattype=None):
"""
Return tolerance values suitable for a given test:
Parameters
----------
type_char : {'f', 'd', 'F', 'D'}
Data type in ARPACK eigenvalue problem
mattype : {csr_matrix, aslinearoperator, asarray}, optional
Linear operator type
Returns
-------
tol
Tolerance to pass to the ARPACK routine
rtol
Relative tolerance for outputs
atol
Absolute tolerance for outputs
"""
rtol = {'f': 3000 * np.finfo(np.float32).eps,
'F': 3000 * np.finfo(np.float32).eps,
'd': 2000 * np.finfo(np.float64).eps,
'D': 2000 * np.finfo(np.float64).eps}[type_char]
atol = rtol
tol = 0
if mattype is aslinearoperator and type_char in ('f', 'F'):
# iterative methods in single precision: worse errors
# also: bump ARPACK tolerance so that the iterative method converges
tol = 30 * np.finfo(np.float32).eps
rtol *= 5
if mattype is csr_matrix and type_char in ('f', 'F'):
# sparse in single precision: worse errors
rtol *= 5
return tol, rtol, atol
def generate_matrix(N, complex_=False, hermitian=False,
pos_definite=False, sparse=False):
M = np.random.random((N, N))
if complex_:
M = M + 1j * np.random.random((N, N))
if hermitian:
if pos_definite:
if sparse:
i = np.arange(N)
j = np.random.randint(N, size=N-2)
i, j = np.meshgrid(i, j)
M[i, j] = 0
M = np.dot(M.conj(), M.T)
else:
M = np.dot(M.conj(), M.T)
if sparse:
i = np.random.randint(N, size=N * N // 4)
j = np.random.randint(N, size=N * N // 4)
ind = np.nonzero(i == j)
j[ind] = (j[ind] + 1) % N
M[i, j] = 0
M[j, i] = 0
else:
if sparse:
i = np.random.randint(N, size=N * N // 2)
j = np.random.randint(N, size=N * N // 2)
M[i, j] = 0
return M
def generate_matrix_symmetric(N, pos_definite=False, sparse=False):
M = np.random.random((N, N))
M = 0.5 * (M + M.T) # Make M symmetric
if pos_definite:
Id = N * np.eye(N)
if sparse:
M = csr_matrix(M)
M += Id
else:
if sparse:
M = csr_matrix(M)
return M
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
def assert_allclose_cc(actual, desired, **kw):
"""Almost equal or complex conjugates almost equal"""
try:
assert_allclose(actual, desired, **kw)
except AssertionError:
assert_allclose(actual, conj(desired), **kw)
def argsort_which(eigenvalues, typ, k, which,
sigma=None, OPpart=None, mode=None):
"""Return sorted indices of eigenvalues using the "which" keyword
from eigs and eigsh"""
if sigma is None:
reval = np.round(eigenvalues, decimals=_ndigits[typ])
else:
if mode is None or mode == 'normal':
if OPpart is None:
reval = 1. / (eigenvalues - sigma)
elif OPpart == 'r':
reval = 0.5 * (1. / (eigenvalues - sigma)
+ 1. / (eigenvalues - np.conj(sigma)))
elif OPpart == 'i':
reval = -0.5j * (1. / (eigenvalues - sigma)
- 1. / (eigenvalues - np.conj(sigma)))
elif mode == 'cayley':
reval = (eigenvalues + sigma) / (eigenvalues - sigma)
elif mode == 'buckling':
reval = eigenvalues / (eigenvalues - sigma)
else:
raise ValueError("mode='%s' not recognized" % mode)
reval = np.round(reval, decimals=_ndigits[typ])
if which in ['LM', 'SM']:
ind = np.argsort(abs(reval))
elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
ind = np.argsort(np.real(reval))
elif which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
if typ.islower():
ind = np.argsort(abs(np.imag(reval)))
else:
ind = np.argsort(np.imag(reval))
else:
raise ValueError("which='%s' is unrecognized" % which)
if which in ['LM', 'LA', 'LR', 'LI']:
return ind[-k:]
elif which in ['SM', 'SA', 'SR', 'SI']:
return ind[:k]
elif which == 'BE':
return np.concatenate((ind[:k//2], ind[k//2-k:]))
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
mattype=np.asarray, OPpart=None, mode='normal'):
general = ('bmat' in d)
if symmetric:
eigs_func = eigsh
else:
eigs_func = eigs
if general:
err = ("error for %s:general, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
else:
err = ("error for %s:standard, typ=%s, which=%s, sigma=%s, "
"mattype=%s, OPpart=%s, mode=%s" % (eigs_func.__name__,
typ, which, sigma,
mattype.__name__,
OPpart, mode))
a = d['mat'].astype(typ)
ac = mattype(a)
if general:
b = d['bmat'].astype(typ)
bc = mattype(b)
# get exact eigenvalues
exact_eval = d['eval'].astype(typ.upper())
ind = argsort_which(exact_eval, typ, k, which,
sigma, OPpart, mode)
exact_eval = exact_eval[ind]
# compute arpack eigenvalues
kwargs = dict(which=which, v0=v0, sigma=sigma)
if eigs_func is eigsh:
kwargs['mode'] = mode
else:
kwargs['OPpart'] = OPpart
# compute suitable tolerances
kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype)
# on rare occasions, ARPACK routines return results that are proper
# eigenvalues and -vectors, but not necessarily the ones requested in
# the parameter which. This is inherent to the Krylov methods, and
# should not be treated as a failure. If such a rare situation
# occurs, the calculation is tried again (but at most a few times).
ntries = 0
while ntries < 5:
# solve
if general:
try:
eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
else:
try:
eigenvalues, evec = eigs_func(ac, k, **kwargs)
except ArpackNoConvergence:
kwargs['maxiter'] = 20*a.shape[0]
eigenvalues, evec = eigs_func(ac, k, **kwargs)
ind = argsort_which(eigenvalues, typ, k, which,
sigma, OPpart, mode)
eigenvalues = eigenvalues[ind]
evec = evec[:, ind]
# check eigenvectors
LHS = np.dot(a, evec)
if general:
RHS = eigenvalues * np.dot(b, evec)
else:
RHS = eigenvalues * evec
assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
try:
# check eigenvalues
assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol,
err_msg=err)
break
except AssertionError:
ntries += 1
# check eigenvalues
assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol, err_msg=err)
class DictWithRepr(dict):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<%s>" % self.name
class SymmetricParams:
def __init__(self):
self.eigs = eigsh
self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_modes = {None: ['normal'],
0.5: ['normal', 'buckling', 'cayley']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, hermitian=True, pos_definite=True,
complex_=True).astype('F').astype('D')
Mc = generate_matrix(N, hermitian=True, pos_definite=True,
complex_=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard symmetric problem
SS = DictWithRepr("std-symmetric")
SS['mat'] = Ar
SS['v0'] = v0
SS['eval'] = eigh(SS['mat'], eigvals_only=True)
# general symmetric problem
GS = DictWithRepr("gen-symmetric")
GS['mat'] = Ar
GS['bmat'] = M
GS['v0'] = v0
GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)
# standard hermitian problem
SH = DictWithRepr("std-hermitian")
SH['mat'] = Ac
SH['v0'] = v0
SH['eval'] = eigh(SH['mat'], eigvals_only=True)
# general hermitian problem
GH = DictWithRepr("gen-hermitian")
GH['mat'] = Ac
GH['bmat'] = M
GH['v0'] = v0
GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)
# general hermitian problem with hermitian M
GHc = DictWithRepr("gen-hermitian-Mc")
GHc['mat'] = Ac
GHc['bmat'] = Mc
GHc['v0'] = v0
GHc['eval'] = eigh(GHc['mat'], GHc['bmat'], eigvals_only=True)
self.real_test_cases = [SS, GS]
self.complex_test_cases = [SH, GH, GHc]
class NonSymmetricParams:
def __init__(self):
self.eigs = eigs
self.which = ['LM', 'LR', 'LI'] # , 'SM', 'LR', 'SR', 'LI', 'SI']
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
self.sigmas_OPparts = {None: [None],
0.1: ['r'],
0.1 + 0.1j: ['r', 'i']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
np.random.seed(2300)
Ar = generate_matrix(N).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True).astype('f').astype('d')
Ac = generate_matrix(N, complex_=True).astype('F').astype('D')
v0 = np.random.random(N)
# standard real nonsymmetric problem
SNR = DictWithRepr("std-real-nonsym")
SNR['mat'] = Ar
SNR['v0'] = v0
SNR['eval'] = eig(SNR['mat'], left=False, right=False)
# general real nonsymmetric problem
GNR = DictWithRepr("gen-real-nonsym")
GNR['mat'] = Ar
GNR['bmat'] = M
GNR['v0'] = v0
GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)
# standard complex nonsymmetric problem
SNC = DictWithRepr("std-cmplx-nonsym")
SNC['mat'] = Ac
SNC['v0'] = v0
SNC['eval'] = eig(SNC['mat'], left=False, right=False)
# general complex nonsymmetric problem
GNC = DictWithRepr("gen-cmplx-nonsym")
GNC['mat'] = Ac
GNC['bmat'] = M
GNC['v0'] = v0
GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)
self.real_test_cases = [SNR, GNR]
self.complex_test_cases = [SNC, GNC]
def test_symmetric_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for (sigma, modes) in params.sigmas_modes.items():
for mode in modes:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype, None, mode)
def test_hermitian_modes():
params = SymmetricParams()
k = 2
symmetric = True
for D in params.complex_test_cases:
for typ in 'FD':
for which in params.which:
if which == 'BE':
continue # BE invalid for complex
for mattype in params.mattypes:
for sigma in params.sigmas_modes:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype)
def test_symmetric_starting_vector():
params = SymmetricParams()
symmetric = True
for k in [1, 2, 3, 4, 5]:
for D in params.real_test_cases:
for typ in 'fd':
v0 = random.rand(len(D['v0'])).astype(typ)
eval_evec(symmetric, D, typ, k, 'LM', v0)
def test_symmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, hermitian=True, pos_definite=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case") from err
w, v = err.eigenvalues, err.eigenvectors
assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
def test_real_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.real_test_cases:
for typ in 'fd':
for which in params.which:
for mattype in params.mattypes:
for sigma, OPparts in params.sigmas_OPparts.items():
for OPpart in OPparts:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype, OPpart)
def test_complex_nonsymmetric_modes():
params = NonSymmetricParams()
k = 2
symmetric = False
for D in params.complex_test_cases:
for typ in 'DF':
for which in params.which:
for mattype in params.mattypes:
for sigma in params.sigmas_OPparts:
eval_evec(symmetric, D, typ, k, which,
None, sigma, mattype)
def test_standard_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
eval_evec(symmetric, d, typ, k, "LM", v0, sigma)
def test_general_nonsymmetric_starting_vector():
params = NonSymmetricParams()
sigma = None
symmetric = False
for k in [1, 2, 3, 4]:
for d in params.complex_test_cases:
for typ in 'FD':
A = d['mat']
n = A.shape[0]
v0 = random.rand(n).astype(typ)
eval_evec(symmetric, d, typ, k, "LM", v0, sigma)
@pytest.mark.skipif(IS_MACOS_ARM64, reason='failing on arm64')
def test_standard_nonsymmetric_no_convergence():
np.random.seed(1234)
m = generate_matrix(30, complex_=True)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case") from err
w, v = err.eigenvalues, err.eigenvectors
for ww, vv in zip(w, v.T):
assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
def test_eigen_bad_shapes():
# A is not square.
A = csc_matrix(np.zeros((2, 3)))
assert_raises(ValueError, eigs, A)
def test_eigen_bad_kwargs():
# Test eigen on wrong keyword argument
A = csc_matrix(np.zeros((8, 8)))
assert_raises(ValueError, eigs, A, which='XX')
def test_ticket_1459_arpack_crash():
for dtype in [np.float32, np.float64]:
# This test does not seem to catch the issue for float32,
# but we made the same fix there, just to be sure
N = 6
k = 2
np.random.seed(2301)
A = np.random.random((N, N)).astype(dtype)
v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
-0.34365925382227402451, 0.46122533684552280420,
-0.58001341115969040629, -0.78844877570084292984e-01],
dtype=dtype)
# Should not crash:
evals, evecs = eigs(A, k, v0=v0)
#----------------------------------------------------------------------
# sparse SVD tests
def sorted_svd(m, k, which='LM'):
# Compute svd of a dense matrix m, and return singular vectors/values
# sorted.
if isspmatrix(m):
m = m.todense()
u, s, vh = svd(m)
if which == 'LM':
ii = np.argsort(s)[-k:]
elif which == 'SM':
ii = np.argsort(s)[:k]
else:
raise ValueError("unknown which=%r" % (which,))
return u[:, ii], s[ii], vh[ii]
def svd_estimate(u, s, vh):
return np.dot(u, np.dot(np.diag(s), vh))
def svd_test_input_check():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
assert_raises(ValueError, svds, x, k=-1)
assert_raises(ValueError, svds, x, k=0)
assert_raises(ValueError, svds, x, k=10)
assert_raises(ValueError, svds, x, k=x.shape[0])
assert_raises(ValueError, svds, x, k=x.shape[1])
assert_raises(ValueError, svds, x.T, k=x.shape[0])
assert_raises(ValueError, svds, x.T, k=x.shape[1])
def test_svd_simple_real():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
y = np.array([[1, 2, 3, 8],
[3, 4, 3, 5],
[1, 0, 2, 3],
[0, 0, 1, 0]], float)
z = csc_matrix(x)
for solver in [None, 'arpack', 'lobpcg']:
for m in [x.T, x, y, z, z.T]:
for k in range(1, min(m.shape)):
u, s, vh = sorted_svd(m, k)
su, ss, svh = svds(m, k, solver=solver)
m_hat = svd_estimate(u, s, vh)
sm_hat = svd_estimate(su, ss, svh)
assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
def test_svd_simple_complex():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1 + 1j, 0, 2],
[0, 0, 1]], complex)
y = np.array([[1, 2, 3, 8 + 5j],
[3 - 2j, 4, 3, 5],
[1, 0, 2, 3],
[0, 0, 1, 0]], complex)
z = csc_matrix(x)
for solver in [None, 'arpack', 'lobpcg']:
for m in [x, x.T.conjugate(), x.T, y, y.conjugate(), z, z.T]:
for k in range(1, min(m.shape) - 1):
u, s, vh = sorted_svd(m, k)
su, ss, svh = svds(m, k, solver=solver)
m_hat = svd_estimate(u, s, vh)
sm_hat = svd_estimate(su, ss, svh)
assert_array_almost_equal_nulp(m_hat, sm_hat, nulp=1000)
def test_svd_maxiter():
# check that maxiter works as expected
x = hilbert(6)
# ARPACK shouldn't converge on such an ill-conditioned matrix with just
# one iteration
assert_raises(ArpackNoConvergence, svds, x, 1, maxiter=1, ncv=3)
# but 100 iterations should be more than enough
u, s, vt = svds(x, 1, maxiter=100, ncv=3)
assert_allclose(s, [1.7], atol=0.5)
def test_svd_return():
# check that the return_singular_vectors parameter works as expected
x = hilbert(6)
_, s, _ = sorted_svd(x, 2)
ss = svds(x, 2, return_singular_vectors=False)
assert_allclose(s, ss)
def test_svd_which():
# check that the which parameter works as expected
x = hilbert(6)
for which in ['LM', 'SM']:
_, s, _ = sorted_svd(x, 2, which=which)
for solver in [None, 'arpack', 'lobpcg']:
ss = svds(x, 2, which=which, return_singular_vectors=False,
solver=solver)
ss.sort()
assert_allclose(s, ss, atol=np.sqrt(1e-15))
def test_svd_v0():
# check that the v0 parameter works as expected
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], float)
for solver in [None, 'arpack', 'lobpcg']:
u, s, vh = svds(x, 1, solver=solver)
u2, s2, vh2 = svds(x, 1, v0=u[:, 0], solver=solver)
assert_allclose(s, s2, atol=np.sqrt(1e-15))
def _check_svds(A, k, U, s, VH):
n, m = A.shape
# Check shapes.
assert_equal(U.shape, (n, k))
assert_equal(s.shape, (k,))
assert_equal(VH.shape, (k, m))
# Check that the original matrix can be reconstituted.
A_rebuilt = (U*s).dot(VH)
assert_equal(A_rebuilt.shape, A.shape)
assert_allclose(A_rebuilt, A)
# Check that U is a semi-orthogonal matrix.
UH_U = np.dot(U.T.conj(), U)
assert_equal(UH_U.shape, (k, k))
assert_allclose(UH_U, np.identity(k), atol=1e-12)
# Check that V is a semi-orthogonal matrix.
VH_V = np.dot(VH, VH.T.conj())
assert_equal(VH_V.shape, (k, k))
assert_allclose(VH_V, np.identity(k), atol=1e-12)
def test_svd_LM_ones_matrix():
# Check that svds can deal with matrix_rank less than k in LM mode.
k = 3
for n, m in (6, 5), (5, 5), (5, 6):
for t in float, complex:
A = np.ones((n, m), dtype=t)
for solver in [None, 'arpack', 'lobpcg']:
U, s, VH = svds(A, k, solver=solver)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the largest singular value is near sqrt(n*m)
# and the other singular values have been forced to zero.
assert_allclose(np.max(s), np.sqrt(n*m))
assert_array_equal(sorted(s)[:-1], 0)
def test_svd_LM_zeros_matrix():
# Check that svds can deal with matrices containing only zeros.
k = 1
for n, m in (3, 4), (4, 4), (4, 3):
for t in float, complex:
A = np.zeros((n, m), dtype=t)
for solver in [None, 'arpack', 'lobpcg']:
U, s, VH = svds(A, k, solver=solver)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the singular values are zero.
assert_array_equal(s, 0)
def test_svd_LM_zeros_matrix_gh_3452():
# Regression test for a github issue.
# https://github.com/scipy/scipy/issues/3452
    # Note that for complex dtype the size of this matrix is too small for k=1.
n, m, k = 4, 2, 1
A = np.zeros((n, m))
for solver in [None, 'arpack', 'lobpcg']:
U, s, VH = svds(A, k, solver=solver)
# Check some generic properties of svd.
_check_svds(A, k, U, s, VH)
# Check that the singular values are zero.
assert_array_equal(s, 0)
class CheckingLinearOperator(LinearOperator):
def __init__(self, A):
self.A = A
self.dtype = A.dtype
self.shape = A.shape
def _matvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.dot(x)
def _rmatvec(self, x):
assert_equal(max(x.shape), np.size(x))
return self.A.T.conjugate().dot(x)
def test_svd_linop():
nmks = [(6, 7, 3),
(9, 5, 4),
(10, 8, 5)]
def reorder(args):
U, s, VH = args
j = np.argsort(s)
return U[:, j], s[j], VH[j, :]
for n, m, k in nmks:
# Test svds on a LinearOperator.
A = np.random.RandomState(52).randn(n, m)
L = CheckingLinearOperator(A)
v0 = np.ones(min(A.shape))
for solver in [None, 'arpack', 'lobpcg']:
U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver))
U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver))
assert_allclose(np.abs(U1), np.abs(U2))
assert_allclose(s1, s2)
assert_allclose(np.abs(VH1), np.abs(VH2))
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)))
# Try again with which="SM".
A = np.random.RandomState(1909).randn(n, m)
L = CheckingLinearOperator(A)
for solver in [None, 'arpack', 'lobpcg']:
U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver))
U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver))
assert_allclose(np.abs(U1), np.abs(U2))
assert_allclose(s1, s2)
assert_allclose(np.abs(VH1), np.abs(VH2))
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)))
if k < min(n, m) - 1:
# Complex input and explicit which="LM".
for (dt, eps) in [(complex, 1e-7), (np.complex64, 1e-3)]:
rng = np.random.RandomState(1648)
A = (rng.randn(n, m) + 1j * rng.randn(n, m)).astype(dt)
L = CheckingLinearOperator(A)
for solver in [None, 'arpack', 'lobpcg']:
U1, s1, VH1 = reorder(svds(A, k, which="LM", solver=solver))
U2, s2, VH2 = reorder(svds(L, k, which="LM", solver=solver))
assert_allclose(np.abs(U1), np.abs(U2), rtol=eps)
assert_allclose(s1, s2, rtol=eps)
assert_allclose(np.abs(VH1), np.abs(VH2), rtol=eps)
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
np.dot(U2, np.dot(np.diag(s2), VH2)),
rtol=eps)
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_linearoperator_deallocation():
# Check that the linear operators used by the Arpack wrappers are
# deallocatable by reference counting -- they are big objects, so
# Python's cyclic GC may not collect them fast enough before
# running out of memory if eigs/eigsh are called in a tight loop.
M_d = np.eye(10)
M_s = csc_matrix(M_d)
M_o = aslinearoperator(M_d)
with assert_deallocated(lambda: arpack.SpLuInv(M_s)):
pass
with assert_deallocated(lambda: arpack.LuInv(M_d)):
pass
with assert_deallocated(lambda: arpack.IterInv(M_s)):
pass
with assert_deallocated(lambda: arpack.IterOpInv(M_o, None, 0.3)):
pass
with assert_deallocated(lambda: arpack.IterOpInv(M_o, M_o, 0.3)):
pass
def test_svds_partial_return():
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
# test vertical matrix
z = csr_matrix(x)
vh_full = svds(z, 2)[-1]
vh_partial = svds(z, 2, return_singular_vectors='vh')[-1]
dvh = np.linalg.norm(np.abs(vh_full) - np.abs(vh_partial))
if dvh > 1e-10:
raise AssertionError('right eigenvector matrices differ when using return_singular_vectors parameter')
if svds(z, 2, return_singular_vectors='vh')[0] is not None:
raise AssertionError('left eigenvector matrix was computed when it should not have been')
# test horizontal matrix
z = csr_matrix(x.T)
u_full = svds(z, 2)[0]
u_partial = svds(z, 2, return_singular_vectors='vh')[0]
du = np.linalg.norm(np.abs(u_full) - np.abs(u_partial))
if du > 1e-10:
raise AssertionError('left eigenvector matrices differ when using return_singular_vectors parameter')
if svds(z, 2, return_singular_vectors='u')[-1] is not None:
raise AssertionError('right eigenvector matrix was computed when it should not have been')
def test_svds_wrong_eigen_type():
# Regression test for a github issue.
# https://github.com/scipy/scipy/issues/4590
# Function was not checking for eigenvalue type and unintended
# values could be returned.
x = np.array([[1, 2, 3],
[3, 4, 3],
[1, 0, 2],
[0, 0, 1]], float)
assert_raises(ValueError, svds, x, 1, which='LA')
def test_parallel_threads():
results = []
v0 = np.random.rand(50)
def worker():
x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
w, v = eigs(x, k=3, v0=v0)
results.append(w)
w, v = eigsh(x, k=3, v0=v0)
results.append(w)
threads = [threading.Thread(target=worker) for k in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
worker()
for r in results:
assert_allclose(r, results[-1])
def test_reentering():
# Just some linear operator that calls eigs recursively
def A_matvec(x):
x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
w, v = eigs(x, k=1)
return v / w[0]
A = LinearOperator(matvec=A_matvec, dtype=float, shape=(50, 50))
# The Fortran code is not reentrant, so this fails (gracefully, not crashing)
assert_raises(RuntimeError, eigs, A, k=1)
assert_raises(RuntimeError, eigsh, A, k=1)
def test_regression_arpackng_1315():
# Check that issue arpack-ng/#1315 is not present.
# Adapted from arpack-ng/TESTS/bug_1315_single.c
# If this fails, then the installed ARPACK library is faulty.
for dtype in [np.float32, np.float64]:
np.random.seed(1234)
w0 = np.arange(1, 1000+1).astype(dtype)
A = diags([w0], [0], shape=(1000, 1000))
v0 = np.random.rand(1000).astype(dtype)
w, v = eigs(A, k=9, ncv=2*9+1, which="LM", v0=v0)
assert_allclose(np.sort(w), np.sort(w0[-9:]),
rtol=1e-4)
def test_eigs_for_k_greater():
# Test eigs() for k beyond limits.
A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)) # sparse
A = generate_matrix(4, sparse=False)
M_dense = np.random.random((4, 4))
M_sparse = generate_matrix(4, sparse=True)
M_linop = aslinearoperator(M_dense)
eig_tuple1 = eig(A, b=M_dense)
eig_tuple2 = eig(A, b=M_sparse)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
assert_equal(eigs(A, M=M_dense, k=3), eig_tuple1)
assert_equal(eigs(A, M=M_dense, k=4), eig_tuple1)
assert_equal(eigs(A, M=M_dense, k=5), eig_tuple1)
assert_equal(eigs(A, M=M_sparse, k=5), eig_tuple2)
# M as LinearOperator
assert_raises(TypeError, eigs, A, M=M_linop, k=3)
# Test 'A' for different types
assert_raises(TypeError, eigs, aslinearoperator(A), k=3)
assert_raises(TypeError, eigs, A_sparse, k=3)
def test_eigsh_for_k_greater():
# Test eigsh() for k beyond limits.
A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)) # sparse
A = generate_matrix(4, sparse=False)
M_dense = generate_matrix_symmetric(4, pos_definite=True)
M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True)
M_linop = aslinearoperator(M_dense)
eig_tuple1 = eigh(A, b=M_dense)
eig_tuple2 = eigh(A, b=M_sparse)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
assert_equal(eigsh(A, M=M_dense, k=4), eig_tuple1)
assert_equal(eigsh(A, M=M_dense, k=5), eig_tuple1)
assert_equal(eigsh(A, M=M_sparse, k=5), eig_tuple2)
# M as LinearOperator
assert_raises(TypeError, eigsh, A, M=M_linop, k=4)
# Test 'A' for different types
assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
def test_real_eigs_real_k_subset():
np.random.seed(1)
n = 10
A = rand(n, n, density=0.5)
A.data *= 2
A.data -= 1
v0 = np.ones(n)
whichs = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
dtypes = [np.float32, np.float64]
for which, sigma, dtype in itertools.product(whichs, [None, 0, 5], dtypes):
prev_w = np.array([], dtype=dtype)
eps = np.finfo(dtype).eps
for k in range(1, 9):
w, z = eigs(A.astype(dtype), k=k, which=which, sigma=sigma,
v0=v0.astype(dtype), tol=0)
assert_allclose(np.linalg.norm(A.dot(z) - z * w), 0, atol=np.sqrt(eps))
# Check that the set of eigenvalues for `k` is a subset of that for `k+1`
dist = abs(prev_w[:,None] - w).min(axis=1)
assert_allclose(dist, 0, atol=np.sqrt(eps))
prev_w = w
# Check sort order
if sigma is None:
d = w
else:
d = 1 / (w - sigma)
if which == 'LM':
# ARPACK is systematic for 'LM', but sort order
# appears not well defined for other modes
assert np.all(np.diff(abs(d)) <= 1e-6)
|
py | 1a3168461a705bef6dc88b869903422fd3a022e4 | from dataclasses import dataclass
from typing import Optional
@dataclass
class Object:
name: str
type: str
x: float
y: float
width: float
height: float
@dataclass
class ObjectLayer:
name: str
type: Optional[str]
object_count: int
objects: list[Object]
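# Minimal usage sketch (the values below are made up for illustration and are
# not part of the original module): assembling an ObjectLayer from parsed
# objects, e.g. when reading a Tiled-style object layer.
if __name__ == "__main__":
    spawn = Object(name="spawn", type="point", x=32.0, y=64.0, width=0.0, height=0.0)
    layer = ObjectLayer(name="entities", type=None, object_count=1, objects=[spawn])
    print(layer)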
|
py | 1a316854468485c5e234815802066968ed2e8e30 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.24
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1Service(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1ServiceSpec',
'status': 'V1ServiceStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1Service - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1Service. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1Service. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1Service.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1Service. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1Service. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1Service. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1Service.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1Service. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1Service. # noqa: E501
:return: The metadata of this V1Service. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1Service.
:param metadata: The metadata of this V1Service. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1Service. # noqa: E501
:return: The spec of this V1Service. # noqa: E501
:rtype: V1ServiceSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1Service.
:param spec: The spec of this V1Service. # noqa: E501
:type: V1ServiceSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1Service. # noqa: E501
:return: The status of this V1Service. # noqa: E501
:rtype: V1ServiceStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1Service.
:param status: The status of this V1Service. # noqa: E501
:type: V1ServiceStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Service):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Service):
return True
return self.to_dict() != other.to_dict()
|
py | 1a3168f0bee6d670a5d1ee1719ea4d7598faba0b | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class locationparameter(base_resource) :
"""Configuration for location parameter resource."""
def __init__(self) :
self._context = ""
self._q1label = ""
self._q2label = ""
self._q3label = ""
self._q4label = ""
self._q5label = ""
self._q6label = ""
self._Locationfile = ""
self._format = ""
self._custom = 0
self._Static = 0
self._lines = 0
self._errors = 0
self._warnings = 0
self._entries = 0
self._locationfile6 = ""
self._format6 = ""
self._custom6 = 0
self._static6 = 0
self._lines6 = 0
self._errors6 = 0
self._warnings6 = 0
self._entries6 = 0
self._flags = 0
self._status = 0
self._databasemode = ""
self._flushing = ""
self._loading = ""
@property
def context(self) :
"""Context for describing locations. In geographic context, qualifier labels are assigned by default in the following sequence: Continent.Country.Region.City.ISP.Organization. In custom context, the qualifiers labels can have any meaning that you designate.<br/>Possible values = geographic, custom."""
try :
return self._context
except Exception as e:
raise e
@context.setter
def context(self, context) :
"""Context for describing locations. In geographic context, qualifier labels are assigned by default in the following sequence: Continent.Country.Region.City.ISP.Organization. In custom context, the qualifiers labels can have any meaning that you designate.<br/>Possible values = geographic, custom
:param context:
"""
try :
self._context = context
except Exception as e:
raise e
@property
def q1label(self) :
"""Label specifying the meaning of the first qualifier. Can be specified for custom context only.<br/>Minimum length = 1."""
try :
return self._q1label
except Exception as e:
raise e
@q1label.setter
def q1label(self, q1label) :
"""Label specifying the meaning of the first qualifier. Can be specified for custom context only.<br/>Minimum length = 1
:param q1label:
"""
try :
self._q1label = q1label
except Exception as e:
raise e
@property
def q2label(self) :
"""Label specifying the meaning of the second qualifier. Can be specified for custom context only.<br/>Minimum length = 1."""
try :
return self._q2label
except Exception as e:
raise e
@q2label.setter
def q2label(self, q2label) :
"""Label specifying the meaning of the second qualifier. Can be specified for custom context only.<br/>Minimum length = 1
:param q2label:
"""
try :
self._q2label = q2label
except Exception as e:
raise e
@property
def q3label(self) :
"""Label specifying the meaning of the third qualifier. Can be specified for custom context only.<br/>Minimum length = 1."""
try :
return self._q3label
except Exception as e:
raise e
@q3label.setter
def q3label(self, q3label) :
"""Label specifying the meaning of the third qualifier. Can be specified for custom context only.<br/>Minimum length = 1
:param q3label:
"""
try :
self._q3label = q3label
except Exception as e:
raise e
@property
def q4label(self) :
"""Label specifying the meaning of the fourth qualifier. Can be specified for custom context only.<br/>Minimum length = 1."""
try :
return self._q4label
except Exception as e:
raise e
@q4label.setter
def q4label(self, q4label) :
"""Label specifying the meaning of the fourth qualifier. Can be specified for custom context only.<br/>Minimum length = 1
:param q4label:
"""
try :
self._q4label = q4label
except Exception as e:
raise e
@property
def q5label(self) :
"""Label specifying the meaning of the fifth qualifier. Can be specified for custom context only.<br/>Minimum length = 1."""
try :
return self._q5label
except Exception as e:
raise e
@q5label.setter
def q5label(self, q5label) :
"""Label specifying the meaning of the fifth qualifier. Can be specified for custom context only.<br/>Minimum length = 1
:param q5label:
"""
try :
self._q5label = q5label
except Exception as e:
raise e
@property
def q6label(self) :
"""Label specifying the meaning of the sixth qualifier. Can be specified for custom context only.<br/>Minimum length = 1."""
try :
return self._q6label
except Exception as e:
raise e
@q6label.setter
def q6label(self, q6label) :
"""Label specifying the meaning of the sixth qualifier. Can be specified for custom context only.<br/>Minimum length = 1
:param q6label:
"""
try :
self._q6label = q6label
except Exception as e:
raise e
@property
def Locationfile(self) :
"""Currently loaded location database file."""
try :
return self._Locationfile
except Exception as e:
raise e
@property
def format(self) :
"""Location file format.<br/>Possible values = netscaler, ip-country, ip-country-isp, ip-country-region-city, ip-country-region-city-isp, geoip-country, geoip-region, geoip-city, geoip-country-org, geoip-country-isp, geoip-city-isp-org."""
try :
return self._format
except Exception as e:
raise e
@property
def custom(self) :
"""Number of configured custom locations."""
try :
return self._custom
except Exception as e:
raise e
@property
def Static(self) :
"""Number of configured locations in the database file (static locations)."""
try :
return self._Static
except Exception as e:
raise e
@property
def lines(self) :
"""Number of lines in the databse files."""
try :
return self._lines
except Exception as e:
raise e
@property
def errors(self) :
"""Number of errros encountered while reading the database file."""
try :
return self._errors
except Exception as e:
raise e
@property
def warnings(self) :
"""Number of warnings encountered while reading the database file."""
try :
return self._warnings
except Exception as e:
raise e
@property
def entries(self) :
"""Number of successfully added entries."""
try :
return self._entries
except Exception as e:
raise e
@property
def locationfile6(self) :
"""Currently loaded location database file."""
try :
return self._locationfile6
except Exception as e:
raise e
@property
def format6(self) :
"""Location file format.<br/>Possible values = netscaler6, geoip-country6."""
try :
return self._format6
except Exception as e:
raise e
@property
def custom6(self) :
"""Number of configured custom locations."""
try :
return self._custom6
except Exception as e:
raise e
@property
def static6(self) :
"""Number of configured locations in the database file (static locations)."""
try :
return self._static6
except Exception as e:
raise e
@property
def lines6(self) :
"""Number of lines in the databse files."""
try :
return self._lines6
except Exception as e:
raise e
@property
def errors6(self) :
"""Number of errros encountered while reading the database file."""
try :
return self._errors6
except Exception as e:
raise e
@property
def warnings6(self) :
"""Number of warnings encountered while reading the database file."""
try :
return self._warnings6
except Exception as e:
raise e
@property
def entries6(self) :
"""Number of successfully added entries."""
try :
return self._entries6
except Exception as e:
raise e
@property
def flags(self) :
"""Information needed for display. This argument passes information from the kernel to the user space."""
try :
return self._flags
except Exception as e:
raise e
@property
def status(self) :
"""This argument displays when the status (success or failure) of database loading."""
try :
return self._status
except Exception as e:
raise e
@property
def databasemode(self) :
"""This argument displays the database mode.<br/>Possible values = File, Internal, Not applicable."""
try :
return self._databasemode
except Exception as e:
raise e
@property
def flushing(self) :
"""This argument displays the state of flushing.<br/>Possible values = In progress, Idle."""
try :
return self._flushing
except Exception as e:
raise e
@property
def loading(self) :
"""This argument displays the state of loading.<br/>Possible values = In progress, Idle."""
try :
return self._loading
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(locationparameter_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.locationparameter
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
return 0
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
"""Use this API to update locationparameter.
:param client:
:param resource:
"""
try :
if type(resource) is not list :
updateresource = locationparameter()
updateresource.context = resource.context
updateresource.q1label = resource.q1label
updateresource.q2label = resource.q2label
updateresource.q3label = resource.q3label
updateresource.q4label = resource.q4label
updateresource.q5label = resource.q5label
updateresource.q6label = resource.q6label
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
"""Use this API to unset the properties of locationparameter resource.
Properties that need to be unset are specified in args array.
:param client:
:param resource:
:param args:
"""
try :
if type(resource) is not list :
unsetresource = locationparameter()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
"""Use this API to fetch all the locationparameter resources that are configured on netscaler.
:param client:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
if not name :
obj = locationparameter()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
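    # Usage sketch (assumes an authenticated nitro_service session named `client`;
    # the label values below are illustrative, not taken from this module):
    #
    #   params = locationparameter()
    #   params.context = "custom"
    #   params.q1label = "Datacenter"
    #   locationparameter.update(client, params)
    #   current = locationparameter.get(client)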
class Loading:
""" """
In_progress = "In progress"
Idle = "Idle"
class Databasemode:
""" """
File = "File"
Internal = "Internal"
Not_applicable = "Not applicable"
class Format:
""" """
netscaler = "netscaler"
ip_country = "ip-country"
ip_country_isp = "ip-country-isp"
ip_country_region_city = "ip-country-region-city"
ip_country_region_city_isp = "ip-country-region-city-isp"
geoip_country = "geoip-country"
geoip_region = "geoip-region"
geoip_city = "geoip-city"
geoip_country_org = "geoip-country-org"
geoip_country_isp = "geoip-country-isp"
geoip_city_isp_org = "geoip-city-isp-org"
class Context:
""" """
geographic = "geographic"
custom = "custom"
class Flushing:
""" """
In_progress = "In progress"
Idle = "Idle"
class Format6:
""" """
netscaler6 = "netscaler6"
geoip_country6 = "geoip-country6"
class locationparameter_response(base_response) :
""" """
def __init__(self, length=1) :
self.locationparameter = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.locationparameter = [locationparameter() for _ in range(length)]
|
py | 1a31692972934fe64ee971eaa73cd6c16ee4d4a5 | import sys
import time
import cv2
from confluent_kafka import Producer
import certifi
import os
topic = "IBM-SiB"
username = os.environ["USERNAME"]
password = os.environ["PASSWORD"]
brokers = os.environ.get("BROKERS") or "broker-5-3k507nvhhcbmm13z.kafka.svc08.us-south.eventstreams.cloud.ibm.com:9093,broker-3-3k507nvhhcbmm13z.kafka.svc08.us-south.eventstreams.cloud.ibm.com:9093,broker-2-3k507nvhhcbmm13z.kafka.svc08.us-south.eventstreams.cloud.ibm.com:9093,broker-1-3k507nvhhcbmm13z.kafka.svc08.us-south.eventstreams.cloud.ibm.com:9093,broker-0-3k507nvhhcbmm13z.kafka.svc08.us-south.eventstreams.cloud.ibm.com:9093,broker-4-3k507nvhhcbmm13z.kafka.svc08.us-south.eventstreams.cloud.ibm.com:9093"
def publish_video():
# Start up producer
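    # The client authenticates with SASL/PLAIN over TLS, using certifi's CA bundle
    # to verify the broker certificates; limiting in-flight requests to 1 keeps the
    # frames in order even if a send has to be retried.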
conf = {
'bootstrap.servers': brokers,
'security.protocol': 'SASL_SSL',
'ssl.ca.location': certifi.where(),
'sasl.mechanism': 'PLAIN',
'sasl.username': username,
'sasl.password': password,
'max.in.flight.requests.per.connection': 1
}
producer = Producer(**conf)
#videofile = cv2.VideoCapture("video.mp4")
videofile = cv2.VideoCapture(0)
try:
print("Sending the frames")
while(True):
success, frame = videofile.read()
frame = cv2.resize(frame,(320,200)) # Reduce the size of the frame so we get speed
ret, buffer = cv2.imencode('.jpg', frame)
producer.produce(topic, buffer.tobytes())
# No need to send faster than this
time.sleep(0.2)
    except (KeyboardInterrupt, Exception):  # exit cleanly on Ctrl-C or a camera/encode failure
producer.flush()
videofile.release()
print("\nExiting.")
sys.exit(1)
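def consume_video_sketch():
    # A minimal consumer sketch, assuming the same broker credentials and topic as
    # the producer above and that every message body is a JPEG-encoded frame.
    # The group id, numpy dependency and display loop are illustrative additions,
    # not part of the original publisher.
    import numpy as np
    from confluent_kafka import Consumer
    consumer = Consumer({
        'bootstrap.servers': brokers,
        'security.protocol': 'SASL_SSL',
        'ssl.ca.location': certifi.where(),
        'sasl.mechanism': 'PLAIN',
        'sasl.username': username,
        'sasl.password': password,
        'group.id': 'sib-frame-viewer',
        'auto.offset.reset': 'latest'
    })
    consumer.subscribe([topic])
    try:
        while True:
            msg = consumer.poll(1.0)
            if msg is None or msg.error():
                continue
            # Decode the JPEG bytes back into an OpenCV image and display it
            frame = cv2.imdecode(np.frombuffer(msg.value(), dtype=np.uint8), cv2.IMREAD_COLOR)
            cv2.imshow('IBM-SiB', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        consumer.close()
        cv2.destroyAllWindows()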
if __name__ == '__main__':
print("publishing video")
publish_video() |
py | 1a3169ab2b5fd8aa1c03b39270804b8100056254 | # -*- coding: utf-8 -*-
'''
Outputter for displaying results of state runs
==============================================
The return data from the Highstate command is a standard data structure
which is parsed by the highstate outputter to deliver a clean and readable
set of information about the HighState run on minions.
Two configurations can be set to modify the highstate outputter. These values
can be set in the master config to change the output of the ``salt`` command or
set in the minion config to change the output of the ``salt-call`` command.
state_verbose:
By default `state_verbose` is set to `True`, setting this to `False` will
instruct the highstate outputter to omit displaying anything in green, this
means that nothing with a result of True and no changes will not be printed
state_output:
The highstate outputter has six output modes, ``full``, ``terse``,
``mixed``, ``mixed_id``, ``changes`` and ``filter``.
* The default is set to ``full``, which will display many lines of detailed
information for each executed chunk.
* If ``terse`` is used, then the output is greatly simplified and shown in
only one line.
* If ``mixed`` is used, then terse output will be used unless a state
failed, in which case full output will be used.
* If ``mixed_id`` is used, then the mixed form will be used, but the value for ``name``
will be drawn from the state ID. This is useful for cases where the name
value might be very long and hard to read.
* If ``changes`` is used, then terse output will be used if there was no
error and no changes, otherwise full output will be used.
* If ``filter`` is used, then either or both of two different filters can be
used: ``exclude`` or ``terse``.
* for ``exclude``, state.highstate expects a list of states to be excluded
(or ``None``)
followed by ``True`` for terse output or ``False`` for regular output.
Because of parsing nuances, if only one of these is used, it must still
contain a comma. For instance: `exclude=True,`.
* for ``terse``, state.highstate expects simply ``True`` or ``False``.
These can be set as such from the command line, or in the Salt config as
`state_output_exclude` or `state_output_terse`, respectively.
state_tabular:
If `state_output` uses the terse output, set this to `True` for an aligned
output format. If you wish to use a custom format, this can be set to a
string.
Example usage:
If ``state_output: filter`` is set in the configuration file:
.. code-block:: bash
salt '*' state.highstate exclude=None,True
means to exclude no states from the highstate and turn on terse output.
.. code-block:: bash
salt twd state.highstate exclude=problemstate1,problemstate2,False
means to exclude states ``problemstate1`` and ``problemstate2``
from the highstate, and use regular output.
Example output for the above highstate call when ``top.sls`` defines only
one other state to apply to minion ``twd``:
.. code-block:: text
twd:
Summary for twd
------------
Succeeded: 1 (changed=1)
Failed: 0
------------
Total states run: 1
Example output with no special settings in configuration files:
.. code-block:: text
myminion:
----------
ID: test.ping
Function: module.run
Result: True
Comment: Module function test.ping executed
Changes:
----------
ret:
True
Summary for myminion
------------
Succeeded: 1
Failed: 0
------------
        Total states run: 1
'''
# Import python libs
from __future__ import absolute_import
import pprint
import textwrap
# Import salt libs
import salt.utils
import salt.output
from salt.utils.locales import sdecode
# Import 3rd-party libs
import salt.ext.six as six
import logging
log = logging.getLogger(__name__)
def output(data, **kwargs): # pylint: disable=unused-argument
'''
The HighState Outputter is only meant to be used with the state.highstate
function, or a function that returns highstate return data.
'''
# Discard retcode in dictionary as present in orchestrate data
local_masters = [key for key in data.keys() if key.endswith('.local_master')]
orchestrator_output = 'retcode' in data.keys() and len(local_masters) == 1
if orchestrator_output:
del data['retcode']
# If additional information is passed through via the "data" dictionary to
# the highstate outputter, such as "outputter" or "retcode", discard it.
# We only want the state data that was passed through, if it is wrapped up
# in the "data" key, as the orchestrate runner does. See Issue #31330,
# pull request #27838, and pull request #27175 for more information.
if 'data' in data:
data = data.pop('data')
ret = [
_format_host(host, hostdata)[0]
for host, hostdata in six.iteritems(data)
]
if ret:
return "\n".join(ret)
log.error(
'Data passed to highstate outputter is not a valid highstate return: %s',
data
)
# We should not reach here, but if we do return empty string
return ''
def _format_host(host, data):
host = sdecode(host)
colors = salt.utils.get_colors(
__opts__.get('color'),
__opts__.get('color_theme'))
tabular = __opts__.get('state_tabular', False)
rcounts = {}
rdurations = []
hcolor = colors['GREEN']
hstrs = []
nchanges = 0
strip_colors = __opts__.get('strip_colors', True)
if isinstance(data, int) or isinstance(data, str):
# Data in this format is from saltmod.function,
# so it is always a 'change'
nchanges = 1
hstrs.append((u'{0} {1}{2[ENDC]}'
.format(hcolor, data, colors)))
hcolor = colors['CYAN'] # Print the minion name in cyan
if isinstance(data, list):
# Errors have been detected, list them in RED!
hcolor = colors['LIGHT_RED']
hstrs.append((u' {0}Data failed to compile:{1[ENDC]}'
.format(hcolor, colors)))
for err in data:
if strip_colors:
err = salt.output.strip_esc_sequence(sdecode(err))
hstrs.append((u'{0}----------\n {1}{2[ENDC]}'
.format(hcolor, err, colors)))
if isinstance(data, dict):
# Verify that the needed data is present
data_tmp = {}
for tname, info in six.iteritems(data):
            if isinstance(info, dict) and tname != 'changes' and info and '__run_num__' not in info:
err = (u'The State execution failed to record the order '
'in which all states were executed. The state '
'return missing data is:')
hstrs.insert(0, pprint.pformat(info))
hstrs.insert(0, err)
if isinstance(info, dict) and 'result' in info:
data_tmp[tname] = info
data = data_tmp
        # Everything rendered as it should; display the output
for tname in sorted(
data,
key=lambda k: data[k].get('__run_num__', 0)):
ret = data[tname]
# Increment result counts
rcounts.setdefault(ret['result'], 0)
rcounts[ret['result']] += 1
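            # 'duration' may be a bare number or a string such as '123.4 ms';
            # strip the unit suffix before collecting it for the total run time.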
rduration = ret.get('duration', 0)
try:
float(rduration)
rdurations.append(rduration)
except ValueError:
rduration, _, _ = rduration.partition(' ms')
try:
float(rduration)
rdurations.append(rduration)
except ValueError:
log.error('Cannot parse a float from duration {0}'
.format(ret.get('duration', 0)))
tcolor = colors['GREEN']
orchestration = ret.get('__orchestration__', False)
schanged, ctext = _format_changes(ret['changes'], orchestration)
nchanges += 1 if schanged else 0
# Skip this state if it was successful & diff output was requested
if __opts__.get('state_output_diff', False) and \
ret['result'] and not schanged:
continue
# Skip this state if state_verbose is False, the result is True and
# there were no changes made
if not __opts__.get('state_verbose', False) and \
ret['result'] and not schanged:
continue
if schanged:
tcolor = colors['CYAN']
if ret['result'] is False:
hcolor = colors['RED']
tcolor = colors['RED']
if ret['result'] is None:
hcolor = colors['LIGHT_YELLOW']
tcolor = colors['LIGHT_YELLOW']
comps = [sdecode(comp) for comp in tname.split('_|-')]
if __opts__.get('state_output', 'full').lower() == 'filter':
# By default, full data is shown for all types. However, return
# data may be excluded by setting state_output_exclude to a
# comma-separated list of True, False or None, or including the
# same list with the exclude option on the command line. For
# now, this option must include a comma. For example:
# exclude=True,
# The same functionality is also available for making return
# data terse, instead of excluding it.
cliargs = __opts__.get('arg', [])
clikwargs = {}
for item in cliargs:
if isinstance(item, dict) and '__kwarg__' in item:
clikwargs = item.copy()
exclude = clikwargs.get(
'exclude', __opts__.get('state_output_exclude', [])
)
if isinstance(exclude, six.string_types):
exclude = str(exclude).split(',')
terse = clikwargs.get(
'terse', __opts__.get('state_output_terse', [])
)
if isinstance(terse, six.string_types):
terse = str(terse).split(',')
if str(ret['result']) in terse:
msg = _format_terse(tcolor, comps, ret, colors, tabular)
hstrs.append(msg)
continue
if str(ret['result']) in exclude:
continue
elif __opts__.get('state_output', 'full').lower() == 'terse':
# Print this chunk in a terse way and continue in the
# loop
msg = _format_terse(tcolor, comps, ret, colors, tabular)
hstrs.append(msg)
continue
elif __opts__.get('state_output', 'full').lower().startswith('mixed'):
if __opts__['state_output'] == 'mixed_id':
# Swap in the ID for the name. Refs #35137
comps[2] = comps[1]
# Print terse unless it failed
if ret['result'] is not False:
msg = _format_terse(tcolor, comps, ret, colors, tabular)
hstrs.append(msg)
continue
elif __opts__.get('state_output', 'full').lower() == 'changes':
# Print terse if no error and no changes, otherwise, be
# verbose
if ret['result'] and not schanged:
msg = _format_terse(tcolor, comps, ret, colors, tabular)
hstrs.append(msg)
continue
state_lines = [
u'{tcolor}----------{colors[ENDC]}',
u' {tcolor} ID: {comps[1]}{colors[ENDC]}',
u' {tcolor}Function: {comps[0]}.{comps[3]}{colors[ENDC]}',
u' {tcolor} Result: {ret[result]!s}{colors[ENDC]}',
u' {tcolor} Comment: {comment}{colors[ENDC]}',
]
if __opts__.get('state_output_profile', True) and 'start_time' in ret:
state_lines.extend([
u' {tcolor} Started: {ret[start_time]!s}{colors[ENDC]}',
u' {tcolor}Duration: {ret[duration]!s}{colors[ENDC]}',
])
# This isn't the prettiest way of doing this, but it's readable.
if comps[1] != comps[2]:
state_lines.insert(
3, u' {tcolor} Name: {comps[2]}{colors[ENDC]}')
# be sure that ret['comment'] is utf-8 friendly
try:
if not isinstance(ret['comment'], six.text_type):
ret['comment'] = str(ret['comment']).decode('utf-8')
except UnicodeDecodeError:
# but try to continue on errors
pass
try:
comment = sdecode(ret['comment'])
comment = comment.strip().replace(
u'\n',
u'\n' + u' ' * 14)
except AttributeError: # Assume comment is a list
try:
                    comment = u' '.join(ret['comment']).replace(
                        u'\n',
                        u'\n' + u' ' * 13)
                except (AttributeError, TypeError):
# Comment isn't a list either, just convert to string
comment = str(ret['comment'])
comment = comment.strip().replace(
u'\n',
u'\n' + u' ' * 14)
# If there is a data attribute, append it to the comment
if 'data' in ret:
if isinstance(ret['data'], list):
for item in ret['data']:
comment = '{0} {1}'.format(comment, item)
elif isinstance(ret['data'], dict):
for key, value in ret['data'].items():
comment = '{0}\n\t\t{1}: {2}'.format(comment, key, value)
else:
comment = '{0} {1}'.format(comment, ret['data'])
for detail in ['start_time', 'duration']:
ret.setdefault(detail, u'')
if ret['duration'] != '':
ret['duration'] = u'{0} ms'.format(ret['duration'])
svars = {
'tcolor': tcolor,
'comps': comps,
'ret': ret,
'comment': sdecode(comment),
# This nukes any trailing \n and indents the others.
'colors': colors
}
hstrs.extend([sline.format(**svars) for sline in state_lines])
changes = u' Changes: ' + ctext
hstrs.append((u'{0}{1}{2[ENDC]}'
.format(tcolor, changes, colors)))
if 'warnings' in ret:
rcounts.setdefault('warnings', 0)
rcounts['warnings'] += 1
wrapper = textwrap.TextWrapper(
width=80,
initial_indent=u' ' * 14,
subsequent_indent=u' ' * 14
)
hstrs.append(
u' {colors[LIGHT_RED]} Warnings: {0}{colors[ENDC]}'.format(
wrapper.fill('\n'.join(ret['warnings'])).lstrip(),
colors=colors
)
)
# Append result counts to end of output
colorfmt = u'{0}{1}{2[ENDC]}'
rlabel = {True: u'Succeeded', False: u'Failed', None: u'Not Run', 'warnings': u'Warnings'}
count_max_len = max([len(str(x)) for x in six.itervalues(rcounts)] or [0])
label_max_len = max([len(x) for x in six.itervalues(rlabel)] or [0])
line_max_len = label_max_len + count_max_len + 2 # +2 for ': '
hstrs.append(
colorfmt.format(
colors['CYAN'],
u'\nSummary for {0}\n{1}'.format(host, '-' * line_max_len),
colors
)
)
def _counts(label, count):
return u'{0}: {1:>{2}}'.format(
label,
count,
line_max_len - (len(label) + 2)
)
# Successful states
changestats = []
if None in rcounts and rcounts.get(None, 0) > 0:
# test=True states
changestats.append(
colorfmt.format(
colors['LIGHT_YELLOW'],
u'unchanged={0}'.format(rcounts.get(None, 0)),
colors
)
)
if nchanges > 0:
changestats.append(
colorfmt.format(
colors['GREEN'],
u'changed={0}'.format(nchanges),
colors
)
)
if changestats:
changestats = u' ({0})'.format(', '.join(changestats))
else:
changestats = u''
hstrs.append(
colorfmt.format(
colors['GREEN'],
_counts(
rlabel[True],
rcounts.get(True, 0) + rcounts.get(None, 0)
),
colors
) + changestats
)
# Failed states
num_failed = rcounts.get(False, 0)
hstrs.append(
colorfmt.format(
colors['RED'] if num_failed else colors['CYAN'],
_counts(rlabel[False], num_failed),
colors
)
)
num_warnings = rcounts.get('warnings', 0)
if num_warnings:
hstrs.append(
colorfmt.format(
colors['LIGHT_RED'],
_counts(rlabel['warnings'], num_warnings),
colors
)
)
totals = u'{0}\nTotal states run: {1:>{2}}'.format('-' * line_max_len,
sum(six.itervalues(rcounts)) - rcounts.get('warnings', 0),
line_max_len - 7)
hstrs.append(colorfmt.format(colors['CYAN'], totals, colors))
if __opts__.get('state_output_profile', True):
sum_duration = sum(rdurations)
duration_unit = 'ms'
# convert to seconds if duration is 1000ms or more
if sum_duration > 999:
sum_duration /= 1000
duration_unit = 's'
total_duration = u'Total run time: {0} {1}'.format(
'{0:.3f}'.format(sum_duration).rjust(line_max_len - 5),
duration_unit)
hstrs.append(colorfmt.format(colors['CYAN'], total_duration, colors))
if strip_colors:
host = salt.output.strip_esc_sequence(host)
hstrs.insert(0, (u'{0}{1}:{2[ENDC]}'.format(hcolor, host, colors)))
return u'\n'.join(hstrs), nchanges > 0
def _nested_changes(changes):
'''
Print the changes data using the nested outputter
'''
global __opts__ # pylint: disable=W0601
opts = __opts__.copy()
# Pass the __opts__ dict. The loader will splat this modules __opts__ dict
# anyway so have to restore it after the other outputter is done
if __opts__['color']:
__opts__['color'] = u'CYAN'
ret = u'\n'
ret += salt.output.out_format(
changes,
'nested',
__opts__,
nested_indent=14)
__opts__ = opts
return ret
def _format_changes(changes, orchestration=False):
'''
Format the changes dict based on what the data is
'''
if not changes:
return False, u''
if orchestration:
return True, _nested_changes(changes)
if not isinstance(changes, dict):
return True, u'Invalid Changes data: {0}'.format(changes)
ret = changes.get('ret')
if ret is not None and changes.get('out') == 'highstate':
ctext = u''
changed = False
for host, hostdata in six.iteritems(ret):
s, c = _format_host(host, hostdata)
ctext += u'\n' + u'\n'.join((u' ' * 14 + l) for l in s.splitlines())
changed = changed or c
else:
changed = True
ctext = _nested_changes(changes)
return changed, ctext
def _format_terse(tcolor, comps, ret, colors, tabular):
'''
Terse formatting of a message.
'''
result = u'Clean'
if ret['changes']:
result = u'Changed'
if ret['result'] is False:
result = u'Failed'
elif ret['result'] is None:
result = u'Differs'
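    # Build the one-line format: the tabular branch aligns columns and can embed
    # timing info, while the plain branch emits a "Name/Function/Result" sentence.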
if tabular is True:
fmt_string = ''
if 'warnings' in ret:
fmt_string += u'{c[LIGHT_RED]}Warnings:\n{w}{c[ENDC]}\n'.format(
c=colors, w='\n'.join(ret['warnings'])
)
fmt_string += u'{0}'
if __opts__.get('state_output_profile', True) and 'start_time' in ret:
fmt_string += u'{6[start_time]!s} [{6[duration]!s:>7} ms] '
fmt_string += u'{2:>10}.{3:<10} {4:7} Name: {1}{5}'
elif isinstance(tabular, str):
fmt_string = tabular
else:
fmt_string = ''
if 'warnings' in ret:
fmt_string += u'{c[LIGHT_RED]}Warnings:\n{w}{c[ENDC]}'.format(
c=colors, w='\n'.join(ret['warnings'])
)
fmt_string += u' {0} Name: {1} - Function: {2}.{3} - Result: {4}'
if __opts__.get('state_output_profile', True) and 'start_time' in ret:
fmt_string += u' Started: - {6[start_time]!s} Duration: {6[duration]!s} ms'
fmt_string += u'{5}'
msg = fmt_string.format(tcolor,
comps[2],
comps[0],
comps[-1],
result,
colors['ENDC'],
ret)
return msg
|
py | 1a3169cae37ab639803dbb4d38193f63a6aec93b | import asyncio
import logging
import unittest
from contextlib import contextmanager
import aiohttp
from jsonrpcclient.aiohttp_client import aiohttpClient
from quarkchain.cluster.cluster_config import ClusterConfig
from quarkchain.cluster.jsonrpc import JSONRPCServer, quantity_encoder
from quarkchain.cluster.miner import DoubleSHA256, MiningWork
from quarkchain.cluster.tests.test_utils import (
create_transfer_transaction,
ClusterContext,
create_contract_creation_transaction,
create_contract_creation_with_event_transaction,
create_contract_with_storage_transaction,
)
from quarkchain.core import Address, Branch, Code, Identity, Transaction
from quarkchain.core import MinorBlock, RootBlock
from quarkchain.env import DEFAULT_ENV
from quarkchain.evm import opcodes
from quarkchain.evm.messages import mk_contract_address
from quarkchain.evm.transactions import Transaction as EvmTransaction
from quarkchain.utils import call_async, sha3_256
# disable jsonrpcclient verbose logging
logging.getLogger("jsonrpcclient.client.request").setLevel(logging.WARNING)
logging.getLogger("jsonrpcclient.client.response").setLevel(logging.WARNING)
@contextmanager
def jrpc_server_context(master):
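    """Start a throwaway JSON-RPC server on port 38391 for the given master and shut it down on exit."""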
env = DEFAULT_ENV.copy()
env.cluster_config = ClusterConfig()
env.cluster_config.JSON_RPC_PORT = 38391
server = JSONRPCServer.start_test_server(env, master)
yield server
server.shutdown()
def send_request(*args):
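    """Synchronously send a JSON-RPC request (method name followed by params) to the local test server."""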
async def __send_request(*args):
async with aiohttp.ClientSession(loop=asyncio.get_event_loop()) as session:
client = aiohttpClient(session, "http://localhost:38391")
response = await client.request(*args)
return response
return call_async(__send_request(*args))
class TestJSONRPC(unittest.TestCase):
def test_getTransactionCount(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
acc2 = Address.create_random_account(full_shard_id=1)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
master = clusters[0].master
slaves = clusters[0].slave_list
branch = Branch.create(2, 0)
self.assertEqual(
call_async(master.get_primary_account_data(acc1)).transaction_count, 0
)
for i in range(3):
tx = create_transfer_transaction(
shard_state=slaves[0].shards[branch].state,
key=id1.get_key(),
from_address=acc1,
to_address=acc1,
value=12345,
)
self.assertTrue(slaves[0].add_tx(tx))
_, block = call_async(master.get_next_block_to_mine(address=acc1))
self.assertEqual(i + 1, block.header.height)
self.assertTrue(call_async(clusters[0].get_shard(0).add_block(block)))
response = send_request(
"getTransactionCount", "0x" + acc2.serialize().hex()
)
self.assertEqual(response, "0x0")
response = send_request(
"getTransactionCount", "0x" + acc1.serialize().hex()
)
self.assertEqual(response, "0x3")
response = send_request(
"getTransactionCount", "0x" + acc1.serialize().hex(), "latest"
)
self.assertEqual(response, "0x3")
for i in range(3):
response = send_request(
"getTransactionCount", "0x" + acc1.serialize().hex(), hex(i + 1)
)
self.assertEqual(response, hex(i + 1))
def test_sendTransaction(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
acc2 = Address.create_random_account(full_shard_id=1)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
slaves = clusters[0].slave_list
branch = Branch.create(2, 0)
evm_tx = EvmTransaction(
nonce=0,
gasprice=6,
startgas=30000,
to=acc2.recipient,
value=15,
data=b"",
from_full_shard_id=acc1.full_shard_id,
to_full_shard_id=acc2.full_shard_id,
network_id=slaves[0].env.quark_chain_config.NETWORK_ID,
)
evm_tx.sign(id1.get_key())
request = dict(
to="0x" + acc2.recipient.hex(),
gasPrice="0x6",
gas=hex(30000),
value="0xf", # 15
v=quantity_encoder(evm_tx.v),
r=quantity_encoder(evm_tx.r),
s=quantity_encoder(evm_tx.s),
nonce="0x0",
fromFullShardId="0x00000000",
toFullShardId="0x00000001",
network_id=hex(slaves[0].env.quark_chain_config.NETWORK_ID),
)
tx = Transaction(code=Code.create_evm_code(evm_tx))
response = send_request("sendTransaction", request)
self.assertEqual(response, "0x" + tx.get_hash().hex() + "00000000")
self.assertEqual(len(slaves[0].shards[branch].state.tx_queue), 1)
self.assertEqual(
slaves[0].shards[branch].state.tx_queue.pop_transaction(), evm_tx
)
def test_sendTransaction_with_bad_signature(self):
""" sendTransaction validates signature """
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
acc2 = Address.create_random_account(full_shard_id=1)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
slaves = clusters[0].slave_list
branch = Branch.create(2, 0)
request = dict(
to="0x" + acc2.recipient.hex(),
gasPrice="0x6",
gas=hex(30000),
value="0xf",
v="0x1",
r="0x2",
s="0x3",
nonce="0x0",
fromFullShardId="0x00000000",
toFullShardId="0x00000001",
)
self.assertIsNone(send_request("sendTransaction", request))
self.assertEqual(len(slaves[0].shards[branch].state.tx_queue), 0)
def test_sendTransaction_missing_from_full_shard_id(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
request = dict(
to="0x" + acc1.recipient.hex(),
gasPrice="0x6",
gas=hex(30000),
value="0xf",
v="0x1",
r="0x2",
s="0x3",
nonce="0x0",
)
with self.assertRaises(Exception):
send_request("sendTransaction", request)
def test_getNextBlockToMine_and_addBlock(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
acc3 = Address.create_random_account(full_shard_id=1)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
slaves = clusters[0].slave_list
# Expect to mine root that confirms the genesis minor blocks
response = send_request(
"getNextBlockToMine", "0x" + acc1.serialize().hex(), "0x0"
)
self.assertTrue(response["isRootBlock"])
block = RootBlock.deserialize(bytes.fromhex(response["blockData"][2:]))
self.assertEqual(block.header.height, 1)
self.assertEqual(len(block.minor_block_header_list), 2)
self.assertEqual(block.minor_block_header_list[0].height, 0)
self.assertEqual(block.minor_block_header_list[1].height, 0)
send_request("addBlock", "0x0", response["blockData"])
tx = create_transfer_transaction(
shard_state=clusters[0].get_shard_state(0),
key=id1.get_key(),
from_address=acc1,
to_address=acc3,
value=14,
gas=opcodes.GTXXSHARDCOST + opcodes.GTXCOST,
)
self.assertTrue(slaves[0].add_tx(tx))
# Expect to mine shard 0 since it has one tx
response = send_request(
"getNextBlockToMine", "0x" + acc1.serialize().hex(), "0x0"
)
self.assertFalse(response["isRootBlock"])
block1 = MinorBlock.deserialize(bytes.fromhex(response["blockData"][2:]))
self.assertEqual(block1.header.branch.value, 0b10)
self.assertTrue(send_request("addBlock", "0x2", response["blockData"]))
self.assertEqual(
clusters[0].get_shard_state(1).get_balance(acc3.recipient), 0
)
# Expect to mine shard 1 due to proof-of-progress
response = send_request(
"getNextBlockToMine", "0x" + acc1.serialize().hex(), "0x0"
)
self.assertFalse(response["isRootBlock"])
block2 = MinorBlock.deserialize(bytes.fromhex(response["blockData"][2:]))
self.assertEqual(block2.header.branch.value, 0b11)
self.assertTrue(send_request("addBlock", "0x3", response["blockData"]))
# Expect to mine root
response = send_request(
"getNextBlockToMine", "0x" + acc1.serialize().hex(), "0x0"
)
self.assertTrue(response["isRootBlock"])
block = RootBlock.deserialize(bytes.fromhex(response["blockData"][2:]))
self.assertEqual(block.header.height, 2)
self.assertEqual(len(block.minor_block_header_list), 2)
self.assertEqual(block.minor_block_header_list[0], block1.header)
self.assertEqual(block.minor_block_header_list[1], block2.header)
send_request("addBlock", "0x0", response["blockData"])
self.assertEqual(
clusters[0].get_shard_state(1).get_balance(acc3.recipient), 0
)
# Expect to mine shard 1 for the gas on xshard tx to acc3
response = send_request(
"getNextBlockToMine", "0x" + acc1.serialize().hex(), "0x0"
)
self.assertFalse(response["isRootBlock"])
block3 = MinorBlock.deserialize(bytes.fromhex(response["blockData"][2:]))
self.assertEqual(block3.header.branch.value, 0b11)
self.assertTrue(send_request("addBlock", "0x3", response["blockData"]))
# Expect withdrawTo is included in acc3's balance
resp = send_request("getBalance", "0x" + acc3.serialize().hex())
self.assertEqual(resp["branch"], "0x3")
self.assertEqual(resp["balance"], "0xe")
def test_getNextBlockToMine_with_shard_mask(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
response = send_request(
"getNextBlockToMine", "0x" + acc1.serialize().hex(), "0x2"
)
self.assertFalse(response["isRootBlock"])
block1 = MinorBlock.deserialize(bytes.fromhex(response["blockData"][2:]))
self.assertEqual(block1.header.branch.value, 0b10)
response = send_request(
"getNextBlockToMine", "0x" + acc1.serialize().hex(), "0x3"
)
self.assertFalse(response["isRootBlock"])
block1 = MinorBlock.deserialize(bytes.fromhex(response["blockData"][2:]))
self.assertEqual(block1.header.branch.value, 0b11)
def test_getMinorBlock(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
master = clusters[0].master
slaves = clusters[0].slave_list
branch = Branch.create(2, 0)
self.assertEqual(
call_async(master.get_primary_account_data(acc1)).transaction_count, 0
)
tx = create_transfer_transaction(
shard_state=slaves[0].shards[branch].state,
key=id1.get_key(),
from_address=acc1,
to_address=acc1,
value=12345,
)
self.assertTrue(slaves[0].add_tx(tx))
_, block1 = call_async(master.get_next_block_to_mine(address=acc1))
self.assertTrue(call_async(clusters[0].get_shard(0).add_block(block1)))
# By id
resp = send_request(
"getMinorBlockById",
"0x" + block1.header.get_hash().hex() + "0" * 8,
False,
)
self.assertEqual(
resp["transactions"][0], "0x" + tx.get_hash().hex() + "0" * 8
)
resp = send_request(
"getMinorBlockById",
"0x" + block1.header.get_hash().hex() + "0" * 8,
True,
)
self.assertEqual(
resp["transactions"][0]["hash"], "0x" + tx.get_hash().hex()
)
resp = send_request("getMinorBlockById", "0x" + "ff" * 36, True)
self.assertIsNone(resp)
# By height
resp = send_request("getMinorBlockByHeight", "0x0", "0x1", False)
self.assertEqual(
resp["transactions"][0], "0x" + tx.get_hash().hex() + "0" * 8
)
resp = send_request("getMinorBlockByHeight", "0x0", "0x1", True)
self.assertEqual(
resp["transactions"][0]["hash"], "0x" + tx.get_hash().hex()
)
resp = send_request("getMinorBlockByHeight", "0x1", "0x2", False)
self.assertIsNone(resp)
resp = send_request("getMinorBlockByHeight", "0x0", "0x4", False)
self.assertIsNone(resp)
def test_getTransactionById(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
master = clusters[0].master
slaves = clusters[0].slave_list
branch = Branch.create(2, 0)
self.assertEqual(
call_async(master.get_primary_account_data(acc1)).transaction_count, 0
)
tx = create_transfer_transaction(
shard_state=slaves[0].shards[branch].state,
key=id1.get_key(),
from_address=acc1,
to_address=acc1,
value=12345,
)
self.assertTrue(slaves[0].add_tx(tx))
_, block1 = call_async(master.get_next_block_to_mine(address=acc1))
self.assertTrue(call_async(clusters[0].get_shard(0).add_block(block1)))
resp = send_request(
"getTransactionById",
"0x"
+ tx.get_hash().hex()
+ acc1.full_shard_id.to_bytes(4, "big").hex(),
)
self.assertEqual(resp["hash"], "0x" + tx.get_hash().hex())
def test_call_success(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
slaves = clusters[0].slave_list
branch = Branch.create(2, 0)
response = send_request(
"call",
{"to": "0x" + acc1.serialize().hex(), "gas": hex(21000)},
"latest",
)
self.assertEqual(response, "0x")
self.assertEqual(
len(slaves[0].shards[branch].state.tx_queue),
0,
"should not affect tx queue",
)
def test_call_failure(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
slaves = clusters[0].slave_list
branch = Branch.create(2, 0)
# insufficient gas
response = send_request(
"call", {"to": "0x" + acc1.serialize().hex(), "gas": "0x1"}, None
)
self.assertIsNone(response, "failed tx should return None")
self.assertEqual(
len(slaves[0].shards[branch].state.tx_queue),
0,
"should not affect tx queue",
)
def test_getTransactionReceipt_not_exist(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
for endpoint in ("getTransactionReceipt", "eth_getTransactionReceipt"):
resp = send_request(endpoint, "0x" + bytes(36).hex())
self.assertIsNone(resp)
def test_getTransactionReceipt_on_transfer(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
master = clusters[0].master
slaves = clusters[0].slave_list
branch = Branch.create(2, 0)
tx = create_transfer_transaction(
shard_state=slaves[0].shards[branch].state,
key=id1.get_key(),
from_address=acc1,
to_address=acc1,
value=12345,
)
self.assertTrue(slaves[0].add_tx(tx))
_, block1 = call_async(master.get_next_block_to_mine(address=acc1))
self.assertTrue(call_async(clusters[0].get_shard(0).add_block(block1)))
for endpoint in ("getTransactionReceipt", "eth_getTransactionReceipt"):
resp = send_request(
endpoint,
"0x"
+ tx.get_hash().hex()
+ acc1.full_shard_id.to_bytes(4, "big").hex(),
)
self.assertEqual(resp["transactionHash"], "0x" + tx.get_hash().hex())
self.assertEqual(resp["status"], "0x1")
self.assertEqual(resp["cumulativeGasUsed"], "0x5208")
self.assertIsNone(resp["contractAddress"])
def test_getTransactionReceipt_on_x_shard_transfer(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
acc2 = Address.create_from_identity(id1, full_shard_id=1)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
master = clusters[0].master
slaves = clusters[0].slave_list
is_root, block = call_async(master.get_next_block_to_mine(address=acc2))
self.assertTrue(is_root)
call_async(master.add_root_block(block))
s1, s2 = clusters[0].get_shard_state(0), clusters[0].get_shard_state(1)
tx_gen = lambda s, f, t: create_transfer_transaction(
shard_state=s,
key=id1.get_key(),
from_address=f,
to_address=t,
gas=21000 if f == t else 30000,
value=12345,
)
self.assertTrue(slaves[0].add_tx(tx_gen(s1, acc1, acc2)))
_, b1 = call_async(master.get_next_block_to_mine(address=acc1))
self.assertTrue(call_async(clusters[0].get_shard(0).add_block(b1)))
_, b2 = call_async(master.get_next_block_to_mine(address=acc2))
self.assertTrue(call_async(clusters[0].get_shard(1).add_block(b2)))
_, root_block = call_async(
master.get_next_block_to_mine(address=acc1, prefer_root=True)
)
call_async(master.add_root_block(root_block))
tx = tx_gen(s2, acc2, acc2)
self.assertTrue(slaves[1].add_tx(tx))
_, b3 = call_async(master.get_next_block_to_mine(address=acc2))
self.assertTrue(call_async(clusters[0].get_shard(1).add_block(b3)))
# in-shard tx 21000 + receiving x-shard tx 9000
self.assertEqual(s2.evm_state.gas_used, 30000)
self.assertEqual(s2.evm_state.xshard_receive_gas_used, 9000)
for endpoint in ("getTransactionReceipt", "eth_getTransactionReceipt"):
resp = send_request(
endpoint,
"0x"
+ tx.get_hash().hex()
+ acc2.full_shard_id.to_bytes(4, "big").hex(),
)
self.assertEqual(resp["transactionHash"], "0x" + tx.get_hash().hex())
self.assertEqual(resp["status"], "0x1")
self.assertEqual(resp["cumulativeGasUsed"], hex(30000))
self.assertEqual(resp["gasUsed"], hex(21000))
self.assertIsNone(resp["contractAddress"])
def test_getTransactionReceipt_on_contract_creation(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
master = clusters[0].master
slaves = clusters[0].slave_list
branch = Branch.create(2, 0)
to_full_shard_id = acc1.full_shard_id + 2
tx = create_contract_creation_transaction(
shard_state=slaves[0].shards[branch].state,
key=id1.get_key(),
from_address=acc1,
to_full_shard_id=to_full_shard_id,
)
self.assertTrue(slaves[0].add_tx(tx))
_, block1 = call_async(master.get_next_block_to_mine(address=acc1))
self.assertTrue(call_async(clusters[0].get_shard(0).add_block(block1)))
for endpoint in ("getTransactionReceipt", "eth_getTransactionReceipt"):
resp = send_request(
endpoint, "0x" + tx.get_hash().hex() + branch.serialize().hex()
)
self.assertEqual(resp["transactionHash"], "0x" + tx.get_hash().hex())
self.assertEqual(resp["status"], "0x1")
self.assertEqual(resp["cumulativeGasUsed"], "0x213eb")
contract_address = mk_contract_address(
acc1.recipient, to_full_shard_id, 0
)
self.assertEqual(
resp["contractAddress"],
"0x"
+ contract_address.hex()
+ to_full_shard_id.to_bytes(4, "big").hex(),
)
def test_getTransactionReceipt_on_contract_creation_failure(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
master = clusters[0].master
slaves = clusters[0].slave_list
# Add a root block to update block gas limit for xshard tx throttling
# so that the following tx can be processed
is_root, root_block = call_async(master.get_next_block_to_mine(acc1))
self.assertTrue(is_root)
call_async(master.add_root_block(root_block))
branch = Branch.create(2, 0)
to_full_shard_id = (
acc1.full_shard_id + 1
) # x-shard contract creation should fail
tx = create_contract_creation_transaction(
shard_state=slaves[0].shards[branch].state,
key=id1.get_key(),
from_address=acc1,
to_full_shard_id=to_full_shard_id,
)
self.assertTrue(slaves[0].add_tx(tx))
_, block1 = call_async(master.get_next_block_to_mine(address=acc1))
self.assertTrue(call_async(clusters[0].get_shard(0).add_block(block1)))
for endpoint in ("getTransactionReceipt", "eth_getTransactionReceipt"):
resp = send_request(
endpoint, "0x" + tx.get_hash().hex() + branch.serialize().hex()
)
self.assertEqual(resp["transactionHash"], "0x" + tx.get_hash().hex())
self.assertEqual(resp["status"], "0x0")
self.assertEqual(resp["cumulativeGasUsed"], "0x13d6c")
self.assertIsNone(resp["contractAddress"])
def test_getLogs(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
expected_log_parts = {
"logIndex": "0x0",
"transactionIndex": "0x0",
"blockNumber": "0x1",
"blockHeight": "0x1",
"data": "0x",
}
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
master = clusters[0].master
slaves = clusters[0].slave_list
branch = Branch.create(2, 0)
tx = create_contract_creation_with_event_transaction(
shard_state=slaves[0].shards[branch].state,
key=id1.get_key(),
from_address=acc1,
to_full_shard_id=acc1.full_shard_id,
)
self.assertTrue(slaves[0].add_tx(tx))
_, block = call_async(master.get_next_block_to_mine(address=acc1))
self.assertTrue(call_async(clusters[0].get_shard(0).add_block(block)))
for using_eth_endpoint in (True, False):
shard_id = hex(acc1.full_shard_id)
if using_eth_endpoint:
req = lambda o: send_request("eth_getLogs", o, shard_id)
else:
# `None` needed to bypass some request modification
req = lambda o: send_request("getLogs", o, shard_id)
# no filter object as wild cards
resp = req({})
self.assertEqual(1, len(resp))
self.assertDictContainsSubset(expected_log_parts, resp[0])
# filter by contract address
contract_addr = mk_contract_address(
acc1.recipient, acc1.full_shard_id, 0
)
filter_obj = {
"address": "0x"
+ contract_addr.hex()
+ (
""
if using_eth_endpoint
else hex(acc1.full_shard_id)[2:].zfill(8)
)
}
resp = req(filter_obj)
self.assertEqual(1, len(resp))
# filter by topics
filter_obj = {
"topics": [
"0xa9378d5bd800fae4d5b8d4c6712b2b64e8ecc86fdc831cb51944000fc7c8ecfa"
]
}
filter_obj_nested = {
"topics": [
[
"0xa9378d5bd800fae4d5b8d4c6712b2b64e8ecc86fdc831cb51944000fc7c8ecfa"
]
]
}
for f in (filter_obj, filter_obj_nested):
resp = req(f)
self.assertEqual(1, len(resp))
self.assertDictContainsSubset(expected_log_parts, resp[0])
self.assertEqual(
"0xa9378d5bd800fae4d5b8d4c6712b2b64e8ecc86fdc831cb51944000fc7c8ecfa",
resp[0]["topics"][0],
)
def test_estimateGas(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
response = send_request(
"estimateGas", {"to": "0x" + acc1.serialize().hex()}
)
self.assertEqual(response, "0x5208") # 21000
def test_getStorageAt(self):
key = bytes.fromhex(
"c987d4506fb6824639f9a9e3b8834584f5165e94680501d1b0044071cd36c3b3"
)
id1 = Identity.create_from_key(key)
acc1 = Address.create_from_identity(id1, full_shard_id=0)
created_addr = "0x8531eb33bba796115f56ffa1b7df1ea3acdd8cdd00000000"
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
master = clusters[0].master
slaves = clusters[0].slave_list
branch = Branch.create(2, 0)
tx = create_contract_with_storage_transaction(
shard_state=slaves[0].shards[branch].state,
key=id1.get_key(),
from_address=acc1,
to_full_shard_id=acc1.full_shard_id,
)
self.assertTrue(slaves[0].add_tx(tx))
_, block = call_async(master.get_next_block_to_mine(address=acc1))
self.assertTrue(call_async(clusters[0].get_shard(0).add_block(block)))
for using_eth_endpoint in (True, False):
if using_eth_endpoint:
req = lambda k: send_request(
"eth_getStorageAt", created_addr[:-8], k, "0x0"
)
else:
req = lambda k: send_request("getStorageAt", created_addr, k)
# first storage
response = req("0x0")
# equals 1234
self.assertEqual(
response,
"0x00000000000000000000000000000000000000000000000000000000000004d2",
)
# mapping storage
k = sha3_256(
bytes.fromhex(acc1.recipient.hex().zfill(64) + "1".zfill(64))
)
response = req("0x" + k.hex())
self.assertEqual(
response,
"0x000000000000000000000000000000000000000000000000000000000000162e",
)
# doesn't exist
response = req("0x3")
self.assertEqual(
response,
"0x0000000000000000000000000000000000000000000000000000000000000000",
)
def test_getCode(self):
key = bytes.fromhex(
"c987d4506fb6824639f9a9e3b8834584f5165e94680501d1b0044071cd36c3b3"
)
id1 = Identity.create_from_key(key)
acc1 = Address.create_from_identity(id1, full_shard_id=0)
created_addr = "0x8531eb33bba796115f56ffa1b7df1ea3acdd8cdd00000000"
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
master = clusters[0].master
slaves = clusters[0].slave_list
branch = Branch.create(2, 0)
tx = create_contract_with_storage_transaction(
shard_state=slaves[0].shards[branch].state,
key=id1.get_key(),
from_address=acc1,
to_full_shard_id=acc1.full_shard_id,
)
self.assertTrue(slaves[0].add_tx(tx))
_, block = call_async(master.get_next_block_to_mine(address=acc1))
self.assertTrue(call_async(clusters[0].get_shard(0).add_block(block)))
for using_eth_endpoint in (True, False):
if using_eth_endpoint:
resp = send_request("eth_getCode", created_addr[:-8], "0x0")
else:
resp = send_request("getCode", created_addr)
self.assertEqual(
resp,
"0x6080604052600080fd00a165627a7a72305820a6ef942c101f06333ac35072a8ff40332c71d0e11cd0e6d86de8cae7b42696550029",
)
def test_gasPrice(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
with ClusterContext(1, acc1) as clusters, jrpc_server_context(
clusters[0].master
):
master = clusters[0].master
slaves = clusters[0].slave_list
branch = Branch.create(2, 0)
# run for multiple times
for _ in range(3):
tx = create_transfer_transaction(
shard_state=slaves[0].shards[branch].state,
key=id1.get_key(),
from_address=acc1,
to_address=acc1,
value=0,
gas_price=12,
)
self.assertTrue(slaves[0].add_tx(tx))
_, block = call_async(master.get_next_block_to_mine(address=acc1))
self.assertTrue(call_async(clusters[0].get_shard(0).add_block(block)))
for using_eth_endpoint in (True, False):
if using_eth_endpoint:
resp = send_request("eth_gasPrice", "0x0")
else:
resp = send_request("gasPrice", "0x0")
self.assertEqual(resp, "0xc")
def test_getWork_and_submitWork(self):
id1 = Identity.create_random_identity()
acc1 = Address.create_from_identity(id1, full_shard_id=0)
with ClusterContext(
1, acc1, remote_mining=True, shard_size=1
) as clusters, jrpc_server_context(clusters[0].master):
master = clusters[0].master
slaves = clusters[0].slave_list
branch = Branch.create(1, 0)
tx = create_transfer_transaction(
shard_state=slaves[0].shards[branch].state,
key=id1.get_key(),
from_address=acc1,
to_address=acc1,
value=0,
gas_price=12,
)
self.assertTrue(slaves[0].add_tx(tx))
for shard_id in ["0x0", None]: # shard, then root
resp = send_request("getWork", shard_id)
self.assertEqual(resp[1:], ["0x1", "0xa"]) # height and diff
header_hash_hex = resp[0]
_, block = call_async(
master.get_next_block_to_mine(
address=master.env.quark_chain_config.testnet_master_address,
prefer_root=shard_id is None,
)
)
self.assertEqual(
header_hash_hex[2:], block.header.get_hash_for_mining().hex()
)
# solve it and submit
work = MiningWork(bytes.fromhex(resp[0][2:]), 1, 10)
solver = DoubleSHA256(work)
nonce = solver.mine(0, 10000).nonce
mixhash = "0x" + sha3_256(b"").hex()
resp = send_request(
"submitWork", shard_id, header_hash_hex, hex(nonce), mixhash
)
self.assertTrue(resp)
# show progress
_, new_block = call_async(master.get_next_block_to_mine(address=acc1))
self.assertIsInstance(new_block, MinorBlock)
self.assertEqual(new_block.header.height, 2)
|
py | 1a316a807e5add430eb8e2f3d9d2ccec3f9811ca | import logging
import os
from unittest.mock import create_autospec, patch
import boto3
import botocore
import botocore.client
import botocore.config
import pytest
import awswrangler as wr
from awswrangler._config import apply_configs
from awswrangler.s3._fs import open_s3_object
logging.getLogger("awswrangler").setLevel(logging.DEBUG)
def _urls_test(glue_database):
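    """Patch botocore client creation to check that each service client is built with the configured endpoint URL."""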
original = botocore.client.ClientCreator.create_client
def wrapper(self, **kwarg):
name = kwarg["service_name"]
url = kwarg["endpoint_url"]
if name == "sts":
assert url == wr.config.sts_endpoint_url
elif name == "athena":
assert url == wr.config.athena_endpoint_url
elif name == "s3":
assert url == wr.config.s3_endpoint_url
elif name == "glue":
assert url == wr.config.glue_endpoint_url
return original(self, **kwarg)
with patch("botocore.client.ClientCreator.create_client", new=wrapper):
wr.athena.read_sql_query(sql="SELECT 1 as col0", database=glue_database)
def test_basics(path, glue_database, glue_table, workgroup0, workgroup1):
args = {"table": glue_table, "path": "", "columns_types": {"col0": "bigint"}}
# Missing database argument
with pytest.raises(TypeError):
wr.catalog.create_parquet_table(**args)
# Configuring default database value
wr.config.database = glue_database
# Testing configured database
wr.catalog.create_parquet_table(**args)
# Configuring default database with wrong value
wr.config.database = "missing_database"
with pytest.raises(boto3.client("glue").exceptions.EntityNotFoundException):
wr.catalog.create_parquet_table(**args)
# Overwriting configured database
wr.catalog.create_parquet_table(database=glue_database, **args)
# Testing configured s3 block size
size = 1 * 2 ** 20 # 1 MB
wr.config.s3_block_size = size
with open_s3_object(path, mode="wb") as s3obj:
s3obj.write(b"foo")
with open_s3_object(path, mode="rb") as s3obj:
assert s3obj._s3_block_size == size
# Resetting all configs
wr.config.reset()
# Missing database argument
with pytest.raises(TypeError):
wr.catalog.does_table_exist(table=glue_table)
# Configuring default database value again
wr.config.database = glue_database
# Testing configured database again
assert wr.catalog.does_table_exist(table=glue_table) is True
# Resetting this specific config
wr.config.reset("database")
# Missing database argument
with pytest.raises(TypeError):
wr.catalog.does_table_exist(table=glue_table)
# exporting environment variable
os.environ["WR_DATABASE"] = glue_database
wr.config.reset("database")
assert wr.catalog.does_table_exist(table=glue_table) is True
del os.environ["WR_DATABASE"]
wr.config.reset("database")
# Missing database argument
with pytest.raises(TypeError):
wr.catalog.does_table_exist(table=glue_table)
assert wr.config.to_pandas().shape == (len(wr._config._CONFIG_ARGS), 7)
# Workgroup
wr.config.workgroup = workgroup0
df = wr.athena.read_sql_query(sql="SELECT 1 as col0", database=glue_database)
assert df.query_metadata["WorkGroup"] == workgroup0
os.environ["WR_WORKGROUP"] = workgroup1
wr.config.reset()
df = wr.athena.read_sql_query(sql="SELECT 1 as col0", database=glue_database)
assert df.query_metadata["WorkGroup"] == workgroup1
# Endpoints URLs
region = boto3.Session().region_name
wr.config.sts_endpoint_url = f"https://sts.{region}.amazonaws.com"
wr.config.s3_endpoint_url = f"https://s3.{region}.amazonaws.com"
wr.config.athena_endpoint_url = f"https://athena.{region}.amazonaws.com"
wr.config.glue_endpoint_url = f"https://glue.{region}.amazonaws.com"
_urls_test(glue_database)
os.environ["WR_STS_ENDPOINT_URL"] = f"https://sts.{region}.amazonaws.com"
os.environ["WR_S3_ENDPOINT_URL"] = f"https://s3.{region}.amazonaws.com"
os.environ["WR_ATHENA_ENDPOINT_URL"] = f"https://athena.{region}.amazonaws.com"
os.environ["WR_GLUE_ENDPOINT_URL"] = f"https://glue.{region}.amazonaws.com"
wr.config.reset()
_urls_test(glue_database)
def test_athena_cache_configuration():
wr.config.max_local_cache_entries = 20
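    # Raising the local cache limit is expected to lift the remote cache limit to
    # at least the same value, which is what this assertion checks.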
assert wr.config.max_remote_cache_entries == 20
def test_botocore_config(path):
original = botocore.client.ClientCreator.create_client
# Default values for botocore.config.Config
expected_max_retries_attempt = 5
expected_connect_timeout = 10
expected_max_pool_connections = 10
expected_retry_mode = None
def wrapper(self, **kwarg):
assert kwarg["client_config"].retries["max_attempts"] == expected_max_retries_attempt
assert kwarg["client_config"].connect_timeout == expected_connect_timeout
assert kwarg["client_config"].max_pool_connections == expected_max_pool_connections
assert kwarg["client_config"].retries.get("mode") == expected_retry_mode
return original(self, **kwarg)
# Check for default values
with patch("botocore.client.ClientCreator.create_client", new=wrapper):
with open_s3_object(path, mode="wb") as s3obj:
s3obj.write(b"foo")
# Update default config with environment variables
expected_max_retries_attempt = 20
expected_connect_timeout = 10
expected_max_pool_connections = 10
expected_retry_mode = "adaptive"
os.environ["AWS_MAX_ATTEMPTS"] = str(expected_max_retries_attempt)
os.environ["AWS_RETRY_MODE"] = expected_retry_mode
with patch("botocore.client.ClientCreator.create_client", new=wrapper):
with open_s3_object(path, mode="wb") as s3obj:
s3obj.write(b"foo")
del os.environ["AWS_MAX_ATTEMPTS"]
del os.environ["AWS_RETRY_MODE"]
# Update botocore.config.Config
expected_max_retries_attempt = 30
expected_connect_timeout = 40
expected_max_pool_connections = 50
expected_retry_mode = "legacy"
botocore_config = botocore.config.Config(
retries={"max_attempts": expected_max_retries_attempt, "mode": expected_retry_mode},
connect_timeout=expected_connect_timeout,
max_pool_connections=expected_max_pool_connections,
)
wr.config.botocore_config = botocore_config
with patch("botocore.client.ClientCreator.create_client", new=wrapper):
with open_s3_object(path, mode="wb") as s3obj:
s3obj.write(b"foo")
wr.config.reset()
def test_chunk_size():
expected_chunksize = 123
wr.config.chunksize = expected_chunksize
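# apply_configs is expected to inject the configured chunksize into the wrapped
# function's keyword arguments; the autospec mocks below only record the call, so the
# assertion can check the injected value without touching a real database.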
for function_to_mock in [wr.postgresql.to_sql, wr.mysql.to_sql, wr.sqlserver.to_sql, wr.redshift.to_sql]:
mock = create_autospec(function_to_mock)
apply_configs(mock)(df=None, con=None, table=None, schema=None)
mock.assert_called_with(df=None, con=None, table=None, schema=None, chunksize=expected_chunksize)
expected_chunksize = 456
os.environ["WR_CHUNKSIZE"] = str(expected_chunksize)
wr.config.reset()
for function_to_mock in [wr.postgresql.to_sql, wr.mysql.to_sql, wr.sqlserver.to_sql, wr.redshift.to_sql]:
mock = create_autospec(function_to_mock)
apply_configs(mock)(df=None, con=None, table=None, schema=None)
mock.assert_called_with(df=None, con=None, table=None, schema=None, chunksize=expected_chunksize)
|
py | 1a316b056399e595e0cf1222fe4b87f7b15bf0b0 | import unittest
from cloudrail.dev_tools.rule_test_utils import create_empty_entity
from cloudrail.knowledge.context.aws.cloudfront.cloud_front_distribution_list import CloudFrontDistribution, ViewerCertificate
from cloudrail.knowledge.context.aws.aws_environment_context import AwsEnvironmentContext
from cloudrail.knowledge.rules.aws.non_context_aware.protocol_enforcments.ensure_cloudfront_protocol_version_is_good import \
CloudFrontEnsureVersionRule
from cloudrail.knowledge.rules.base_rule import RuleResultType
class TestCloudFrontEnsureVersionRule(unittest.TestCase):
def setUp(self):
self.rule = CloudFrontEnsureVersionRule()
def test_non_car_cloudfront_protocol_version_fail(self):
# Arrange
cloudfront_dist_list: CloudFrontDistribution = create_empty_entity(CloudFrontDistribution)
viewer_cert: ViewerCertificate = create_empty_entity(ViewerCertificate)
viewer_cert.minimum_protocol_version = 'TLSv1.2_2018'
cloudfront_dist_list.viewer_cert = viewer_cert
context = AwsEnvironmentContext(cloudfront_distribution_list=[cloudfront_dist_list])
# Act
result = self.rule.run(context, {})
# Assert
self.assertEqual(RuleResultType.FAILED, result.status)
self.assertEqual(1, len(result.issues))
def test_non_car_cloudfront_protocol_version_pass(self):
# Arrange
cloudfront_dist_list: CloudFrontDistribution = create_empty_entity(CloudFrontDistribution)
viewer_cert: ViewerCertificate = create_empty_entity(ViewerCertificate)
viewer_cert.minimum_protocol_version = 'TLSv1.2_2019'
cloudfront_dist_list.viewer_cert = viewer_cert
context = AwsEnvironmentContext(cloudfront_distribution_list=[cloudfront_dist_list])
# Act
result = self.rule.run(context, {})
# Assert
self.assertEqual(RuleResultType.SUCCESS, result.status)
self.assertEqual(0, len(result.issues))
|
py | 1a316bf36712bbfd9bc23cadf493e23aa0bf5a32 | r"""
LaTeX options for graphs
This module provides a class to hold, manipulate and employ various
options for rendering a graph in LaTeX, in addition to providing
the code that actually generates a LaTeX representation
of a (combinatorial) graph.
AUTHORS:
- Rob Beezer (2009-05-20): :class:`~sage.graphs.graph_latex.GraphLatex` class
- Fidel Barerra Cruz (2009-05-20): ``tkz-graph`` commands to render a graph
- Nicolas M. Thiery (2010-02): dot2tex/graphviz interface
- Rob Beezer (2010-05-29): Extended range of ``tkz-graph`` options
LaTeX Versions of Graphs
-------------------------------------
.. image:: ../../media/heawood-graph-latex.png
:align: center
Many mathematical objects in Sage have LaTeX representations, and graphs are no exception. For a graph ``g``, the command ``view(g)``, issued at the Sage command line or in the notebook, will create a graphic version of ``g``. Similarly, ``latex(g)`` will return a (long) string that is a representation of the graph in LaTeX. Other ways of employing LaTeX in Sage, such as ``%latex`` in a notebook cell, or the Typeset checkbox in the notebook, will handle ``g`` appropriately.
Support through the ``tkz-graph`` package is by Alain Matthes, the author of ``tkz-graph``, whose work can be found at his `Altermundus.com <http://altermundus.com/>`_ site.
The range of possible options for customizing the appearance of a graph are carefully documented at :meth:`sage.graphs.graph_latex.GraphLatex.set_option`. As a broad overview, the following options are supported:
- Pre-built Styles: the pre-built styles of the tkz-graph package provide nice drawings quickly
- Dimensions: can be specified in natural units, then uniformly scaled after design work
- Vertex Colors: the perimeter and fill color for vertices can be specified, including on a per-vertex basis
- Vertex Shapes: may be circles, shaded spheres, rectangles or diamonds, including on a per-vertex basis
- Vertex Sizes: may be specified as minimums, and will automatically sized to contain vertex labels, including on a per-vertex basis
- Vertex Labels: can use latex formatting, and may have their colors specified, including on a per-vertex basis
- Vertex Label Placement: can be interior to the vertex, or external at a configurable location
- Edge Colors: a solid color with or without a second color down the middle, on a per-edge basis
- Edge Thickness: can be set, including on a per-edge basis
- Edge Labels: can use latex formatting, and may have their colors specified, including on a per-edge basis
- Edge Label Placement: can be to the left, right, above, below, inline, and then sloped or horizontal
- Digraph Edges: are slightly curved, with arrowheads
- Loops: may be specified by their size, and with a direction equaling one of the four compass points
To use LaTeX in Sage you of course need a working TeX installation and it will work best if you have the ``dvipng`` and ``convert`` utilities. For graphs you need the ``tkz-graph.sty`` and ``tkz-berge.sty`` style files of the tkz-graph package. TeX, dvipng, and convert should be widely available through package managers or installers. You may need to install the tkz-graph style files in the appropriate locations, a task beyond the scope of this introduction. Primary locations for these programs are:
- TeX: http://ctan.org/
- dvipng: http://sourceforge.net/projects/dvipng/
- convert: http://www.imagemagick.org (the ImageMagick suite)
- tkz-graph: http://altermundus.com/pages/tkz/
Customizing the output is accomplished in several ways. Suppose ``g`` is a graph, then ``g.set_latex_options()`` can be used to efficiently set or modify various options. Setting individual options, or querying options, can be accomplished by first using a command like ``opts = g.latex_options()`` to obtain a :class:`sage.graphs.graph_latex.GraphLatex` object which has several methods to set and retrieve options.
Here is a minimal session demonstrating how to use these features. The following setup should work in the notebook or at the command-line. ::
sage: H = graphs.HeawoodGraph()
sage: H.set_latex_options(
... graphic_size=(5,5),
... vertex_size=0.2,
... edge_thickness=0.04,
... edge_color='green',
... vertex_color='green',
... vertex_label_color='red'
... )
At this point, ``view(H)`` should call ``pdflatex`` to process the string created by ``latex(H)`` and then display the resulting graphic.
To use this image in a LaTeX document, you could of course just copy and save the resulting graphic. However, the ``latex()`` command will produce the underlying LaTeX code, which can be incorporated into a standalone LaTeX document. ::
sage: from sage.graphs.graph_latex import check_tkz_graph
sage: check_tkz_graph() # random - depends on TeX installation
sage: latex(H)
\begin{tikzpicture}
%
\useasboundingbox (0,0) rectangle (5.0cm,5.0cm);
%
\definecolor{cv0}{rgb}{0.0,0.502,0.0}
\definecolor{cfv0}{rgb}{1.0,1.0,1.0}
\definecolor{clv0}{rgb}{1.0,0.0,0.0}
\definecolor{cv1}{rgb}{0.0,0.502,0.0}
\definecolor{cfv1}{rgb}{1.0,1.0,1.0}
\definecolor{clv1}{rgb}{1.0,0.0,0.0}
\definecolor{cv2}{rgb}{0.0,0.502,0.0}
\definecolor{cfv2}{rgb}{1.0,1.0,1.0}
\definecolor{clv2}{rgb}{1.0,0.0,0.0}
\definecolor{cv3}{rgb}{0.0,0.502,0.0}
\definecolor{cfv3}{rgb}{1.0,1.0,1.0}
\definecolor{clv3}{rgb}{1.0,0.0,0.0}
\definecolor{cv4}{rgb}{0.0,0.502,0.0}
\definecolor{cfv4}{rgb}{1.0,1.0,1.0}
\definecolor{clv4}{rgb}{1.0,0.0,0.0}
\definecolor{cv5}{rgb}{0.0,0.502,0.0}
\definecolor{cfv5}{rgb}{1.0,1.0,1.0}
\definecolor{clv5}{rgb}{1.0,0.0,0.0}
\definecolor{cv6}{rgb}{0.0,0.502,0.0}
\definecolor{cfv6}{rgb}{1.0,1.0,1.0}
\definecolor{clv6}{rgb}{1.0,0.0,0.0}
\definecolor{cv7}{rgb}{0.0,0.502,0.0}
\definecolor{cfv7}{rgb}{1.0,1.0,1.0}
\definecolor{clv7}{rgb}{1.0,0.0,0.0}
\definecolor{cv8}{rgb}{0.0,0.502,0.0}
\definecolor{cfv8}{rgb}{1.0,1.0,1.0}
\definecolor{clv8}{rgb}{1.0,0.0,0.0}
\definecolor{cv9}{rgb}{0.0,0.502,0.0}
\definecolor{cfv9}{rgb}{1.0,1.0,1.0}
\definecolor{clv9}{rgb}{1.0,0.0,0.0}
\definecolor{cv10}{rgb}{0.0,0.502,0.0}
\definecolor{cfv10}{rgb}{1.0,1.0,1.0}
\definecolor{clv10}{rgb}{1.0,0.0,0.0}
\definecolor{cv11}{rgb}{0.0,0.502,0.0}
\definecolor{cfv11}{rgb}{1.0,1.0,1.0}
\definecolor{clv11}{rgb}{1.0,0.0,0.0}
\definecolor{cv12}{rgb}{0.0,0.502,0.0}
\definecolor{cfv12}{rgb}{1.0,1.0,1.0}
\definecolor{clv12}{rgb}{1.0,0.0,0.0}
\definecolor{cv13}{rgb}{0.0,0.502,0.0}
\definecolor{cfv13}{rgb}{1.0,1.0,1.0}
\definecolor{clv13}{rgb}{1.0,0.0,0.0}
\definecolor{cv0v1}{rgb}{0.0,0.502,0.0}
\definecolor{cv0v5}{rgb}{0.0,0.502,0.0}
\definecolor{cv0v13}{rgb}{0.0,0.502,0.0}
\definecolor{cv1v2}{rgb}{0.0,0.502,0.0}
\definecolor{cv1v10}{rgb}{0.0,0.502,0.0}
\definecolor{cv2v3}{rgb}{0.0,0.502,0.0}
\definecolor{cv2v7}{rgb}{0.0,0.502,0.0}
\definecolor{cv3v4}{rgb}{0.0,0.502,0.0}
\definecolor{cv3v12}{rgb}{0.0,0.502,0.0}
\definecolor{cv4v5}{rgb}{0.0,0.502,0.0}
\definecolor{cv4v9}{rgb}{0.0,0.502,0.0}
\definecolor{cv5v6}{rgb}{0.0,0.502,0.0}
\definecolor{cv6v7}{rgb}{0.0,0.502,0.0}
\definecolor{cv6v11}{rgb}{0.0,0.502,0.0}
\definecolor{cv7v8}{rgb}{0.0,0.502,0.0}
\definecolor{cv8v9}{rgb}{0.0,0.502,0.0}
\definecolor{cv8v13}{rgb}{0.0,0.502,0.0}
\definecolor{cv9v10}{rgb}{0.0,0.502,0.0}
\definecolor{cv10v11}{rgb}{0.0,0.502,0.0}
\definecolor{cv11v12}{rgb}{0.0,0.502,0.0}
\definecolor{cv12v13}{rgb}{0.0,0.502,0.0}
%
\Vertex[style={minimum size=0.2cm,draw=cv0,fill=cfv0,text=clv0,shape=circle},LabelOut=false,L=\hbox{$0$},x=2.5cm,y=5.0cm]{v0}
\Vertex[style={minimum size=0.2cm,draw=cv1,fill=cfv1,text=clv1,shape=circle},LabelOut=false,L=\hbox{$1$},x=1.3874cm,y=4.7524cm]{v1}
\Vertex[style={minimum size=0.2cm,draw=cv2,fill=cfv2,text=clv2,shape=circle},LabelOut=false,L=\hbox{$2$},x=0.4952cm,y=4.0587cm]{v2}
\Vertex[style={minimum size=0.2cm,draw=cv3,fill=cfv3,text=clv3,shape=circle},LabelOut=false,L=\hbox{$3$},x=0.0cm,y=3.0563cm]{v3}
\Vertex[style={minimum size=0.2cm,draw=cv4,fill=cfv4,text=clv4,shape=circle},LabelOut=false,L=\hbox{$4$},x=0.0cm,y=1.9437cm]{v4}
\Vertex[style={minimum size=0.2cm,draw=cv5,fill=cfv5,text=clv5,shape=circle},LabelOut=false,L=\hbox{$5$},x=0.4952cm,y=0.9413cm]{v5}
\Vertex[style={minimum size=0.2cm,draw=cv6,fill=cfv6,text=clv6,shape=circle},LabelOut=false,L=\hbox{$6$},x=1.3874cm,y=0.2476cm]{v6}
\Vertex[style={minimum size=0.2cm,draw=cv7,fill=cfv7,text=clv7,shape=circle},LabelOut=false,L=\hbox{$7$},x=2.5cm,y=0.0cm]{v7}
\Vertex[style={minimum size=0.2cm,draw=cv8,fill=cfv8,text=clv8,shape=circle},LabelOut=false,L=\hbox{$8$},x=3.6126cm,y=0.2476cm]{v8}
\Vertex[style={minimum size=0.2cm,draw=cv9,fill=cfv9,text=clv9,shape=circle},LabelOut=false,L=\hbox{$9$},x=4.5048cm,y=0.9413cm]{v9}
\Vertex[style={minimum size=0.2cm,draw=cv10,fill=cfv10,text=clv10,shape=circle},LabelOut=false,L=\hbox{$10$},x=5.0cm,y=1.9437cm]{v10}
\Vertex[style={minimum size=0.2cm,draw=cv11,fill=cfv11,text=clv11,shape=circle},LabelOut=false,L=\hbox{$11$},x=5.0cm,y=3.0563cm]{v11}
\Vertex[style={minimum size=0.2cm,draw=cv12,fill=cfv12,text=clv12,shape=circle},LabelOut=false,L=\hbox{$12$},x=4.5048cm,y=4.0587cm]{v12}
\Vertex[style={minimum size=0.2cm,draw=cv13,fill=cfv13,text=clv13,shape=circle},LabelOut=false,L=\hbox{$13$},x=3.6126cm,y=4.7524cm]{v13}
%
\Edge[lw=0.04cm,style={color=cv0v1,},](v0)(v1)
\Edge[lw=0.04cm,style={color=cv0v5,},](v0)(v5)
\Edge[lw=0.04cm,style={color=cv0v13,},](v0)(v13)
\Edge[lw=0.04cm,style={color=cv1v2,},](v1)(v2)
\Edge[lw=0.04cm,style={color=cv1v10,},](v1)(v10)
\Edge[lw=0.04cm,style={color=cv2v3,},](v2)(v3)
\Edge[lw=0.04cm,style={color=cv2v7,},](v2)(v7)
\Edge[lw=0.04cm,style={color=cv3v4,},](v3)(v4)
\Edge[lw=0.04cm,style={color=cv3v12,},](v3)(v12)
\Edge[lw=0.04cm,style={color=cv4v5,},](v4)(v5)
\Edge[lw=0.04cm,style={color=cv4v9,},](v4)(v9)
\Edge[lw=0.04cm,style={color=cv5v6,},](v5)(v6)
\Edge[lw=0.04cm,style={color=cv6v7,},](v6)(v7)
\Edge[lw=0.04cm,style={color=cv6v11,},](v6)(v11)
\Edge[lw=0.04cm,style={color=cv7v8,},](v7)(v8)
\Edge[lw=0.04cm,style={color=cv8v9,},](v8)(v9)
\Edge[lw=0.04cm,style={color=cv8v13,},](v8)(v13)
\Edge[lw=0.04cm,style={color=cv9v10,},](v9)(v10)
\Edge[lw=0.04cm,style={color=cv10v11,},](v10)(v11)
\Edge[lw=0.04cm,style={color=cv11v12,},](v11)(v12)
\Edge[lw=0.04cm,style={color=cv12v13,},](v12)(v13)
%
\end{tikzpicture}
EXAMPLES:
This example illustrates switching between the built-in styles when using the tkz_graph format. ::
sage: g = graphs.PetersenGraph()
sage: g.set_latex_options(tkz_style = 'Classic')
sage: from sage.graphs.graph_latex import check_tkz_graph
sage: check_tkz_graph() # random - depends on TeX installation
sage: latex(g)
\begin{tikzpicture}
...
\GraphInit[vstyle=Classic]
...
\end{tikzpicture}
sage: opts = g.latex_options()
sage: opts
LaTeX options for Petersen graph: {'tkz_style': 'Classic'}
sage: g.set_latex_options(tkz_style = 'Art')
sage: opts.get_option('tkz_style')
'Art'
sage: opts
LaTeX options for Petersen graph: {'tkz_style': 'Art'}
sage: latex(g)
\begin{tikzpicture}
...
\GraphInit[vstyle=Art]
...
\end{tikzpicture}
This example illustrates using the optional dot2tex module::
sage: g = graphs.PetersenGraph()
sage: g.set_latex_options(format='dot2tex',prog='neato') # optional - dot2tex
sage: from sage.graphs.graph_latex import check_tkz_graph
sage: check_tkz_graph() # random - depends on TeX installation
sage: latex(g) # optional - dot2tex graphviz
\begin{tikzpicture}[>=latex,line join=bevel,]
...
\end{tikzpicture}
Among other things, this supports the flexible ``edge_options`` option
(see :meth:`sage.graphs.generic_graph.GenericGraph.graphviz_string`);
here we color in red all edges touching the vertex ``0``::
sage: g = graphs.PetersenGraph()
sage: g.set_latex_options(format="dot2tex", edge_options=lambda u_v_label: {"color": "red"} if u_v_label[0] == 0 else {})
sage: latex(g) # optional - dot2tex graphviz
\begin{tikzpicture}[>=latex,line join=bevel,]
...
\end{tikzpicture}
TEST:
This graph will look horrible, but it illustrates (and tests) a
great variety of the possible options available through Sage's
interface to the ``tkz-graph`` package. So it is worth viewing
this in the notebook to see the effects of various defaults and
choices. ::
sage: var('x y u w')
(x, y, u, w)
sage: G = Graph(loops=True)
sage: for i in range(5):
... for j in range(i+1, 5):
... G.add_edge((i, j), label=(x^i*y^j).expand())
sage: G.add_edge((0,0), label=sin(u))
sage: G.add_edge((4,4), label=w^5)
sage: G.set_pos(G.layout_circular())
sage: G.set_latex_options(
... units='in',
... graphic_size=(8,8),
... margins=(1,2,2,1),
... scale=0.5,
... vertex_color='0.8',
... vertex_colors={1:'aqua', 3:'y', 4:'#0000FF'},
... vertex_fill_color='blue',
... vertex_fill_colors={1:'green', 3:'b', 4:'#FF00FF'},
... vertex_label_color='brown',
... vertex_label_colors={0:'g',1:'purple',2:'#007F00'},
... vertex_shape='diamond',
... vertex_shapes={1:'rectangle', 2:'sphere', 3:'sphere', 4:'circle'},
... vertex_size=0.3,
... vertex_sizes={0:1.0, 2:0.3, 4:1.0},
... vertex_label_placements = {2:(0.6, 180), 4:(0,45)},
... edge_color='purple',
... edge_colors={(0,2):'g',(3,4):'red'},
... edge_fills=True,
... edge_fill_color='green',
... edge_label_colors={(2,3):'y',(0,4):'blue'},
... edge_thickness=0.05,
... edge_thicknesses={(3,4):0.2, (0,4):0.02},
... edge_labels=True,
... edge_label_sloped=True,
... edge_label_slopes={(0,3):False, (2,4):False},
... edge_label_placement=0.50,
... edge_label_placements={(0,4):'above', (2,3):'left', (0,0):'above', (4,4):'below'},
... loop_placement=(2.0, 'NO'),
... loop_placements={4:(8.0, 'EA')}
... )
sage: from sage.graphs.graph_latex import check_tkz_graph
sage: check_tkz_graph() # random - depends on TeX installation
sage: print(latex(G))
\begin{tikzpicture}
%
\useasboundingbox (0,0) rectangle (4.0in,4.0in);
%
\definecolor{cv0}{rgb}{0.8,0.8,0.8}
\definecolor{cfv0}{rgb}{0.0,0.0,1.0}
\definecolor{clv0}{rgb}{0.0,0.5,0.0}
\definecolor{cv1}{rgb}{0.0,1.0,1.0}
\definecolor{cfv1}{rgb}{0.0,0.502,0.0}
\definecolor{clv1}{rgb}{0.502,0.0,0.502}
\definecolor{cv2}{rgb}{0.8,0.8,0.8}
\definecolor{cfv2}{rgb}{0.0,0.0,1.0}
\definecolor{clv2}{rgb}{0.0,0.498,0.0}
\definecolor{cv3}{rgb}{0.75,0.75,0.0}
\definecolor{cfv3}{rgb}{0.0,0.0,1.0}
\definecolor{clv3}{rgb}{0.6471,0.1647,0.1647}
\definecolor{cv4}{rgb}{0.0,0.0,1.0}
\definecolor{cfv4}{rgb}{1.0,0.0,1.0}
\definecolor{clv4}{rgb}{0.6471,0.1647,0.1647}
\definecolor{cv0v0}{rgb}{0.502,0.0,0.502}
\definecolor{cfv0v0}{rgb}{0.0,0.502,0.0}
\definecolor{clv0v0}{rgb}{0.0,0.0,0.0}
\definecolor{cv0v1}{rgb}{0.502,0.0,0.502}
\definecolor{cfv0v1}{rgb}{0.0,0.502,0.0}
\definecolor{clv0v1}{rgb}{0.0,0.0,0.0}
\definecolor{cv0v2}{rgb}{0.0,0.5,0.0}
\definecolor{cfv0v2}{rgb}{0.0,0.502,0.0}
\definecolor{clv0v2}{rgb}{0.0,0.0,0.0}
\definecolor{cv0v3}{rgb}{0.502,0.0,0.502}
\definecolor{cfv0v3}{rgb}{0.0,0.502,0.0}
\definecolor{clv0v3}{rgb}{0.0,0.0,0.0}
\definecolor{cv0v4}{rgb}{0.502,0.0,0.502}
\definecolor{cfv0v4}{rgb}{0.0,0.502,0.0}
\definecolor{clv0v4}{rgb}{0.0,0.0,1.0}
\definecolor{cv1v2}{rgb}{0.502,0.0,0.502}
\definecolor{cfv1v2}{rgb}{0.0,0.502,0.0}
\definecolor{clv1v2}{rgb}{0.0,0.0,0.0}
\definecolor{cv1v3}{rgb}{0.502,0.0,0.502}
\definecolor{cfv1v3}{rgb}{0.0,0.502,0.0}
\definecolor{clv1v3}{rgb}{0.0,0.0,0.0}
\definecolor{cv1v4}{rgb}{0.502,0.0,0.502}
\definecolor{cfv1v4}{rgb}{0.0,0.502,0.0}
\definecolor{clv1v4}{rgb}{0.0,0.0,0.0}
\definecolor{cv2v3}{rgb}{0.502,0.0,0.502}
\definecolor{cfv2v3}{rgb}{0.0,0.502,0.0}
\definecolor{clv2v3}{rgb}{0.75,0.75,0.0}
\definecolor{cv2v4}{rgb}{0.502,0.0,0.502}
\definecolor{cfv2v4}{rgb}{0.0,0.502,0.0}
\definecolor{clv2v4}{rgb}{0.0,0.0,0.0}
\definecolor{cv3v4}{rgb}{1.0,0.0,0.0}
\definecolor{cfv3v4}{rgb}{0.0,0.502,0.0}
\definecolor{clv3v4}{rgb}{0.0,0.0,0.0}
\definecolor{cv4v4}{rgb}{0.502,0.0,0.502}
\definecolor{cfv4v4}{rgb}{0.0,0.502,0.0}
\definecolor{clv4v4}{rgb}{0.0,0.0,0.0}
%
\Vertex[style={minimum size=0.5in,draw=cv0,fill=cfv0,text=clv0,shape=diamond},LabelOut=false,L=\hbox{$0$},x=1.75in,y=3.0in]{v0}
\Vertex[style={minimum size=0.15in,draw=cv1,fill=cfv1,text=clv1,shape=rectangle},LabelOut=false,L=\hbox{$1$},x=0.5in,y=2.0451in]{v1}
\Vertex[style={minimum size=0.15in,draw=cv2,fill=cfv2,text=clv2,shape=circle,shading=ball,line width=0pt,ball color=cv2,},LabelOut=true,Ldist=0.3in,Lpos=180.0,L=\hbox{$2$},x=0.9775in,y=0.5in]{v2}
\Vertex[style={minimum size=0.15in,draw=cv3,fill=cfv3,text=clv3,shape=circle,shading=ball,line width=0pt,ball color=cv3,},LabelOut=false,L=\hbox{$3$},x=2.5225in,y=0.5in]{v3}
\Vertex[style={minimum size=0.5in,draw=cv4,fill=cfv4,text=clv4,shape=circle},LabelOut=true,Ldist=0.0in,Lpos=45.0,L=\hbox{$4$},x=3.0in,y=2.0451in]{v4}
%
\Loop[dist=1.0in,dir=NO,style={color=cv0v0,double=cfv0v0},labelstyle={sloped,above,text=clv0v0,},label=\hbox{$\sin\left(u\right)$},](v0)
\Edge[lw=0.025in,style={color=cv0v1,double=cfv0v1},labelstyle={sloped,pos=0.5,text=clv0v1,},label=\hbox{$y$},](v0)(v1)
\Edge[lw=0.025in,style={color=cv0v2,double=cfv0v2},labelstyle={sloped,pos=0.5,text=clv0v2,},label=\hbox{$y^{2}$},](v0)(v2)
\Edge[lw=0.025in,style={color=cv0v3,double=cfv0v3},labelstyle={pos=0.5,text=clv0v3,},label=\hbox{$y^{3}$},](v0)(v3)
\Edge[lw=0.01in,style={color=cv0v4,double=cfv0v4},labelstyle={sloped,above,text=clv0v4,},label=\hbox{$y^{4}$},](v0)(v4)
\Edge[lw=0.025in,style={color=cv1v2,double=cfv1v2},labelstyle={sloped,pos=0.5,text=clv1v2,},label=\hbox{$x y^{2}$},](v1)(v2)
\Edge[lw=0.025in,style={color=cv1v3,double=cfv1v3},labelstyle={sloped,pos=0.5,text=clv1v3,},label=\hbox{$x y^{3}$},](v1)(v3)
\Edge[lw=0.025in,style={color=cv1v4,double=cfv1v4},labelstyle={sloped,pos=0.5,text=clv1v4,},label=\hbox{$x y^{4}$},](v1)(v4)
\Edge[lw=0.025in,style={color=cv2v3,double=cfv2v3},labelstyle={sloped,left,text=clv2v3,},label=\hbox{$x^{2} y^{3}$},](v2)(v3)
\Edge[lw=0.025in,style={color=cv2v4,double=cfv2v4},labelstyle={pos=0.5,text=clv2v4,},label=\hbox{$x^{2} y^{4}$},](v2)(v4)
\Edge[lw=0.1in,style={color=cv3v4,double=cfv3v4},labelstyle={sloped,pos=0.5,text=clv3v4,},label=\hbox{$x^{3} y^{4}$},](v3)(v4)
\Loop[dist=4.0in,dir=EA,style={color=cv4v4,double=cfv4v4},labelstyle={sloped,below,text=clv4v4,},label=\hbox{$w^{5}$},](v4)
%
\end{tikzpicture}
GraphLatex class and functions
------------------------------
"""
#*****************************************************************************
# Copyright (C) 2009 Robert Beezer <[email protected]>
# Copyright (C) 2009 Fidel Barrera Cruz <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from sage.structure.sage_object import SageObject
from sage.misc.cachefunc import cached_function
from sage.misc.latex import latex
def check_tkz_graph():
r"""
Checks if the proper LaTeX
packages for the ``tikzpicture`` environment are
installed in the user's environment, and issues
a warning otherwise.
The warning is only issued on the first call to this function. So
any doctest that illustrates the use of the tkz-graph packages
should call this once as having random output to exhaust the
warnings before testing output.
See also :meth:`sage.misc.latex.Latex.check_file`
TESTS::
sage: from sage.graphs.graph_latex import check_tkz_graph
sage: check_tkz_graph() # random - depends on TeX installation
sage: check_tkz_graph() # at least the second time, so no output
"""
latex.check_file("tikz.sty", """This package is required to render graphs in LaTeX.
Visit '...'.
""")
latex.check_file("tkz-graph.sty", """This package is required to render graphs in LaTeX.
Visit 'http://altermundus.com/pages/tkz/'.
""")
latex.check_file("tkz-berge.sty", """This package is required to render graphs in LaTeX.
Visit 'http://altermundus.com/pages/tkz/'.
""")
def have_tkz_graph():
r"""
Returns ``True`` if the proper LaTeX packages
for the ``tikzpicture`` environment are installed in the
user's environment, namely tikz, tkz-graph and tkz-berge.
The result is cached.
See also :meth:`sage.misc.latex.Latex.has_file`
TESTS::
sage: from sage.graphs.graph_latex import have_tkz_graph
sage: have_tkz_graph() # random - depends on TeX installation
sage: have_tkz_graph() in [True, False]
True
"""
return latex.has_file("tikz.sty") and latex.has_file("tkz-graph.sty") and latex.has_file("tkz-berge.sty")
@cached_function
def setup_latex_preamble():
"""
Adds appropriate ``\usepackage{...}``, and other instructions to
the latex preamble for the packages that are needed for processing
graphs(``tikz``, ``tkz-graph``, ``tkz-berge``), if available
in the ``LaTeX`` installation.
See also :meth:`sage.misc.latex.Latex.add_package_to_preamble_if_available`.
EXAMPLES::
sage: sage.graphs.graph_latex.setup_latex_preamble()
TESTS::
sage: ("\\usepackage{tikz}" in latex.extra_preamble()) == latex.has_file("tikz.sty")
True
"""
latex.add_package_to_preamble_if_available("tikz")
latex.add_to_mathjax_avoid_list("tikz")
latex.add_package_to_preamble_if_available("tkz-graph")
latex.add_package_to_preamble_if_available("tkz-berge")
if have_tkz_graph():
latex.add_to_preamble("\\usetikzlibrary{arrows,shapes}")
class GraphLatex(SageObject):
r"""
A class to hold, manipulate and employ options for converting
a graph to LaTeX.
This class serves two purposes. First it holds the values of
various options designed to work with the ``tkz-graph``
LaTeX package for rendering graphs. As such, a
graph that uses this class will hold a reference to it. Second,
this class contains the code to convert a graph into the
corresponding LaTeX constructs, returning a string.
EXAMPLES::
sage: from sage.graphs.graph_latex import GraphLatex
sage: opts = GraphLatex(graphs.PetersenGraph())
sage: opts
LaTeX options for Petersen graph: {}
sage: g = graphs.PetersenGraph()
sage: opts = g.latex_options()
sage: g == loads(dumps(g))
True
"""
# These are the "allowed" options for a graph, private to the class,
# along with their default value and description
# This allows intelligent errors when non-existent options are referenced
# Additionally, for each new option added here:
# 1. Document values in GraphLatex.set_option() docstring
# 2. Describe also in docstring for the sage.graphs.graph_latex module
#
# TODO: use some standard option handling mechanism
# This dictionary could also contain type information (list of admissible values)
# and a description
# See e.g. @option
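# Note: get_option() falls back to the defaults below whenever an option has not
# been explicitly set on a particular GraphLatex instance.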
__graphlatex_options = {
'tkz_style': 'Custom',
'format': 'tkz_graph',
'layout': 'acyclic',
'prog': 'dot',
'units': 'cm',
'scale': 1.0,
'graphic_size': (5, 5),
'margins': (0,0,0,0),
'vertex_color': 'black',
'vertex_colors': {},
'vertex_fill_color': 'white',
'vertex_fill_colors': {},
'vertex_shape': 'circle',
'vertex_shapes': {},
'vertex_size': 1.0,
'vertex_sizes': {},
'vertex_labels': True,
'vertex_labels_math': True,
'vertex_label_color': 'black',
'vertex_label_colors': {},
'vertex_label_placement': 'center',
'vertex_label_placements': {},
'edge_options': (),
'edge_color': 'black',
'edge_colors': {},
'edge_fills': False,
'edge_fill_color': 'black',
'edge_fill_colors': {},
'edge_thickness': 0.1,
'edge_thicknesses': {},
'edge_labels': False,
'edge_labels_math': True,
'edge_label_color': 'black',
'edge_label_colors': {},
'edge_label_sloped': True,
'edge_label_slopes': {},
'edge_label_placement': 0.50,
'edge_label_placements': {},
'loop_placement': (3.0, 'NO'),
'loop_placements': {},
'color_by_label' : False,
'rankdir': 'down'
}
def __init__(self, graph, **options):
r"""
Returns a GraphLatex object, which holds all the parameters needed for
creating a LaTeX string that will be rendered as a picture of the graph.
See :mod:`sage.graphs.graph_latex` for more documentation.
EXAMPLES::
sage: from sage.graphs.graph_latex import GraphLatex
sage: GraphLatex(graphs.PetersenGraph())
LaTeX options for Petersen graph: {}
"""
self._graph = graph
self._options = {}
self.set_options(**options)
def __eq__(self, other):
r"""
Two :class:`sage.graphs.graph_latex.GraphLatex` objects
are equal if their options are equal.
The graphs they are associated with are ignored in the comparison.
TESTS::
sage: from sage.graphs.graph_latex import GraphLatex
sage: opts1 = GraphLatex(graphs.PetersenGraph())
sage: opts2 = GraphLatex(graphs.CompleteGraph(10))
sage: opts1.set_option('tkz_style', 'Art')
sage: opts2.set_option('tkz_style', 'Art')
sage: opts1 == opts2
True
sage: opts2.set_option('tkz_style', 'Normal')
sage: opts1 == opts2
False
"""
if not(isinstance(other, GraphLatex)):
return False
else:
return self._options == other._options
def _repr_(self):
r"""
Returns a string representation of a
:class:`sage.graphs.graph_latex.GraphLatex` object
which includes the name of the graph and the dictionary
of current options.
EXAMPLES::
sage: g = graphs.PetersenGraph()
sage: opts = g.latex_options()
sage: opts.set_option('tkz_style', 'Classic')
sage: opts.set_option('vertex_size', 3.6)
sage: print(opts._repr_())
LaTeX options for Petersen graph: {'tkz_style': 'Classic', 'vertex_size': 3.60000000000000}
"""
return "LaTeX options for %s: %s"%(self._graph, self._options)
def set_option(self, option_name, option_value = None):
r"""
Sets, modifies, clears a LaTeX
option for controlling the rendering of a graph.
The possible options are documented here, because ultimately it is this
routine that sets the values. However, the
:meth:`sage.graphs.generic_graph.GenericGraph.set_latex_options` method
is the easiest way to set options, and allows several to be set at once.
INPUT:
- ``option_name`` - a string for a latex option contained in the list
``sage.graphs.graph_latex.GraphLatex.__graphlatex_options``. A
``ValueError`` is raised if the option is not allowed.
- ``option_value`` - a value for the option. If omitted, or
set to ``None``, the option will use the default value.
The output can be either handled internally by ``Sage``, or
delegated to the external software ``dot2tex`` and
``graphviz``. This is controlled by the option 'format':
- ``format`` -- default: 'tkz_graph' -- either 'dot2tex'
or 'tkz_graph'.
If format is 'dot2tex', then all the LaTeX generation
will be delegated to ``dot2tex`` (which must be installed).
For ``tkz_graph``, the possible option names, and associated
values are given below. This first group allows you to set a
style for a graph and specify some sizes related to the eventual
image. (For more information consult the
documentation for the ``tkz-graph`` package.)
- ``tkz_style`` -- default: 'Custom' -- the name of a pre-defined
``tkz-graph`` style such as 'Shade', 'Art', 'Normal', 'Dijkstra',
'Welsh', 'Classic', and 'Simple', or the string 'Custom'. Using
one of these styles alone will often give a reasonably good
drawing with minimal effort. For a custom appearance set this
to 'Custom' and use the options described below to override
the default values.
- ``units`` -- default: 'cm' -- a natural unit of measurement
used for all dimensions. Possible values are:
'in','mm','cm','pt', 'em', 'ex'
- ``scale`` -- default: '1.0' -- a dimensionless number that
multiplies every linear dimension. So you can design at sizes
you are accustomed to, then shrink or expand to meet other needs.
Though fonts do not scale.
- ``graphic_size`` -- default: (5,5) -- overall dimensions
(width, length) of the bounding box around the entire graphic image
- ``margins`` -- default: (0,0,0,0) -- portion of graphic given
over to a plain border as a tuple of four numbers:
(left, right, top, bottom). These are subtracted from the
``graphic_size`` to create the area left for the vertices
of the graph itself. Note that the processing done by
Sage will trim the graphic down to the minimum
possible size, removing any border. So this is only useful
if you use the latex string in a latex document.
If not using a pre-built style the following options are used, so
the following defaults will apply. It is not possible to begin with
a pre-built style and modify it (other than editing the latex
string by hand after the fact).
- ``vertex_color`` -- default: 'black' -- a single color
to use as the default for outline of vertices. For the
``sphere`` shape this color is used for the entire vertex,
which is drawn with a 3D shading. Colors must be specified
as a string recognized by the matplotlib library:
a standard color name like 'red', or a hex string like
'#2D87A7', or a single character from the choices
'rgbcmykw'. Additionally, a number between 0 and 1
will create a grayscale value. These color specifications
are consistent throughout the options for a ``tkzpicture``.
- ``vertex_colors`` -- a dictionary whose keys are vertices
of the graph and whose values are colors. These will be used
to color the outline of vertices. See the explanation
above for the ``vertex_color`` option to see possible values.
These values need only be specified for a proper subset of the
vertices. Specified values will supersede a default value.
- ``vertex_fill_color`` -- default: 'white' -- a single color
to use as the default for the fill color of vertices. See
the explanation above for the ``vertex_color`` option
to see possible values. This color is ignored for the
``sphere`` vertex shape.
- ``vertex_fill_colors`` -- a dictionary whose keys are vertices
of the graph and whose values are colors. These will be used
to fill the interior of vertices. See the explanation
above for the ``vertex_color`` option to see possible values.
These values need only be specified for a proper subset of the
vertices. Specified values will supersede a default value.
- ``vertex_shape`` -- default: 'circle' -- a string for
the shape of the vertices. Allowable values are 'circle',
'sphere', 'rectangle', 'diamond'. The sphere shape has
a 3D look to its coloring and uses only one color,
that specified by ``vertex_color`` and ``vertex_colors``,
which are normally used for the outline of the vertex.
- ``vertex_shapes`` -- a dictionary whose keys are vertices
of the graph and whose values are shapes. See ``vertex_shape``
for the allowable possibilities.
- ``vertex_size`` -- default: 1.0 -- the minimum size of a vertex
as a number. Vertices will expand to contain their labels if
the labels are placed inside the vertices. If you set this
value to zero the vertex will be as small as possible
(up to tkz-graph's "inner sep" parameter), while still
containing labels. However, if labels are not of a uniform
size, then the vertices will not be either.
- ``vertex_sizes`` -- a dictionary of sizes for some of the vertices.
- ``vertex_labels`` -- default: ``True`` -- a boolean to
determine whether or not to display the vertex labels.
If ``False`` subsequent options about vertex labels are ignored.
- ``vertex_labels_math`` -- default: ``True`` -- when true, if a label
is a string that begins and ends with dollar signs, then the string
will be rendered as a latex string. Otherwise, the label will be
automatically subjected to the ``latex()`` method and rendered
accordingly. If ``False`` the label is rendered as its textual
representation according to the ``_repr_`` method. Support for
arbitrarily-complicated mathematics is not especially robust.
- ``vertex_label_color`` -- default: 'black' -- a single color to use
as the default for labels of vertices. See the explanation above
for the ``vertex_color`` option to see possible values.
- ``vertex_label_colors`` -- a dictionary whose keys are vertices
of the graph and whose values are colors. These will be used
for the text of the labels of vertices. See the explanation
above for the ``vertex_color`` option to see possible values.
These values need only be specified for a proper subset of the
vertices. Specified values will supersede a default value.
- ``vertex_label_placement`` -- default: 'center' -- if 'center'
the label is centered in the interior of the vertex and the vertex
will expand to contain the label. Giving instead a pair of numbers
will place the label exterior to the vertex at a certain distance
from the edge, and at an angle to the positive x-axis, similar
in spirit to polar coordinates.
- ``vertex_label_placements`` -- a dictionary of placements
indexed by the vertices. See the explanation for
``vertex_label_placement`` for the possible values.
- ``edge_color`` -- default: 'black' -- a single color to use as
the default for an edge. See the explanation above for the
``vertex_color`` option to see possible values.
- ``edge_colors`` -- a dictionary whose keys are edges of the
graph and whose values are colors. These will be used to
color the edges. See the explanation above for the
``vertex_color`` option to see possible values. These
values need only be specified for a proper subset of the
edges. Specified values will supersede a default value.
- ``edge_fills`` -- default: ``False`` -- a boolean that
determines if an edge has a second color running down
the middle. This can be a useful effect for highlighting
edge crossings.
- ``edge_fill_color`` -- default: 'black' -- a single color
to use as the default for the fill color of an edge.
The boolean switch ``edge_fills`` must be set to True
for this to have an effect. See the explanation above
for the ``vertex_color`` option to see possible values.
- ``edge_fill_colors`` -- a dictionary whose keys are edges
of the graph and whose values are colors. See the explanation
above for the ``vertex_color`` option to see possible values.
These values need only be specified for a proper subset of the
edges. Specified values will supersede a default value.
- ``edge_thickness`` -- default: 0.1 -- a number specifying the
width of the edges. Note that tkz-graph does not interpret
this number for loops.
- ``edge_thicknesses`` -- a dictionary of thicknesses for
some of the edges of a graph. These values need only
be specified for a proper subset of the vertices. Specified
values will supersede a default value.
- ``edge_labels`` -- default: ``False`` -- a boolean that
determines if edge labels are shown. If ``False`` subsequent
options about edge labels are ignored.
- ``edge_labels_math`` -- default: ``True`` -- a boolean that
controls how edge labels are rendered. Read the explanation
for the ``vertex_labels_math`` option, which behaves identically.
Support for arbitrarily-complicated mathematics is not
especially robust.
- ``edge_label_color`` -- default: 'black' -- a single color
to use as the default for labels of edges. See the explanation
above for the ``vertex_color`` option to see possible values.
- ``edge_label_colors`` -- a dictionary whose keys are edges
of the graph and whose values are colors. These will be used
for the text of the labels of edges. See the explanation
above for the ``vertex_color`` option to see possible values.
These values need only be specified for a proper subset of
the edges. Specified values will supersede a default
value. Note that labels must be used for this to have any
effect, and no care is taken to ensure that label and
fill colors work well together.
- ``edge_label_sloped`` -- default: ``True`` -- a boolean that
specifies how edge labels are placed. ``False`` results
in a horizontal label, while ``True`` means the label
is rotated to follow the direction of the edge it labels.
- ``edge_label_slopes`` -- a dictionary of booleans, indexed
by some subset of the edges. See the ``edge_label_sloped``
option for a description of sloped edge labels.
- ``edge_label_placement`` -- default: 0.50 -- a number between
0.0 and 1.0, or one of: 'above', 'below', 'left', 'right'. These
adjust the location of an edge label along an edge. A
number specifies how far along the edge the label is
located. ``left`` and ``right`` are conveniences.
``above`` and ``below`` move the label off the edge
itself while leaving it near the midpoint of the edge.
The default value of ``0.50`` places the label on the
midpoint of the edge.
- ``edge_label_placements`` -- a dictionary of edge placements,
indexed by the edges. See the ``edge_label_placement`` option
for a description of the allowable values.
- ``loop_placement`` -- default: (3.0, 'NO') -- a pair
that determines how loops are rendered. The first
element of the pair is a distance, which determines
how big the loop is and the second element is a string
specifying a compass point (North, South, East, West)
as one of 'NO','SO','EA','WE'.
- ``loop_placements`` -- a dictionary of loop placements.
See the ``loop_placement`` option for the allowable values.
While loops are technically edges, this dictionary is
indexed by vertices.
For the 'dot2tex' format, the possible option names and
associated values are given below:
- ``prog`` -- the program used for the layout. It must be a
string corresponding to one of the software of the graphviz
suite: 'dot', 'neato', 'twopi', 'circo' or 'fdp'.
- ``edge_labels`` -- a boolean (default: False). Whether to
display the labels on edges.
- ``edge_colors`` -- a color. Can be used to set a global
color to the edge of the graph.
- ``color_by_label`` - a boolean (default: False). Colors the
edges according to their labels
OUTPUT:
There are none. Success happens silently.
EXAMPLES:
Set, then modify, then clear the ``tkz_style`` option, and
finally show an error for an unrecognized option name::
sage: g = graphs.PetersenGraph()
sage: opts = g.latex_options()
sage: opts
LaTeX options for Petersen graph: {}
sage: opts.set_option('tkz_style', 'Art')
sage: opts
LaTeX options for Petersen graph: {'tkz_style': 'Art'}
sage: opts.set_option('tkz_style', 'Simple')
sage: opts
LaTeX options for Petersen graph: {'tkz_style': 'Simple'}
sage: opts.set_option('tkz_style')
sage: opts
LaTeX options for Petersen graph: {}
sage: opts.set_option('bad_name', 'nonsense')
Traceback (most recent call last):
...
ValueError: bad_name is not a LaTeX option for a graph.
See :meth:`sage.graphs.generic_graph.GenericGraph.layout_graphviz` for
installation instructions for ``graphviz`` and ``dot2tex``. Furthermore,
pgf >= 2.00 should be available inside LaTeX's tree for LaTeX
compilation (e.g. when using ``view``). In case your LaTeX distribution
does not provide it, here are short instructions:
- download pgf from http://sourceforge.net/projects/pgf/
- unpack it in ``/usr/share/texmf/tex/generic`` (depends on your system)
- clean out remaining pgf files from older version
- run texhash
TESTS:
These test all of the options and one example of each allowable
proper input. They should all execute silently. ::
sage: G=Graph()
sage: G.add_edge((0,1))
sage: opts = G.latex_options()
sage: opts.set_option('tkz_style', 'Custom')
sage: opts.set_option('tkz_style', 'Art')
sage: opts.set_option('format', 'tkz_graph')
sage: opts.set_option('layout', 'acyclic')
sage: opts.set_option('prog', 'dot')
sage: opts.set_option('units', 'cm')
sage: opts.set_option('scale', 1.0)
sage: opts.set_option('graphic_size', (5, 5))
sage: opts.set_option('margins', (0,0,0,0))
sage: opts.set_option('vertex_color', 'black')
sage: opts.set_option('vertex_colors', {0:'#ABCDEF'})
sage: opts.set_option('vertex_fill_color', 'white')
sage: opts.set_option('vertex_fill_colors', {0:'c'})
sage: opts.set_option('vertex_shape', 'circle')
sage: opts.set_option('vertex_shapes', {0:'sphere'})
sage: opts.set_option('vertex_size', 1.0)
sage: opts.set_option('vertex_sizes', {0:3.4})
sage: opts.set_option('vertex_labels', True)
sage: opts.set_option('vertex_labels_math', True)
sage: opts.set_option('vertex_label_color', 'black')
sage: opts.set_option('vertex_label_colors', {0:'.23'})
sage: opts.set_option('vertex_label_placement', 'center')
sage: opts.set_option('vertex_label_placement', (3, 4.2))
sage: opts.set_option('vertex_label_placements', {0:'center'})
sage: opts.set_option('vertex_label_placements', {0:(4.7,1)})
sage: opts.set_option('edge_color', 'black')
sage: opts.set_option('edge_colors', {(0,1):'w'})
sage: opts.set_option('edge_fills', False)
sage: opts.set_option('edge_fill_color', 'black')
sage: opts.set_option('edge_fill_colors', {(0,1):"#123456"})
sage: opts.set_option('edge_thickness', 0.1)
sage: opts.set_option('edge_thicknesses', {(0,1):5.2})
sage: opts.set_option('edge_labels', False)
sage: opts.set_option('edge_labels_math', True)
sage: opts.set_option('edge_label_color', 'black')
sage: opts.set_option('edge_label_colors', {(0,1):'red'})
sage: opts.set_option('edge_label_sloped', True)
sage: opts.set_option('edge_label_slopes', {(0,1): False})
sage: opts.set_option('edge_label_placement', 'left')
sage: opts.set_option('edge_label_placement', 0.50)
sage: opts.set_option('edge_label_placements', {(0,1):'above'})
sage: opts.set_option('edge_label_placements', {(0,1):0.75})
sage: opts.set_option('loop_placement', (3.0, 'NO'))
sage: opts.set_option('loop_placements', {0:(5.7,'WE')})
These test some of the logic of possible failures. Some tests,
such as inputs of colors, are handled by somewhat general sections
of code and are not tested for each possible option. ::
sage: G=Graph()
sage: G.add_edge((0,1))
sage: opts = G.latex_options()
sage: opts.set_option('tkz_style', 'Crazed')
Traceback (most recent call last):
...
ValueError: tkz_style is not "Custom", nor an implemented tkz-graph style
sage: opts.set_option('format', 'NonExistent')
Traceback (most recent call last):
...
ValueError: format option must be one of: tkz_graph, dot2tex not NonExistent
sage: opts.set_option('units', 'furlongs')
Traceback (most recent call last):
...
ValueError: units option must be one of: in, mm, cm, pt, em, ex, not furlongs
sage: opts.set_option('graphic_size', (1,2,3))
Traceback (most recent call last):
...
ValueError: graphic_size option must be an ordered pair, not (1, 2, 3)
sage: opts.set_option('margins', (1,2,3))
Traceback (most recent call last):
...
ValueError: margins option must be 4-tuple, not (1, 2, 3)
sage: opts.set_option('vertex_color', 'chartruse')
Traceback (most recent call last):
...
ValueError: vertex_color option needs to be a matplotlib color (always as a string), not chartruse
sage: opts.set_option('vertex_labels_math', 'maybe')
Traceback (most recent call last):
...
ValueError: vertex_labels_math option must be True or False, not maybe
sage: opts.set_option('vertex_shape', 'decagon')
Traceback (most recent call last):
...
ValueError: vertex_shape option must be the shape of a vertex, not decagon
sage: opts.set_option('scale', 'big')
Traceback (most recent call last):
...
ValueError: scale option must be a positive number, not big
sage: opts.set_option('scale', -6)
Traceback (most recent call last):
...
ValueError: scale option must be a positive number, not -6
sage: opts.set_option('vertex_label_placement', (2,-4))
Traceback (most recent call last):
...
ValueError: vertex_label_placement option must be None, or a pair of positive numbers, not (2, -4)
sage: opts.set_option('edge_label_placement', 3.6)
Traceback (most recent call last):
...
ValueError: edge_label_placement option must be a number between 0.0 and 1.0 or a place (like "above"), not 3.60000000000000
sage: opts.set_option('loop_placement', (5,'SW'))
Traceback (most recent call last):
...
ValueError: loop_placement option must be a pair that is a positive number followed by a compass point abbreviation, not (5, 'SW')
sage: opts.set_option('vertex_fill_colors', {0:'#GG0000'})
Traceback (most recent call last):
...
ValueError: vertex_fill_colors option for 0 needs to be a matplotlib color (always as a string), not #GG0000
sage: opts.set_option('vertex_sizes', {0:-10})
Traceback (most recent call last):
...
ValueError: vertex_sizes option for 0 needs to be a positive number, not -10
sage: opts.set_option('edge_label_slopes', {(0,1):'possibly'})
Traceback (most recent call last):
...
ValueError: edge_label_slopes option for (0, 1) needs to be True or False, not possibly
sage: opts.set_option('vertex_shapes', {0:'pentagon'})
Traceback (most recent call last):
...
ValueError: vertex_shapes option for 0 needs to be a vertex shape, not pentagon
sage: opts.set_option('vertex_label_placements', {0:(1,2,3)})
Traceback (most recent call last):
...
ValueError: vertex_label_placements option for 0 needs to be None or a pair of positive numbers, not (1, 2, 3)
sage: opts.set_option('edge_label_placements', {(0,1):'partway'})
Traceback (most recent call last):
...
ValueError: edge_label_placements option for (0, 1) needs to be a number between 0.0 and 1.0 or a place (like "above"), not partway
sage: opts.set_option('loop_placements', {0:(-3,'WE')})
Traceback (most recent call last):
...
ValueError: loop_placements option for 0 needs to be a positive number and a compass point (like "EA"), not (-3, 'WE')
sage: opts.set_option('margins', (1,2,3,-5))
Traceback (most recent call last):
...
ValueError: margins option of (1, 2, 3, -5) cannot contain -5
"""
#TODO: Needed improvements, possible extensions, dubious ideas
#- digraph edges should be optionally curved or straight with
#perhaps a variable curvature (exit angle from vertex). Always
#curved now to allow for bidirectional.
#- the "draw" option will make boxes around labels as
#extensions of the edge color and thickness
#- edge labels can have colored backgrounds (which look like
#fills when boxed).
#- edge label fonts can be sized (latex style), which will
#make scaling work totally
#- edges can be dotted or dashed, Beezer suggests calling
#this "edge shape" to mirror vertex shapes
#- "line width" works for vertices, should be configurable
#- allow injection of latex code to style a pre-built style
#for example, \SetUpVertex[style={fill=green}] could override
#color selection in a style like "Art"
#- "inner sep" is distance from vertex label to edge of vertex
#this should be set as small as possible - but bigger than the
#line width.
#- aspect ratio could be preserved, see hints near
#creation of affine transformation.
#- "outer sep" causes edges to stop some distance before
#reaching vertices. Seems of limited value.
#- Multi-edges are not supported. Need to recognize them,
#twiddle keys in dictionaries, plot with a spectrum of bends.
#Seems like a substantial project.
from matplotlib.colors import ColorConverter
from sage.rings.integer import Integer
from sage.rings.real_mpfr import RealLiteral
cc = ColorConverter() # used as a color tester
if not(option_name in GraphLatex.__graphlatex_options):
raise ValueError( "%s is not a LaTeX option for a graph." % option_name )
if option_value is None: # clear the option, if set
if option_name in self._options:
del self._options[option_name]
else:
# Test options here when attempt to set
name = option_name; value = option_value
#
# Tuples of constants
#
formats = ('tkz_graph', 'dot2tex')
styles = ('Custom', 'Shade', 'Art', 'Normal', 'Dijkstra', 'Welsh', 'Classic', 'Simple')
unit_names = ('in','mm','cm','pt', 'em', 'ex')
shape_names = ('circle', 'sphere','rectangle', 'diamond')
label_places = ('above', 'below', 'right', 'left')
compass_points = ('NO', 'SO', 'EA', 'WE')
number_types = (int, Integer, float, RealLiteral)
#
# Options with structurally similar tests
#
boolean_options = ('vertex_labels','vertex_labels_math','edge_fills','edge_labels','edge_labels_math','edge_label_sloped')
color_options = ('vertex_color', 'vertex_fill_color', 'vertex_label_color','edge_color','edge_fill_color','edge_label_color')
color_dicts = ('vertex_colors','vertex_fill_colors','vertex_label_colors','edge_colors','edge_fill_colors','edge_label_colors')
boolean_dicts = ('edge_label_slopes',)
positive_scalars = ('scale', 'vertex_size', 'edge_thickness')
positive_scalar_dicts = ('vertex_sizes', 'edge_thicknesses')
positive_tuples = ('graphic_size', 'margins')
#
# Checks/test on single values (ie graph-wide defaults)
#
if name == 'tkz_style' and not( value in styles ):
raise ValueError('%s is not "Custom", nor an implemented tkz-graph style' % name)
elif name == 'format' and not( value in formats ):
raise ValueError('%s option must be one of: tkz_graph, dot2tex not %s' % (name, value))
elif name == 'units' and not( value in unit_names ):
raise ValueError('%s option must be one of: in, mm, cm, pt, em, ex, not %s' % (name, value))
elif name == 'graphic_size' and not( isinstance(value, tuple) and (len(value) == 2) ):
raise ValueError( '%s option must be an ordered pair, not %s' % (name, value))
elif name == 'margins' and not( (isinstance(value, tuple)) and (len(value) == 4) ):
raise ValueError( '%s option must be 4-tuple, not %s' % (name, value))
elif name in color_options:
try:
cc.to_rgb(value)
except Exception:
raise ValueError('%s option needs to be a matplotlib color (always as a string), not %s' % (name, value))
elif name in boolean_options and not isinstance(value, bool):
raise ValueError('%s option must be True or False, not %s' % (name, value))
elif name == 'vertex_shape' and not value in shape_names:
raise ValueError('%s option must be the shape of a vertex, not %s' % (name, value))
elif name in positive_scalars and not ( type(value) in number_types and (value >= 0.0) ):
raise ValueError( '%s option must be a positive number, not %s' % (name, value))
elif name == 'vertex_label_placement' and not( value == 'center') and not( isinstance(value, tuple) and len(value) == 2 and type(value[0]) in number_types and value[0]>=0 and type(value[1]) in number_types and value[1]>=0 ):
raise ValueError( '%s option must be None, or a pair of positive numbers, not %s' % (name, value))
elif name == 'edge_label_placement' and not( ((type(value) in number_types) and (0<= value) and (value <= 1)) or (value in label_places)):
raise ValueError( '%s option must be a number between 0.0 and 1.0 or a place (like "above"), not %s' % (name, value))
elif name == 'loop_placement' and not( (isinstance(value, tuple)) and (len(value) == 2) and (value[0] >=0) and (value[1] in compass_points) ):
raise ValueError( '%s option must be a pair that is a positive number followed by a compass point abbreviation, not %s' % (name, value))
#
# Checks/test on dictionaries of values (ie per-vertex or per-edge defaults)
#
elif name in color_dicts:
if not isinstance(value, dict):
raise TypeError('%s option must be a dictionary, not %s' % (name, value))
else:
for key, c in value.items():
try:
cc.to_rgb(c)
except Exception:
raise ValueError('%s option for %s needs to be a matplotlib color (always as a string), not %s' % (name, key, c))
elif name in positive_scalar_dicts:
if not isinstance(value, dict):
raise TypeError('%s option must be a dictionary, not %s' % (name, value))
else:
for key, x in value.items():
if not type(x) in [int, Integer, float, RealLiteral] or not x >= 0.0:
raise ValueError('%s option for %s needs to be a positive number, not %s' % (name, key, x))
elif name in boolean_dicts:
if not isinstance(value, dict):
raise TypeError('%s option must be a dictionary, not %s' % (name, value))
else:
for key, b in value.items():
if not isinstance(b, bool):
raise ValueError('%s option for %s needs to be True or False, not %s' % (name, key, b))
elif name == 'vertex_shapes':
if not isinstance(value, dict):
raise TypeError('%s option must be a dictionary, not %s' % (name, value))
else:
for key, s in value.items():
if not s in shape_names:
raise ValueError('%s option for %s needs to be a vertex shape, not %s' % (name, key, s))
elif name == 'vertex_label_placements':
if not isinstance(value, dict):
raise TypeError('%s option must be a dictionary, not %s' % (name, value))
else:
for key, p in value.items():
if not( p == 'center') and not( isinstance(p, tuple) and len(p) == 2 and type(p[0]) in number_types and p[0]>=0 and type(p[1]) in number_types and p[1]>=0 ):
raise ValueError('%s option for %s needs to be None or a pair of positive numbers, not %s' % (name, key, p))
elif name == 'edge_label_placements':
if not isinstance(value, dict):
raise TypeError('%s option must be a dictionary, not %s' % (name, value))
else:
for key, p in value.items():
if not(type(p) in [float, RealLiteral] and (0 <= p) and (p <= 1)) and not(p in label_places):
raise ValueError('%s option for %s needs to be a number between 0.0 and 1.0 or a place (like "above"), not %s' % (name, key, p))
elif name == 'loop_placements':
if not isinstance(value, dict):
raise TypeError('%s option must be a dictionary, not %s' % (name, value))
else:
for key, p in value.items():
if not( (isinstance(p, tuple)) and (len(p)==2) and (p[0] >=0) and (p[1] in compass_points) ):
raise ValueError('%s option for %s needs to be a positive number and a compass point (like "EA"), not %s' % (name, key, p))
# These have been verified as tuples before going into this next check
elif name in positive_tuples:
for x in value:
if not type(x) in [int, Integer, float, RealLiteral] or not x >= 0.0:
raise ValueError( '%s option of %s cannot contain %s' % (name, value, x))
#
# Verified. Set it.
self._options[option_name] = option_value
def set_options(self, **kwds):
r"""
Set several LaTeX options for a graph all at once.
INPUT:
- kwds - any number of option/value pairs to set many graph latex
options at once (a variable number, in any order). Existing
values are overwritten, new values are added. Existing
values can be cleared by setting the value to ``None``.
Errors are raised in the :func:`set_option` method.
EXAMPLES::
sage: g = graphs.PetersenGraph()
sage: opts = g.latex_options()
sage: opts.set_options(tkz_style = 'Welsh')
sage: opts.get_option('tkz_style')
'Welsh'
"""
if kwds:
for name, value in kwds.items():
self.set_option(name, value)
def get_option(self, option_name):
r"""
Returns the current value of the named option.
INPUT:
- option_name - the name of an option
OUTPUT:
If the name is not present in
``__graphlatex_options`` it is an
error to ask for it. If an option has not been set then the
default value is returned. Otherwise, the value of the
option is returned.
EXAMPLES::
sage: g = graphs.PetersenGraph()
sage: opts = g.latex_options()
sage: opts.set_option('tkz_style', 'Art')
sage: opts.get_option('tkz_style')
'Art'
sage: opts.set_option('tkz_style')
sage: opts.get_option('tkz_style') == "Custom"
True
sage: opts.get_option('bad_name')
Traceback (most recent call last):
...
ValueError: bad_name is not a Latex option for a graph.
"""
if not(option_name in GraphLatex.__graphlatex_options):
raise ValueError( "%s is not a Latex option for a graph." % option_name )
else:
if option_name in self._options:
return self._options[option_name]
else:
return GraphLatex.__graphlatex_options[option_name]
def latex(self):
r"""
Returns a string in LaTeX representing a graph.
This is the command that is invoked by
``sage.graphs.generic_graph.GenericGraph._latex_`` for a graph, so
it returns a string of LaTeX commands that can be incorporated into a
LaTeX document unmodified. The exact contents of this string are
influenced by the options set via the methods
:meth:`sage.graphs.generic_graph.GenericGraph.set_latex_options`,
:meth:`set_option`, and :meth:`set_options`.
By setting the ``format`` option different packages can be used to
create the latex version of a graph. Supported packages are
``tkz-graph`` and ``dot2tex``.
EXAMPLES::
sage: from sage.graphs.graph_latex import check_tkz_graph
sage: check_tkz_graph() # random - depends on TeX installation
sage: g = graphs.CompleteGraph(2)
sage: opts = g.latex_options()
sage: print(opts.latex())
\begin{tikzpicture}
%
\useasboundingbox (0,0) rectangle (5.0cm,5.0cm);
%
\definecolor{cv0}{rgb}{0.0,0.0,0.0}
\definecolor{cfv0}{rgb}{1.0,1.0,1.0}
\definecolor{clv0}{rgb}{0.0,0.0,0.0}
\definecolor{cv1}{rgb}{0.0,0.0,0.0}
\definecolor{cfv1}{rgb}{1.0,1.0,1.0}
\definecolor{clv1}{rgb}{0.0,0.0,0.0}
\definecolor{cv0v1}{rgb}{0.0,0.0,0.0}
%
\Vertex[style={minimum size=1.0cm,draw=cv0,fill=cfv0,text=clv0,shape=circle},LabelOut=false,L=\hbox{$0$},x=5.0cm,y=5.0cm]{v0}
\Vertex[style={minimum size=1.0cm,draw=cv1,fill=cfv1,text=clv1,shape=circle},LabelOut=false,L=\hbox{$1$},x=0.0cm,y=0.0cm]{v1}
%
\Edge[lw=0.1cm,style={color=cv0v1,},](v0)(v1)
%
\end{tikzpicture}
"""
format = self.get_option('format')
if format == "tkz_graph":
return self.tkz_picture()
elif format == "dot2tex":
return self.dot2tex_picture()
def dot2tex_picture(self):
r"""
Calls dot2tex to construct a string of LaTeX commands
representing a graph as a ``tikzpicture``.
EXAMPLES::
sage: g = digraphs.ButterflyGraph(1)
sage: from sage.graphs.graph_latex import check_tkz_graph
sage: check_tkz_graph() # random - depends on TeX installation
sage: print(g.latex_options().dot2tex_picture()) # optional - dot2tex graphviz
\begin{tikzpicture}[>=latex,line join=bevel,]
%%
\node (node_3) at (...bp,...bp) [draw,draw=none] {$\left(1, 1\right)$};
\node (node_2) at (...bp,...bp) [draw,draw=none] {$\left(1, 0\right)$};
\node (node_1) at (...bp,...bp) [draw,draw=none] {$\left(0, 1\right)$};
\node (node_0) at (...bp,...bp) [draw,draw=none] {$\left(0, 0\right)$};
\draw [black,->] (node_0) ..controls (...bp,...bp) and (...bp,...bp) .. (node_3);
\draw [black,->] (node_2) ..controls (...bp,...bp) and (...bp,...bp) .. (node_1);
\draw [black,->] (node_0) ..controls (...bp,...bp) and (...bp,...bp) .. (node_1);
\draw [black,->] (node_2) ..controls (...bp,...bp) and (...bp,...bp) .. (node_3);
%
\end{tikzpicture}
We make sure :trac:`13624` is fixed::
sage: G = DiGraph()
sage: G.add_edge(3333, 88, 'my_label')
sage: G.set_latex_options(edge_labels=True)
sage: print(G.latex_options().dot2tex_picture()) # optional - dot2tex graphviz
\begin{tikzpicture}[>=latex,line join=bevel,]
%%
\node (node_1) at (...bp,...bp) [draw,draw=none] {$3333$};
\node (node_0) at (...bp,...bp) [draw,draw=none] {$88$};
\draw [black,->] (node_1) ..controls (...bp,...bp) and (...bp,...bp) .. (node_0);
\definecolor{strokecol}{rgb}{0.0,0.0,0.0};
\pgfsetstrokecolor{strokecol}
\draw (...bp,...bp) node {$\text{\texttt{my{\char`\_}label}}$};
%
\end{tikzpicture}
Note: there is a lot of overlap between what tkz_picture and
dot2tex do. It would be best to merge them! dot2tex probably
can work without graphviz if layout information is provided.
"""
from sage.graphs.dot2tex_utils import assert_have_dot2tex
assert_have_dot2tex()
options = self.__graphlatex_options.copy()
options.update(self._options)
dotdata = self._graph.graphviz_string(labels="latex", **options)
import dot2tex
return dot2tex.dot2tex(
dotdata,
format = 'tikz',
autosize = True,
crop = True,
figonly = 'True',
prog=self.get_option('prog'))
# usepdflatex = True, debug = True)
def tkz_picture(self):
r"""
Return a string of LaTeX commands representing a graph as a ``tikzpicture``.
This routine interprets the graph's properties and the options in
``_options`` to render the graph with commands from the ``tkz-graph``
LaTeX package.
This requires that the LaTeX optional packages
tkz-graph and tkz-berge be installed. You may also need a
current version of the pgf package. If the tkz-graph and
tkz-berge packages are present in the system's TeX
installation, the appropriate ``\\usepackage{}`` commands
will be added to the LaTeX preamble as part of
the initialization of the graph. If these two packages
are not present, then this command will return a warning
on its first use, but will return a string that could be
used elsewhere, such as a LaTeX document.
For more information about tkz-graph you can visit
`Altermundus.com <http://altermundus.com/>`_
EXAMPLES:
With a pre-built ``tkz-graph`` style specified, the latex
representation will be relatively simple. ::
sage: from sage.graphs.graph_latex import check_tkz_graph
sage: check_tkz_graph() # random - depends on TeX installation
sage: g = graphs.CompleteGraph(3)
sage: opts = g.latex_options()
sage: g.set_latex_options(tkz_style='Art')
sage: print(opts.tkz_picture())
\begin{tikzpicture}
%
\GraphInit[vstyle=Art]
%
\useasboundingbox (0,0) rectangle (5.0cm,5.0cm);
%
\Vertex[L=\hbox{$0$},x=2.5cm,y=5.0cm]{v0}
\Vertex[L=\hbox{$1$},x=0.0cm,y=0.0cm]{v1}
\Vertex[L=\hbox{$2$},x=5.0cm,y=0.0cm]{v2}
%
\Edge[](v0)(v1)
\Edge[](v0)(v2)
\Edge[](v1)(v2)
%
\end{tikzpicture}
Setting the style to "Custom" results in various configurable
aspects set to the defaults, so the string is more involved. ::
sage: from sage.graphs.graph_latex import check_tkz_graph
sage: check_tkz_graph() # random - depends on TeX installation
sage: g = graphs.CompleteGraph(3)
sage: opts = g.latex_options()
sage: g.set_latex_options(tkz_style='Custom')
sage: print(opts.tkz_picture())
\begin{tikzpicture}
%
\useasboundingbox (0,0) rectangle (5.0cm,5.0cm);
%
\definecolor{cv0}{rgb}{0.0,0.0,0.0}
\definecolor{cfv0}{rgb}{1.0,1.0,1.0}
\definecolor{clv0}{rgb}{0.0,0.0,0.0}
\definecolor{cv1}{rgb}{0.0,0.0,0.0}
\definecolor{cfv1}{rgb}{1.0,1.0,1.0}
\definecolor{clv1}{rgb}{0.0,0.0,0.0}
\definecolor{cv2}{rgb}{0.0,0.0,0.0}
\definecolor{cfv2}{rgb}{1.0,1.0,1.0}
\definecolor{clv2}{rgb}{0.0,0.0,0.0}
\definecolor{cv0v1}{rgb}{0.0,0.0,0.0}
\definecolor{cv0v2}{rgb}{0.0,0.0,0.0}
\definecolor{cv1v2}{rgb}{0.0,0.0,0.0}
%
\Vertex[style={minimum size=1.0cm,draw=cv0,fill=cfv0,text=clv0,shape=circle},LabelOut=false,L=\hbox{$0$},x=2.5cm,y=5.0cm]{v0}
\Vertex[style={minimum size=1.0cm,draw=cv1,fill=cfv1,text=clv1,shape=circle},LabelOut=false,L=\hbox{$1$},x=0.0cm,y=0.0cm]{v1}
\Vertex[style={minimum size=1.0cm,draw=cv2,fill=cfv2,text=clv2,shape=circle},LabelOut=false,L=\hbox{$2$},x=5.0cm,y=0.0cm]{v2}
%
\Edge[lw=0.1cm,style={color=cv0v1,},](v0)(v1)
\Edge[lw=0.1cm,style={color=cv0v2,},](v0)(v2)
\Edge[lw=0.1cm,style={color=cv1v2,},](v1)(v2)
%
\end{tikzpicture}
See the introduction to the :mod:`~sage.graphs.graph_latex` module
for more information on the use of this routine.
TESTS:
Graphs with preset layouts that are vertical or horizontal
can cause problems. First test is a horizontal layout on a
path with three vertices. ::
sage: from sage.graphs.graph_latex import check_tkz_graph
sage: check_tkz_graph() # random - depends on TeX installation
sage: g = graphs.PathGraph(3)
sage: opts = g.latex_options()
sage: print(opts.tkz_picture())
\begin{tikzpicture}
...
\end{tikzpicture}
Scaling to a bounding box is problematic for graphs with
just one vertex, or none. ::
sage: from sage.graphs.graph_latex import check_tkz_graph
sage: check_tkz_graph() # random - depends on TeX installation
sage: g = graphs.CompleteGraph(1)
sage: opts = g.latex_options()
sage: print(opts.tkz_picture())
\begin{tikzpicture}
...
\end{tikzpicture}
"""
# This routine does not handle multiple edges
# It will properly handle digraphs where a pair of vertices
# has an edge in each direction, since edges of a digraph are
# curved.
if self._graph.has_multiple_edges():
raise NotImplementedError('it is not possible to create a tkz-graph version of a graph with multiple edges')
from matplotlib.colors import ColorConverter
from sage.misc.latex import latex
from sage.rings.real_mpfr import RealLiteral # remove?
import copy
# On first use of this method, the next call may print warnings
# as a side effect, but will be silent on any subsequent use.
check_tkz_graph()
# Overhead
cc = ColorConverter() # .to_rgb method to convert "colors" to triples
prefix = 'v' # leading string on internal (to tkz-graph) vertex names
####################
### Pre-built styles
####################
# We preserve the pre-built style OR
# get defaults for each option, but we do not mix the two
style = self.get_option('tkz_style')
customized = (style == 'Custom')
# We don't do much for a pre-built style
# Layout information from the graph
# And vertex labels (if used) are the latex representation of Sage objects
if not customized:
vertex_labels_math = True
###################################
### Layout, image sizing placement
###################################
units = self.get_option('units')
scale = self.get_option('scale')
graphic_size = self.get_option('graphic_size')
margins = self.get_option('margins')
# The positions of the vertices will get scaled to fill the
# specified size of the image, as given by graphic_size.
# But first a border is subtracted away and the graph
# is scaled to fit there.
# Lower left, upper right corners of box inside borders
llx = margins[0]; lly = margins[3]
urx = graphic_size[0]-margins[1]; ury = graphic_size[1]-margins[2]
# width and height of space
w = urx - llx; h = ury - lly
# TODO: Could use self._graph._layout_bounding_box(pos)
# trans = lambda x,y: [x[0]-y[0],x[1]-y[1]]
# Determine the spread in the x and y directions (i.e. xmax, ymax)
# Needs care for perfectly horizontal and vertical layouts
# We grab the graph's layout (or it is computed as a consequence of the request)
pos = self._graph.layout()
if len(pos.values()) > 0:
xmin = min([ i[0] for i in pos.values()])
ymin = min([ i[1] for i in pos.values()])
xmax = max([ i[0] for i in pos.values()])
ymax = max([ i[1] for i in pos.values()])
else:
xmax, ymax = 0, 0
# Linear scaling factors that will be used to scale the image to
# fit into the bordered region. Purely horizontal, or purely vertical,
# layouts get put in the middle of the bounding box by setting the
# scaling to a constant value on a midline
xspread = xmax - xmin
if xspread == 0:
x_scale = 0.0
llx = llx + 0.5*w
else:
x_scale = float(w)/xspread
yspread = ymax - ymin
if yspread == 0:
y_scale = 0.0
lly = lly + 0.5*h
else:
y_scale = float(h)/yspread
# Could preserve aspect ratio here by setting both scale factors to the minimum
# and doing a shift of the larger to center
# A linear function will map layout positions into the bordered graphic space
translate = lambda p: ((p[0]-xmin)*x_scale+llx, (p[1]-ymin)*y_scale+lly)
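# Illustrative note (not from the original source): with graphic_size=(5,5), all margins
# equal to 0 and a non-degenerate layout, translate maps (xmin, ymin) to (0, 0) and
# (xmax, ymax) to (5, 5).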
#############
### Vertices
#############
# We record the index of each vertex in the graph's list of vertices
# Which is just a convenience for forming vertex names internal to tkz-graph
index_of_vertex={}
vertex_list = self._graph.vertices()
for u in self._graph:
index_of_vertex[u]=vertex_list.index(u)
# Vertex labels can be switched on/off, and we don't record
# or use this type of extra information if they are switched off
vertex_labels = self.get_option('vertex_labels')
# We collect options for vertices, default values and for-some-vertices information
# These are combined into dictionaries on a per-vertex basis, for all vertices
# This only applies for a custom style
#
# Defaults
#
if customized:
dvc = cc.to_rgb(self.get_option('vertex_color'))
dvfc = cc.to_rgb(self.get_option('vertex_fill_color'))
dsh = self.get_option( 'vertex_shape' )
dvs = self.get_option('vertex_size')
#
# Default label information, if using vertex labels
#
if vertex_labels:
vertex_labels_math = self.get_option('vertex_labels_math')
dvlc = cc.to_rgb(self.get_option('vertex_label_color'))
dvlp = self.get_option('vertex_label_placement')
# needs test for a pair of numbers, angle and distance (or None)
# Retrieve dictionaries for selected vertices
vertex_colors = self.get_option('vertex_colors')
vertex_fill_colors = self.get_option('vertex_fill_colors')
vertex_shapes = self.get_option('vertex_shapes')
vertex_sizes = self.get_option('vertex_sizes')
if vertex_labels:
vertex_label_colors = self.get_option('vertex_label_colors')
vertex_label_placements = self.get_option('vertex_label_placements')
# Form dictionaries, each indexed for all vertices
v_color = {}
vf_color = {}
v_shape = {}
v_size = {}
if vertex_labels:
vl_color = {}
vl_placement = {}
for u in vertex_list:
#
c = dvc
if u in vertex_colors:
c = cc.to_rgb(vertex_colors[u])
v_color[ u ] = c
#
c = dvfc
if u in vertex_fill_colors:
c = cc.to_rgb(vertex_fill_colors[u])
vf_color[u] = c
#
sh = dsh
if u in vertex_shapes:
sh = vertex_shapes[u]
v_shape[u] = sh
#
vs = dvs
if u in vertex_sizes:
vs = vertex_sizes[u]
v_size[u] = vs
#
if vertex_labels:
#
c = dvlc
if u in vertex_label_colors:
c = cc.to_rgb(vertex_label_colors[u])
vl_color[u] = c
#
vlp = dvlp
if u in vertex_label_placements:
vlp = vertex_label_placements[u]
# test vlp here
vl_placement[u] = vlp
##########
### Edges
##########
if customized:
# An "edge fill" is a bit unusual, so we allow it to
# be turned off as the default.
edge_fills = self.get_option('edge_fills')
# Edge labels can be switched on/off, and we don't record
# or use this type of extra information if they are switched off
edge_labels = self.get_option('edge_labels')
# We collect options for edges, default values and for-some-edges information
# These are combined into dictionaries on a per-edge basis, for all edges
#
# Defaults
#
dec = cc.to_rgb(self.get_option('edge_color'))
if edge_fills:
defc = cc.to_rgb(self.get_option('edge_fill_color'))
det = self.get_option('edge_thickness')
#
if edge_labels:
edge_labels_math = self.get_option('edge_labels_math')
delc = cc.to_rgb(self.get_option('edge_label_color'))
dels = self.get_option('edge_label_sloped')
delp = self.get_option('edge_label_placement')
# Retrieve dictionaries for selected edges
edge_colors = self.get_option('edge_colors')
if edge_fills:
edge_fill_colors = self.get_option('edge_fill_colors')
edge_thicknesses = self.get_option('edge_thicknesses')
if edge_labels:
edge_label_colors = self.get_option('edge_label_colors')
edge_label_slopes = self.get_option('edge_label_slopes')
edge_label_placements = self.get_option('edge_label_placements')
# Form dictionaries, each indexed for all edges
#
# A key of a dictionary indexed by edges may be
# set for an edge of an undirected
# graph in the "wrong" order, so we use a
# "reverse" to test for this case. Everything formed
# here conforms to the order used in the graph.
#
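# For example (illustrative, not from the original source): an undirected edge stored
# by the graph as (0, 1) may appear as the key (1, 0) in a user-supplied option
# dictionary; the "reverse" lookups below catch that case.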
e_color = {}
if edge_fills:
ef_color = {}
e_thick = {}
if edge_labels:
el_color = {}
el_slope={}
el_placement={}
for e in self._graph.edges():
edge=(e[0],e[1]); reverse=(e[1],e[0])
#
c = dec
if edge in edge_colors or (not self._graph.is_directed() and reverse in edge_colors):
if edge in edge_colors:
c = cc.to_rgb(edge_colors[edge])
else:
c = cc.to_rgb(edge_colors[reverse])
e_color[edge] = c
#
if edge_fills:
c = defc
if edge in edge_fill_colors or (not self._graph.is_directed() and reverse in edge_fill_colors):
if edge in edge_fill_colors:
c = cc.to_rgb(edge_fill_colors[edge])
else:
c = cc.to_rgb(edge_fill_colors[reverse])
ef_color[edge] = c
#
et = det
if edge in edge_thicknesses or (not self._graph.is_directed() and reverse in edge_thicknesses):
if edge in edge_thicknesses:
et = edge_thicknesses[edge]
else:
et = edge_thicknesses[reverse]
e_thick[edge] = et
#
if edge_labels:
c = delc
if edge in edge_label_colors or (not self._graph.is_directed() and reverse in edge_label_colors):
if edge in edge_label_colors:
c = cc.to_rgb(edge_label_colors[edge])
else:
c = cc.to_rgb(edge_label_colors[reverse])
el_color[edge] = c
#
els = dels
if edge in edge_label_slopes or (not self._graph.is_directed() and reverse in edge_label_slopes):
if edge in edge_label_slopes:
els = edge_label_slopes[edge]
else:
els = edge_label_slopes[reverse]
el_slope[edge] = els
#
elp = delp
if edge in edge_label_placements or (not self._graph.is_directed() and reverse in edge_label_placements):
if edge in edge_label_placements:
elp = edge_label_placements[edge]
else:
elp = edge_label_placements[reverse]
el_placement[edge] = elp
##########
### Loops
##########
# Loops can be styled much like any other edge
# By indexing on a pair of two equal vertices
# Though edge thickness is not implemented in tkz-graph!
# Size and direction are unique, and are indexed by the vertex
# rather than on edges.
# Loop placements are pairs of length, compass-point
if customized:
if self._graph.has_loops():
dlp = self.get_option('loop_placement')
loop_placements = self.get_option('loop_placements')
lp_placement = {}
for u in vertex_list:
lp = dlp
if u in loop_placements:
lp = loop_placements[u]
lp_placement[u] = lp
############################
### Build the output string
############################
# s is the eventual tkz string
# Everything should now be in place
# We build a list and then concatenate it as the return value
s = ['\\begin{tikzpicture}\n%\n']
if not customized:
s+=['\\GraphInit[vstyle=', style, ']\n%\n']
# Specify the bounding box for the latex result
# If too big, then the latex paper size may need to be expanded
s+=['\\useasboundingbox (0,0) rectangle (', str(round(scale*graphic_size[0],4)), units, ',', str(round(scale*graphic_size[1],4)), units, ');\n%\n']
# Internal strings representing colors are defined here in custom style
if customized:
# Define all the colors for the vertices: perimeter, fill, label
vertex_color_names = {}
vertex_fill_color_names = {}
vertex_label_color_names = {}
for u in vertex_list:
vertex_color_names[ u ] = 'c' + prefix + str(index_of_vertex[ u ])
s+=['\\definecolor{', vertex_color_names[ u ], '}{rgb}', '{']
s+=[str(round( v_color[u][0],4)), ',']
s+=[str(round( v_color[u][1],4)), ',']
s+=[str(round( v_color[u][2],4)), '}\n']
vertex_fill_color_names[ u ] = 'cf' + prefix + str(index_of_vertex[ u ])
s+=['\\definecolor{', vertex_fill_color_names[ u ], '}{rgb}', '{']
s+=[str(round( vf_color[u][0],4)), ',']
s+=[str(round( vf_color[u][1],4)), ',']
s+=[str(round( vf_color[u][2],4)), '}\n']
if vertex_labels:
vertex_label_color_names[u] = 'cl' + prefix + str(index_of_vertex[ u ])
s+=['\\definecolor{', vertex_label_color_names[ u ], '}{rgb}{']
s+=[str(round( vl_color[u][0],4)), ',']
s+=[str(round( vl_color[u][1],4)), ',']
s+=[str(round( vl_color[u][2],4)), '}\n']
# Define all the colors for the edges: perimeter, fill, label
edge_color_names = {}
edge_fill_color_names = {}
edge_label_color_names = {}
for e in self._graph.edges():
edge = (e[0], e[1])
edge_color_names[edge] = 'c' + prefix + str(index_of_vertex[edge[0]])+ prefix + str(index_of_vertex[edge[1]])
s+=['\\definecolor{', edge_color_names[edge], '}{rgb}{']
s+=[str(round( e_color[edge][0],4)), ',']
s+=[str(round( e_color[edge][1],4)), ',']
s+=[str(round( e_color[edge][2],4)), '}\n']
if edge_fills:
edge_fill_color_names[edge] = 'cf' + prefix + str(index_of_vertex[edge[0]])+ prefix + str(index_of_vertex[edge[1]])
s+=['\\definecolor{', edge_fill_color_names[edge], '}{rgb}{']
s+=[str(round( ef_color[edge][0],4)), ',']
s+=[str(round( ef_color[edge][1],4)), ',']
s+=[str(round( ef_color[edge][2],4)), '}\n']
if edge_labels:
edge_label_color_names[edge] = 'cl' + prefix + str(index_of_vertex[edge[0]])+ prefix + str(index_of_vertex[edge[1]])
s+=['\\definecolor{', edge_label_color_names[edge], '}{rgb}{']
s+=[str(round( el_color[edge][0],4)), ',']
s+=[str(round( el_color[edge][1],4)), ',']
s+=[str(round( el_color[edge][2],4)), '}\n']
s = s+['%\n']
# Create each vertex
for u in vertex_list:
s+=['\\Vertex[']
# colors, shapes, sizes, labels/placement for 'Custom' style
if customized:
s+=['style={'] # begin style list
s+=['minimum size=', str(round(scale*v_size[u],4)), units, ',']
s+=['draw=', vertex_color_names[u], ',']
s+=['fill=', vertex_fill_color_names[u], ',']
if vertex_labels:
s+=['text=', vertex_label_color_names[u], ',']
if v_shape[u] == 'sphere':
s+=['shape=circle,shading=ball,line width=0pt,ball color=', vertex_color_names[u], ',']
else:
s+=['shape=', v_shape[u]]
s+=['},'] # end style list
if vertex_labels:
if vl_placement[u] == 'center':
s+=['LabelOut=false,']
else:
s+=['LabelOut=true,']
s+=['Ldist=', str(round(scale*vl_placement[u][0],4)), units, ',']
s+=['Lpos=',str(round(vl_placement[u][1],4)), ','] # degrees, no units
else:
s+=['NoLabel,']
# vertex label information is available to all pre-built styles
# but may be ignored by the style, so not apparent
if vertex_labels or not customized:
if vertex_labels_math and not (isinstance(u, str) and u[0]=='$' and u[-1]=='$'):
lab = '\\hbox{$%s$}' % latex(u)
else:
lab = '\\hbox{%s}' % u
s+=['L=', lab, ',']
scaled_pos = translate(pos[u])
s+=['x=', str(round(scale*scaled_pos[0],4)), units, ',']
s+=['y=', str(round(scale*scaled_pos[1],4)), units]
s+=[']']
s+=['{', prefix, str(index_of_vertex[u]), '}\n']
s+=['%\n']
# Create each edge or loop
for e in self._graph.edges():
edge = (e[0],e[1])
loop = e[0] == e[1]
if loop:
u=e[0]
s+=['\\Loop[']
if customized:
s+=['dist=', str(round(scale*lp_placement[u][0],4)), units, ',']
s+=['dir=', lp_placement[u][1], ',']
else:
s+=['\\Edge[']
# colors, shapes, sizes, labels/placement for 'Custom' style
if customized:
if not loop: # lw not available for loops!
s+=['lw=', str(round(scale*e_thick[edge],4)), units, ',']
s+=['style={'] # begin style list
if self._graph.is_directed() and not loop:
s+=['post, bend right', ',']
s+=['color=', edge_color_names[edge], ',']
if edge_fills:
s+=['double=', edge_fill_color_names[edge]]
s+=['},'] # end style list
if edge_labels:
s+=['labelstyle={']
if el_slope[edge]:
s+=['sloped,']
if isinstance(el_placement[edge], str):
s+=[el_placement[edge],',']
else:
s+=['pos=', str(round(el_placement[edge],4)), ','] # no units needed
s+=['text=', edge_label_color_names[edge], ',']
s+=['},']
el = self._graph.edge_label(edge[0],edge[1])
if edge_labels_math and not (isinstance(el, str) and el[0]=='$' and el[-1]=='$'):
lab = '\\hbox{$%s$}' % latex(el)
else:
lab = '\\hbox{%s}' % el
s+=['label=', lab, ',']
s+=[']']
if not loop:
s+=['(', prefix, str(index_of_vertex[e[0]]), ')']
s+=['(', prefix, str(index_of_vertex[e[1]]), ')\n']
# Wrap it up
s+=['%\n']
s+=['\\end{tikzpicture}']
return ''.join(s)
|
py | 1a316c219f17df1df7b52fea5cb1549f6bb25d51 | # coding: utf-8
"""
Contain solution for the python/numpy training
"""
__authors__ = ["Pierre Knobel", "Jerome Kieffer", "Henri Payno",
"Armando Sole", "Valentin Valls", "Thomas Vincent"]
__date__ = "18/09/2018"
__license__ = "MIT"
import inspect
import numpy
def show(exercice_name):
function = globals()[exercice_name]
print(inspect.getsource(function))
return function()
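# Usage sketch (assumption: this module is importable, e.g. as `solutions`; the name is
# hypothetical and not part of the original):
#     >>> import solutions
#     >>> solutions.show('ex3_1')   # prints the source of ex3_1 and returns its result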
def ex3_1():
""" Simple example of an element wise comparaison"""
x = numpy.arange(10)
y = numpy.arange(1, 11)
difference = x - y
return difference
def ex3_2():
""" Simple way to compute the difference x[i+1]-x[i] for all the elements
of the 1D array"""
x = numpy.arange(10)
difference = x[1:] - x[:-1]
return difference
def ex4_1():
"""Generate a 1D array of [1..99] then operate a binning 1 2 3 4 -> 1+2 3+4
"""
data = numpy.arange(100) + 1
binned = data[::2] + data[1::2]
return data, binned
def ex4_2():
"""Generate a 2D array of [1..9999] then operate a 2x2 binning
"""
data = numpy.arange(10000).reshape(100, 100)
data = data + 1
binned = data[::2, ::2] + data[::2, 1::2] + data[1::2, ::2] + data[1::2, 1::2]
return data, binned
def ex4_2_alt():
"""Generate a 2D array of [1..9999] then operate a 2x2 binning using numpy
sum and moving the array to 4D
"""
height = 100
width = 100
data = numpy.arange(10000).reshape(height, width)
data = data + 1
reshaped_data = data.reshape(height // 2, 2, width // 2, 2)
binned = reshaped_data.sum(axis=3).sum(axis=1)
return data, binned
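# Illustrative sketch (not part of the original exercises): the same 4D-reshape binning
# applied to a tiny 4x4 array, assuming numpy is imported as above.
#     a = numpy.arange(16).reshape(4, 4)
#     small_binned = a.reshape(2, 2, 2, 2).sum(axis=3).sum(axis=1)   # 2x2 binned result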
def ex5_inefficient_fill(height=1000, width=1000):
"""Inefficient fill using 2 for loops"""
data = numpy.zeros((height, width), dtype=numpy.float64)
for row in range(int(height)):
for col in range(int(width)):
data[row, col] = numpy.cos(row) * numpy.sin(col)
return data
def ex5_naive_fill(height=1000, width=1000):
"""Fill using 2 for loops but pre-computing sin and cos"""
width_sin = numpy.sin(numpy.arange(width))
height_cos = numpy.cos(numpy.arange(height))
data = numpy.zeros((height, width), numpy.float64)
for row in range(int(height)):
for col in range(int(width)):
data[row, col] = height_cos[row] * width_sin[col]
return data
def ex5_clever_fill(height=1000, width=1000):
"""Fill using 2 outer products"""
width_sin = numpy.sin(numpy.arange(width))
height_cos = numpy.cos(numpy.arange(height))
cos_loop = numpy.outer(height_cos, numpy.ones(width))
sin_loop = numpy.outer(numpy.ones(height), width_sin)
return cos_loop * sin_loop
def ex5_practical_fill(height=1000, width=1000):
"""Fill using meshgrid"""
width_sin = numpy.sin(numpy.arange(width))
height_cos = numpy.cos(numpy.arange(height))
sin_loop, cos_loop = numpy.meshgrid(width_sin, height_cos)
return sin_loop * cos_loop
def ex5_optimized_fill(height=1000, width=1000):
"""Fill using outer product"""
width_sin = numpy.sin(numpy.arange(width))
height_cos = numpy.cos(numpy.arange(height))
return numpy.outer(height_cos, width_sin)
def ex5_atleast_2d_fill(height=1000, width=1000):
"""Fill using atleast_2d and transpose"""
sine = numpy.sin(numpy.arange(width))
cosine = numpy.cos(numpy.arange(height))
return numpy.atleast_2d(sine) * numpy.atleast_2d(cosine).T
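# Quick consistency check (illustrative, not part of the original exercises; assumption:
# all ex5_* variants compute the same cos(row) * sin(col) surface):
#     numpy.allclose(ex5_naive_fill(100, 100), ex5_optimized_fill(100, 100))   # -> True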
|
py | 1a316cb812962c576fb86545ba4ffd3c6b54f396 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Diary
# Register your models here.
admin.site.register(Diary)
|
py | 1a316cc75b8d562ca2b18942a82ce440881d1f8c | """ Cisco_IOS_XR_asr9k_lpts_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR asr9k\-lpts package operational data.
This module contains definitions
for the following management objects\:
platform\-lptsp\-ifib\: ASR9K platform ifib operational data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
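# Illustrative read sketch (assumptions: ydk-py services/providers are installed and the
# device address/credentials below are hypothetical; this is not part of the generated
# bindings):
#     from ydk.services import CRUDService
#     from ydk.providers import NetconfServiceProvider
#     provider = NetconfServiceProvider(address='10.0.0.1', username='admin', password='admin')
#     ifib = CRUDService().read(provider, PlatformLptspIfib())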
class PlatformLptspIfib(object):
"""
ASR9K platform ifib operational data
.. attribute:: nodes
List of nodes with platform specific lpts operation data
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lpts_oper.PlatformLptspIfib.Nodes>`
"""
_prefix = 'asr9k-lpts-oper'
_revision = '2015-11-09'
def __init__(self):
self.nodes = PlatformLptspIfib.Nodes()
self.nodes.parent = self
class Nodes(object):
"""
List of nodes with platform specific lpts
operation data
.. attribute:: node
Node with platform specific lpts data
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lpts_oper.PlatformLptspIfib.Nodes.Node>`
"""
_prefix = 'asr9k-lpts-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.node = YList()
self.node.parent = self
self.node.name = 'node'
class Node(object):
"""
Node with platform specific lpts data
.. attribute:: node_name <key>
Node name
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: police
pl\_pifib police data
**type**\: :py:class:`Police <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lpts_oper.PlatformLptspIfib.Nodes.Node.Police>`
.. attribute:: stats
pl\_pifib stats
**type**\: :py:class:`Stats <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lpts_oper.PlatformLptspIfib.Nodes.Node.Stats>`
"""
_prefix = 'asr9k-lpts-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.node_name = None
self.police = PlatformLptspIfib.Nodes.Node.Police()
self.police.parent = self
self.stats = PlatformLptspIfib.Nodes.Node.Stats()
self.stats.parent = self
class Police(object):
"""
pl\_pifib police data
.. attribute:: police_info
Per flow type police info
**type**\: list of :py:class:`PoliceInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_asr9k_lpts_oper.PlatformLptspIfib.Nodes.Node.Police.PoliceInfo>`
"""
_prefix = 'asr9k-lpts-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.police_info = YList()
self.police_info.parent = self
self.police_info.name = 'police_info'
class PoliceInfo(object):
"""
Per flow type police info
.. attribute:: accepted_stats
accepted stats
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: acl_config
acl config
**type**\: int
**range:** 0..255
.. attribute:: acl_str
acl str
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
.. attribute:: avgrate
avgrate
**type**\: int
**range:** 0..4294967295
.. attribute:: avgrate_type
avgrate type
**type**\: str
**length:** 0..50
.. attribute:: burst
burst
**type**\: int
**range:** 0..4294967295
.. attribute:: change_type
change type
**type**\: int
**range:** 0..255
.. attribute:: dropped_stats
dropped stats
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: flow_type
flow type
**type**\: str
**length:** 0..50
.. attribute:: iptos_value
iptos value
**type**\: int
**range:** 0..255
.. attribute:: np
np
**type**\: int
**range:** 0..255
.. attribute:: policer
policer
**type**\: int
**range:** 0..4294967295
.. attribute:: static_avgrate
static avgrate
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'asr9k-lpts-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.accepted_stats = None
self.acl_config = None
self.acl_str = None
self.avgrate = None
self.avgrate_type = None
self.burst = None
self.change_type = None
self.dropped_stats = None
self.flow_type = None
self.iptos_value = None
self.np = None
self.policer = None
self.static_avgrate = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-lpts-oper:police-info'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.accepted_stats is not None:
return True
if self.acl_config is not None:
return True
if self.acl_str is not None:
return True
if self.avgrate is not None:
return True
if self.avgrate_type is not None:
return True
if self.burst is not None:
return True
if self.change_type is not None:
return True
if self.dropped_stats is not None:
return True
if self.flow_type is not None:
return True
if self.iptos_value is not None:
return True
if self.np is not None:
return True
if self.policer is not None:
return True
if self.static_avgrate is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_lpts_oper as meta
return meta._meta_table['PlatformLptspIfib.Nodes.Node.Police.PoliceInfo']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-lpts-oper:police'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.police_info is not None:
for child_ref in self.police_info:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_lpts_oper as meta
return meta._meta_table['PlatformLptspIfib.Nodes.Node.Police']['meta_info']
class Stats(object):
"""
pl\_pifib stats
.. attribute:: accepted
Deleted\-entry accepted packets counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: clear_ts
Statistics clear timestamp
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: dropped
Deleted\-entry dropped packets counter
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: no_stats_mem_err
No statistics memory error
**type**\: int
**range:** 0..18446744073709551615
"""
_prefix = 'asr9k-lpts-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.accepted = None
self.clear_ts = None
self.dropped = None
self.no_stats_mem_err = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-asr9k-lpts-oper:stats'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.accepted is not None:
return True
if self.clear_ts is not None:
return True
if self.dropped is not None:
return True
if self.no_stats_mem_err is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_lpts_oper as meta
return meta._meta_table['PlatformLptspIfib.Nodes.Node.Stats']['meta_info']
@property
def _common_path(self):
if self.node_name is None:
raise YPYModelError('Key property node_name is None')
return '/Cisco-IOS-XR-asr9k-lpts-oper:platform-lptsp-ifib/Cisco-IOS-XR-asr9k-lpts-oper:nodes/Cisco-IOS-XR-asr9k-lpts-oper:node[Cisco-IOS-XR-asr9k-lpts-oper:node-name = ' + str(self.node_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.node_name is not None:
return True
if self.police is not None and self.police._has_data():
return True
if self.stats is not None and self.stats._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_lpts_oper as meta
return meta._meta_table['PlatformLptspIfib.Nodes.Node']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-asr9k-lpts-oper:platform-lptsp-ifib/Cisco-IOS-XR-asr9k-lpts-oper:nodes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.node is not None:
for child_ref in self.node:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_lpts_oper as meta
return meta._meta_table['PlatformLptspIfib.Nodes']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-asr9k-lpts-oper:platform-lptsp-ifib'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.nodes is not None and self.nodes._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_asr9k_lpts_oper as meta
return meta._meta_table['PlatformLptspIfib']['meta_info']
|
py | 1a316d873114c7f4af2d1595d865a50913124a4e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0302
"""
VirtualBox Validation Kit - Guest Control Tests.
"""
__copyright__ = \
"""
Copyright (C) 2010-2018 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 72192 $"
# Disable bitching about too many arguments per function.
# pylint: disable=R0913
# Disable bitching about semicolons at the end of lines.
# pylint: disable=W0301
## @todo Convert map() usage to a cleaner alternative Python now offers.
# pylint: disable=W0141
## @todo Convert the context/test classes into named tuples. Not in the mood right now, so
# disabling it.
# pylint: disable=R0903
# Standard Python imports.
from array import array
import errno
import os
import random
import string # pylint: disable=W0402
import struct
import sys
import time
# Only the main script needs to modify the path.
try: __file__
except: __file__ = sys.argv[0];
g_ksValidationKitDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))));
sys.path.append(g_ksValidationKitDir);
# Validation Kit imports.
from testdriver import reporter;
from testdriver import base;
from testdriver import vbox;
from testdriver import vboxcon;
from testdriver import vboxwrappers;
# Python 3 hacks:
if sys.version_info[0] >= 3:
long = int # pylint: disable=W0622,C0103
class GuestStream(bytearray):
"""
Class for handling a guest process input/output stream.
"""
def appendStream(self, stream, convertTo='<b'):
"""
Appends and converts a byte sequence to this object;
handy for displaying a guest stream.
"""
self.extend(struct.pack(convertTo, stream));
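# Illustrative note (not from the original source): with the default '<b' conversion a
# single signed byte value is appended, e.g.
#     oStream = GuestStream();
#     oStream.appendStream(0x41);   # oStream now contains b'A'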
class tdCtxTest(object):
"""
Provides the actual test environment. Should be kept
as generic as possible.
"""
def __init__(self, oSession, oTxsSession, oTestVm): # pylint: disable=W0613
## The desired Main API result.
self.fRc = False;
## IGuest reference.
self.oGuest = oSession.o.console.guest;
# Rest not used (yet).
class tdCtxCreds(object):
"""
Provides credentials to pass to the guest.
"""
def __init__(self, sUser = None, sPassword = None, sDomain = None, oTestVm = None):
# If no user is specified, select the default user and
# password for the given test VM.
if sUser is None:
assert sPassword is None;
assert sDomain is None;
assert oTestVm is not None;
## @todo fix this so all VMs have several usable test users with the same passwords (or none).
sUser = 'test';
sPassword = 'password';
if oTestVm.isWindows():
#sPassword = ''; # stupid config mistake.
sPassword = 'password';
sUser = 'Administrator';
sDomain = '';
self.sUser = sUser;
self.sPassword = sPassword if sPassword is not None else '';
self.sDomain = sDomain if sDomain is not None else '';
class tdTestGuestCtrlBase(object):
"""
Base class for all guest control tests.
Note: This test ASSUMES that working Guest Additions
were installed and running on the guest to be tested.
"""
def __init__(self):
self.oTest = None;
self.oCreds = None;
self.timeoutMS = 30 * 1000; # 30s timeout
## IGuestSession reference or None.
self.oGuestSession = None;
def setEnvironment(self, oSession, oTxsSession, oTestVm):
"""
Sets the test environment required for this test.
"""
self.oTest = tdCtxTest(oSession, oTxsSession, oTestVm);
return self.oTest;
def createSession(self, sName):
"""
Creates (opens) a guest session.
Returns (True, IGuestSession) on success or (False, None) on failure.
"""
if self.oGuestSession is None:
if sName is None:
sName = "<untitled>";
try:
reporter.log('Creating session "%s" ...' % (sName,));
self.oGuestSession = self.oTest.oGuest.createSession(self.oCreds.sUser,
self.oCreds.sPassword,
self.oCreds.sDomain,
sName);
except:
# Just log, don't assume an error here (will be done in the main loop then).
reporter.logXcpt('Creating a guest session "%s" failed; sUser="%s", pw="%s", sDomain="%s":'
% (sName, self.oCreds.sUser, self.oCreds.sPassword, self.oCreds.sDomain));
return (False, None);
try:
reporter.log('Waiting for session "%s" to start within %dms...' % (sName, self.timeoutMS));
fWaitFor = [ vboxcon.GuestSessionWaitForFlag_Start ];
waitResult = self.oGuestSession.waitForArray(fWaitFor, self.timeoutMS);
#
# Be nice to Guest Additions < 4.3: They don't support session handling and
# therefore return WaitFlagNotSupported.
#
if waitResult != vboxcon.GuestSessionWaitResult_Start \
and waitResult != vboxcon.GuestSessionWaitResult_WaitFlagNotSupported:
# Just log, don't assume an error here (will be done in the main loop then).
reporter.log('Session did not start successfully, returned wait result: %d' \
% (waitResult,));
return (False, None);
reporter.log('Session "%s" successfully started' % (sName,));
except:
# Just log, don't assume an error here (will be done in the main loop then).
reporter.logXcpt('Waiting for guest session "%s" (usr=%s;pw=%s;dom=%s) to start failed:'
% (sName, self.oCreds.sUser, self.oCreds.sPassword, self.oCreds.sDomain,));
return (False, None);
else:
reporter.log('Warning: Session already set; this is probably not what you want');
return (True, self.oGuestSession);
def setSession(self, oGuestSession):
"""
Sets the current guest session and closes
an old one if necessary.
"""
if self.oGuestSession is not None:
self.closeSession();
self.oGuestSession = oGuestSession;
return self.oGuestSession;
def closeSession(self):
"""
Closes the guest session.
"""
if self.oGuestSession is not None:
sName = self.oGuestSession.name;
try:
reporter.log('Closing session "%s" ...' % (sName,));
self.oGuestSession.close();
self.oGuestSession = None;
except:
# Just log, don't assume an error here (will be done in the main loop then).
reporter.logXcpt('Closing guest session "%s" failed:' % (sName,));
return False;
return True;
class tdTestCopyFrom(tdTestGuestCtrlBase):
"""
Test for copying files from the guest to the host.
"""
def __init__(self, sSrc = "", sDst = "", sUser = "", sPassword = "", aFlags = None):
tdTestGuestCtrlBase.__init__(self);
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain = "");
self.sSrc = sSrc;
self.sDst = sDst;
self.aFlags = aFlags;
class tdTestCopyTo(tdTestGuestCtrlBase):
"""
Test for copying files from the host to the guest.
"""
def __init__(self, sSrc = "", sDst = "", sUser = "", sPassword = "", aFlags = None):
tdTestGuestCtrlBase.__init__(self);
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain = "");
self.sSrc = sSrc;
self.sDst = sDst;
self.aFlags = aFlags;
class tdTestDirCreate(tdTestGuestCtrlBase):
"""
Test for directoryCreate call.
"""
def __init__(self, sDirectory = "", sUser = "", sPassword = "", fMode = 0, aFlags = None):
tdTestGuestCtrlBase.__init__(self);
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain = "");
self.sDirectory = sDirectory;
self.fMode = fMode;
self.aFlags = aFlags;
class tdTestDirCreateTemp(tdTestGuestCtrlBase):
"""
Test for the directoryCreateTemp call.
"""
def __init__(self, sDirectory = "", sTemplate = "", sUser = "", sPassword = "", fMode = 0, fSecure = False):
tdTestGuestCtrlBase.__init__(self);
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain = "");
self.sDirectory = sDirectory;
self.sTemplate = sTemplate;
self.fMode = fMode;
self.fSecure = fSecure;
class tdTestDirOpen(tdTestGuestCtrlBase):
"""
Test for the directoryOpen call.
"""
def __init__(self, sDirectory = "", sUser = "", sPassword = "",
sFilter = "", aFlags = None):
tdTestGuestCtrlBase.__init__(self);
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain = "");
self.sDirectory = sDirectory;
self.sFilter = sFilter;
self.aFlags = aFlags or [];
class tdTestDirRead(tdTestDirOpen):
"""
Test for the opening, reading and closing a certain directory.
"""
def __init__(self, sDirectory = "", sUser = "", sPassword = "",
sFilter = "", aFlags = None):
tdTestDirOpen.__init__(self, sDirectory, sUser, sPassword, sFilter, aFlags);
class tdTestExec(tdTestGuestCtrlBase):
"""
Specifies exactly one guest control execution test.
Has a default timeout of 5 minutes (for safety).
"""
def __init__(self, sCmd = "", aArgs = None, aEnv = None, \
aFlags = None, timeoutMS = 5 * 60 * 1000, \
sUser = "", sPassword = "", sDomain = "", \
fWaitForExit = True):
tdTestGuestCtrlBase.__init__(self);
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain);
self.sCmd = sCmd;
self.aArgs = aArgs if aArgs is not None else [sCmd,];
self.aEnv = aEnv;
self.aFlags = aFlags or [];
self.timeoutMS = timeoutMS;
self.fWaitForExit = fWaitForExit;
self.uExitStatus = 0;
self.iExitCode = 0;
self.cbStdOut = 0;
self.cbStdErr = 0;
self.sBuf = '';
class tdTestFileExists(tdTestGuestCtrlBase):
"""
Test for the file exists API call (fileExists).
"""
def __init__(self, sFile = "", sUser = "", sPassword = ""):
tdTestGuestCtrlBase.__init__(self);
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain = "");
self.sFile = sFile;
class tdTestFileRemove(tdTestGuestCtrlBase):
"""
Test querying guest file information.
"""
def __init__(self, sFile = "", sUser = "", sPassword = ""):
tdTestGuestCtrlBase.__init__(self);
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain = "");
self.sFile = sFile;
class tdTestFileStat(tdTestGuestCtrlBase):
"""
Test querying guest file information.
"""
def __init__(self, sFile = "", sUser = "", sPassword = "", cbSize = 0, eFileType = 0):
tdTestGuestCtrlBase.__init__(self);
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain = "");
self.sFile = sFile;
self.cbSize = cbSize;
self.eFileType = eFileType;
class tdTestFileIO(tdTestGuestCtrlBase):
"""
Test for the IGuestFile object.
"""
def __init__(self, sFile = "", sUser = "", sPassword = ""):
tdTestGuestCtrlBase.__init__(self);
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain = "");
self.sFile = sFile;
class tdTestFileQuerySize(tdTestGuestCtrlBase):
"""
Test for the file size query API call (fileQuerySize).
"""
def __init__(self, sFile = "", sUser = "", sPassword = ""):
tdTestGuestCtrlBase.__init__(self);
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain = "");
self.sFile = sFile;
class tdTestFileReadWrite(tdTestGuestCtrlBase):
"""
Tests reading from guest files.
"""
def __init__(self, sFile = "", sUser = "", sPassword = "",
sOpenMode = "r", sDisposition = "",
sSharingMode = "",
lCreationMode = 0, cbOffset = 0, cbToReadWrite = 0,
aBuf = None):
tdTestGuestCtrlBase.__init__(self);
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain = "");
self.sFile = sFile;
self.sOpenMode = sOpenMode;
self.sDisposition = sDisposition;
self.sSharingMode = sSharingMode;
self.lCreationMode = lCreationMode;
self.cbOffset = cbOffset;
self.cbToReadWrite = cbToReadWrite;
self.aBuf = aBuf;
def getOpenAction(self):
""" Converts string disposition to open action enum. """
if self.sDisposition == 'oe': return vboxcon.FileOpenAction_OpenExisting;
if self.sDisposition == 'oc': return vboxcon.FileOpenAction_OpenOrCreate;
if self.sDisposition == 'ce': return vboxcon.FileOpenAction_CreateNew;
if self.sDisposition == 'ca': return vboxcon.FileOpenAction_CreateOrReplace;
if self.sDisposition == 'ot': return vboxcon.FileOpenAction_OpenExistingTruncated;
if self.sDisposition == 'oa': return vboxcon.FileOpenAction_AppendOrCreate;
raise base.GenError(self.sDisposition);
def getAccessMode(self):
""" Converts open mode to access mode enum. """
if self.sOpenMode == 'r': return vboxcon.FileOpenMode_ReadOnly;
if self.sOpenMode == 'w': return vboxcon.FileOpenMode_WriteOnly;
if self.sOpenMode == 'w+': return vboxcon.FileOpenMode_ReadWrite;
if self.sOpenMode == 'r+': return vboxcon.FileOpenMode_ReadWrite;
raise base.GenError(self.sOpenMode);
def getSharingMode(self):
""" Converts the sharing mode. """
return vboxcon.FileSharingMode_All;
class tdTestSession(tdTestGuestCtrlBase):
"""
Test the guest session handling.
"""
def __init__(self, sUser = "", sPassword = "", sDomain = "", \
sSessionName = ""):
tdTestGuestCtrlBase.__init__(self);
self.sSessionName = sSessionName;
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain);
def getSessionCount(self, oVBoxMgr):
"""
Helper for returning the number of currently
opened guest sessions of a VM.
"""
if self.oTest.oGuest is None:
return 0;
aoSession = oVBoxMgr.getArray(self.oTest.oGuest, 'sessions')
return len(aoSession);
class tdTestSessionEx(tdTestGuestCtrlBase):
"""
Test the guest session.
"""
def __init__(self, aoSteps = None, enmUser = None):
tdTestGuestCtrlBase.__init__(self);
assert enmUser is None; # For later.
self.enmUser = enmUser;
self.aoSteps = aoSteps if aoSteps is not None else [];
def execute(self, oTstDrv, oVmSession, oTxsSession, oTestVm, sMsgPrefix):
"""
Executes the test.
"""
#
# Create a session.
#
assert self.enmUser is None; # For later.
self.oCreds = tdCtxCreds(oTestVm = oTestVm);
self.setEnvironment(oVmSession, oTxsSession, oTestVm);
reporter.log2('%s: %s steps' % (sMsgPrefix, len(self.aoSteps),));
fRc, oCurSession = self.createSession(sMsgPrefix);
if fRc is True:
#
# Execute the tests.
#
try:
fRc = self.executeSteps(oTstDrv, oCurSession, sMsgPrefix);
except:
reporter.errorXcpt('%s: Unexpected exception executing test steps' % (sMsgPrefix,));
fRc = False;
fRc2 = self.closeSession();
if fRc2 is False:
reporter.error('%s: Session could not be closed' % (sMsgPrefix,));
fRc = False;
else:
reporter.error('%s: Session creation failed' % (sMsgPrefix,));
fRc = False;
return fRc;
def executeSteps(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
"""
Executes just the steps.
Returns True on success, False on test failure.
"""
fRc = True;
for (i, oStep) in enumerate(self.aoSteps):
fRc2 = oStep.execute(oTstDrv, oGstCtrlSession, sMsgPrefix + ', step #%d' % i);
if fRc2 is True:
pass;
elif fRc2 is None:
reporter.log('skipping remaining %d steps' % (len(self.aoSteps) - i - 1,));
break;
else:
fRc = False;
return fRc;
@staticmethod
def executeListTestSessions(aoTests, oTstDrv, oVmSession, oTxsSession, oTestVm, sMsgPrefix):
"""
Works thru a list of tdTestSessionEx object.
"""
fRc = True;
for (i, oCurTest) in enumerate(aoTests):
try:
fRc2 = oCurTest.execute(oTstDrv, oVmSession, oTxsSession, oTestVm, '%s, test %#d' % (sMsgPrefix, i,));
if fRc2 is not True:
fRc = False;
except:
reporter.errorXcpt('Unexpected exception executing test #%d' % (i,));
fRc = False;
return (fRc, oTxsSession);
class tdSessionStepBase(object):
"""
Base class for the guest control session test steps.
"""
def execute(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
"""
Executes the test step.
Returns True on success.
Returns False on failure (must be reported as error).
Returns None if to skip the remaining steps.
"""
reporter.error('%s: Missing execute implementation: %s' % (sMsgPrefix, self,));
_ = oTstDrv;
_ = oGstCtrlSession;
return False;
class tdStepRequireMinimumApiVer(tdSessionStepBase):
"""
Special test step which will cause executeSteps to skip the remaining steps
if the VBox API is too old.
"""
def __init__(self, fpMinApiVer):
self.fpMinApiVer = fpMinApiVer;
def execute(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
""" Returns None if API version is too old, otherwise True. """
if oTstDrv.fpApiVer >= self.fpMinApiVer:
return True;
_ = oGstCtrlSession;
_ = sMsgPrefix;
return None; # Special return value. Don't use elsewhere.
#
# Scheduling Environment Changes with the Guest Control Session.
#
class tdStepSessionSetEnv(tdSessionStepBase):
"""
Guest session environment: schedule putenv
"""
def __init__(self, sVar, sValue, hrcExpected = 0):
self.sVar = sVar;
self.sValue = sValue;
self.hrcExpected = hrcExpected;
def execute(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
"""
Executes the step.
Returns True on success, False on test failure.
"""
reporter.log2('tdStepSessionSetEnv: sVar=%s sValue=%s hrcExpected=%#x' % (self.sVar, self.sValue, self.hrcExpected,));
try:
if oTstDrv.fpApiVer >= 5.0:
oGstCtrlSession.environmentScheduleSet(self.sVar, self.sValue);
else:
oGstCtrlSession.environmentSet(self.sVar, self.sValue);
except vbox.ComException as oXcpt:
# Is this an expected failure?
if vbox.ComError.equal(oXcpt, self.hrcExpected):
return True;
reporter.errorXcpt('%s: Expected hrc=%#x (%s) got %#x (%s) instead (setenv %s=%s)'
% (sMsgPrefix, self.hrcExpected, vbox.ComError.toString(self.hrcExpected),
vbox.ComError.getXcptResult(oXcpt),
vbox.ComError.toString(vbox.ComError.getXcptResult(oXcpt)),
self.sVar, self.sValue,));
return False;
except:
reporter.errorXcpt('%s: Unexpected exception in tdStepSessionSetEnv::execute (%s=%s)'
% (sMsgPrefix, self.sVar, self.sValue,));
return False;
# Should we succeed?
if self.hrcExpected != 0:
reporter.error('%s: Expected hrcExpected=%#x, got S_OK (putenv %s=%s)'
% (sMsgPrefix, self.hrcExpected, self.sVar, self.sValue,));
return False;
return True;
class tdStepSessionUnsetEnv(tdSessionStepBase):
"""
Guest session environment: schedule unset.
"""
def __init__(self, sVar, hrcExpected = 0):
self.sVar = sVar;
self.hrcExpected = hrcExpected;
def execute(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
"""
Executes the step.
Returns True on success, False on test failure.
"""
reporter.log2('tdStepSessionUnsetEnv: sVar=%s hrcExpected=%#x' % (self.sVar, self.hrcExpected,));
try:
if oTstDrv.fpApiVer >= 5.0:
oGstCtrlSession.environmentScheduleUnset(self.sVar);
else:
oGstCtrlSession.environmentUnset(self.sVar);
except vbox.ComException as oXcpt:
# Is this an expected failure?
if vbox.ComError.equal(oXcpt, self.hrcExpected):
return True;
reporter.errorXcpt('%s: Expected hrc=%#x (%s) got %#x (%s) instead (unsetenv %s)'
% (sMsgPrefix, self.hrcExpected, vbox.ComError.toString(self.hrcExpected),
vbox.ComError.getXcptResult(oXcpt),
vbox.ComError.toString(vbox.ComError.getXcptResult(oXcpt)),
self.sVar,));
return False;
except:
reporter.errorXcpt('%s: Unexpected exception in tdStepSessionUnsetEnv::execute (%s)'
% (sMsgPrefix, self.sVar,));
return False;
# Should we succeed?
if self.hrcExpected != 0:
reporter.error('%s: Expected hrcExpected=%#x, got S_OK (unsetenv %s)'
% (sMsgPrefix, self.hrcExpected, self.sVar,));
return False;
return True;
class tdStepSessionBulkEnv(tdSessionStepBase):
"""
Guest session environment: Bulk environment changes.
"""
def __init__(self, asEnv = None, hrcExpected = 0):
self.asEnv = asEnv if asEnv is not None else [];
self.hrcExpected = hrcExpected;
def execute(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
"""
Executes the step.
Returns True on success, False on test failure.
"""
reporter.log2('tdStepSessionBulkEnv: asEnv=%s hrcExpected=%#x' % (self.asEnv, self.hrcExpected,));
try:
if oTstDrv.fpApiVer >= 5.0:
oTstDrv.oVBoxMgr.setArray(oGstCtrlSession, 'environmentChanges', self.asEnv);
else:
oTstDrv.oVBoxMgr.setArray(oGstCtrlSession, 'environment', self.asEnv);
except vbox.ComException as oXcpt:
# Is this an expected failure?
if vbox.ComError.equal(oXcpt, self.hrcExpected):
return True;
reporter.errorXcpt('%s: Expected hrc=%#x (%s) got %#x (%s) instead (asEnv=%s)'
% (sMsgPrefix, self.hrcExpected, vbox.ComError.toString(self.hrcExpected),
vbox.ComError.getXcptResult(oXcpt),
vbox.ComError.toString(vbox.ComError.getXcptResult(oXcpt)),
self.asEnv,));
return False;
except:
reporter.errorXcpt('%s: Unexpected exception writing the environmentChanges property (asEnv=%s).'
% (sMsgPrefix, self.asEnv));
return False;
return True;
class tdStepSessionClearEnv(tdStepSessionBulkEnv):
"""
Guest session environment: clears the scheduled environment changes.
"""
def __init__(self):
tdStepSessionBulkEnv.__init__(self);
class tdStepSessionCheckEnv(tdSessionStepBase):
"""
Check the currently scheduled environment changes of a guest control session.
"""
def __init__(self, asEnv = None):
self.asEnv = asEnv if asEnv is not None else [];
def execute(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
"""
Executes the step.
Returns True on success, False on test failure.
"""
reporter.log2('tdStepSessionCheckEnv: asEnv=%s' % (self.asEnv,));
#
# Get the environment change list.
#
try:
if oTstDrv.fpApiVer >= 5.0:
asCurEnv = oTstDrv.oVBoxMgr.getArray(oGstCtrlSession, 'environmentChanges');
else:
asCurEnv = oTstDrv.oVBoxMgr.getArray(oGstCtrlSession, 'environment');
except:
reporter.errorXcpt('%s: Unexpected exception reading the environmentChanges property.' % (sMsgPrefix,));
return False;
#
# Compare it with the expected one by trying to remove each expected value
# and then listing anything unexpected that remains.
#
fRc = True;
asCopy = list(asCurEnv); # just in case asCurEnv is immutable
for sExpected in self.asEnv:
try:
asCopy.remove(sExpected);
except:
reporter.error('%s: Expected "%s" to be in the resulting environment' % (sMsgPrefix, sExpected,));
fRc = False;
for sUnexpected in asCopy:
reporter.error('%s: Unexpected "%s" in the resulting environment' % (sMsgPrefix, sUnexpected,));
fRc = False;
if fRc is not True:
reporter.log2('%s: Current environment: %s' % (sMsgPrefix, asCurEnv));
return fRc;
#
# File system object statistics (i.e. stat()).
#
class tdStepStat(tdSessionStepBase):
"""
Stats a file system object.
"""
def __init__(self, sPath, hrcExpected = 0, fFound = True, fFollowLinks = True, enmType = None):
self.sPath = sPath;
self.hrcExpected = hrcExpected;
self.fFound = fFound;
self.fFollowLinks = fFollowLinks;
self.enmType = enmType if enmType is not None else vboxcon.FsObjType_File;
self.cbExactSize = None;
self.cbMinSize = None;
def execute(self, oTstDrv, oGstCtrlSession, sMsgPrefix):
"""
Execute the test step.
"""
reporter.log2('tdStepStat: sPath=%s enmType=%s hrcExpected=%s fFound=%s fFollowLinks=%s'
% (self.sPath, self.enmType, self.hrcExpected, self.fFound, self.fFollowLinks,));
# Don't execute non-file tests on older VBox versions.
if oTstDrv.fpApiVer >= 5.0 or self.enmType == vboxcon.FsObjType_File or not self.fFound:
#
# Call the API.
#
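# Note: fsObjQueryInfo (5.0+) takes an extra fFollowLinks flag; the older
#       fileQueryInfo API has no symlink-following parameter.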
try:
if oTstDrv.fpApiVer >= 5.0:
oFsInfo = oGstCtrlSession.fsObjQueryInfo(self.sPath, self.fFollowLinks);
else:
oFsInfo = oGstCtrlSession.fileQueryInfo(self.sPath);
except vbox.ComException as oXcpt:
## @todo: The error reporting in the API just plain sucks! Most of the errors are
## VBOX_E_IPRT_ERROR and there seems to be no way to distinguish between
## non-existing files/path and a lot of other errors. Fix API and test!
if not self.fFound:
return True;
if vbox.ComError.equal(oXcpt, self.hrcExpected): # Is this an expected failure?
return True;
return reporter.errorXcpt('%s: Unexpected exception for existing path "%s" (enmType=%s, hrcExpected=%s):'
% (sMsgPrefix, self.sPath, self.enmType, self.hrcExpected,));
except:
return reporter.errorXcpt('%s: Unexpected exception in tdStepStat::execute (%s)'
% (sMsgPrefix, self.sPath,));
if oFsInfo is None:
return reporter.error('%s: "%s" got None instead of IFsObjInfo instance!' % (sMsgPrefix, self.sPath,));
#
# Check type expectations.
#
try:
enmType = oFsInfo.type;
except:
return reporter.errorXcpt('%s: Unexpected exception in reading "IFsObjInfo::type"' % (sMsgPrefix,));
if enmType != self.enmType:
return reporter.error('%s: "%s" has type %s, expected %s'
% (sMsgPrefix, self.sPath, enmType, self.enmType));
#
# Check size expectations.
# Note! This is a unicode string here on Windows, for some reason
#       (long long mapping perhaps?).
#
try:
cbObject = long(oFsInfo.objectSize);
except:
return reporter.errorXcpt('%s: Unexpected exception in reading "IFsObjInfo::objectSize"'
% (sMsgPrefix,));
if self.cbExactSize is not None \
and cbObject != self.cbExactSize:
return reporter.error('%s: "%s" has size %s bytes, expected %s bytes'
% (sMsgPrefix, self.sPath, cbObject, self.cbExactSize));
if self.cbMinSize is not None \
and cbObject < self.cbMinSize:
return reporter.error('%s: "%s" has size %s bytes, expected as least %s bytes'
% (sMsgPrefix, self.sPath, cbObject, self.cbMinSize));
return True;
class tdStepStatDir(tdStepStat):
""" Checks for an existing directory. """
def __init__(self, sDirPath):
tdStepStat.__init__(self, sPath = sDirPath, enmType = vboxcon.FsObjType_Directory);
class tdStepStatFile(tdStepStat):
""" Checks for an existing file """
def __init__(self, sFilePath):
tdStepStat.__init__(self, sPath = sFilePath, enmType = vboxcon.FsObjType_File);
class tdStepStatFileSize(tdStepStat):
""" Checks for an existing file of a given expected size.. """
def __init__(self, sFilePath, cbExactSize = 0):
tdStepStat.__init__(self, sPath = sFilePath, enmType = vboxcon.FsObjType_File);
self.cbExactSize = cbExactSize;
class tdStepStatFileNotFound(tdStepStat):
""" Checks for an existing directory. """
def __init__(self, sPath):
tdStepStat.__init__(self, sPath = sPath, fFound = False);
class tdStepStatPathNotFound(tdStepStat):
""" Checks for an existing directory. """
def __init__(self, sPath):
tdStepStat.__init__(self, sPath = sPath, fFound = False);
#
#
#
class tdTestSessionFileRefs(tdTestGuestCtrlBase):
"""
Tests session file (IGuestFile) reference counting.
"""
def __init__(self, cRefs = 0):
tdTestGuestCtrlBase.__init__(self);
self.cRefs = cRefs;
class tdTestSessionDirRefs(tdTestGuestCtrlBase):
"""
Tests session directory (IGuestDirectory) reference counting.
"""
def __init__(self, cRefs = 0):
tdTestGuestCtrlBase.__init__(self);
self.cRefs = cRefs;
class tdTestSessionProcRefs(tdTestGuestCtrlBase):
"""
Tests session process (IGuestProcess) reference counting.
"""
def __init__(self, cRefs = 0):
tdTestGuestCtrlBase.__init__(self);
self.cRefs = cRefs;
class tdTestUpdateAdditions(tdTestGuestCtrlBase):
"""
Test updating the Guest Additions inside the guest.
"""
def __init__(self, sSrc = "", aArgs = None, aFlags = None,
sUser = "", sPassword = "", sDomain = ""):
tdTestGuestCtrlBase.__init__(self);
self.oCreds = tdCtxCreds(sUser, sPassword, sDomain);
self.sSrc = sSrc;
self.aArgs = aArgs;
self.aFlags = aFlags;
class tdTestResult(object):
"""
Base class for test results.
"""
def __init__(self, fRc = False):
## The overall test result.
self.fRc = fRc;
class tdTestResultDirRead(tdTestResult):
"""
Test result for reading guest directories.
"""
def __init__(self, fRc = False,
numFiles = 0, numDirs = 0):
tdTestResult.__init__(self, fRc = fRc);
self.numFiles = numFiles;
self.numDirs = numDirs;
class tdTestResultExec(tdTestResult):
"""
Holds a guest process execution test result,
including the exit code, status + aFlags.
"""
def __init__(self, fRc = False, \
uExitStatus = 500, iExitCode = 0, \
sBuf = None, cbBuf = 0, \
cbStdOut = 0, cbStdErr = 0):
tdTestResult.__init__(self);
## The overall test result.
self.fRc = fRc;
## Process exit stuff.
self.uExitStatus = uExitStatus;
self.iExitCode = iExitCode;
## Desired buffer length returned back from stdout/stderr.
self.cbBuf = cbBuf;
## Desired buffer result from stdout/stderr. Use with caution!
self.sBuf = sBuf;
self.cbStdOut = cbStdOut;
self.cbStdErr = cbStdErr;
class tdTestResultFileStat(tdTestResult):
"""
Test result for stat'ing guest files.
"""
def __init__(self, fRc = False,
cbSize = 0, eFileType = 0):
tdTestResult.__init__(self, fRc = fRc);
self.cbSize = cbSize;
self.eFileType = eFileType;
## @todo Add more information.
class tdTestResultFileReadWrite(tdTestResult):
"""
Test result for reading + writing guest directories.
"""
def __init__(self, fRc = False,
cbProcessed = 0, cbOffset = 0, aBuf = None):
tdTestResult.__init__(self, fRc = fRc);
self.cbProcessed = cbProcessed;
self.cbOffset = cbOffset;
self.aBuf = aBuf;
class tdTestResultSession(tdTestResult):
"""
Test result for guest session counts.
"""
def __init__(self, fRc = False, cNumSessions = 0):
tdTestResult.__init__(self, fRc = fRc);
self.cNumSessions = cNumSessions;
class SubTstDrvAddGuestCtrl(base.SubTestDriverBase):
"""
Sub-test driver for executing guest control (VBoxService, IGuest) tests.
"""
def __init__(self, oTstDrv):
base.SubTestDriverBase.__init__(self, 'add-guest-ctrl', oTstDrv);
## @todo base.TestBase.
self.asTestsDef = \
[
'session_basic', 'session_env', 'session_file_ref', 'session_dir_ref', 'session_proc_ref',
'exec_basic', 'exec_errorlevel', 'exec_timeout',
'dir_create', 'dir_create_temp', 'dir_read',
'file_remove', 'file_stat', 'file_read', 'file_write',
'copy_to', 'copy_from',
'update_additions'
];
self.asTests = self.asTestsDef;
def parseOption(self, asArgs, iArg): # pylint: disable=R0912,R0915
if asArgs[iArg] == '--add-guest-ctrl-tests':
iArg += 1;
if asArgs[iArg] == 'all': # Nice for debugging scripts.
self.asTests = self.asTestsDef;
return iArg + 1;
iNext = self.oTstDrv.requireMoreArgs(1, asArgs, iArg);
self.asTests = asArgs[iArg].split(':');
for s in self.asTests:
if s not in self.asTestsDef:
raise base.InvalidOption('The "--add-guest-ctrl-tests" value "%s" is not valid; valid values are: %s' \
% (s, ' '.join(self.asTestsDef)));
return iNext;
return iArg;
def showUsage(self):
base.SubTestDriverBase.showUsage(self);
reporter.log(' --add-guest-ctrl-tests <s1[:s2[:]]>');
reporter.log(' Default: %s (all)' % (':'.join(self.asTestsDef)));
return True;
def testIt(self, oTestVm, oSession, oTxsSession):
"""
Executes the test.
Returns fRc, oTxsSession. The latter may have changed.
"""
reporter.log("Active tests: %s" % (self.asTests,));
fRc = True;
# Do the testing.
reporter.testStart('Session Basics');
fSkip = 'session_basic' not in self.asTests;
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlSession(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
reporter.testStart('Session Environment');
fSkip = 'session_env' not in self.asTests or fRc is False;
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlSessionEnvironment(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
reporter.testStart('Session File References');
fSkip = 'session_file_ref' not in self.asTests;
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlSessionFileRefs(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
## @todo Implement this.
#reporter.testStart('Session Directory References');
#fSkip = 'session_dir_ref' not in self.asTests;
#if fSkip is False:
# fRc, oTxsSession = self.testGuestCtrlSessionDirRefs(oSession, oTxsSession, oTestVm);
#reporter.testDone(fSkip);
reporter.testStart('Session Process References');
fSkip = 'session_proc_ref' not in self.asTests or fRc is False;
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlSessionProcRefs(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
reporter.testStart('Execution');
fSkip = 'exec_basic' not in self.asTests or fRc is False;
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlExec(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
reporter.testStart('Execution Error Levels');
fSkip = 'exec_errorlevel' not in self.asTests or fRc is False;
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlExecErrorLevel(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
reporter.testStart('Execution Timeouts');
fSkip = 'exec_timeout' not in self.asTests or fRc is False;
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlExecTimeout(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
reporter.testStart('Creating directories');
fSkip = 'dir_create' not in self.asTests or fRc is False;
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlDirCreate(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
reporter.testStart('Creating temporary directories');
fSkip = 'dir_create_temp' not in self.asTests or fRc is False;
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlDirCreateTemp(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
reporter.testStart('Reading directories');
fSkip = 'dir_read' not in self.asTests or fRc is False;
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlDirRead(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
reporter.testStart('Copy to guest');
fSkip = 'copy_to' not in self.asTests or fRc is False;
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlCopyTo(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
reporter.testStart('Copy from guest');
fSkip = 'copy_from' not in self.asTests or fRc is False;
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlCopyFrom(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
reporter.testStart('Removing files');
fSkip = 'file_remove' not in self.asTests or fRc is False;
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlFileRemove(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
reporter.testStart('Querying file information (stat)');
fSkip = 'file_stat' not in self.asTests or fRc is False;
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlFileStat(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
# FIXME: Failing tests.
# reporter.testStart('File read');
# fSkip = 'file_read' not in self.asTests or fRc is False;
# if fSkip is False:
# fRc, oTxsSession = self.testGuestCtrlFileRead(oSession, oTxsSession, oTestVm);
# reporter.testDone(fSkip);
# reporter.testStart('File write');
# fSkip = 'file_write' not in self.asTests or fRc is False;
# if fSkip is False:
# fRc, oTxsSession = self.testGuestCtrlFileWrite(oSession, oTxsSession, oTestVm);
# reporter.testDone(fSkip);
reporter.testStart('Updating Guest Additions');
fSkip = 'update_additions' not in self.asTests or fRc is False;
# Skip test for updating Guest Additions if we run on a too old (Windows) guest.
fSkip = fSkip or oTestVm.sKind in ('WindowsNT4', 'Windows2000', 'WindowsXP', 'Windows2003');
if fSkip is False:
fRc, oTxsSession = self.testGuestCtrlUpdateAdditions(oSession, oTxsSession, oTestVm);
reporter.testDone(fSkip);
return (fRc, oTxsSession);
def gctrlCopyFileFrom(self, oGuestSession, sSrc, sDst, aFlags):
"""
Helper function to copy a single file from the guest to the host.
"""
fRc = True; # Be optimistic.
try:
reporter.log2('Copying guest file "%s" to host "%s"' % (sSrc, sDst));
if self.oTstDrv.fpApiVer >= 5.0:
curProgress = oGuestSession.fileCopyFromGuest(sSrc, sDst, aFlags);
else:
curProgress = oGuestSession.copyFrom(sSrc, sDst, aFlags);
if curProgress is not None:
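# Wrap the raw progress object so the test framework can wait on it and
# log any failure details.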
oProgress = vboxwrappers.ProgressWrapper(curProgress, self.oTstDrv.oVBoxMgr, self.oTstDrv, "gctrlFileCopyFrom");
try:
oProgress.wait();
if not oProgress.isSuccess():
oProgress.logResult(fIgnoreErrors = True);
fRc = False;
except:
reporter.logXcpt('Waiting exception for sSrc="%s", sDst="%s":' % (sSrc, sDst));
fRc = False;
else:
reporter.error('No progress object returned');
fRc = False;
except:
# Just log, don't assume an error here (will be done in the main loop then).
reporter.logXcpt('Copy from exception for sSrc="%s", sDst="%s":' % (sSrc, sDst));
fRc = False;
return fRc;
def gctrlCopyFileTo(self, oGuestSession, sSrc, sDst, aFlags):
"""
Helper function to copy a single file from host to the guest.
"""
fRc = True; # Be optimistic.
try:
reporter.log2('Copying host file "%s" to guest "%s" (flags %s)' % (sSrc, sDst, aFlags));
if self.oTstDrv.fpApiVer >= 5.0:
curProgress = oGuestSession.fileCopyToGuest(sSrc, sDst, aFlags);
else:
curProgress = oGuestSession.copyTo(sSrc, sDst, aFlags);
if curProgress is not None:
oProgress = vboxwrappers.ProgressWrapper(curProgress, self.oTstDrv.oVBoxMgr, self.oTstDrv, "gctrlCopyFileTo");
try:
oProgress.wait();
if not oProgress.isSuccess():
oProgress.logResult(fIgnoreErrors = True);
fRc = False;
except:
reporter.logXcpt('Wait exception for sSrc="%s", sDst="%s":' % (sSrc, sDst));
fRc = False;
else:
reporter.error('No progress object returned');
fRc = False;
except:
# Just log, don't assume an error here (will be done in the main loop then).
reporter.logXcpt('Copy to exception for sSrc="%s", sDst="%s":' % (sSrc, sDst));
fRc = False;
return fRc;
def gctrlCreateDir(self, oTest, oRes, oGuestSession):
"""
Helper function to create a guest directory specified in
the current test.
"""
fRc = True; # Be optimistic.
reporter.log2('Creating directory "%s"' % (oTest.sDirectory,));
try:
oGuestSession.directoryCreate(oTest.sDirectory, \
oTest.fMode, oTest.aFlags);
if self.oTstDrv.fpApiVer >= 5.0:
fDirExists = oGuestSession.directoryExists(oTest.sDirectory, False);
else:
fDirExists = oGuestSession.directoryExists(oTest.sDirectory);
if fDirExists is False \
and oRes.fRc is True:
# Directory does not exist but we want it to.
fRc = False;
except:
reporter.logXcpt('Directory create exception for directory "%s":' % (oTest.sDirectory,));
if oRes.fRc is True:
# Just log, don't assume an error here (will be done in the main loop then).
fRc = False;
# Directory creation failed, which was the expected result.
return fRc;
def gctrlReadDir(self, oTest, oRes, oGuestSession, subDir = ''): # pylint: disable=R0914
"""
Helper function to read a guest directory specified in
the current test.
"""
sDir = oTest.sDirectory;
sFilter = oTest.sFilter;
aFlags = oTest.aFlags;
fRc = True; # Be optimistic.
cDirs = 0; # Number of directories read.
cFiles = 0; # Number of files read.
try:
sCurDir = os.path.join(sDir, subDir);
#reporter.log2('Directory="%s", filter="%s", aFlags="%s"' % (sCurDir, sFilter, aFlags));
oCurDir = oGuestSession.directoryOpen(sCurDir, sFilter, aFlags);
while fRc:
try:
oFsObjInfo = oCurDir.read();
if oFsObjInfo.name == "." \
or oFsObjInfo.name == "..":
#reporter.log2('\tSkipping "%s"' % oFsObjInfo.name);
continue; # Skip "." and ".." entries.
if oFsObjInfo.type == vboxcon.FsObjType_Directory:
#reporter.log2('\tDirectory "%s"' % oFsObjInfo.name);
cDirs += 1;
sSubDir = oFsObjInfo.name;
if subDir != "":
sSubDir = os.path.join(subDir, oFsObjInfo.name);
fRc, cSubDirs, cSubFiles = self.gctrlReadDir(oTest, oRes, oGuestSession, sSubDir);
cDirs += cSubDirs;
cFiles += cSubFiles;
elif oFsObjInfo.type == vboxcon.FsObjType_File:
#reporter.log2('\tFile "%s"' % oFsObjInfo.name);
cFiles += 1;
elif oFsObjInfo.type == vboxcon.FsObjType_Symlink:
#reporter.log2('\tSymlink "%s" -- not tested yet' % oFsObjInfo.name);
pass;
else:
reporter.error('\tDirectory "%s" contains invalid directory entry "%s" (type %d)' % \
(sCurDir, oFsObjInfo.name, oFsObjInfo.type));
fRc = False;
except Exception as oXcpt:
# Not necessarily an error -- could be VBOX_E_OBJECT_NOT_FOUND. See reference.
if vbox.ComError.equal(oXcpt, vbox.ComError.VBOX_E_OBJECT_NOT_FOUND):
#reporter.log2('\tNo more directory entries for "%s"' % (sCurDir,));
break
# Just log, don't assume an error here (will be done in the main loop then).
reporter.logXcpt('\tDirectory open exception for directory="%s":' % (sCurDir,));
fRc = False;
break;
oCurDir.close();
except:
# Just log, don't assume an error here (will be done in the main loop then).
reporter.logXcpt('\tDirectory open exception for directory="%s":' % (sCurDir,));
fRc = False;
return (fRc, cDirs, cFiles);
def gctrlExecDoTest(self, i, oTest, oRes, oGuestSession):
"""
Wrapper function around gctrlExecute to provide more sanity checking
when needed in actual execution tests.
"""
reporter.log('Testing #%d, cmd="%s" ...' % (i, oTest.sCmd));
fRc = self.gctrlExecute(oTest, oGuestSession);
if fRc is oRes.fRc:
if fRc is True:
# Compare exit status / code on successful process execution.
if oTest.uExitStatus != oRes.uExitStatus \
or oTest.iExitCode != oRes.iExitCode:
reporter.error('Test #%d failed: Got exit status + code %d,%d, expected %d,%d'
% (i, oTest.uExitStatus, oTest.iExitCode, oRes.uExitStatus, oRes.iExitCode));
return False;
if fRc is True:
# Compare test / result buffers on successful process execution.
if oTest.sBuf is not None \
and oRes.sBuf is not None:
if bytes(oTest.sBuf) != bytes(oRes.sBuf):
reporter.error('Test #%d failed: Got buffer\n%s (%d bytes), expected\n%s (%d bytes)'
% (i, map(hex, map(ord, oTest.sBuf)), len(oTest.sBuf), \
map(hex, map(ord, oRes.sBuf)), len(oRes.sBuf)));
return False;
else:
reporter.log2('Test #%d passed: Buffers match (%d bytes)' % (i, len(oRes.sBuf)));
elif oRes.sBuf is not None \
and oRes.sBuf:
reporter.error('Test #%d failed: Got no buffer data, expected\n%s (%d bytes)' %
(i, map(hex, map(ord, oRes.sBuf)), len(oRes.sBuf)));
return False;
elif oRes.cbStdOut > 0 \
and oRes.cbStdOut != oTest.cbStdOut:
reporter.error('Test #%d failed: Got %d stdout data, expected %d'
% (i, oTest.cbStdOut, oRes.cbStdOut));
return False;
else:
reporter.error('Test #%d failed: Got %s, expected %s' % (i, fRc, oRes.fRc));
return False;
return True;
def gctrlExecute(self, oTest, oGuestSession):
"""
Helper function to execute a program on a guest, specified in
the current test.
"""
fRc = True; # Be optimistic.
## @todo Compare execution timeouts!
#tsStart = base.timestampMilli();
reporter.log2('Using session user=%s, sDomain=%s, name=%s, timeout=%d' \
% (oGuestSession.user, oGuestSession.domain, \
oGuestSession.name, oGuestSession.timeout));
reporter.log2('Executing sCmd=%s, aFlags=%s, timeoutMS=%d, aArgs=%s, aEnv=%s' \
% (oTest.sCmd, oTest.aFlags, oTest.timeoutMS, \
oTest.aArgs, oTest.aEnv));
try:
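# For API 5.0 and later the first aArgs element is argv[0] (the image itself)
# and must be passed explicitly; older APIs add it implicitly, which is
# presumably why the aArgs[1:] slice is used for them below.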
curProc = oGuestSession.processCreate(oTest.sCmd,
oTest.aArgs if self.oTstDrv.fpApiVer >= 5.0 else oTest.aArgs[1:],
oTest.aEnv, oTest.aFlags, oTest.timeoutMS);
if curProc is not None:
reporter.log2('Process start requested, waiting for start (%dms) ...' % (oTest.timeoutMS,));
fWaitFor = [ vboxcon.ProcessWaitForFlag_Start ];
waitResult = curProc.waitForArray(fWaitFor, oTest.timeoutMS);
reporter.log2('Wait result returned: %d, current process status is: %d' % (waitResult, curProc.status));
if curProc.status == vboxcon.ProcessStatus_Started:
fWaitFor = [ vboxcon.ProcessWaitForFlag_Terminate ];
if vboxcon.ProcessCreateFlag_WaitForStdOut in oTest.aFlags:
fWaitFor.append(vboxcon.ProcessWaitForFlag_StdOut);
if vboxcon.ProcessCreateFlag_WaitForStdErr in oTest.aFlags:
fWaitFor.append(vboxcon.ProcessWaitForFlag_StdErr);
## @todo Add vboxcon.ProcessWaitForFlag_StdIn.
reporter.log2('Process (PID %d) started, waiting for termination (%dms), waitFlags=%s ...' \
% (curProc.PID, oTest.timeoutMS, fWaitFor));
while True:
waitResult = curProc.waitForArray(fWaitFor, oTest.timeoutMS);
reporter.log2('Wait returned: %d' % (waitResult,));
try:
# Try stdout.
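# (Stream handle 1 below is stdout; handle 2 further down is stderr.)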
if waitResult == vboxcon.ProcessWaitResult_StdOut \
or waitResult == vboxcon.ProcessWaitResult_WaitFlagNotSupported:
reporter.log2('Reading stdout ...');
abBuf = curProc.Read(1, 64 * 1024, oTest.timeoutMS);
if abBuf:
reporter.log2('Process (PID %d) got %d bytes of stdout data' % (curProc.PID, len(abBuf)));
oTest.cbStdOut += len(abBuf);
oTest.sBuf = abBuf; # Appending does *not* work atm, so just assign it. No time now.
# Try stderr.
if waitResult == vboxcon.ProcessWaitResult_StdErr \
or waitResult == vboxcon.ProcessWaitResult_WaitFlagNotSupported:
reporter.log2('Reading stderr ...');
abBuf = curProc.Read(2, 64 * 1024, oTest.timeoutMS);
if abBuf:
reporter.log2('Process (PID %d) got %d bytes of stderr data' % (curProc.PID, len(abBuf)));
oTest.cbStdErr += len(abBuf);
oTest.sBuf = abBuf; # Appending does *not* work atm, so just assign it. No time now.
# Use stdin.
if waitResult == vboxcon.ProcessWaitResult_StdIn \
or waitResult == vboxcon.ProcessWaitResult_WaitFlagNotSupported:
pass; #reporter.log2('Process (PID %d) needs stdin data' % (curProc.pid,));
# Termination or error?
if waitResult == vboxcon.ProcessWaitResult_Terminate \
or waitResult == vboxcon.ProcessWaitResult_Error \
or waitResult == vboxcon.ProcessWaitResult_Timeout:
reporter.log2('Process (PID %d) reported terminate/error/timeout: %d, status: %d' \
% (curProc.PID, waitResult, curProc.status));
break;
except:
# Just skip reads which returned nothing.
pass;
reporter.log2('Final process status (PID %d) is: %d' % (curProc.PID, curProc.status));
reporter.log2('Process (PID %d) %d stdout, %d stderr' % (curProc.PID, oTest.cbStdOut, oTest.cbStdErr));
oTest.uExitStatus = curProc.status;
oTest.iExitCode = curProc.exitCode;
reporter.log2('Process (PID %d) has exit code: %d' % (curProc.PID, oTest.iExitCode));
except KeyboardInterrupt:
reporter.error('Process (PID %d) execution interrupted' % (curProc.PID,));
if curProc is not None:
curProc.close();
except:
# Just log, don't assume an error here (will be done in the main loop then).
reporter.logXcpt('Execution exception for command "%s":' % (oTest.sCmd,));
fRc = False;
return fRc;
def testGuestCtrlSessionEnvironment(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914
"""
Tests the guest session environment changes.
"""
aoTests = [
# Check basic operations.
tdTestSessionEx([ # Initial environment is empty.
tdStepSessionCheckEnv(),
# Check clearing empty env.
tdStepSessionClearEnv(),
tdStepSessionCheckEnv(),
# Check set.
tdStepSessionSetEnv('FOO', 'BAR'),
tdStepSessionCheckEnv(['FOO=BAR',]),
tdStepRequireMinimumApiVer(5.0), # 4.3 can't cope with the remainder.
tdStepSessionClearEnv(),
tdStepSessionCheckEnv(),
# Check unset.
tdStepSessionUnsetEnv('BAR'),
tdStepSessionCheckEnv(['BAR']),
tdStepSessionClearEnv(),
tdStepSessionCheckEnv(),
# Set + unset.
tdStepSessionSetEnv('FOO', 'BAR'),
tdStepSessionCheckEnv(['FOO=BAR',]),
tdStepSessionUnsetEnv('FOO'),
tdStepSessionCheckEnv(['FOO']),
# Bulk environment changes (via attrib) (shall replace existing 'FOO').
tdStepSessionBulkEnv( ['PATH=/bin:/usr/bin', 'TMPDIR=/var/tmp', 'USER=root']),
tdStepSessionCheckEnv(['PATH=/bin:/usr/bin', 'TMPDIR=/var/tmp', 'USER=root']),
]),
tdTestSessionEx([ # Check that setting the same value several times works.
tdStepSessionSetEnv('FOO','BAR'),
tdStepSessionCheckEnv([ 'FOO=BAR',]),
tdStepSessionSetEnv('FOO','BAR2'),
tdStepSessionCheckEnv([ 'FOO=BAR2',]),
tdStepSessionSetEnv('FOO','BAR3'),
tdStepSessionCheckEnv([ 'FOO=BAR3',]),
tdStepRequireMinimumApiVer(5.0), # 4.3 can't cope with the remainder.
# Add a little unsetting to the mix.
tdStepSessionSetEnv('BAR', 'BEAR'),
tdStepSessionCheckEnv([ 'FOO=BAR3', 'BAR=BEAR',]),
tdStepSessionUnsetEnv('FOO'),
tdStepSessionCheckEnv([ 'FOO', 'BAR=BEAR',]),
tdStepSessionSetEnv('FOO','BAR4'),
tdStepSessionCheckEnv([ 'FOO=BAR4', 'BAR=BEAR',]),
# The environment is case sensitive.
tdStepSessionSetEnv('foo','BAR5'),
tdStepSessionCheckEnv([ 'FOO=BAR4', 'BAR=BEAR', 'foo=BAR5']),
tdStepSessionUnsetEnv('foo'),
tdStepSessionCheckEnv([ 'FOO=BAR4', 'BAR=BEAR', 'foo']),
]),
tdTestSessionEx([ # Bulk settings merges stuff, last entry standing.
tdStepSessionBulkEnv(['FOO=bar', 'foo=bar', 'FOO=doofus', 'TMPDIR=/tmp', 'foo=bar2']),
tdStepSessionCheckEnv(['FOO=doofus', 'TMPDIR=/tmp', 'foo=bar2']),
tdStepRequireMinimumApiVer(5.0), # 4.3 is buggy!
tdStepSessionBulkEnv(['2=1+1', 'FOO=doofus2', ]),
tdStepSessionCheckEnv(['2=1+1', 'FOO=doofus2' ]),
]),
# Invalid variable names.
tdTestSessionEx([
tdStepSessionSetEnv('', 'FOO', vbox.ComError.E_INVALIDARG),
tdStepSessionCheckEnv(),
tdStepRequireMinimumApiVer(5.0), # 4.3 is too relaxed checking input!
tdStepSessionSetEnv('=', '===', vbox.ComError.E_INVALIDARG),
tdStepSessionCheckEnv(),
tdStepSessionSetEnv('FOO=', 'BAR', vbox.ComError.E_INVALIDARG),
tdStepSessionCheckEnv(),
tdStepSessionSetEnv('=FOO', 'BAR', vbox.ComError.E_INVALIDARG),
tdStepSessionCheckEnv(),
tdStepRequireMinimumApiVer(5.0), # 4.3 is buggy and too relaxed!
tdStepSessionBulkEnv(['', 'foo=bar'], vbox.ComError.E_INVALIDARG),
tdStepSessionCheckEnv(),
tdStepSessionBulkEnv(['=', 'foo=bar'], vbox.ComError.E_INVALIDARG),
tdStepSessionCheckEnv(),
tdStepSessionBulkEnv(['=FOO', 'foo=bar'], vbox.ComError.E_INVALIDARG),
tdStepSessionCheckEnv(),
]),
# A bit more weird keys/values.
tdTestSessionEx([ tdStepSessionSetEnv('$$$', ''),
tdStepSessionCheckEnv([ '$$$=',]), ]),
tdTestSessionEx([ tdStepSessionSetEnv('$$$', '%%%'),
tdStepSessionCheckEnv([ '$$$=%%%',]),
]),
tdTestSessionEx([ tdStepRequireMinimumApiVer(5.0), # 4.3 is buggy!
tdStepSessionSetEnv(u'ß$%ß&', ''),
tdStepSessionCheckEnv([ u'ß$%ß&=',]),
]),
# Misc stuff.
tdTestSessionEx([ tdStepSessionSetEnv('FOO', ''),
tdStepSessionCheckEnv(['FOO=',]),
]),
tdTestSessionEx([ tdStepSessionSetEnv('FOO', 'BAR'),
tdStepSessionCheckEnv(['FOO=BAR',])
],),
tdTestSessionEx([ tdStepSessionSetEnv('FOO', 'BAR'),
tdStepSessionSetEnv('BAR', 'BAZ'),
tdStepSessionCheckEnv([ 'FOO=BAR', 'BAR=BAZ',]),
]),
];
return tdTestSessionEx.executeListTestSessions(aoTests, self.oTstDrv, oSession, oTxsSession, oTestVm, 'SessionEnv');
def testGuestCtrlSession(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914
"""
Tests the guest session handling.
"""
if oTestVm.isWindows():
sUser = "Administrator";
else:
sUser = "vbox";
sPassword = "password";
aaTests = [
# Invalid parameters.
[ tdTestSession(),
tdTestResultSession(fRc = False) ],
[ tdTestSession(sUser = ''),
tdTestResultSession(fRc = False) ],
[ tdTestSession(sPassword = 'bar'),
tdTestResultSession(fRc = False) ],
[ tdTestSession(sDomain = 'boo'),
tdTestResultSession(fRc = False) ],
[ tdTestSession(sPassword = 'bar', sDomain = 'boo'),
tdTestResultSession(fRc = False) ],
# User account without a password - forbidden.
[ tdTestSession(sUser = sUser),
tdTestResultSession(fRc = False) ],
# Wrong credentials.
# Note: On Guest Additions < 4.3 this always succeeds because these don't
# support creating dedicated sessions. Instead, guest process creation
# then will fail. See note below.
[ tdTestSession(sUser = 'foo', sPassword = 'bar', sDomain = 'boo'),
tdTestResultSession(fRc = False) ],
# Correct credentials.
[ tdTestSession(sUser = sUser, sPassword = sPassword),
tdTestResultSession(fRc = True, cNumSessions = 1) ]
];
# Parameters.
fRc = True;
for (i, aTest) in enumerate(aaTests):
curTest = aTest[0]; # tdTestSession, use an index, later.
curRes = aTest[1]; # tdTestResult
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
reporter.log('Testing #%d, user="%s", sPassword="%s", sDomain="%s" ...' \
% (i, curTest.oCreds.sUser, curTest.oCreds.sPassword, curTest.oCreds.sDomain));
curGuestSessionName = 'testGuestCtrlSession: Test #%d' % (i);
fRc2, curGuestSession = curTest.createSession(curGuestSessionName);
# See note about < 4.3 Guest Additions above.
if curGuestSession is not None \
and curGuestSession.protocolVersion >= 2 \
and fRc2 is not curRes.fRc:
reporter.error('Test #%d failed: Session creation failed: Got %s, expected %s' \
% (i, fRc2, curRes.fRc));
fRc = False;
if fRc2:
# On Guest Additions < 4.3 getSessionCount() always will return 1, so skip the
# check then.
if curGuestSession.protocolVersion >= 2:
curSessionCount = curTest.getSessionCount(self.oTstDrv.oVBoxMgr);
if curSessionCount != curRes.cNumSessions:
reporter.error('Test #%d failed: Session count does not match: Got %d, expected %d' \
% (i, curSessionCount, curRes.cNumSessions));
fRc = False;
break;
if curGuestSession is not None \
and curGuestSession.name != curGuestSessionName:
reporter.error('Test #%d failed: Session name does not match: Got "%s", expected "%s"' \
% (i, curGuestSession.name, curGuestSessionName));
fRc = False;
break;
fRc2 = curTest.closeSession();
if fRc2 is False:
reporter.error('Test #%d failed: Session could not be closed' % (i,));
fRc = False;
break;
if fRc is False:
return (False, oTxsSession);
# Multiple sessions.
iMaxGuestSessions = 31; # Maximum number of concurrent guest session allowed.
# Actually, this is 32, but we don't test session 0.
multiSession = {};
reporter.log2('Opening multiple guest sessions at once ...');
for i in range(iMaxGuestSessions + 1):
multiSession[i] = tdTestSession(sUser = sUser, sPassword = sPassword, sSessionName = 'MultiSession #%d' % (i,));
multiSession[i].setEnvironment(oSession, oTxsSession, oTestVm);
curSessionCount = multiSession[i].getSessionCount(self.oTstDrv.oVBoxMgr);
reporter.log2('MultiSession test #%d count is %d' % (i, curSessionCount));
if curSessionCount != i:
reporter.error('MultiSession count #%d must be %d, got %d' % (i, i, curSessionCount));
fRc = False;
break;
fRc2, _ = multiSession[i].createSession('MultiSession #%d' % (i,));
if fRc2 is not True:
if i < iMaxGuestSessions:
reporter.error('MultiSession #%d test failed' % (i,));
fRc = False;
else:
reporter.log('MultiSession #%d exceeded concurrent guest session count, good' % (i,));
break;
curSessionCount = multiSession[i].getSessionCount(self.oTstDrv.oVBoxMgr);
if curSessionCount != iMaxGuestSessions:
reporter.error('Final MultiSession count must be %d, got %d'
% (iMaxGuestSessions, curSessionCount));
return (False, oTxsSession);
reporter.log2('Closing MultiSessions ...');
iLastSession = iMaxGuestSessions - 1;
for i in range(iLastSession): # Close all but the last opened session.
fRc2 = multiSession[i].closeSession();
reporter.log2('MultiSession #%d count is %d' % (i, multiSession[i].getSessionCount(self.oTstDrv.oVBoxMgr),));
if fRc2 is False:
reporter.error('Closing MultiSession #%d failed' % (i,));
fRc = False;
break;
curSessionCount = multiSession[i].getSessionCount(self.oTstDrv.oVBoxMgr);
if curSessionCount != 1:
reporter.error('Final MultiSession count #2 must be 1, got %d' % (curSessionCount,));
fRc = False;
try:
# r=bird: multiSession[0].oGuestSession is None! Why don't you just use 'assert' or 'if' to check
# the functioning of the __testcase__?
# Make sure that accessing the first opened guest session does not work anymore because we just removed (closed) it.
curSessionName = multiSession[0].oGuestSession.name;
reporter.error('Accessing first removed MultiSession should not be possible, got name="%s"' % (curSessionName,));
fRc = False;
except:
reporter.logXcpt('Could not access first removed MultiSession object, good:');
try:
# Try Accessing last opened session which did not get removed yet.
curSessionName = multiSession[iLastSession].oGuestSession.name;
reporter.log('Accessing last standing MultiSession worked, got name="%s"' % (curSessionName,));
multiSession[iLastSession].closeSession();
curSessionCount = multiSession[i].getSessionCount(self.oTstDrv.oVBoxMgr);
if curSessionCount != 0:
reporter.error('Final MultiSession count #3 must be 0, got %d' % (curSessionCount,));
fRc = False;
except:
reporter.logXcpt('Could not access last standing MultiSession object:');
fRc = False;
## @todo Test session timeouts.
return (fRc, oTxsSession);
def testGuestCtrlSessionFileRefs(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914
"""
Tests the guest session file reference handling.
"""
if oTestVm.isWindows():
sUser = "Administrator";
sPassword = "password";
sDomain = "";
sFile = "C:\\windows\\system32\\kernel32.dll";
# Number of stale guest files to create.
cStaleFiles = 10;
fRc = True;
try:
oGuest = oSession.o.console.guest;
oGuestSession = oGuest.createSession(sUser, sPassword, sDomain, \
"testGuestCtrlSessionFileRefs");
fWaitFor = [ vboxcon.GuestSessionWaitForFlag_Start ];
waitResult = oGuestSession.waitForArray(fWaitFor, 30 * 1000);
#
# Be nice to Guest Additions < 4.3: They don't support session handling and
# therefore return WaitFlagNotSupported.
#
if waitResult != vboxcon.GuestSessionWaitResult_Start \
and waitResult != vboxcon.GuestSessionWaitResult_WaitFlagNotSupported:
# Just log, don't assume an error here (will be done in the main loop then).
reporter.log('Session did not start successfully, returned wait result: %d' \
% (waitResult));
return (False, oTxsSession);
reporter.log('Session successfully started');
#
# Open guest files and "forget" them (stale entries).
# For them we don't have any references anymore intentionally.
#
reporter.log2('Opening stale files');
for i in range(0, cStaleFiles):
try:
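# 5.0+ uses enum-based access/disposition parameters; the legacy API takes
# string parameters instead ("r" = read-only, "oe" presumably open-existing).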
if self.oTstDrv.fpApiVer >= 5.0:
oGuestSession.fileOpen(sFile, vboxcon.FileAccessMode_ReadOnly, vboxcon.FileOpenAction_OpenExisting, 0);
else:
oGuestSession.fileOpen(sFile, "r", "oe", 0);
# Note: We intentionally keep no reference to the opened file so that it
#       shows up as a stale entry in the session's file list (see the
#       count check below).
except:
reporter.errorXcpt('Opening stale file #%d failed:' % (i,));
fRc = False;
break;
if fRc:
cFiles = len(self.oTstDrv.oVBoxMgr.getArray(oGuestSession, 'files'));
if cFiles != cStaleFiles:
reporter.error('Test failed: Got %d stale files, expected %d' % (cFiles, cStaleFiles));
fRc = False;
if fRc:
#
# Open non-stale files and close them again.
#
reporter.log2('Opening non-stale files');
aaFiles = [];
for i in range(0, cStaleFiles):
try:
if self.oTstDrv.fpApiVer >= 5.0:
oCurFile = oGuestSession.fileOpen(sFile, vboxcon.FileAccessMode_ReadOnly,
vboxcon.FileOpenAction_OpenExisting, 0);
else:
oCurFile = oGuestSession.fileOpen(sFile, "r", "oe", 0);
aaFiles.append(oCurFile);
except:
reporter.errorXcpt('Opening non-stale file #%d failed:' % (i,));
fRc = False;
break;
if fRc:
cFiles = len(self.oTstDrv.oVBoxMgr.getArray(oGuestSession, 'files'));
if cFiles != cStaleFiles * 2:
reporter.error('Test failed: Got %d total files, expected %d' % (cFiles, cStaleFiles * 2));
fRc = False;
if fRc:
reporter.log2('Closing all non-stale files again ...');
for i in range(0, cStaleFiles):
try:
aaFiles[i].close();
except:
reporter.errorXcpt('Closing non-stale file #%d failed:' % (i,));
fRc = False;
break;
cFiles = len(self.oTstDrv.oVBoxMgr.getArray(oGuestSession, 'files'));
# Here we count the stale files (that is, files we don't have a reference
# anymore for) and the opened and then closed non-stale files (that we still keep
# a reference in aaFiles[] for).
if cFiles != cStaleFiles:
reporter.error('Test failed: Got %d total files, expected %d' \
% (cFiles, cStaleFiles));
fRc = False;
if fRc:
#
# Check if all (referenced) non-stale files now are in "closed" state.
#
reporter.log2('Checking statuses of all non-stale files ...');
for i in range(0, cStaleFiles):
try:
curFilesStatus = aaFiles[i].status;
if curFilesStatus != vboxcon.FileStatus_Closed:
reporter.error('Test failed: Non-stale file #%d has status %d, expected %d' \
% (i, curFilesStatus, vboxcon.FileStatus_Closed));
fRc = False;
except:
reporter.errorXcpt('Checking status of file #%d failed:' % (i,));
fRc = False;
break;
if fRc:
reporter.log2('All non-stale files closed');
cFiles = len(self.oTstDrv.oVBoxMgr.getArray(oGuestSession, 'files'));
reporter.log2('Final guest session file count: %d' % (cFiles,));
# Now try to close the session and see what happens.
reporter.log2('Closing guest session ...');
oGuestSession.close();
except:
reporter.errorXcpt('Testing for stale processes failed:');
fRc = False;
return (fRc, oTxsSession);
#def testGuestCtrlSessionDirRefs(self, oSession, oTxsSession, oTestVm):
# """
# Tests the guest session directory reference handling.
# """
# fRc = True;
# return (fRc, oTxsSession);
def testGuestCtrlSessionProcRefs(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914
"""
Tests the guest session process reference handling.
"""
if oTestVm.isWindows():
sUser = "Administrator";
sPassword = "password";
sDomain = "";
sCmd = "C:\\windows\\system32\\cmd.exe";
aArgs = [sCmd,];
# Number of stale guest processes to create.
cStaleProcs = 10;
fRc = True;
try:
oGuest = oSession.o.console.guest;
oGuestSession = oGuest.createSession(sUser, sPassword, sDomain, \
"testGuestCtrlSessionProcRefs");
fWaitFor = [ vboxcon.GuestSessionWaitForFlag_Start ];
waitResult = oGuestSession.waitForArray(fWaitFor, 30 * 1000);
#
# Be nice to Guest Additions < 4.3: They don't support session handling and
# therefore return WaitFlagNotSupported.
#
if waitResult != vboxcon.GuestSessionWaitResult_Start \
and waitResult != vboxcon.GuestSessionWaitResult_WaitFlagNotSupported:
# Just log, don't assume an error here (will be done in the main loop then).
reporter.log('Session did not start successfully, returned wait result: %d' \
% (waitResult));
return (False, oTxsSession);
reporter.log('Session successfully started');
#
# Fire off forever-running processes and "forget" them (stale entries).
# For them we don't have any references anymore intentionally.
#
reporter.log2('Starting stale processes');
for i in range(0, cStaleProcs):
try:
oGuestSession.processCreate(sCmd,
aArgs if self.oTstDrv.fpApiVer >= 5.0 else aArgs[1:], [],
[ vboxcon.ProcessCreateFlag_WaitForStdOut ], \
30 * 1000);
# Note: Use a timeout in the call above so the stale processes don't hang
#       around forever. This can happen if the installed Guest Additions
#       do not support terminating guest processes.
except:
reporter.logXcpt('Creating stale process #%d failed:' % (i,));
fRc = False;
break;
if fRc:
cProcs = len(self.oTstDrv.oVBoxMgr.getArray(oGuestSession, 'processes'));
if cProcs != cStaleProcs:
reporter.error('Test failed: Got %d stale processes, expected %d' % (cProcs, cStaleProcs));
fRc = False;
if fRc:
#
# Fire off non-stale processes and wait for termination.
#
if oTestVm.isWindows():
aArgs = [ sCmd, '/C', 'dir', '/S', 'C:\\Windows\\system'];
reporter.log2('Starting non-stale processes');
aaProcs = [];
for i in range(0, cStaleProcs):
try:
oCurProc = oGuestSession.processCreate(sCmd, aArgs if self.oTstDrv.fpApiVer >= 5.0 else aArgs[1:],
[], [], 0); # Infinite timeout.
aaProcs.append(oCurProc);
except:
reporter.logXcpt('Creating non-stale process #%d failed:' % (i,));
fRc = False;
break;
if fRc:
reporter.log2('Waiting for non-stale processes to terminate');
for i in range(0, cStaleProcs):
try:
aaProcs[i].waitForArray([ vboxcon.ProcessWaitForFlag_Terminate ], 30 * 1000);
curProcStatus = aaProcs[i].status;
if curProcStatus != vboxcon.ProcessStatus_TerminatedNormally:
reporter.error('Test failed: Waiting for non-stale process #%d'
' resulted in status %d, expected %d' \
% (i, curProcStatus, vboxcon.ProcessStatus_TerminatedNormally));
fRc = False;
except:
reporter.logXcpt('Waiting for non-stale process #%d failed:' % (i,));
fRc = False;
break;
cProcs = len(self.oTstDrv.oVBoxMgr.getArray(oGuestSession, 'processes'));
# Here we count the stale processes (that is, processes we don't have a reference
# anymore for) and the started + terminated non-stale processes (that we still keep
# a reference in aaProcs[] for).
if cProcs != (cStaleProcs * 2):
reporter.error('Test failed: Got %d total processes, expected %d' \
% (cProcs, cStaleProcs * 2));
fRc = False;
if fRc:
#
# Check if all (referenced) non-stale processes now are in "terminated" state.
#
for i in range(0, cStaleProcs):
curProcStatus = aaProcs[i].status;
if curProcStatus != vboxcon.ProcessStatus_TerminatedNormally:
reporter.error('Test failed: Non-stale process #%d has status %d, expected %d' \
% (i, curProcStatus, vboxcon.ProcessStatus_TerminatedNormally));
fRc = False;
if fRc:
reporter.log2('All non-stale processes terminated');
# Fire off blocking processes which are terminated via terminate().
if oTestVm.isWindows():
aArgs = [ sCmd, '/C', 'dir', '/S', 'C:\\Windows'];
reporter.log2('Starting blocking processes');
aaProcs = [];
for i in range(0, cStaleProcs):
try:
oCurProc = oGuestSession.processCreate(sCmd, aArgs if self.oTstDrv.fpApiVer >= 5.0 else aArgs[1:],
[], [], 30 * 1000);
# Note: Use a timeout in the call above so these blocking processes don't hang
#       around forever. This can happen if the installed Guest Additions
#       do not support terminating guest processes.
aaProcs.append(oCurProc);
except:
reporter.logXcpt('Creating blocking process failed:');
fRc = False;
break;
if fRc:
reporter.log2('Terminating blocking processes');
for i in range(0, cStaleProcs):
try:
aaProcs[i].terminate();
except: # Termination might not be supported, just skip and log it.
reporter.logXcpt('Termination of blocking process failed, skipped:');
cProcs = len(self.oTstDrv.oVBoxMgr.getArray(oGuestSession, 'processes'));
if cProcs != (cStaleProcs * 2): # Still should be 20 processes because we terminated the 10 newest ones.
reporter.error('Test failed: Got %d total processes, expected %d' % (cProcs, cStaleProcs * 2));
fRc = False;
cProcs = len(self.oTstDrv.oVBoxMgr.getArray(oGuestSession, 'processes'));
reporter.log2('Final guest session processes count: %d' % (cProcs,));
# Now try to close the session and see what happens.
reporter.log2('Closing guest session ...');
oGuestSession.close();
except:
reporter.logXcpt('Testing for stale processes failed:');
fRc = False;
return (fRc, oTxsSession);
def testGuestCtrlExec(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914,R0915
"""
Tests the basic execution feature.
"""
if oTestVm.isWindows():
sUser = "Administrator";
else:
sUser = "vbox";
sPassword = "password";
if oTestVm.isWindows():
# Outputting stuff.
sImageOut = "C:\\windows\\system32\\cmd.exe";
else:
reporter.error('Implement me!'); ## @todo Implement non-Windows bits.
return (False, oTxsSession);
aaInvalid = [
# Invalid parameters.
[ tdTestExec(sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = False) ],
# Non-existent / invalid image.
[ tdTestExec(sCmd = "non-existent", sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = False) ],
[ tdTestExec(sCmd = "non-existent2", sUser = sUser, sPassword = sPassword, fWaitForExit = True),
tdTestResultExec(fRc = False) ],
# Use an invalid format string.
[ tdTestExec(sCmd = "%$%%%&", sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = False) ],
# More stuff.
[ tdTestExec(sCmd = u"ƒ‰‹ˆ÷‹¸", sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = False) ],
[ tdTestExec(sCmd = "???://!!!", sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = False) ],
[ tdTestExec(sCmd = "<>!\\", sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = False) ]
# Enable as soon as ERROR_BAD_DEVICE is implemented.
#[ tdTestExec(sCmd = "CON", sUser = sUser, sPassword = sPassword),
# tdTestResultExec(fRc = False) ]
];
if oTestVm.isWindows():
sVBoxControl = "C:\\Program Files\\Oracle\\VirtualBox Guest Additions\\VBoxControl.exe";
aaExec = [
# Basic execution.
[ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'dir', '/S', 'c:\\windows\\system32' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True) ],
[ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'dir', '/S', 'c:\\windows\\system32\\kernel32.dll' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True) ],
[ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'dir', '/S', 'c:\\windows\\system32\\nonexist.dll' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True, iExitCode = 1) ],
[ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'dir', '/S', '/wrongparam' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True, iExitCode = 1) ],
# Paths with spaces.
## @todo Get path of installed Guest Additions. Later.
[ tdTestExec(sCmd = sVBoxControl, aArgs = [ sVBoxControl, 'version' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True) ],
# StdOut.
[ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'dir', '/S', 'c:\\windows\\system32' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True) ],
[ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'dir', '/S', 'stdout-non-existing' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True, iExitCode = 1) ],
# StdErr.
[ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'dir', '/S', 'c:\\windows\\system32' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True) ],
[ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'dir', '/S', 'stderr-non-existing' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True, iExitCode = 1) ],
# StdOut + StdErr.
[ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'dir', '/S', 'c:\\windows\\system32' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True) ],
[ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'dir', '/S', 'stdouterr-non-existing' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True, iExitCode = 1) ]
# FIXME: Failing tests.
# Environment variables.
# [ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'set', 'TEST_NONEXIST' ],
# sUser = sUser, sPassword = sPassword),
# tdTestResultExec(fRc = True, iExitCode = 1) ]
# [ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'set', 'windir' ],
# sUser = sUser, sPassword = sPassword,
# aFlags = [ vboxcon.ProcessCreateFlag_WaitForStdOut, vboxcon.ProcessCreateFlag_WaitForStdErr ]),
# tdTestResultExec(fRc = True, sBuf = 'windir=C:\\WINDOWS\r\n') ],
# [ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'set', 'TEST_FOO' ],
# sUser = sUser, sPassword = sPassword,
# aEnv = [ 'TEST_FOO=BAR' ],
# aFlags = [ vboxcon.ProcessCreateFlag_WaitForStdOut, vboxcon.ProcessCreateFlag_WaitForStdErr ]),
# tdTestResultExec(fRc = True, sBuf = 'TEST_FOO=BAR\r\n') ],
# [ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'set', 'TEST_FOO' ],
# sUser = sUser, sPassword = sPassword,
# aEnv = [ 'TEST_FOO=BAR', 'TEST_BAZ=BAR' ],
# aFlags = [ vboxcon.ProcessCreateFlag_WaitForStdOut, vboxcon.ProcessCreateFlag_WaitForStdErr ]),
# tdTestResultExec(fRc = True, sBuf = 'TEST_FOO=BAR\r\n') ]
## @todo Create some files (or get files) we know the output size of to validate output length!
## @todo Add task which gets killed at some random time while letting the guest output something.
];
# Manual test, not executed automatically.
aaManual = [
[ tdTestExec(sCmd = sImageOut, aArgs = [ sImageOut, '/C', 'dir /S C:\\Windows' ],
sUser = sUser, sPassword = sPassword,
aFlags = [ vboxcon.ProcessCreateFlag_WaitForStdOut, vboxcon.ProcessCreateFlag_WaitForStdErr ]),
tdTestResultExec(fRc = True, cbStdOut = 497917) ] ];
else:
reporter.log('No OS-specific tests for non-Windows yet!');
# Build up the final test array for the first batch.
aaTests = [];
aaTests.extend(aaInvalid);
if aaExec is not None:
aaTests.extend(aaExec);
fRc = True;
#
# Single execution stuff. Nice for debugging.
#
fManual = False;
if fManual:
curTest = aaTests[1][0]; # tdTestExec, use an index, later.
curRes = aaTests[1][1]; # tdTestResultExec
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc, curGuestSession = curTest.createSession('testGuestCtrlExec: Single test 1');
if fRc is False:
reporter.error('Single test failed: Could not create session');
else:
fRc = self.gctrlExecDoTest(0, curTest, curRes, curGuestSession);
curTest.closeSession();
curTest = aaTests[2][0]; # tdTestExec, use an index, later.
curRes = aaTests[2][1]; # tdTestResultExec
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc, curGuestSession = curTest.createSession('testGuestCtrlExec: Single test 2');
if fRc is False:
reporter.error('Single test failed: Could not create session');
else:
fRc = self.gctrlExecDoTest(0, curTest, curRes, curGuestSession);
curTest.closeSession();
curTest = aaTests[3][0]; # tdTestExec, use an index, later.
curRes = aaTests[3][1]; # tdTestResultExec
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc, curGuestSession = curTest.createSession('testGuestCtrlExec: Single test 3');
if fRc is False:
reporter.error('Single test failed: Could not create session');
else:
fRc = self.gctrlExecDoTest(0, curTest, curRes, curGuestSession);
curTest.closeSession();
return (fRc, oTxsSession);
else:
aaManual = aaManual; # Workaround for pylint #W0612.
if fRc is False:
return (fRc, oTxsSession);
#
# First batch: One session per guest process.
#
reporter.log('One session per guest process ...');
for (i, aTest) in enumerate(aaTests):
curTest = aTest[0]; # tdTestExec, use an index, later.
curRes = aTest[1]; # tdTestResultExec
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc, curGuestSession = curTest.createSession('testGuestCtrlExec: Test #%d' % (i,));
if fRc is False:
reporter.error('Test #%d failed: Could not create session' % (i,));
break;
fRc = self.gctrlExecDoTest(i, curTest, curRes, curGuestSession);
if fRc is False:
break;
fRc = curTest.closeSession();
if fRc is False:
break;
# No sessions left?
if fRc is True:
aSessions = self.oTstDrv.oVBoxMgr.getArray(oSession.o.console.guest, 'sessions');
cSessions = len(aSessions);
if cSessions != 0:
reporter.error('Found %d stale session(s), expected 0:' % (cSessions,));
for (i, aSession) in enumerate(aSessions):
reporter.log('\tStale session #%d ("%s")' % (aSession.id, aSession.name));
fRc = False;
if fRc is False:
return (fRc, oTxsSession);
reporter.log('Now using one guest session for all tests ...');
#
# Second batch: One session for *all* guest processes.
#
oGuest = oSession.o.console.guest;
try:
reporter.log('Creating session for all tests ...');
curGuestSession = oGuest.createSession(sUser, sPassword, '', 'testGuestCtrlExec: One session for all tests');
try:
fWaitFor = [ vboxcon.GuestSessionWaitForFlag_Start ];
waitResult = curGuestSession.waitForArray(fWaitFor, 30 * 1000);
if waitResult != vboxcon.GuestSessionWaitResult_Start \
and waitResult != vboxcon.GuestSessionWaitResult_WaitFlagNotSupported:
reporter.error('Session did not start successfully, returned wait result: %d' \
% (waitResult));
return (False, oTxsSession);
reporter.log('Session successfully started');
except:
# Just log, don't assume an error here (will be done in the main loop then).
reporter.logXcpt('Waiting for guest session to start failed:');
return (False, oTxsSession);
# Note: Not waiting for the guest session to start here
# is intentional. This must be handled by the process execution
# call then.
for (i, aTest) in enumerate(aaTests):
curTest = aTest[0]; # tdTestExec, use an index, later.
curRes = aTest[1]; # tdTestResultExec
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc = self.gctrlExecDoTest(i, curTest, curRes, curGuestSession);
if fRc is False:
break;
try:
reporter.log2('Closing guest session ...');
curGuestSession.close();
curGuestSession = None;
except:
# Just log, don't assume an error here (will be done in the main loop then).
reporter.logXcpt('Closing guest session failed:');
fRc = False;
except:
reporter.logXcpt('Could not create one session:');
# No sessions left?
if fRc is True:
cSessions = len(self.oTstDrv.oVBoxMgr.getArray(oSession.o.console.guest, 'sessions'));
if cSessions != 0:
reporter.error('Found %d stale session(s), expected 0' % (cSessions,));
fRc = False;
return (fRc, oTxsSession);
def testGuestCtrlExecErrorLevel(self, oSession, oTxsSession, oTestVm):
"""
Tests handling of error levels from started guest processes.
"""
if oTestVm.isWindows():
sUser = "Administrator";
else:
sUser = "vbox";
sPassword = "password";
if oTestVm.isWindows():
# Outputting stuff.
sImage = "C:\\windows\\system32\\cmd.exe";
else:
reporter.error('Implement me!'); ## @todo Implement non-Windows bits.
return (False, oTxsSession);
aaTests = [];
if oTestVm.isWindows():
aaTests.extend([
# Simple.
[ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'wrongcommand' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True, iExitCode = 1) ],
[ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'exit', '22' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True, iExitCode = 22) ],
[ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'set', 'ERRORLEVEL=234' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True, iExitCode = 0) ],
[ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'echo', '%WINDIR%' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True, iExitCode = 0) ],
[ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'set', 'ERRORLEVEL=0' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True, iExitCode = 0) ],
[ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'dir', 'c:\\windows\\system32' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True, iExitCode = 0) ],
[ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'dir', 'c:\\windows\\system32\\kernel32.dll' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True, iExitCode = 0) ],
[ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'dir', 'c:\\nonexisting-file' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True, iExitCode = 1) ],
[ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'dir', 'c:\\nonexisting-dir\\' ],
sUser = sUser, sPassword = sPassword),
tdTestResultExec(fRc = True, iExitCode = 1) ]
# FIXME: Failing tests.
# With stdout.
# [ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'dir', 'c:\\windows\\system32' ],
# sUser = sUser, sPassword = sPassword, aFlags = [ vboxcon.ProcessCreateFlag_WaitForStdOut ]),
# tdTestResultExec(fRc = True, iExitCode = 0) ],
# [ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'dir', 'c:\\nonexisting-file' ],
# sUser = sUser, sPassword = sPassword, aFlags = [ vboxcon.ProcessCreateFlag_WaitForStdOut ]),
# tdTestResultExec(fRc = True, iExitCode = 1) ],
# [ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'dir', 'c:\\nonexisting-dir\\' ],
# sUser = sUser, sPassword = sPassword, aFlags = [ vboxcon.ProcessCreateFlag_WaitForStdOut ]),
# tdTestResultExec(fRc = True, iExitCode = 1) ],
# With stderr.
# [ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'dir', 'c:\\windows\\system32' ],
# sUser = sUser, sPassword = sPassword, aFlags = [ vboxcon.ProcessCreateFlag_WaitForStdErr ]),
# tdTestResultExec(fRc = True, iExitCode = 0) ],
# [ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'dir', 'c:\\nonexisting-file' ],
# sUser = sUser, sPassword = sPassword, aFlags = [ vboxcon.ProcessCreateFlag_WaitForStdErr ]),
# tdTestResultExec(fRc = True, iExitCode = 1) ],
# [ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'dir', 'c:\\nonexisting-dir\\' ],
# sUser = sUser, sPassword = sPassword, aFlags = [ vboxcon.ProcessCreateFlag_WaitForStdErr ]),
# tdTestResultExec(fRc = True, iExitCode = 1) ],
# With stdout/stderr.
# [ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'dir', 'c:\\windows\\system32' ],
# sUser = sUser, sPassword = sPassword,
# aFlags = [ vboxcon.ProcessCreateFlag_WaitForStdOut, vboxcon.ProcessCreateFlag_WaitForStdErr ]),
# tdTestResultExec(fRc = True, iExitCode = 0) ],
# [ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'dir', 'c:\\nonexisting-file' ],
# sUser = sUser, sPassword = sPassword,
# aFlags = [ vboxcon.ProcessCreateFlag_WaitForStdOut, vboxcon.ProcessCreateFlag_WaitForStdErr ]),
# tdTestResultExec(fRc = True, iExitCode = 1) ],
# [ tdTestExec(sCmd = sImage, aArgs = [ sImage, '/C', 'dir', 'c:\\nonexisting-dir\\' ],
# sUser = sUser, sPassword = sPassword,
# aFlags = [ vboxcon.ProcessCreateFlag_WaitForStdOut, vboxcon.ProcessCreateFlag_WaitForStdErr ]),
# tdTestResultExec(fRc = True, iExitCode = 1) ]
## @todo Test stdin!
]);
else:
reporter.log('No OS-specific tests for non-Windows yet!');
fRc = True;
for (i, aTest) in enumerate(aaTests):
curTest = aTest[0]; # tdTestExec, use an index, later.
curRes = aTest[1]; # tdTestResult
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc, curGuestSession = curTest.createSession('testGuestCtrlExecErrorLevel: Test #%d' % (i,));
if fRc is False:
reporter.error('Test #%d failed: Could not create session' % (i,));
break;
fRc = self.gctrlExecDoTest(i, curTest, curRes, curGuestSession);
curTest.closeSession();
if fRc is False:
break;
return (fRc, oTxsSession);
def testGuestCtrlExecTimeout(self, oSession, oTxsSession, oTestVm):
"""
Tests handling of timeouts of started guest processes.
"""
if oTestVm.isWindows():
sUser = "Administrator";
else:
sUser = "vbox";
sPassword = "password";
sDomain = "";
if oTestVm.isWindows():
# Outputting stuff.
sImage = "C:\\windows\\system32\\cmd.exe";
else:
reporter.error('Implement me!'); ## @todo Implement non-Windows bits.
return (False, oTxsSession);
fRc = True;
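# This test drives the guest control API directly (createSession/processCreate) instead of the tdTestExec helpers.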
try:
oGuest = oSession.o.console.guest;
oGuestSession = oGuest.createSession(sUser, sPassword, sDomain, "testGuestCtrlExecTimeout");
oGuestSession.waitForArray([ vboxcon.GuestSessionWaitForFlag_Start ], 30 * 1000);
# Create a process which never terminates and should timeout when
# waiting for termination.
try:
curProc = oGuestSession.processCreate(sImage, [sImage,] if self.oTstDrv.fpApiVer >= 5.0 else [], \
[], [], 30 * 1000);
reporter.log('Waiting for process 1 being started ...');
waitRes = curProc.waitForArray([ vboxcon.ProcessWaitForFlag_Start ], 30 * 1000);
if waitRes != vboxcon.ProcessWaitResult_Start:
reporter.error('Waiting for process 1 to start failed, got status %d' % (waitRes,));
fRc = False;
if fRc:
reporter.log('Waiting for process 1 to time out within 1ms ...');
waitRes = curProc.waitForArray([ vboxcon.ProcessWaitForFlag_Terminate ], 1);
if waitRes != vboxcon.ProcessWaitResult_Timeout:
reporter.error('Waiting for process 1 did not time out when it should (1)');
fRc = False;
else:
reporter.log('Waiting for process 1 timed out (1), good');
if fRc:
reporter.log('Waiting for process 1 to time out within 5000ms ...');
waitRes = curProc.waitForArray([ vboxcon.ProcessWaitForFlag_Terminate ], 5000);
if waitRes != vboxcon.ProcessWaitResult_Timeout:
reporter.error('Waiting for process 1 did not time out when it should, got wait result %d' % (waitRes,));
fRc = False;
else:
reporter.log('Waiting for process 1 timed out (5000), good');
## @todo Add curProc.terminate() as soon as it's implemented.
except:
reporter.errorXcpt('Exception for process 1:');
fRc = False;
# Create a long-running guest process which will be killed by VBoxService on the
# guest because it ran out of execution time (5 seconds).
if fRc:
try:
curProc = oGuestSession.processCreate(sImage, [sImage,] if self.oTstDrv.fpApiVer >= 5.0 else [], \
[], [], 5 * 1000);
reporter.log('Waiting for process 2 being started ...');
waitRes = curProc.waitForArray([ vboxcon.ProcessWaitForFlag_Start ], 30 * 1000);
if waitRes != vboxcon.ProcessWaitResult_Start:
reporter.error('Waiting for process 2 to start failed, got status %d' % (waitRes,));
fRc = False;
if fRc:
reporter.log('Waiting for process 2 to get killed because it ran out of execution time ...');
waitRes = curProc.waitForArray([ vboxcon.ProcessWaitForFlag_Terminate ], 30 * 1000);
if waitRes != vboxcon.ProcessWaitResult_Timeout:
reporter.error('Waiting for process 2 did not time out when it should, got wait result %d' \
% (waitRes,));
fRc = False;
if fRc:
reporter.log('Waiting for process 2 indicated an error, good');
if curProc.status != vboxcon.ProcessStatus_TimedOutKilled:
reporter.error('Status of process 2 wrong; expected %d, got %d' \
% (vboxcon.ProcessStatus_TimedOutKilled, curProc.status));
fRc = False;
else:
reporter.log('Status of process 2 correct (%d)' % (vboxcon.ProcessStatus_TimedOutKilled,));
## @todo Add curProc.terminate() as soon as it's implemented.
except:
reporter.errorXcpt('Exception for process 2:');
fRc = False;
oGuestSession.close();
except:
reporter.errorXcpt('Could not handle session:');
fRc = False;
return (fRc, oTxsSession);
def testGuestCtrlDirCreate(self, oSession, oTxsSession, oTestVm):
"""
Tests creation of guest directories.
"""
if oTestVm.isWindows():
sUser = "Administrator";
else:
sUser = "vbox";
sPassword = "password";
if oTestVm.isWindows():
sScratch = "C:\\Temp\\vboxtest\\testGuestCtrlDirCreate\\";
aaTests = [];
if oTestVm.isWindows():
aaTests.extend([
# Invalid stuff.
[ tdTestDirCreate(sUser = sUser, sPassword = sPassword, sDirectory = '' ),
tdTestResult(fRc = False) ],
# More unusual stuff.
[ tdTestDirCreate(sUser = sUser, sPassword = sPassword, sDirectory = '..\\..\\' ),
tdTestResult(fRc = False) ],
[ tdTestDirCreate(sUser = sUser, sPassword = sPassword, sDirectory = '../../' ),
tdTestResult(fRc = False) ],
[ tdTestDirCreate(sUser = sUser, sPassword = sPassword, sDirectory = 'z:\\' ),
tdTestResult(fRc = False) ],
[ tdTestDirCreate(sUser = sUser, sPassword = sPassword, sDirectory = '\\\\uncrulez\\foo' ),
tdTestResult(fRc = False) ],
# Creating directories.
[ tdTestDirCreate(sUser = sUser, sPassword = sPassword, sDirectory = sScratch ),
tdTestResult(fRc = False) ],
[ tdTestDirCreate(sUser = sUser, sPassword = sPassword, sDirectory = os.path.join(sScratch, 'foo\\bar\\baz'),
aFlags = [ vboxcon.DirectoryCreateFlag_Parents ] ),
tdTestResult(fRc = True) ],
[ tdTestDirCreate(sUser = sUser, sPassword = sPassword, sDirectory = os.path.join(sScratch, 'foo\\bar\\baz'),
aFlags = [ vboxcon.DirectoryCreateFlag_Parents ] ),
tdTestResult(fRc = True) ],
# Long (+ random) stuff.
[ tdTestDirCreate(sUser = sUser, sPassword = sPassword,
sDirectory = os.path.join(sScratch,
"".join(random.choice(string.ascii_lowercase) for i in range(32))) ),
tdTestResult(fRc = True) ],
[ tdTestDirCreate(sUser = sUser, sPassword = sPassword,
sDirectory = os.path.join(sScratch,
"".join(random.choice(string.ascii_lowercase) for i in range(128))) ),
tdTestResult(fRc = True) ],
# The following two should fail on Windows (paths too long). Both should time out.
[ tdTestDirCreate(sUser = sUser, sPassword = sPassword,
sDirectory = os.path.join(sScratch,
"".join(random.choice(string.ascii_lowercase) for i in range(255))) ),
tdTestResult(fRc = False) ],
[ tdTestDirCreate(sUser = sUser, sPassword = sPassword,
sDirectory = os.path.join(sScratch,
"".join(random.choice(string.ascii_lowercase) for i in range(1024)))
),
tdTestResult(fRc = False) ]
]);
else:
reporter.log('No OS-specific tests for non-Windows yet!');
fRc = True;
for (i, aTest) in enumerate(aaTests):
curTest = aTest[0]; # tdTestExec, use an index, later.
curRes = aTest[1]; # tdTestResult
reporter.log('Testing #%d, sDirectory="%s" ...' % (i, curTest.sDirectory));
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc, curGuestSession = curTest.createSession('testGuestCtrlDirCreate: Test #%d' % (i,));
if fRc is False:
reporter.error('Test #%d failed: Could not create session' % (i,));
break;
fRc = self.gctrlCreateDir(curTest, curRes, curGuestSession);
curTest.closeSession();
if fRc is False:
reporter.error('Test #%d failed' % (i,));
fRc = False;
break;
return (fRc, oTxsSession);
def testGuestCtrlDirCreateTemp(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914
"""
Tests creation of temporary directories.
"""
if oTestVm.isWindows():
sUser = "Administrator";
else:
sUser = "vbox";
sPassword = "password";
# if oTestVm.isWindows():
# sScratch = "C:\\Temp\\vboxtest\\testGuestCtrlDirCreateTemp\\";
aaTests = [];
if oTestVm.isWindows():
aaTests.extend([
# Invalid stuff.
[ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sDirectory = ''),
tdTestResult(fRc = False) ],
[ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sDirectory = 'C:\\Windows',
fMode = 1234),
tdTestResult(fRc = False) ],
[ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = '',
sDirectory = 'C:\\Windows', fMode = 1234),
tdTestResult(fRc = False) ],
[ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'xXx',
sDirectory = 'C:\\Windows', fMode = 0o700),
tdTestResult(fRc = False) ],
[ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'xxx',
sDirectory = 'C:\\Windows', fMode = 0o700),
tdTestResult(fRc = False) ],
# More unusual stuff.
[ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'foo',
sDirectory = 'z:\\'),
tdTestResult(fRc = False) ],
[ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'foo',
sDirectory = '\\\\uncrulez\\foo'),
tdTestResult(fRc = False) ],
# Non-existing stuff.
[ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'bar',
sDirectory = 'c:\\Apps\\nonexisting\\foo'),
tdTestResult(fRc = False) ],
# FIXME: Failing test. Non-Windows path.
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'bar',
# sDirectory = '/tmp/non/existing'),
# tdTestResult(fRc = False) ]
]);
else:
reporter.log('No OS-specific tests for non-Windows yet!');
# FIXME: Failing tests.
# aaTests.extend([
# Non-secure variants.
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'X',
# sDirectory = sScratch),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'X',
# sDirectory = sScratch),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch,
# fMode = 0o700),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch,
# fMode = 0o700),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch,
# fMode = 0o755),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch,
# fMode = 0o755),
# tdTestResult(fRc = True) ],
# Secure variants.
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch, fSecure = True),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch, fSecure = True),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch, fSecure = True),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch, fSecure = True),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch,
# fSecure = True, fMode = 0o700),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch,
# fSecure = True, fMode = 0o700),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch,
# fSecure = True, fMode = 0o755),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = 'XXX',
# sDirectory = sScratch,
# fSecure = True, fMode = 0o755),
# tdTestResult(fRc = True) ],
# Random stuff.
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword,
# sTemplate = "XXX-".join(random.choice(string.ascii_lowercase) for i in range(32)),
# sDirectory = sScratch,
# fSecure = True, fMode = 0o755),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = "".join('X' for i in range(32)),
# sDirectory = sScratch,
# fSecure = True, fMode = 0o755),
# tdTestResult(fRc = True) ],
# [ tdTestDirCreateTemp(sUser = sUser, sPassword = sPassword, sTemplate = "".join('X' for i in range(128)),
# sDirectory = sScratch,
# fSecure = True, fMode = 0o755),
# tdTestResult(fRc = True) ]
# ]);
fRc = True;
for (i, aTest) in enumerate(aaTests):
curTest = aTest[0]; # tdTestExec, use an index, later.
curRes = aTest[1]; # tdTestResult
reporter.log('Testing #%d, sTemplate="%s", fMode=%#o, path="%s", secure="%s" ...' %
(i, curTest.sTemplate, curTest.fMode, curTest.sDirectory, curTest.fSecure));
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc, curGuestSession = curTest.createSession('testGuestCtrlDirCreateTemp: Test #%d' % (i,));
if fRc is False:
reporter.error('Test #%d failed: Could not create session' % (i,));
break;
sDirTemp = "";
try:
sDirTemp = curGuestSession.directoryCreateTemp(curTest.sTemplate, curTest.fMode,
curTest.sDirectory, curTest.fSecure);
except:
if curRes.fRc is True:
reporter.errorXcpt('Creating temp directory "%s" failed:' % (curTest.sDirectory,));
fRc = False;
break;
else:
reporter.logXcpt('Creating temp directory "%s" failed expectedly, skipping:' % (curTest.sDirectory,));
curTest.closeSession();
if sDirTemp != "":
reporter.log2('Temporary directory is: %s' % (sDirTemp,));
if self.oTstDrv.fpApiVer >= 5.0:
fExists = curGuestSession.directoryExists(sDirTemp, False);
else:
fExists = curGuestSession.directoryExists(sDirTemp);
if fExists is False:
reporter.error('Test #%d failed: Temporary directory "%s" does not exist' % (i, sDirTemp));
fRc = False;
break;
return (fRc, oTxsSession);
def testGuestCtrlDirRead(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914
"""
Tests opening and reading (enumerating) guest directories.
"""
if oTestVm.isWindows():
sUser = "Administrator";
else:
sUser = "vbox";
sPassword = "password";
aaTests = [];
if oTestVm.isWindows():
aaTests.extend([
# Invalid stuff.
[ tdTestDirRead(sUser = sUser, sPassword = sPassword, sDirectory = ''),
tdTestResultDirRead(fRc = False) ],
[ tdTestDirRead(sUser = sUser, sPassword = sPassword, sDirectory = 'C:\\Windows', aFlags = [ 1234 ]),
tdTestResultDirRead(fRc = False) ],
[ tdTestDirRead(sUser = sUser, sPassword = sPassword, sDirectory = 'C:\\Windows', sFilter = '*.foo'),
tdTestResultDirRead(fRc = False) ],
# More unusual stuff.
[ tdTestDirRead(sUser = sUser, sPassword = sPassword, sDirectory = 'z:\\'),
tdTestResultDirRead(fRc = False) ],
[ tdTestDirRead(sUser = sUser, sPassword = sPassword, sDirectory = '\\\\uncrulez\\foo'),
tdTestResultDirRead(fRc = False) ],
# Non-existing stuff.
[ tdTestDirRead(sUser = sUser, sPassword = sPassword, sDirectory = 'c:\\Apps\\nonexisting'),
tdTestResultDirRead(fRc = False) ],
[ tdTestDirRead(sUser = sUser, sPassword = sPassword, sDirectory = 'c:\\Apps\\testDirRead'),
tdTestResultDirRead(fRc = False) ]
]);
if oTestVm.sVmName == 'tst-xppro':
aaTests.extend([
# Reading directories.
[ tdTestDirRead(sUser = sUser, sPassword = sPassword, sDirectory = '../../Windows/Fonts'),
tdTestResultDirRead(fRc = True, numFiles = 191) ],
[ tdTestDirRead(sUser = sUser, sPassword = sPassword, sDirectory = 'c:\\Windows\\Help'),
tdTestResultDirRead(fRc = True, numDirs = 13, numFiles = 569) ],
[ tdTestDirRead(sUser = sUser, sPassword = sPassword, sDirectory = 'c:\\Windows\\Web'),
tdTestResultDirRead(fRc = True, numDirs = 3, numFiles = 55) ]
]);
else:
reporter.log('No OS-specific tests for non-Windows yet!');
fRc = True;
for (i, aTest) in enumerate(aaTests):
curTest = aTest[0]; # tdTestExec, use an index, later.
curRes = aTest[1]; # tdTestResult
reporter.log('Testing #%d, dir="%s" ...' % (i, curTest.sDirectory));
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc, curGuestSession = curTest.createSession('testGuestCtrlDirRead: Test #%d' % (i,));
if fRc is False:
reporter.error('Test #%d failed: Could not create session' % (i,));
break;
(fRc2, cDirs, cFiles) = self.gctrlReadDir(curTest, curRes, curGuestSession);
curTest.closeSession();
reporter.log2('Test #%d: Returned %d directories, %d files total' % (i, cDirs, cFiles));
if fRc2 is curRes.fRc:
if fRc2 is True:
if curRes.numFiles != cFiles:
reporter.error('Test #%d failed: Got %d files, expected %d' % (i, cFiles, curRes.numFiles));
fRc = False;
break;
if curRes.numDirs != cDirs:
reporter.error('Test #%d failed: Got %d directories, expected %d' % (i, cDirs, curRes.numDirs));
fRc = False;
break;
else:
reporter.error('Test #%d failed: Got %s, expected %s' % (i, fRc2, curRes.fRc));
fRc = False;
break;
return (fRc, oTxsSession);
def testGuestCtrlFileRemove(self, oSession, oTxsSession, oTestVm):
"""
Tests removing guest files.
"""
if oTestVm.isWindows():
sUser = "Administrator";
else:
sUser = "vbox";
sPassword = "password";
aaTests = [];
if oTestVm.isWindows():
aaTests.extend([
# Invalid stuff.
[ tdTestFileRemove(sUser = sUser, sPassword = sPassword, sFile = ''),
tdTestResult(fRc = False) ],
[ tdTestFileRemove(sUser = sUser, sPassword = sPassword, sFile = 'C:\\Windows'),
tdTestResult(fRc = False) ],
[ tdTestFileRemove(sUser = sUser, sPassword = sPassword, sFile = 'C:\\Windows'),
tdTestResult(fRc = False) ],
# More unusual stuff.
[ tdTestFileRemove(sUser = sUser, sPassword = sPassword, sFile = 'z:\\'),
tdTestResult(fRc = False) ],
[ tdTestFileRemove(sUser = sUser, sPassword = sPassword, sFile = '\\\\uncrulez\\foo'),
tdTestResult(fRc = False) ],
# Non-existing stuff.
[ tdTestFileRemove(sUser = sUser, sPassword = sPassword, sFile = 'c:\\Apps\\nonexisting'),
tdTestResult(fRc = False) ],
[ tdTestFileRemove(sUser = sUser, sPassword = sPassword, sFile = 'c:\\Apps\\testFileRemove'),
tdTestResult(fRc = False) ],
# Try to delete system files.
[ tdTestFileRemove(sUser = sUser, sPassword = sPassword, sFile = 'c:\\pagefile.sys'),
tdTestResult(fRc = False) ],
[ tdTestFileRemove(sUser = sUser, sPassword = sPassword, sFile = 'c:\\Windows\\kernel32.sys'),
tdTestResult(fRc = False) ]
]);
if oTestVm.sVmName == 'tst-xppro':
aaTests.extend([
# Try delete some unimportant media stuff.
[ tdTestFileRemove(sUser = sUser, sPassword = sPassword, sFile = 'c:\\Windows\\Media\\chimes.wav'),
tdTestResult(fRc = True) ],
# Second attempt should fail.
[ tdTestFileRemove(sUser = sUser, sPassword = sPassword, sFile = 'c:\\Windows\\Media\\chimes.wav'),
tdTestResult(fRc = False) ],
[ tdTestFileRemove(sUser = sUser, sPassword = sPassword, sFile = 'c:\\Windows\\Media\\chord.wav'),
tdTestResult(fRc = True) ],
[ tdTestFileRemove(sUser = sUser, sPassword = sPassword, sFile = 'c:\\Windows\\Media\\chord.wav'),
tdTestResult(fRc = False) ]
]);
else:
reporter.log('No OS-specific tests for non-Windows yet!');
fRc = True;
for (i, aTest) in enumerate(aaTests):
curTest = aTest[0]; # tdTestExec, use an index, later.
curRes = aTest[1]; # tdTestResult
reporter.log('Testing #%d, file="%s" ...' % (i, curTest.sFile));
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc, curGuestSession = curTest.createSession('testGuestCtrlFileRemove: Test #%d' % (i,));
if fRc is False:
reporter.error('Test #%d failed: Could not create session' % (i,));
break;
try:
if self.oTstDrv.fpApiVer >= 5.0:
curGuestSession.fsObjRemove(curTest.sFile);
else:
curGuestSession.fileRemove(curTest.sFile);
except:
if curRes.fRc is True:
reporter.errorXcpt('Removing file "%s" failed:' % (curTest.sFile,));
fRc = False;
break;
else:
reporter.logXcpt('Removing file "%s" failed expectedly, skipping:' % (curTest.sFile,));
curTest.closeSession();
return (fRc, oTxsSession);
def testGuestCtrlFileStat(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914
"""
Tests querying file information through stat.
"""
# Basic stuff, existing stuff.
aoTests = [
tdTestSessionEx([ tdStepStatDir('.'),
tdStepStatDir('..'),
]),
];
if oTestVm.isWindows():
aoTests += [ tdTestSessionEx([ tdStepStatDir('C:\\Windows'),
tdStepStatDir('C:\\Windows\\System32'),
tdStepStatDir('C:\\Windows\\System32\\'),
tdStepStatDir('C:\\Windows\\System32\\.'),
tdStepStatDir('C:\\Windows\\System32\\.\\'),
tdStepStatDir('C:\\Windows\\System32\\..'),
tdStepStatDir('C:\\Windows\\System32\\..\\'),
tdStepStatDir('C:\\Windows\\System32\\..\\\\'),
tdStepStatDir('C:\\Windows\\System32\\\\..\\\\'),
tdStepStatDir('C:/Windows/System32'),
tdStepStatDir('C:/Windows/System32/'),
tdStepStatDir('c:/winDowS/sYsTeM32/'),
tdStepStatDir('C:/Windows/System32/.'),
tdStepStatDir('C:/Windows/System32/./'),
tdStepStatDir('C:/Windows/System32/..'),
tdStepStatDir('C:/Windows/System32/../'),
tdStepStatDir('C:/Windows/System32/..//'),
tdStepStatDir('C:/Windows/System32//..//'),
tdStepStatFile('C:\\Windows\\System32\\kernel32.dll'),
tdStepStatFile('C:/Windows/System32/kernel32.dll')
]) ];
elif oTestVm.isOS2():
aoTests += [ tdTestSessionEx([ tdStepStatDir('C:\\OS2'),
tdStepStatDir('C:\\OS2\\DLL'),
tdStepStatDir('C:\\OS2\\DLL\\'),
tdStepStatDir('C:/OS2/DLL'),
tdStepStatDir('c:/OS2/DLL'),
tdStepStatDir('c:/OS2/DLL/'),
tdStepStatFile('C:\\CONFIG.SYS'),
tdStepStatFile('C:\\OS2\\DLL\\DOSCALL1.DLL'),
]) ];
else: # generic unix.
aoTests += [ tdTestSessionEx([ tdStepStatDir('/'),
tdStepStatDir('///'),
tdStepStatDir('/usr/bin/.'),
tdStepStatDir('/usr/bin/./'),
tdStepStatDir('/usr/bin/..'),
tdStepStatDir('/usr/bin/../'),
tdStepStatFile('/bin/ls'),
tdStepStatFile('/bin/cp'),
tdStepStatFile('/bin/date'),
]) ];
# Non-existing stuff.
if oTestVm.isWindows() or oTestVm.isOS2():
aoTests += [ tdTestSessionEx([ tdStepStatFileNotFound('C:\\NoSuchFileOrDirectory', ),
tdStepStatPathNotFound('C:\\NoSuchDirectory\\'),
tdStepStatPathNotFound('C:/NoSuchDirectory/'),
tdStepStatPathNotFound('C:\\NoSuchDirectory\\.'),
tdStepStatPathNotFound('C:/NoSuchDirectory/.'),
tdStepStatPathNotFound('C:\\NoSuchDirectory\\NoSuchFileOrDirectory'),
tdStepStatPathNotFound('C:/NoSuchDirectory/NoSuchFileOrDirectory'),
tdStepStatPathNotFound('C:/NoSuchDirectory/NoSuchFileOrDirectory/'),
tdStepStatPathNotFound('N:\\'), # ASSUMES nothing mounted on N:!
tdStepStatPathNotFound('\\\\NoSuchUncServerName\\NoSuchShare'),
]) ];
else: # generic unix.
aoTests += [ tdTestSessionEx([ tdStepStatFileNotFound('/NoSuchFileOrDirectory', ),
tdStepStatFileNotFound('/bin/NoSuchFileOrDirectory'),
tdStepStatPathNotFound('/NoSuchDirectory/'),
tdStepStatPathNotFound('/NoSuchDirectory/.'),
]) ];
# Invalid parameter check.
aoTests += [ tdTestSessionEx([ tdStepStat('', vbox.ComError.E_INVALIDARG), ]), ];
# Some test VM specific tests.
if oTestVm.sVmName == 'tst-xppro':
aoTests += [ tdTestSessionEx([ tdStepStatFileSize('c:\\Windows\\system32\\kernel32.dll', 926720), ]) ];
#
# Execute the tests.
#
return tdTestSessionEx.executeListTestSessions(aoTests, self.oTstDrv, oSession, oTxsSession, oTestVm, 'FsStat');
def testGuestCtrlFileRead(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914
"""
Tests reading from guest files.
"""
if oTestVm.isWindows():
sUser = "Administrator";
else:
sUser = "vbox";
sPassword = "password";
if oTxsSession.syncMkDir('${SCRATCH}/testGuestCtrlFileRead') is False:
reporter.error('Could not create scratch directory on guest');
return (False, oTxsSession);
aaTests = [];
aaTests.extend([
# Invalid stuff.
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, cbToReadWrite = 0),
tdTestResultFileReadWrite(fRc = False) ],
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = ''),
tdTestResultFileReadWrite(fRc = False) ],
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = 'non-existing.file'),
tdTestResultFileReadWrite(fRc = False) ],
# Wrong open mode.
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = 'non-existing.file', \
sOpenMode = 'rt', sDisposition = 'oe'),
tdTestResultFileReadWrite(fRc = False) ],
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = '\\\\uncrulez\\non-existing.file', \
sOpenMode = 'tr', sDisposition = 'oe'),
tdTestResultFileReadWrite(fRc = False) ],
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = '../../non-existing.file', \
sOpenMode = 'wr', sDisposition = 'oe'),
tdTestResultFileReadWrite(fRc = False) ],
# Wrong disposition.
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = 'non-existing.file', \
sOpenMode = 'r', sDisposition = 'e'),
tdTestResultFileReadWrite(fRc = False) ],
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = '\\\\uncrulez\\non-existing.file', \
sOpenMode = 'r', sDisposition = 'o'),
tdTestResultFileReadWrite(fRc = False) ],
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = '../../non-existing.file', \
sOpenMode = 'r', sDisposition = 'c'),
tdTestResultFileReadWrite(fRc = False) ],
# Opening non-existing file when it should exist.
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = 'non-existing.file', \
sOpenMode = 'r', sDisposition = 'oe'),
tdTestResultFileReadWrite(fRc = False) ],
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = '\\\\uncrulez\\non-existing.file', \
sOpenMode = 'r', sDisposition = 'oe'),
tdTestResultFileReadWrite(fRc = False) ],
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = '../../non-existing.file', \
sOpenMode = 'r', sDisposition = 'oe'),
tdTestResultFileReadWrite(fRc = False) ]
]);
if oTestVm.isWindows():
aaTests.extend([
# Try to create a file which must not exist yet -- it does exist, so this must fail.
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = 'C:\\Windows\\System32\\calc.exe', \
sOpenMode = 'w', sDisposition = 'ce'),
tdTestResultFileReadWrite(fRc = False) ],
# Open a file which must exist.
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = 'C:\\Windows\\System32\\kernel32.dll', \
sOpenMode = 'r', sDisposition = 'oe'),
tdTestResultFileReadWrite(fRc = True) ],
# Try truncating a file which already is opened with a different sharing mode (and thus should fail).
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = 'C:\\Windows\\System32\\kernel32.dll', \
sOpenMode = 'w', sDisposition = 'ot'),
tdTestResultFileReadWrite(fRc = False) ]
]);
if oTestVm.sKind == "WindowsXP":
aaTests.extend([
# Reading from beginning.
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = 'C:\\Windows\\System32\\eula.txt', \
sOpenMode = 'r', sDisposition = 'oe', cbToReadWrite = 33),
tdTestResultFileReadWrite(fRc = True, aBuf = 'Microsoft Windows XP Professional', \
cbProcessed = 33, cbOffset = 33) ],
# Reading from offset.
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = 'C:\\Windows\\System32\\eula.txt', \
sOpenMode = 'r', sDisposition = 'oe', cbOffset = 17782, cbToReadWrite = 26),
tdTestResultFileReadWrite(fRc = True, aBuf = 'LINKS TO THIRD PARTY SITES', \
cbProcessed = 26, cbOffset = 17782 + 26) ]
]);
fRc = True;
for (i, aTest) in enumerate(aaTests):
curTest = aTest[0]; # tdTestFileReadWrite, use an index, later.
curRes = aTest[1]; # tdTestResult
reporter.log('Testing #%d, sFile="%s", cbToReadWrite=%d, sOpenMode="%s", sDisposition="%s", cbOffset=%d ...' % \
(i, curTest.sFile, curTest.cbToReadWrite, curTest.sOpenMode, curTest.sDisposition, curTest.cbOffset));
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc, curGuestSession = curTest.createSession('testGuestCtrlFileRead: Test #%d' % (i,));
if fRc is False:
reporter.error('Test #%d failed: Could not create session' % (i,));
break;
try:
if curTest.cbOffset > 0: # In API 5.0+ fileOpenEx no longer takes an offset parameter, so open first and seek explicitly.
if self.oTstDrv.fpApiVer >= 5.0:
curFile = curGuestSession.fileOpenEx(curTest.sFile, curTest.getAccessMode(), curTest.getOpenAction(),
curTest.getSharingMode(), curTest.lCreationMode, []);
curFile.seek(curTest.cbOffset, vboxcon.FileSeekOrigin_Begin);
else:
curFile = curGuestSession.fileOpenEx(curTest.sFile, curTest.sOpenMode, curTest.sDisposition, \
curTest.sSharingMode, curTest.lCreationMode, curTest.cbOffset);
curOffset = long(curFile.offset);
resOffset = long(curTest.cbOffset);
if curOffset != resOffset:
reporter.error('Test #%d failed: Initial offset on open does not match: Got %d, expected %d' \
% (i, curOffset, resOffset));
fRc = False;
else:
if self.oTstDrv.fpApiVer >= 5.0:
curFile = curGuestSession.fileOpen(curTest.sFile, curTest.getAccessMode(), curTest.getOpenAction(),
curTest.lCreationMode);
else:
curFile = curGuestSession.fileOpen(curTest.sFile, curTest.sOpenMode, curTest.sDisposition, \
curTest.lCreationMode);
if fRc \
and curTest.cbToReadWrite > 0:
## @todo Split this up in 64K reads. Later.
## @todo Test timeouts.
aBufRead = curFile.read(curTest.cbToReadWrite, 30 * 1000);
if curRes.cbProcessed > 0 \
and curRes.cbProcessed != len(aBufRead):
reporter.error('Test #%d failed: Read buffer length does not match: Got %d, expected %d' \
% (i, len(aBufRead), curRes.cbProcessed));
fRc = False;
if fRc:
if curRes.aBuf is not None \
and bytes(curRes.aBuf) != bytes(aBufRead):
reporter.error('Test #%d failed: Got buffer\n%s (%d bytes), expected\n%s (%d bytes)' \
% (i, map(hex, map(ord, aBufRead)), len(aBufRead), \
map(hex, map(ord, curRes.aBuf)), len(curRes.aBuf)));
reporter.error('Test #%d failed: Got buffer\n%s, expected\n%s' \
% (i, aBufRead, curRes.aBuf));
fRc = False;
# Test final offset.
curOffset = long(curFile.offset);
resOffset = long(curRes.cbOffset);
if curOffset != resOffset:
reporter.error('Test #%d failed: Final offset does not match: Got %d, expected %d' \
% (i, curOffset, resOffset));
fRc = False;
curFile.close();
except:
reporter.logXcpt('Opening "%s" failed:' % (curTest.sFile,));
fRc = False;
curTest.closeSession();
if fRc != curRes.fRc:
reporter.error('Test #%d failed: Got %s, expected %s' % (i, fRc, curRes.fRc));
fRc = False;
break;
return (fRc, oTxsSession);
def testGuestCtrlFileWrite(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914
"""
Tests writing to guest files.
"""
if oTestVm.isWindows():
sUser = "Administrator";
else:
sUser = "vbox";
sPassword = "password";
if oTestVm.isWindows():
sScratch = "C:\\Temp\\vboxtest\\testGuestCtrlFileWrite\\";
if oTxsSession.syncMkDir('${SCRATCH}/testGuestCtrlFileWrite') is False:
reporter.error('Could not create scratch directory on guest');
return (False, oTxsSession);
aaTests = [];
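# Use 512 bytes of random data for the write and read-back verification below.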
cScratchBuf = 512;
aScratchBuf = array('b', [random.randint(-128, 127) for i in range(cScratchBuf)]);
aaTests.extend([
# Write to a non-existing file.
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = sScratch + 'testGuestCtrlFileWrite.txt', \
sOpenMode = 'w+', sDisposition = 'ce', cbToReadWrite = cScratchBuf,
aBuf = aScratchBuf),
tdTestResultFileReadWrite(fRc = True, aBuf = aScratchBuf, \
cbProcessed = cScratchBuf, cbOffset = cScratchBuf) ]
]);
aScratchBuf2 = array('b', [random.randint(-128, 127) for i in range(cScratchBuf)]);
aaTests.extend([
# Append the same amount of data to the just created file.
[ tdTestFileReadWrite(sUser = sUser, sPassword = sPassword, sFile = sScratch + 'testGuestCtrlFileWrite.txt', \
sOpenMode = 'w+', sDisposition = 'oa', cbToReadWrite = cScratchBuf,
cbOffset = cScratchBuf, aBuf = aScratchBuf2),
tdTestResultFileReadWrite(fRc = True, aBuf = aScratchBuf2, \
cbProcessed = cScratchBuf, cbOffset = cScratchBuf * 2) ],
]);
fRc = True;
for (i, aTest) in enumerate(aaTests):
curTest = aTest[0]; # tdTestFileReadWrite, use an index, later.
curRes = aTest[1]; # tdTestResult
reporter.log('Testing #%d, sFile="%s", cbToReadWrite=%d, sOpenMode="%s", sDisposition="%s", cbOffset=%d ...' %
(i, curTest.sFile, curTest.cbToReadWrite, curTest.sOpenMode, curTest.sDisposition, curTest.cbOffset));
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc, curGuestSession = curTest.createSession('testGuestCtrlFileWrite: Test #%d' % (i,));
if fRc is False:
reporter.error('Test #%d failed: Could not create session' % (i,));
break;
try:
if curTest.cbOffset > 0: # In API 5.0+ fileOpenEx no longer takes an offset parameter, so open first and seek explicitly.
if self.oTstDrv.fpApiVer >= 5.0:
curFile = curGuestSession.fileOpenEx(curTest.sFile, curTest.getAccessMode(), curTest.getOpenAction(),
curTest.getSharingMode(), []);
curFile.seek(curTest.cbOffset, vboxcon.FileSeekOrigin_Begin);
else:
curFile = curGuestSession.fileOpenEx(curTest.sFile, curTest.sOpenMode, curTest.sDisposition,
curTest.sSharingMode, curTest.lCreationMode, curTest.cbOffset);
curOffset = long(curFile.offset);
resOffset = long(curTest.cbOffset);
if curOffset != resOffset:
reporter.error('Test #%d failed: Initial offset on open does not match: Got %d, expected %d'
% (i, curOffset, resOffset));
fRc = False;
else:
if self.oTstDrv.fpApiVer >= 5.0:
curFile = curGuestSession.fileOpen(curTest.sFile, curTest.getAccessMode(), curTest.getOpenAction(),
curTest.lCreationMode);
else:
curFile = curGuestSession.fileOpen(curTest.sFile, curTest.sOpenMode, curTest.sDisposition,
curTest.lCreationMode);
if fRc and curTest.cbToReadWrite > 0:
## @todo Split this up in 64K writes. Later.
## @todo Test timeouts.
cBytesWritten = curFile.write(curTest.aBuf, 30 * 1000);
if curRes.cbProcessed > 0 \
and curRes.cbProcessed != cBytesWritten:
reporter.error('Test #%d failed: Written buffer length does not match: Got %d, expected %d'
% (i, cBytesWritten, curRes.cbProcessed));
fRc = False;
if fRc:
# Verify written content by seeking back to the initial offset and
# re-read & compare the written data.
try:
if self.oTstDrv.fpApiVer >= 5.0:
curFile.seek(-(curTest.cbToReadWrite), vboxcon.FileSeekOrigin_Current);
else:
curFile.seek(-(curTest.cbToReadWrite), vboxcon.FileSeekType_Current);
except:
reporter.logXcpt('Seeking back to initial write position failed:');
fRc = False;
if fRc and long(curFile.offset) != curTest.cbOffset:
reporter.error('Test #%d failed: Initial write position does not match current position, '
'got %d, expected %d' % (i, long(curFile.offset), curTest.cbOffset));
fRc = False;
if fRc:
aBufRead = curFile.read(curTest.cbToReadWrite, 30 * 1000);
if len(aBufRead) != curTest.cbToReadWrite:
reporter.error('Test #%d failed: Got buffer length %d, expected %d'
% (i, len(aBufRead), curTest.cbToReadWrite));
fRc = False;
if fRc \
and curRes.aBuf is not None \
and curRes.aBuf != aBufRead:
reporter.error('Test #%d failed: Got buffer\n%s, expected\n%s'
% (i, aBufRead, curRes.aBuf));
fRc = False;
# Test final offset.
curOffset = long(curFile.offset);
resOffset = long(curRes.cbOffset);
if curOffset != resOffset:
reporter.error('Test #%d failed: Final offset does not match: Got %d, expected %d'
% (i, curOffset, resOffset));
fRc = False;
curFile.close();
except:
reporter.logXcpt('Opening "%s" failed:' % (curTest.sFile,));
fRc = False;
curTest.closeSession();
if fRc != curRes.fRc:
reporter.error('Test #%d failed: Got %s, expected %s' % (i, fRc, curRes.fRc));
fRc = False;
break;
return (fRc, oTxsSession);
def testGuestCtrlCopyTo(self, oSession, oTxsSession, oTestVm):
"""
Tests copying files from host to the guest.
"""
if oTestVm.isWindows():
sUser = "Administrator";
sScratchGst = "C:\\Temp\\vboxtest\\testGuestCtrlCopyTo\\";
sScratchGstNotExist = "C:\\does-not-exist\\";
sScratchGstInvalid = "?*|invalid-name?*|";
else:
sUser = "vbox";
sScratchGst = "/tmp/testGuestCtrlCopyTo/";
sScratchGstNotExist = "/tmp/does-not-exist/";
sScratchGstInvalid = "/";
sPassword = "password";
if oTxsSession.syncMkDir('${SCRATCH}/testGuestCtrlCopyTo') is False:
reporter.error('Could not create scratch directory on guest');
return (False, oTxsSession);
# Some stupid trickery to guess the location of the iso.
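# Look next to this script first, then walk up to ten parent directories checking for a validationkit/ or testsuite/ ISO.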
sVBoxValidationKitISO = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../VBoxValidationKit.iso'));
if not os.path.isfile(sVBoxValidationKitISO):
sVBoxValidationKitISO = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../VBoxTestSuite.iso'));
if not os.path.isfile(sVBoxValidationKitISO):
sCur = os.getcwd();
for i in range(0, 10):
sVBoxValidationKitISO = os.path.join(sCur, 'validationkit/VBoxValidationKit.iso');
if os.path.isfile(sVBoxValidationKitISO):
break;
sVBoxValidationKitISO = os.path.join(sCur, 'testsuite/VBoxTestSuite.iso');
if os.path.isfile(sVBoxValidationKitISO):
break;
sCur = os.path.abspath(os.path.join(sCur, '..'));
if i is None: pass; # shut up pychecker/pylint.
if os.path.isfile(sVBoxValidationKitISO):
reporter.log('Validation Kit .ISO found at: %s' % (sVBoxValidationKitISO,));
else:
reporter.log('Warning: Validation Kit .ISO not found -- some tests might fail');
aaTests = [];
if oTestVm.isWindows():
aaTests.extend([
# Destination missing.
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = ''),
tdTestResult(fRc = False) ],
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = '/placeholder',
aFlags = [ 80 ] ),
tdTestResult(fRc = False) ],
# Source missing.
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sDst = ''),
tdTestResult(fRc = False) ],
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sDst = '/placeholder',
aFlags = [ 80 ] ),
tdTestResult(fRc = False) ],
# Testing DirectoryCopyFlag flags.
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = sVBoxValidationKitISO,
sDst = sScratchGstInvalid, aFlags = [ 80 ] ),
tdTestResult(fRc = False) ],
# Testing FileCopyFlag flags.
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = sVBoxValidationKitISO,
sDst = sScratchGstInvalid, aFlags = [ 80 ] ),
tdTestResult(fRc = False) ],
# Nothing to copy (source and/or destination is empty).
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = 'z:\\'),
tdTestResult(fRc = False) ],
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = '\\\\uncrulez\\foo'),
tdTestResult(fRc = False) ],
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = 'non-exist',
sDst = os.path.join(sScratchGst, 'non-exist.dll')),
tdTestResult(fRc = False) ]
]);
#
# Single file handling.
#
if self.oTstDrv.fpApiVer > 5.2:
aaTests.extend([
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = sVBoxValidationKitISO,
sDst = sScratchGstInvalid),
tdTestResult(fRc = False) ],
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = sVBoxValidationKitISO,
sDst = sScratchGstNotExist),
tdTestResult(fRc = False) ],
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = sVBoxValidationKitISO,
sDst = sScratchGstNotExist),
tdTestResult(fRc = False) ],
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = sVBoxValidationKitISO,
sDst = os.path.join(sScratchGstNotExist, 'renamedfile.dll')),
tdTestResult(fRc = False) ],
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = sVBoxValidationKitISO,
sDst = os.path.join(sScratchGst, 'HostGuestAdditions.iso')),
tdTestResult(fRc = True) ],
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = sVBoxValidationKitISO,
sDst = os.path.join(sScratchGst, 'HostGuestAdditions.iso')),
tdTestResult(fRc = True) ],
# Note: Copying files into directories via Main is supported only in versions > 5.2.
# Destination is a directory.
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = sVBoxValidationKitISO,
sDst = sScratchGst),
tdTestResult(fRc = True) ],
# Copy over file again into same directory (overwrite).
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = sVBoxValidationKitISO,
sDst = sScratchGst),
tdTestResult(fRc = True) ]
]);
aaTests.extend([
# Copy the same file over to the guest, but this time store the file into the former
# file's ADS (Alternate Data Stream). Only works on Windows, of course.
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = sVBoxValidationKitISO,
sDst = os.path.join(sScratchGst, 'HostGuestAdditions.iso:ADS-Test')),
tdTestResult(fRc = True) ]
]);
#
# Directory handling.
#
## @todo r=michaln disabled completely, can fill up the guest disk or fail without giving a reason
if self.oTstDrv.fpApiVer > 6.0: # Copying directories via Main is supported only in versions > 5.2.
if self.oTstDrv.sHost == "win":
sSystemRoot = os.getenv('SystemRoot', 'C:\\Windows');
aaTests.extend([
# Copying directories with contain files we don't have read access to.
## @todo r=klaus disabled, because this can fill up the guest disk, making other tests fail,
## additionally it's not really clear if this fails reliably on all Windows versions, even
## the old ones like XP with a "proper" administrator.
#[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = os.path.join(sSystemRoot, 'security'),
# sDst = sScratchGst, aFlags = [ vboxcon.DirectoryCopyFlag_CopyIntoExisting ]),
# tdTestResult(fRc = False) ],
# Copying directories with regular files.
[ tdTestCopyTo(sUser = sUser, sPassword = sPassword, sSrc = os.path.join(sSystemRoot, 'Help'),
sDst = sScratchGst, aFlags = [ vboxcon.DirectoryCopyFlag_CopyIntoExisting ]),
tdTestResult(fRc = True) ]
]);
else:
reporter.log('No OS-specific tests for non-Windows yet!');
fRc = True;
for (i, aTest) in enumerate(aaTests):
curTest = aTest[0]; # tdTestExec, use an index, later.
curRes = aTest[1]; # tdTestResult
reporter.log('Testing #%d, sSrc=%s, sDst=%s, aFlags=%s ...' % \
(i, curTest.sSrc, curTest.sDst, curTest.aFlags));
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc, curGuestSession = curTest.createSession('testGuestCtrlCopyTo: Test #%d' % (i,));
if fRc is False:
reporter.error('Test #%d failed: Could not create session' % (i,));
break;
fRc2 = False;
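# Directories and single files use different copy APIs; decide based on what the host-side source actually is.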
if os.path.isdir(curTest.sSrc):
try:
curProgress = curGuestSession.directoryCopyToGuest(curTest.sSrc, curTest.sDst, curTest.aFlags);
if curProgress is not None:
oProgress = vboxwrappers.ProgressWrapper(curProgress, self.oTstDrv.oVBoxMgr, self.oTstDrv, \
"gctrlDirCopyTo");
try:
oProgress.wait();
if not oProgress.isSuccess():
oProgress.logResult(fIgnoreErrors = True);
fRc = False;
except:
reporter.logXcpt('Waiting exception for sSrc="%s", sDst="%s":' % (curTest.sSrc, curTest.sDst));
fRc2 = False;
else:
reporter.error('No progress object returned');
fRc2 = False;
except:
fRc2 = False;
else:
fRc2 = self.gctrlCopyFileTo(curGuestSession, curTest.sSrc, curTest.sDst, curTest.aFlags);
curTest.closeSession();
if fRc2 is curRes.fRc:
## @todo Verify the copied results (size, checksum?).
pass;
else:
reporter.error('Test #%d failed: Got %s, expected %s' % (i, fRc2, curRes.fRc));
fRc = False;
break;
return (fRc, oTxsSession);
def testGuestCtrlCopyFrom(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914
"""
Tests copying files from guest to the host.
"""
if oTestVm.isWindows():
sUser = "Administrator";
else:
sUser = "vbox";
sPassword = "password";
sScratchHst = os.path.join(self.oTstDrv.sScratchPath, "testGctrlCopyFrom");
if self.oTstDrv.sHost == "win":
sScratchHstNotExist = sScratchHst + "\\does-not-exist\\";
sScratchHstNotExistChain = sScratchHst + "\\does\\not\\exist\\";
sScratchHstInvalid = "?*|invalid-name?*|";
else:
sScratchHstNotExist = sScratchHst + "/does-not-exist/";
sScratchHstNotExistChain = sScratchHst + "/does/not/exist/";
sScratchHstInvalid = "/";
try:
os.makedirs(sScratchHst);
except OSError as e:
if e.errno != errno.EEXIST:
reporter.error('Failed: Unable to create scratch directory \"%s\"' % (sScratchHst,));
return (False, oTxsSession);
reporter.log('Scratch path is: %s' % (sScratchHst,));
aaTests = [];
if oTestVm.isWindows():
aaTests.extend([
# Destination missing.
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = ''),
tdTestResult(fRc = False) ],
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'Something',
aFlags = [ 80 ] ),
tdTestResult(fRc = False) ],
# Source missing.
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sDst = ''),
tdTestResult(fRc = False) ],
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sDst = 'Something',
aFlags = [ 80 ] ),
tdTestResult(fRc = False) ],
# Testing DirectoryCopyFlag flags.
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\system32',
sDst = sScratchHstInvalid, aFlags = [ 80 ] ),
tdTestResult(fRc = False) ],
# Testing FileCopyFlag flags.
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\system32\\ole32.dll',
sDst = sScratchHstInvalid, aFlags = [ 80 ] ),
tdTestResult(fRc = False) ],
# Nothing to copy (sDst is empty / unreachable).
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'z:\\'),
tdTestResult(fRc = False) ],
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = '\\\\uncrulez\\foo'),
tdTestResult(fRc = False) ],
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'non-exist',
sDst = os.path.join(sScratchHst, 'non-exist.dll')),
tdTestResult(fRc = False) ]
]);
#
# Single file handling.
#
if self.oTstDrv.fpApiVer > 5.2:
aaTests.extend([
# Copying single files.
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\system32\\ole32.dll',
sDst = sScratchHstInvalid),
tdTestResult(fRc = False) ],
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\system32\\ole32.dll',
sDst = os.path.join(sScratchHstInvalid, 'renamedfile.dll')),
tdTestResult(fRc = False) ],
# Copy over file using a different destination name.
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\system32\\ole32.dll',
sDst = os.path.join(sScratchHst, 'renamedfile.dll')),
tdTestResult(fRc = True) ],
# Copy over same file (and overwrite existing one).
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\system32\\ole32.dll',
sDst = os.path.join(sScratchHst, 'renamedfile.dll')),
tdTestResult(fRc = True) ],
# Note: Copying files into directories via Main is supported only in versions > 5.2.
# Destination is a directory with a trailing slash (should work).
# See "cp" syntax.
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\system32\\ole32.dll',
sDst = sScratchHst + "/"),
tdTestResult(fRc = True) ],
# Destination is a directory (without a trailing slash, should also work).
# See "cp" syntax.
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\system32\\ole32.dll',
sDst = sScratchHst),
tdTestResult(fRc = True) ],
# Destination is a non-existing directory.
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\system32\\ole32.dll',
sDst = sScratchHstNotExist),
tdTestResult(fRc = False) ]
]);
#
# Directory handling.
#
if self.oTstDrv.fpApiVer > 5.2: # Copying directories via Main is supported only in versions > 5.2.
aaTests.extend([
# Copying entire directories (destination is "<sScratchHst>\Web").
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\Web',
sDst = sScratchHst),
tdTestResult(fRc = True) ],
# Repeat -- this time it should fail, as the destination directory already exists (and
# DirectoryCopyFlag_CopyIntoExisting is not specified).
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\Web',
sDst = sScratchHst + "/"),
tdTestResult(fRc = False) ],
# Next try with the DirectoryCopyFlag_CopyIntoExisting flag being set.
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\Web',
sDst = sScratchHst, aFlags = [ vboxcon.DirectoryCopyFlag_CopyIntoExisting ]),
tdTestResult(fRc = True) ],
# Ditto, with trailing slash.
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\Web',
sDst = sScratchHst + "/", aFlags = [ vboxcon.DirectoryCopyFlag_CopyIntoExisting ]),
tdTestResult(fRc = True) ],
# Copying contents of directories into a non-existing directory chain on the host, which should fail.
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\Web\\',
sDst = sScratchHstNotExistChain,
aFlags = [ vboxcon.DirectoryCopyFlag_CopyIntoExisting ]),
tdTestResult(fRc = False) ],
# Copying contents of directories into a non-existing directory on the host, which should succeed.
[ tdTestCopyFrom(sUser = sUser, sPassword = sPassword, sSrc = 'C:\\Windows\\Web\\',
sDst = sScratchHstNotExist,
aFlags = [ vboxcon.DirectoryCopyFlag_CopyIntoExisting ]),
tdTestResult(fRc = True) ]
]);
else:
reporter.log('No OS-specific tests for non-Windows yet!');
fRc = True;
for (i, aTest) in enumerate(aaTests):
curTest = aTest[0]; # tdTestExec, use an index, later.
curRes = aTest[1]; # tdTestResult
reporter.log('Testing #%d, sSrc="%s", sDst="%s", aFlags="%s" ...' % \
(i, curTest.sSrc, curTest.sDst, curTest.aFlags));
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc, curGuestSession = curTest.createSession('testGuestCtrlCopyFrom: Test #%d' % (i,));
if fRc is False:
reporter.error('Test #%d failed: Could not create session' % (i,));
break;
fRc2 = True;
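# Query the guest object type first so the right copy API (directory vs. file) can be used.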
try:
if self.oTstDrv.fpApiVer >= 5.0:
oFsInfo = curGuestSession.fsObjQueryInfo(curTest.sSrc, True); # fFollowSymlinks
else:
oFsInfo = curGuestSession.fileQueryInfo(curTest.sSrc);
if oFsInfo.type is vboxcon.FsObjType_Directory:
curProgress = curGuestSession.directoryCopyFromGuest(curTest.sSrc, curTest.sDst, curTest.aFlags);
if curProgress is not None:
oProgress = vboxwrappers.ProgressWrapper(curProgress, self.oTstDrv.oVBoxMgr, self.oTstDrv, \
"gctrlDirCopyFrom");
try:
oProgress.wait();
if not oProgress.isSuccess():
oProgress.logResult(fIgnoreErrors = True);
fRc2 = False;
except:
reporter.logXcpt('Waiting exception for sSrc="%s", sDst="%s":' % (curTest.sSrc, curTest.sDst));
fRc2 = False;
else:
reporter.error('No progress object returned');
fRc2 = False;
elif oFsInfo.type is vboxcon.FsObjType_File:
fRc2 = self.gctrlCopyFileFrom(curGuestSession, curTest.sSrc, curTest.sDst, curTest.aFlags);
else:
reporter.log2('Element "%s" not handled (yet), skipping' % oFsInfo.name);
except:
reporter.logXcpt('Query information exception for sSrc="%s", sDst="%s":' % (curTest.sSrc, curTest.sDst));
fRc2 = False;
curTest.closeSession();
if fRc2 is curRes.fRc:
## @todo Verify the copied results (size, checksum?).
pass;
else:
reporter.error('Test #%d failed: Got %s, expected %s' % (i, fRc2, curRes.fRc));
fRc = False;
break;
return (fRc, oTxsSession);
def testGuestCtrlUpdateAdditions(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914
"""
Tests updating the Guest Additions inside the guest.
"""
if oTestVm.isWindows():
sUser = "Administrator";
else:
sUser = "vbox";
sPassword = "password";
# Some stupid trickery to guess the location of the iso.
sVBoxValidationKitISO = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../VBoxValidationKit.iso'));
if not os.path.isfile(sVBoxValidationKitISO):
sVBoxValidationKitISO = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../VBoxTestSuite.iso'));
if not os.path.isfile(sVBoxValidationKitISO):
sCur = os.getcwd();
for i in range(0, 10):
sVBoxValidationKitISO = os.path.join(sCur, 'validationkit/VBoxValidationKit.iso');
if os.path.isfile(sVBoxValidationKitISO):
break;
sVBoxValidationKitISO = os.path.join(sCur, 'testsuite/VBoxTestSuite.iso');
if os.path.isfile(sVBoxValidationKitISO):
break;
sCur = os.path.abspath(os.path.join(sCur, '..'));
if i is None: pass; # shut up pychecker/pylint.
if os.path.isfile(sVBoxValidationKitISO):
reporter.log('Validation Kit .ISO found at: %s' % (sVBoxValidationKitISO,));
else:
reporter.log('Warning: Validation Kit .ISO not found -- some tests might fail');
sScratch = os.path.join(self.oTstDrv.sScratchPath, "testGctrlUpdateAdditions");
try:
os.makedirs(sScratch);
except OSError as e:
if e.errno != errno.EEXIST:
reporter.error('Failed: Unable to create scratch directory \"%s\"' % (sScratch,));
return (False, oTxsSession);
reporter.log('Scratch path is: %s' % (sScratch,));
aaTests = [];
if oTestVm.isWindows():
aaTests.extend([
# Source is missing.
[ tdTestUpdateAdditions(sUser = sUser, sPassword = sPassword, sSrc = ''),
tdTestResult(fRc = False) ],
# Wrong aFlags.
[ tdTestUpdateAdditions(sUser = sUser, sPassword = sPassword, sSrc = self.oTstDrv.getGuestAdditionsIso(),
aFlags = [ 1234 ]),
tdTestResult(fRc = False) ],
# Non-existing .ISO.
[ tdTestUpdateAdditions(sUser = sUser, sPassword = sPassword, sSrc = "non-existing.iso"),
tdTestResult(fRc = False) ],
# Wrong .ISO.
[ tdTestUpdateAdditions(sUser = sUser, sPassword = sPassword, sSrc = sVBoxValidationKitISO),
tdTestResult(fRc = False) ],
# The real thing.
[ tdTestUpdateAdditions(sUser = sUser, sPassword = sPassword, sSrc = self.oTstDrv.getGuestAdditionsIso()),
tdTestResult(fRc = True) ],
# Test the (optional) installer arguments. This will extract the
# installer into our guest's scratch directory.
[ tdTestUpdateAdditions(sUser = sUser, sPassword = sPassword, sSrc = self.oTstDrv.getGuestAdditionsIso(),
aArgs = [ '/extract', '/D=' + sScratch ]),
tdTestResult(fRc = True) ]
# Some debug ISO. Only enable locally.
#[ tdTestUpdateAdditions(sUser = sUser, sPassword = sPassword,
# sSrc = "V:\\Downloads\\VBoxGuestAdditions-r80354.iso"),
# tdTestResult(fRc = True) ]
]);
else:
reporter.log('No OS-specific tests for non-Windows yet!');
fRc = True;
for (i, aTest) in enumerate(aaTests):
curTest = aTest[0]; # tdTestExec, use an index, later.
curRes = aTest[1]; # tdTestResult
reporter.log('Testing #%d, sSrc="%s", aFlags="%s" ...' % \
(i, curTest.sSrc, curTest.aFlags));
curTest.setEnvironment(oSession, oTxsSession, oTestVm);
fRc, _ = curTest.createSession('Test #%d' % (i,));
if fRc is False:
reporter.error('Test #%d failed: Could not create session' % (i,));
break;
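# Kick off the Guest Additions update and wait on the returned progress object.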
try:
curProgress = curTest.oTest.oGuest.updateGuestAdditions(curTest.sSrc, curTest.aArgs, curTest.aFlags);
if curProgress is not None:
oProgress = vboxwrappers.ProgressWrapper(curProgress, self.oTstDrv.oVBoxMgr, self.oTstDrv, "gctrlUpGA");
try:
oProgress.wait();
if not oProgress.isSuccess():
oProgress.logResult(fIgnoreErrors = True);
fRc = False;
except:
reporter.logXcpt('Waiting exception for updating Guest Additions:');
fRc = False;
else:
reporter.error('No progress object returned');
fRc = False;
except:
# Just log, don't assume an error here (will be done in the main loop then).
reporter.logXcpt('Updating Guest Additions exception for sSrc="%s", aFlags="%s":' \
% (curTest.sSrc, curTest.aFlags));
fRc = False;
curTest.closeSession();
if fRc is curRes.fRc:
if fRc:
## @todo Verify if Guest Additions were really updated (build, revision, ...).
pass;
else:
reporter.error('Test #%d failed: Got %s, expected %s' % (i, fRc, curRes.fRc));
fRc = False;
break;
return (fRc, oTxsSession);
class tdAddGuestCtrl(vbox.TestDriver): # pylint: disable=R0902,R0904
"""
Guest control using VBoxService on the guest.
"""
def __init__(self):
vbox.TestDriver.__init__(self);
self.oTestVmSet = self.oTestVmManager.getStandardVmSet('nat');
self.fQuick = False; # Don't skip lengthy tests by default.
self.addSubTestDriver(SubTstDrvAddGuestCtrl(self));
#
# Overridden methods.
#
def showUsage(self):
"""
Shows the testdriver usage.
"""
rc = vbox.TestDriver.showUsage(self);
reporter.log('');
reporter.log('tdAddGuestCtrl Options:');
reporter.log(' --quick');
reporter.log(' Same as --virt-modes hwvirt --cpu-counts 1.');
return rc;
def parseOption(self, asArgs, iArg): # pylint: disable=R0912,R0915
"""
Parses the testdriver arguments from the command line.
"""
if asArgs[iArg] == '--quick':
self.parseOption(['--virt-modes', 'hwvirt'], 0);
self.parseOption(['--cpu-counts', '1'], 0);
self.fQuick = True;
else:
return vbox.TestDriver.parseOption(self, asArgs, iArg);
return iArg + 1;
def actionConfig(self):
if not self.importVBoxApi(): # So we can use the constant below.
return False;
eNic0AttachType = vboxcon.NetworkAttachmentType_NAT;
sGaIso = self.getGuestAdditionsIso();
return self.oTestVmSet.actionConfig(self, eNic0AttachType = eNic0AttachType, sDvdImage = sGaIso);
def actionExecute(self):
return self.oTestVmSet.actionExecute(self, self.testOneCfg);
#
# Test execution helpers.
#
def testOneCfg(self, oVM, oTestVm): # pylint: disable=R0915
"""
Runs the specified VM thru the tests.
Returns a success indicator on the general test execution. This is not
the actual test result.
"""
self.logVmInfo(oVM);
fRc = True;
oSession, oTxsSession = self.startVmAndConnectToTxsViaTcp(oTestVm.sVmName, fCdWait = False);
reporter.log("TxsSession: %s" % (oTxsSession,));
if oSession is not None:
self.addTask(oTxsSession);
fManual = False; # Manual override for local testing. (Committed version shall be False.)
if not fManual:
fRc, oTxsSession = self.aoSubTstDrvs[0].testIt(oTestVm, oSession, oTxsSession);
else:
fRc, oTxsSession = self.testGuestCtrlManual(oSession, oTxsSession, oTestVm);
# Cleanup.
self.removeTask(oTxsSession);
if not fManual:
self.terminateVmBySession(oSession);
else:
fRc = False;
return fRc;
def gctrlReportError(self, progress):
"""
Helper function to report an error of a
given progress object.
"""
if progress is None:
reporter.log('No progress object to print error for');
else:
errInfo = progress.errorInfo;
if errInfo:
reporter.log('%s' % (errInfo.text,));
return False;
def gctrlGetRemainingTime(self, msTimeout, msStart):
"""
Helper function to return the remaining time (in ms)
based on a timeout value and the start time (both in ms).
"""
if msTimeout == 0:
return 0xFFFFFFFE; # Wait forever.
msElapsed = base.timestampMilli() - msStart;
if msElapsed > msTimeout:
return 0; # No time left.
return msTimeout - msElapsed;
def testGuestCtrlManual(self, oSession, oTxsSession, oTestVm): # pylint: disable=R0914,R0915,W0613,W0612
"""
For manually testing certain bits.
"""
reporter.log('Manual testing ...');
fRc = True;
sUser = 'Administrator';
sPassword = 'password';
oGuest = oSession.o.console.guest;
oGuestSession = oGuest.createSession(sUser,
sPassword,
"", "Manual Test");
aWaitFor = [ vboxcon.GuestSessionWaitForFlag_Start ];
_ = oGuestSession.waitForArray(aWaitFor, 30 * 1000);
sCmd = 'c:\\windows\\system32\\cmd.exe';
aArgs = [ sCmd, '/C', 'dir', '/S', 'c:\\windows' ];
aEnv = [];
aFlags = [];
for _ in range(100):
oProc = oGuestSession.processCreate(sCmd, aArgs if self.fpApiVer >= 5.0 else aArgs[1:],
aEnv, aFlags, 30 * 1000);
aWaitFor = [ vboxcon.ProcessWaitForFlag_Terminate ];
_ = oProc.waitForArray(aWaitFor, 30 * 1000);
oGuestSession.close();
oGuestSession = None;
time.sleep(5);
oSession.o.console.PowerDown();
return (fRc, oTxsSession);
if __name__ == '__main__':
sys.exit(tdAddGuestCtrl().main(sys.argv));
|
py | 1a316d89f8dcad3c542fa736ce261b387ab4e470 | import torch
from torch.utils.data import DataLoader
import isao
def main():
train_dataset = isao.Isao('./data/preprocessed', use_label=True, resize=(64,64))
train_dataloader = DataLoader(train_dataset, batch_size=1000, shuffle=True)
for batch in train_dataloader:
print(batch['img'].shape)
if __name__ == '__main__':
main() |
py | 1a316e81b491b48e95802e0ec1bbd2504c6753f9 | from .bottom_up_coco import BottomUpCocoDataset
__all__ = ['BottomUpCocoDataset']
|
py | 1a316e84635b9d25ef4fd6b832b7252aeb74b8af | #!/usr/bin/env python
"""This module provides utility classes and functions for threading/multiprocessing"""
from __future__ import print_function
from .logutil import GetLogger
from . import sfdefaults as _sfdefaults
from . import SolidFireError, SFTimeoutError
import atexit
import fcntl as _fcntl
import functools as _functools
import multiprocessing as _multiprocessing
import multiprocessing.pool as _multiprocessing_pool
import sys as _sys
import threading as _threading
import traceback as _traceback
from io import open
# Helpful multiprocessing debug for threadpools
# from logging import DEBUG as _DEBUG_LEVEL
# import multiprocessing.util as _multiprocessing_util
# _multiprocessing_util.log_to_stderr(_DEBUG_LEVEL)
CPU_THREADS = _multiprocessing.cpu_count()
_globalPool = None
_globalPoolLock = _multiprocessing.Lock()
def GlobalPool():
""" Get the global thread pool """
#pylint: disable=global-statement
global _globalPool
#pylint: enable=global-statement
with _globalPoolLock:
if not _globalPool:
_globalPool = ThreadPool()
return _globalPool
def ShutdownGlobalPool():
with _globalPoolLock:
if _globalPool:
_globalPool.Shutdown()
def IsMainThread():
"""
Check if the current thread is the main thread
Returns:
Boolean true if this is the main thread, false otherwise
"""
return _threading.current_thread().name == "MainThread"
def IsMainProcess():
"""
Check if the current process is the main process
Returns:
Boolean true if this is the main process, false otherwise
"""
return _multiprocessing.current_process().name == "MainProcess"
class AsyncResult(object):
"""Result object from posting to a ThreadPool"""
def __init__(self, result):
self.result = result
def Get(self):
"""
Wait for and return the result of the thread
Returns:
The return value of the thread
"""
return self.result.get(0xFFFF)
def GetWithTimeout(self, timeout):
try:
return self.result.get(timeout)
except _multiprocessing.TimeoutError as e:
raise SFTimeoutError("Timeout waiting for thread to complete", innerException=e)
def Wait(self, timeout):
"""
Wait for the thread to complete
Args:
timeout: how long to wait before giving up, in seconds (float)
Returns:
Boolean true if the thread is ready or false if the timeout expired (bool)
"""
self.result.wait(timeout)
return self.result.ready()  # wait() itself returns None; ready() gives the documented boolean
def _initworkerprocess():
"""
Initialization function for workers in a process pool.
This turns off SIGINT handling in sub-processes
"""
import signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
class ThreadPool(object):
"""Helper to manage status and lifetime of threads/processes"""
def __init__(self, maxThreads=CPU_THREADS, useMultiprocessing=_sfdefaults.use_multiprocessing):
if useMultiprocessing:
self.threadPool = _multiprocessing.Pool(processes=maxThreads, initializer=_initworkerprocess)
else:
self.threadPool = _multiprocessing_pool.ThreadPool(processes=maxThreads)
self.results = []
atexit.register(self.threadPool.close)
def Post(self, threadFunc, *args, **kwargs):
"""
Add a new work item
Args:
threadFunc: the function to be run as a thread
args: args to pass to the thread function
kwargs: keyword args to pass to the thread function
Returns:
AsyncResult object
"""
async_res = self.threadPool.apply_async(threadFunc, args, kwargs)
res = AsyncResult(async_res)
self.results.append(res)
return res
def Wait(self):
"""
Wait for all threads to finish and collect the results
Returns:
Boolean true if all threads succeeded, False if one or more failed
"""
return WaitForThreads(self.results)
def Shutdown(self):
"""
Abort any running processes and shut down the pool
"""
self.threadPool.close()
self.threadPool.terminate()
def WaitForThreads(asyncResults):
"""
Wait for a list of threads to finish and collect the results
Args:
asyncResults: a list of async results to wait for (multiprocessing.pool.AsyncResult)
Returns:
Boolean true if all threads succeeded, False if one or more failed
"""
log = GetLogger()
allgood = True
for async_res in asyncResults:
# If the result is not True, or if there is an exception, this thread failed
try:
result = async_res.Get()
if result is False:
allgood = False
except SolidFireError as e:
log.error(e)
allgood = False
return allgood
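# A minimal usage sketch of the helpers above ("_square" is a hypothetical worker
# function): post items to a ThreadPool, then collect the results.
#
#   pool = ThreadPool(maxThreads=4, useMultiprocessing=False)
#   def _square(x):
#       return x * x
#   results = [pool.Post(_square, i) for i in range(8)]
#   allgood = WaitForThreads(results)   # True only if every item succeeded
#   pool.Shutdown()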
def threadwrapper(func):
"""Decorator for functions to be run as threads"""
@_functools.wraps(func)
def wrapper(*args, **kwargs):
orig_name = _threading.current_thread().name
try:
return func(*args, **kwargs)
except (KeyboardInterrupt, SystemExit):
print("KeyboardInterrupt/SystemExit in thread {}".format(_threading.current_thread().name))
raise
except:
# For exceptions from child threads/processes, we want to extract and store the original traceback, otherwise it may
# be lost to multiprocessing/pickling and inaccessible when the exception gets rethrown in the parent process
# For convenience, we also convert all exceptions into our rooted exception hierarchy
ex_type, ex_val, ex_tb = _sys.exc_info()
str_tb = "".join(_traceback.format_tb(ex_tb))
if isinstance(ex_val, SolidFireError):
ex_val.originalTraceback = str_tb
raise
log = GetLogger()
log.debug(str_tb)
raise SolidFireError("{}: {}".format(ex_type.__name__, ex_val), str_tb)
finally:
_threading.current_thread().name = orig_name
return wrapper
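# A minimal sketch of how threadwrapper is typically applied ("_checkNode" and
# "someNode" are hypothetical names). The decorator converts worker exceptions to
# SolidFireError and keeps the original traceback across process boundaries.
#
#   @threadwrapper
#   def _checkNode(node):
#       node.DoSomething()
#       return True
#
#   result = GlobalPool().Post(_checkNode, someNode)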
class LockFile(object):
def __init__(self, lockname):
self.lockFile = "/var/tmp/{}.lockfile".format(lockname)
self.fd = open(self.lockFile, "w")
def __enter__(self):
self.Lock()
def __exit__(self, extype, exval, tb):
self.Unlock()
def __del__(self):
"""Make sure the lock gets unlocked when we exit"""
self.Unlock()
self.fd.close()
def Lock(self):
"""Lock"""
_fcntl.flock(self.fd, _fcntl.LOCK_EX | _fcntl.LOCK_NB)
def Unlock(self):
"""Unlock"""
_fcntl.flock(self.fd, _fcntl.LOCK_UN)
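# A minimal sketch of LockFile as a context manager ("my-task" is a hypothetical
# lock name). Lock() takes a non-blocking exclusive flock, so entering the block
# raises OSError if another process already holds /var/tmp/my-task.lockfile.
#
#   with LockFile("my-task"):
#       pass  # critical section, exclusive across processes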
|
py | 1a316ec8aeadc5145e7af21a4e4123cd669876bf | import cPickle
import sys
with open(sys.argv[1], 'rb') as f:
jpg_list = cPickle.load(f)
# Write each (filename, bytes) pair to <output_dir>/<filename>.
for jpg_item in jpg_list:
    with open(sys.argv[2] + '/' + jpg_item[0], 'wb') as f2:
        f2.write(jpg_item[1])
|
py | 1a316f0f27284fa1d079ec5b3bd2ff1441bc9543 | from typing import Sequence
import editdistance
import pytorch_lightning as pl
import torch
class CharacterErrorRate(pl.metrics.Metric):
"""Character error rate metric, computed using Levenshtein distance."""
def __init__(self, ignore_tokens: Sequence[int], *args):
super().__init__(*args)
self.ignore_tokens = set(ignore_tokens)
self.add_state("error", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0), dist_reduce_fx="sum")
def update(self, preds: torch.Tensor, targets: torch.Tensor) -> None:
N = preds.shape[0]
for ind in range(N):
pred = [_ for _ in preds[ind].tolist() if _ not in self.ignore_tokens]
target = [_ for _ in targets[ind].tolist() if _ not in self.ignore_tokens]
distance = editdistance.distance(pred, target)
error = distance / max(len(pred), len(target))
self.error = self.error + error
self.total = self.total + N
def compute(self) -> torch.Tensor:
return self.error / self.total
def test_character_error_rate():
metric = CharacterErrorRate([0, 1])
X = torch.tensor(
[
[0, 2, 2, 3, 3, 1], # error will be 0
[0, 2, 1, 1, 1, 1], # error will be .75
[0, 2, 2, 4, 4, 1], # error will be .5
]
)
Y = torch.tensor([[0, 2, 2, 3, 3, 1], [0, 2, 2, 3, 3, 1], [0, 2, 2, 3, 3, 1],])
metric(X, Y)
print(metric.compute())
assert metric.compute() == sum([0, 0.75, 0.5]) / 3
if __name__ == "__main__":
test_character_error_rate()
|
py | 1a316f4a2fc67b5492b6a624cbc84f949c9619df | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
class latoken(Exchange):
def describe(self):
return self.deep_extend(super(latoken, self).describe(), {
'id': 'latoken',
'name': 'Latoken',
'countries': ['KY'], # Cayman Islands
'version': 'v2',
'rateLimit': 1000,
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'fetchBalance': True,
'fetchCurrencies': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOpenOrders': True,
'fetchOrderBook': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTransactions': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/61511972-24c39f00-aa01-11e9-9f7c-471f1d6e5214.jpg',
'api': 'https://api.latoken.com',
'www': 'https://latoken.com',
'doc': [
'https://api.latoken.com',
],
'fees': 'https://latoken.com/fees',
'referral': 'https://latoken.com/invite?r=mvgp2djk',
},
'api': {
'public': {
'get': {
'book/{currency}/{quote}': 1,
'chart/week': 1,
'chart/week/{currency}/{quote}': 1,
'currency': 1,
'currency/available': 1,
'currency/quotes': 1,
'currency/{currency}': 1,
'pair': 1,
'pair/available': 1,
'ticker': 1,
'ticker/{base}/{quote}': 1,
'time': 1,
'trade/history/{currency}/{quote}': 1,
'trade/fee/{currency}/{quote}': 1,
'trade/feeLevels': 1,
'transaction/bindings': 1,
},
},
'private': {
'get': {
'auth/account': 1,
'auth/account/currency/{currency}/{type}': 1,
'auth/order': 1,
'auth/order/getOrder/{id}': 1,
'auth/order/pair/{currency}/{quote}': 1,
'auth/order/pair/{currency}/{quote}/active': 1,
'auth/stopOrder': 1,
'auth/stopOrder/getOrder/{id}': 1,
'auth/stopOrder/pair/{currency}/{quote}': 1,
'auth/stopOrder/pair/{currency}/{quote}/active': 1,
'auth/trade': 1,
'auth/trade/pair/{currency}/{quote}': 1,
'auth/trade/fee/{currency}/{quote}': 1,
'auth/transaction': 1,
'auth/transaction/bindings': 1,
'auth/transaction/bindings/{currency}': 1,
'auth/transaction/{id}': 1,
'auth/transfer': 1,
},
'post': {
'auth/order/cancel': 1,
'auth/order/cancelAll': 1,
'auth/order/cancelAll/{currency}/{quote}': 1,
'auth/order/place': 1,
'auth/spot/deposit': 1,
'auth/spot/withdraw': 1,
'auth/stopOrder/cancel': 1,
'auth/stopOrder/cancelAll': 1,
'auth/stopOrder/cancelAll/{currency}/{quote}': 1,
'auth/stopOrder/place': 1,
'auth/transaction/depositAddress': 1,
'auth/transaction/withdraw': 1,
'auth/transaction/withdraw/cancel': 1,
'auth/transaction/withdraw/confirm': 1,
'auth/transaction/withdraw/resendCode': 1,
'auth/transfer/email': 1,
'auth/transfer/id': 1,
'auth/transfer/phone': 1,
},
},
},
'precisionMode': TICK_SIZE,
'fees': {
'trading': {
'feeSide': 'get',
'tierBased': False,
'percentage': True,
'maker': self.parse_number('0.0049'),
'taker': self.parse_number('0.0049'),
},
},
'commonCurrencies': {
'MT': 'Monarch',
'TPAY': 'Tetra Pay',
'TRADE': 'Smart Trade Coin',
'TSL': 'Treasure SL',
},
'exceptions': {
'exact': {
'INTERNAL_ERROR': ExchangeError, # internal server error. You can contact our support to solve this problem. {"message":"Internal Server Error","error":"INTERNAL_ERROR","status":"FAILURE"}
'SERVICE_UNAVAILABLE': ExchangeNotAvailable, # requested information currently not available. You can contact our support to solve this problem or retry later.
'NOT_AUTHORIZED': AuthenticationError, # user's query not authorized. Check if you are logged in.
'FORBIDDEN': PermissionDenied, # you don't have enough access rights.
'BAD_REQUEST': BadRequest, # some bad request, for example bad fields values or something else. Read response message for more information.
'NOT_FOUND': ExchangeError, # entity not found. Read message for more information.
'ACCESS_DENIED': PermissionDenied, # access is denied. Probably you don't have enough access rights, you contact our support.
'REQUEST_REJECTED': ExchangeError, # user's request rejected for some reasons. Check error message.
'HTTP_MEDIA_TYPE_NOT_SUPPORTED': BadRequest, # http media type not supported.
'MEDIA_TYPE_NOT_ACCEPTABLE': BadRequest, # media type not acceptable
'METHOD_ARGUMENT_NOT_VALID': BadRequest, # one of method argument is invalid. Check argument types and error message for more information.
'VALIDATION_ERROR': BadRequest, # check errors field to get reasons.
'ACCOUNT_EXPIRED': AccountSuspended, # restore your account or create a new one.
'BAD_CREDENTIALS': AuthenticationError, # invalid username or password.
'COOKIE_THEFT': AuthenticationError, # cookie has been stolen. Let's try reset your cookies.
'CREDENTIALS_EXPIRED': AccountSuspended, # credentials expired.
'INSUFFICIENT_AUTHENTICATION': AuthenticationError, # for example, 2FA required.
'UNKNOWN_LOCATION': AuthenticationError, # user logged from unusual location, email confirmation required.
'TOO_MANY_REQUESTS': RateLimitExceeded, # too many requests at the time. A response header X-Rate-Limit-Remaining indicates the number of allowed request per a period.
},
'broad': {
'invalid API key, signature or digest': AuthenticationError, # {"result":false,"message":"invalid API key, signature or digest","error":"BAD_REQUEST","status":"FAILURE"}
'request expired or bad': InvalidNonce, # {"result":false,"message":"request expired or bad <timeAlive>/<timestamp> format","error":"BAD_REQUEST","status":"FAILURE"}
'For input string': BadRequest, # {"result":false,"message":"Internal error","error":"For input string: \"NaN\"","status":"FAILURE"}
},
},
'options': {
'defaultType': 'spot',
'types': {
'wallet': 'ACCOUNT_TYPE_WALLET',
'spot': 'ACCOUNT_TYPE_SPOT',
},
'accounts': {
'ACCOUNT_TYPE_WALLET': 'wallet',
'ACCOUNT_TYPE_SPOT': 'spot',
},
},
})
def nonce(self):
return self.milliseconds() - self.options['timeDifference']
async def fetch_time(self, params={}):
response = await self.publicGetTime(params)
#
# {
# "serverTime": 1570615577321
# }
#
return self.safe_integer(response, 'serverTime')
async def load_time_difference(self, params={}):
serverTime = await self.fetch_time(params)
after = self.milliseconds()
self.options['timeDifference'] = after - serverTime
return self.options['timeDifference']
async def fetch_markets(self, params={}):
currencies = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "id":"1a075819-9e0b-48fc-8784-4dab1d186d6d",
# "status":"CURRENCY_STATUS_ACTIVE",
# "type":"CURRENCY_TYPE_ALTERNATIVE", # CURRENCY_TYPE_CRYPTO, CURRENCY_TYPE_IEO
# "name":"MyCryptoBank",
# "tag":"MCB",
# "description":"",
# "logo":"",
# "decimals":18,
# "created":1572912000000,
# "tier":1,
# "assetClass":"ASSET_CLASS_UNKNOWN",
# "minTransferAmount":0
# },
# {
# "id":"db02758e-2507-46a5-a805-7bc60355b3eb",
# "status":"CURRENCY_STATUS_ACTIVE",
# "type":"CURRENCY_TYPE_FUTURES_CONTRACT",
# "name":"BTC USDT Futures Contract",
# "tag":"BTCUSDT",
# "description":"",
# "logo":"",
# "decimals":8,
# "created":1589459984395,
# "tier":1,
# "assetClass":"ASSET_CLASS_UNKNOWN",
# "minTransferAmount":0
# },
# ]
#
response = await self.publicGetPair(params)
#
# [
# {
# "id":"dba4289b-6b46-4d94-bf55-49eec9a163ad",
# "status":"PAIR_STATUS_ACTIVE", # CURRENCY_STATUS_INACTIVE
# "baseCurrency":"fb9b53d6-bbf6-472f-b6ba-73cc0d606c9b",
# "quoteCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f",
# "priceTick":"0.000000100000000000",
# "priceDecimals":7,
# "quantityTick":"0.010000000",
# "quantityDecimals":2,
# "costDisplayDecimals":7,
# "created":1572957210501,
# "minOrderQuantity":"0",
# "maxOrderCostUsd":"999999999999999999",
# "minOrderCostUsd":"0",
# "externalSymbol":""
# }
# ]
#
if self.safe_value(self.options, 'adjustForTimeDifference', True):
await self.load_time_difference()
currenciesById = self.index_by(currencies, 'id')
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'id')
# the exchange shows them inverted
baseId = self.safe_string(market, 'baseCurrency')
quoteId = self.safe_string(market, 'quoteCurrency')
baseCurrency = self.safe_value(currenciesById, baseId)
quoteCurrency = self.safe_value(currenciesById, quoteId)
if baseCurrency is not None and quoteCurrency is not None:
base = self.safe_currency_code(self.safe_string(baseCurrency, 'tag'))
quote = self.safe_currency_code(self.safe_string(quoteCurrency, 'tag'))
symbol = base + '/' + quote
precision = {
'price': self.safe_number(market, 'priceTick'),
'amount': self.safe_number(market, 'quantityTick'),
}
lowercaseQuote = quote.lower()
capitalizedQuote = self.capitalize(lowercaseQuote)
limits = {
'amount': {
'min': self.safe_number(market, 'minOrderQuantity'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'minOrderCost' + capitalizedQuote),
'max': self.safe_number(market, 'maxOrderCost' + capitalizedQuote),
},
}
status = self.safe_string(market, 'status')
active = (status == 'PAIR_STATUS_ACTIVE')
result.append({
'id': id,
'info': market,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'type': 'spot',
'spot': True,
'active': active, # assuming True
'precision': precision,
'limits': limits,
})
return result
async def fetch_currencies_from_cache(self, params={}):
# this method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetCurrency(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
async def fetch_currencies(self, params={}):
response = await self.fetch_currencies_from_cache(params)
#
# [
# {
# "id":"1a075819-9e0b-48fc-8784-4dab1d186d6d",
# "status":"CURRENCY_STATUS_ACTIVE",
# "type":"CURRENCY_TYPE_ALTERNATIVE", # CURRENCY_TYPE_CRYPTO, CURRENCY_TYPE_IEO
# "name":"MyCryptoBank",
# "tag":"MCB",
# "description":"",
# "logo":"",
# "decimals":18,
# "created":1572912000000,
# "tier":1,
# "assetClass":"ASSET_CLASS_UNKNOWN",
# "minTransferAmount":0
# },
# {
# "id":"db02758e-2507-46a5-a805-7bc60355b3eb",
# "status":"CURRENCY_STATUS_ACTIVE",
# "type":"CURRENCY_TYPE_FUTURES_CONTRACT",
# "name":"BTC USDT Futures Contract",
# "tag":"BTCUSDT",
# "description":"",
# "logo":"",
# "decimals":8,
# "created":1589459984395,
# "tier":1,
# "assetClass":"ASSET_CLASS_UNKNOWN",
# "minTransferAmount":0
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'id')
tag = self.safe_string(currency, 'tag')
code = self.safe_currency_code(tag)
decimals = self.safe_string(currency, 'decimals')
precision = self.parse_number('1e-' + decimals)
fee = self.safe_number(currency, 'fee')
currencyType = self.safe_string(currency, 'type')
parts = currencyType.split('_')
numParts = len(parts)
lastPart = self.safe_value(parts, numParts - 1)
type = lastPart.lower()
status = self.safe_string(currency, 'status')
active = (status == 'CURRENCY_STATUS_ACTIVE')
name = self.safe_string(currency, 'name')
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': name,
'type': type,
'active': active,
'fee': fee,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_number(currency, 'minTransferAmount'),
'max': None,
},
'withdraw': {
'min': None,
'max': None,
},
},
}
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetAuthAccount(params)
#
# [
# {
# id: "e5852e02-8711-431c-9749-a6f5503c6dbe",
# status: "ACCOUNT_STATUS_ACTIVE",
# type: "ACCOUNT_TYPE_WALLET",
# timestamp: "1635920106506",
# currency: "0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# available: "100.000000",
# blocked: "0.000000"
# },
# {
# id: "369df204-acbc-467e-a25e-b16e3cc09cf6",
# status: "ACCOUNT_STATUS_ACTIVE",
# type: "ACCOUNT_TYPE_SPOT",
# timestamp: "1635920106504",
# currency: "0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# available: "100.000000",
# blocked: "0.000000"
# }
# ]
#
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
maxTimestamp = None
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
types = self.safe_value(self.options, 'types', {})
accountType = self.safe_string(types, type, type)
balancesByType = self.group_by(response, 'type')
balances = self.safe_value(balancesByType, accountType, [])
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
timestamp = self.safe_integer(balance, 'timestamp')
if timestamp is not None:
if maxTimestamp is None:
maxTimestamp = timestamp
else:
maxTimestamp = max(maxTimestamp, timestamp)
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'blocked')
result[code] = account
result['timestamp'] = maxTimestamp
result['datetime'] = self.iso8601(maxTimestamp)
return self.parse_balance(result)
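# A minimal usage sketch (assuming an already-configured latoken instance named
# "exchange"): the account type defaults to 'spot' and can be switched to the
# wallet account via the 'type' param, mapped through self.options['types'].
#
#   spot_balance = await exchange.fetch_balance()
#   wallet_balance = await exchange.fetch_balance({'type': 'wallet'})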
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currency': market['baseId'],
'quote': market['quoteId'],
}
if limit is not None:
request['limit'] = limit # max 1000
response = await self.publicGetBookCurrencyQuote(self.extend(request, params))
#
# {
# "ask":[
# {"price":"4428.76","quantity":"0.08136","cost":"360.3239136","accumulated":"360.3239136"},
# {"price":"4429.77","quantity":"1.11786","cost":"4951.8626922","accumulated":"5312.1866058"},
# {"price":"4430.94","quantity":"1.78418","cost":"7905.5945292","accumulated":"13217.781135"},
# ],
# "bid":[
# {"price":"4428.43","quantity":"0.13675","cost":"605.5878025","accumulated":"605.5878025"},
# {"price":"4428.19","quantity":"0.03619","cost":"160.2561961","accumulated":"765.8439986"},
# {"price":"4428.15","quantity":"0.02926","cost":"129.567669","accumulated":"895.4116676"},
# ],
# "totalAsk":"53.14814",
# "totalBid":"112216.9029791"
# }
#
return self.parse_order_book(response, symbol, None, 'bid', 'ask', 'price', 'quantity')
def parse_ticker(self, ticker, market=None):
#
# {
# "symbol":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f/0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# "baseCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f",
# "quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# "volume24h":"76411867.852585600000000000",
# "volume7d":"637809926.759451100000000000",
# "change24h":"2.5300",
# "change7d":"5.1300",
# "lastPrice":"4426.9"
# }
#
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market)
last = self.safe_number(ticker, 'lastPrice')
change = self.safe_number(ticker, 'change24h')
timestamp = self.nonce()
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'low': self.safe_number(ticker, 'low'),
'high': self.safe_number(ticker, 'high'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': None,
'average': None,
'baseVolume': None,
'quoteVolume': self.safe_number(ticker, 'volume24h'),
'info': ticker,
})
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'base': market['baseId'],
'quote': market['quoteId'],
}
response = await self.publicGetTickerBaseQuote(self.extend(request, params))
#
# {
# "symbol":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f/0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# "baseCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f",
# "quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# "volume24h":"76411867.852585600000000000",
# "volume7d":"637809926.759451100000000000",
# "change24h":"2.5300",
# "change7d":"5.1300",
# "lastPrice":"4426.9"
# }
#
return self.parse_ticker(response, market)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetTicker(params)
#
# [
# {
# "symbol":"DASH/BTC",
# "baseCurrency":"ed75c263-4ab9-494b-8426-031dab1c7cc1",
# "quoteCurrency":"92151d82-df98-4d88-9a4d-284fa9eca49f",
# "volume24h":"1.977753278000000000",
# "volume7d":"18.964342670000000000",
# "change24h":"-1.4800",
# "change7d":"-5.5200",
# "lastPrice":"0.003066"
# },
# ]
#
return self.parse_tickers(response, symbols)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id":"c152f814-8eeb-44f0-8f3f-e5c568f2ffcf",
# "isMakerBuyer":false,
# "baseCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f",
# "quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# "price":"4435.56",
# "quantity":"0.32534",
# "cost":"1443.0650904",
# "timestamp":1635854642725,
# "makerBuyer":false
# }
#
# fetchMyTrades(private)
#
# {
# "id":"02e02533-b4bf-4ba9-9271-24e2108dfbf7",
# "isMakerBuyer":false,
# "direction":"TRADE_DIRECTION_BUY",
# "baseCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f",
# "quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# "price":"4564.32",
# "quantity":"0.01000",
# "cost":"45.6432",
# "fee":"0.223651680000000000",
# "order":"c9cac6a0-484c-4892-88e7-ad51b39f2ce1",
# "timestamp":1635921580399,
# "makerBuyer":false
# }
#
type = None
timestamp = self.safe_integer(trade, 'timestamp')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'quantity')
costString = self.safe_string(trade, 'cost')
makerBuyer = self.safe_value(trade, 'makerBuyer')
side = self.safe_string(trade, 'direction')
if side is None:
side = 'sell' if makerBuyer else 'buy'
else:
if side == 'TRADE_DIRECTION_BUY':
side = 'buy'
elif side == 'TRADE_DIRECTION_SELL':
side = 'sell'
isBuy = (side == 'buy')
takerOrMaker = 'maker' if (makerBuyer and isBuy) else 'taker'
baseId = self.safe_string(trade, 'baseCurrency')
quoteId = self.safe_string(trade, 'quoteCurrency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if symbol in self.markets:
market = self.market(symbol)
id = self.safe_string(trade, 'id')
orderId = self.safe_string(trade, 'order')
feeCost = self.safe_string(trade, 'fee')
fee = None
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': quote,
}
return self.safe_trade({
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': orderId,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'price': priceString,
'amount': amountString,
'cost': costString,
'fee': fee,
}, market)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'currency': market['baseId'],
'quote': market['quoteId'],
# 'from': str(since), # milliseconds
# 'limit': limit, # default 100, max 1000
}
if limit is not None:
request['limit'] = limit # default 100, max 1000
response = await self.publicGetTradeHistoryCurrencyQuote(self.extend(request, params))
#
# [
# {"id":"c152f814-8eeb-44f0-8f3f-e5c568f2ffcf","isMakerBuyer":false,"baseCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f","quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5","price":"4435.56","quantity":"0.32534","cost":"1443.0650904","timestamp":1635854642725,"makerBuyer":false},
# {"id":"cfecbefb-3d11-43d7-b9d4-fa16211aad8a","isMakerBuyer":false,"baseCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f","quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5","price":"4435.13","quantity":"0.26540","cost":"1177.083502","timestamp":1635854641114,"makerBuyer":false},
# {"id":"f43d3ec8-db94-49f3-b534-91dbc2779296","isMakerBuyer":true,"baseCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f","quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5","price":"4435.00","quantity":"0.41738","cost":"1851.0803","timestamp":1635854640323,"makerBuyer":true},
# ]
#
return self.parse_trades(response, market, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'currency': market['baseId'],
# 'quote': market['quoteId'],
# 'from': self.milliseconds(),
# 'limit': limit, # default '100'
}
method = 'privateGetAuthTrade'
market = None
if symbol is not None:
market = self.market(symbol)
request['currency'] = market['baseId']
request['quote'] = market['quoteId']
method = 'privateGetAuthTradePairCurrencyQuote'
if limit is not None:
request['limit'] = limit # default 100
response = await getattr(self, method)(self.extend(request, params))
#
# [
# {
# "id":"02e02533-b4bf-4ba9-9271-24e2108dfbf7",
# "isMakerBuyer":false,
# "direction":"TRADE_DIRECTION_BUY",
# "baseCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f",
# "quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# "price":"4564.32",
# "quantity":"0.01000",
# "cost":"45.6432",
# "fee":"0.223651680000000000",
# "order":"c9cac6a0-484c-4892-88e7-ad51b39f2ce1",
# "timestamp":1635921580399,
# "makerBuyer":false
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'ORDER_STATUS_PLACED': 'open',
'ORDER_STATUS_CLOSED': 'closed',
'ORDER_STATUS_CANCELLED': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order_type(self, status):
statuses = {
'ORDER_TYPE_MARKET': 'market',
'ORDER_TYPE_LIMIT': 'limit',
}
return self.safe_string(statuses, status, status)
def parse_time_in_force(self, timeInForce):
timeInForces = {
'ORDER_CONDITION_GOOD_TILL_CANCELLED': 'GTC',
'ORDER_CONDITION_IMMEDIATE_OR_CANCEL': 'IOC',
'ORDER_CONDITION_FILL_OR_KILL': 'FOK',
}
return self.safe_string(timeInForces, timeInForce, timeInForce)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "orderId":"1563460093.134037.704945@0370:2",
# "cliOrdId":"",
# "pairId":370,
# "symbol":"ETHBTC",
# "side":"sell",
# "orderType":"limit",
# "price":1.0,
# "amount":1.0
# }
#
# fetchOrder, fetchOpenOrders, fetchOrders
#
# {
# "id":"a76bd262-3560-4bfb-98ac-1cedd394f4fc",
# "status":"ORDER_STATUS_PLACED",
# "side":"ORDER_SIDE_BUY",
# "condition":"ORDER_CONDITION_GOOD_TILL_CANCELLED",
# "type":"ORDER_TYPE_LIMIT",
# "baseCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f",
# "quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# "clientOrderId":"web-macos_chrome_1a6a6659-6f7c-4fac-be0b-d1d7ac06d",
# "price":"4000.00",
# "quantity":"0.01",
# "cost":"40.000000000000000000",
# "filled":"0",
# "trader":"7244bb3a-b6b2-446a-ac78-fa4bce5b59a9",
# "creator":"ORDER_CREATOR_USER",
# "creatorId":"",
# "timestamp":1635920767648
# }
#
# cancelOrder
#
# {
# "message":"cancellation request successfully submitted",
# "status":"SUCCESS",
# "id":"a631426d-3543-45ba-941e-75f7825afb0f"
# }
#
id = self.safe_string(order, 'id')
timestamp = self.safe_integer(order, 'timestamp')
baseId = self.safe_string(order, 'baseCurrency')
quoteId = self.safe_string(order, 'quoteCurrency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = None
if (base is not None) and (quote is not None):
symbol = base + '/' + quote
if symbol in self.markets:
market = self.market(symbol)
orderSide = self.safe_string(order, 'side')
side = None
if orderSide is not None:
parts = orderSide.split('_')
side = self.safe_string_lower(parts, len(parts) - 1)
type = self.parse_order_type(self.safe_string(order, 'type'))
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'quantity')
filled = self.safe_string(order, 'filled')
cost = self.safe_string(order, 'cost')
status = self.parse_order_status(self.safe_string(order, 'status'))
message = self.safe_string(order, 'message')
if message is not None:
if message.find('cancel') >= 0:
status = 'canceled'
elif message.find('accept') >= 0:
status = 'open'
clientOrderId = self.safe_string(order, 'clientOrderId')
timeInForce = self.parse_time_in_force(self.safe_string(order, 'condition'))
return self.safe_order2({
'id': id,
'clientOrderId': clientOrderId,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'cost': cost,
'amount': amount,
'filled': filled,
'average': None,
'remaining': None,
'fee': None,
'trades': None,
}, market)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'currency': market['baseId'],
'quote': market['quoteId'],
}
response = await self.privateGetAuthOrderPairCurrencyQuoteActive(self.extend(request, params))
#
# [
# {
# "id":"a76bd262-3560-4bfb-98ac-1cedd394f4fc",
# "status":"ORDER_STATUS_PLACED",
# "side":"ORDER_SIDE_BUY",
# "condition":"ORDER_CONDITION_GOOD_TILL_CANCELLED",
# "type":"ORDER_TYPE_LIMIT",
# "baseCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f",
# "quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# "clientOrderId":"web-macos_chrome_1a6a6659-6f7c-4fac-be0b-d1d7ac06d",
# "price":"4000.00",
# "quantity":"0.01000",
# "cost":"40.00",
# "filled":"0.00000",
# "trader":"7244bb3a-b6b2-446a-ac78-fa4bce5b59a9",
# "creator":"USER",
# "creatorId":"",
# "timestamp":1635920767648
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'currency': market['baseId'],
# 'quote': market['quoteId'],
# 'from': self.milliseconds(),
# 'limit': limit, # default '100'
}
method = 'privateGetAuthOrder'
market = None
if symbol is not None:
market = self.market(symbol)
request['currency'] = market['baseId']
request['quote'] = market['quoteId']
method = 'privateGetAuthOrderPairCurrencyQuote'
if limit is not None:
request['limit'] = limit # default 100
response = await getattr(self, method)(self.extend(request, params))
#
# [
# {
# "id":"a76bd262-3560-4bfb-98ac-1cedd394f4fc",
# "status":"ORDER_STATUS_PLACED",
# "side":"ORDER_SIDE_BUY",
# "condition":"ORDER_CONDITION_GOOD_TILL_CANCELLED",
# "type":"ORDER_TYPE_LIMIT",
# "baseCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f",
# "quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# "clientOrderId":"web-macos_chrome_1a6a6659-6f7c-4fac-be0b-d1d7ac06d",
# "price":"4000.00",
# "quantity":"0.01000",
# "cost":"40.00",
# "filled":"0.00000",
# "trader":"7244bb3a-b6b2-446a-ac78-fa4bce5b59a9",
# "creator":"USER",
# "creatorId":"",
# "timestamp":1635920767648
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'id': id,
}
response = await self.privateGetAuthOrderGetOrderId(self.extend(request, params))
#
# {
# "id":"a76bd262-3560-4bfb-98ac-1cedd394f4fc",
# "status":"ORDER_STATUS_PLACED",
# "side":"ORDER_SIDE_BUY",
# "condition":"ORDER_CONDITION_GOOD_TILL_CANCELLED",
# "type":"ORDER_TYPE_LIMIT",
# "baseCurrency":"620f2019-33c0-423b-8a9d-cde4d7f8ef7f",
# "quoteCurrency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# "clientOrderId":"web-macos_chrome_1a6a6659-6f7c-4fac-be0b-d1d7ac06d",
# "price":"4000.00",
# "quantity":"0.01",
# "cost":"40.000000000000000000",
# "filled":"0",
# "trader":"7244bb3a-b6b2-446a-ac78-fa4bce5b59a9",
# "creator":"ORDER_CREATOR_USER",
# "creatorId":"",
# "timestamp":1635920767648
# }
#
return self.parse_order(response)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
uppercaseType = type.upper()
request = {
'baseCurrency': market['baseId'],
'quoteCurrency': market['quoteId'],
'side': side.upper(), # "BUY", "BID", "SELL", "ASK"
'condition': 'GTC', # "GTC", "GOOD_TILL_CANCELLED", "IOC", "IMMEDIATE_OR_CANCEL", "FOK", "FILL_OR_KILL"
'type': uppercaseType, # "LIMIT", "MARKET"
'clientOrderId': self.uuid(), # 50 characters max
# 'price': self.price_to_precision(symbol, price),
# 'quantity': self.amount_to_precision(symbol, amount),
}
if uppercaseType == 'LIMIT':
request['price'] = self.price_to_precision(symbol, price)
request['quantity'] = self.amount_to_precision(symbol, amount)
request['timestamp'] = self.seconds()
response = await self.privatePostAuthOrderPlace(self.extend(request, params))
#
# {
# "orderId":"1563460093.134037.704945@0370:2",
# "cliOrdId":"",
# "pairId":370,
# "symbol":"ETHBTC",
# "side":"sell",
# "orderType":"limit",
# "price":1.0,
# "amount":1.0
# }
#
return self.parse_order(response, market)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'id': id,
}
response = await self.privatePostAuthOrderCancel(self.extend(request, params))
#
# {
# "id": "12345678-1234-1244-1244-123456789012",
# "message": "cancellation request successfully submitted",
# "status": "SUCCESS",
# "error": "",
# "errors": {}
# }
#
return self.parse_order(response)
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
request = {
# 'currency': market['baseId'],
# 'quote': market['quoteId'],
}
method = 'privatePostAuthOrderCancelAll'
market = None
if symbol is not None:
market = self.market(symbol)
request['currency'] = market['baseId']
request['quote'] = market['quoteId']
method = 'privatePostAuthOrderCancelAllCurrencyQuote'
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "message":"cancellation request successfully submitted",
# "status":"SUCCESS"
# }
#
return response
async def fetch_transactions(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'page': '1',
# 'size': 100,
}
response = await self.privateGetAuthTransaction(self.extend(request, params))
#
# {
# "hasNext":false,
# "content":[
# {
# "id":"fbf7d0d1-2629-4ad8-9def-7a1dba423362",
# "status":"TRANSACTION_STATUS_CONFIRMED",
# "type":"TRANSACTION_TYPE_DEPOSIT",
# "senderAddress":"",
# "recipientAddress":"0x3c46fa2e3f9023bc4897828ed173f8ecb3a554bc",
# "amount":"200.000000000000000000",
# "transactionFee":"0.000000000000000000",
# "timestamp":1635893208404,
# "transactionHash":"0x28bad3b74a042df13d64ddfbca855566a51bf7f190b8cd565c236a18d5cd493f#42",
# "blockHeight":13540262,
# "currency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# "memo":null,
# "paymentProvider":"a8d6d1cb-f84a-4e9d-aa82-c6a08b356ee1",
# "requiresCode":false
# }
# ],
# "first":true,
# "hasContent":true,
# "pageSize":10
# }
#
currency = None
if code is not None:
currency = self.currency(code)
content = self.safe_value(response, 'content', [])
return self.parse_transactions(content, currency, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# {
# "id":"fbf7d0d1-2629-4ad8-9def-7a1dba423362",
# "status":"TRANSACTION_STATUS_CONFIRMED",
# "type":"TRANSACTION_TYPE_DEPOSIT",
# "senderAddress":"",
# "recipientAddress":"0x3c46fa2e3f9023bc4897828ed173f8ecb3a554bc",
# "amount":"200.000000000000000000",
# "transactionFee":"0.000000000000000000",
# "timestamp":1635893208404,
# "transactionHash":"0x28bad3b74a042df13d64ddfbca855566a51bf7f190b8cd565c236a18d5cd493f#42",
# "blockHeight":13540262,
# "currency":"0c3a106d-bde3-4c13-a26e-3fd2394529e5",
# "memo":null,
# "paymentProvider":"a8d6d1cb-f84a-4e9d-aa82-c6a08b356ee1",
# "requiresCode":false
# }
#
id = self.safe_string(transaction, 'id')
timestamp = self.safe_integer(transaction, 'timestamp')
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
addressFrom = self.safe_string(transaction, 'senderAddress')
if addressFrom == '':
addressFrom = None
addressTo = self.safe_string(transaction, 'recipientAddress')
if addressTo == '':
addressTo = None
txid = self.safe_string(transaction, 'transactionHash')
tagTo = self.safe_string(transaction, 'memo')
fee = None
feeCost = self.safe_number(transaction, 'transactionFee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = self.parse_transaction_type(self.safe_string(transaction, 'type'))
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': addressFrom,
'addressTo': addressTo,
'address': addressTo,
'tagFrom': None,
'tagTo': tagTo,
'tag': tagTo,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def parse_transaction_status(self, status):
statuses = {
'TRANSACTION_STATUS_CONFIRMED': 'ok',
'TRANSACTION_STATUS_EXECUTED': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction_type(self, type):
types = {
'TRANSACTION_TYPE_DEPOSIT': 'deposit',
'TRANSACTION_TYPE_WITHDRAWAL': 'withdrawal',
}
return self.safe_string(types, type, type)
def sign(self, path, api='public', method='GET', params=None, headers=None, body=None):
request = '/' + self.version + '/' + self.implode_params(path, params)
requestString = request
query = self.omit(params, self.extract_params(path))
urlencodedQuery = self.urlencode(query)
if method == 'GET':
if query:
requestString += '?' + urlencodedQuery
if api == 'private':
self.check_required_credentials()
auth = method + request + urlencodedQuery
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512)
headers = {
'X-LA-APIKEY': self.apiKey,
'X-LA-SIGNATURE': signature,
'X-LA-DIGEST': 'HMAC-SHA512', # HMAC-SHA384, HMAC-SHA512, optional
}
if method == 'POST':
headers['Content-Type'] = 'application/json'
body = self.json(query)
url = self.urls['api'] + requestString
return {'url': url, 'method': method, 'body': body, 'headers': headers}
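# A minimal sketch of the signing scheme above using only the standard library
# (the key and secret shown are placeholders): the signed payload is
# method + '/v2/' + path + urlencoded query, digested with HMAC-SHA512.
#
#   import hmac, hashlib
#   auth = 'GET' + '/v2/auth/account' + ''
#   signature = hmac.new(b'SECRET', auth.encode(), hashlib.sha512).hexdigest()
#   headers = {'X-LA-APIKEY': 'KEY', 'X-LA-SIGNATURE': signature, 'X-LA-DIGEST': 'HMAC-SHA512'}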
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if not response:
return
#
# {"result":false,"message":"invalid API key, signature or digest","error":"BAD_REQUEST","status":"FAILURE"}
# {"result":false,"message":"request expired or bad <timeAlive>/<timestamp> format","error":"BAD_REQUEST","status":"FAILURE"}
# {"message":"Internal Server Error","error":"INTERNAL_ERROR","status":"FAILURE"}
# {"result":false,"message":"Internal error","error":"For input string: \"NaN\"","status":"FAILURE"}
#
message = self.safe_string(response, 'message')
feedback = self.id + ' ' + body
if message is not None:
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
error = self.safe_string(response, 'error')
errorMessage = self.safe_string(error, 'message')
if (error is not None) or (errorMessage is not None):
self.throw_exactly_matched_exception(self.exceptions['exact'], error, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
raise ExchangeError(feedback) # unknown message
|
py | 1a316f6f5495e132c1fabdedcec98f6925d4224c | import sys
import time
if sys.version_info < (3, 6, 5):
sys.exit('RoboMaster Sdk requires Python 3.6.5 or later')
import logging
logger_name = "multi_robot"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.ERROR)
fmt = "%(asctime)-15s %(levelname)s %(filename)s:%(lineno)d %(message)s"
formatter = logging.Formatter(fmt)
sh = logging.StreamHandler()
sh.setFormatter(formatter)
logger.addHandler(sh)
def enable_logging_to_file():
logger.setLevel(logging.INFO)
filename = "RoboMasterSDK_MultiRobot_{0}_log.txt".format(time.strftime("%Y%m%d%H%M%S", time.localtime()))
fh = logging.FileHandler(filename)
fh.setFormatter(formatter)
logger.addHandler(fh)
__all__ = ['multi_robot', 'multi_group', 'multi_module', 'tool']
|
py | 1a316fb32f908168b67d242a53a85ab09fda67be | import logging
from time import time
from threading import Timer
from contextlib import contextmanager
import progressbar
import numpy as np
from pybar.analysis.analyze_raw_data import AnalyzeRawData
from pybar.fei4.register_utils import invert_pixel_mask, make_box_pixel_mask_from_col_row
from pybar.fei4_run_base import Fei4RunBase
from pybar.run_manager import RunManager
class ExtTriggerScan(Fei4RunBase):
'''External trigger scan with FE-I4
For use with external scintillator (user RX0), TLU (use RJ45), FE-I4 HitOR (USBpix self-trigger).
Note:
Set up trigger in DUT configuration file (e.g. dut_configuration_mio.yaml).
'''
_default_run_conf = {
"broadcast_commands": True,
"threaded_scan": True,
"trig_count": 0, # FE-I4 trigger count, number of consecutive BCs, 0 means 16, from 0 to 15
"trigger_latency": 232, # FE-I4 trigger latency, in BCs, external scintillator / TLU / HitOR: 232, USBpix self-trigger: 220
"trigger_delay": 8, # trigger delay, in BCs
"trigger_rate_limit": 500, # artificially limiting the trigger rate, in BCs (25ns)
"col_span": [1, 80], # defining active column interval, 2-tuple, from 1 to 80
"row_span": [1, 336], # defining active row interval, 2-tuple, from 1 to 336
"overwrite_enable_mask": False, # if True, use col_span and row_span to define an active region regardless of the Enable pixel register. If False, use col_span and row_span to define active region by also taking Enable pixel register into account.
"use_enable_mask_for_imon": True, # if True, apply inverted Enable pixel mask to Imon pixel mask
"no_data_timeout": 10, # no data timeout after which the scan will be aborted, in seconds
"scan_timeout": 60, # timeout for scan after which the scan will be stopped, in seconds
"max_triggers": 10000, # maximum triggers after which the scan will be stopped, if 0, no maximum triggers are set
"enable_tdc": False, # if True, enables TDC
"reset_rx_on_error": False # long scans have a high propability for ESD related data transmission errors; recover and continue here
}
def configure(self):
commands = []
commands.extend(self.register.get_commands("ConfMode"))
# Enable
enable_pixel_mask = make_box_pixel_mask_from_col_row(column=self.col_span, row=self.row_span)
if not self.overwrite_enable_mask:
enable_pixel_mask = np.logical_and(enable_pixel_mask, self.register.get_pixel_register_value('Enable'))
self.register.set_pixel_register_value('Enable', enable_pixel_mask)
commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name='Enable'))
# Imon
if self.use_enable_mask_for_imon:
imon_pixel_mask = invert_pixel_mask(enable_pixel_mask)
else:
imon_pixel_mask = make_box_pixel_mask_from_col_row(column=self.col_span, row=self.row_span, default=1, value=0) # 0 for selected columns, else 1
imon_pixel_mask = np.logical_or(imon_pixel_mask, self.register.get_pixel_register_value('Imon'))
self.register.set_pixel_register_value('Imon', imon_pixel_mask)
commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=False, name='Imon'))
# C_High
self.register.set_pixel_register_value('C_High', 0)
commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name='C_High'))
# C_Low
self.register.set_pixel_register_value('C_Low', 0)
commands.extend(self.register.get_commands("WrFrontEnd", same_mask_for_all_dc=True, name='C_Low'))
# Registers
self.register.set_global_register_value("Trig_Lat", self.trigger_latency) # set trigger latency
self.register.set_global_register_value("Trig_Count", self.trig_count) # set number of consecutive triggers
commands.extend(self.register.get_commands("WrRegister", name=["Trig_Lat", "Trig_Count"]))
commands.extend(self.register.get_commands("RunMode"))
self.register_utils.send_commands(commands)
def scan(self):
# preload command
lvl1_command = self.register.get_commands("zeros", length=self.trigger_delay)[0] + self.register.get_commands("LV1")[0] + self.register.get_commands("zeros", length=self.trigger_rate_limit)[0]
self.register_utils.set_command(lvl1_command)
with self.readout(no_data_timeout=self.no_data_timeout, **self.scan_parameters._asdict()):
with self.trigger():
got_data = False
start = time()
while not self.stop_run.wait(1.0):
if not got_data:
if self.data_words_per_second() > 0:
got_data = True
logging.info('Taking data...')
if self.max_triggers:
self.progressbar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=self.max_triggers, poll=10, term_width=80).start()
else:
self.progressbar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.Timer()], maxval=self.scan_timeout, poll=10, term_width=80).start()
else:
triggers = self.dut['TLU']['TRIGGER_COUNTER']
try:
if self.max_triggers:
self.progressbar.update(triggers)
else:
self.progressbar.update(time() - start)
except ValueError:
pass
if self.max_triggers and triggers >= self.max_triggers:
self.progressbar.finish()
self.stop(msg='Trigger limit was reached: %i' % self.max_triggers)
logging.info('Total amount of triggers collected: %d', self.dut['TLU']['TRIGGER_COUNTER'])
def analyze(self):
with AnalyzeRawData(raw_data_file=self.output_filename, create_pdf=True) as analyze_raw_data:
analyze_raw_data.trigger_data_format = self.dut['TLU']['DATA_FORMAT']
analyze_raw_data.create_source_scan_hist = True
analyze_raw_data.create_cluster_size_hist = True
analyze_raw_data.create_cluster_tot_hist = True
analyze_raw_data.align_at_trigger = True
if self.enable_tdc:
analyze_raw_data.create_tdc_counter_hist = True # histogram all TDC words
analyze_raw_data.create_tdc_hist = True # histogram the hit TDC information
analyze_raw_data.align_at_tdc = False # align events at the TDC word
analyze_raw_data.interpreter.set_warning_output(False)
analyze_raw_data.interpret_word_table()
analyze_raw_data.interpreter.print_summary()
analyze_raw_data.plot_histograms()
@contextmanager
def trigger(self):
self.start_trigger()
try:
yield
finally:
try:
self.stop_trigger()
except Exception:
# in case something fails, call this on last resort
self.scan_timeout_timer.cancel()
self.connect_cancel(["abort"])
def start_trigger(self, *args, **kwargs):
self.connect_cancel(["stop"])
self.dut['TDC']['ENABLE'] = self.enable_tdc
self.dut['TLU']['TRIGGER_COUNTER'] = 0
if self.max_triggers:
self.dut['TLU']['MAX_TRIGGERS'] = self.max_triggers
else:
self.dut['TLU']['MAX_TRIGGERS'] = 0 # infinity triggers
self.dut['TX']['EN_EXT_TRIGGER'] = True
with self.synchronized():
self.dut['TLU']['TRIGGER_ENABLE'] = True
def timeout():
try:
self.progressbar.finish()
except AttributeError:
pass
self.stop(msg='Scan timeout was reached')
self.scan_timeout_timer = Timer(self.scan_timeout, timeout)
if self.scan_timeout:
self.scan_timeout_timer.start()
def stop_trigger(self):
self.scan_timeout_timer.cancel()
with self.synchronized():
self.dut['TLU']['TRIGGER_ENABLE'] = False
self.dut['TX']['EN_EXT_TRIGGER'] = False
self.dut['TDC']['ENABLE'] = False
self.connect_cancel(["abort"])
if __name__ == "__main__":
with RunManager('configuration.yaml') as runmngr:
runmngr.run_run(ExtTriggerScan)
|
py | 1a317043616a18d4b1e966c9737b11c570a001d2 | # --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Zheng Zhang
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
import pprint
from config.config import config
from core.loader import TestDataLoader
from core.tester import Predictor, pred_eval
from dataset import *
from symbols import *
from utils.load_model import load_param
def test_deeplab(network, dataset, image_set, root_path, dataset_path,
ctx, prefix, epoch,
vis, logger=None, output_path=None):
if not logger:
assert False, 'require a logger'
# print config
pprint.pprint(config)
logger.info('testing config:{}\n'.format(pprint.pformat(config)))
# load symbol and testing data
sym = eval('get_' + network + '_test')(num_classes=config.dataset.NUM_CLASSES)
imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
segdb = imdb.gt_segdb()
# get test data iter
test_data = TestDataLoader(segdb, batch_size=len(ctx))
# load model
# arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=ctx, process=True)
arg_params, aux_params = load_param(prefix, epoch, process=True)
# infer shape
data_shape_dict = dict(test_data.provide_data_single)
arg_shape, _, aux_shape = sym.infer_shape(**data_shape_dict)
arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))
# check parameters
for k in sym.list_arguments():
if k in data_shape_dict or k in ['softmax_label']:
continue
assert k in arg_params, k + ' not initialized'
assert arg_params[k].shape == arg_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(
arg_params[k].shape)
for k in sym.list_auxiliary_states():
assert k in aux_params, k + ' not initialized'
assert aux_params[k].shape == aux_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(
aux_params[k].shape)
# decide maximum shape
data_names = [k[0] for k in test_data.provide_data_single]
label_names = ['softmax_label']
max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
# create predictor
predictor = Predictor(sym, data_names, label_names,
context=ctx, max_data_shapes=max_data_shape,
provide_data=test_data.provide_data, provide_label=test_data.provide_label,
arg_params=arg_params, aux_params=aux_params)
# start detection
pred_eval(predictor, test_data, imdb, vis=vis, logger=logger)
|
py | 1a3171c1f642e8f0b82bb16668a9b10605e30288 | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Rito Core developers
# Copyright (c) 2020 The KringleProjectCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Testing asset use cases
"""
from test_framework.test_framework import KringleProjectCoinTestFramework
from test_framework.util import *
import string
class AssetTest(KringleProjectCoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [['-assetindex'], ['-assetindex'], ['-assetindex']]
def activate_assets(self):
self.log.info("Generating KPC for node[0] and activating assets...")
n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2]
n0.generate(1)
self.sync_all()
n0.generate(431)
self.sync_all()
assert_equal("active", n0.getblockchaininfo()['bip9_softforks']['assets']['status'])
def big_test(self):
self.log.info("Running big test!")
n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2]
self.log.info("Calling issue()...")
address0 = n0.getnewaddress()
ipfs_hash = "QmcvyefkqQX3PpjpY5L8B2yMd47XrVwAipr6cxUt2zvYU8"
n0.issue(asset_name="MY_ASSET", qty=1000, to_address=address0, change_address="", \
units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash)
self.log.info("Waiting for ten confirmations after issue...")
n0.generate(10)
self.sync_all()
self.log.info("Checkout getassetdata()...")
assetdata = n0.getassetdata("MY_ASSET")
assert_equal(assetdata["name"], "MY_ASSET")
assert_equal(assetdata["amount"], 1000)
assert_equal(assetdata["units"], 4)
assert_equal(assetdata["reissuable"], 1)
assert_equal(assetdata["has_ipfs"], 1)
assert_equal(assetdata["ipfs_hash"], ipfs_hash)
self.log.info("Checking listmyassets()...")
myassets = n0.listmyassets(asset="MY_ASSET*", verbose=True)
assert_equal(len(myassets), 2)
asset_names = list(myassets.keys())
assert_equal(asset_names.count("MY_ASSET"), 1)
assert_equal(asset_names.count("MY_ASSET!"), 1)
assert_equal(myassets["MY_ASSET"]["balance"], 1000)
assert_equal(myassets["MY_ASSET!"]["balance"], 1)
assert_equal(len(myassets["MY_ASSET"]["outpoints"]), 1)
assert_equal(len(myassets["MY_ASSET!"]["outpoints"]), 1)
assert_is_hash_string(myassets["MY_ASSET"]["outpoints"][0]["txid"])
assert_equal(myassets["MY_ASSET"]["outpoints"][0]["txid"], \
myassets["MY_ASSET!"]["outpoints"][0]["txid"])
assert(int(myassets["MY_ASSET"]["outpoints"][0]["vout"]) >= 0)
assert(int(myassets["MY_ASSET!"]["outpoints"][0]["vout"]) >= 0)
assert_equal(myassets["MY_ASSET"]["outpoints"][0]["amount"], 1000)
assert_equal(myassets["MY_ASSET!"]["outpoints"][0]["amount"], 1)
self.log.info("Checking listassetbalancesbyaddress()...")
assert_equal(n0.listassetbalancesbyaddress(address0)["MY_ASSET"], 1000)
assert_equal(n0.listassetbalancesbyaddress(address0)["MY_ASSET!"], 1)
self.log.info("Checking listassetbalancesbyaddress()...")
assert_equal(n0.listaddressesbyasset("MY_ASSET"), n1.listaddressesbyasset("MY_ASSET"))
self.log.info("Calling transfer()...")
address1 = n1.getnewaddress()
n0.transfer(asset_name="MY_ASSET", qty=200, to_address=address1)
self.log.info("Waiting for ten confirmations after transfer...")
n0.generate(10)
self.sync_all()
self.log.info("Checking listmyassets()...")
myassets = n1.listmyassets(asset="MY_ASSET*", verbose=True)
assert_equal(len(myassets), 1)
asset_names = list(myassets.keys())
assert_equal(asset_names.count("MY_ASSET"), 1)
assert_equal(asset_names.count("MY_ASSET!"), 0)
assert_equal(myassets["MY_ASSET"]["balance"], 200)
assert_equal(len(myassets["MY_ASSET"]["outpoints"]), 1)
assert_is_hash_string(myassets["MY_ASSET"]["outpoints"][0]["txid"])
assert(int(myassets["MY_ASSET"]["outpoints"][0]["vout"]) >= 0)
assert_equal(n0.listmyassets(asset="MY_ASSET")["MY_ASSET"], 800)
self.log.info("Checking listassetbalancesbyaddress()...")
assert_equal(n1.listassetbalancesbyaddress(address1)["MY_ASSET"], 200)
changeaddress = None
assert_equal(n0.listaddressesbyasset("MY_ASSET"), n1.listaddressesbyasset("MY_ASSET"))
assert_equal(sum(n0.listaddressesbyasset("MY_ASSET").values()), 1000)
assert_equal(sum(n1.listaddressesbyasset("MY_ASSET").values()), 1000)
for assaddr in n0.listaddressesbyasset("MY_ASSET").keys():
if n0.validateaddress(assaddr)["ismine"] == True:
changeaddress = assaddr
assert_equal(n0.listassetbalancesbyaddress(changeaddress)["MY_ASSET"], 800)
assert(changeaddress != None)
assert_equal(n0.listassetbalancesbyaddress(address0)["MY_ASSET!"], 1)
self.log.info("Burning all units to test reissue on zero units...")
n0.transfer(asset_name="MY_ASSET", qty=800, to_address="n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP")
n0.generate(1)
assert_does_not_contain_key("MY_ASSET", n0.listmyassets(asset="MY_ASSET", verbose=True))
self.log.info("Calling reissue()...")
address1 = n0.getnewaddress()
ipfs_hash2 = "QmcvyefkqQX3PpjpY5L8B2yMd47XrVwAipr6cxUt2zvYU8"
n0.reissue(asset_name="MY_ASSET", qty=2000, to_address=address0, change_address=address1, \
reissuable=False, new_unit=-1, new_ipfs=ipfs_hash2)
self.log.info("Waiting for ten confirmations after reissue...")
self.sync_all()
n0.generate(10)
self.sync_all()
self.log.info("Checkout getassetdata()...")
assetdata = n0.getassetdata("MY_ASSET")
assert_equal(assetdata["name"], "MY_ASSET")
assert_equal(assetdata["amount"], 3000)
assert_equal(assetdata["units"], 4)
assert_equal(assetdata["reissuable"], 0)
assert_equal(assetdata["has_ipfs"], 1)
assert_equal(assetdata["ipfs_hash"], ipfs_hash2)
self.log.info("Checking listassetbalancesbyaddress()...")
assert_equal(n0.listassetbalancesbyaddress(address0)["MY_ASSET"], 2000)
self.log.info("Checking listassets()...")
n0.issue("KPC1", 1000)
n0.issue("KPC2", 1000)
n0.issue("KPC3", 1000)
n0.generate(1)
self.sync_all()
n0.listassets(asset="KPC*", verbose=False, count=2, start=-2)
self.log.info("Creating some sub-assets...")
n0.issue(asset_name="MY_ASSET/SUB1", qty=1000, to_address=address0, change_address=address0,\
units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash)
self.sync_all()
self.log.info("Waiting for ten confirmations after issuesubasset...")
n0.generate(10)
self.sync_all()
self.log.info("Checkout getassetdata()...")
assetdata = n0.getassetdata("MY_ASSET/SUB1")
assert_equal(assetdata["name"], "MY_ASSET/SUB1")
assert_equal(assetdata["amount"], 1000)
assert_equal(assetdata["units"], 4)
assert_equal(assetdata["reissuable"], 1)
assert_equal(assetdata["has_ipfs"], 1)
assert_equal(assetdata["ipfs_hash"], ipfs_hash)
kringleprojectcoin_assets = n0.listassets(asset="KPC*", verbose=False, count=2, start=-2)
assert_equal(len(kringleprojectcoin_assets), 2)
assert_equal(kringleprojectcoin_assets[0], "KPC2")
assert_equal(kringleprojectcoin_assets[1], "KPC3")
self.sync_all()
def issue_param_checks(self):
self.log.info("Checking bad parameter handling!")
n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2]
# just plain bad asset name
assert_raises_rpc_error(-8, "Invalid asset name: bad-asset-name", \
n0.issue, "bad-asset-name");
# trying to issue things that can't be issued
assert_raises_rpc_error(-8, "Unsupported asset type: OWNER", \
n0.issue, "AN_OWNER!");
assert_raises_rpc_error(-8, "Unsupported asset type: MSGCHANNEL", \
n0.issue, "A_MSGCHANNEL~CHANNEL_4");
assert_raises_rpc_error(-8, "Unsupported asset type: VOTE", \
n0.issue, "A_VOTE^PEDRO");
# check bad unique params
assert_raises_rpc_error(-8, "Invalid parameters for issuing a unique asset.", \
n0.issue, "A_UNIQUE#ASSET", 2)
assert_raises_rpc_error(-8, "Invalid parameters for issuing a unique asset.", \
n0.issue, "A_UNIQUE#ASSET", 1, "", "", 1)
assert_raises_rpc_error(-8, "Invalid parameters for issuing a unique asset.", \
n0.issue, "A_UNIQUE#ASSET", 1, "", "", 0, True)
def chain_assets(self):
self.log.info("Issuing chained assets in depth issue()...")
n0, n1, n2 = self.nodes[0], self.nodes[1], self.nodes[2]
chain_address = n0.getnewaddress()
ipfs_hash = "QmacSRmrkVmvJfbCpmU6pK72furJ8E8fbKHindrLxmYMQo"
chain_string = "CHAIN1"
n0.issue(asset_name=chain_string, qty=1000, to_address=chain_address, change_address="", \
units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash)
for c in string.ascii_uppercase:
chain_string += '/' + c
if len(chain_string) > 30:
break
n0.issue(asset_name=chain_string, qty=1000, to_address=chain_address, change_address="", \
units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash)
n0.generate(1)
self.sync_all()
chain_assets = n1.listassets(asset="CHAIN1*", verbose=False)
assert_equal(len(chain_assets), 13)
self.log.info("Issuing chained assets in width issue()...")
chain_address = n0.getnewaddress()
chain_string = "CHAIN2"
n0.issue(asset_name=chain_string, qty=1000, to_address=chain_address, change_address="", \
units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash)
for c in string.ascii_uppercase:
asset_name = chain_string + '/' + c
n0.issue(asset_name=asset_name, qty=1000, to_address=chain_address, change_address="", \
units=4, reissuable=True, has_ipfs=True, ipfs_hash=ipfs_hash)
n0.generate(1)
self.sync_all()
chain_assets = n1.listassets(asset="CHAIN2/*", verbose=False)
assert_equal(len(chain_assets), 26)
self.log.info("Chaining reissue transactions...")
address0 = n0.getnewaddress()
n0.issue(asset_name="CHAIN_REISSUE", qty=1000, to_address=address0, change_address="", \
units=4, reissuable=True, has_ipfs=False)
n0.generate(1)
self.sync_all()
n0.reissue(asset_name="CHAIN_REISSUE", qty=1000, to_address=address0, change_address="", \
reissuable=True)
assert_raises_rpc_error(-4, "Error: The transaction was rejected! Reason given: bad-tx-reissue-chaining-not-allowed", n0.reissue, "CHAIN_REISSUE", 1000, address0, "", True)
n0.generate(1)
self.sync_all()
n0.reissue(asset_name="CHAIN_REISSUE", qty=1000, to_address=address0, change_address="", \
reissuable=True)
n0.generate(1)
self.sync_all()
assetdata = n0.getassetdata("CHAIN_REISSUE")
assert_equal(assetdata["name"], "CHAIN_REISSUE")
assert_equal(assetdata["amount"], 3000)
assert_equal(assetdata["units"], 4)
assert_equal(assetdata["reissuable"], 1)
assert_equal(assetdata["has_ipfs"], 0)
def ipfs_state(self):
self.log.info("Checking ipfs hash state changes...")
n0 = self.nodes[0]
asset_name1 = "ASSET111"
asset_name2 = "ASSET222"
address1 = n0.getnewaddress()
address2 = n0.getnewaddress()
ipfs_hash = "QmcvyefkqQX3PpjpY5L8B2yMd47XrVwAipr6cxUt2zvYU8"
bad_hash = "RncvyefkqQX3PpjpY5L8B2yMd47XrVwAipr6cxUt2zvYU8"
########################################
# bad hash (isn't a valid multihash sha2-256)
try:
n0.issue(asset_name=asset_name1, qty=1000, to_address=address1, change_address=address2, \
units=0, reissuable=True, has_ipfs=True, ipfs_hash=bad_hash)
except JSONRPCException as e:
if "Invalid IPFS hash (doesn't start with 'Qm')" not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
########################################
# no hash
n0.issue(asset_name=asset_name2, qty=1000, to_address=address1, change_address=address2, \
units=0, reissuable=True, has_ipfs=False)
n0.generate(1)
ad = n0.getassetdata(asset_name2)
assert_equal(0, ad['has_ipfs'])
assert_does_not_contain_key('ipfs_hash', ad)
########################################
# reissue w/ bad hash
try:
n0.reissue(asset_name=asset_name2, qty=2000, to_address=address1, change_address=address2, \
reissuable=True, new_unit=-1, new_ipfs=bad_hash)
except JSONRPCException as e:
if "Invalid IPFS hash (doesn't start with 'Qm')" not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
########################################
# reissue w/ hash
n0.reissue(asset_name=asset_name2, qty=2000, to_address=address1, change_address=address2, \
reissuable=True, new_unit=-1, new_ipfs=ipfs_hash)
n0.generate(1)
ad = n0.getassetdata(asset_name2)
assert_equal(1, ad['has_ipfs'])
assert_equal(ipfs_hash, ad['ipfs_hash'])
########################################
# invalidate and reconsider
best = n0.getbestblockhash()
n0.invalidateblock(n0.getbestblockhash())
ad = n0.getassetdata(asset_name2)
assert_equal(0, ad['has_ipfs'])
assert_does_not_contain_key('ipfs_hash', ad)
n0.reconsiderblock(best)
ad = n0.getassetdata(asset_name2)
assert_equal(1, ad['has_ipfs'])
assert_equal(ipfs_hash, ad['ipfs_hash'])
def db_corruption_regression(self):
self.log.info("Checking db corruption invalidate block...")
n0 = self.nodes[0]
asset_name = "DATA_CORRUPT"
# Test to make sure that undoing a reissue and an issue during a reorg doesn't screw up the database/cache
n0.issue(asset_name)
a = n0.generate(1)[0]
n0.reissue(asset_name, 500, n0.getnewaddress())
b = n0.generate(1)[0]
self.log.info(f"Invalidating {a}...")
n0.invalidateblock(a)
assert_equal(0, len(n0.listassets(asset_name, True)))
def reissue_prec_change(self):
self.log.info("Testing precision change on reissue...")
n0 = self.nodes[0]
asset_name = "PREC_CHANGES"
address = n0.getnewaddress()
n0.issue(asset_name, 10, "", "", 0, True, False)
n0.generate(1)
assert_equal(0, n0.listassets("*", True)[asset_name]["units"])
for i in range(0, 8):
n0.reissue(asset_name, 10.0**(-i), address, "", True, i+1)
n0.generate(1)
assert_equal(i+1, n0.listassets("*", True)[asset_name]["units"])
assert_raises_rpc_error(-25, "Error: Unable to reissue asset: unit must be larger than current unit selection", \
n0.reissue, asset_name, 10.0**(-i), address, "", True, i)
n0.reissue(asset_name, 0.00000001, address)
n0.generate(1)
assert_equal(Decimal('11.11111111'), n0.listassets("*", True)[asset_name]["amount"])
def run_test(self):
self.activate_assets()
self.big_test()
self.issue_param_checks()
self.chain_assets()
self.ipfs_state()
self.db_corruption_regression()
self.reissue_prec_change()
if __name__ == '__main__':
AssetTest().main()
|
py | 1a31728f770cab4e8e1f6e48fa23a25e5e2d156c | import sys
import colorsys
try:
    import pygame
    import pygame.gfxdraw
except ImportError:
print("To simulate a unicorn HAT on your computer, please pip install pygame")
class UnicornHatSim(object):
def __init__(self, width, height, rotation_offset = 0):
# Compat with old library
self.AUTO = None
self.PHAT = None
# Set some defaults
self.rotation_offset = rotation_offset
self.rotation(0)
self.pixels = [(0, 0, 0)] * width * height
self.pixel_size = 15
self.width = width
self.height = height
self.window_width = width * self.pixel_size
self.window_height = height * self.pixel_size
# Init pygame and off we go
pygame.init()
pygame.display.set_caption("Unicorn HAT simulator")
self.screen = pygame.display.set_mode([self.window_width, self.window_height])
self.clear()
def set_pixel(self, x, y, r, g, b):
i = (x * self.width) + y
self.pixels[i] = [int(r), int(g), int(b)]
def draw(self):
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT:
print("Exiting...")
sys.exit()
for x in range(self.width):
for y in range(self.height):
self.draw_led(x, y)
def show(self):
self.clear()
self.draw()
pygame.display.flip()
def draw_led(self, x, y):
self.draw_gfxcircle(x,y)
def draw_gfxcircle(self, x, y):
p = self.pixel_size
w_x = int(x * p + self.pixel_size / 2)
w_y = int((self.height - 1 - y) * p + self.pixel_size / 2)
r = int(self.pixel_size / 4)
color = self.pixels[self.index(x, y)]
pygame.gfxdraw.aacircle(self.screen, w_x, w_y, r, color)
pygame.gfxdraw.filled_circle(self.screen, w_x, w_y, r, color)
def get_shape(self):
return (self.width, self.height)
def brightness(self, *args):
pass
def rotation(self, r):
        self._rotation = int(round(r / 90.0)) % 4  # four possible orientations: 0, 90, 180, 270
def clear(self):
self.screen.fill((0, 0, 0))
def get_rotation(self):
return self._rotation * 90
def set_layout(self, *args):
pass
def set_pixel_hsv(self, x, y, h, s=1.0, v=1.0):
r, g, b = [int(n*255) for n in colorsys.hsv_to_rgb(h, s, v)]
self.set_pixel(x, y, r, g, b)
def off(self):
print("Closing window")
pygame.quit()
def index(self, x, y):
# Offset to match device rotation
rot = (self.get_rotation() + self.rotation_offset) % 360
if rot == 0:
xx = x
yy = y
elif rot == 90:
xx = self.height - 1 - y
yy = x
elif rot == 180:
xx = self.width - 1 - x
yy = self.height - 1 - y
elif rot == 270:
xx = y
yy = self.width - 1 - x
return (xx * self.width) + yy
# SD hats work as expected
unicornhat = UnicornHatSim(8,8)
unicornphat = UnicornHatSim(8, 4)
# Unicornhat HD seems to be the other way around (not that there's anything wrong with that), so we rotate it 180°
unicornhathd = UnicornHatSim(16, 16, 180)
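# Minimal demo sketch (illustrative; assumes pygame is installed and a display is
# available): sweeps a hue gradient across the simulated 16x16 HD HAT when this
# file is run directly.
if __name__ == "__main__":
    import time
    for x in range(16):
        for y in range(16):
            unicornhathd.set_pixel_hsv(x, y, (x + y) / 32.0)
        unicornhathd.show()
        time.sleep(0.05)
    time.sleep(1.0)
    unicornhathd.off()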
|
py | 1a3172cae8cc24f912b6b77d43e594db26ef0107 | import os
import json
import multiprocessing
import random
import math
from math import log2, floor
from functools import partial
from contextlib import contextmanager, ExitStack
from pathlib import Path
from shutil import rmtree
from collections import Counter
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import torch
import torchvision
from torch.cuda.amp import autocast, GradScaler
from torch.optim import Adam
from torch import nn, einsum
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.autograd import grad as torch_grad
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import Sampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.dataset import Dataset
from PIL import Image
import pandas as pd
from kornia import filter2D
from tensorboardX import SummaryWriter
import numpy as np
import torch.distributed as dist
from tqdm import tqdm
import torch.multiprocessing as mp
import fire
from einops import rearrange
from gsa_pytorch import GSA
from linear_attention_transformer import ImageLinearAttention
from torchvision import transforms
|
py | 1a3172eb1ee9bdc679b93046681b4005fac9fc8c | #!/usr/bin/env python
"""These are standard aff4 objects."""
import hashlib
import re
import StringIO
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import utils
class VFSDirectory(aff4.AFF4Volume):
"""This represents a directory from the client."""
default_container = "VFSDirectory"
# We contain other objects within the tree.
_behaviours = frozenset(["Container"])
def Update(self, attribute=None, priority=None):
"""Refresh an old attribute.
Note that refreshing the attribute is asynchronous. It does not change
anything about the current object - you need to reopen the same URN some
time later to get fresh data.
Attributes:
CONTAINS - Refresh the content of the directory listing.
Args:
attribute: An attribute object as listed above.
priority: Priority to set for updating flow, None for default.
Returns:
The Flow ID that is pending
Raises:
IOError: If there has been an error starting the flow.
"""
# client id is the first path element
client_id = self.urn.Split()[0]
if attribute == "CONTAINS":
# Get the pathspec for this object
flow_id = flow.GRRFlow.StartFlow(client_id=client_id,
flow_name="ListDirectory",
pathspec=self.real_pathspec,
priority=priority,
notify_to_user=False,
token=self.token)
return flow_id
class SchemaCls(aff4.AFF4Volume.SchemaCls):
"""Attributes specific to VFSDirectory."""
STAT = aff4.Attribute("aff4:stat", rdfvalue.StatEntry,
"A StatResponse protobuf describing this file.",
"stat")
PATHSPEC = aff4.Attribute(
"aff4:pathspec", rdfvalue.PathSpec,
"The pathspec used to retrieve this object from the client.",
"pathspec")
class HashList(rdfvalue.RDFBytes):
"""A list of hashes."""
HASH_SIZE = 32
def __len__(self):
return len(self._value) / self.HASH_SIZE
def __iter__(self):
for i in range(len(self)):
yield self[i]
def __getitem__(self, idx):
return rdfvalue.HashDigest(
self._value[idx * self.HASH_SIZE: (idx + 1) * self.HASH_SIZE])
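# Illustrative note (a sketch, not part of the original source): a HashList is the
# raw concatenation of fixed-size digests, so indexing slices 32-byte windows out
# of the underlying bytes, e.g.
#
#   hashes = HashList(hashlib.sha256("a").digest() + hashlib.sha256("b").digest())
#   len(hashes)        # -> 2
#   hashes[1]          # -> HashDigest wrapping sha256("b")
#   list(hashes)       # -> both digests, in order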
class BlobImage(aff4.AFF4Image):
"""An AFF4 stream which stores chunks by hashes.
The hash stream is kept within an AFF4 Attribute, instead of another stream
making it more efficient for smaller files.
"""
# Size of a sha256 hash
_HASH_SIZE = 32
# How many chunks we read ahead
_READAHEAD = 5
def Initialize(self):
super(BlobImage, self).Initialize()
self.content_dirty = False
if self.mode == "w":
self.index = StringIO.StringIO("")
self.finalized = False
else:
self.index = StringIO.StringIO(self.Get(self.Schema.HASHES, ""))
self.finalized = self.Get(self.Schema.FINALIZED, False)
def Truncate(self, offset=0):
if offset != 0:
raise IOError("Non-zero truncation not supported for BlobImage")
super(BlobImage, self).Truncate(0)
self.index = StringIO.StringIO("")
self.finalized = False
def _GetChunkForWriting(self, chunk):
"""Chunks must be added using the AddBlob() method."""
raise NotImplementedError("Direct writing of HashImage not allowed.")
def _GetChunkForReading(self, chunk):
"""Retrieve the relevant blob from the AFF4 data store or cache."""
result = None
offset = chunk * self._HASH_SIZE
self.index.seek(offset)
chunk_name = self.index.read(self._HASH_SIZE)
try:
result = self.chunk_cache.Get(chunk_name)
except KeyError:
# Read ahead a few chunks.
self.index.seek(offset)
readahead = {}
for _ in range(self._READAHEAD):
name = self.index.read(self._HASH_SIZE)
if name and name not in self.chunk_cache:
urn = aff4.ROOT_URN.Add("blobs").Add(name.encode("hex"))
readahead[urn] = name
fds = aff4.FACTORY.MultiOpen(readahead, mode="r", token=self.token)
for fd in fds:
name = readahead[fd.urn]
# Remember the right fd
if name == chunk_name:
result = fd
# Put back into the cache
self.chunk_cache.Put(readahead[fd.urn], fd)
if result is None:
raise IOError("Chunk '%s' not found for reading!" % chunk)
return result
def FromBlobImage(self, fd):
"""Copy this file cheaply from another BlobImage."""
self.content_dirty = True
self.SetChunksize(fd.chunksize)
self.index = StringIO.StringIO(fd.index.getvalue())
self.size = fd.size
def Flush(self, sync=True):
if self.content_dirty:
self.Set(self.Schema.SIZE(self.size))
self.Set(self.Schema.HASHES(self.index.getvalue()))
self.Set(self.Schema.FINALIZED(self.finalized))
super(BlobImage, self).Flush(sync)
def AppendContent(self, src_fd):
"""Create new blob hashes and append to BlobImage.
We don't support writing at arbitrary file offsets, but this method provides
a convenient way to add blobs for a new file, or append content to an
existing one.
Args:
src_fd: source file handle open for read
Raises:
IOError: if blob has already been finalized.
"""
while 1:
blob = src_fd.read(self.chunksize)
if not blob:
break
blob_hash = hashlib.sha256(blob).digest()
blob_urn = rdfvalue.RDFURN("aff4:/blobs").Add(blob_hash.encode("hex"))
try:
fd = aff4.FACTORY.Open(blob_urn, "AFF4MemoryStream", mode="r",
token=self.token)
except IOError:
fd = aff4.FACTORY.Create(blob_urn, "AFF4MemoryStream", mode="w",
token=self.token)
fd.Write(blob)
fd.Close(sync=True)
self.AddBlob(blob_hash, len(blob))
self.Flush()
def AddBlob(self, blob_hash, length):
"""Add another blob to this image using its hash.
Once a blob is added that is smaller than the chunksize we finalize the
file, since handling adding more blobs makes the code much more complex.
Args:
blob_hash: sha256 binary digest
length: int length of blob
Raises:
IOError: if blob has been finalized.
"""
if self.finalized and length > 0:
raise IOError("Can't add blobs to finalized BlobImage")
self.content_dirty = True
self.index.seek(0, 2)
self.index.write(blob_hash)
self.size += length
if length < self.chunksize:
self.finalized = True
class SchemaCls(aff4.AFF4Image.SchemaCls):
"""The schema for Blob Images."""
STAT = aff4.AFF4Object.VFSDirectory.SchemaCls.STAT
HASHES = aff4.Attribute("aff4:hashes", rdfvalue.HashList,
"List of hashes of each chunk in this file.")
FINGERPRINT = aff4.Attribute("aff4:fingerprint",
rdfvalue.FingerprintResponse,
"DEPRECATED protodict containing arrays of "
" hashes. Use AFF4Stream.HASH instead.")
FINALIZED = aff4.Attribute("aff4:finalized",
rdfvalue.RDFBool,
"Once a blobimage is finalized, further writes"
" will raise exceptions.")
class HashImage(aff4.AFF4Image):
"""An AFF4 Image which refers to chunks by their hash.
This object stores a large image in chunks. Each chunk is stored using its
hash in the AFF4 data store. We have an index with a series of hashes stored
back to back. When we need to read a chunk, we seek the index for the hash,
and then open the data blob indexed by this hash. Chunks are cached as per the
AFF4Image implementation.
Assumptions:
Hashes do not collide.
All data blobs have the same size (the chunk size), except possibly the last
one in the file.
"""
# Size of a sha256 hash
_HASH_SIZE = 32
# How many chunks we read ahead
_READAHEAD = 5
_data_dirty = False
def Initialize(self):
super(HashImage, self).Initialize()
self.index = None
def _OpenIndex(self):
if self.index is None:
index_urn = self.urn.Add("index")
self.index = aff4.FACTORY.Create(index_urn, "AFF4Image", mode=self.mode,
token=self.token)
def _GetChunkForWriting(self, chunk):
"""Chunks must be added using the AddBlob() method."""
raise NotImplementedError("Direct writing of HashImage not allowed.")
def _GetChunkForReading(self, chunk):
"""Retrieve the relevant blob from the AFF4 data store or cache."""
result = None
self._OpenIndex()
self.index.Seek(chunk * self._HASH_SIZE)
chunk_name = self.index.Read(self._HASH_SIZE)
try:
result = self.chunk_cache.Get(chunk_name)
except KeyError:
# Read ahead a few chunks.
self.index.Seek(-self._HASH_SIZE, whence=1)
readahead = {}
for _ in range(self._READAHEAD):
name = self.index.Read(self._HASH_SIZE)
if name and name not in self.chunk_cache:
urn = aff4.ROOT_URN.Add("blobs").Add(name.encode("hex"))
readahead[urn] = name
fds = aff4.FACTORY.MultiOpen(readahead, mode="r", token=self.token)
for fd in fds:
name = readahead[fd.urn]
# Remember the right fd
if name == chunk_name:
result = fd
# Put back into the cache
self.chunk_cache.Put(readahead[fd.urn], fd)
return result
def Close(self, sync=True):
if self._data_dirty:
self.Set(self.Schema.SIZE(self.size))
if self.index:
self.index.Close(sync)
super(HashImage, self).Close(sync)
def AddBlob(self, blob_hash, length):
"""Add another blob to this image using its hash."""
self._OpenIndex()
self._data_dirty = True
self.index.Seek(0, 2)
self.index.Write(blob_hash)
self.size += length
class SchemaCls(aff4.AFF4Image.SchemaCls):
"""The schema for AFF4 files in the GRR VFS."""
STAT = aff4.AFF4Object.VFSDirectory.SchemaCls.STAT
CONTENT_LOCK = aff4.Attribute(
"aff4:content_lock", rdfvalue.RDFURN,
"This lock contains a URN pointing to the flow that is currently "
"updating this object.")
FINGERPRINT = aff4.Attribute("aff4:fingerprint",
rdfvalue.FingerprintResponse,
"DEPRECATED protodict containing arrays of "
" hashes. Use AFF4Stream.HASH instead.")
class AFF4SparseImage(BlobImage):
"""A class to store partial files."""
class SchemaCls(aff4.BlobImage.SchemaCls):
PATHSPEC = VFSDirectory.SchemaCls.PATHSPEC
def Initialize(self):
super(AFF4SparseImage, self).Initialize()
self._OpenIndex()
def _OpenIndex(self):
"""Create the index if it doesn't exist, otherwise open it."""
index_urn = self.urn.Add("index")
self.index = aff4.FACTORY.Create(index_urn, "AFF4SparseIndex", mode="rw",
token=self.token)
def Truncate(self, offset=0):
if offset != 0:
raise IOError("Non-zero truncation not supported for AFF4SparseImage")
super(AFF4SparseImage, self).Truncate(0)
self._OpenIndex()
self.finalized = False
def Read(self, length):
result = []
while length > 0:
data = self._ReadPartial(length)
if not data:
break
length -= len(data)
result.append(data)
return "".join(result)
def _GetChunkForReading(self, chunk):
"""Retrieve the relevant blob from the AFF4 data store or cache."""
result = None
offset = chunk * self._HASH_SIZE
self.index.seek(offset)
chunk_name = self.index.read(self._HASH_SIZE)
try:
result = self.chunk_cache.Get(chunk_name)
# Cache hit, so we're done.
return result
except KeyError:
# Read ahead a few chunks.
self.index.seek(offset)
readahead = {}
# Read all the hashes in one go, then split up the result.
chunks = self.index.read(self._HASH_SIZE * self._READAHEAD)
chunk_names = [chunks[i:i + self._HASH_SIZE]
for i in xrange(0, len(chunks), self._HASH_SIZE)]
for name in chunk_names:
# Try and read ahead a few chunks from the datastore and add them to the
# cache. If the chunks ahead aren't there, that's okay, we just can't
# cache them. We still keep reading to see if chunks after them are
# there, since the image is sparse.
try:
if name not in self.chunk_cache:
urn = aff4.ROOT_URN.Add("blobs").Add(name.encode("hex"))
readahead[urn] = name
except aff4.ChunkNotFoundError:
pass
fds = aff4.FACTORY.MultiOpen(readahead, mode="r", token=self.token)
for fd in fds:
name = readahead[fd.urn]
# Remember the right fd
if name == chunk_name:
result = fd
# Put back into the cache
self.chunk_cache.Put(readahead[fd.urn], fd)
if result is None:
raise aff4.ChunkNotFoundError("Chunk '%s' (urn: %s) not "
"found for reading!"
% (chunk, chunk_name))
return result
def _ReadPartial(self, length):
"""Read as much as possible, but not more than length."""
chunk = self.offset / self.chunksize
chunk_offset = self.offset % self.chunksize
# If we're past the end of the file, we don't have a chunk to read from, so
# we can't read anymore. We return the empty string here so we can read off
# the end of a file without raising, and get as much data as is there.
if chunk > self.index.last_chunk:
return ""
available_to_read = min(length, self.chunksize - chunk_offset)
fd = self._GetChunkForReading(chunk)
fd.Seek(chunk_offset)
result = fd.Read(available_to_read)
self.offset += len(result)
return result
def AddBlob(self, blob_hash, length, chunk_number):
"""Add another blob to this image using its hash."""
# TODO(user) Allow the index's chunksize to be > self._HASH_SIZE.
# This will reduce the number of rows we need to store in the datastore.
# We'll fill chunks with 0s when we don't have enough information to write
# to them fully, and ignore 0s when we're reading chunks.
# There's one hash in the index for each chunk in the file.
offset = chunk_number * self.index.chunksize
self.index.Seek(offset)
# If we're adding a new blob, we should increase the size. If we're just
# updating an existing blob, the size should stay the same.
# That is, if we read the index at the right offset and no hash is there, we
# must not have seen this blob before, so we say we're adding a new one and
# increase in size.
if not self.index.ChunkExists(chunk_number):
# We say that we've increased in size by the size of the blob,
# but really we only store its hash in the AFF4SparseImage.
self.size += length
# Seek back in case we've read past the offset we're meant to write to.
self.index.Seek(offset)
self.index.Write(blob_hash)
self._dirty = True
def Flush(self, sync=True):
if self._dirty:
self.index.Flush(sync=sync)
super(AFF4SparseImage, self).Flush(sync=sync)
class AFF4SparseIndex(aff4.AFF4Image):
"""A sparse index for AFF4SparseImage."""
# TODO(user) Allow for a bigger chunk size. At the moment, the
# chunksize must be exactly the hash size.
chunksize = 32
class SchemaCls(aff4.AFF4Image.SchemaCls):
_CHUNKSIZE = aff4.Attribute("aff4:chunksize", rdfvalue.RDFInteger,
"Total size of each chunk.", default=32)
LAST_CHUNK = aff4.Attribute("aff4:lastchunk", rdfvalue.RDFInteger,
"The highest numbered chunk in this object.",
default=-1)
def Initialize(self):
# The rightmost chunk we've seen so far. We'll use this to keep track of
# what the biggest possible size this file could be is.
self.last_chunk = self.Get(self.Schema.LAST_CHUNK)
super(AFF4SparseIndex, self).Initialize()
def _GetChunkForWriting(self, chunk):
"""Look in the datastore for a chunk, and create it if it isn't there."""
chunk_name = self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk)
try:
fd = self.chunk_cache.Get(chunk_name)
except KeyError:
# Try and get a lock on the chunk.
fd = aff4.FACTORY.OpenWithLock(chunk_name, token=self.token)
# If the chunk didn't exist in the datastore, create it.
if fd.Get(fd.Schema.LAST) is None:
# Each time we create a new chunk, we grow in size.
self.size += self.chunksize
self._dirty = True
fd = aff4.FACTORY.Create(chunk_name, "AFF4MemoryStream", mode="rw",
token=self.token)
self.chunk_cache.Put(chunk_name, fd)
# Keep track of the biggest chunk_number we've seen so far.
if chunk > self.last_chunk:
self.last_chunk = chunk
self._dirty = True
return fd
def ChunkExists(self, chunk_number):
"""Do we have this chunk in the index?"""
try:
self._GetChunkForReading(chunk_number)
return True
except aff4.ChunkNotFoundError:
return False
def Write(self, data):
"""Write data to the file."""
self._dirty = True
if not isinstance(data, bytes):
raise IOError("Cannot write unencoded string.")
while data:
data = self._WritePartial(data)
def Read(self, length):
"""Read a block of data from the file."""
result = ""
# The total available size in the file
length = int(length)
# Make sure we don't read past the "end" of the file. We say the end is the
# end of the last chunk. If we do try and read past the end, we should
# return an empty string.
# The end of the file is the *end* of the last chunk, so we add one here.
length = min(length,
((self.last_chunk + 1) * self.chunksize) - self.offset)
while length > 0:
data = self._ReadPartial(length)
if not data:
break
length -= len(data)
result += data
return result
def Flush(self, sync=True):
if self._dirty:
self.Set(self.Schema.LAST_CHUNK, rdfvalue.RDFInteger(self.last_chunk))
super(AFF4SparseIndex, self).Flush(sync=sync)
class AFF4Index(aff4.AFF4Object):
"""An aff4 object which manages access to an index.
This object has no actual attributes, it simply manages the index.
"""
# Value to put in the cell for index hits.
PLACEHOLDER_VALUE = "X"
def __init__(self, urn, **kwargs):
# Never read anything directly from the table by forcing an empty clone.
kwargs["clone"] = {}
super(AFF4Index, self).__init__(urn, **kwargs)
# We collect index data here until we flush.
self.to_set = set()
self.to_delete = set()
def Flush(self, sync=False):
"""Flush the data to the index."""
super(AFF4Index, self).Flush(sync=sync)
# Remove entries from deletion set that are going to be added anyway.
self.to_delete = self.to_delete.difference(self.to_set)
# Convert sets into dicts that MultiSet handles.
to_set = dict(zip(self.to_set, self.PLACEHOLDER_VALUE * len(self.to_set)))
data_store.DB.MultiSet(self.urn, to_set, to_delete=list(self.to_delete),
token=self.token, replace=True, sync=sync)
self.to_set = set()
self.to_delete = set()
def Close(self, sync=False):
self.Flush(sync=sync)
super(AFF4Index, self).Close(sync=sync)
def Add(self, urn, attribute, value):
"""Add the attribute of an AFF4 object to the index.
Args:
urn: The URN of the AFF4 object this attribute belongs to.
attribute: The attribute to add to the index.
value: The value of the attribute to index.
Raises:
RuntimeError: If a bad URN is passed in.
"""
if not isinstance(urn, rdfvalue.RDFURN):
raise RuntimeError("Bad urn parameter for index addition.")
column_name = "index:%s:%s:%s" % (
attribute.predicate, value.lower(), urn)
self.to_set.add(column_name)
def Query(self, attributes, regex, limit=100):
"""Query the index for the attribute.
Args:
attributes: A list of attributes to query for.
regex: The regex to search this attribute.
limit: A (start, length) tuple of integers representing subjects to
return. Useful for paging. If its a single integer we take it as the
length limit (start=0).
Returns:
A list of RDFURNs which match the index search.
"""
# Make the regular expressions.
regex = regex.lstrip("^") # Begin and end string matches work because
regex = regex.rstrip("$") # they are explicit in the storage.
regexes = ["index:%s:%s:.*" % (a.predicate, regex.lower())
for a in attributes]
start = 0
try:
start, length = limit # pylint: disable=unpacking-non-sequence
except TypeError:
length = limit
# Get all the hits
index_hits = set()
for col, _, _ in data_store.DB.ResolveRegex(
self.urn, regexes, token=self.token,
timestamp=data_store.DB.ALL_TIMESTAMPS):
# Extract URN from the column_name.
index_hits.add(rdfvalue.RDFURN(col.rsplit("aff4:/", 1)[1]))
hits = []
for i, hit in enumerate(index_hits):
if i < start: continue
hits.append(hit)
if i >= start + length - 1:
break
return hits
def _QueryRaw(self, regex):
return set([(x, y) for (y, x, _) in data_store.DB.ResolveRegex(
self.urn, regex, token=self.token,
timestamp=data_store.DB.ALL_TIMESTAMPS)])
def MultiQuery(self, attributes, regexes):
"""Query the index for the attribute, matching multiple regexes at a time.
Args:
attributes: A list of attributes to query for.
regexes: A list of regexes to search the attributes for.
Returns:
A dict mapping each matched attribute name to a list of RDFURNs.
"""
# Make the regular expressions.
combined_regexes = []
# Begin and end string matches work because they are explicit in storage.
regexes = [r.lstrip("^").rstrip("$").lower() for r in regexes]
for attribute in attributes:
combined_regexes.append("index:%s:(%s):.*" % (
attribute.predicate, "|".join(regexes)))
# Get all the hits
result = {}
for col, _, _ in data_store.DB.ResolveRegex(
self.urn, combined_regexes, token=self.token,
timestamp=data_store.DB.ALL_TIMESTAMPS):
# Extract the attribute name.
attribute_name = col.split(":")[3]
# Extract URN from the column_name.
urn = rdfvalue.RDFURN(col.rsplit("aff4:/", 1)[1])
result.setdefault(attribute_name, []).append(urn)
return result
def DeleteAttributeIndexesForURN(self, attribute, value, urn):
"""Remove all entries for a given attribute referring to a specific urn."""
if not isinstance(urn, rdfvalue.RDFURN):
raise RuntimeError("Bad urn parameter for index deletion.")
column_name = "index:%s:%s:%s" % (
attribute.predicate, value.lower(), urn)
self.to_delete.add(column_name)
class AFF4IndexSet(aff4.AFF4Object):
"""Index that behaves as a set of strings."""
PLACEHOLDER_VALUE = "X"
INDEX_PREFIX = "index:"
INDEX_PREFIX_LEN = len(INDEX_PREFIX)
def Initialize(self):
super(AFF4IndexSet, self).Initialize()
self.to_set = {}
self.to_delete = set()
def Add(self, value):
column_name = self.INDEX_PREFIX + utils.SmartStr(value)
self.to_set[column_name] = self.PLACEHOLDER_VALUE
def Remove(self, value):
column_name = self.INDEX_PREFIX + utils.SmartStr(value)
self.to_delete.add(column_name)
def ListValues(self, regex=".*", limit=10000):
values = data_store.DB.ResolveRegex(self.urn, self.INDEX_PREFIX + regex,
token=self.token, limit=limit)
result = set()
for v in values:
column_name = v[0]
if column_name in self.to_delete:
continue
result.add(column_name[self.INDEX_PREFIX_LEN:])
for column_name in self.to_set:
if column_name in self.to_delete:
continue
result.add(column_name[self.INDEX_PREFIX_LEN:])
return result
def Flush(self, sync=False):
super(AFF4IndexSet, self).Flush(sync=sync)
data_store.DB.MultiSet(self.urn, self.to_set, token=self.token,
to_delete=list(self.to_delete), replace=True,
sync=sync)
self.to_set = {}
self.to_delete = set()
def Close(self, sync=False):
self.Flush(sync=sync)
super(AFF4IndexSet, self).Close(sync=sync)
class AFF4LabelsIndex(aff4.AFF4Volume):
"""Index for objects' labels with vaiorus querying capabilities."""
# Separator is a character that's not allowed in labels names.
SEPARATOR = "|"
ESCAPED_SEPARATOR = re.escape("|")
def Initialize(self):
super(AFF4LabelsIndex, self).Initialize()
self._urns_index = None
self._used_labels_index = None
@property
def urns_index(self):
if self._urns_index is None:
self._urns_index = aff4.FACTORY.Create(
self.urn.Add("urns_index"), "AFF4Index", mode=self.mode,
token=self.token)
return self._urns_index
@property
def used_labels_index(self):
if self._used_labels_index is None:
self._used_labels_index = aff4.FACTORY.Create(
self.urn.Add("used_labels_index"), "AFF4IndexSet", mode=self.mode,
token=self.token)
return self._used_labels_index
def IndexNameForLabel(self, label_name, label_owner):
return label_owner + self.SEPARATOR + label_name
def LabelForIndexName(self, index_name):
label_owner, label_name = utils.SmartStr(index_name).split(
self.SEPARATOR, 1)
return rdfvalue.AFF4ObjectLabel(name=label_name, owner=label_owner)
def AddLabel(self, urn, label_name, owner=None):
if owner is None:
raise ValueError("owner can't be None")
index_name = self.IndexNameForLabel(label_name, owner)
self.urns_index.Add(urn, aff4.AFF4Object.SchemaCls.LABELS, index_name)
self.used_labels_index.Add(index_name)
def RemoveLabel(self, urn, label_name, owner=None):
if owner is None:
raise ValueError("owner can't be None")
self.urns_index.DeleteAttributeIndexesForURN(
aff4.AFF4Object.SchemaCls.LABELS,
self.IndexNameForLabel(label_name, owner), urn)
def ListUsedLabels(self):
index_results = self.used_labels_index.ListValues()
return [self.LabelForIndexName(name) for name in index_results]
def FindUrnsByLabel(self, label, owner=None):
results = self.MultiFindUrnsByLabel([label], owner=owner).values()
if not results:
return []
else:
return results[0]
def MultiFindUrnsByLabel(self, labels, owner=None):
if owner is None:
owner = ".+"
else:
owner = re.escape(owner)
query_results = self.urns_index.MultiQuery(
[aff4.AFF4Object.SchemaCls.LABELS],
[owner + self.ESCAPED_SEPARATOR + re.escape(label) for label in labels])
results = {}
for key, value in query_results.iteritems():
results[self.LabelForIndexName(key)] = value
return results
def FindUrnsByLabelNameRegex(self, label_name_regex, owner=None):
return self.MultiFindUrnsByLabelNameRegex([label_name_regex], owner=owner)
def MultiFindUrnsByLabelNameRegex(self, label_name_regexes, owner=None):
if owner is None:
owner = ".+"
else:
owner = re.escape(owner)
query_results = self.urns_index.MultiQuery(
[aff4.AFF4Object.SchemaCls.LABELS],
[owner + self.ESCAPED_SEPARATOR + regex
for regex in label_name_regexes])
results = {}
for key, value in query_results.iteritems():
results[self.LabelForIndexName(key)] = value
return results
def CleanUpUsedLabelsIndex(self):
raise NotImplementedError()
def Flush(self, sync=False):
super(AFF4LabelsIndex, self).Flush(sync=sync)
self.urns_index.Flush(sync=sync)
self.used_labels_index.Flush(sync=sync)
def Close(self, sync=False):
self.Flush(sync=sync)
super(AFF4LabelsIndex, self).Close(sync=sync)
class TempMemoryFile(aff4.AFF4MemoryStream):
"""A temporary AFF4MemoryStream-based file with a random URN."""
def __init__(self, urn, **kwargs):
if urn is None:
urn = rdfvalue.RDFURN("aff4:/tmp").Add("%X" % utils.PRNG.GetULong())
super(TempMemoryFile, self).__init__(urn, **kwargs)
class TempImageFile(aff4.AFF4Image):
"""A temporary file AFF4Image-based file with a random URN."""
def __init__(self, urn, **kwargs):
if urn is None:
urn = rdfvalue.RDFURN("aff4:/tmp").Add("%X" % utils.PRNG.GetULong())
super(TempImageFile, self).__init__(urn, **kwargs)
|
py | 1a3172eb6778c93fcdfa22e7af5cd56e20916cd5 | import warnings
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets import to_tensor
from mmdet.datasets.transforms import ImageTransform
from mmdet.models import build_detector
from imantics import Polygons, Mask
import cv2
def init_detector(config, checkpoint=None, device='cuda:0'):
"""Initialize a detector from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
'but got {}'.format(type(config)))
config.model.pretrained = None
model = build_detector(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
if 'CLASSES' in checkpoint['meta']:
            model.CLASSES = checkpoint['meta']['CLASSES']
else:
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use COCO classes by default.')
model.CLASSES = get_classes('coco')
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
def inference_detector(model, imgs):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray]): Either image files or loaded
images.
Returns:
        If imgs is a list, a generator over per-image results will be returned,
        otherwise the detection result for the single image is returned directly.
"""
cfg = model.cfg
img_transform = ImageTransform(
size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)
device = next(model.parameters()).device # model device
if not isinstance(imgs, list):
return _inference_single(model, imgs, img_transform, device)
else:
return _inference_generator(model, imgs, img_transform, device)
def _prepare_data(img, img_transform, cfg, device):
ori_shape = img.shape
img, img_shape, pad_shape, scale_factor = img_transform(
img,
scale=cfg.data.test.img_scale,
keep_ratio=cfg.data.test.get('resize_keep_ratio', True))
img = to_tensor(img).to(device).unsqueeze(0)
img_meta = [
dict(
ori_shape=ori_shape,
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=False)
]
return dict(img=[img], img_meta=[img_meta])
def _inference_single(model, img, img_transform, device):
img = mmcv.imread(img)
data = _prepare_data(img, img_transform, model.cfg, device)
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result
def _inference_generator(model, imgs, img_transform, device):
for img in imgs:
yield _inference_single(model, img, img_transform, device)
# TODO: merge this method with the one in BaseDetector
def show_result(img, result, class_names, score_thr=0.3, out_file=None):
"""Visualize the detection results on the image.
Args:
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
class_names (list[str] or tuple[str]): A list of class names.
score_thr (float): The threshold to visualize the bboxes and masks.
out_file (str, optional): If specified, the visualization result will
be written to the out file instead of shown in a window.
"""
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img)
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
# draw segmentation masks
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
colors = {k: [] for k in 'rgb'}
temp = {k: np.random.randint(0, 255) for k in 'rgb'}
for k in temp:
while 1:
c = temp[k]
t = set(j for j in range(c - 25, c + 25) if 0 <= j <= 255)
if t.intersection(colors[k]):
temp[k] = np.random.randint(0, 255)
else:
break
colors[k].append(temp[k])
color_mask = np.array([colors['r'][0], colors['g'][0], colors['b'][0]])
# color_mask = np.random.randint(
# 0, 256, (1, 3), dtype=np.uint8)
mask = maskUtils.decode(segms[i]).astype(np.bool)
img[mask] = img[mask] * 0.5 + color_mask * 0.5
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
thickness_t = 2 if img.shape[0] < 3000 else 6
font_scale_t = 0.65 if img.shape[0] < 3000 else 2.5
mmcv.imshow_det_bboxes(
img.copy(),
bboxes,
labels,
class_names=class_names,
score_thr=score_thr,
text_color='yellow',
thickness=thickness_t,
font_scale=font_scale_t,
show=out_file is None,
out_file=out_file,
win_name='demo')
def result2dict(img, result, class_names, score_thr=0.3, out_file=None):
assert isinstance(class_names, (tuple, list))
img = mmcv.imread(img)
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
# draw segmentation masks
arr_poly = []
arr_masks = []
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(
0, 256, (1, 3), dtype=np.uint8)
mask = maskUtils.decode(segms[i]).astype(np.bool)
# img[mask] = img[mask] * 0.5 + color_mask * 0.5
polygons = Mask(mask).polygons()
if polygons.points:
arr_poly.append(polygons.points[0])
arr_masks.append(mask)
else:
arr_poly.append(np.empty([]))
arr_masks.append(np.empty([]))
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
if not arr_poly:
return None
keep = np.array([i for i, val in enumerate(arr_poly) if (val != np.empty([])).any()])
labels = np.concatenate(labels)
ret_poly = [i.tolist() for i in np.array(arr_poly)[keep]]
ret_mask = [i.tolist() for i in np.array(arr_masks)[keep]]
return bboxes[keep].tolist(), ret_poly, np.array([class_names[i] for i in labels])[keep].tolist(), ret_mask
# mmcv.imshow_det_bboxes(
# img.copy(),
# bboxes,
# labels,
# class_names=class_names,
# score_thr=score_thr,
# show=out_file is None,
# out_file=out_file)
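# Usage sketch (illustrative only; the config and checkpoint paths below are
# placeholder assumptions, not files shipped with this module):
#
#   model = init_detector('configs/mask_rcnn_r50_fpn_1x.py',
#                         'checkpoints/mask_rcnn_r50_fpn_1x.pth', device='cuda:0')
#   result = inference_detector(model, 'demo.jpg')
#   show_result('demo.jpg', result, model.CLASSES, score_thr=0.3, out_file='demo_out.jpg')
#   # result2dict packs (bboxes, polygons, labels, masks) for detections above score_thr
#   packed = result2dict('demo.jpg', result, model.CLASSES, score_thr=0.3)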
|
py | 1a31754be5b337d81d4b79f87324bebed5897cd3 | from os import path
from pandas.api.types import CategoricalDtype
from numpy import mean, concatenate, ones, sqrt, zeros, arange
from scipy.stats import norm
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from attack_models.attack_model import PrivacyAttack
from utils.constants import *
from utils.logging import LOGGER
class AttributeInferenceAttack(PrivacyAttack):
"""A privacy attack that aims to reconstruct a sensitive attribute c given a partial target record T"""
def __init__(self, PredictionModel, sensitiveAttribute, metadata, quids=None):
"""
Parent class for simple regression attribute inference attack
:param PredictionModel: object: sklearn-type prediction model
:param sensitiveAttribute: string: name of a column in a DataFrame that is considered the unknown, sensitive attribute
:param metadata: dict: schema for the data to be attacked
        :param quids: list: optional list of numerical attribute names to be binned and treated as categorical quasi-identifiers
"""
self.PredictionModel = PredictionModel
self.sensitiveAttribute = sensitiveAttribute
self.metadata, self.knownAttributes, self.categoricalAttributes, self.nfeatures = self._read_meta(metadata, quids)
self.ImputerCat = SimpleImputer(strategy='most_frequent')
self.ImputerNum = SimpleImputer(strategy='median')
self.trained = False
self.__name__ = f'{self.PredictionModel.__class__.__name__}'
def attack(self, targetAux, attemptLinkage=False, data=None):
"""Makes a guess about the target's secret attribute"""
assert self.trained, 'Attack must first be trained on some data before can predict sensitive target value'
if attemptLinkage:
assert data is not None, "Need a dataset for linkage attack."
try:
groups = data.groupby(self.categoricalAttributes)
targetCats = targetAux[self.categoricalAttributes].values
groupSize = groups.size()[targetCats]
if all(groupSize == 1):
guess = groups.get_group(tuple(targetCats[0]))[self.sensitiveAttribute].values[0]
else:
guess = self._make_guess(targetAux)
except:
guess = self._make_guess(targetAux)
else:
guess = self._make_guess(targetAux)
return guess
def _make_guess(self, targetAux):
raise NotImplementedError('Method must be overriden by a subclass')
def _read_meta(self, metadata, quids):
if quids is None:
quids = []
meta_dict = {}
knownAttributes = []
categoricalAttributes = []
nfeatures = 0
for cdict in metadata['columns']:
attr_name = cdict['name']
data_type = cdict['type']
if data_type == FLOAT or data_type == INTEGER:
if attr_name in quids:
cat_bins = cdict['bins']
cat_labels = [f'({cat_bins[i]},{cat_bins[i+1]}]' for i in range(len(cat_bins)-1)]
meta_dict[attr_name] = {
'type': CATEGORICAL,
'categories': cat_labels,
'size': len(cat_labels)
}
nfeatures += len(cat_labels)
if attr_name != self.sensitiveAttribute:
categoricalAttributes.append(attr_name)
else:
meta_dict[attr_name] = {
'type': data_type,
'min': cdict['min'],
'max': cdict['max']
}
nfeatures += 1
elif data_type == CATEGORICAL or data_type == ORDINAL:
meta_dict[attr_name] = {
'type': data_type,
'categories': cdict['i2s'],
'size': len(cdict['i2s'])
}
nfeatures += len(cdict['i2s'])
if attr_name != self.sensitiveAttribute:
categoricalAttributes.append(attr_name)
else:
raise ValueError(f'Unknown data type {data_type} for attribute {attr_name}')
if attr_name != self.sensitiveAttribute:
knownAttributes.append(attr_name)
return meta_dict, knownAttributes, categoricalAttributes, nfeatures
def _encode_data(self, data):
dfcopy = data.copy()
for col, cdict in self.metadata.items():
if col in list(dfcopy):
col_data = dfcopy[col]
if cdict['type'] in [CATEGORICAL, ORDINAL]:
if len(col_data) > len(col_data.dropna()):
col_data = col_data.fillna(FILLNA_VALUE_CAT)
if FILLNA_VALUE_CAT not in cdict['categories']:
                            cdict['categories'].append(FILLNA_VALUE_CAT)
                            cdict['size'] += 1
cat = CategoricalDtype(categories=cdict['categories'], ordered=True)
col_data = col_data.astype(cat)
dfcopy[col] = col_data.cat.codes
return dfcopy.values
def _impute_missing_values(self, df):
dfImpute = df.copy()
catCols = []
numCols = []
for attr, col in self.metadata.items():
if attr in list(dfImpute):
if col['type'] in [CATEGORICAL, ORDINAL]:
catCols.append(attr)
elif col['type'] in NUMERICAL:
numCols.append(attr)
self.ImputerCat.fit(df[catCols])
dfImpute[catCols] = self.ImputerCat.transform(df[catCols])
self.ImputerNum.fit(df[numCols])
dfImpute[numCols] = self.ImputerNum.transform(df[numCols])
return dfImpute
def _one_hot(self, col_data, categories):
col_data_onehot = zeros((len(col_data), len(categories)))
cidx = [categories.index(c) for c in col_data]
col_data_onehot[arange(len(col_data)), cidx] = 1
return col_data_onehot
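# Worked illustration (a sketch, not from the original source): _one_hot maps each
# categorical value to a row with a single 1 at the index of its category, e.g.
#
#   _one_hot(['b', 'a', 'c'], categories=['a', 'b', 'c'])
#   # -> [[0., 1., 0.],
#   #     [1., 0., 0.],
#   #     [0., 0., 1.]]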
class LinRegAttack(AttributeInferenceAttack):
"""An AttributeInferenceAttack based on a simple Linear Regression model"""
def __init__(self, sensitiveAttribute, metadata, quids=None):
super().__init__(LinearRegression(fit_intercept=False), sensitiveAttribute, metadata, quids)
self.scaleFactor = None
self.coefficients = None
self.sigma = None
def train(self, data):
"""
Train a MLE attack to reconstruct an unknown sensitive value from a vector of known attributes
:param data: type(DataFrame) A dataset of shape (n, k)
"""
features = self._encode_data(data.drop(self.sensitiveAttribute, axis=1))
labels = data[self.sensitiveAttribute].values
n, k = features.shape
# Center independent variables for better regression performance
self.scaleFactor = mean(features, axis=0)
featuresScaled = features - self.scaleFactor
        featuresScaled = concatenate([ones((n, 1)), featuresScaled], axis=1)  # prepend a column of ones so the intercept is included in the beta vector
# Get MLE for linear coefficients
self.PredictionModel.fit(featuresScaled, labels)
self.coefficients = self.PredictionModel.coef_
self.sigma = sum((labels - featuresScaled.dot(self.coefficients))**2)/(n-k)
LOGGER.debug('Finished training regression model')
self.trained = True
def _make_guess(self, targetAux):
targetFeatures = self._encode_data(targetAux)
targetFeaturesScaled = targetFeatures - self.scaleFactor
        targetFeaturesScaled = concatenate([ones((len(targetFeaturesScaled), 1)), targetFeaturesScaled], axis=1)  # use the centred features, matching train()
guess = targetFeaturesScaled.dot(self.coefficients)[0]
return guess
def get_likelihood(self, targetAux, targetSensitive, attemptLinkage=False, data=None):
assert self.trained, 'Attack must first be trained on some data before can predict sensitive target value'
targetFeatures = self._encode_data(targetAux)
targetFeaturesScaled = targetFeatures - self.scaleFactor
        targetFeaturesScaled = concatenate([ones((len(targetFeaturesScaled), 1)), targetFeaturesScaled], axis=1)  # use the centred features, matching train()
if attemptLinkage:
assert data is not None, "Need a dataset for linkage attack."
try:
groups = data.groupby(self.categoricalAttributes)
targetCats = targetAux[self.categoricalAttributes].values
groupSize = groups.size()[targetCats]
if all(groupSize == 1):
pCorrect = 1.
else:
pdfLikelihood = norm(loc=targetFeaturesScaled.dot(self.coefficients), scale=sqrt(self.sigma))
pCorrect = pdfLikelihood.pdf(targetSensitive)[0]
except:
pdfLikelihood = norm(loc=targetFeaturesScaled.dot(self.coefficients), scale=sqrt(self.sigma))
pCorrect = pdfLikelihood.pdf(targetSensitive)[0]
else:
pdfLikelihood = norm(loc=targetFeaturesScaled.dot(self.coefficients), scale=sqrt(self.sigma))
pCorrect = pdfLikelihood.pdf(targetSensitive)[0]
return pCorrect
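# Usage sketch (illustrative; 'income', the DataFrames and the metadata dict are
# assumptions about the caller, not objects defined in this module):
#
#   attack = LinRegAttack(sensitiveAttribute='income', metadata=metadata)
#   attack.train(aux_df)                                     # fit MLE coefficients and sigma
#   guess = attack.attack(target_df.drop('income', axis=1))
#   p = attack.get_likelihood(target_df.drop('income', axis=1), true_income)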
class RandForestAttack(AttributeInferenceAttack):
"""An AttributeInferenceAttack based on a simple Linear Regression model"""
def __init__(self, sensitiveAttribute, metadata, quids=None):
super().__init__(RandomForestClassifier(), sensitiveAttribute, metadata, quids)
self.labels = {l:i for i, l in enumerate(self.metadata[self.sensitiveAttribute]['categories'])}
self.labelsInv = {i:l for l, i in self.labels.items()}
self.scaleFactor = None
def train(self, data):
"""
Train a Classifier to reconstruct an unknown sensitive label from a vector of known attributes
:param data: type(DataFrame) A dataset of shape (n, k)
"""
features = self._encode_data(data.drop(self.sensitiveAttribute, axis=1))
labels = data[self.sensitiveAttribute].apply(lambda x: self.labels[x]).values
# Feature normalisation
self.scaleFactor = mean(features, axis=0)
featuresScaled = features - self.scaleFactor
# Fit the random forest classifier on the normalised features
self.PredictionModel.fit(featuresScaled, labels)
LOGGER.debug('Finished training random forest classifier')
self.trained = True
def _make_guess(self, targetAux):
targetFeatures = self._encode_data(targetAux)
targetFeaturesScaled = targetFeatures - self.scaleFactor
guess = self.PredictionModel.predict(targetFeaturesScaled)
return self.labelsInv[guess[0]]
def get_likelihood(self, targetAux, targetSensitive, attemptLinkage=False, data=None):
assert self.trained, 'Attack must first be trained on some data before it can predict the sensitive target value'
targetFeatures = self._encode_data(targetAux)
targetFeaturesScaled = targetFeatures - self.scaleFactor
if attemptLinkage:
assert data is not None, "Need a dataset for linkage attack."
try:
groups = data.groupby(self.categoricalAttributes)
targetCats = targetAux[self.categoricalAttributes].values
groupSize = groups.size()[targetCats]
if all(groupSize == 1):
pCorrect = 1.
else:
probs = self.PredictionModel.predict_proba(targetFeaturesScaled).flatten()
pCorrect = probs[self.labels[targetSensitive]]
except:
probs = self.PredictionModel.predict_proba(targetFeaturesScaled).flatten()
pCorrect = probs[self.labels[targetSensitive]]
else:
probs = self.PredictionModel.predict_proba(targetFeaturesScaled).flatten()
pCorrect = probs[self.labels[targetSensitive]]
return pCorrect |
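# Illustrative sketch (not part of the original code): RandForestAttack.get_likelihood
# reads off the class probability the forest assigns to the true sensitive label. The toy
# helper below shows the same predict_proba / label-index pattern; the data, labels and
# parameters are invented for demonstration only and the function is never called above.
def _example_forest_likelihood():
    from numpy import array
    from sklearn.ensemble import RandomForestClassifier
    X = array([[0.0], [0.1], [0.9], [1.0]])
    y = array([0, 0, 1, 1])
    clf = RandomForestClassifier(n_estimators=10, random_state=0).fit(X, y)
    probs = clf.predict_proba(array([[0.95]])).flatten()
    return probs[1]  # likelihood assigned to the true label being class 1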
py | 1a3175a24291777f557a7fdddd66a04cddbb37d3 | from django.db import IntegrityError
from django.shortcuts import render,redirect, get_object_or_404
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.utils import timezone
from .forms import TodoForm
from .models import KaamKaj
# Create your views here.
@login_required
def kaamkaj(request):
return render(request,'kaamkaj/kaamkaj_home.html',locals())
@login_required
def kaam_list(request):
todos = KaamKaj.objects.filter(user=request.user, complete_date__isnull=True).order_by('-created')
return render(request,'kaamkaj/current_kaam.html',locals())
@login_required
def create_kaam(request):
if request.method == 'GET':
return render(request,'kaamkaj/create_kaam.html', {'form':TodoForm})
else:
try:
form = TodoForm(request.POST)
new_todo = form.save(commit=False)
new_todo.user = request.user
new_todo.save()
return redirect('kaamkaj_list')
except ValueError:
return render(request,'kaamkaj/create_kaam.html', {'form':TodoForm, 'error':'Bad data passed in. Try again'} )
@login_required
def kaamkaj_details(request,todo_id):
todo = get_object_or_404(KaamKaj, id=todo_id, user=request.user)
if request.method == "GET":
form = TodoForm(instance=todo)
return render(request,'kaamkaj/kaamkaj_details.html', {'todo':todo,'form':form})
else:
try:
form = TodoForm(request.POST, instance=todo)
form.save()
return redirect('kaamkaj_list')
except ValueError:
return render(request,'kaamkaj/kaamkaj_details.html', {'todo':todo, 'form':form, 'error':'Bad Info'} )
@login_required
def kaamkaj_complete(request,todo_pk):
todo = get_object_or_404(KaamKaj, id=todo_pk, user=request.user)
if request.method == 'POST':
todo.complete_date = timezone.now()
todo.save()
return redirect('kaamkaj_list')
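# Illustrative note (an assumption, not taken from the original project): the redirects
# above use URL names such as 'kaamkaj_list', so the app's urls.py is expected to look
# roughly like the sketch below; the exact paths and the names other than 'kaamkaj_list'
# are hypothetical.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.kaamkaj, name='kaamkaj_home'),
#       path('list/', views.kaam_list, name='kaamkaj_list'),
#       path('create/', views.create_kaam, name='kaamkaj_create'),
#       path('<int:todo_id>/', views.kaamkaj_details, name='kaamkaj_details'),
#       path('<int:todo_pk>/complete/', views.kaamkaj_complete, name='kaamkaj_complete'),
#   ]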
|
py | 1a317743fa3738cc496951eba1e38e2f82f6e800 | import sc2, sys
from __init__ import run_ladder_game
from sc2 import Race, Difficulty
from sc2.player import Bot, Computer
# Load bot
from example_bot import ExampleBot
bot = Bot(Race.Terran, ExampleBot())
# Start game
if __name__ == '__main__':
if "--LadderServer" in sys.argv:
# Ladder game started by LadderManager
print("Starting ladder game...")
result, opponentid = run_ladder_game(bot)
print(result, " against opponent ", opponentid)
else:
# Local game
print("Starting local game...")
sc2.run_game(sc2.maps.get("Abyssal Reef LE"), [
bot,
Computer(Race.Protoss, Difficulty.VeryHard)
], realtime=True)
|
py | 1a3177619cd5d47ceb6a4a6dd4c34dc081b898fa | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from itertools import chain, permutations
from functools import partial
import cuml
import cuml.common.logger as logger
import cupy as cp
import numpy as np
import pytest
import cudf
from cuml.ensemble import RandomForestClassifier as curfc
from cuml.metrics.cluster import adjusted_rand_score as cu_ars
from cuml.metrics import accuracy_score as cu_acc_score
from cuml.metrics.cluster import silhouette_score as cu_silhouette_score
from cuml.metrics.cluster import silhouette_samples as cu_silhouette_samples
from cuml.test.utils import get_handle, get_pattern, array_equal, \
unit_param, quality_param, stress_param, generate_random_labels, \
score_labeling_with_handle
from numba import cuda
from numpy.testing import assert_almost_equal
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import accuracy_score as sk_acc_score
from sklearn.metrics import log_loss as sklearn_log_loss
from sklearn.metrics.cluster import adjusted_rand_score as sk_ars
from sklearn.metrics.cluster import homogeneity_score as sk_homogeneity_score
from sklearn.metrics.cluster import completeness_score as sk_completeness_score
from sklearn.metrics.cluster import mutual_info_score as sk_mutual_info_score
from sklearn.metrics.cluster import silhouette_score as sk_silhouette_score
from sklearn.metrics.cluster import silhouette_samples as sk_silhouette_samples
from sklearn.preprocessing import StandardScaler
from cuml.metrics.cluster import entropy
from cuml.metrics.regression import mean_squared_error, \
mean_squared_log_error, mean_absolute_error
from sklearn.metrics import mean_squared_error as sklearn_mse
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
from cuml.metrics import confusion_matrix
from sklearn.metrics import mean_absolute_error as sklearn_mae
from sklearn.metrics import mean_squared_log_error as sklearn_msle
from cuml.common import has_scipy
from cuml.metrics import roc_auc_score
from cuml.metrics import precision_recall_curve
from cuml.metrics import log_loss
from sklearn.metrics import roc_auc_score as sklearn_roc_auc_score
from sklearn.metrics import precision_recall_curve \
as sklearn_precision_recall_curve
from cuml.metrics import pairwise_distances, PAIRWISE_DISTANCE_METRICS
from sklearn.metrics import pairwise_distances as sklearn_pairwise_distances
@pytest.fixture(scope='module')
def random_state():
random_state = random.randint(0, 1e6)
with logger.set_level(logger.level_debug):
logger.debug("Random seed: {}".format(random_state))
return random_state
@pytest.fixture(
scope='module',
params=(
{'n_clusters': 2, 'n_features': 2, 'label_type': 'int64',
'data_type': 'float32'},
{'n_clusters': 5, 'n_features': 1000, 'label_type': 'int32',
'data_type': 'float64'}
)
)
def labeled_clusters(request, random_state):
data, labels = make_blobs(
n_samples=1000,
n_features=request.param['n_features'],
random_state=random_state,
centers=request.param['n_clusters'],
center_box=(-1, 1),
cluster_std=1.5 # Allow some cluster overlap
)
return (
data.astype(request.param['data_type']),
labels.astype(request.param['label_type'])
)
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('use_handle', [True, False])
def test_r2_score(datatype, use_handle):
a = np.array([0.1, 0.2, 0.3, 0.4, 0.5], dtype=datatype)
b = np.array([0.12, 0.22, 0.32, 0.42, 0.52], dtype=datatype)
a_dev = cuda.to_device(a)
b_dev = cuda.to_device(b)
handle, stream = get_handle(use_handle)
score = cuml.metrics.r2_score(a_dev, b_dev, handle=handle)
np.testing.assert_almost_equal(score, 0.98, decimal=7)
def test_sklearn_search():
"""Test ensures scoring function works with sklearn machinery
"""
import numpy as np
from cuml import Ridge as cumlRidge
import cudf
from sklearn import datasets
from sklearn.model_selection import train_test_split, GridSearchCV
diabetes = datasets.load_diabetes()
X_train, X_test, y_train, y_test = train_test_split(diabetes.data,
diabetes.target,
test_size=0.2,
shuffle=False,
random_state=1)
alpha = np.array([1.0])
fit_intercept = True
normalize = False
params = {'alpha': np.logspace(-3, -1, 10)}
cu_clf = cumlRidge(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, solver="eig")
assert getattr(cu_clf, 'score', False)
sk_cu_grid = GridSearchCV(cu_clf, params, cv=5, iid=False)
gdf_data = cudf.DataFrame(X_train)
gdf_train = cudf.DataFrame(dict(train=y_train))
sk_cu_grid.fit(gdf_data, gdf_train.train)
assert sk_cu_grid.best_params_ == {'alpha': 0.1}
@pytest.mark.parametrize('nrows', [unit_param(30), quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('ncols', [unit_param(10), quality_param(100),
stress_param(200)])
@pytest.mark.parametrize('n_info', [unit_param(7), quality_param(50),
stress_param(100)])
@pytest.mark.parametrize('datatype', [np.float32])
def test_accuracy(nrows, ncols, n_info, datatype):
use_handle = True
train_rows = np.int32(nrows*0.8)
X, y = make_classification(n_samples=nrows, n_features=ncols,
n_clusters_per_class=1, n_informative=n_info,
random_state=123, n_classes=5)
X_test = np.asarray(X[train_rows:, 0:]).astype(datatype)
y_test = np.asarray(y[train_rows:, ]).astype(np.int32)
X_train = np.asarray(X[0:train_rows, :]).astype(datatype)
y_train = np.asarray(y[0:train_rows, ]).astype(np.int32)
# Create a handle for the cuml model
handle, stream = get_handle(use_handle, n_streams=8)
# Initialize, fit and predict using cuML's
# random forest classification model
cuml_model = curfc(max_features=1.0,
n_bins=8, split_algo=0, split_criterion=0,
min_samples_leaf=2,
n_estimators=40, handle=handle, max_leaves=-1,
max_depth=16)
cuml_model.fit(X_train, y_train)
cu_predict = cuml_model.predict(X_test)
cu_acc = cu_acc_score(y_test, cu_predict)
cu_acc_using_sk = sk_acc_score(y_test, cu_predict)
# compare the accuracy of the two models
assert array_equal(cu_acc, cu_acc_using_sk)
dataset_names = ['noisy_circles', 'noisy_moons', 'aniso'] + \
[pytest.param(ds, marks=pytest.mark.xfail)
for ds in ['blobs', 'varied']]
@pytest.mark.parametrize('name', dataset_names)
@pytest.mark.parametrize('nrows', [unit_param(20), quality_param(5000),
stress_param(500000)])
def test_rand_index_score(name, nrows):
default_base = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3}
pat = get_pattern(name, nrows)
params = default_base.copy()
params.update(pat[1])
cuml_kmeans = cuml.KMeans(n_clusters=params['n_clusters'])
X, y = pat[0]
X = StandardScaler().fit_transform(X)
cu_y_pred = cuml_kmeans.fit_predict(X)
cu_score = cu_ars(y, cu_y_pred)
cu_score_using_sk = sk_ars(y, cp.asnumpy(cu_y_pred))
assert array_equal(cu_score, cu_score_using_sk)
@pytest.mark.parametrize('metric', (
'cityblock', 'cosine', 'euclidean', 'l1', 'sqeuclidean'
))
@pytest.mark.parametrize('chunk_divider', [1, 3, 5])
def test_silhouette_score_batched(metric, chunk_divider, labeled_clusters):
X, labels = labeled_clusters
cuml_score = cu_silhouette_score(X, labels, metric=metric,
chunksize=int(X.shape[0]/chunk_divider))
sk_score = sk_silhouette_score(X, labels, metric=metric)
assert_almost_equal(cuml_score, sk_score, decimal=2)
@pytest.mark.parametrize('metric', (
'cityblock', 'cosine', 'euclidean', 'l1', 'sqeuclidean'
))
@pytest.mark.parametrize('chunk_divider', [1, 3, 5])
def test_silhouette_samples_batched(metric, chunk_divider, labeled_clusters):
X, labels = labeled_clusters
cuml_scores = cu_silhouette_samples(X, labels, metric=metric,
chunksize=int(X.shape[0] /
chunk_divider))
sk_scores = sk_silhouette_samples(X, labels, metric=metric)
cu_trunc = cp.around(cuml_scores, decimals=3)
sk_trunc = cp.around(sk_scores, decimals=3)
diff = cp.absolute(cu_trunc - sk_trunc) > 0
over_diff = cp.all(diff)
# 0.5% elements allowed to be different
if len(over_diff.shape) > 0:
assert over_diff.shape[0] <= 0.005 * X.shape[0]
# different elements should not differ more than 1e-1
tolerance_diff = cp.absolute(cu_trunc[diff] - sk_trunc[diff]) > 1e-1
diff_change = cp.all(tolerance_diff)
if len(diff_change.shape) > 0:
assert False
def score_homogeneity(ground_truth, predictions, use_handle):
return score_labeling_with_handle(cuml.metrics.homogeneity_score,
ground_truth,
predictions,
use_handle,
dtype=np.int32)
def score_completeness(ground_truth, predictions, use_handle):
return score_labeling_with_handle(cuml.metrics.completeness_score,
ground_truth,
predictions,
use_handle,
dtype=np.int32)
def score_mutual_info(ground_truth, predictions, use_handle):
return score_labeling_with_handle(cuml.metrics.mutual_info_score,
ground_truth,
predictions,
use_handle,
dtype=np.int32)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('data', [([0, 0, 1, 1], [1, 1, 0, 0]),
([0, 0, 1, 1], [0, 0, 1, 1])])
def test_homogeneity_perfect_labeling(use_handle, data):
# Perfect labelings are homogeneous
hom = score_homogeneity(*data, use_handle)
assert_almost_equal(hom, 1.0, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('data', [([0, 0, 1, 1], [0, 0, 1, 2]),
([0, 0, 1, 1], [0, 1, 2, 3])])
def test_homogeneity_non_perfect_labeling(use_handle, data):
# Non-perfect labelings that further split classes into more clusters can
# be perfectly homogeneous
hom = score_homogeneity(*data, use_handle)
assert_almost_equal(hom, 1.0, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('data', [([0, 0, 1, 1], [0, 1, 0, 1]),
([0, 0, 1, 1], [0, 0, 0, 0])])
def test_homogeneity_non_homogeneous_labeling(use_handle, data):
# Clusters that include samples from different classes do not make for an
# homogeneous labeling
hom = score_homogeneity(*data, use_handle)
assert_almost_equal(hom, 0.0, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('input_range', [[0, 1000],
[-1000, 1000]])
def test_homogeneity_score_big_array(use_handle, input_range):
a, b, _, _ = generate_random_labels(lambda rd: rd.randint(*input_range,
int(10e4),
dtype=np.int32))
score = score_homogeneity(a, b, use_handle)
ref = sk_homogeneity_score(a, b)
np.testing.assert_almost_equal(score, ref, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('input_range', [[0, 2],
[-5, 20],
[int(-10e2), int(10e2)]])
def test_homogeneity_completeness_symmetry(use_handle, input_range):
a, b, _, _ = generate_random_labels(lambda rd: rd.randint(*input_range,
int(10e3),
dtype=np.int32))
hom = score_homogeneity(a, b, use_handle)
com = score_completeness(b, a, use_handle)
np.testing.assert_almost_equal(hom, com, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('input_labels', [([0, 0, 1, 1], [1, 1, 0, 0]),
([0, 0, 1, 1], [0, 0, 1, 1]),
([0, 0, 1, 1], [0, 0, 1, 2]),
([0, 0, 1, 1], [0, 1, 2, 3]),
([0, 0, 1, 1], [0, 1, 0, 1]),
([0, 0, 1, 1], [0, 0, 0, 0])])
def test_mutual_info_score(use_handle, input_labels):
score = score_mutual_info(*input_labels, use_handle)
ref = sk_mutual_info_score(*input_labels)
np.testing.assert_almost_equal(score, ref, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('input_range', [[0, 1000],
[-1000, 1000]])
def test_mutual_info_score_big_array(use_handle, input_range):
a, b, _, _ = generate_random_labels(lambda rd: rd.randint(*input_range,
int(10e4),
dtype=np.int32))
score = score_mutual_info(a, b, use_handle)
ref = sk_mutual_info_score(a, b)
np.testing.assert_almost_equal(score, ref, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('n', [14])
def test_mutual_info_score_range_equal_samples(use_handle, n):
input_range = (-n, n)
a, b, _, _ = generate_random_labels(lambda rd: rd.randint(*input_range,
n,
dtype=np.int32))
score = score_mutual_info(a, b, use_handle)
ref = sk_mutual_info_score(a, b)
np.testing.assert_almost_equal(score, ref, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('input_range', [[0, 19],
[0, 2],
[-5, 20]])
@pytest.mark.parametrize('n_samples', [129, 258])
def test_mutual_info_score_many_blocks(use_handle, input_range, n_samples):
a, b, _, _ = generate_random_labels(lambda rd: rd.randint(*input_range,
n_samples,
dtype=np.int32))
score = score_mutual_info(a, b, use_handle)
ref = sk_mutual_info_score(a, b)
np.testing.assert_almost_equal(score, ref, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('data', [([0, 0, 1, 1], [1, 1, 0, 0]),
([0, 0, 1, 1], [0, 0, 1, 1])])
def test_completeness_perfect_labeling(use_handle, data):
# Perfect labelings are complete
com = score_completeness(*data, use_handle)
np.testing.assert_almost_equal(com, 1.0, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('data', [([0, 0, 1, 1], [0, 0, 0, 0]),
([0, 1, 2, 3], [0, 0, 1, 1])])
def test_completeness_non_perfect_labeling(use_handle, data):
# Non-perfect labelings that assign all class members to the same
# clusters are still complete
com = score_completeness(*data, use_handle)
np.testing.assert_almost_equal(com, 1.0, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('data', [([0, 0, 1, 1], [0, 1, 0, 1]),
([0, 0, 0, 0], [0, 1, 2, 3])])
def test_completeness_non_complete_labeling(use_handle, data):
# If class members are split across different clusters, the assignment
# cannot be complete
com = score_completeness(*data, use_handle)
np.testing.assert_almost_equal(com, 0.0, decimal=4)
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('input_range', [[0, 1000],
[-1000, 1000]])
def test_completeness_score_big_array(use_handle, input_range):
a, b, _, _ = generate_random_labels(lambda rd: rd.randint(*input_range,
int(10e4),
dtype=np.int32))
score = score_completeness(a, b, use_handle)
ref = sk_completeness_score(a, b)
np.testing.assert_almost_equal(score, ref, decimal=4)
def test_regression_metrics():
y_true = np.arange(50, dtype=np.int)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_squared_log_error(y_true, y_pred),
mean_squared_error(np.log(1 + y_true),
np.log(1 + y_pred)))
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
@pytest.mark.parametrize('n_samples', [50, stress_param(500000)])
@pytest.mark.parametrize('dtype', [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize('function', ['mse', 'mae', 'msle'])
def test_regression_metrics_random(n_samples, dtype, function):
if dtype == np.float32 and n_samples == 500000:
# stress test for float32 fails because of floating point precision
pytest.xfail()
y_true, y_pred, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 1000, n_samples).astype(dtype))
cuml_reg, sklearn_reg = {
'mse': (mean_squared_error, sklearn_mse),
'mae': (mean_absolute_error, sklearn_mae),
'msle': (mean_squared_log_error, sklearn_msle)
}[function]
res = cuml_reg(y_true, y_pred, multioutput='raw_values')
ref = sklearn_reg(y_true, y_pred, multioutput='raw_values')
cp.testing.assert_array_almost_equal(res, ref, decimal=2)
@pytest.mark.parametrize('function', ['mse', 'mse_not_squared', 'mae', 'msle'])
def test_regression_metrics_at_limits(function):
y_true = np.array([0.], dtype=np.float)
y_pred = np.array([0.], dtype=np.float)
cuml_reg = {
'mse': mean_squared_error,
'mse_not_squared': partial(mean_squared_error, squared=False),
'mae': mean_absolute_error,
'msle': mean_squared_log_error,
}[function]
assert_almost_equal(cuml_reg(y_true, y_pred), 0.00, decimal=2)
@pytest.mark.parametrize('inputs', [([-1.], [-1.]),
([1., 2., 3.], [1., -2., 3.]),
([1., -2., 3.], [1., 2., 3.])])
def test_mean_squared_log_error_exceptions(inputs):
with pytest.raises(ValueError):
mean_squared_log_error(np.array(inputs[0]), np.array(inputs[1]))
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. + 2. / 3) / 4.)
error = mean_squared_error(y_true, y_pred, squared=False)
assert_almost_equal(error, 0.645, decimal=2)
error = mean_squared_log_error(y_true, y_pred)
assert_almost_equal(error, 0.200, decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. + 2. / 3) / 4.)
def test_regression_metrics_multioutput_array():
y_true = np.array([[1, 2], [2.5, -1], [4.5, 3], [5, 7]], dtype=np.float)
y_pred = np.array([[1, 1], [2, -1], [5, 4], [5, 6.5]], dtype=np.float)
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
cp.testing.assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
cp.testing.assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
weights = np.array([0.4, 0.6], dtype=np.float)
msew = mean_squared_error(y_true, y_pred, multioutput=weights)
rmsew = mean_squared_error(y_true, y_pred, multioutput=weights,
squared=False)
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(rmsew, 0.62, decimal=2)
y_true = np.array([[0, 0]] * 4, dtype=np.int)
y_pred = np.array([[1, 1]] * 4, dtype=np.int)
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
cp.testing.assert_array_almost_equal(mse, [1., 1.], decimal=2)
cp.testing.assert_array_almost_equal(mae, [1., 1.], decimal=2)
y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
msle = mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
msle2 = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred),
multioutput='raw_values')
cp.testing.assert_array_almost_equal(msle, msle2, decimal=2)
@pytest.mark.parametrize('function', ['mse', 'mae'])
def test_regression_metrics_custom_weights(function):
y_true = np.array([1, 2, 2.5, -1], dtype=np.float)
y_pred = np.array([1, 1, 2, -1], dtype=np.float)
weights = np.array([0.2, 0.25, 0.4, 0.15], dtype=np.float)
cuml_reg, sklearn_reg = {
'mse': (mean_squared_error, sklearn_mse),
'mae': (mean_absolute_error, sklearn_mae)
}[function]
score = cuml_reg(y_true, y_pred, sample_weight=weights)
ref = sklearn_reg(y_true, y_pred, sample_weight=weights)
assert_almost_equal(score, ref, decimal=2)
def test_mse_vs_msle_custom_weights():
y_true = np.array([0.5, 2, 7, 6], dtype=np.float)
y_pred = np.array([0.5, 1, 8, 8], dtype=np.float)
weights = np.array([0.2, 0.25, 0.4, 0.15], dtype=np.float)
msle = mean_squared_log_error(y_true, y_pred, sample_weight=weights)
msle2 = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred),
sample_weight=weights)
assert_almost_equal(msle, msle2, decimal=2)
@pytest.mark.parametrize('use_handle', [True, False])
def test_entropy(use_handle):
handle, stream = get_handle(use_handle)
# The outcome of a fair coin is the most uncertain:
# in base 2 the result is 1 (One bit of entropy).
cluster = np.array([0, 1], dtype=np.int32)
assert_almost_equal(entropy(cluster, base=2., handle=handle), 1.)
# The outcome of a biased coin is less uncertain:
cluster = np.array(([0] * 9) + [1], dtype=np.int32)
assert_almost_equal(entropy(cluster, base=2., handle=handle), 0.468995593)
# base e
assert_almost_equal(entropy(cluster, handle=handle), 0.32508297339144826)
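# Illustrative check (not part of the original tests): the expected values above are just
# the Shannon entropy of the label distribution; for the biased coin,
# -0.9*log2(0.9) - 0.1*log2(0.1) ~= 0.469 bits, matching the assertion. A tiny sketch:
def _expected_entropy_of_biased_coin():
    from math import log2
    p = [0.9, 0.1]  # nine zeros and one one
    return -sum(q * log2(q) for q in p)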
@pytest.mark.parametrize('n_samples', [50, stress_param(500000)])
@pytest.mark.parametrize('base', [None, 2, 10, 50])
@pytest.mark.parametrize('use_handle', [True, False])
def test_entropy_random(n_samples, base, use_handle):
if has_scipy():
from scipy.stats import entropy as sp_entropy
else:
pytest.skip('Skipping test_entropy_random because Scipy is missing')
handle, stream = get_handle(use_handle)
clustering, _, _, _ = \
generate_random_labels(lambda rng: rng.randint(0, 1000, n_samples))
# generate unnormalized probabilities from clustering
pk = np.bincount(clustering)
# scipy's entropy uses probabilities
sp_S = sp_entropy(pk, base=base)
# we use a clustering
S = entropy(np.array(clustering, dtype=np.int32), base, handle=handle)
assert_almost_equal(S, sp_S, decimal=2)
def test_confusion_matrix():
y_true = cp.array([2, 0, 2, 2, 0, 1])
y_pred = cp.array([0, 0, 2, 2, 0, 2])
cm = confusion_matrix(y_true, y_pred)
ref = cp.array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
cp.testing.assert_array_equal(cm, ref)
def test_confusion_matrix_binary():
y_true = cp.array([0, 1, 0, 1])
y_pred = cp.array([1, 1, 1, 0])
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
ref = cp.array([0, 2, 1, 1])
cp.testing.assert_array_equal(ref, cp.array([tn, fp, fn, tp]))
@pytest.mark.parametrize('n_samples', [50, 3000, stress_param(500000)])
@pytest.mark.parametrize('dtype', [np.int32, np.int64])
@pytest.mark.parametrize('problem_type', ['binary', 'multiclass'])
def test_confusion_matrix_random(n_samples, dtype, problem_type):
upper_range = 2 if problem_type == 'binary' else 1000
y_true, y_pred, _, _ = generate_random_labels(
lambda rng: rng.randint(0, upper_range, n_samples).astype(dtype))
cm = confusion_matrix(y_true, y_pred)
ref = sk_confusion_matrix(y_true, y_pred)
cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
@pytest.mark.parametrize(
"normalize, expected_results",
[('true', 0.333333333),
('pred', 0.333333333),
('all', 0.1111111111),
(None, 2)]
)
def test_confusion_matrix_normalize(normalize, expected_results):
y_test = cp.array([0, 1, 2] * 6)
y_pred = cp.array(list(chain(*permutations([0, 1, 2]))))
cm = confusion_matrix(y_test, y_pred, normalize=normalize)
cp.testing.assert_allclose(cm, cp.array(expected_results))
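# Illustrative note (not from the original tests): with y_test repeating [0, 1, 2] six times
# and y_pred running through all 3! = 6 permutations, every cell of the 3x3 confusion matrix
# equals 2, so 'true'/'pred' normalisation gives 2/6 = 1/3 and 'all' gives 2/18 = 1/9,
# which is where the expected_results values above come from.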
@pytest.mark.parametrize('labels', [(0, 1),
(2, 1),
(2, 1, 4, 7),
(2, 20)])
def test_confusion_matrix_multiclass_subset_labels(labels):
y_true, y_pred, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 3, 10).astype(np.int32))
ref = sk_confusion_matrix(y_true, y_pred, labels=labels)
labels = cp.array(labels, dtype=np.int32)
cm = confusion_matrix(y_true, y_pred, labels=labels)
cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
@pytest.mark.parametrize('n_samples', [50, 3000, stress_param(500000)])
@pytest.mark.parametrize('dtype', [np.int32, np.int64])
@pytest.mark.parametrize('weights_dtype', ['int', 'float'])
def test_confusion_matrix_random_weights(n_samples, dtype, weights_dtype):
y_true, y_pred, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 10, n_samples).astype(dtype))
if weights_dtype == 'int':
sample_weight = np.random.RandomState(0).randint(0, 10, n_samples)
else:
sample_weight = np.random.RandomState(0).rand(n_samples)
cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
ref = sk_confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
def test_roc_auc_score():
y_true = np.array([0, 0, 1, 1])
y_pred = np.array([0.1, 0.4, 0.35, 0.8])
assert_almost_equal(roc_auc_score(y_true, y_pred),
sklearn_roc_auc_score(y_true, y_pred))
y_true = np.array([0, 0, 1, 1, 0])
y_pred = np.array([0.8, 0.4, 0.4, 0.8, 0.8])
assert_almost_equal(roc_auc_score(y_true, y_pred),
sklearn_roc_auc_score(y_true, y_pred))
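# Illustrative note (not from the original tests): ROC AUC is the probability that a random
# positive is ranked above a random negative. For the first case above, the positive scores
# {0.35, 0.8} beat the negative scores {0.1, 0.4} in 3 of the 4 possible pairs, giving 0.75.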
@pytest.mark.parametrize('n_samples', [50, 500000])
@pytest.mark.parametrize('dtype', [np.int32, np.int64, np.float32, np.float64])
def test_roc_auc_score_random(n_samples, dtype):
y_true, _, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 2, n_samples).astype(dtype))
y_pred, _, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 1000, n_samples).astype(dtype))
auc = roc_auc_score(y_true, y_pred)
skl_auc = sklearn_roc_auc_score(y_true, y_pred)
assert_almost_equal(auc, skl_auc)
def test_roc_auc_score_at_limits():
y_true = np.array([0., 0., 0.], dtype=np.float)
y_pred = np.array([0., 0.5, 1.], dtype=np.float)
err_msg = ("roc_auc_score cannot be used when "
"only one class present in y_true. ROC AUC score "
"is not defined in that case.")
with pytest.raises(ValueError, match=err_msg):
roc_auc_score(y_true, y_pred)
y_true = np.array([0., 0.5, 1.0], dtype=np.float)
y_pred = np.array([0., 0.5, 1.], dtype=np.float)
err_msg = ("Continuous format of y_true "
"is not supported.")
with pytest.raises(ValueError, match=err_msg):
roc_auc_score(y_true, y_pred)
def test_precision_recall_curve():
y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])
precision_using_sk, recall_using_sk, thresholds_using_sk = \
sklearn_precision_recall_curve(
y_true, y_score)
precision, recall, thresholds = precision_recall_curve(
y_true, y_score)
assert array_equal(precision, precision_using_sk)
assert array_equal(recall, recall_using_sk)
assert array_equal(thresholds, thresholds_using_sk)
def test_precision_recall_curve_at_limits():
y_true = np.array([0., 0., 0.], dtype=np.float)
y_pred = np.array([0., 0.5, 1.], dtype=np.float)
err_msg = ("precision_recall_curve cannot be used when "
"y_true is all zero.")
with pytest.raises(ValueError, match=err_msg):
precision_recall_curve(y_true, y_pred)
y_true = np.array([0., 0.5, 1.0], dtype=np.float)
y_pred = np.array([0., 0.5, 1.], dtype=np.float)
err_msg = ("Continuous format of y_true "
"is not supported.")
with pytest.raises(ValueError, match=err_msg):
precision_recall_curve(y_true, y_pred)
@pytest.mark.parametrize('n_samples', [50, 500000])
@pytest.mark.parametrize('dtype', [np.int32, np.int64, np.float32, np.float64])
def test_precision_recall_curve_random(n_samples, dtype):
y_true, _, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 2, n_samples).astype(dtype))
y_score, _, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 1000, n_samples).astype(dtype))
precision_using_sk, recall_using_sk, thresholds_using_sk = \
sklearn_precision_recall_curve(
y_true, y_score)
precision, recall, thresholds = precision_recall_curve(
y_true, y_score)
assert array_equal(precision, precision_using_sk)
assert array_equal(recall, recall_using_sk)
assert array_equal(thresholds, thresholds_using_sk)
def test_log_loss():
y_true = np.array([0, 0, 1, 1])
y_pred = np.array([0.1, 0.4, 0.35, 0.8])
assert_almost_equal(log_loss(y_true, y_pred),
sklearn_log_loss(y_true, y_pred))
y_true = np.array([0, 0, 1, 1, 0])
y_pred = np.array([0.8, 0.4, 0.4, 0.8, 0.8])
assert_almost_equal(log_loss(y_true, y_pred),
sklearn_log_loss(y_true, y_pred))
@pytest.mark.parametrize('n_samples', [500, 500000])
@pytest.mark.parametrize('dtype', [np.int32, np.int64, np.float32, np.float64])
def test_log_loss_random(n_samples, dtype):
y_true, _, _, _ = generate_random_labels(
lambda rng: rng.randint(0, 10, n_samples).astype(dtype))
y_pred, _, _, _ = generate_random_labels(
lambda rng: rng.rand(n_samples, 10))
assert_almost_equal(log_loss(y_true, y_pred),
sklearn_log_loss(y_true, y_pred))
def test_log_loss_at_limits():
y_true = np.array([0., 1., 2.], dtype=np.float)
y_pred = np.array([0., 0.5, 1.], dtype=np.float)
err_msg = ("The shape of y_pred doesn't "
"match the number of classes")
with pytest.raises(ValueError, match=err_msg):
log_loss(y_true, y_pred)
y_true = np.array([0., 0.5, 1.0], dtype=np.float)
y_pred = np.array([0., 0.5, 1.], dtype=np.float)
err_msg = ("'y_true' can only have integer values")
with pytest.raises(ValueError, match=err_msg):
log_loss(y_true, y_pred)
@pytest.mark.parametrize("metric", PAIRWISE_DISTANCE_METRICS)
@pytest.mark.parametrize("matrix_size", [(5, 4), (1000, 3), (2, 10),
(500, 400)])
@pytest.mark.parametrize("is_col_major", [True, False])
def test_pairwise_distances(metric: str, matrix_size, is_col_major):
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
def prep_array(array):
return np.asfortranarray(array) if is_col_major else array
# For fp64, compare at 10 decimals (well below the ~15 significant digits available)
compare_precision = 10
# Compare to sklearn, single input
X = prep_array(rng.random_sample(matrix_size))
S = pairwise_distances(X, metric=metric)
S2 = sklearn_pairwise_distances(X, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, double input with same dimensions
Y = X
S = pairwise_distances(X, Y, metric=metric)
S2 = sklearn_pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare single and double inputs to each other
S = pairwise_distances(X, metric=metric)
S2 = pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, with Y dim != X dim
Y = prep_array(rng.random_sample((2, matrix_size[1])))
S = pairwise_distances(X, Y, metric=metric)
S2 = sklearn_pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Change precision of one parameter
Y = np.asfarray(Y, dtype=np.float32)
S = pairwise_distances(X, Y, metric=metric)
S2 = sklearn_pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# For fp32, compare at 2 decimals (well below the ~7 significant digits available)
compare_precision = 2
# Change precision of both parameters to float
X = np.asfarray(X, dtype=np.float32)
Y = np.asfarray(Y, dtype=np.float32)
S = pairwise_distances(X, Y, metric=metric)
S2 = sklearn_pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Test sending an int type with convert_dtype=True
Y = prep_array(rng.randint(10, size=Y.shape))
S = pairwise_distances(X, Y, metric=metric, convert_dtype=True)
S2 = sklearn_pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Test that uppercase on the metric name throws an error.
with pytest.raises(ValueError):
pairwise_distances(X, Y, metric=metric.capitalize())
@pytest.mark.parametrize("metric", PAIRWISE_DISTANCE_METRICS)
@pytest.mark.parametrize("matrix_size", [
unit_param((1000, 100)),
quality_param((2000, 1000)),
stress_param((10000, 10000))])
def test_pairwise_distances_sklearn_comparison(metric: str, matrix_size):
# Test larger sizes to sklearn
rng = np.random.RandomState(1)
element_count = matrix_size[0] * matrix_size[1]
X = rng.random_sample(matrix_size)
Y = rng.random_sample(matrix_size)
# For fp64, compare at 10 decimals, (5 places less than the ~15 max)
compare_precision = 10
# Compare to sklearn, fp64
S = pairwise_distances(X, Y, metric=metric)
if (element_count <= 2000000):
S2 = sklearn_pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# For fp32, compare at 4 decimals, (3 places less than the ~7 max)
compare_precision = 4
X = np.asfarray(X, dtype=np.float32)
Y = np.asfarray(Y, dtype=np.float32)
# Compare to sklearn, fp32
S = pairwise_distances(X, Y, metric=metric)
if (element_count <= 2000000):
S2 = sklearn_pairwise_distances(X, Y, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
@pytest.mark.parametrize("metric", PAIRWISE_DISTANCE_METRICS)
def test_pairwise_distances_one_dimension_order(metric: str):
# Test the pairwise_distance helper function for 1 dimensional cases which
# can break down when using a size of 1 for either dimension
rng = np.random.RandomState(2)
Xc = rng.random_sample((1, 4))
Yc = rng.random_sample((10, 4))
Xf = np.asfortranarray(Xc)
Yf = np.asfortranarray(Yc)
# For fp64, compare at 13 decimals, (2 places less than the ~15 max)
compare_precision = 13
# Compare to sklearn, C/C order
S = pairwise_distances(Xc, Yc, metric=metric)
S2 = sklearn_pairwise_distances(Xc, Yc, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, C/F order
S = pairwise_distances(Xc, Yf, metric=metric)
S2 = sklearn_pairwise_distances(Xc, Yf, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, F/C order
S = pairwise_distances(Xf, Yc, metric=metric)
S2 = sklearn_pairwise_distances(Xf, Yc, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, F/F order
S = pairwise_distances(Xf, Yf, metric=metric)
S2 = sklearn_pairwise_distances(Xf, Yf, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Switch which input has single dimension
Xc = rng.random_sample((1, 4))
Yc = rng.random_sample((10, 4))
Xf = np.asfortranarray(Xc)
Yf = np.asfortranarray(Yc)
# Compare to sklearn, C/C order
S = pairwise_distances(Xc, Yc, metric=metric)
S2 = sklearn_pairwise_distances(Xc, Yc, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, C/F order
S = pairwise_distances(Xc, Yf, metric=metric)
S2 = sklearn_pairwise_distances(Xc, Yf, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, F/C order
S = pairwise_distances(Xf, Yc, metric=metric)
S2 = sklearn_pairwise_distances(Xf, Yc, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
# Compare to sklearn, F/F order
S = pairwise_distances(Xf, Yf, metric=metric)
S2 = sklearn_pairwise_distances(Xf, Yf, metric=metric)
cp.testing.assert_array_almost_equal(S, S2, decimal=compare_precision)
@pytest.mark.parametrize("metric", ["haversine", "nan_euclidean"])
def test_pairwise_distances_unsupported_metrics(metric):
rng = np.random.RandomState(3)
X = rng.random_sample((5, 4))
with pytest.raises(ValueError):
pairwise_distances(X, metric=metric)
def test_pairwise_distances_exceptions():
rng = np.random.RandomState(4)
X_int = rng.randint(10, size=(5, 4))
X_double = rng.random_sample((5, 4))
X_float = np.asfarray(X_double, dtype=np.float32)
X_bool = rng.choice([True, False], size=(5, 4))
# Test int inputs (only float/double accepted at this time)
with pytest.raises(TypeError):
pairwise_distances(X_int, metric="euclidean")
# Test second int inputs (should not have an exception with
# convert_dtype=True)
pairwise_distances(X_double, X_int, metric="euclidean")
# Test bool inputs (only float/double accepted at this time)
with pytest.raises(TypeError):
pairwise_distances(X_bool, metric="euclidean")
# Test sending different types with convert_dtype=False
with pytest.raises(TypeError):
pairwise_distances(X_double, X_float, metric="euclidean",
convert_dtype=False)
# Invalid metric name
with pytest.raises(ValueError):
pairwise_distances(X_double, metric="Not a metric")
# Invalid dimensions
X = rng.random_sample((5, 4))
Y = rng.random_sample((5, 7))
with pytest.raises(ValueError):
pairwise_distances(X, Y, metric="euclidean")
@pytest.mark.parametrize("input_type", ["cudf", "numpy", "cupy"])
@pytest.mark.parametrize("output_type", ["cudf", "numpy", "cupy"])
@pytest.mark.parametrize("use_global", [True, False])
def test_pairwise_distances_output_types(input_type, output_type, use_global):
# Test larger sizes to sklearn
rng = np.random.RandomState(5)
X = rng.random_sample((100, 100))
Y = rng.random_sample((100, 100))
if input_type == "cudf":
X = cudf.DataFrame(X)
Y = cudf.DataFrame(Y)
elif input_type == "cupy":
X = cp.asarray(X)
Y = cp.asarray(Y)
# Set to None if we are using the global object
output_type_param = None if use_global else output_type
# Use the global manager object. Should do nothing unless use_global is set
with cuml.using_output_type(output_type):
# Compare to sklearn, fp64
S = pairwise_distances(X, Y, metric="euclidean",
output_type=output_type_param)
if output_type == "input":
assert isinstance(S, type(X))
elif output_type == "cudf":
assert isinstance(S, cudf.DataFrame)
elif output_type == "numpy":
assert isinstance(S, np.ndarray)
elif output_type == "cupy":
assert isinstance(S, cp.core.core.ndarray)
|
py | 1a31793ba0b69631edd40d6fee3f33325a932799 | #
# Collective Knowledge: CK-powered Caffe crowdbenchmarking (very early prototyping)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, [email protected], http://fursin.net
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
line='================================================================'
ck_url='http://cknowledge.org/repo/web.php?native_action=show&native_module_uoa=program.optimization&scenario=1eb2f50d4620903e'
ck_url1='http://cknowledge.org/repo/web.php?wcid=experiment.bench.dnn:'
ffstat='ck-stat-flat-characteristics.json'
ffmin='ck-stat-flat-min.json'
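# ffstat holds the flattened per-run statistics pushed to each experiment entry on the server;
# ffmin caches the flattened minima (e.g. ##characteristics#run#time_fwbw_ms#min) that show()
# loads from each entry to sort the crowd-benchmarking results table.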
form_name='wa_web_form'
onchange='document.'+form_name+'.submit();'
hextra='<i><center>\n'
hextra+=' [ <a href="http://cKnowledge.org">CK project website</a> ], '
hextra+=' [ <a href="https://github.com/mlcommons/ck-mlops">CK automation recipes for portable MLOps</a> ], '
hextra+=' [ <a href="https://en.wikipedia.org/wiki/Collective_Knowledge_(software)">Wikipedia</a> ] \n'
hextra+='</center></i>\n'
hextra+='<br>\n'
selector=[{'name':'Type', 'key':'dnn_type'},
{'name':'DNN engine', 'key':'dnn_engine_name'},
{'name':'Model', 'key':'nn_type'},
{'name':'Platform', 'key':'plat_name', 'new_line':'yes'},
{'name':'CPU', 'key':'cpu_name'},
{'name':'OS', 'key':'os_name', 'new_line':'yes'},
{'name':'GPGPU', 'key':'gpgpu_name'}]
replay_clean_vars=['no_compile','host_os','device_id']
replay_clean_env_vars=['CK_CAFFE_MODEL','CK_CAFFE_MODEL_FILE','CK_ENV_MODEL_CAFFE_WEIGHTS']
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# crowdsource these experiments
def crowdsource(i):
"""
Input: {
(local) - if 'yes', local crowd-benchmarking, instead of public
(user) - force different user ID/email for demos
(choices) - force different choices to program pipeline
(repetitions) - statistical repetitions (default=3), for now statistical analysis is not used (TBD)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import copy
import os
# Setting output
o=i.get('out','')
oo=''
if o=='con': oo='con'
quiet=i.get('quiet','')
er=i.get('exchange_repo','')
if er=='': er=ck.cfg['default_exchange_repo_uoa']
esr=i.get('exchange_subrepo','')
if esr=='': esr=ck.cfg['default_exchange_subrepo_uoa']
if i.get('local','')=='yes':
er='local'
esr=''
la=i.get('local_autotuning','')
repetitions=i.get('repetitions','')
if repetitions=='': repetitions=3
repetitions=int(repetitions)
record='no'
# Check if any input has . and convert to dict
for k in list(i.keys()):
if k.find('.')>0:
v=i[k]
kk='##'+k.replace('.','#')
del(i[k])
r=ck.set_by_flat_key({'dict':i, 'key':kk, 'value':v})
if r['return']>0: return r
choices=i.get('choices',{})
env=i.get('env',{})
if 'env' not in choices: choices['env']={}
r=ck.merge_dicts({'dict1':choices['env'], 'dict2':copy.deepcopy(env)})
env={}
xchoices=copy.deepcopy(choices)
# Get user
user=''
mcfg={}
ii={'action':'load',
'module_uoa':'module',
'data_uoa':cfg['module_deps']['program.optimization']}
r=ck.access(ii)
if r['return']==0:
mcfg=r['dict']
dcfg={}
ii={'action':'load',
'module_uoa':mcfg['module_deps']['cfg'],
'data_uoa':mcfg['cfg_uoa']}
r=ck.access(ii)
if r['return']>0 and r['return']!=16: return r
if r['return']!=16:
dcfg=r['dict']
user=dcfg.get('user_email','')
# Initialize local environment for program optimization ***********************************************************
pi=i.get('platform_info',{})
if len(pi)==0:
ii=copy.deepcopy(i)
ii['action']='initialize'
ii['module_uoa']=cfg['module_deps']['program.optimization']
ii['data_uoa']='caffe'
ii['exchange_repo']=er
ii['exchange_subrepo']=esr
ii['skip_welcome']='yes'
ii['skip_log_wait']='yes'
ii['crowdtuning_type']='caffe-crowd-benchmarking'
r=ck.access(ii)
if r['return']>0: return r
pi=r['platform_info']
user=r.get('user','')
hos=pi['host_os_uoa']
hosd=pi['host_os_dict']
tos=pi['os_uoa']
tosd=pi['os_dict']
tbits=tosd.get('bits','')
remote=tosd.get('remote','')
tdid=pi['device_id']
features=pi.get('features',{})
fplat=features.get('platform',{})
fos=features.get('os',{})
fcpu=features.get('cpu',{})
fgpu=features.get('gpu',{})
plat_name=fplat.get('name','')
plat_uid=features.get('platform_uid','')
os_name=fos.get('name','')
os_uid=features.get('os_uid','')
cpu_name=fcpu.get('name','')
if cpu_name=='': cpu_name='unknown-'+fcpu.get('cpu_abi','')
cpu_uid=features.get('cpu_uid','')
gpu_name=fgpu.get('name','')
gpgpu_name=''
sn=fos.get('serial_number','')
# Ask for cmd
tp=['cpu', 'cuda', 'cuda_fp16', 'opencl']
ck.out(line)
ck.out('Select Caffe library type:')
ck.out('')
r=ck.access({'action':'select_list',
'module_uoa':cfg['module_deps']['choice'],
'choices':tp})
if r['return']>0: return r
xtp=r['choice']
xtp16=''
if xtp=='cuda_fp16':
xtp='cuda'
xtp16='yes'
android=False
if 'android' in tos: android=True
# Get extra platform features if "cuda" or "opencl"
if android:
run_cmd='default'
prog_uoa='caffe-time'
else:
run_cmd='time_cpu'
prog_uoa='caffe'
gpgpu_uid=''
if xtp=='cuda' or xtp=='opencl':
r=ck.access({'action':'detect',
'module_uoa':cfg['module_deps']['platform.gpgpu'],
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'type':xtp,
'share':'yes',
'exchange_repo':er,
'exchange_subrepo':esr})
if r['return']>0: return r
gfeat=r.get('features',{})
gpgpus=gfeat.get('gpgpu',[])
if len(gpgpus)>0:
gpgpu_name=gpgpus[0].get('gpgpu',{}).get('name','')
gpgpu_uid=gpgpus[0].get('gpgpu_uoa','')
if android:
if xtp!='opencl':
return {'return':1, 'error':'can\'t crowdbenchmark this type of DNN engine on selected target platform'}
run_cmd='default'
prog_uoa='caffe-time-opencl'
else:
run_cmd='time_gpu'
if xtp16=='yes': run_cmd='time_gpu_fp16'
# Get deps from caffe program
r=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['program'],
'data_uoa':prog_uoa})
if r['return']>0: return r
pp=r['path']
# lib_dep=r['dict']['run_deps']['lib-caffe']
# deps={'lib-caffe':lib_dep}
# Check environment for selected type
# r=ck.access({'action':'resolve',
# 'module_uoa':cfg['module_deps']['env'],
# 'deps':deps,
# 'host_os':hos,
# 'target_os':tos,
# 'device_id':tdid,
# 'out':o})
# if r['return']>0: return r
# deps=r['deps']
# Prepare CK pipeline for a given workload
ii={'action':'pipeline',
'module_uoa':cfg['module_deps']['program'],
'data_uoa':prog_uoa,
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'skip_target':'yes',
'prepare':'yes',
'env':env,
'choices':choices,
# 'dependencies':deps,
'cmd_key':run_cmd,
'no_state_check':'yes',
'no_compiler_description':'yes',
'skip_info_collection':'yes',
'skip_calibration':'yes',
'cpu_freq':'max',
'gpu_freq':'max',
'env_speed':'yes',
'energy':'no',
'skip_print_timers':'yes',
'generate_rnd_tmp_dir':'no',
'out':oo}
rr=ck.access(ii)
if rr['return']>0: return rr
fail=rr.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+rr.get('fail_reason','')+')'}
ready=rr.get('ready','')
if ready!='yes':
return {'return':11, 'error':'couldn\'t prepare universal CK program workflow'}
state=rr['state']
tmp_dir=state.get('tmp_dir','')
if tmp_dir=='': tmp_dir='tmp' # usually when no_compile
deps=rr['dependencies'] # resolved deps
# Clean pipeline
if 'ready' in rr: del(rr['ready'])
if 'fail' in rr: del(rr['fail'])
if 'return' in rr: del(rr['return'])
# Prepare high-level experiment meta
meta={'cpu_name':cpu_name,
'os_name':os_name,
'plat_name':plat_name,
'gpu_name':gpu_name,
'dnn_type':xtp,
'gpgpu_name':gpgpu_name,
'cmd_key':run_cmd}
# Process deps
xdeps={}
xnn=''
xblas=''
for k in deps:
dp=deps[k]
ptags=dp.get('tags',[])
puoa=dp.get('package_uoa','')
if puoa=='':
puoa=dp.get('cus',{}).get('used_package_uoa','')
dname=dp.get('dict',{}).get('data_name','')
if k=='caffemodel':
xnn=dname
j1=xnn.rfind('(')
if j1>0:
xnn=xnn[j1+1:-1]
xdeps[k]={'name':dp.get('name',''), 'data_name':dname, 'ver':dp.get('ver',''), 'package_uoa':puoa, 'package_tags':ptags}
# versions of engine sub deps
dvers={}
mdep=deps['lib-caffe']
mdeps=mdep.get('dict',{}).get('deps',{})
for k in mdeps:
dvers[k]=mdeps[k].get('ver','')
meta['xversions']=dvers
meta['xdeps']=xdeps
meta['nn_type']=xnn
meta['choices']=xchoices
mmeta=copy.deepcopy(meta)
# Extra meta which is not used to search similar case ...
mmeta['platform_uid']=plat_uid
mmeta['os_uid']=os_uid
mmeta['cpu_uid']=cpu_uid
mmeta['gpgpu_uid']=gpgpu_uid
mmeta['user']=user
# Check if already exists (to aggregate stats)
aggregated_stats={}
rduid=''
found=False
if o=='con':
ck.out('')
ck.out('Checking if results already exist in a public repo (to aggregate statistics) ...')
# Find remote entry
ii={'action':'search',
'module_uoa':work['self_module_uid'],
'repo_uoa':er,
'remote_repo_uoa':esr,
'search_dict':{'meta':meta}}
rx=ck.access(ii)
if rx['return']>0: return rx
lst=rx['lst']
if len(lst)==1:
rduid=lst[0]['data_uid']
found=True
if o=='con':
ck.out('')
ck.out('Results found. Pre-loading aggregated stats from '+rduid+' ...')
# Load stats
rx=ck.access({'action':'load',
'module_uoa':work['self_module_uid'],
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr,
'load_extra_json_files':[ffstat]})
if rx['return']==0:
aggregated_stats=rx.get('extra_json_files',{}).get(ffstat,{})
else:
ck.out('')
ck.out('WARNING: couldn\'t load data ('+rx['error']+')')
else:
rx=ck.gen_uid({})
if rx['return']>0: return rx
rduid=rx['data_uid']
# Run CK pipeline *****************************************************
pipeline=copy.deepcopy(rr)
if len(choices)>0:
r=ck.merge_dicts({'dict1':pipeline['choices'], 'dict2':xchoices})
if r['return']>0: return r
ii={'action':'autotune',
'module_uoa':cfg['module_deps']['pipeline'],
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'iterations':1,
'repetitions':repetitions,
'collect_all':'yes',
'process_multi_keys':['##characteristics#*'],
'tmp_dir':tmp_dir,
'pipeline':pipeline,
'stat_flat_dict':aggregated_stats,
"features_keys_to_process":["##choices#*"],
"record_params": {
"search_point_by_features":"yes"
},
'out':oo}
rrr=ck.access(ii)
if rrr['return']>0: return rrr
ls=rrr.get('last_iteration_output',{})
state=ls.get('state',{})
xchoices=copy.deepcopy(ls.get('choices',{}))
lsaf=rrr.get('last_stat_analysis',{}).get('dict_flat',{})
real_proto=xchoices.get('env',{}).get('CK_CAFFE_MODEL','') # to push to server
ddd={'meta':mmeta}
ddd['choices']=xchoices
features=ls.get('features',{})
deps=ls.get('dependencies',{})
fail=ls.get('fail','')
fail_reason=ls.get('fail_reason','')
ch=ls.get('characteristics',{})
# Save pipeline
ddd['state']={'fail':fail, 'fail_reason':fail_reason}
ddd['characteristics']=ch
ddd['user']=user
# Add files
ddd['file_stat']=ffstat
if real_proto!='':
ddd['file_model_topology']=os.path.basename(real_proto)
if not found:
if o=='con':
ck.out('')
ck.out('Saving results to the remote public repo ('+rduid+') ...')
# Update meta
rx=ck.access({'action':'add',
'module_uoa':work['self_module_uid'],
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr,
'dict':ddd,
'sort_keys':'yes'})
if rx['return']>0: return rx
# Push real proto
if real_proto!='':
if o=='con':
ck.out('')
ck.out('Pushing prototxt to the remote public repo ...')
rx=ck.access({'action':'push',
'module_uoa':work['self_module_uid'],
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr,
'filename':real_proto,
'overwrite':'yes'})
if rx['return']>0: return rx
# Push statistical characteristics
if o=='con':
ck.out('')
ck.out('Pushing file with statistics to server ...')
fstat=os.path.join(pp,tmp_dir,ffstat)
r=ck.save_json_to_file({'json_file':fstat, 'dict':lsaf, 'sort_keys':'yes'})
if r['return']>0: return r
rx=ck.access({'action':'push',
'module_uoa':work['self_module_uid'],
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr,
'filename':fstat,
'overwrite':'yes'})
if rx['return']>0: return rx
os.remove(fstat)
# Info
if o=='con':
ck.out('')
ck.out('Successfully recorded results in remote repo (Entry UID='+rduid+')')
# Check host URL prefix and default module/action
url=ck_url+'&highlight_uid='+rduid
ck.out('')
ck.out('You can see your results at the following URL:')
ck.out('')
ck.out(url)
return {'return':0}
##############################################################################
# show results
def show(i):
"""
Input: {
(crowd_module_uoa) - if rendered from experiment crowdsourcing
(crowd_key) - add extra name to Web keys to avoid overlapping with original crowdsourcing HTML
(crowd_on_change) - reuse onchange doc from original crowdsourcing HTML
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import os
st=''
cmuoa=i.get('crowd_module_uoa','')
ckey=i.get('crowd_key','')
conc=i.get('crowd_on_change','')
if conc=='':
conc=onchange
hi_uid=i.get('highlight_uid','')
h=''
# h='<hr>\n'
h+='<center>\n'
h+='\n\n<script language="JavaScript">function copyToClipboard (text) {window.prompt ("Copy to clipboard: Ctrl+C, Enter", text);}</script>\n\n'
# h+='<h2>Aggregated results from Caffe crowd-benchmarking (time, accuracy, energy, cost, ...)</h2>\n'
h+=hextra
# Check host URL prefix and default module/action
rx=ck.access({'action':'form_url_prefix',
'module_uoa':'wfe',
'host':i.get('host',''),
'port':i.get('port',''),
'template':i.get('template','')})
if rx['return']>0: return rx
url0=rx['url']
template=rx['template']
url=url0
action=i.get('action','')
muoa=i.get('module_uoa','')
st=''
url+='action=index&module_uoa=wfe&native_action='+action+'&'+'native_module_uoa='+muoa
url1=url
# List entries
ii={'action':'search',
'module_uoa':work['self_module_uid'],
'add_meta':'yes'}
if cmuoa!='':
ii['module_uoa']=cmuoa
r=ck.access(ii)
if r['return']>0: return r
lst=r['lst']
# Check unique entries
choices={}
wchoices={}
for q in lst:
d=q['meta']
meta=d.get('meta',{})
for kk in selector:
kx=kk['key']
k=ckey+kx
if k not in choices:
choices[k]=[]
wchoices[k]=[{'name':'','value':''}]
kflat=kk.get('flat_key','')
if kflat=='': kflat='##'+kx
rx=ck.get_by_flat_key({'dict':meta, 'key':kflat})
if rx['return']>0: return rx
v=rx['value']
if v==None: v=''
if v!='':
if v not in choices[k]:
choices[k].append(v)
wchoices[k].append({'name':v, 'value':v})
# Prepare query div ***************************************************************
if cmuoa=='':
# Start form + URL (even when viewing entry)
r=ck.access({'action':'start_form',
'module_uoa':cfg['module_deps']['wfe'],
'url':url1,
'name':form_name})
if r['return']>0: return r
h+=r['html']
for kk in selector:
kx=kk['key']
k=ckey+kx
n=kk['name']
nl=kk.get('new_line','')
if nl=='yes':
h+='<br>\n<div id="ck_entries_space8"></div>\n'
v=''
if i.get(k,'')!='':
v=i[k]
kk['value']=v
# Show hardware
ii={'action':'create_selector',
'module_uoa':cfg['module_deps']['wfe'],
'data':wchoices.get(k,[]),
'name':k,
'onchange':conc,
'skip_sort':'no',
'selected_value':v}
r=ck.access(ii)
if r['return']>0: return r
h+='<b>'+n+':</b> '+r['html'].strip()+'\n'
# Check hidden
if hi_uid!='':
h+='<input type="hidden" name="highlight_uid" value="'+hi_uid+'">\n'
h+='<br><br>'
# Prune list
plst=[]
for q in lst:
d=q['meta']
meta=d.get('meta',{})
# Check selector
skip=False
for kk in selector:
k=kk['key']
n=kk['name']
v=kk.get('value','')
kflat=kk.get('flat_key','')
if kflat=='': kflat='##'+k
rx=ck.get_by_flat_key({'dict':meta, 'key':kflat})
if rx['return']>0: return rx
vxx=rx['value']
if vxx==None: vxx=''
if v!='' and vxx!=v:
skip=True
if not skip:
plst.append(q)
# Check if too many
lplst=len(plst)
if lplst==0:
h+='<b>No results found!</b>'
return {'return':0, 'html':h, 'style':st}
elif lplst>300:
h+='<b>Too many entries to show ('+str(lplst)+') - please, prune list further!</b>'
return {'return':0, 'html':h, 'style':st}
# Prepare table
h+='<table border="1" cellpadding="7" cellspacing="0">\n'
ha='align="center" valign="top"'
hb='align="left" valign="top"'
h+=' <tr style="background-color:#dddddd">\n'
h+=' <td '+ha+'><b>#</b></td>\n'
h+=' <td '+ha+'><b>Platform</b></td>\n'
h+=' <td '+ha+'><b>OS</b></td>\n'
h+=' <td '+ha+'><b>CPU</b></td>\n'
h+=' <td '+ha+'><b>GPGPU</b></td>\n'
h+=' <td '+ha+'><b>Type</b></td>\n'
h+=' <td '+ha+'><b>DNN engine</b></td>\n'
h+=' <td '+ha+'><b>Model</b></td>\n'
h+=' <td '+ha+'><b>Choices (env)</b></td>\n'
h+=' <td '+ha+'><b>FWBW<br>min time</b><br><br>(exp time)<br>stat. repetitions</td>\n'
h+=' <td '+ha+'><b>FW</b></td>\n'
h+=' <td '+ha+'><b>BW</b></td>\n'
h+=' <td '+ha+'><b>Per layer</b></td>\n'
h+=' <td '+ha+'><b>HW costs</td>\n'
h+=' <td '+ha+'><b>All usage costs (preparation, training, inference, errors, etc)</td>\n'
h+=' <td '+ha+'><b>Model size</b></td>\n'
h+=' <td '+ha+'><b><a href="https://github.com/dividiti/ck-caffe/blob/master/script/explore-accuracy/explore_accuracy.20160808.ipynb">Model accuracy on ImageNet</a></td>\n'
h+=' <td '+ha+'><b>Model topology and parameters</td>\n'
h+=' <td '+ha+'><b>Power consumption (W)<br>min / max</td>\n'
h+=' <td '+ha+'><b>Acoustic noise (dB)<br>min / max</td>\n'
h+=' <td '+ha+'><b>Memory usage (MB)</td>\n'
h+=' <td '+ha+'><b>Bug detected?</b></td>\n'
h+=' <td '+ha+'><b>User</b></td>\n'
h+=' <td '+ha+'><b>Replay</b></td>\n'
h+=' </tr>\n'
# Dictionary to hold target meta
tm={}
ix=0
bgraph={'0':[]} # Just for graph demo
if hi_uid!='':
bgraph['1']=[]
# Load min stat
for q in plst:
pmin=os.path.join(q['path'],ffmin)
dx={'##characteristics#run#time_fwbw_ms#min':1e99}
if os.path.isfile(pmin):
rx=ck.load_json_file({'json_file':pmin})
if rx['return']==0:
dx=rx['dict']
# Fix
x=dx.get('##characteristics#run#time_fwbw_ms#min','')
if x==None or x=='' or x>50000:
dx['##characteristics#run#time_fwbw_ms#min']=1e99
if q.get('meta',{}).get('state',{}).get('fail_reason','')=='':
q['meta']['state']['fail']='yes'
q['meta']['state']['fail_reason']='strange timing'
q['min_stat']=dx
# Sort
splst=sorted(plst, key=lambda x: x.get('min_stat',{}).get('##characteristics#run#time_fwbw_ms#min',0))
# splst=sorted(plst, key=lambda x: x.get('meta',{}).get('characteristics',{}).get('run',{}).get('time_fwbw_ms',0))
for q in splst:
ix+=1
duid=q['data_uid']
path=q['path']
d=q['meta']
# Characteristics
# Check if has statistics
dstat={}
fstat=os.path.join(path,'ck-stat-flat-characteristics.json')
if os.path.isfile(fstat):
r=ck.load_json_file({'json_file':fstat, 'dict':dstat})
if r['return']>0: return r
dstat=r['dict']
x0=dstat.get("##characteristics#run#time_fwbw_ms#min",None)
meta=d.get('meta',{})
choices=d.get('choices',{})
env=choices.get('env',{})
params=choices.get('params',{}).get('params',{})
xdeps=meta.get('xdeps',{})
d_model=xdeps.get('caffemodel',{})
d_model_name=d_model.get('data_name','')
d_model_package_uoa=d_model.get('package_uoa','')
d_model_ver=d_model.get('ver','')
d_engine=xdeps.get('lib-caffe',{})
d_engine_name=d_engine.get('data_name','')
d_engine_package_uoa=d_engine.get('package_uoa','')
d_engine_ver=d_engine.get('ver','')
tp=meta.get('dnn_type','')
nn=meta.get('nn_type','')
plat_name=meta.get('plat_name','')
cpu_name=meta.get('cpu_name','')
os_name=meta.get('os_name','')
gpgpu_name=meta.get('gpgpu_name','')
plat_uid=meta.get('platform_uid','')
cpu_uid=meta.get('cpu_uid','')
os_uid=meta.get('os_uid','')
gpu_uid=meta.get('gpu_uid','')
gpgpu_uid=meta.get('gpgpu_uid','')
user=meta.get('user','')
te=d.get('characteristics',{}).get('run',{})
# bgc='afffaf'
bgc='dfffdf'
fail=d.get('state',{}).get('fail','')
fail_reason=d.get('state',{}).get('fail_reason','')
if fail=='yes':
if fail_reason=='': fail_reason='yes'
bgc='ffafaf'
elif hi_uid!='' and duid==hi_uid:
bgc='9fff9f'
# bgraph['0'].append([ix,None])
# bgraph['1'].append([ix,x0])
bg=' style="background-color:#'+bgc+';"'
h+=' <tr'+bg+'>\n'
# Number
h+=' <td '+ha+'><a name="'+duid+'">'+str(ix)+'</a></td>\n'
# Platform, etc ...
x=plat_name
if plat_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform']+':'+plat_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
x=os_name
if os_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform']+':'+os_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
x=cpu_name
if cpu_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform.cpu']+':'+cpu_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
x=gpgpu_name
if gpgpu_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform.gpgpu']+':'+gpgpu_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
# All files
uu1=work['self_module_uid']
if cmuoa!='': uu1=cmuoa
uu2=str(ix)+') <a href="'+url0+'&wcid='+uu1+':'+duid+'">'+duid+'</a>'
uu3='[ <a href="'+url0+'&wcid='+uu1+':'+duid+'">See raw files</a> ]<br><br>('+duid+')'
uu4=uu1+':'+duid
# Type
h+=' <td '+ha+'>'+tp+'</a></td>\n'
# Engine
x=d_engine_name
if d_engine_package_uoa!='':
x='<a href="'+url0+'&wcid=package:'+d_engine_package_uoa+'">'+x+'</a>'
if x!='' and d_engine_ver!='':
x+='\n<br><br>Version <b>'+d_engine_ver+'</b>'
# Versions
ver=''
dver=meta.get('xversions',{})
for dx in sorted(dver):
vx=dver[dx]
if vx!=None and vx!='':
ver+=dx+': '+str(dver[dx])+'\n'
ver=ver.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
if ver!='':
ver='<input type="button" class="ck_small_button" onClick="alert(\''+ver+'\');" value="See versions of all deps">'
h+=' <td '+ha+'>'+x+'<br><br>'+ver+'</td>\n'
# Model
x=nn
msize=''
mtop=''
mtop5=''
if d_model_package_uoa!='':
x='<a href="'+url0+'&wcid=package:'+d_model_package_uoa+'">'+x+'</a>'
# Load features
rx=ck.access({'action':'load',
'module_uoa':'package',
'data_uoa':d_model_package_uoa})
if rx['return']==0:
mft=rx['dict'].get('features',{})
msize=str(mft.get('model_size_mb',''))+' MB'
mtop=str(mft.get('accuracy',''))
mtop5=str(mft.get('accuracy_top5',''))
# if x!='' and d_model_ver!='':
# x+='\n<br><br>Version <b>'+d_model_ver+'</b>'
h+=' <td '+ha+'>'+x+'</td>\n'
# Choices (for now env)
# x='<table border="0" cellpadding="0" cellspacing="2">\n'
x=''
for k in sorted(env):
v=env[k]
x+=str(k)+'='+str(v)+'\n'
# x+='<tr><td>'+str(k)+'=</td><td>'+str(v)+'</td></tr>\n'
# x+='</table>\n'
# x=x.replace("'","\'").replace('"',"\\'").replace('\n','\\n')
x=x.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
x1=''
if x!='':
if env.get('CK_CAFFE_BATCH_SIZE','')!='':
x1+='Batch size='+env['CK_CAFFE_BATCH_SIZE']+'<br><br>\n'
x1+='<input type="button" class="ck_small_button" onClick="alert(\''+x+'\');" value="View all">'
h+=' <td '+ha+'>'+x1+'</td>\n'
x=''
# Check if has stats
x0=dstat.get("##characteristics#run#time_fwbw_ms#min",None)
x0e=dstat.get("##characteristics#run#time_fwbw_ms#exp",None)
x1=dstat.get("##characteristics#run#time_fwbw_ms#center",None)
xr=dstat.get("##characteristics#run#time_fwbw_ms#repeats",None)
x2=dstat.get("##characteristics#run#time_fwbw_ms#halfrange",None)
x=''
if x0!=None:
x='<b>'+('%.0f'%x0)+' ms.</b>\n'
# x+='('+('%.0f'%x1)+' ± '+('%.0f'%x2)+' ms.)'
if x0e!=None and x2!=None:
x+='<br><br>('+('%.0f'%x0e)+' ± '+('%.0f'%x2)+' ms.)\n'
if xr!=None:
x+='<br><i>'+str(xr)+' repetitions</i>\n'
h+=' <td '+ha+' style="background-color:#afffaf">'+x+'</td>\n'
if fail=='yes': x0=0
bgraph['0'].append([ix,x0])
if fail!='yes' and x0!=None and duid!=hi_uid:
if hi_uid!='': bgraph['1'].append([ix,None])
x1=dstat.get("##characteristics#run#time_fw_ms#center",None)
x2=dstat.get("##characteristics#run#time_fw_ms#halfrange",None)
if x1!=None and x2!=None:
x=('%.0f'%x1)+' ± '+('%.0f'%x2)+' ms.'
h+=' <td '+ha+'>'+x+'</td>\n'
x1=dstat.get("##characteristics#run#time_bw_ms#center",None)
x2=dstat.get("##characteristics#run#time_bw_ms#halfrange",None)
if x1!=None and x2!=None:
x=('%.0f'%x1)+' ± '+('%.0f'%x2)+' ms.'
h+=' <td '+ha+'>'+x+'</td>\n'
# Check all characteristics
x=''
x5=''
for k in sorted(te):
v=te[k]
kx="##characteristics#run#"+k
kx1=dstat.get(kx+'#center',None)
kx2=dstat.get(kx+'#halfrange',None)
x6=''
if type(v)==int:
if kx1!=None and kx2!=None:
x6=str(kx1)+' +- '+str(kx2)
else:
x6=str(v)
elif type(v)==float:
if kx1!=None and kx2!=None:
x6=('%.1f'%kx1)+' +- '+('%.1f'%kx2)
else:
x6=('%.1f'%v)
if x6!='':
x5+=str(k)+'='+x6+'\n'
# Also layers
y5=''
for j in range(0,1000):
k1='##characteristics#run#per_layer_info@'+str(j)+'#direction#min'
k2='##characteristics#run#per_layer_info@'+str(j)+'#label#min'
k3='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#min'
k4='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#max'
k5='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#exp_allx'
v1=dstat.get(k1,'')
v2=dstat.get(k2,'')
v3=dstat.get(k3,'')
v4=dstat.get(k4,'')
v5=dstat.get(k5,[])
if v1!='' and v2!='' and v3!='' and v4!='':
v6=0
if len(v5)>0:
v6=v5[0]
xv3=''
xv4=''
xv6=''
if v3!='': xv3=('%.1f'%v3)
if v4!='': xv4=('%.1f'%v4)
if v6!='': xv6=('%.1f'%v6)
if y5=='': y5='Layers:\nName (direction): min time (ms.) ; expected time (ms.) ; max time (ms.)\n'
y5+='\n'+v2+' ('+v1+'): '+xv3+';'+xv6+';'+xv4
else:
break
y5=y5.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
if y5!='':
x+='<a href="'+ck_url1+duid+'">Stats per layer</a><br><br>\n'
x+='<input type="button" class="ck_small_button" onClick="alert(\''+y5+'\');" value="All layers as pop-up">'
# x5=x5.replace("'","\'").replace('"',"\\'").replace('\n','\\n')
x5=x5.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
if x5!='':
x+='<br><br><input type="button" class="ck_small_button" onClick="alert(\''+x5+'\');" value="CK vars">'
h+=' <td '+ha+'>'+x+'</td>\n'
# Get info about platform
hd={}
if plat_uid!='':
rh=ck.access({'action':'load',
'module_uoa':cfg['module_deps']['platform'],
'data_uoa':plat_uid})
if rh['return']==0:
hd=rh['dict']
# Cost (take from platform meta)
hc='-'
if len(hd)>0:
costs=hd.get('features',{}).get('cost',[])
hc=''
for c in costs:
if hc!='': hc+='<br>\n'
hc+='<b>'+str(c.get('price',''))+' '+c.get('currency','')+ '</b> - '+c.get('desc','')+' ('+c.get('date','')+')'
h+=' <td '+ha+'>'+hc+'</a></td>\n'
# TBD: all other costs
h+=' <td '+ha+'></a></td>\n'
# Model size
h+=' <td '+ha+'>'+msize+'</td>\n'
# Accuracy
x=''
if mtop!='' and mtop5!='':
x=mtop+' / '+mtop5
# if nn=='bvlc, alexnet':
# x='0.568279 / 0.799501'
# elif nn=='bvlc, googlenet':
# x='0.689299 / 0.891441'
# elif nn=='deepscale, squeezenet, 1.1':
# x='0.583880 / 0.810123'
# elif nn=='deepscale, squeezenet, 1.0':
# x='0.576801 / 0.803903'
h+=' <td '+ha+'>'+x+'</td>\n'
# Model topology
x=''
fmt=d.get('file_model_topology','')
if fmt!='':
pfmt=os.path.join(path,fmt)
if os.path.isfile(pfmt):
x='<a href="'+url0+'&action=pull&common_action=yes&cid='+work['self_module_uid']+':'+duid+'&filename='+fmt+'">deploy.prototxt</a>\n'
h+=' <td '+ha+'>'+x+'</td>\n'
# Power consumption (TBD - real measurements)
x='-'
if len(hd)>0:
power=hd.get('features',{}).get('power_consumption',{})
if len(power)>0:
pmin=power.get('min','')
pmax=power.get('max','')
x=str(pmin)+' / '+str(pmax)
h+=' <td '+ha+'>'+x+'</a></td>\n'
# Acoustic noise (TBD - real measurements)
x='-'
if len(hd)>0:
power=hd.get('features',{}).get('acoustic_noise',{})
if len(power)>0:
pmin=power.get('min','')
pmax=power.get('max','')
x=str(pmin)+' / '+str(pmax)
h+=' <td '+ha+'>'+x+'</a></td>\n'
# Memory usage
x=''
mem=dstat.get("##characteristics#run#memory_mbytes#max",None)
if mem!=None:
x=str(int(mem))+' MB'
h+=' <td '+ha+'>'+x+'</td>\n'
# Crowdsourcing bug detection
x=fail_reason
if x=='':
x=''
else:
fail_reason=fail_reason.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
x='Yes <input type="button" class="ck_small_button" onClick="alert(\''+fail_reason+'\');" value="Log">'
h+=' <td '+ha+'>'+x+'</td>\n'
h+=' <td '+ha+'><a href="'+url0+'&action=index&module_uoa=wfe&native_action=show&native_module_uoa=experiment.user">'+user+'</a></td>\n'
h+=' <td '+ha+'><input type="button" class="ck_small_button" onClick="copyToClipboard(\'ck replay '+uu4+' '+ck.cfg.get('add_extra_to_replay','')+'\');" value="Replay"><br><br>\n'
h+=' '+uu3+'</td>\n'
h+=' </tr>\n'
h+='</table>\n'
h+='</center>\n'
if cmuoa=='':
h+='</form>\n'
if len(bgraph['0'])>0:
ii={'action':'plot',
'module_uoa':cfg['module_deps']['graph'],
"table":bgraph,
"h_lines":[1.0],
"ymin":0,
"ignore_point_if_none":"yes",
"plot_type":"d3_2d_bars",
"display_y_error_bar":"no",
"title":"Powered by Collective Knowledge",
"x_ticks_period":10,
"axis_x_desc":"Experiment",
"axis_y_desc":"Neural network total time (ms.)",
"plot_grid":"yes",
"d3_div":"ck_interactive",
"image_width":"900",
"image_height":"400",
"wfe_url":url0}
r=ck.access(ii)
if r['return']==0:
x=r.get('html','')
if x!='':
st+=r.get('style','')
h+='<br>\n'
h+='<center>\n'
h+='<div id="ck_box_with_shadow" style="width:920px;">\n'
h+=' <div id="ck_interactive" style="text-align:center">\n'
h+=x+'\n'
h+=' </div>\n'
h+='</div>\n'
h+='</center>\n'
return {'return':0, 'html':h, 'style':st}
##############################################################################
# browse public results
def browse(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import webbrowser
ck.out('Opening web page '+ck_url+' ...')
webbrowser.open(ck_url)
return {'return':0}
##############################################################################
# show info for all layers
def html_viewer(i):
"""
Input: {
data_uoa - CK entry UOA to view
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import os
duoa=i.get('data_uoa','')
# Load entry
r=ck.access({'action':'load',
'module_uoa':work['self_module_uid'],
'data_uoa':duoa})
if r['return']>0: return r
p=r['path']
d=r['dict']
dchars=d.get('characteristics',{})
dchoices=d.get('choices',{})
dmeta=d.get('meta',{})
# Load stats
dstat={}
fstat=os.path.join(p,'ck-stat-flat-characteristics.json')
if os.path.isfile(fstat):
r=ck.load_json_file({'json_file':fstat, 'dict':dstat})
if r['return']>0: return r
dstat=r['dict']
# Prepare table
h=''
# h+='<hr>\n'
h+='<br>\n'
h+='<center>\n'
h+='<h2>DNN engine and model evaluation statistics per layer (crowd-tuning)</h2><br>\n'
h+='</center>\n'
xdeps=dmeta.get('xdeps',{})
lcaffe=xdeps.get('lib-caffe',{})
lmodel=xdeps.get('caffemodel',{})
# Prepare extra info
h+='<p>\n'
h+='<table border="1" cellpadding="8" cellspacing="0">\n'
h+=' <tr>\n'
h+=' <td><b>DNN engine name:</b></td>\n'
h+=' <td>'+lcaffe.get('data_name','')+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>DNN engine version:</b></td>\n'
h+=' <td>'+lcaffe.get('ver','')+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>DNN engine type:</b></td>\n'
h+=' <td>'+dmeta.get('dnn_type','')+'</td>\n'
h+=' </tr>\n'
x=''
dx=dmeta.get('xversions',{})
for k in sorted(dx):
v=dx[k]
if v!='':
if x!='': x+='<br>\n'
x+=k+'='+str(v)+'\n'
h+=' <tr>\n'
h+=' <td><b>DNN engine dependencies:</b></td>\n'
h+=' <td>'+x+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>DNN model name:</b></td>\n'
h+=' <td>'+lmodel.get('data_name','')+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>DNN model version:</b></td>\n'
h+=' <td>'+lmodel.get('ver','')+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>Batch size:</b></td>\n'
h+=' <td>'+dchars.get('run',{}).get('REAL_ENV_CK_CAFFE_BATCH_SIZE','')+'</td>\n'
h+=' </tr>\n'
# TBD: Need to show min,exp,max!
# h+=' <tr>\n'
# h+=' <td><b>FWBW time (ms.):</b></td>\n'
# h+=' <td>'+str(dchars.get('run',{}).get('time_bw_ms',''))+'</td>\n'
# h+=' </tr>\n'
# h+=' <tr>\n'
# h+=' <td><b>FW time (ms.):</b></td>\n'
# h+=' <td>'+str(dchars.get('run',{}).get('time_fw_ms',''))+'</td>\n'
# h+=' </tr>\n'
# h+=' <tr>\n'
# h+=' <td><b>BW time (ms.):</b></td>\n'
# h+=' <td>'+str(dchars.get('run',{}).get('time_bw_ms',''))+'</td>\n'
# h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>Platform:</b></td>\n'
h+=' <td>'+dmeta.get('plat_name','')+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>OS:</b></td>\n'
h+=' <td>'+dmeta.get('os_name','')+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>CPU:</b></td>\n'
h+=' <td>'+dmeta.get('cpu_name','')+'</td>\n'
h+=' </tr>\n'
h+=' <tr>\n'
h+=' <td><b>GPU:</b></td>\n'
h+=' <td>'+dmeta.get('gpu_name','')+'</td>\n'
h+=' </tr>\n'
h+='</table>\n'
h+='<center>\n'
h+='<p>\n'
h+='<table border="0" cellpadding="10" cellspacing="0">\n'
h+=' <tr>\n'
h+=' <td><b>Name</b></td>\n'
h+=' <td><b>Direction</b></td>\n'
h+=' <td align="right"><b>Min time (ms.):</b></td>\n'
h+=' <td align="right"><b>Expected time (ms.):</b></td>\n'
h+=' <td align="right"><b>Max time (ms.):</b></td>\n'
h+=' <td align="right"><b>Repetitions:</b></td>\n'
h+=' </tr>\n'
# Detecting number of layers
jj={}
for j in range(0,1000):
k3='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#min'
v3=dstat.get(k3,'')
if v3=='': break
jj[j]=v3
# Sorting by min time
if i.get('all_params',{}).get('skip_sort','')!='yes':
jj=sorted(jj, key=lambda x: jj[x], reverse=True)
# Also layers
for j in jj:
k1='##characteristics#run#per_layer_info@'+str(j)+'#direction#min'
k2='##characteristics#run#per_layer_info@'+str(j)+'#label#min'
k3='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#min'
k4='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#max'
k5='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#exp_allx'
k7='##characteristics#run#per_layer_info@'+str(j)+'#time_ms#repeats'
v1=dstat.get(k1,'')
v2=dstat.get(k2,'')
v3=dstat.get(k3,'')
v4=dstat.get(k4,'')
v5=dstat.get(k5,[])
v7=dstat.get(k7,'')
if v1!='' and v2!='' and v3!='' and v4!='':
v6=0
if len(v5)>0:
v6=v5[0]
xv3=''
xv4=''
xv6=''
if v3!='':
if v3<0.1: xv3='0'
else: xv3='<b>'+('%.1f'%v3)+'</b>'
if v4!='':
if v4<0.1: xv4='0'
else: xv4='<b>'+('%.1f'%v4)+'</b>'
if v6!='':
if v6<0.1: xv6='0'
else: xv6='<b>'+('%.1f'%v6)+'</b>'
h+=' <tr>\n'
h+=' <td>'+v2+'</td>\n'
h+=' <td>'+v1+'</td>\n'
h+=' <td align="right">'+xv3+'</td>\n'
h+=' <td align="right">'+xv6+'</td>\n'
h+=' <td align="right">'+xv4+'</td>\n'
h+=' <td align="right">'+str(v7)+'</td>\n'
h+=' </tr>\n'
h+='</table>\n'
h+='</center>\n'
return {'return':0, 'html':h, 'show_top':'yes'}
##############################################################################
# replay experiment (TBD)
def replay(i):
"""
Input: {
(data_uoa)
(remote)
(host_os)
(target_os)
(device_id)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import copy
import os
# Setting output
o=i.get('out','')
oo=''
if o=='con': oo='con'
duoa=i.get('data_uoa','')
remote=i.get('remote','')
er=''
esr=''
if remote=='yes':
er=i.get('exchange_repo','')
if er=='': er=ck.cfg['default_exchange_repo_uoa']
esr=i.get('exchange_subrepo','')
if esr=='': esr=ck.cfg['default_exchange_subrepo_uoa']
# Try to load info
if o=='con':
ck.out('Loading experiment entry ...')
ck.out('')
r=ck.access({'action':'load',
'module_uoa':work['self_module_uid'],
'data_uoa':duoa,
'repo_uoa':er,
'remote_repo_uoa':esr})
if r['return']>0: return r
d=r['dict']
hos=i.get('host_os','')
tos=i.get('target_os','')
tdid=i.get('device_id','')
# Check two main deps (engine and model)
meta=d.get('meta',{})
xdeps=meta.get('xdeps',{})
# TBD: rebuild env by tags!
#
# dnn=xdeps.get('lib-caffe',{})
# model=xdeps.get('caffemodel',{})
#
# pdnn=dnn.get('package_uoa','')
# pmodel=model.get('package_uoa','')
#
# preset_env={}
# penv=[pdnn,pmodel]
#
# for j in range(0, len(penv)):
# px=''
# py=penv[j]
#
# if py!='':
# # Search by package
# r=ck.access({'action':'search',
# 'module_uoa':cfg['module_deps']['env'],
# 'search_dict':{'package_uoa':py}})
# if r['return']>0: return r
#
# l=r['lst']
#
# if j==0: preset_env['lib-caffe']=px
# elif j==1: preset_env['caffemodel']=px
# Prepare choices for the pipeline (the actual run happens below)
choices=d.get('choices',{})
# Clean various vars
for k in replay_clean_vars:
if k in choices:
del(choices[k])
if i.get('target_os','')!='' and not i['target_os'].startswith('android'):
del(i['target_os'])
env=choices.get('env',{})
for k in replay_clean_env_vars:
if k in env:
del(env[k])
choices['env']=env
if hos!='': choices['host_os']=hos
if tos!='': choices['target_os']=tos
if tdid!='': choices['device_id']=tdid
pipeline_data_uoa=choices['module_uoa']
# Prepare pipeline
ii={'action':'pipeline',
'module_uoa':cfg['module_deps']['program'],
'prepare':'yes',
'choices':choices,
'out':o}
rr=ck.access(ii)
if rr['return']>0: return rr
fail=rr.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+rr.get('fail_reason','')+')'}
ready=rr.get('ready','')
if ready!='yes':
return {'return':11, 'error':'couldn\'t prepare universal CK program workflow'}
# Run pipeline
ii={'action':'run',
'module_uoa':cfg['module_deps']['pipeline'],
'data_uoa':pipeline_data_uoa,
'pipeline':rr,
'out':o}
rr=ck.access(ii)
if rr['return']>0: return rr
fail=rr.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+rr.get('fail_reason','')+')'}
if o=='con':
ck.out('')
ck.out('Your results:')
ck.out('')
dstat=rr.get('last_stat_analysis',{}).get('dict_flat',{})
x0=dstat.get("##characteristics#run#time_fwbw_ms#min",None)
x0e=dstat.get("##characteristics#run#time_fwbw_ms#exp",None)
if x0!=None:
ck.out('* FWBW min: '+('%.0f'%x0)+' ms.')
if x0e!=None:
ck.out('* FWBW exp: '+('%.0f'%x0e)+' ms.')
x1=dstat.get("##characteristics#run#time_fw_ms#min",None)
x1e=dstat.get("##characteristics#run#time_fw_ms#exp",None)
if x1!=None:
ck.out('* FW min: '+('%.0f'%x1)+' ms.')
if x1e!=None:
ck.out('* FW exp: '+('%.0f'%x1e)+' ms.')
x2=dstat.get("##characteristics#run#time_bw_ms#min",None)
x2e=dstat.get("##characteristics#run#time_bw_ms#exp",None)
if x2!=None:
ck.out('* BW min: '+('%.0f'%x2)+' ms.')
if x2e!=None:
ck.out('* BW exp: '+('%.0f'%x2e)+' ms.')
return {'return':0}
|
py | 1a317a391f4a5330d17b7f906ec6b548c2ba334a | from collections import namedtuple
from enum import Enum
from string import ascii_lowercase
import numpy as np
# ABC for the Decision class
class Decision(object):
ENUM = None
FIELDS = ('decision_id',)
# suggestion for subclassing
# FIELDS = super().FIELDS + ('target_idxs',)
# etc.
# An Ellipsis instead of fields indicates there is a variable
# number of fields.
SHAPES = ((1,),)
DTYPES = (np.int,)
@classmethod
def enum_dict_by_name(cls):
if cls.ENUM is None:
raise NotImplementedError
d = {}
for enum in cls.ENUM:
d[enum.name] = enum.value
return d
@classmethod
def enum_dict_by_value(cls):
if cls.ENUM is None:
raise NotImplementedError
d = {}
for enum in cls.ENUM:
d[enum.value] = enum
return d
@classmethod
def enum_by_value(cls, enum_value):
d = cls.enum_dict_by_value()
return d[enum_value]
@classmethod
def enum_by_name(cls, enum_name):
d = cls.enum_dict_by_name()
return d[enum_name]
@classmethod
def record(cls, enum_value):
# TODO check to make sure the enum value is valid
return {'decision_id' : enum_value}
@classmethod
def action(cls, walkers, decisions):
"""Perform the instructions for a set of resampling records on
walkers."""
raise NotImplementedError
@classmethod
def parents(cls, step):
"""Given a row of resampling records (for a single resampling step)
returns the parents of the children of this step."""
# initialize a list for the parents of this stages walkers
step_parents = [None for i in range(len(step))]
# the rest of the stages parents are based on the previous stage
for parent_idx, parent_rec in enumerate(step):
# if the decision is an ancestor then the instruction
# values will be the children
if parent_rec[0] in cls.ANCESTOR_DECISION_IDS:
# the first value of the parent record is the target
# idxs
child_idxs = parent_rec[1]
for child_idx in child_idxs:
step_parents[child_idx] = parent_idx
return step_parents
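# A hedged worked example for parents() (illustrative only; it assumes a
# subclass whose ANCESTOR_DECISION_IDS contains decision id 0):
#
#   step = [(0, (0, 2)),   # parent 0 is cloned into child slots 0 and 2
#           (0, (1,)),     # parent 1 continues into child slot 1
#           (1, ())]       # decision id 1 is not an ancestor id: no children
#   cls.parents(step)  ->  [0, 1, 0]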
class NothingDecisionEnum(Enum):
NOTHING = 0
# an example of a Decision class that has the enumeration, instruction
# record namedtuple, and the instruction dtypes
class NoDecision(Decision):
ENUM = NothingDecisionEnum
INSTRUCTION_NAMES = (
(ENUM.NOTHING, "NothingInstructionRecord"),
)
INSTRUCTION_FIELDS = (
(ENUM.NOTHING, ('pos',)),)
INSTRUCTION_FIELD_DTYPES = (
(ENUM.NOTHING, (np.int,)),
)
# the decision types that pass on their state
ANCESTOR_DECISION_IDS = (ENUM.NOTHING.value,)
@classmethod
def action(cls, walkers, decisions):
# list for the modified walkers
mod_walkers = [None for i in range(len(walkers))]
# go through each decision and perform the decision
# instructions
for walker_idx, decision in enumerate(decisions):
decision_value, instruction = decision
if decision_value == cls.ENUM.NOTHING.value:
# check to make sure a walker doesn't already exist
# where you are going to put it
if mod_walkers[instruction[0]] is not None:
raise ValueError(
"Multiple walkers assigned to position {}".format(instruction[0]))
# put the walker in the position specified by the
# instruction
mod_walkers[instruction[0]] = walkers[walker_idx]
return mod_walkers
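# A minimal usage sketch added for illustration (not part of the original
# module); it assumes each decision is a (decision_value, instruction) pair,
# as unpacked by NoDecision.action above.
if __name__ == "__main__":
    example_walkers = ["walker_a", "walker_b", "walker_c"]
    example_decisions = [
        (NoDecision.ENUM.NOTHING.value, (0,)),  # keep walker 0 in slot 0
        (NoDecision.ENUM.NOTHING.value, (2,)),  # place walker 1 in slot 2
        (NoDecision.ENUM.NOTHING.value, (1,)),  # place walker 2 in slot 1
    ]
    # Expected output: ['walker_a', 'walker_c', 'walker_b']
    print(NoDecision.action(example_walkers, example_decisions))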
|
py | 1a317ccd1d74a84ebe30b6617dadc43bfa3470b5 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint:disable=line-too-long
r"""Beam job to map to tf.Examples of embeddings.
This file has two modes:
1) Map from tf.Examples of audio to tf.Examples of embeddings.
2) Map from TFDS dataset to tf.Examples of embeddings.
"""
# pylint:enable=line-too-long
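# Example invocation (added for illustration; the concrete paths, module URL
# and output key below are hypothetical, only the flag names come from the
# definitions in this file):
#
#   python audio_to_embeddings_beam_main.py \
#     --input_glob=/tmp/input_tfrecords/* \
#     --output_filename=/tmp/embeddings \
#     --embedding_names=my_embedding \
#     --embedding_modules=https://tfhub.dev/some/module/1 \
#     --module_output_keys=embedding \
#     --audio_key=audio \
#     --sample_rate=16000 \
#     --label_key=label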
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
import tensorflow as tf
from non_semantic_speech_benchmark.data_prep import audio_to_embeddings_beam_utils
flags.DEFINE_string('input_glob', None,
'Glob for input dir. XOR with `tfds_data`.')
flags.DEFINE_string(
'tfds_dataset', None, 'Name of TFDS dataset. '
'XOR with `input_glob`. Should be a TFDS dataset name, e.g. "cifar". '
'Exactly one of `sample_rate_key`, `sample_rate`, or '
'`tfds_dataset` must be not None.')
flags.DEFINE_string('output_filename', None, 'Output filename.')
flags.DEFINE_list(
'embedding_names', None,
'List of embedding module names. Used for logging, and as '
'in the features key of the results tf.Example feature list.')
flags.DEFINE_list(
'embedding_modules', None,
'List of embedding modules to compute. Should be accepted '
'by `hub.load`.')
flags.DEFINE_list(
'module_output_keys', None,
'List of module output key. Must be the same length as '
'`embedding_modules`.')
flags.DEFINE_string('audio_key', None, 'Key of audio.')
flags.DEFINE_string(
'sample_rate_key', None, 'Key of sample rate. '
'Exactly one of `sample_rate_key`, `sample_rate`, or '
'`tfds_dataset` must be not None.')
flags.DEFINE_integer(
'sample_rate', None, 'Sample rate.'
'Exactly one of `sample_rate_key`, `sample_rate`, or '
'`tfds_dataset` must be not None.')
flags.DEFINE_string(
'label_key', None, 'Key for labels. If the feature value is an integer, '
'convert to bytes.')
flags.DEFINE_string(
'speaker_id_key', None,
'Key for speaker_id, or `None`. If this flag is present, '
'check that the key exists and is of type `bytes`.')
flags.DEFINE_bool('average_over_time', False,
'If true, return embeddings that are averaged over time.')
flags.DEFINE_bool(
'delete_audio_from_output', True,
'If true, remove audio from the output table. Can be '
'helpful in keeping output tables small.')
flags.DEFINE_bool('debug', False, 'If True, run in debug mode.')
FLAGS = flags.FLAGS
def main(unused_argv):
# Get input data location from flags. If we're reading a TFDS dataset, get
# train, validation, and test.
input_filenames_list, output_filenames, sample_rate = audio_to_embeddings_beam_utils.read_input_glob_and_sample_rate_from_flags(
FLAGS.input_glob, FLAGS.sample_rate, FLAGS.tfds_dataset,
FLAGS.output_filename)
# Check that inputs and flags are formatted correctly.
audio_to_embeddings_beam_utils.validate_inputs(input_filenames_list,
output_filenames,
FLAGS.embedding_modules,
FLAGS.embedding_names,
FLAGS.module_output_keys)
input_format = 'tfrecord'
output_format = 'tfrecord'
# If you have custom beam options, add them here.
beam_options = None
logging.info('Starting to create flume pipeline...')
with beam.Pipeline(beam_options) as root:
for i, (input_filenames_or_glob, output_filename) in enumerate(
zip(input_filenames_list, output_filenames)):
audio_to_embeddings_beam_utils.make_beam_pipeline(
root,
input_filenames_or_glob,
sample_rate,
FLAGS.debug,
FLAGS.embedding_names,
FLAGS.embedding_modules,
FLAGS.module_output_keys,
FLAGS.audio_key,
FLAGS.sample_rate_key,
FLAGS.label_key,
FLAGS.speaker_id_key,
FLAGS.average_over_time,
FLAGS.delete_audio_from_output,
output_filename,
input_format=input_format,
output_format=output_format,
suffix=i)
if __name__ == '__main__':
flags.mark_flags_as_required([
'output_filename', 'embedding_names', 'embedding_modules',
'module_output_keys', 'audio_key', 'label_key'
])
flags.mark_flags_as_mutual_exclusive(['input_glob', 'tfds_dataset'],
required=True)
flags.mark_flags_as_mutual_exclusive(
['tfds_dataset', 'sample_rate_key', 'sample_rate'], required=True)
tf.compat.v2.enable_v2_behavior()
assert tf.executing_eagerly()
app.run(main)
|
py | 1a317dcdec01369922bb1bdd77fe2f0a9b6fcefe | from pprint import pformat
stack=[]
def print_stack_before_operation(f):
def method(*args, **kwargs):
print("Current stack:", pformat(stack))
f(*args, **kwargs)
print("After operation:", pformat(stack))
return method
def print_operation_name_and_parameter(f):
def method(*args, **kwargs):
print("Operation name:{0} {1}".format( f.__name__, (", parameter: {0}".format( pformat(*args, **kwargs)) if args or kwargs else "")))
f(*args, **kwargs)
return method
def print_spaceline(linecount):
def wrapper(f):
def method(*args, **kwargs):
f(*args, **kwargs)
for _ in range(linecount): print()
return method
return wrapper
@print_spaceline(1)
@print_stack_before_operation
@print_operation_name_and_parameter
def push(v):
stack.append(v)
@print_spaceline(1)
@print_stack_before_operation
@print_operation_name_and_parameter
def multiply():
a = stack.pop(-1)
b = stack.pop(-1)
stack.append(a*b)
@print_spaceline(1)
@print_stack_before_operation
@print_operation_name_and_parameter
def add():
a = stack.pop(-1)
b = stack.pop(-1)
stack.append(a+b)
@print_spaceline(1)
@print_stack_before_operation
@print_operation_name_and_parameter
def pop():
print("Poped ------------------->", stack.pop(-1))
opcodes={
1: {
"func": push,
"pcount": 1
},
2: {
"func": multiply,
"pcount": 0
},
3: {
"func": add,
"pcount": 0
},
4: {
"func": pop,
"pcount": 0
}
}
# 1+2*3+4 = 11
push(1)
push(2)
push(3)
multiply()
add()
push(4)
add()
pop()
print("=*"* 30)
codes=[1, 1,
1, 2,
1, 3,
2,
3,
1, 4,
3,
4
]
while codes:
code=codes.pop(0)
params = []
for _ in range(opcodes[code]['pcount']):
params.append(codes.pop(0))
opcodes[code]['func'](*params)
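# A hedged extension sketch (not in the original example): registering an
# extra opcode in the same table format, where 'pcount' is how many parameters
# the operation consumes from the code stream.
@print_spaceline(1)
@print_stack_before_operation
@print_operation_name_and_parameter
def subtract():
    a = stack.pop(-1)
    b = stack.pop(-1)
    stack.append(b - a)

opcodes[5] = {
    "func": subtract,
    "pcount": 0
}

# e.g. 10 - 4 = 6 would be encoded as:
# codes = [1, 10,   # push 10
#          1, 4,    # push 4
#          5,       # subtract
#          4]       # pop (prints 6)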
|
py | 1a317dfb840abe0df74563804a3d52b21b5fc4cd | #!/usr/bin/env python
import os
import time
import json
import argparse
import pprint as pp
import numpy as np
import pandas as pd
import pickle
from tqdm import tqdm
from datetime import timedelta
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
from nets.attention_model import AttentionModel
from nets.nar_model import NARModel
from nets.encoders.gat_encoder import GraphAttentionEncoder
from nets.encoders.gnn_encoder import GNNEncoder
from nets.encoders.mlp_encoder import MLPEncoder
from reinforce_baselines import *
from problems.tsp.problem_tsp import TSP
from utils import *
from train import *
from tensorboard_logger import Logger as TbLogger
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
import warnings
warnings.filterwarnings("ignore", message="indexing with dtype torch.uint8 is now deprecated, please use a dtype torch.bool instead.")
def train_batch_ft(model, optimizer, baseline, epoch,
batch_id, step, batch, tb_logger, opts):
# Unwrap baseline
bat, bl_val = baseline.unwrap_batch(batch)
# Optionally move Tensors to GPU
x = move_to(bat['nodes'], opts.device)
graph = move_to(bat['graph'], opts.device)
bl_val = move_to(bl_val, opts.device) if bl_val is not None else None
# Evaluate model, get costs and log probabilities
cost, log_likelihood = model(x, graph)
# Evaluate baseline, get baseline loss if any (only for critic)
bl_val, bl_loss = baseline.eval(x, graph, cost) if bl_val is None else (bl_val, 0)
# Calculate loss
reinforce_loss = ((cost - bl_val) * log_likelihood).mean()
loss = reinforce_loss + bl_loss
# Normalize loss for gradient accumulation
loss = loss / opts.accumulation_steps
# Perform backward pass
loss.backward()
# Clip gradient norms and get (clipped) gradient norms for logging
grad_norms = clip_grad_norms(optimizer.param_groups, opts.max_grad_norm)
# Perform optimization step after accumulating gradients
if step % opts.accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
# Logging
if step % int(opts.log_step) == 0:
log_values_ft(cost, grad_norms, epoch, batch_id, step, log_likelihood,
reinforce_loss, bl_loss, tb_logger, opts)
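# A self-contained sketch of the gradient-accumulation pattern used above
# (added for illustration; `accumulation_demo` and its arguments are
# hypothetical and are not called anywhere in this script).
def accumulation_demo(model, optimizer, batches, accumulation_steps):
    optimizer.zero_grad()
    for step, (x, y) in enumerate(batches, start=1):
        loss = torch.nn.functional.mse_loss(model(x), y)
        # Scale each mini-batch loss so the summed gradients match one
        # large-batch update of size batch_size * accumulation_steps
        (loss / accumulation_steps).backward()
        if step % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()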
def log_values_ft(cost, grad_norms, epoch, batch_id, step, log_likelihood,
reinforce_loss, bl_loss, tb_logger, opts):
avg_cost = cost.mean().item()
grad_norms, grad_norms_clipped = grad_norms
# Log values to screen
print('\nepoch: {}, train_batch_id: {}, avg_cost: {}'.format(epoch, batch_id, avg_cost))
print('grad_norm: {}, clipped: {}'.format(grad_norms[0], grad_norms_clipped[0]))
# Log values to tensorboard
if not opts.no_tensorboard:
tb_logger.log_value('avg_cost/ft', avg_cost, step)
tb_logger.log_value('actor_loss/ft', reinforce_loss.item(), step)
tb_logger.log_value('nll/ft', -log_likelihood.mean().item(), step)
tb_logger.log_value('grad_norm/ft', grad_norms[0], step)
tb_logger.log_value('grad_norm_clipped/ft', grad_norms_clipped[0], step)
if opts.baseline == 'critic':
tb_logger.log_value('critic_loss/ft', bl_loss.item(), step)
tb_logger.log_value('critic_grad_norm/ft', grad_norms[1], step)
tb_logger.log_value('critic_grad_norm_clipped/ft', grad_norms_clipped[1], step)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ft_run_name", type=str, default="debug",
help="Run name to create logging sub-directory")
parser.add_argument("--ft_strategy", type=str, default="active",
help="Finetuning strategy: active/fixed/random")
parser.add_argument("--problem", type=str, default="tsp")
parser.add_argument("--min_size", type=int, default=200)
parser.add_argument("--max_size", type=int, default=200)
parser.add_argument("--neighbors", type=float, default=0.20)
parser.add_argument("--knn_strat", type=str, default="percentage")
parser.add_argument("--data_distribution", type=str, default="random")
parser.add_argument("--val_dataset", type=str, default="data/tsp/tsp200_test_concorde.txt",
help="Dataset to evaluate finetuned model on")
parser.add_argument("--epoch_size", type=int, default=128000)
parser.add_argument("--val_size", type=int, default=1280)
parser.add_argument("--rollout_size", type=int, default=1280)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--accumulation_steps", type=int, default=8)
parser.add_argument("--n_epochs", type=int, default=100)
parser.add_argument('--model', type=str,
help="Path to model checkpoints directory")
parser.add_argument('--baseline', type=str, default="exponential",
help="Baseline for finetuning model: none/exponential/rollout")
parser.add_argument('--bl_alpha', type=float, default=0.05,
help='Significance in the t-test for updating rollout baseline')
parser.add_argument("--lr_ft", type=float, default=0.00001)
parser.add_argument("--max_grad_norm", type=float, default=1)
parser.add_argument('--seed', type=int, default=1234, help='Random seed to use')
parser.add_argument('--no_cuda', action='store_true', help='Disable CUDA')
parser.add_argument('--num_workers', type=int, default=0,
help='Number of workers for DataLoaders')
parser.add_argument('--no_tensorboard', action='store_true',
help='Disable logging TensorBoard files')
parser.add_argument('--no_progress_bar', action='store_true',
help='Disable progress bar')
parser.add_argument('--log_step', type=int, default=100,
help='Log info every log_step steps')
parser.add_argument('--val_every', type=int, default=1,
help='Validate every val_every epochs')
opts = parser.parse_args()
opts.use_cuda = torch.cuda.is_available() and not opts.no_cuda
opts.ft_run_name = "{}_{}".format(opts.ft_run_name, time.strftime("%Y%m%dT%H%M%S"))
# Pretty print the run args
pp.pprint(vars(opts))
# Opts from checkpoint
args = load_args(os.path.join(opts.model, 'args.json'))
os.makedirs(os.path.join(args["save_dir"], opts.ft_run_name))
# Save arguments so exact configuration can always be found
with open(os.path.join(args["save_dir"], opts.ft_run_name, "args-ft.json"), 'w') as f:
json.dump(vars(opts), f, indent=True)
# Set the device
opts.device = torch.device("cuda:0" if opts.use_cuda else "cpu")
# Find model file
if os.path.isfile(opts.model):
model_filename = opts.model
path = os.path.dirname(model_filename)
elif os.path.isdir(opts.model):
epoch = max(
int(os.path.splitext(filename)[0].split("-")[1])
for filename in os.listdir(opts.model)
if os.path.splitext(filename)[1] == '.pt'
)
model_filename = os.path.join(opts.model, 'epoch-{}.pt'.format(epoch))
else:
assert False, "{} is not a valid directory or file".format(opts.model)
# Set the random seed
torch.manual_seed(opts.seed)
np.random.seed(opts.seed)
# Configure tensorboard
tb_logger = TbLogger(os.path.join(
args["log_dir"], "{}_{}-{}".format(args["problem"], args["min_size"], args["max_size"]), args["run_name"], opts.ft_run_name))
# Figure out what's the problem
problem = load_problem(args["problem"])
# Load data from load_path
load_data = {}
print('\nLoading data from {}'.format(opts.model))
load_data = torch_load_cpu(model_filename)
# Initialize model
model_class = {
'attention': AttentionModel,
'nar': NARModel,
}.get(args.get('model', 'attention'), None)
assert model_class is not None, "Unknown model: {}".format(model_class)
encoder_class = {
'gnn': GNNEncoder,
'gat': GraphAttentionEncoder,
'mlp': MLPEncoder
}.get(args.get('encoder', 'gnn'), None)
assert encoder_class is not None, "Unknown encoder: {}".format(encoder_class)
model = model_class(
problem=problem,
embedding_dim=args['embedding_dim'],
encoder_class=encoder_class,
n_encode_layers=args['n_encode_layers'],
aggregation=args['aggregation'],
aggregation_graph=args['aggregation_graph'],
normalization=args['normalization'],
learn_norm=args['learn_norm'],
track_norm=args['track_norm'],
gated=args['gated'],
n_heads=args['n_heads'],
tanh_clipping=args['tanh_clipping'],
mask_inner=True,
mask_logits=True,
mask_graph=False,
checkpoint_encoder=args['checkpoint_encoder'],
shrink_size=args['shrink_size']
).to(opts.device)
# Compute number of network parameters
print(model)
nb_param = 0
for param in model.parameters():
nb_param += np.prod(list(param.data.size()))
print('Number of parameters: ', nb_param)
# Overwrite model parameters by parameters to load
print('\nOverwriting model parameters from checkpoint')
model_ = get_inner_model(model)
model_.load_state_dict({**model_.state_dict(), **load_data.get('model', {})})
# Initialize baseline
if opts.baseline == 'exponential':
baseline = ExponentialBaseline(args["exp_beta"])
elif opts.baseline == 'critic':
assert problem.NAME == 'tsp', "Critic only supported for TSP"
baseline = CriticBaseline(
(
CriticNetwork(
embedding_dim=args["embedding_dim"],
encoder_class=encoder_class,
n_encode_layers=args["n_encode_layers"],
aggregation=args["aggregation"],
normalization=args["normalization"],
learn_norm=args["learn_norm"],
track_norm=args["track_norm"],
gated=args["gated"],
n_heads=args["n_heads"]
)
).to(opts.device)
)
print(baseline.critic)
nb_param = 0
for param in baseline.get_learnable_parameters():
nb_param += np.prod(list(param.data.size()))
print('Number of parameters (BL): ', nb_param)
elif opts.baseline == 'rollout':
baseline = RolloutBaseline(model, problem, opts)
else:
# assert opts.baseline is None, "Unknown baseline: {}".format(opts.baseline)
baseline = NoBaseline()
# Load baseline from data, make sure script is called with same type of baseline
if 'baseline' in load_data and opts.baseline == args["baseline"]:
print('\nOverwriting baseline from checkpoint')
baseline.load_state_dict(load_data['baseline'])
# Initialize optimizer
optimizer = optim.Adam(
[{'params': model.parameters(), 'lr': args["lr_model"]}]
+ (
[{'params': baseline.get_learnable_parameters(), 'lr': args["lr_critic"]}]
if len(baseline.get_learnable_parameters()) > 0
else []
)
)
# Load optimizer state
if 'optimizer' in load_data:
print('\nOverwriting optimizer from checkpoint')
optimizer.load_state_dict(load_data['optimizer'])
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.to(opts.device)
# Set finetuning learning rate
for param_group in optimizer.param_groups:
param_group['lr'] = opts.lr_ft
# Load random state
torch.set_rng_state(load_data['rng_state'])
if opts.use_cuda:
torch.cuda.set_rng_state_all(load_data['cuda_rng_state'])
# Dumping of state was done before epoch callback, so do that now (model is loaded)
baseline.epoch_callback(model, epoch)
print("Resuming after epoch {}".format(epoch))
epoch_start = epoch + 1
step = 0
# Evaluate on held-out set
val_dataset = TSP.make_dataset(
filename=opts.val_dataset, batch_size=opts.batch_size, num_samples=opts.val_size,
neighbors=opts.neighbors, knn_strat=opts.knn_strat, supervised=True
)
avg_reward, avg_opt_gap = validate(model, val_dataset, problem, opts)
tb_logger.log_value('val_ft/avg_reward', avg_reward, step)
tb_logger.log_value('val_ft/opt_gap', avg_opt_gap, step)
if opts.ft_strategy == "active":
# Active search: finetune on the test set
train_dataset = baseline.wrap_dataset(val_dataset)
train_dataloader = DataLoader(
train_dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.num_workers)
elif opts.ft_strategy == "fixed":
# Fixed finetuning: finetune on a fixed training set
train_dataset = baseline.wrap_dataset(
problem.make_dataset(
min_size=opts.min_size, max_size=opts.max_size, batch_size=opts.batch_size,
num_samples=opts.epoch_size, distribution=opts.data_distribution,
neighbors=opts.neighbors, knn_strat=opts.knn_strat
))
train_dataloader = DataLoader(
train_dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.num_workers)
# Start finetuning loop
for epoch in range(epoch_start, epoch_start + opts.n_epochs):
print("\nStart finetuning epoch {}, lr={} for run {}".format(epoch, optimizer.param_groups[0]['lr'], args["run_name"]))
start_time = time.time()
# Put model in train mode!
model.train()
optimizer.zero_grad()
set_decode_type(model, "sampling")
if opts.ft_strategy == "random":
# Random finetuning: finetune on new/random samples each epoch
train_dataset = baseline.wrap_dataset(
problem.make_dataset(
min_size=opts.min_size, max_size=opts.max_size, batch_size=opts.batch_size,
num_samples=opts.epoch_size, distribution=opts.data_distribution,
neighbors=opts.neighbors, knn_strat=opts.knn_strat
))
train_dataloader = DataLoader(
train_dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.num_workers)
for batch_id, batch in enumerate(tqdm(train_dataloader, disable=opts.no_progress_bar, ascii=True)):
train_batch_ft(
model,
optimizer,
baseline,
epoch,
batch_id,
step,
batch,
tb_logger,
opts
)
step += 1
epoch_duration = time.time() - start_time
print("Finished epoch {}, took {} s".format(epoch, time.strftime('%H:%M:%S', time.gmtime(epoch_duration))))
if epoch % opts.val_every == 0:
# Evaluate on held-out set
avg_reward, avg_opt_gap = validate(model, val_dataset, problem, opts)
tb_logger.log_value('val_ft/avg_reward', avg_reward, step)
tb_logger.log_value('val_ft/opt_gap', avg_opt_gap, step)
baseline.epoch_callback(model, epoch)
print('\nSaving model and state...')
torch.save(
{
'model': get_inner_model(model).state_dict(),
'optimizer': optimizer.state_dict(),
'rng_state': torch.get_rng_state(),
'cuda_rng_state': torch.cuda.get_rng_state_all()
},
os.path.join(args["save_dir"], opts.ft_run_name, 'epoch-{}-ft.pt'.format(epoch))
)
|
py | 1a317e6de87f6bc52d69211b2980b4d2e9c41db4 | # Copyright 2019. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bluepy import __path__ as bluepy_path
from pprint import pformat
from bluepy.btle import DefaultDelegate, Peripheral, Scanner, UUID, capitaliseName
from bluepy.btle import BTLEDisconnectError, BTLEManagementError, BTLEGattError
from random import choice
from string import ascii_lowercase
import time
from threading import Thread
from thingsboard_gateway.connectors.connector import Connector, log
from thingsboard_gateway.connectors.ble.bytes_ble_uplink_converter import BytesBLEUplinkConverter
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
class BLEConnector(Connector, Thread):
def __init__(self, gateway, config, connector_type):
super().__init__()
self.__connector_type = connector_type
self.__default_services = [x for x in range(0x1800, 0x183A)]
self.statistics = {'MessagesReceived': 0,
'MessagesSent': 0}
self.__gateway = gateway
self.__config = config
self.setName(self.__config.get("name",
'BLE Connector ' + ''.join(choice(ascii_lowercase) for _ in range(5))))
self._connected = False
self.__stopped = False
self.__previous_scan_time = time.time()-10000
self.__previous_read_time = time.time()-10000
self.__check_interval_seconds = self.__config['checkIntervalSeconds'] if self.__config.get(
'checkIntervalSeconds') is not None else 10
self.__rescan_time = self.__config['rescanIntervalSeconds'] if self.__config.get(
'rescanIntervalSeconds') is not None else 10
self.__scanner = Scanner().withDelegate(ScanDelegate(self))
self.__devices_around = {}
self.__available_converters = []
self.__notify_delegators = {}
self.__fill_interest_devices()
self.daemon = True
def run(self):
while True:
if time.time() - self.__previous_scan_time >= self.__rescan_time != 0:
self.__scan_ble()
self.__previous_scan_time = time.time()
if time.time() - self.__previous_read_time >= self.__check_interval_seconds:
self.__get_services_and_chars()
self.__previous_read_time = time.time()
time.sleep(.1)
if self.__stopped:
log.debug('STOPPED')
break
def close(self):
self.__stopped = True
for device in self.__devices_around:
try:
self.__devices_around[device]['peripheral'].disconnect()
except Exception as e:
log.exception(e)
raise e
def get_name(self):
return self.name
def on_attributes_update(self, content):
log.debug(content)
try:
for device in self.__devices_around:
if self.__devices_around[device]['device_config'].get('name') == content['device']:
for requests in self.__devices_around[device]['device_config']["attributeUpdates"]:
for service in self.__devices_around[device]['services']:
if requests['characteristicUUID'] in self.__devices_around[device]['services'][service]:
characteristic = self.__devices_around[device]['services'][service][requests['characteristicUUID']]['characteristic']
if 'WRITE' in characteristic.propertiesToString():
if content['data'].get(requests['attributeOnThingsBoard']) is not None:
try:
self.__check_and_reconnect(device)
characteristic.write(content['data'][requests['attributeOnThingsBoard']].encode('UTF-8'))
except BTLEDisconnectError:
self.__check_and_reconnect(device)
characteristic.write(content['data'][requests['attributeOnThingsBoard']].encode('UTF-8'))
except Exception as e:
log.exception(e)
else:
log.error('Cannot process attribute update request for device: %s with data: %s and config: %s',
device,
content,
self.__devices_around[device]['device_config']["attributeUpdates"])
except Exception as e:
log.exception(e)
def server_side_rpc_handler(self, content):
log.debug(content)
try:
for device in self.__devices_around:
if self.__devices_around[device]['device_config'].get('name') == content['device']:
for requests in self.__devices_around[device]['device_config']["serverSideRpc"]:
for service in self.__devices_around[device]['services']:
if requests['characteristicUUID'] in self.__devices_around[device]['services'][service]:
characteristic = self.__devices_around[device]['services'][service][requests['characteristicUUID']]['characteristic']
if requests.get('methodProcessing') and requests['methodProcessing'].upper() in characteristic.propertiesToString():
if content['data']['method'] == requests['methodRPC']:
response = None
if requests['methodProcessing'].upper() == 'WRITE':
try:
self.__check_and_reconnect(device)
response = characteristic.write(content['data'].get('params', '').encode('UTF-8'), requests.get('withResponse', False))
except BTLEDisconnectError:
self.__check_and_reconnect(device)
response = characteristic.write(content['data'].get('params', '').encode('UTF-8'), requests.get('withResponse', False))
except Exception as e:
log.exception(e)
elif requests['methodProcessing'].upper() == 'READ':
try:
self.__check_and_reconnect(device)
response = characteristic.read()
except BTLEDisconnectError:
self.__check_and_reconnect(device)
response = characteristic.read()
except Exception as e:
log.exception(e)
elif requests['methodProcessing'].upper() == 'NOTIFY':
try:
self.__check_and_reconnect(device)
delegate = self.__notify_handler(self.__devices_around[device], characteristic.handle)
response = delegate.data
except BTLEDisconnectError:
self.__check_and_reconnect(device)
delegate = self.__notify_handler(self.__devices_around[device], characteristic.handle)
response = delegate.data
except Exception as e:
log.exception(e)
if response is not None:
log.debug('Response from device: %s', response)
if requests['withResponse']:
response = 'success'
self.__gateway.send_rpc_reply(content['device'], content['data']['id'], str(response))
else:
log.error('Method for rpc request - not supported by characteristic or not found in the config.\nDevice: %s with data: %s and config: %s',
device,
content,
self.__devices_around[device]['device_config']["serverSideRpc"])
except Exception as e:
log.exception(e)
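    # A hedged example of the connector configuration shape the handlers above
    # expect (values are hypothetical; the key names are taken from the lookups
    # in this class):
    #
    #   {
    #     "name": "BLE Connector",
    #     "rescanIntervalSeconds": 10,
    #     "checkIntervalSeconds": 10,
    #     "scanTimeSeconds": 5,
    #     "devices": [
    #       {
    #         "name": "Temperature sensor",
    #         "MACAddress": "AA:BB:CC:DD:EE:FF",
    #         "telemetry": [{"key": "temperature", "method": "read",
    #                        "characteristicUUID": "...", "byteFrom": 0, "byteTo": -1}],
    #         "attributes": [{"key": "name", "method": "read",
    #                         "characteristicUUID": "...", "byteFrom": 0, "byteTo": -1}],
    #         "attributeUpdates": [{"attributeOnThingsBoard": "sharedName",
    #                               "characteristicUUID": "..."}],
    #         "serverSideRpc": [{"methodRPC": "rpcMethod1", "methodProcessing": "read",
    #                            "withResponse": true, "characteristicUUID": "..."}]
    #       }
    #     ]
    #   }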
def is_connected(self):
return self._connected
def open(self):
self.__stopped = False
self.start()
def device_add(self, device):
for interested_device in self.__devices_around:
if device.addr.upper() == interested_device and self.__devices_around[interested_device].get('scanned_device') is None:
self.__devices_around[interested_device]['scanned_device'] = device
self.__devices_around[interested_device]['is_new_device'] = True
log.debug('Device with address: %s - found.', device.addr.upper())
def __get_services_and_chars(self):
for device in self.__devices_around:
try:
if self.__devices_around.get(device) is not None and self.__devices_around[device].get('scanned_device') is not None:
log.debug('Connecting to device with address: %s', self.__devices_around[device]['scanned_device'].addr.upper())
if self.__devices_around[device].get('peripheral') is None:
peripheral = Peripheral(self.__devices_around[device]['scanned_device'])
self.__devices_around[device]['peripheral'] = peripheral
else:
peripheral = self.__devices_around[device]['peripheral']
peripheral.connect(self.__devices_around[device]['scanned_device'])
services = peripheral.getServices()
for service in services:
if self.__devices_around[device].get('services') is None:
log.debug('Building device %s map, it may take a time, please wait...', device)
self.__devices_around[device]['services'] = {}
service_uuid = str(service.uuid).upper()
if self.__devices_around[device]['services'].get(service_uuid) is None:
self.__devices_around[device]['services'][service_uuid] = {}
try:
characteristics = service.getCharacteristics()
except BTLEDisconnectError:
self.__check_and_reconnect(device)
characteristics = service.getCharacteristics()
if self.__config.get('buildDevicesMap', False):
for characteristic in characteristics:
descriptors = []
try:
self.__check_and_reconnect(device)
try:
descriptors = characteristic.getDescriptors()
except BTLEDisconnectError:
self.__check_and_reconnect(device)
descriptors = characteristic.getDescriptors()
except BTLEGattError as e:
log.debug(e)
except Exception as e:
log.exception(e)
characteristic_uuid = str(characteristic.uuid).upper()
if self.__devices_around[device]['services'][service_uuid].get(characteristic_uuid) is None:
self.__devices_around[device]['services'][service_uuid][characteristic_uuid] = {
'characteristic': characteristic,
'handle': characteristic.handle,
'descriptors': {}}
for descriptor in descriptors:
log.debug(descriptor.handle)
log.debug(str(descriptor.uuid))
log.debug(str(descriptor))
self.__devices_around[device]['services'][service_uuid][characteristic_uuid]['descriptors'][descriptor.handle] = descriptor
except BTLEDisconnectError:
self.__check_and_reconnect(device)
else:
for characteristic in characteristics:
characteristic_uuid = str(characteristic.uuid).upper()
self.__devices_around[device]['services'][service_uuid][characteristic_uuid] = {
'characteristic': characteristic,
'handle': characteristic.handle}
if self.__devices_around[device]['is_new_device']:
log.debug('New device %s - processing.', device)
self.__devices_around[device]['is_new_device'] = False
self.__new_device_processing(device)
for interest_char in self.__devices_around[device]['interest_uuid']:
for section in self.__devices_around[device]['interest_uuid'][interest_char]:
data = self.__service_processing(device, section['section_config'])
converter = section['converter']
converted_data = converter.convert(section, data)
log.debug(data)
log.debug(converted_data)
self.__gateway.send_to_storage(self.get_name(), converted_data)
except BTLEDisconnectError:
log.debug('Cannot connect to device %s', device)
continue
except Exception as e:
log.exception(e)
def __new_device_processing(self, device):
default_services_on_device = [service for service in self.__devices_around[device]['services'].keys() if int(service.split('-')[0], 16) in self.__default_services]
log.debug('Default services found on device %s :%s', device, default_services_on_device)
converter = BytesBLEUplinkConverter(self.__devices_around[device]['device_config'])
converted_data = None
for service in default_services_on_device:
characteristics = [char for char in self.__devices_around[device]['services'][service].keys() if self.__devices_around[device]['services'][service][char]['characteristic'].supportsRead()]
for char in characteristics:
read_config = {'characteristicUUID': char,
'method': 'READ',
}
try:
self.__check_and_reconnect(device)
data = self.__service_processing(device, read_config)
attribute = capitaliseName(UUID(char).getCommonName())
read_config['key'] = attribute
read_config['byteFrom'] = 0
read_config['byteTo'] = -1
converter_config = [{"type": "attributes",
"clean": False,
"section_config": read_config}]
for interest_information in converter_config:
try:
converted_data = converter.convert(interest_information, data)
log.debug(converted_data)
except Exception as e:
log.debug(e)
except Exception as e:
log.debug('Cannot process %s', e)
continue
if converted_data is not None:
self.__gateway.add_device(converted_data["deviceName"], {"connector": self})
self.__gateway.send_to_storage(self.get_name(), converted_data)
def __check_and_reconnect(self, device):
while self.__devices_around[device]['peripheral']._helper is None:
self.__devices_around[device]['peripheral'].connect(self.__devices_around[device]['scanned_device'])
def __notify_handler(self, device, notify_handle, delegate=None):
class NotifyDelegate(DefaultDelegate):
def __init__(self):
DefaultDelegate.__init__(self)
self.device = device
self.data = {}
def handleNotification(self, handle, data):
self.data = data
log.debug('Notification received from device %s handle: %i, data: %s', self.device, handle, data)
if delegate is None:
delegate = NotifyDelegate()
device['peripheral'].withDelegate(delegate)
device['peripheral'].writeCharacteristic(notify_handle, b'\x01\x00', True)
if device['peripheral'].waitForNotifications(1):
log.debug("Data received: %s", delegate.data)
return delegate
def __service_processing(self, device, characteristic_processing_conf):
for service in self.__devices_around[device]['services']:
characteristic_uuid_from_config = characteristic_processing_conf.get('characteristicUUID')
if characteristic_uuid_from_config is None:
log.error('Characteristic not found in config: %s', pformat(characteristic_processing_conf))
return
if self.__devices_around[device]['services'][service].get(characteristic_uuid_from_config) is None:
continue
characteristic = self.__devices_around[device]['services'][service][characteristic_uuid_from_config]['characteristic']
self.__check_and_reconnect(device)
data = None
if characteristic_processing_conf.get('method', '_').upper().split()[0] == "READ":
if characteristic.supportsRead():
self.__check_and_reconnect(device)
data = characteristic.read()
log.debug(data)
else:
log.error('This characteristic doesn\'t support "READ" method.')
if characteristic_processing_conf.get('method', '_').upper().split()[0] == "NOTIFY":
self.__check_and_reconnect(device)
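                # 0x2902 is the standard Client Characteristic Configuration Descriptor (CCCD) UUID.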
descriptor = characteristic.getDescriptors(forUUID=0x2902)[0]
handle = descriptor.handle
if self.__notify_delegators.get(device) is None:
self.__notify_delegators[device] = {}
if self.__notify_delegators[device].get(handle) is None:
self.__notify_delegators[device][handle] = {'function': self.__notify_handler,
'args': (
self.__devices_around[device],
handle,
self.__notify_delegators[device].get(handle)),
'delegate': None,
}
self.__notify_delegators[device][handle]['delegate'] = self.__notify_delegators[device][handle]['function'](*self.__notify_delegators[device][handle]['args'])
data = self.__notify_delegators[device][handle]['delegate'].data
else:
self.__notify_delegators[device][handle]['args'] = (self.__devices_around[device], handle, self.__notify_delegators[device][handle]['delegate'])
self.__notify_delegators[device][handle]['delegate'] = self.__notify_delegators[device][handle]['function'](*self.__notify_delegators[device][handle]['args'])
data = self.__notify_delegators[device][handle]['delegate'].data
if data is None:
log.error('Cannot process characteristic: %s with config:\n%s', str(characteristic.uuid).upper(), pformat(characteristic_processing_conf))
else:
log.debug('data: %s', data)
return data
def __scan_ble(self):
log.debug("Scanning for devices...")
try:
self.__scanner.scan(self.__config.get('scanTimeSeconds', 5), passive=self.__config.get('passiveScanMode', False))
except BTLEManagementError as e:
log.error('BLE working only with root user.')
log.error('Or you can try this command:\nsudo setcap '
'\'cap_net_raw,cap_net_admin+eip\' %s'
'\n====== Attention! ====== '
'\nCommand above - provided access to ble devices to any user.'
'\n========================', str(bluepy_path[0] + '/bluepy-helper'))
self._connected = False
raise e
except Exception as e:
log.exception(e)
time.sleep(10)
def __fill_interest_devices(self):
if self.__config.get('devices') is None:
log.error('Devices not found in configuration file. BLE Connector stopped.')
self._connected = False
return
for interest_device in self.__config.get('devices'):
keys_in_config = ['attributes', 'telemetry']
if interest_device.get('MACAddress') is not None:
default_converter = BytesBLEUplinkConverter(interest_device)
interest_uuid = {}
for key_type in keys_in_config:
for type_section in interest_device.get(key_type):
if type_section.get("characteristicUUID") is not None:
converter = None
if type_section.get('converter') is not None:
try:
module = TBUtility.check_and_import(self.__connector_type, type_section['converter'])
if module is not None:
log.debug('Custom converter for device %s - found!', interest_device['MACAddress'])
converter = module(interest_device)
else:
log.error("\n\nCannot find extension module for device %s .\nPlease check your configuration.\n", interest_device['MACAddress'])
except Exception as e:
log.exception(e)
else:
converter = default_converter
if converter is not None:
if interest_uuid.get(type_section["characteristicUUID"].upper()) is None:
interest_uuid[type_section["characteristicUUID"].upper()] = [{'section_config': type_section,
'type': key_type,
'converter': converter}]
else:
interest_uuid[type_section["characteristicUUID"].upper()].append({'section_config': type_section,
'type': key_type,
'converter': converter})
else:
log.error("No characteristicUUID found in configuration section for %s:\n%s\n", key_type, pformat(type_section))
if self.__devices_around.get(interest_device['MACAddress'].upper()) is None:
self.__devices_around[interest_device['MACAddress'].upper()] = {}
self.__devices_around[interest_device['MACAddress'].upper()]['device_config'] = interest_device
self.__devices_around[interest_device['MACAddress'].upper()]['interest_uuid'] = interest_uuid
else:
log.error("Device address not found, please check your settings.")
class ScanDelegate(DefaultDelegate):
def __init__(self, ble_connector):
DefaultDelegate.__init__(self)
self.__connector = ble_connector
def handleDiscovery(self, dev, is_new_device, is_new_data):
if is_new_device:
self.__connector.device_add(dev)
|
py | 1a317ea3a1ed9ea4d47149ab4632cd1433a3bcd7 | """The error checking chain is a list of status word
(sw1, sw2) error check strategies.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:[email protected]
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from sys import exc_info
class ErrorCheckingChain(object):
"""The error checking chain is a list of response apdu status word
(sw1, sw2) error check strategies. Each strategy in the chain is
called until an error is detected. A L{smartcard.sw.SWException}
exception is raised when an error is detected. No exception is
raised if no error is detected.
Implementation derived from Bruce Eckel, Thinking in Python. The
L{ErrorCheckingChain} implements the Chain Of Responsibility design
pattern.
"""
def __init__(self, chain, strategy):
"""constructor. Appends a strategy to the L{ErrorCheckingChain}
chain."""
self.strategy = strategy
self.chain = chain
self.chain.append(self)
self.excludes = []
def next(self):
"""Returns next error checking strategy."""
# Where this link is in the chain:
location = self.chain.index(self)
if not self.end():
return self.chain[location + 1]
def addFilterException(self, exClass):
"""Add an exception filter to the error checking chain.
@param exClass: the exception to exclude, e.g.
L{smartcard.sw.SWExceptions.WarningProcessingException} A filtered
exception will not be raised when the sw1,sw2 conditions that
        would raise the exception are met.
"""
self.excludes.append(exClass)
if self.end():
return
self.next().addFilterException(exClass)
def end(self):
"""Returns True if this is the end of the error checking
strategy chain."""
return (self.chain.index(self) + 1 >= len(self.chain))
def __call__(self, data, sw1, sw2):
"""Called to test data, sw1 and sw2 for error on the chain."""
try:
self.strategy(data, sw1, sw2)
except tuple(self.excludes) as exc:
            # The following additional filter may look redundant, it isn't.
# It checks that type(exc) is *equal* to any of self.excludes,
# rather than equal-or-subclass to any of self.excludes.
# This maintains backward compatibility with the behaviour of
# pyscard <= 1.6.16.
# if exception is filtered, return
for exception in self.excludes:
if exception == exc_info()[0]:
return
# otherwise reraise exception
raise
# if not done, call next strategy
if self.end():
return
return self.next()(data, sw1, sw2)
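# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal example of how a chain is typically assembled; it assumes the
# ISO7816-4/8 error checkers and SWExceptions bundled with pyscard are
# available. Status words are passed to the head of the chain.
#
#   from smartcard.sw.ISO7816_4ErrorChecker import ISO7816_4ErrorChecker
#   from smartcard.sw.ISO7816_8ErrorChecker import ISO7816_8ErrorChecker
#   from smartcard.sw.SWExceptions import WarningProcessingException
#
#   errorchain = []
#   errorchain = [ErrorCheckingChain(errorchain, ISO7816_8ErrorChecker()),
#                 ErrorCheckingChain(errorchain, ISO7816_4ErrorChecker())]
#   errorchain[0].addFilterException(WarningProcessingException)
#   errorchain[0]([], 0x90, 0x00)   # success status word -> no exception raised
#   errorchain[0]([], 0x6A, 0x82)   # "file not found" -> raises a SWException subclass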
|
py | 1a317eff669d1987d2a0fec1242e11364b684fce | # -*- coding: utf-8 -*-
"""The `Common Sense Knowledge Graph <https://github.com/usc-isi-i2/cskg>`_ dataset.
- GitHub Repository: https://github.com/usc-isi-i2/cskg
- Paper: https://arxiv.org/pdf/2012.11490.pdf
- Data download: https://zenodo.org/record/4331372/files/cskg.tsv.gz
"""
import logging
from .base import SingleTabbedDataset
from ..typing import TorchRandomHint
__all__ = [
'CSKG',
]
URL = 'https://zenodo.org/record/4331372/files/cskg.tsv.gz'
class CSKG(SingleTabbedDataset):
"""The CSKG dataset.
The CSKG combines several knowledge graphs with "common sense" knowledge. It contains
2,087,833 entities, 58 relations, and 5,748,411 triples.
.. [ilievski2020] Ilievski, F., Szekely, P., & Zhang, B. (2020). `CSKG: The CommonSense Knowledge
Graph <http://arxiv.org/abs/2012.11490>`_. *arxiv*, 2012.11490.
"""
def __init__(self, create_inverse_triples: bool = False, random_state: TorchRandomHint = 0, **kwargs):
"""Initialize the `CSKG <https://github.com/usc-isi-i2/cskg>`_ dataset from [ilievski2020]_.
:param create_inverse_triples: Should inverse triples be created? Defaults to false.
:param random_state: The random seed to use in splitting the dataset. Defaults to 0.
:param kwargs: keyword arguments passed to :class:`pykeen.datasets.base.SingleTabbedDataset`.
"""
super().__init__(
url=URL,
create_inverse_triples=create_inverse_triples,
random_state=random_state,
read_csv_kwargs=dict(
usecols=['node1', 'relation', 'node2'],
),
**kwargs,
)
def _main():
ds = CSKG(eager=True)
ds.summarize()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
_main()
|
py | 1a318122aa9c231ffa7504ba425074fa94f66fdc | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from django.db import models
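# Translation of the Chinese verbose names used below: 乘数 = multiplier, 被乘数 = multiplicand,
# 结果 = result, 源主机名 = source hostname, 目标IP = target IP, 目标端口 = target port,
# 状态 = state, 协议 = protocol, 扫描时间 = scan time, 执行主机 = executing host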
class MultRecode(models.Model):
multiplier = models.IntegerField(u"乘数")
multiplicand = models.IntegerField(u"被乘数")
mult_result = models.IntegerField(u"结果")
class PortScan(models.Model):
source_hostname = models.CharField(u"源主机名", max_length=80)
target_ip = models.CharField(u"目标IP",max_length=1024)
target_port = models.CharField(u"目标端口",max_length=1024)
state = models.CharField(u"状态",max_length=20)
protocol = models.CharField(u"协议",max_length=10)
scan_time = models.DateTimeField(u"扫描时间", auto_now=True)
class PortScanPara(models.Model):
source_hostname = models.CharField(u"源主机名", max_length=80)
target_ip = models.CharField(u"目标IP",max_length=1024)
target_port = models.CharField(u"目标端口",max_length=1024)
protocol = models.CharField(u"协议",max_length=10)
oper_time = models.DateTimeField(u"扫描时间", auto_now=True)
opere_hostname = models.CharField(u"执行主机", max_length=80)
|
py | 1a318182279ff2884911fa6b6e6cbdb776257783 | """tasks_2_2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "TASKS-2"
admin.site.site_title = "TASKS-2 Admin Portal"
admin.site.index_title = "TASKS-2 Admin"
# swagger
api_info = openapi.Info(
title="TASKS-2 API",
default_version="v1",
description="API documentation for TASKS-2 App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
py | 1a3182ec4bc06492458b60ccee3fd5b5556f6172 | # Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
CONF = cfg.CONF
pool_manager_group = cfg.OptGroup(
name='service:pool_manager', title="Configuration for Pool Manager Service"
)
OPTS = [
cfg.IntOpt('workers',
help='Number of Pool Manager worker processes to spawn'),
cfg.IntOpt('threads', default=1000,
help='Number of Pool Manager greenthreads to spawn'),
cfg.StrOpt('pool-id', default='794ccc2c-d751-44fe-b57f-8894c9f5c842',
help='The ID of the pool managed by this instance of the '
'Pool Manager'),
cfg.IntOpt('threshold-percentage', default=100,
help='The percentage of servers requiring a successful update '
'for a zone change to be considered active',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.IntOpt('poll-timeout', default=30,
help='The time to wait for a response from a server',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.IntOpt('poll-retry-interval', default=15,
help='The time between retrying to send a request and '
'waiting for a response from a server',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.IntOpt('poll-max-retries', default=10,
help='The maximum number of times to retry sending a request '
'and wait for a response from a server',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.IntOpt('poll-delay', default=5,
help='The time to wait before sending the first request '
'to a server',
deprecated_for_removal=True,
deprecated_reason='Migrated to designate-worker'),
cfg.BoolOpt('enable-recovery-timer', default=True,
help='The flag for the recovery timer'),
cfg.IntOpt('periodic-recovery-interval', default=120,
help='The time between recovering from failures'),
cfg.BoolOpt('enable-sync-timer', default=True,
help='The flag for the sync timer'),
cfg.IntOpt('periodic-sync-interval', default=1800,
help='The time between synchronizing the servers with storage'),
cfg.IntOpt('periodic-sync-seconds', default=21600,
               help='Zones Updated within last N seconds will be synced. '
                    'Use an empty value to sync all zones.'),
cfg.IntOpt('periodic-sync-max-attempts', default=3,
help='Number of attempts to update a zone during sync'),
cfg.IntOpt('periodic-sync-retry-interval', default=30,
help='Interval between zone update attempts during sync'),
cfg.StrOpt('cache-driver', default='memcache',
help='The cache driver to use'),
cfg.StrOpt('pool_manager_topic', default='pool_manager',
help='RPC topic name for pool-manager')
]
def register_dynamic_pool_options():
# Pool Options Registration Pass One
# Find the Current Pool ID
pool_id = CONF['service:pool_manager'].pool_id
# Build the [pool:<id>] config section
pool_group = cfg.OptGroup('pool:%s' % pool_id)
pool_opts = [
cfg.ListOpt('targets', default=[]),
cfg.ListOpt('nameservers', default=[]),
cfg.ListOpt('also_notifies', default=[]),
]
CONF.register_group(pool_group)
CONF.register_opts(pool_opts, group=pool_group)
# Pool Options Registration Pass Two
# Find the Current Pools Target ID's
pool_target_ids = CONF['pool:%s' % pool_id].targets
# Build the [pool_target:<id>] config sections
pool_target_opts = [
cfg.StrOpt('type'),
cfg.ListOpt('masters', default=[]),
cfg.DictOpt('options', default={}, secret=True),
]
for pool_target_id in pool_target_ids:
pool_target_group = cfg.OptGroup('pool_target:%s' % pool_target_id)
CONF.register_group(pool_target_group)
CONF.register_opts(pool_target_opts, group=pool_target_group)
# Find the Current Pools Nameserver ID's
pool_nameserver_ids = CONF['pool:%s' % pool_id].nameservers
# Build the [pool_nameserver:<id>] config sections
pool_nameserver_opts = [
cfg.StrOpt('host'),
cfg.IntOpt('port'),
]
for pool_nameserver_id in pool_nameserver_ids:
pool_nameserver_group = cfg.OptGroup(
'pool_nameserver:%s' % pool_nameserver_id)
CONF.register_group(pool_nameserver_group)
CONF.register_opts(pool_nameserver_opts, group=pool_nameserver_group)
cfg.CONF.register_group(pool_manager_group)
cfg.CONF.register_opts(OPTS, group=pool_manager_group)
def list_opts():
yield pool_manager_group, OPTS
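# --- Illustrative example (not part of the original module) ------------------
# register_dynamic_pool_options() expects configuration sections shaped like the
# sketch below (IDs and addresses are made up for illustration):
#
#   [service:pool_manager]
#   pool_id = 794ccc2c-d751-44fe-b57f-8894c9f5c842
#
#   [pool:794ccc2c-d751-44fe-b57f-8894c9f5c842]
#   targets = f26e0b32-736f-4f0a-831b-039a415c481e
#   nameservers = c5d64303-4cba-425a-9f3c-5d708584dde4
#
#   [pool_target:f26e0b32-736f-4f0a-831b-039a415c481e]
#   type = bind9
#   masters = 192.0.2.1:53
#
#   [pool_nameserver:c5d64303-4cba-425a-9f3c-5d708584dde4]
#   host = 192.0.2.2
#   port = 53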
|
py | 1a3182f052d2311169ec0c58854f591b03586a6b | __version__ = '1.0.0'
palette = (
(0x00,0x00,0x00),(0x00,0x00,0x00),(0x08,0x08,0x08),(0x10,0x10,0x10),
(0x18,0x18,0x18),(0x20,0x20,0x20),(0x28,0x28,0x28),(0x30,0x30,0x30),
(0x38,0x38,0x38),(0x40,0x40,0x40),(0x48,0x48,0x48),(0x50,0x50,0x50),
(0x54,0x54,0x54),(0x58,0x58,0x58),(0x60,0x60,0x60),(0x68,0x68,0x68),
(0x70,0x70,0x70),(0x78,0x78,0x78),(0x80,0x80,0x80),(0x88,0x88,0x88),
(0x94,0x94,0x94),(0x9c,0x9c,0x9c),(0xa8,0xa8,0xa8),(0xb4,0xb4,0xb4),
(0xb8,0xb8,0xb8),(0xc4,0xc4,0xc4),(0xcc,0xcc,0xcc),(0xd4,0xd4,0xd4),
(0xe0,0xe0,0xe0),(0xe8,0xe8,0xe8),(0xf0,0xf0,0xf0),(0xfc,0xfc,0xfc),
(0x08,0x08,0x0c),(0x10,0x10,0x14),(0x18,0x18,0x1c),(0x1c,0x20,0x24),
(0x24,0x24,0x2c),(0x2c,0x2c,0x34),(0x30,0x34,0x3c),(0x38,0x38,0x44),
(0x40,0x40,0x48),(0x4c,0x4c,0x58),(0x5c,0x5c,0x68),(0x6c,0x70,0x80),
(0x80,0x84,0x98),(0x98,0x9c,0xb0),(0xa8,0xac,0xc4),(0xbc,0xc4,0xdc),
(0x20,0x18,0x14),(0x28,0x20,0x1c),(0x30,0x24,0x20),(0x34,0x2c,0x28),
(0x3c,0x34,0x2c),(0x44,0x38,0x34),(0x4c,0x40,0x38),(0x54,0x48,0x40),
(0x5c,0x4c,0x48),(0x64,0x54,0x4c),(0x6c,0x5c,0x54),(0x70,0x60,0x58),
(0x78,0x68,0x60),(0x80,0x70,0x64),(0x88,0x74,0x6c),(0x90,0x7c,0x70),
(0x14,0x18,0x14),(0x1c,0x20,0x1c),(0x20,0x24,0x20),(0x28,0x2c,0x28),
(0x2c,0x30,0x2c),(0x30,0x38,0x30),(0x38,0x40,0x38),(0x40,0x44,0x40),
(0x44,0x4c,0x44),(0x54,0x5c,0x54),(0x68,0x70,0x68),(0x78,0x80,0x78),
(0x8c,0x94,0x88),(0x9c,0xa4,0x98),(0xac,0xb4,0xa8),(0xbc,0xc4,0xb8),
(0x30,0x20,0x08),(0x3c,0x28,0x08),(0x48,0x30,0x10),(0x54,0x38,0x14),
(0x5c,0x40,0x1c),(0x64,0x48,0x24),(0x6c,0x50,0x2c),(0x78,0x5c,0x34),
(0x88,0x68,0x3c),(0x94,0x74,0x48),(0xa0,0x80,0x54),(0xa8,0x88,0x5c),
(0xb4,0x90,0x64),(0xbc,0x98,0x6c),(0xc4,0xa0,0x74),(0xcc,0xa8,0x7c),
(0x10,0x14,0x10),(0x14,0x1c,0x14),(0x18,0x20,0x18),(0x1c,0x24,0x1c),
(0x20,0x2c,0x20),(0x24,0x30,0x24),(0x28,0x38,0x28),(0x2c,0x3c,0x2c),
(0x30,0x44,0x30),(0x34,0x4c,0x34),(0x3c,0x54,0x3c),(0x44,0x5c,0x40),
(0x4c,0x64,0x48),(0x54,0x6c,0x4c),(0x5c,0x74,0x54),(0x64,0x80,0x5c),
(0x18,0x0c,0x08),(0x20,0x10,0x08),(0x28,0x14,0x08),(0x34,0x18,0x0c),
(0x3c,0x1c,0x0c),(0x44,0x20,0x0c),(0x4c,0x24,0x10),(0x54,0x2c,0x14),
(0x5c,0x30,0x18),(0x64,0x38,0x1c),(0x70,0x40,0x20),(0x78,0x48,0x24),
(0x80,0x50,0x2c),(0x90,0x5c,0x38),(0xa8,0x70,0x48),(0xc0,0x84,0x58),
(0x18,0x04,0x04),(0x24,0x04,0x04),(0x30,0x00,0x00),(0x3c,0x00,0x00),
(0x44,0x00,0x00),(0x50,0x00,0x00),(0x58,0x00,0x00),(0x64,0x00,0x00),
(0x70,0x00,0x00),(0x84,0x00,0x00),(0x98,0x00,0x00),(0xac,0x00,0x00),
(0xc0,0x00,0x00),(0xd4,0x00,0x00),(0xe8,0x00,0x00),(0xfc,0x00,0x00),
(0x10,0x0c,0x20),(0x1c,0x14,0x30),(0x20,0x1c,0x38),(0x28,0x24,0x44),
(0x34,0x2c,0x50),(0x3c,0x38,0x5c),(0x44,0x40,0x68),(0x50,0x48,0x74),
(0x58,0x54,0x80),(0x64,0x60,0x8c),(0x6c,0x6c,0x98),(0x78,0x74,0xa4),
(0x84,0x84,0xb0),(0x90,0x90,0xbc),(0x9c,0x9c,0xc8),(0xac,0xac,0xd4),
(0x24,0x14,0x04),(0x34,0x18,0x04),(0x44,0x20,0x04),(0x50,0x28,0x00),
(0x64,0x30,0x04),(0x7c,0x3c,0x04),(0x8c,0x48,0x04),(0x9c,0x58,0x08),
(0xac,0x64,0x08),(0xbc,0x74,0x0c),(0xcc,0x80,0x0c),(0xdc,0x90,0x10),
(0xec,0xa0,0x14),(0xfc,0xb8,0x38),(0xf8,0xc8,0x50),(0xf8,0xdc,0x78),
(0x14,0x10,0x04),(0x1c,0x18,0x08),(0x24,0x20,0x08),(0x2c,0x28,0x0c),
(0x34,0x30,0x10),(0x38,0x38,0x10),(0x40,0x40,0x14),(0x44,0x48,0x18),
(0x48,0x50,0x1c),(0x50,0x5c,0x20),(0x54,0x68,0x28),(0x58,0x74,0x2c),
(0x5c,0x80,0x34),(0x5c,0x8c,0x34),(0x5c,0x94,0x38),(0x60,0xa0,0x40),
(0x3c,0x10,0x10),(0x48,0x18,0x18),(0x54,0x1c,0x1c),(0x64,0x24,0x24),
(0x70,0x2c,0x2c),(0x7c,0x34,0x30),(0x8c,0x40,0x38),(0x98,0x4c,0x40),
(0x2c,0x14,0x08),(0x38,0x1c,0x0c),(0x48,0x20,0x10),(0x54,0x28,0x14),
(0x60,0x2c,0x1c),(0x70,0x34,0x20),(0x7c,0x38,0x28),(0x8c,0x40,0x30),
(0x18,0x14,0x10),(0x24,0x1c,0x14),(0x2c,0x24,0x1c),(0x38,0x2c,0x20),
(0x40,0x34,0x24),(0x48,0x3c,0x2c),(0x50,0x44,0x30),(0x5c,0x4c,0x34),
(0x64,0x54,0x3c),(0x70,0x5c,0x44),(0x78,0x64,0x48),(0x84,0x70,0x50),
(0x90,0x78,0x58),(0x98,0x80,0x60),(0xa0,0x88,0x68),(0xa8,0x94,0x70),
(0x24,0x18,0x0c),(0x2c,0x20,0x10),(0x34,0x28,0x14),(0x3c,0x2c,0x14),
(0x48,0x34,0x18),(0x50,0x3c,0x1c),(0x58,0x44,0x1c),(0x68,0x4c,0x20),
(0x94,0x60,0x38),(0xa0,0x6c,0x40),(0xac,0x74,0x48),(0xb4,0x7c,0x50),
(0xc0,0x84,0x58),(0xcc,0x8c,0x5c),(0xd8,0x9c,0x6c),(0x3c,0x14,0x5c),
(0x64,0x24,0x74),(0xa8,0x48,0xa4),(0xcc,0x6c,0xc0),(0x04,0x54,0x04),
(0x04,0x84,0x04),(0x00,0xb4,0x00),(0x00,0xd8,0x00),(0x04,0x04,0x90),
(0x10,0x44,0xcc),(0x24,0x84,0xe0),(0x58,0xa8,0xe8),(0xd8,0x04,0x04),
(0xf4,0x48,0x00),(0xfc,0x80,0x00),(0xfc,0xac,0x18),(0xfc,0xfc,0xfc)
)
"""256 color palette of RGB three-tuples."""
|
py | 1a318349dadd71ff19c73e8f0bcbb3f6740d772d | # 1: +train 2: -train 3: +test 4:-test
# http://pongor.itk.ppke.hu/benchmark/#/Benchmark_data_formats
import numpy as np
import os
os.system('mkdir Index')
mat = np.empty([1357,55], int)
infile = open('./CAST.txt')
lines = infile.read().splitlines()
for i in range(len(lines)):
line = lines[i]
a = line[7:].split()
for j in range(55):
mat[i,j] = int(a[j])
for i in range(55):
print(i+1)
TrainIndex = []
TestIndex = []
TrainLabel = []
TestLabel = []
for j in range(1357):
if mat[j,i] == 1 or mat[j,i] == 2:
TrainIndex.append(j)
if mat[j,i] == 1:
TrainLabel.append(1)
elif mat[j,i] == 2:
TrainLabel.append(-1)
if mat[j,i] == 3 or mat[j,i] == 4:
TestIndex.append(j)
if mat[j,i] == 3:
TestLabel.append(1)
elif mat[j,i] == 4:
TestLabel.append(-1)
TrainIndex = np.asarray(TrainIndex, int)
TestIndex = np.asarray(TestIndex, int)
TrainLabel = np.asarray(TrainLabel, int)
TestLabel = np.asarray(TestLabel, int)
print(len(TrainIndex), np.sum(TrainLabel), len(TestIndex), np.sum(TestLabel), len(TrainIndex)+len(TestIndex))
outfile = open('./Index/TrainIndex'+str(i+1)+'.npy','wb')
np.save(outfile, TrainIndex)
outfile.close()
outfile = open('./Index/TrainLabel'+str(i+1)+'.npy','wb')
np.save(outfile, TrainLabel)
outfile.close()
outfile = open('./Index/TestIndex'+str(i+1)+'.npy','wb')
np.save(outfile, TestIndex)
outfile.close()
outfile = open('./Index/TestLabel'+str(i+1)+'.npy','wb')
np.save(outfile, TestLabel)
outfile.close()
|
py | 1a3183a235b69730968b7b0c8604fc88f040eabf | from datetime import datetime
from datetime import timedelta
import json
import requests
from requests_toolbelt import MultipartEncoder
import traceback
import types
import modules.botconfig as config
import modules.botlog as botlog
agentSession = requests.Session()
agentSession.cert = config.BotCertificate
agentV2Session = requests.Session()
agentV2Session.cert = config.BotCertificate
v2LastAuth: datetime = None
v2SessionToken = None
v2KeyAuthToken = None
def GetSessionToken():
#botlog.LogConsoleInfo(config.SessionAuthEP)
return GetSymphonyAuthToken(config.SessionAuthEP)
def GetKeyManagerToken():
#botlog.LogConsoleInfo(config.KeyManagerEP)
return GetSymphonyAuthToken(config.KeyManagerEP)
def GetSymphonyAuthToken(authEndpoint):
response = SymphonyREST('AUTH', authEndpoint, None)
return response.ResponseData.token
def BuildHeaders(sessionToken, keyAuthToken, contentType="application/json"):
RESTheaders = {
"sessionToken": sessionToken,
"keyManagerToken": keyAuthToken,
"Content-Type": contentType,
"User-Agent": "SymphonyZendeskBot (Alex Nalin - API Engineer - [email protected])"
}
return RESTheaders
def SymphonyReAuth():
global agentSession
sessionToken = GetSessionToken()
keyAuthToken = GetKeyManagerToken()
# RESTHeaders = {"sessionToken": sessionToken, "keyManagerToken": keyAuthToken,
# "Content-Type": "application/json"}
RESTHeaders = BuildHeaders(sessionToken, keyAuthToken)
# Attempting to use requests.Session
agentSession.headers.update(RESTHeaders)
def SymphonyGET(endpoint):
return SymphonyREST('GET', endpoint, None)
def SymphonyPOST(endpoint, body):
return SymphonyREST('POST', endpoint, body)
def SymphonyPOSTV2(endpoint, body):
return SymphonyREST('POSTV2', endpoint, body)
def SymphonyPOSTV2_1(endpoint, body):
return SymphonyREST('POSTV2_1', endpoint, body)
def SymphonyREST(method, endpoint, body):
retVal = SymphonyAgentResponse()
# Allowing for reauth from the async process
if method != 'AUTH' and 'sessionToken' not in agentSession.headers:
SymphonyReAuth()
try:
if method == 'GET':
response = agentSession.get(endpoint)
elif method == 'POST':
response = agentSession.post(endpoint, data=body)
elif method == 'POSTV2':
response = PostV2(endpoint, body)
elif method == 'POSTV2_1':
response = PostV2_1(endpoint, body)
elif method == 'AUTH':
response = agentSession.post(endpoint)
else:
raise MethodNotImplementedException(method + ' is not yet implemented.')
retVal.ResponseText = response.text
retVal.ResponseCode = response.status_code
if response.status_code == 200:
retVal.Success = True
retVal.ParseResponseJSON()
elif response.status_code // 100 == 2: # Any other 200 code, not success but don't throw exception
retVal.Success = True
else:
response.raise_for_status()
except requests.exceptions.HTTPError as httpex:
errorStr = "Symphony REST Exception (http): " + str(httpex)
botlog.LogConsoleInfo("Response Code: " + str(response.status_code))
botlog.LogConsoleInfo("Response Message: " + response.text)
retVal.ErrorMessage = errorStr
stackTrace = 'Stack Trace: ' + ''.join(traceback.format_stack())
botlog.LogSymphonyError(errorStr)
botlog.LogSymphonyError(stackTrace)
except requests.exceptions.RequestException as connex:
errorStr = "Symphony REST Exception (connection - Status Code " + str(response.status_code) + \
"): " + str(connex)
retVal.ErrorMessage = errorStr
stackTrace = 'Stack Trace: ' + ''.join(traceback.format_stack())
botlog.LogSymphonyError(errorStr)
botlog.LogSymphonyError(stackTrace)
except Exception as ex:
errorStr = "Symphony REST Exception (system): " + str(ex)
retVal.ErrorMessage = errorStr
stackTrace = 'Stack Trace: ' + ''.join(traceback.format_stack())
botlog.LogSystemError(errorStr)
botlog.LogSystemError(stackTrace)
finally:
return retVal
# def PostV2(endpoint, body):
# encoder = MultipartEncoder(fields=body)
#
# v2SessionToken = GetSessionToken()
# v2KeyAuthToken = GetKeyManagerToken()
#
# v2Headers = {"sessionToken": v2SessionToken, "keyManagerToken": v2KeyAuthToken,
# "Content-Type": encoder.content_type}
#
# agentV2Session.headers.update(v2Headers)
#
# return agentV2Session.post(endpoint, data=encoder)
def PostV2(endpoint, body):
global v2LastAuth
global v2SessionToken
global v2KeyAuthToken
global agentV2Session
if v2SessionToken is None or v2LastAuth is None or datetime.now() > v2LastAuth + timedelta(days=2):
v2SessionToken = GetSessionToken()
v2KeyAuthToken = GetKeyManagerToken()
v2LastAuth = datetime.now()
encoder = MultipartEncoder(fields=body)
v2Headers = BuildHeaders(v2SessionToken, v2KeyAuthToken, encoder.content_type)
agentV2Session.headers.update(v2Headers)
return agentV2Session.post(endpoint, data=encoder)
# Does not work
# I believe the problem is the Content-Type header, which does not include the boundary
# statement. If I am prepared to build the boundary myself, I might be able to get this
# to work without the requests_toolbelt package
def PostV2_1(endpoint, body):
import io
ph = io.StringIO("")
tempSession = requests.Session()
tempSession.cert = config.BotCertificate
tempSessionToken = GetSessionToken()
tempKeyAuthToken = GetKeyManagerToken()
tempHeaders = {"sessionToken": tempSessionToken, "keyManagerToken": tempKeyAuthToken,
"Content-Type": "multipart/form-data"}
tempSession.headers.update(tempHeaders)
return tempSession.post(endpoint, data=body, files=ph)
class SymphonyAgentResponse:
def __init__(self):
self.Success = False
self.ResponseText = ''
self.ResponseCode = 0
self.ErrorMessage = ''
self.ResponseData = {}
def ParseResponseJSON(self):
self.ResponseData = json.loads(self.ResponseText, object_hook=lambda d: types.SimpleNamespace(**d))
class JSONData:
def __init__(self, jsonStr):
self.__dict__ = json.loads(jsonStr)
class MethodNotImplementedException(Exception):
pass
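# --- Illustrative usage sketch (not part of the original module) -------------
# The endpoint below is hypothetical; real endpoints come from modules.botconfig.
#
#   SymphonyReAuth()                                   # authenticate and populate session headers
#   response = SymphonyGET(config.AgentBaseURL + "/v2/sessioninfo")   # hypothetical endpoint
#   if response.Success:
#       print(response.ResponseData)
#   else:
#       print(response.ErrorMessage)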
|
py | 1a31847430e0a6b568b4a65d4764c864353f06a7 | from google.appengine.ext import ndb
from models.event import Event
from models.team import Team
class EventTeam(ndb.Model):
"""
EventTeam serves as a join model between Events and Teams, indicating that
a team will or has competed in an Event.
key_name is like 2010cmp_frc177 or 2007ct_frc195
"""
event = ndb.KeyProperty(kind=Event)
team = ndb.KeyProperty(kind=Team)
year = ndb.IntegerProperty()
created = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
updated = ndb.DateTimeProperty(auto_now=True, indexed=False)
def __init__(self, *args, **kw):
# store set of affected references referenced keys for cache clearing
# keys must be model properties
self._affected_references = {
'event': set(),
'team': set(),
'year': set(),
}
super(EventTeam, self).__init__(*args, **kw)
@property
def key_name(self):
return self.event.id() + "_" + self.team.id()
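# --- Illustrative usage sketch (not part of the original module) -------------
# Building the join entity with the documented key_name convention:
#
#   event_key = ndb.Key(Event, "2010cmp")
#   team_key = ndb.Key(Team, "frc177")
#   event_team = EventTeam(id="2010cmp_frc177", event=event_key, team=team_key, year=2010)
#   event_team.put()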
|
py | 1a31858544cec1f0ca15226200dcfe6cc09f7d2d | # -*- coding: UTF-8 -*-
import unittest
from ytcc.download import Download
from unittest.mock import patch, mock_open, Mock
from test.fixtures.webvtt import FIXTURE_WEBVTT
from colorama import Fore, Style
from ytcc.download import NoCaptionsException
def red(input):
return Fore.RED + input + Style.RESET_ALL
class TestCaptions(unittest.TestCase):
def test_caption(self):
tests = [{'name': '1 video, caption found',
'urls': ['https://www.swag.com/'],
'pattern': 'vision',
'regex': False,
'links': False,
'expected': '[00:00:17.350 --> 00:00:18.752] we have this ' + red('vision') + ' of einstein'},
{'name': '1 video, caption not found',
'urls': ['https://www.swag.com/'],
'pattern': 'iwontbefound',
'regex': False,
'links': False,
'expected': '',
},
{'name': '1 video, caption found more than once',
'urls': ['https://www.swag.com/'],
'pattern': 'light',
'regex': False,
'links': False,
'expected': '[00:00:33.666 --> 00:00:38.138] actor as einstein: what ' + red('light') + ' would i see if i rode on a beam of ' + red('light') + '?',
},
{'name': '1 video, regular expression',
'urls': ['https://www.swag.com/'],
'pattern': 'actor|light',
'regex': True,
'links': False,
'expected': '[00:00:33.666 --> 00:00:38.138] ' + red('actor') + ' as einstein: what ' + red('light') + ' would i see if i rode on a beam of ' + red('light') + '?',
},
{'name': '1 video, 1 link',
'urls': ['https://www.swag.com/'],
'pattern': 'actor|light',
'regex': True,
'links': True,
'expected': '[00:00:33.666 --> 00:00:38.138] ' + red('actor') + ' as einstein: what ' + red('light') + ' would i see if i rode on a beam of ' + red('light') + '? (https://www.swag.com/&t=33s)',
},
]
for test in tests:
download = Download({'urls': test['urls'],
'pattern': test['pattern'],
'e': test['regex'],
'v': False,
'links': test['links']})
m = mock_open(read_data=FIXTURE_WEBVTT)
with patch('ytcc.download.open', m, create=True):
with patch('ytcc.storage.Storage.remove_file', Mock()):
download.get_result = Mock(return_value=0)
actual = download.get_captions()
expected = test['expected']
self.assertEqual(actual, expected)
def test_caption_captions_do_not_exist(self):
test = {
'name': 'captions do not exist',
'urls': ['https://www.swag.com/'],
'pattern': 'my pattern',
'regex': False,
'links': False,
}
download = Download({'urls': test['urls'],
'pattern': test['pattern'],
'e': test['regex'],
'v': False,
'links': test['links']})
m = mock_open(read_data=FIXTURE_WEBVTT)
m.side_effect = FileNotFoundError
with patch('ytcc.download.open', m, create=True):
with patch('ytcc.storage.Storage.remove_file', Mock()):
download.get_result = Mock(return_value=0)
with self.assertRaises(NoCaptionsException):
download.get_captions()
|
py | 1a318589af5d28da0b5a461b3485bd18717ea4b8 | #!/usr/bin/env python
import urllib
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
# print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
if req.get("result").get("action") != "landline":
return {}
baseurl = "https://vittley.com/bot.json"
yql_query = makeYqlQuery(req)
if yql_query is None:
return {}
yql_url = baseurl + urllib.urlencode({'q': yql_query}) + "&format=json"
result = urllib.urlopen(yql_url).read()
data = json.loads(result)
res = makeWebhookResult(data)
return res
def makeYqlQuery(req):
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("store")
if city is None:
return None
return "select * from weather.forecast where woeid in (select woeid from geo.places(1) where text='" + city + "')"
def makeWebhookResult(data):
query = data.get('query')
if query is None:
return {}
result = query.get('results')
if result is None:
return {}
channel = result.get('channel')
if channel is None:
return {}
item = channel.get('item')
location = channel.get('location')
units = channel.get('units')
if (location is None) or (item is None) or (units is None):
return {}
condition = item.get('condition')
if condition is None:
return {}
# print(json.dumps(item, indent=4))
speech = "Today in " + location.get('city') + ": " + condition.get('text') + \
", the temperature is " + condition.get('temp') + " " + units.get('temperature')
print("Response:")
print(speech)
return {
"speech": "No fucking idea"
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-weather-webhook-sample"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print "Starting app on port %d" % port
app.run(debug=False, port=port, host='0.0.0.0')
|
py | 1a31868622f8c161eaa76acad261cfb80d8ed036 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from .base import BaseHandler
from ..dao.notice_dao import NoticeDao
logger = logging.getLogger('nebula.api.batch_notice')
class BatchBWListHandler(BaseHandler):
def get(self):
"""
批量查询当前未过期黑白灰名单的值集合接口.
@API
summary: 批量查询当前未过期黑白灰名单的值集合接口.
notes: 批量查询当前未过期黑白灰名单的值集合接口, 返回逗号分隔的黑白灰名单值.
tags:
- platform
parameters:
-
name: strategy
in: query
required: false
type: string
description: filter by strategy name
-
name: scene_type
in: query
required: false
type: string
description: scene type filter statement, ex. login, visit
-
name: check_type
in: query
required: false
type: string
default: IP
description: check Type filter statement, ex. IP, MOBILE
-
name: decision
in: query
required: false
default: reject
type: string
description: decision filter statement, ex. accept, reject
-
name: test
in: query
required: false
type: string
default: false
description: test notice is test or production
produces:
- text/plain
"""
strategy = self.get_argument('strategy', None)
scene_type = self.get_argument('scene_type', None)
check_type = self.get_argument('check_type', 'IP')
decision = self.get_argument('decision', "reject")
test = self.get_argument('test', 'false')
if test == "true":
test = 1
elif test == "false":
test = 0
else:
test = None
result = ''
try:
ND = NoticeDao()
data = ND.get_unexpired_notice_data(strategy=strategy, check_type=check_type, decision=decision, test=test,
scene_type=scene_type)
result = ",".join(data)
except Exception as err:
logger.error(err)
self.set_header('content-type', 'text/plain')
self.write(result)
|
py | 1a31869e6696a3db23c05459e923be52810b2c61 | """This component provides Switches for Unifi Protect."""
import logging
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import HomeAssistant
from .const import (
ATTR_DEVICE_MODEL,
CONF_IR_OFF,
CONF_IR_ON,
DEFAULT_ATTRIBUTION,
DOMAIN,
TYPE_HIGH_FPS_ON,
TYPE_RECORD_ALWAYS,
TYPE_RECORD_MOTION,
TYPE_RECORD_NEVER,
TYPE_RECORD_OFF,
TYPE_RECORD_SMARTDETECT,
)
from .entity import UnifiProtectEntity
_LOGGER = logging.getLogger(__name__)
_SWITCH_NAME = 0
_SWITCH_ICON = 1
_SWITCH_TYPE = 2
_SWITCH_REQUIRES = 3
SWITCH_TYPES = {
"record_motion": [
"Record Motion",
"video-outline",
"record_motion",
"recording_mode",
],
"record_always": ["Record Always", "video", "record_always", "recording_mode"],
"record_smart": ["Record Smart", "video", "record_smart", "has_smartdetect"],
"ir_mode": ["IR Active", "brightness-4", "ir_mode", "ir_mode"],
"status_light": ["Status Light On", "led-on", "status_light", None],
"hdr_mode": ["HDR Mode", "brightness-7", "hdr_mode", "has_hdr"],
"high_fps": ["High FPS", "video-high-definition", "high_fps", "has_highfps"],
"light_motion": [
"Light when Motion",
"motion-sensor",
"light_motion",
"motion_mode",
],
"light_dark": ["Light when Dark", "motion-sensor", "light_dark", "motion_mode"],
}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up switches for UniFi Protect integration."""
entry_data = hass.data[DOMAIN][entry.entry_id]
upv_object = entry_data["upv"]
protect_data = entry_data["protect_data"]
server_info = entry_data["server_info"]
if not protect_data.data:
return
ir_on = entry.data[CONF_IR_ON]
if ir_on == "always_on":
ir_on = "on"
ir_off = entry.data[CONF_IR_OFF]
if ir_off == "led_off":
ir_off = "autoFilterOnly"
elif ir_off == "always_off":
ir_off = "off"
switches = []
for switch, switch_type in SWITCH_TYPES.items():
required_field = switch_type[_SWITCH_REQUIRES]
for device_id in protect_data.data:
# Only Add Switches if Device supports it.
if required_field and not protect_data.data[device_id].get(required_field):
continue
switches.append(
UnifiProtectSwitch(
upv_object,
protect_data,
server_info,
device_id,
switch,
ir_on,
ir_off,
)
)
_LOGGER.debug("UNIFIPROTECT SWITCH CREATED: %s", switch)
async_add_entities(switches)
class UnifiProtectSwitch(UnifiProtectEntity, SwitchEntity):
"""A Unifi Protect Switch."""
def __init__(
self, upv_object, protect_data, server_info, device_id, switch, ir_on, ir_off
):
"""Initialize an Unifi Protect Switch."""
super().__init__(upv_object, protect_data, server_info, device_id, switch)
self.upv = upv_object
switch_type = SWITCH_TYPES[switch]
self._name = f"{switch_type[_SWITCH_NAME]} {self._device_data['name']}"
self._icon = f"mdi:{switch_type[_SWITCH_ICON]}"
self._ir_on_cmd = ir_on
self._ir_off_cmd = ir_off
self._switch_type = switch_type[_SWITCH_TYPE]
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
if self._switch_type == "record_motion":
return self._device_data["recording_mode"] == TYPE_RECORD_MOTION
if self._switch_type == "record_always":
return self._device_data["recording_mode"] == TYPE_RECORD_ALWAYS
if self._switch_type == "record_smart":
return self._device_data["recording_mode"] == TYPE_RECORD_SMARTDETECT
if self._switch_type == "ir_mode":
return self._device_data["ir_mode"] == self._ir_on_cmd
if self._switch_type == "hdr_mode":
return self._device_data["hdr_mode"] is True
if self._switch_type == "high_fps":
return self._device_data["video_mode"] == TYPE_HIGH_FPS_ON
if self._switch_type == "light_motion":
return self._device_data["motion_mode"] == TYPE_RECORD_MOTION
if self._switch_type == "light_dark":
return self._device_data["motion_mode"] == TYPE_RECORD_ALWAYS
return self._device_data["status_light"] is True
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return {
ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,
ATTR_DEVICE_MODEL: self._model,
}
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
if self._switch_type == "record_motion":
_LOGGER.debug("Turning on Motion Detection for %s", self._name)
await self.upv.set_camera_recording(self._device_id, TYPE_RECORD_MOTION)
elif self._switch_type == "record_always":
_LOGGER.debug("Turning on Constant Recording")
await self.upv.set_camera_recording(self._device_id, TYPE_RECORD_ALWAYS)
elif self._switch_type == "record_smart":
_LOGGER.debug("Turning on SmartDetect Recording")
await self.upv.set_camera_recording(
self._device_id, TYPE_RECORD_SMARTDETECT
)
elif self._switch_type == "ir_mode":
_LOGGER.debug("Turning on IR")
await self.upv.set_camera_ir(self._device_id, self._ir_on_cmd)
elif self._switch_type == "hdr_mode":
_LOGGER.debug("Turning on HDR mode")
await self.upv.set_camera_hdr_mode(self._device_id, True)
elif self._switch_type == "high_fps":
_LOGGER.debug("Turning on High FPS mode")
await self.upv.set_camera_video_mode_highfps(self._device_id, True)
elif self._switch_type == "light_motion":
_LOGGER.debug("Turning on Light Motion detection")
await self.upv.light_settings(
self._device_id, TYPE_RECORD_MOTION, enable_at="fulltime"
)
elif self._switch_type == "light_dark":
_LOGGER.debug("Turning on Light Motion when Dark")
await self.upv.light_settings(
self._device_id, TYPE_RECORD_ALWAYS, enable_at="dark"
)
else:
_LOGGER.debug("Changing Status Light to On")
await self.upv.set_device_status_light(
self._device_id, True, self._device_type
)
await self.protect_data.async_refresh(force_camera_update=True)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
if self._switch_type == "ir_mode":
_LOGGER.debug("Turning off IR")
await self.upv.set_camera_ir(self._device_id, self._ir_off_cmd)
elif self._switch_type == "status_light":
_LOGGER.debug("Changing Status Light to Off")
await self.upv.set_device_status_light(
self._device_id, False, self._device_type
)
elif self._switch_type == "hdr_mode":
_LOGGER.debug("Turning off HDR mode")
await self.upv.set_camera_hdr_mode(self._device_id, False)
elif self._switch_type == "high_fps":
_LOGGER.debug("Turning off High FPS mode")
await self.upv.set_camera_video_mode_highfps(self._device_id, False)
elif self._switch_type == "light_motion":
_LOGGER.debug("Turning off Light Motion detection")
await self.upv.light_settings(self._device_id, TYPE_RECORD_OFF)
elif self._switch_type == "light_dark":
_LOGGER.debug("Turning off Light Motion when Dark")
await self.upv.light_settings(self._device_id, TYPE_RECORD_OFF)
else:
_LOGGER.debug("Turning off Recording")
await self.upv.set_camera_recording(self._device_id, TYPE_RECORD_NEVER)
await self.protect_data.async_refresh(force_camera_update=True)
|
py | 1a31871d32fe5efa1aeb88d8a7d5b77903f08a2e | import urllib
from calcrepo import info
from calcrepo import repo
name = "ticalc"
url = "http://www.ticalc.org/"
enabled = True
class TicalcRepository(repo.CalcRepository):
def formatDownloadUrl(self, url):
return "http://www.ticalc.org" + url
def updateRepoIndexes(self, verbose=False):
self.printd("Reading ticalc.org master index (this will take some time).")
# First read in the text (the only network process involved)
masterIndex = urllib.urlopen('http://www.ticalc.org/pub/master.index').read()
self.printd(" Read in ticalc.org master index.")
# Delete and open new indices
files = self.openIndex(self.index.fileIndex, "files index")
names = self.openIndex(self.index.nameIndex, "names index")
if files is None or names is None:
try:
files.close()
except:
return
# Now, parse the enormous data and write index files
self.printd(" ")
masterIndex = masterIndex[39:]
directory = ""
while len(masterIndex) > 2:
line = masterIndex[:masterIndex.find('\n')]
masterIndex = masterIndex[masterIndex.find('\n') + 1:]
if line == "":
continue
if line[:9] == "Index of ":
dirData = line[9:]
directory = dirData[:dirData.find(" ")]
if verbose:
self.printd(" Caching " + line[9:])
else:
fileData = line[:line.find(" ")]
files.write(directory + '/' + fileData + '\n')
nameData = line[len(fileData)+1:].lstrip()
names.write(nameData + '\n')
# Close the indexes now
files.close()
names.close()
self.printd("Finished updating ticalc.org repo.\n")
def getFileInfo(self, fileUrl, fileName):
#Get the category path for the file
categoryPath = "http://www.ticalc.org/"
splitUrls = fileUrl.split('/')
for splitUrl in splitUrls:
if splitUrl != "" and (not "." in splitUrl):
categoryPath += splitUrl + '/'
#Now open the category page and extract the URL for the file info page
categoryPage = urllib.urlopen(categoryPath, "")
categoryData = categoryPage.read()
categoryPage.close()
index = categoryData.find(fileUrl) - 7
rIndex = categoryData.rfind('A HREF="', 0, index)
infoUrl = categoryData[rIndex + 9:]
infoUrl = "http://www.ticalc.org/" + infoUrl[:infoUrl.find('">')]
#Create a file info object
fileInfo = info.FileInfo(fileUrl, fileName, infoUrl, self.output)
infoPage = urllib.urlopen(infoUrl)
infoText = infoPage.read()
infoPage.close()
#Fill in all the data bits
fileInfo.description = self.getBaseFileData(infoText, "Description")
fileInfo.fileSize = self.getBaseFileData(infoText, "File Size")
fileInfo.fileDate = self.getBaseFileData(infoText, "File Date and Time", 47, 2)
fileInfo.documentation = self.getBaseFileData(infoText, "Documentation Included?")
fileInfo.sourceCode = self.getBaseFileData(infoText, "Source Code")
fileInfo.category = self.getFileCategory(infoText)
fileInfo.author = self.getFileAuthor(infoText)
fileInfo.downloads = self.getNumDownloads(infoText)
fileInfo.repository = self.name
#Print the file info object
fileInfo.printData(self.output)
return fileInfo
def getBaseFileData(self, fileInfo, data, index1 = 47, index2 = 1):
"""Function to initialize the simple data for file info"""
result = fileInfo[fileInfo.find(data):]
result = result[result.find("<FONT ") + index1:]
result = result[:result.find("</FONT>") - index2]
return result
def getFileCategory(self, fileInfo):
"""Function to get the file category for file info"""
category = fileInfo[fileInfo.find("Category"):]
category = category[category.find("<FONT ") + 47:]
category = category[category.find('">') + 2:]
category = category[:category.find("</A></B>") - 0]
return category
def getFileAuthor(self, fileInfo):
"""Function to get the file's author for file info, note that we are pretending that multiple authors do not exist here"""
author = fileInfo[fileInfo.find("Author"):]
author = author[author.find("<FONT ") + 47:]
author = author[author.find('<B>') + 3:]
authormail = author[author.find("mailto:") + 7:]
authormail = authormail[:authormail.find('"')]
author = author[:author.find("</B></A>") - 0]
author = author + " (" + authormail + ")"
return author
def getNumDownloads(self, fileInfo):
"""Function to get the number of times a file has been downloaded"""
downloads = fileInfo[fileInfo.find("FILE INFORMATION"):]
if -1 != fileInfo.find("not included in ranking"):
return "0"
downloads = downloads[:downloads.find(".<BR>")]
downloads = downloads[downloads.find("</A> with ") + len("</A> with "):]
return downloads
def getRepository():
"""Returns the relevant CalcRepository object for this repo file"""
global name, url
return TicalcRepository(name, url)
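# --- Illustrative usage sketch (not part of the original module) -------------
# The file path below is made up; real paths come from the cached master index.
#
#   ticalc = getRepository()
#   ticalc.updateRepoIndexes(verbose=True)
#   info = ticalc.getFileInfo("/pub/83plus/asm/games/example.zip", "example.zip")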
|
py | 1a3187afd37703a7dc62fae6dde36d78cf70018a | # Copyright (c) 2015-2021 Vector 35 Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import ctypes
import json
from datetime import datetime
# Binary Ninja components
import binaryninja
from binaryninja import _binaryninjacore as core
from binaryninja.enums import PluginType, PluginStatus
# 2-3 compatibility
from binaryninja import range
class RepoPlugin(object):
"""
``RepoPlugin`` is mostly read-only, however you can install/uninstall enable/disable plugins. RepoPlugins are
created by parsing the plugins.json in a plugin repository.
"""
def __init__(self, handle):
self.handle = core.handle_of_type(handle, core.BNRepoPlugin)
def __del__(self):
core.BNFreePlugin(self.handle)
def __repr__(self):
return "<{} {}/{}>".format(self.path, "installed" if self.installed else "not-installed", "enabled" if self.enabled else "disabled")
@property
def path(self):
"""Relative path from the base of the repository to the actual plugin"""
return core.BNPluginGetPath(self.handle)
@property
def dependencies(self):
"""Dependencies required for installing this plugin"""
return core.BNPluginGetDependencies(self.handle)
@property
def installed(self):
"""Boolean True if the plugin is installed, False otherwise"""
return core.BNPluginIsInstalled(self.handle)
def install(self):
"""Attempt to install the given plugin"""
self.install_dependencies()
return core.BNPluginInstall(self.handle)
def uninstall(self):
"""Attempt to uninstall the given plugin"""
return core.BNPluginUninstall(self.handle)
@installed.setter
def installed(self, state):
if state:
self.install_dependencies()
return core.BNPluginInstall(self.handle)
else:
return core.BNPluginUninstall(self.handle)
def install_dependencies(self):
return core.BNPluginInstallDependencies(self.handle)
@property
def enabled(self):
"""Boolean True if the plugin is currently enabled, False otherwise"""
return core.BNPluginIsEnabled(self.handle)
@enabled.setter
def enabled(self, state):
if state:
return core.BNPluginEnable(self.handle, False)
else:
return core.BNPluginDisable(self.handle)
def enable(self, force=False):
"""
Enable this plugin, optionally trying to force it. \
Force loading a plugin with ignore platform and api constraints. \
(e.g. The plugin author says the plugin will only work on Linux-python3 but you'd like to \
attempt to load it on Macos-python2)
"""
return core.BNPluginEnable(self.handle, force)
@property
def api(self):
"""String indicating the API used by the plugin"""
result = []
count = ctypes.c_ulonglong(0)
platforms = core.BNPluginGetApis(self.handle, count)
for i in range(count.value):
result.append(platforms[i].decode("utf-8"))
core.BNFreePluginPlatforms(platforms, count)
return result
@property
def description(self):
"""String short description of the plugin"""
return core.BNPluginGetDescription(self.handle)
@property
def license(self):
"""String short license description (ie MIT, BSD, GPLv2, etc)"""
return core.BNPluginGetLicense(self.handle)
@property
def license_text(self):
"""String complete license text for the given plugin"""
return core.BNPluginGetLicenseText(self.handle)
@property
def long_description(self):
"""String long description of the plugin"""
return core.BNPluginGetLongdescription(self.handle)
@property
def minimum_version(self):
"""String minimum version the plugin was tested on"""
return core.BNPluginGetMinimumVersion(self.handle)
@property
def name(self):
"""String name of the plugin"""
return core.BNPluginGetName(self.handle)
@property
def plugin_types(self):
"""List of PluginType enumeration objects indicating the plugin type(s)"""
result = []
count = ctypes.c_ulonglong(0)
plugintypes = core.BNPluginGetPluginTypes(self.handle, count)
for i in range(count.value):
result.append(PluginType(plugintypes[i]))
core.BNFreePluginTypes(plugintypes)
return result
@property
def project_url(self):
"""String URL of the plugin's git repository"""
return core.BNPluginGetProjectUrl(self.handle)
@property
def package_url(self):
"""String URL of the plugin's zip file"""
return core.BNPluginGetPackageUrl(self.handle)
@property
def author_url(self):
"""String URL of the plugin author's url"""
return core.BNPluginGetAuthorUrl(self.handle)
@property
def author(self):
"""String of the plugin author"""
return core.BNPluginGetAuthor(self.handle)
@property
def version(self):
"""String version of the plugin"""
return core.BNPluginGetVersion(self.handle)
def install_instructions(self, platform):
"""
Installation instructions for the given platform
:param str platform: One of the valid platforms "Windows", "Linux", "Darwin"
:return: String of the installation instructions for the provided platform
:rtype: str
"""
return core.BNPluginGetInstallInstructions(self.handle, platform)
@property
def install_platforms(self):
"""List of platforms this plugin can execute on"""
result = []
count = ctypes.c_ulonglong(0)
platforms = core.BNPluginGetPlatforms(self.handle, count)
for i in range(count.value):
result.append(platforms[i].decode("utf-8"))
core.BNFreePluginPlatforms(platforms, count)
return result
@property
def being_deleted(self):
"""Boolean status indicating that the plugin is being deleted"""
return core.BNPluginIsBeingDeleted(self.handle)
@property
def being_updated(self):
"""Boolean status indicating that the plugin is being updated"""
return core.BNPluginIsBeingUpdated(self.handle)
@property
def running(self):
"""Boolean status indicating that the plugin is currently running"""
return core.BNPluginIsRunning(self.handle)
@property
def update_pending(self):
"""Boolean status indicating that the plugin has updates will be installed after the next restart"""
return core.BNPluginIsUpdatePending(self.handle)
@property
def disable_pending(self):
"""Boolean status indicating that the plugin will be disabled after the next restart"""
return core.BNPluginIsDisablePending(self.handle)
@property
def delete_pending(self):
"""Boolean status indicating that the plugin will be deleted after the next restart"""
return core.BNPluginIsDeletePending(self.handle)
@property
def update_available(self):
"""Boolean status indicating that the plugin has updates available"""
return core.BNPluginIsUpdateAvailable(self.handle)
@property
def project_data(self):
"""Gets a json object of the project data field"""
return json.loads(core.BNPluginGetProjectData(self.handle))
@property
def last_update(self):
"""Returns a datetime object representing the plugins last update"""
return datetime.fromtimestamp(core.BNPluginGetLastUpdate(self.handle))
class Repository(object):
"""
``Repository`` is a read-only class. Use RepositoryManager to Enable/Disable/Install/Uninstall plugins.
"""
def __init__(self, handle):
self.handle = core.handle_of_type(handle, core.BNRepository)
def __del__(self):
core.BNFreeRepository(self.handle)
def __repr__(self):
return "<{}>".format(self.path)
def __getitem__(self, plugin_path):
for plugin in self.plugins:
if plugin_path == plugin.path:
return plugin
raise KeyError()
@property
def url(self):
"""String URL of the git repository where the plugin repository's are stored"""
return core.BNRepositoryGetUrl(self.handle)
@property
def path(self):
"""String local path to store the given plugin repository"""
return core.BNRepositoryGetRepoPath(self.handle)
@property
def full_path(self):
"""String full path the repository"""
return core.BNRepositoryGetPluginsPath(self.handle)
@property
def plugins(self):
"""List of RepoPlugin objects contained within this repository"""
pluginlist = []
count = ctypes.c_ulonglong(0)
result = core.BNRepositoryGetPlugins(self.handle, count)
for i in range(count.value):
pluginlist.append(RepoPlugin(core.BNNewPluginReference(result[i])))
core.BNFreeRepositoryPluginList(result, count.value)
del result
return pluginlist
class RepositoryManager(object):
"""
``RepositoryManager`` keeps track of all the repositories and keeps the enabled_plugins.json file coherent with
the plugins that are installed/uninstalled and enabled/disabled.
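The following sketch is illustrative only (it is not part of the original docstring and its output depends on the locally configured repositories)::
>>> mgr = RepositoryManager()
>>> updated = mgr.check_for_updates()
>>> for repo in mgr.repositories:
...     print(repo.path, len(repo.plugins))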
"""
def __init__(self, handle=None):
self.handle = core.BNGetRepositoryManager()
def __getitem__(self, repo_path):
for repo in self.repositories:
if repo_path == repo.path:
return repo
raise KeyError()
def check_for_updates(self):
"""Check for updates for all managed Repository objects"""
return core.BNRepositoryManagerCheckForUpdates(self.handle)
@property
def repositories(self):
"""List of Repository objects being managed"""
result = []
count = ctypes.c_ulonglong(0)
repos = core.BNRepositoryManagerGetRepositories(self.handle, count)
for i in range(count.value):
result.append(Repository(core.BNNewRepositoryReference(repos[i])))
core.BNFreeRepositoryManagerRepositoriesList(repos)
return result
@property
def plugins(self):
"""List of all RepoPlugins in each repository"""
plugin_list = {}
for repo in self.repositories:
plugin_list[repo.path] = repo.plugins
return plugin_list
@property
def default_repository(self):
"""Gets the default Repository"""
binaryninja._init_plugins()
return Repository(core.BNNewRepositoryReference(core.BNRepositoryManagerGetDefaultRepository(self.handle)))
def add_repository(self, url=None, repopath=None):
"""
``add_repository`` adds a new plugin repository for the manager to track.
:param str url: URL to the plugins.json containing the records for this repository
:param str repopath: path to where the repository will be stored on disk locally
:return: Boolean value True if the repository was successfully added, False otherwise.
:rtype: Boolean
:Example:
>>> mgr = RepositoryManager()
>>> mgr.add_repository("https://raw.githubusercontent.com/Vector35/community-plugins/master/plugins.json", "community")
True
>>>
"""
if not isinstance(url, str) or not isinstance(repopath, str):
raise ValueError("Parameter is incorrect type")
return core.BNRepositoryManagerAddRepository(self.handle, url, repopath)
|
py | 1a3188ba6a2861341bdfdebbcbb233aaad74defd | import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
n = int(input())
a = [0] + list(map(int, input().split())) + [0]
diff = [0] * (n + 1)
for i in range(n + 1):
diff[i] = abs(a[i+1] - a[i])
ans = sum(diff)
for i in range(1, n + 1):
print(ans - (diff[i-1] + diff[i]) + (abs(a[i+1] - a[i-1])))
|
py | 1a3188e89293fb68c7ad98e8bc98a554e8991c01 | from django.test import TestCase
from utils.forms import TagForm
class TagTest(TestCase):
def test_tag_form(self):
test = TagForm(data={"name": "Test", "slug": "test", "color": "000000"})
self.assertTrue(test.is_valid())
self.assertTrue(test.save())
|
py | 1a3189797b55f73cb7909a42345cc95c4fc1e050 | import _plotly_utils.basevalidators
class DtickrangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
def __init__(
self,
plotly_name="dtickrange",
parent_name="scatter3d.marker.colorbar.tickformatstop",
**kwargs
):
super(DtickrangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
items=kwargs.pop(
"items",
[
{"valType": "any", "editType": "calc"},
{"valType": "any", "editType": "calc"},
],
),
role=kwargs.pop("role", "info"),
**kwargs
)
|
py | 1a318b10a44b28feaf6e224496a54b0059410c81 | # Copyright (c) 2015, Dataent Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import dataent
import dataent.share
import unittest
class TestDocShare(unittest.TestCase):
def setUp(self):
self.user = "[email protected]"
self.event = dataent.get_doc({"doctype": "Event",
"subject": "test share event",
"starts_on": "2015-01-01 10:00:00",
"event_type": "Private"}).insert()
def tearDown(self):
dataent.set_user("Administrator")
self.event.delete()
def test_add(self):
# user not shared
self.assertTrue(self.event.name not in dataent.share.get_shared("Event", self.user))
dataent.share.add("Event", self.event.name, self.user)
self.assertTrue(self.event.name in dataent.share.get_shared("Event", self.user))
def test_doc_permission(self):
dataent.set_user(self.user)
self.assertFalse(self.event.has_permission())
dataent.set_user("Administrator")
dataent.share.add("Event", self.event.name, self.user)
dataent.set_user(self.user)
self.assertTrue(self.event.has_permission())
def test_share_permission(self):
dataent.share.add("Event", self.event.name, self.user, write=1, share=1)
dataent.set_user(self.user)
self.assertTrue(self.event.has_permission("share"))
# test cascade
self.assertTrue(self.event.has_permission("read"))
self.assertTrue(self.event.has_permission("write"))
def test_set_permission(self):
dataent.share.add("Event", self.event.name, self.user)
dataent.set_user(self.user)
self.assertFalse(self.event.has_permission("share"))
dataent.set_user("Administrator")
dataent.share.set_permission("Event", self.event.name, self.user, "share")
dataent.set_user(self.user)
self.assertTrue(self.event.has_permission("share"))
def test_permission_to_share(self):
dataent.set_user(self.user)
self.assertRaises(dataent.PermissionError, dataent.share.add, "Event", self.event.name, self.user)
dataent.set_user("Administrator")
dataent.share.add("Event", self.event.name, self.user, write=1, share=1)
# test not raises
dataent.set_user(self.user)
dataent.share.add("Event", self.event.name, "[email protected]", write=1, share=1)
def test_remove_share(self):
dataent.share.add("Event", self.event.name, self.user, write=1, share=1)
dataent.set_user(self.user)
self.assertTrue(self.event.has_permission("share"))
dataent.set_user("Administrator")
dataent.share.remove("Event", self.event.name, self.user)
dataent.set_user(self.user)
self.assertFalse(self.event.has_permission("share"))
def test_share_with_everyone(self):
self.assertTrue(self.event.name not in dataent.share.get_shared("Event", self.user))
dataent.share.set_permission("Event", self.event.name, None, "read", everyone=1)
self.assertTrue(self.event.name in dataent.share.get_shared("Event", self.user))
self.assertTrue(self.event.name in dataent.share.get_shared("Event", "[email protected]"))
self.assertTrue(self.event.name not in dataent.share.get_shared("Event", "Guest"))
dataent.share.set_permission("Event", self.event.name, None, "read", value=0, everyone=1)
self.assertTrue(self.event.name not in dataent.share.get_shared("Event", self.user))
self.assertTrue(self.event.name not in dataent.share.get_shared("Event", "[email protected]"))
self.assertTrue(self.event.name not in dataent.share.get_shared("Event", "Guest"))
|
py | 1a318b2578ae3b68288baa429f4778bde0ffe2bb | # -*- coding: utf-8 -*-
#########################################################################
# Copyright (C) 2011 Cameron Franc and Marc Masdeu
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#########################################################################
r"""
Spaces of `p`-adic automorphic forms
Compute with harmonic cocycles and `p`-adic automorphic forms, including
overconvergent `p`-adic automorphic forms.
For a discussion of nearly rigid analytic modular forms and
the rigid analytic Shimura-Maass operator, see [F]_. It is worth also
looking at [FM]_ for information on how these are implemented in this code.
EXAMPLES:
Create a quotient of the Bruhat-Tits tree::
sage: X = BruhatTitsQuotient(13,11)
Declare the corresponding space of harmonic cocycles::
sage: H = X.harmonic_cocycles(2,prec=5)
And the space of `p`-adic automorphic forms::
sage: A = X.padic_automorphic_forms(2,prec=5,overconvergent=True)
Harmonic cocycles, unlike `p`-adic automorphic forms, can be used to compute a basis::
sage: a = H.gen(0)
This can then be lifted to an overconvergent `p`-adic modular form::
sage: A.lift(a) # long time
p-adic automorphic form of cohomological weight 0
REFERENCES:
.. [F] Nearly rigid analytic modular forms and their values at CM points
Cameron Franc
Ph.D. thesis, McGill University, 2011.
"""
from __future__ import print_function
from builtins import zip
from sage.modular.btquotients.btquotient import DoubleCosetReduction
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.richcmp import op_EQ, op_NE
from sage.matrix.matrix_space import MatrixSpace
from sage.structure.element import ModuleElement
from sage.modules.module import Module
from sage.rings.all import Integer
from sage.matrix.constructor import Matrix, zero_matrix
from sage.rings.all import Qp, QQ, ZZ
from copy import copy
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.laurent_series_ring import LaurentSeriesRing
from sage.modular.hecke.all import (AmbientHeckeModule, HeckeModuleElement)
from sage.rings.infinity import Infinity
import sage.modular.hecke.hecke_operator
from sage.misc.misc import verbose
from sage.rings.real_mpfr import RR
from sage.modular.pollack_stevens.sigma0 import Sigma0ActionAdjuster
from sage.modular.pollack_stevens.distributions import OverconvergentDistributions, Symk
# Need this to be pickleable
class _btquot_adjuster(Sigma0ActionAdjuster):
"""
Callable object that turns matrices into 4-tuples.
Since the modular symbol and harmonic cocycle code use different
conventions for group actions, this function is used to make sure
that actions are correct for harmonic cocycle computations.
EXAMPLES::
sage: from sage.modular.btquotients.pautomorphicform import _btquot_adjuster
sage: adj = _btquot_adjuster()
sage: adj(matrix(ZZ,2,2,[1..4]))
(4, 2, 3, 1)
"""
def __call__(self, g):
"""
Turn matrices into 4-tuples.
INPUT:
- ``g`` - a 2x2 matrix
OUTPUT:
A 4-tuple encoding the entries of ``g``.
EXAMPLES::
sage: from sage.modular.btquotients.pautomorphicform import _btquot_adjuster
sage: adj = _btquot_adjuster()
sage: adj(matrix(ZZ,2,2,[0, 1, 2, 3]))
(3, 1, 2, 0)
"""
a, b, c, d = g.list()
return (d, b, c, a)
def eval_dist_at_powseries(phi, f):
"""
Evaluate a distribution on a powerseries.
A distribution is an element in the dual of the Tate ring. The
elements of coefficient modules of overconvergent modular symbols
and overconvergent `p`-adic automorphic forms give examples of
distributions in Sage.
INPUT:
- ``phi`` - a distribution
- ``f`` - a power series over a ring coercible into a `p`-adic field
OUTPUT:
The value of ``phi`` evaluated at ``f``, which will be an element in the
ring of definition of ``f``
EXAMPLES::
sage: from sage.modular.btquotients.pautomorphicform import eval_dist_at_powseries
sage: R.<X> = PowerSeriesRing(ZZ,10)
sage: f = (1 - 7*X)^(-1)
sage: D = OverconvergentDistributions(0,7,10)
sage: phi = D(list(range(1,11)))
sage: eval_dist_at_powseries(phi,f)
1 + 2*7 + 3*7^2 + 4*7^3 + 5*7^4 + 6*7^5 + 2*7^7 + 3*7^8 + 4*7^9 + O(7^10)
"""
nmoments = phi.parent().precision_cap()
K = f.parent().base_ring()
if K.is_exact():
K = phi.parent().base_ring()
return sum(a * K(phi.moment(i))
for a, i in zip(f.coefficients(), f.exponents())
if i >= 0 and i < nmoments)
class BruhatTitsHarmonicCocycleElement(HeckeModuleElement):
r"""
`\Gamma`-invariant harmonic cocycles on the Bruhat-Tits
tree. `\Gamma`-invariance is necessary so that the cocycle can be
stored in terms of a finite amount of data.
More precisely, given a ``BruhatTitsQuotient`` `T`, harmonic cocycles are stored as
a list of values in some coefficient module (e.g. for weight 2 forms
can take `\CC_p`) indexed by edges of a fundamental domain for `T` in the
Bruhat-Tits tree. Evaluate the cocycle at other edges using Gamma
invariance (although the values may not be equal over an orbit of
edges as the coefficient module action may be nontrivial).
EXAMPLES:
Harmonic cocycles form a vector space, so they can be added and/or
subtracted from each other::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: v1 = H.basis()[0]; v2 = H.basis()[1] # indirect doctest
sage: v3 = v1+v2
sage: v1 == v3-v2
True
and rescaled::
sage: v4 = 2*v1
sage: v1 == v4 - v1
True
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu
"""
def __init__(self, _parent, vec):
"""
Create a harmonic cocycle element.
INPUT:
- ``_parent`` : the parent space of harmonic cocycles.
- ``vec`` : a list of elements in the coefficient module.
EXAMPLES::
sage: X = BruhatTitsQuotient(31,7)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: v = H.basis()[0] # indirect doctest
sage: TestSuite(v).run()
"""
HeckeModuleElement.__init__(self, _parent, None)
self._parent = _parent
assert type(vec) is list
assert all(v.parent() is _parent._U for v in vec)
self._R = _parent._U.base_ring()
self._wt = _parent._k
self._nE = len(_parent._E)
self._F = copy(vec)
def _add_(self, g):
r"""
Add two cocycles componentwise.
INPUT:
- ``g`` - a harmonic cocycle
OUTPUT:
A harmonic cocycle
EXAMPLES::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: v1 = H.basis()[0]; v2 = H.basis()[1]
sage: v3 = v1+v2 # indirect doctest
sage: v1 == v3-v2
True
"""
return self.parent()(self.element() + g.element())
def _sub_(self, g):
r"""
Compute the difference of two cocycles.
INPUT:
- ``g`` - a harmonic cocycle
OUTPUT:
A harmonic cocycle
EXAMPLES::
sage: X = BruhatTitsQuotient(5,11)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: v1 = H.basis()[0]; v2 = H.basis()[1]
sage: v3 = v1-v2 # indirect doctest
sage: v1 == v3+v2
True
"""
# Should ensure that self and g are modular forms of the same
# weight and on the same curve
return self.parent()(self.element() - g.element())
def _lmul_(self, a):
r"""
Multiply a cocycle by a scalar.
INPUT:
- ``a`` - a ring element
OUTPUT:
A harmonic cocycle
EXAMPLES::
sage: X = BruhatTitsQuotient(3,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: v1 = H.basis()[0]
sage: v2 = 2*v1 # indirect doctest
sage: v1 == v2-v1
True
"""
# Should ensure that 'a' is a scalar
return self.parent()(a * self.element())
def _richcmp_(self, other, op):
r"""
General comparison method for ``HarmonicCocycles``
INPUT:
- ``other`` - Another harmonic cocycle
EXAMPLES::
sage: X = BruhatTitsQuotient(11,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: v1 = H.basis()[0]
sage: v2 = 3*v1 # indirect doctest
sage: 2*v1 == v2-v1
True
"""
if op not in [op_EQ, op_NE]:
return NotImplemented
b = all(self._F[e] == other._F[e] for e in range(self._nE))
if op == op_EQ:
return b
return not b
def _repr_(self):
r"""
Return a string describing the cocycle.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,13)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H.basis()[0] # indirect doctest
Harmonic cocycle with values in Sym^0 Q_5^2
"""
return 'Harmonic cocycle with values in %s' % self.parent()._U
def monomial_coefficients(self):
r"""
Void method to comply with pickling.
EXAMPLES::
sage: M = BruhatTitsQuotient(3,5).harmonic_cocycles(2,prec=10)
sage: M.monomial_coefficients()
{}
"""
return {}
def print_values(self):
r"""
Print the values of the cocycle on all of the edges.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H.basis()[0].print_values()
0 |1 + O(5^10)
1 |0
2 |0
3 |4 + 4*5 + 4*5^2 + 4*5^3 + 4*5^4 + 4*5^5 + 4*5^6 + 4*5^7 + 4*5^8 + 4*5^9 + O(5^10)
4 |0
5 |0
6 |0
7 |0
8 |0
9 |0
10 |0
11 |0
"""
tmp = ''
for e in range(self._nE):
tmp += str(e) + '\t|' + str(self._F[e]) + '\n'
print(tmp[:-1])
def valuation(self):
r"""
Return the valuation of the cocycle, defined as the
minimum of the values it takes on a set of representatives.
OUTPUT:
An integer.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,17)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: b1 = H.basis()[0]
sage: b2 = 3*b1
sage: b1.valuation()
0
sage: b2.valuation()
1
sage: H(0).valuation()
+Infinity
"""
if self == 0:
return Infinity
else:
return min(self._F[e].valuation() for e in range(self._nE))
def _compute_element(self):
r"""
Express a harmonic cocycle in a coordinate vector.
OUTPUT:
A coordinate vector encoding ``self`` in terms of the ambient
basis in ``self.parent``
EXAMPLES::
sage: X = BruhatTitsQuotient(3,17)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H.basis()[0]._compute_element()
(1 + O(3^9), O(3^9), 0)
sage: H.basis()[1]._compute_element()
(0, 1 + O(3^9), 0)
sage: H.basis()[2]._compute_element()
(0, O(3^9), 1 + O(3^10))
"""
R = self._R
A = self.parent().basis_matrix().transpose()
B = Matrix(R, self._nE * (self.parent()._k - 1), 1,
[self._F[e].moment(ii) for e in range(self._nE)
for ii in range(self.parent()._k - 1)])
try:
res = (A.solve_right(B)).transpose()
except ValueError:
rest = (A.transpose() * A).solve_right(A.transpose() * B)
err = A * rest - B
if err != 0:
try:
if hasattr(err.parent().base_ring().an_element(),
'valuation'):
minval = min([o.valuation() for o in err.list()
if o != 0])
else:
minval = sum([RR(o.norm() ** 2) for o in err.list()])
verbose('Error = %s' % minval)
except AttributeError:
verbose('Warning: something did not work in the '
'computation')
res = rest.transpose()
return self.parent().free_module()(res.row(0))
#In BruhatTitsHarmonicCocycle
def evaluate(self, e1):
r"""
Evaluate a harmonic cocycle on an edge of the Bruhat-Tits tree.
INPUT:
- ``e1`` - a matrix corresponding to an edge of the
Bruhat-Tits tree
OUTPUT:
- An element of the coefficient module of the cocycle which
describes the value of the cocycle on ``e1``
EXAMPLES::
sage: X = BruhatTitsQuotient(5,17)
sage: e0 = X.get_edge_list()[0]
sage: e1 = X.get_edge_list()[1]
sage: H = X.harmonic_cocycles(2,prec=10)
sage: b = H.basis()[0]
sage: b.evaluate(e0.rep)
1 + O(5^10)
sage: b.evaluate(e1.rep)
4 + 4*5 + 4*5^2 + 4*5^3 + 4*5^4 + 4*5^5 + 4*5^6 + 4*5^7 + 4*5^8 + 4*5^9 + O(5^10)
"""
X = self.parent()._X
p = X._p
u = DoubleCosetReduction(X, e1)
if u.label < self._nE:
val = self._F[u.label]
else:
val = -self._F[u.label - self._nE]
return u.igamma(self.parent().embed_quaternion, scale=p ** (-u.power)) * val
#In BruhatTitsHarmonicCocycle
def riemann_sum(self, f, center=1, level=0, E=None):
r"""
Evaluate the integral of the function ``f`` with respect
to the measure determined by ``self`` over `\mathbf{P}^1(\QQ_p)`.
INPUT:
- ``f`` - a function on `\mathbf{P}^1(\QQ_p)`.
- ``center`` - An integer (default = 1). Center of integration.
- ``level`` - An integer (default = 0). Determines the size of
the covering when computing the Riemann sum. Runtime is
exponential in the level.
- ``E`` - A list of edges (default = None). They should describe
a covering of `\mathbf{P}^1(\QQ_p)`.
OUTPUT:
A `p`-adic number.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,7)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: b = H.basis()[0]
sage: R.<z> = PolynomialRing(QQ,1)
sage: f = z^2
Note that `f` has a pole at infinity, so that the result will
be meaningless::
sage: b.riemann_sum(f,level=0)
1 + 5 + 2*5^3 + 4*5^4 + 2*5^5 + 3*5^6 + 3*5^7 + 2*5^8 + 4*5^9 + O(5^10)
"""
R1 = LaurentSeriesRing(f.base_ring(), 'r1')
if E is None:
E = self.parent()._X._BT.get_balls(center, level)
else:
E = self.parent()._X._BT.subdivide(E, level)
value = 0
ii = 0
for e in E:
ii += 1
expansion = ((R1([e[1, 1], e[1, 0]]) ** (self.parent()._k - 2) * e.determinant() ** (-(self.parent()._k - 2) / 2)) * f(R1([e[0, 1], e[0, 0]]) / R1([e[1, 1], e[1, 0]]))).truncate(self.parent()._k - 1)
dist = self.parent()._Sigma0(e.inverse(), check=False) * self.evaluate(e)
value += eval_dist_at_powseries(dist, expansion)
return value
def modular_form(self, z=None, level=0):
r"""
Integrate Teitelbaum's `p`-adic Poisson kernel against
the measure corresponding to ``self`` to evaluate the associated
modular form at ``z``.
If ``z`` = None, a function is returned that encodes the modular form.
.. NOTE::
This function uses the integration method of Riemann
summation and is incredibly slow! It should only be used for
testing and bug-finding. Overconvergent methods are quicker.
INPUT:
- ``z`` - an element in the quadratic unramified extension of
`\QQ_p` that is not contained in `\QQ_p` (default = None).
- ``level`` - an integer. How fine of a mesh should the Riemann
sum use.
OUTPUT:
An element of the quadratic unramified extension of `\QQ_p`.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,23)
sage: H = X.harmonic_cocycles(2,prec = 8)
sage: b = H.basis()[0]
sage: R.<a> = Qq(9,prec=10)
sage: x1 = b.modular_form(a,level = 0); x1
a + (2*a + 1)*3 + (a + 1)*3^2 + (a + 1)*3^3 + 3^4 + (a + 2)*3^5 + O(3^7)
sage: x2 = b.modular_form(a,level = 1); x2
a + (a + 2)*3 + (2*a + 1)*3^3 + (2*a + 1)*3^4 + 3^5 + (a + 2)*3^6 + O(3^7)
sage: x3 = b.modular_form(a,level = 2); x3
a + (a + 2)*3 + (2*a + 2)*3^2 + 2*a*3^4 + (a + 1)*3^5 + 3^6 + O(3^7)
sage: x4 = b.modular_form(a,level = 3);x4
a + (a + 2)*3 + (2*a + 2)*3^2 + (2*a + 2)*3^3 + 2*a*3^5 + a*3^6 + O(3^7)
sage: (x4-x3).valuation()
3
TESTS:
Check that :trac:`22634` is fixed::
sage: X = BruhatTitsQuotient(7,2)
sage: H = X.harmonic_cocycles(4,20)
sage: f0, g0 = H.basis()
sage: A = X.padic_automorphic_forms(4,20,overconvergent=True)
sage: f = A.lift(f0).modular_form(method='moments')
sage: T.<x> = Qq(7^2,20)
sage: a,b,c,d = X.embed_quaternion(X.get_units_of_order()[1]).change_ring(Qp(7,20)).list()
sage: (c*x + d)^4 * f(x) == f((a*x + b)/(c*x + d))
True
sage: g = A.lift(g0).modular_form(method='moments')
sage: (c*x + d)^4 * g(x) == g((a*x + b)/(c*x + d))
True
"""
return self.derivative(z, level, order=0)
# In BruhatTitsHarmonicCocycle
def derivative(self, z=None, level=0, order=1):
r"""
Integrate Teitelbaum's `p`-adic Poisson kernel against
the measure corresponding to ``self`` to evaluate the rigid
analytic Shimura-Maass derivatives of the associated modular
form at `z`.
If ``z = None``, a function is returned that encodes the
derivative of the modular form.
.. NOTE::
This function uses the integration method of Riemann
summation and is incredibly slow! It should only be used for
testing and bug-finding. Overconvergent methods are quicker.
INPUT:
- ``z`` - an element in the quadratic unramified extension of
`\QQ_p` that is not contained in `\QQ_p` (default = None). If ``z
= None`` then a function encoding the derivative is returned.
- ``level`` - an integer. How fine of a mesh should the Riemann
sum use.
- ``order`` - an integer. How many derivatives to take.
OUTPUT:
An element of the quadratic unramified extension of `\QQ_p`, or
a function encoding the derivative.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,23)
sage: H = X.harmonic_cocycles(2,prec=5)
sage: b = H.basis()[0]
sage: R.<a> = Qq(9,prec=10)
sage: b.modular_form(a,level=0) == b.derivative(a,level=0,order=0)
True
sage: b.derivative(a,level=1,order=1)
(2*a + 2)*3 + (a + 2)*3^2 + 2*a*3^3 + O(3^4)
sage: b.derivative(a,level=2,order=1)
(2*a + 2)*3 + 2*a*3^2 + 3^3 + O(3^4)
"""
def F(z):
R = PolynomialRing(z.parent(), 'x,y').fraction_field()
Rx = PolynomialRing(z.parent(), 'x1').fraction_field()
x1 = Rx.gen()
subst = R.hom([x1, z], codomain=Rx)
x, y = R.gens()
center = self.parent()._X._BT.find_containing_affinoid(z)
zbar = z.trace() - z
f = R(1) / (x - y)
k = self.parent()._k
V = [f]
for ii in range(order):
V = [v.derivative(y) for v in V] + [k / (y - zbar) * v
for v in V]
k += 2
return sum([self.riemann_sum(subst(v), center, level) for v in V])
if z is None:
return F
else:
return F(z)
class BruhatTitsHarmonicCocycles(AmbientHeckeModule, UniqueRepresentation):
r"""
Ensure unique representation
EXAMPLES::
sage: X = BruhatTitsQuotient(3,5)
sage: M1 = X.harmonic_cocycles( 2, prec = 10)
sage: M2 = X.harmonic_cocycles( 2, 10)
sage: M1 is M2
True
"""
Element = BruhatTitsHarmonicCocycleElement
@staticmethod
def __classcall__(cls, X, k, prec=None, basis_matrix=None, base_field=None):
r"""
Represent a space of Gamma invariant harmonic
cocycles valued in a coefficient module.
INPUT:
- ``X`` - A BruhatTitsQuotient object
- ``k`` - integer - The weight. It must be even.
- ``prec`` - integer (default: None). If specified, the
precision for the coefficient module
- ``basis_matrix`` - a matrix (default: None).
- ``base_field`` - a ring (default: None)
EXAMPLES::
sage: X = BruhatTitsQuotient(3,23)
sage: H = X.harmonic_cocycles(2,prec = 5)
sage: H.dimension()
3
sage: X.genus()
3
Higher even weights are implemented::
sage: H = X.harmonic_cocycles(8, prec = 10)
sage: H.dimension()
26
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu
"""
return super(BruhatTitsHarmonicCocycles, cls).__classcall__(cls, X, k, prec,
basis_matrix,
base_field)
def __init__(self, X, k, prec=None, basis_matrix=None, base_field=None):
"""
Compute the space of harmonic cocycles.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,37)
sage: H = X.harmonic_cocycles(4,prec=10)
sage: TestSuite(H).run()
"""
self._k = k
self._X = X
self._E = self._X.get_edge_list()
self._V = self._X.get_vertex_list()
if base_field is not None and not base_field.is_exact():
prec = base_field.precision_cap()
if prec is None:
self._prec = None # Be careful!
if base_field is None:
try:
self._R = X.get_splitting_field()
except AttributeError:
raise ValueError("It looks like you are not using Magma as"
" backend...and still we don't know how "
"to compute splittings in that case!")
else:
pol = X.get_splitting_field().defining_polynomial().factor()[0][0]
self._R = base_field.extension(pol, pol.variable_name()).absolute_field(name='r')
else:
self._prec = prec
if base_field is None:
self._R = Qp(self._X._p, prec=prec)
else:
self._R = base_field
self._U = Symk(self._k - 2, base=self._R, act_on_left=True,
adjuster=_btquot_adjuster(),
dettwist=-ZZ((self._k - 2) / 2), act_padic=True)
if basis_matrix is None:
self.__rank = self._X.dimension_harmonic_cocycles(self._k)
else:
self.__rank = basis_matrix.nrows()
if basis_matrix is not None:
self.__matrix = basis_matrix
self.__matrix.set_immutable()
assert self.__rank == self.__matrix.nrows()
self._Sigma0 = self._U._act._Sigma0
AmbientHeckeModule.__init__(self, self._R, self.__rank,
self._X.prime() * self._X.Nplus() * self._X.Nminus(), weight=self._k)
self._populate_coercion_lists_()
def monomial_coefficients(self):
r"""
Void method to comply with pickling.
EXAMPLES::
sage: M = BruhatTitsQuotient(3,5).harmonic_cocycles(2,prec=10)
sage: M.monomial_coefficients()
{}
"""
return {}
def base_extend(self, base_ring):
r"""
Extend the base ring of the coefficient module.
INPUT:
- ``base_ring`` - a ring that has a coerce map from the
current base ring
OUTPUT:
A new space of HarmonicCocycles with the base extended.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,19)
sage: H = X.harmonic_cocycles(2,10)
sage: H.base_ring()
3-adic Field with capped relative precision 10
sage: H1 = H.base_extend(Qp(3,prec=15))
sage: H1.base_ring()
3-adic Field with capped relative precision 15
"""
if not base_ring.has_coerce_map_from(self.base_ring()):
raise ValueError("No coercion defined")
else:
return self.change_ring(base_ring)
def change_ring(self, new_base_ring):
r"""
Change the base ring of the coefficient module.
INPUT:
- ``new_base_ring`` - a ring that has a coerce map from the
current base ring
OUTPUT:
New space of HarmonicCocycles with different base ring
EXAMPLES::
sage: X = BruhatTitsQuotient(5,17)
sage: H = X.harmonic_cocycles(2,10)
sage: H.base_ring()
5-adic Field with capped relative precision 10
sage: H1 = H.base_extend(Qp(5,prec=15)) # indirect doctest
sage: H1.base_ring()
5-adic Field with capped relative precision 15
"""
if not new_base_ring.has_coerce_map_from(self.base_ring()):
raise ValueError("No coercion defined")
basis_matrix = self.basis_matrix().change_ring(new_base_ring)
basis_matrix.set_immutable()
return self.__class__(self._X, self._k, prec=None,
basis_matrix=basis_matrix,
base_field=new_base_ring)
def rank(self):
r"""
Return the rank (dimension) of ``self``.
OUTPUT:
An integer.
EXAMPLES::
sage: X = BruhatTitsQuotient(7,11)
sage: H = X.harmonic_cocycles(2,prec = 10)
sage: X.genus() == H.rank()
True
sage: H1 = X.harmonic_cocycles(4,prec = 10)
sage: H1.rank()
16
"""
return self.__rank
def submodule(self, v, check=False):
r"""
Return the submodule of ``self`` spanned by ``v``.
INPUT:
- ``v`` - Submodule of self.free_module().
- ``check`` - Boolean (default = False).
OUTPUT:
Subspace of harmonic cocycles.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,17)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H.rank()
3
sage: v = H.gen(0)
sage: N = H.free_module().span([v.element()])
sage: H1 = H.submodule(N)
Traceback (most recent call last):
...
NotImplementedError
"""
# return BruhatTitsHarmonicCocyclesSubmodule(self, v)
raise NotImplementedError
def is_simple(self):
r"""
Whether ``self`` is irreducible.
OUTPUT:
Boolean. True if and only if ``self`` is irreducible.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,29)
sage: H = X.harmonic_cocycles(4,prec =10)
sage: H.rank()
14
sage: H.is_simple()
False
sage: X = BruhatTitsQuotient(7,2)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H.rank()
1
sage: H.is_simple()
True
"""
return self.rank() == 1
def _repr_(self):
r"""
This returns the representation of self as a string.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H
Space of harmonic cocycles of weight 2 on Quotient of the Bruhat
Tits tree of GL_2(QQ_5) with discriminant 23 and level 1
"""
return 'Space of harmonic cocycles of weight %s on %s' % (self._k,
self._X)
def _latex_(self):
r"""
A LaTeX representation of ``self``.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: latex(H) # indirect doctest
\text{Space of harmonic cocycles of weight } 2 \text{ on } X(5 \cdot 23,1)\otimes_{\mathbb{Z}} \mathbb{F}_{5}
"""
s = '\\text{Space of harmonic cocycles of weight } '
s += (self._k)._latex_() + ' \\text{ on } ' + self._X._latex_()
return s
def _an_element_(self):
r"""
Return an element of the ambient space
OUTPUT:
A harmonic cocycle in self.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H.an_element() # indirect doctest
Harmonic cocycle with values in Sym^0 Q_5^2
"""
return self.basis()[0]
def _coerce_map_from_(self, S):
r"""
Can coerce from other BruhatTitsHarmonicCocycles or from
pAdicAutomorphicForms, also from 0
OUTPUT:
Boolean. True if and only if ``self`` is a space of
BruhatTitsHarmonicCocycles or pAdicAutomorphicForms.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,17)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: A(H.basis()[0]) # indirect doctest
p-adic automorphic form of cohomological weight 0
"""
if isinstance(S, (BruhatTitsHarmonicCocycles, pAdicAutomorphicForms)):
if S._k != self._k:
return False
if S._X != self._X:
return False
return True
return False
def __eq__(self, other):
r"""
Test whether two BruhatTitsHarmonicCocycle spaces are equal.
INPUT:
- ``other`` -- a BruhatTitsHarmonicCocycles class.
OUTPUT:
A boolean value
EXAMPLES::
sage: X = BruhatTitsQuotient(5,7)
sage: H1 = X.harmonic_cocycles(2,prec=10)
sage: H2 = X.harmonic_cocycles(2,prec=10)
sage: H1 == H2
True
"""
if not isinstance(other, BruhatTitsHarmonicCocycles):
return False
return (self.base_ring() == other.base_ring() and
self._X == other._X and
self._k == other._k)
def __ne__(self, other):
r"""
Test whether two BruhatTitsHarmonicCocycle spaces are not equal.
INPUT:
- ``other`` -- a BruhatTitsHarmonicCocycles class.
OUTPUT:
A boolean value
EXAMPLES::
sage: X = BruhatTitsQuotient(5,7)
sage: H1 = X.harmonic_cocycles(2,prec=10)
sage: H2 = X.harmonic_cocycles(2,prec=10)
sage: H1 != H2
False
"""
return not self.__eq__(other)
def _element_constructor_(self, x):
r"""
Constructor for harmonic cocycles.
INPUT:
- ``x`` - an object coercible into a harmonic cocycle.
OUTPUT:
A harmonic cocycle.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,17)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H(H.an_element()) # indirect doctest
Harmonic cocycle with values in Sym^0 Q_3^2
sage: H(0)
Harmonic cocycle with values in Sym^0 Q_3^2
"""
if type(x) is sage.modules.free_module_element.FreeModuleElement_generic_dense:
vmat = MatrixSpace(self._R, 1, self.dimension())(x)
tmp = (vmat * self.ambient_module().basis_matrix()).row(0)
vec = [self._U(tmp[e * (self._k - 1):(e + 1) * (self._k - 1)])
for e in range(len(self._E))]
return self.element_class(self, vec)
if type(x) is list:
return self.element_class(self, [self._U(o) for o in x])
if hasattr(x, 'parent'):
parent = x.parent()
if isinstance(parent, BruhatTitsHarmonicCocycles):
return self.element_class(self, [self._U(o) for o in x._F])
elif isinstance(parent, pAdicAutomorphicForms):
tmp = [self._E[ii].rep * self._U(x._F[ii]) for ii in range(self._nE)]
return self.element_class(self, tmp)
if x == 0:
tmp = [self._U([0] * (self.weight() - 1))] * self._X._num_edges
return self.element_class(self, tmp)
else:
raise TypeError
def free_module(self):
r"""
Return the underlying free module
OUTPUT:
A free module.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,7)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: H.free_module()
Vector space of dimension 1 over 3-adic Field with
capped relative precision 10
"""
try:
return self.__free_module
except AttributeError:
pass
V = self.base_ring() ** self.dimension()
self.__free_module = V
return V
def character(self):
r"""
The trivial character.
OUTPUT:
The identity map.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,7)
sage: H = X.harmonic_cocycles(2,prec = 10)
sage: f = H.character()
sage: f(1)
1
sage: f(2)
2
"""
return lambda x: x
def embed_quaternion(self, g, scale=1, exact=None):
r"""
Embed the quaternion element ``g`` into the matrix algebra.
INPUT:
- ``g`` - A quaternion, expressed as a 4x1 matrix.
OUTPUT:
A 2x2 matrix with `p`-adic entries.
EXAMPLES::
sage: X = BruhatTitsQuotient(7,2)
sage: q = X.get_stabilizers()[0][1][0]
sage: H = X.harmonic_cocycles(2,prec = 5)
sage: Hmat = H.embed_quaternion(q)
sage: Hmat.matrix().trace() == X._conv(q).reduced_trace() and Hmat.matrix().determinant() == 1
True
"""
if exact is None:
exact = self._R.is_exact()
return self._Sigma0(scale * self._X.embed_quaternion(g, exact=exact,
prec=self._prec),
check=False)
def basis_matrix(self):
r"""
Return a basis of ``self`` in matrix form.
If the coefficient module `M` is of finite rank then the space
of Gamma invariant `M` valued harmonic cocycles can be
represented as a subspace of the finite rank space of all
functions from the finitely many edges in the corresponding
BruhatTitsQuotient into `M`. This function computes this
representation of the space of cocycles.
OUTPUT:
- A basis matrix describing the cocycles in the spaced of all
`M` valued Gamma invariant functions on the tree.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,3)
sage: M = X.harmonic_cocycles(4,prec = 20)
sage: B = M.basis() # indirect doctest
sage: len(B) == X.dimension_harmonic_cocycles(4)
True
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu (2012-02-20)
"""
try:
return self.__matrix
except AttributeError:
pass
nV = len(self._V)
nE = len(self._E)
stab_conds = []
S = self._X.get_edge_stabilizers()
p = self._X._p
d = self._k - 1
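# Assemble a sparse linear system whose right kernel is the space of harmonic
# cocycles: each edge contributes a block of d = k - 1 unknowns, each vertex
# contributes d rows for the harmonicity condition (values on the edges around
# the vertex sum to zero), and each edge with a nontrivial stabilizer
# contributes d extra rows for invariance under that stabilizer.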
for e in self._E:
try:
g = filter(lambda g: g[2], S[e.label])[0]
C = self._U.acting_matrix(self._Sigma0(self.embed_quaternion(g[0])), d).transpose() # Warning - Need to allow the check = True
C -= self._U.acting_matrix(self._Sigma0(Matrix(QQ, 2, 2, p ** g[1])), d).transpose() # Warning - Need to allow the check = True
stab_conds.append([e.label, C])
except IndexError:
pass
n_stab_conds = len(stab_conds)
self._M = Matrix(self._R, (nV + n_stab_conds) * d, nE * d, 0,
sparse=True)
for v in self._V:
for e in filter(lambda e: e.parity == 0, v.leaving_edges):
C = sum([self._U.acting_matrix(self.embed_quaternion(x[0]), d)
for x in e.links],
Matrix(self._R, d, d, 0)).transpose()
self._M.set_block(v.label * d, e.label * d, C)
for e in filter(lambda e: e.parity == 0, v.entering_edges):
C = sum([self._U.acting_matrix(self.embed_quaternion(x[0]), d)
for x in e.opposite.links],
Matrix(self._R, d, d, 0)).transpose()
self._M.set_block(v.label * d, e.opposite.label * d, C)
for kk in range(n_stab_conds):
v = stab_conds[kk]
self._M.set_block((nV + kk) * d, v[0] * d, v[1])
x1 = self._M.right_kernel().matrix()
if x1.nrows() != self.rank():
raise RuntimeError('The computed dimension does not agree with '
'the expectation. Consider increasing '
'precision!')
K = [c.list() for c in x1.rows()]
if not self._R.is_exact():
for ii in range(len(K)):
s = min([t.valuation() for t in K[ii]])
for jj in range(len(K[ii])):
K[ii][jj] = (p ** (-s)) * K[ii][jj]
self.__matrix = Matrix(self._R, len(K), nE * d, K)
self.__matrix.set_immutable()
return self.__matrix
def __apply_atkin_lehner(self, q, f):
r"""
Apply an Atkin-Lehner involution to a harmonic cocycle
INPUT:
- ``q`` - an integer dividing the full level p*Nminus*Nplus
- ``f`` - a harmonic cocycle
OUTPUT:
- The harmonic cocycle obtained by hitting ``f`` with the
Atkin-Lehner at ``q``
EXAMPLES::
sage: X = BruhatTitsQuotient(5,17)
sage: H = X.harmonic_cocycles(2,prec = 10)
sage: A = H.atkin_lehner_operator(5).matrix() # indirect doctest
sage: A**2 == 1
True
"""
Data = self._X._get_atkin_lehner_data(q)
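# Data[0] is a quaternion inducing the Atkin-Lehner involution at q; Data[1]
# records, for each edge of the fundamental domain, its image edge together
# with the group element and power of p needed to bring it back to the domain.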
p = self._X._p
tmp = [self._U(0) for jj in range(len(self._E))]
d1 = Data[1]
mga = self.embed_quaternion(Data[0])
nE = len(self._E)
for jj in range(nE):
t = d1[jj]
if t.label < nE:
tmp[jj] += mga * t.igamma(self.embed_quaternion, scale=p ** -t.power) * f._F[t.label]
else:
tmp[jj] += mga * t.igamma(self.embed_quaternion, scale=p ** -t.power) * (-f._F[t.label - nE])
return self(tmp)
def __apply_hecke_operator(self, l, f):
r"""
This function applies a Hecke operator to a harmonic cocycle.
INPUT:
- ``l`` - an integer
- ``f`` - a harmonic cocycle
OUTPUT:
- A harmonic cocycle which is the result of applying the lth
Hecke operator to ``f``
EXAMPLES::
sage: X = BruhatTitsQuotient(5,17)
sage: H = X.harmonic_cocycles(2,prec=50)
sage: A = H.hecke_operator(7).matrix() # indirect doctest
sage: [o.rational_reconstruction() for o in A.charpoly().coefficients()]
[-8, -12, 12, 20, 8, 1]
"""
HeckeData, alpha = self._X._get_hecke_data(l)
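# HeckeData lists the coset data for T_l: each pair (d0, d1) gives a coset
# representative d0 and a table d1 describing where each edge is sent.  The
# operator is the sum of the translated cocycles, scaled by l^((k-2)/2)
# (and additionally by 1/(l+1) when l divides the level).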
if self.level() % l == 0:
factor = QQ(l ** (Integer((self._k - 2) / 2)) / (l + 1))
else:
factor = QQ(l ** (Integer((self._k - 2) / 2)))
p = self._X._p
alphamat = self.embed_quaternion(alpha)
tmp = [self._U(0) for jj in range(len(self._E))]
for d0, d1 in HeckeData:
mga = self.embed_quaternion(d0) * alphamat
nE = len(self._E)
for jj in range(nE):
t = d1[jj]
if t.label < nE:
tmp[jj] += mga * t.igamma(self.embed_quaternion, scale=p ** -t.power) * f._F[t.label]
else:
tmp[jj] += mga * t.igamma(self.embed_quaternion, scale=p ** -t.power) * (-f._F[t.label - nE])
return self([factor * x for x in tmp])
def _compute_atkin_lehner_matrix(self, d):
r"""
When the underlying coefficient module is finite, this
function computes the matrix of an Atkin-Lehner involution in
the basis provided by the function basis_matrix
INPUT:
- ``d`` - an integer dividing p*Nminus*Nplus, where these
quantities are associated to the BruhatTitsQuotient self._X
OUTPUT:
- The matrix of the Atkin-Lehner involution at ``d`` in the basis given by
self.basis_matrix
EXAMPLES::
sage: X = BruhatTitsQuotient(5,13)
sage: H = X.harmonic_cocycles(2,prec=5)
sage: A = H.atkin_lehner_operator(5).matrix() # indirect doctest
sage: A**2 == 1
True
"""
return self.__compute_operator_matrix(lambda f: self.__apply_atkin_lehner(d, f))
def _compute_hecke_matrix_prime(self, l):
r"""
When the underlying coefficient module is finite, this
function computes the matrix of a (prime) Hecke operator in
the basis provided by the function basis_matrix
INPUT:
- ``l`` - a prime integer
OUTPUT:
- The matrix of `T_l` acting on the cocycles in the basis given by
self.basis_matrix
EXAMPLES::
sage: X = BruhatTitsQuotient(3,11)
sage: H = X.harmonic_cocycles(4,prec=60)
sage: A = H.hecke_operator(7).matrix() # long time, indirect doctest
sage: [o.rational_reconstruction() for o in A.charpoly().coefficients()] # long time
[6496256, 1497856, -109040, -33600, -904, 32, 1]
"""
return self.__compute_operator_matrix(lambda f: self.__apply_hecke_operator(l, f))
def __compute_operator_matrix(self, T):
r"""
Compute the matrix of the operator `T`.
Used primarily to compute matrices of Hecke operators
in a streamlined way.
INPUT:
- ``T`` - A linear function on the space of harmonic cocycles.
OUTPUT:
The matrix of ``T`` acting on the space of harmonic cocycles.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,17)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: A = H.hecke_operator(11).matrix() # indirect doctest
sage: [o.rational_reconstruction() for o in A.charpoly().coefficients()]
[-12, -1, 4, 1]
"""
R = self._R
A = self.basis_matrix().transpose()
basis = self.basis()
B = zero_matrix(R, len(self._E) * (self._k - 1), self.dimension())
for rr in range(len(basis)):
g = T(basis[rr])
B.set_block(0, rr, Matrix(R, len(self._E) * (self._k - 1), 1, [g._F[e].moment(ii) for e in range(len(self._E)) for ii in range(self._k - 1)]))
try:
res = (A.solve_right(B)).transpose()
except ValueError:
rest = (A.transpose() * A).solve_right(A.transpose() * B)
err = A * rest - B
if err != 0:
try:
if hasattr(err.parent().base_ring().an_element(),
'valuation'):
minval = min([o.valuation() for o in err.list()
if o != 0])
else:
minval = sum([RR(o.norm() ** 2) for o in err.list()])
verbose('Error = %s' % minval)
except AttributeError:
verbose('Warning: something did not work in the computation')
res = rest.transpose()
res.set_immutable()
return res
# class BruhatTitsHarmonicCocyclesSubmodule(BruhatTitsHarmonicCocycles,sage.modular.hecke.submodule.HeckeSubmodule):
# r"""
# Submodule of a space of BruhatTitsHarmonicCocycles.
#
# INPUT:
#
# - ``x`` - integer (default: 1) the description of the
# argument x goes here. If it contains multiple lines, all
# the lines after the first need to be indented.
#
# - ``y`` - integer (default: 2) the ...
#
# EXAMPLES::
#
# sage: X = BruhatTitsQuotient(3,17)
# sage: H = X.harmonic_cocycles(2,prec=10)
# sage: N = H.free_module().span([H.an_element().element()])
# sage: H1 = H.submodule(N) # indirect doctest
# sage: H1
# Subspace of Space of harmonic cocycles of weight 2 on Quotient of the Bruhat Tits tree of GL_2(QQ_3) with discriminant 17 and level 1 of dimension 1
#
# AUTHOR:
#
# - Marc Masdeu (2012-02-20)
# """
# def __init__(self, ambient_module, submodule, check):
# """
# Submodule of harmonic cocycles.
#
# INPUT:
#
# - ``ambient_module`` - BruhatTitsHarmonicCocycles
#
# - ``submodule`` - submodule of the ambient space.
#
# - ``check`` - (default: False) whether to check that the
# submodule is Hecke equivariant
#
# EXAMPLES::
#
# sage: X = BruhatTitsQuotient(3,17)
# sage: H = X.harmonic_cocycles(2,prec=10)
# sage: N = H.free_module().span([H.an_element().element()])
# sage: H1 = H.submodule(N)
# sage: TestSuite(H1).run()
# """
# A = ambient_module
# self.__rank = submodule.dimension()
# basis_matrix = submodule.basis_matrix()*A.basis_matrix()
# basis_matrix.set_immutable()
# BruhatTitsHarmonicCocycles.__init__(self,A._X,A._k,A._prec,basis_matrix,A.base_ring())
#
# def rank(self):
# r"""
# Returns the rank (dimension) of the submodule.
#
# OUTPUT:
#
# Integer - The rank of ``self``.
#
# EXAMPLES::
#
# sage: X = BruhatTitsQuotient(3,17)
# sage: H = X.harmonic_cocycles(2,prec=10)
# sage: N = H.free_module().span([H.an_element().element()])
# sage: H1 = H.submodule(basis = [H.an_element()])
# sage: H1.rank()
# 1
# """
# return self.__rank
#
# def _repr_(self):
# r"""
# Returns the representation of self as a string.
#
# OUTPUT:
#
# String representation of self.
#
# EXAMPLES::
#
# sage: X = BruhatTitsQuotient(3,17)
# sage: H = X.harmonic_cocycles(2,prec=10)
# sage: N = H.free_module().span([H.an_element().element()])
# sage: H1=H.submodule(N)
# sage: H1
# Subspace of Space of harmonic cocycles of weight 2 on Quotient of the Bruhat Tits tree of GL_2(QQ_3) with discriminant 17 and level 1 of dimension 1
# """
# return "Subspace of %s of dimension %s"%(self.ambient(),self.dimension())
class pAdicAutomorphicFormElement(ModuleElement):
r"""
Rudimentary implementation of a class for a `p`-adic
automorphic form on a definite quaternion algebra over `\QQ`. These
are required in order to compute moments of measures associated to
harmonic cocycles on the Bruhat-Tits tree using the overconvergent modules
of Darmon-Pollack and Matt Greenberg. See Greenberg's thesis [G]_ for
more details.
INPUT:
- ``vec`` - A preformatted list of data
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: h = H.an_element()
sage: HH = X.padic_automorphic_forms(2,10)
sage: a = HH(h)
sage: a
p-adic automorphic form of cohomological weight 0
REFERENCES:
.. [G] Heegner points and rigid analytic modular forms
Matthew Greenberg
Ph.D. Thesis, McGill University, 2006.
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu
"""
def __init__(self, parent, vec):
"""
Create a pAdicAutomorphicFormElement
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: TestSuite(A.an_element()).run()
"""
self._num_generators = len(parent._list)
self._cached_values = {}
self._R = Qp(parent.prime(), prec=parent._prec)
self._value = [parent._U(v) for v in vec]
ModuleElement.__init__(self, parent)
def _add_(self, g):
r"""
This function adds two `p`-adic automorphic forms.
INPUT:
- ``g`` - a `p`-adic automorphic form
OUTPUT:
- the result of adding ``g`` to self
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: a = A.an_element()
sage: b = a + a # indirect doctest
"""
# Should ensure that self and g are of the same weight and on
# the same curve
vec = [self._value[e] + g._value[e]
for e in range(self._num_generators)]
return self.parent()(vec)
def _sub_(self, g):
r"""
This function subtracts a `p`-adic automorphic form from another.
INPUT:
- ``g`` - a `p`-adic automorphic form
OUTPUT:
- the result of subtracting ``g`` from self
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: a = A.an_element()
sage: b = a - a # indirect doctest
sage: b == 0
True
"""
# Should ensure that self and g are of the same weight and on
# the same curve
vec = [self._value[e] - g._value[e]
for e in range(self._num_generators)]
return self.parent()(vec)
def _richcmp_(self, other, op):
r"""
Test for equality of pAdicAutomorphicForm elements
INPUT:
- ``other`` - Another `p`-automorphic form
EXAMPLES::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: v1 = A(H.basis()[0])
sage: v2 = 3*v1
sage: 2*v1 == v2-v1 # indirect doctest
True
"""
if op not in [op_EQ, op_NE]:
return NotImplemented
b = all(self._value[e] == other._value[e]
for e in range(self._num_generators))
if op == op_EQ:
return b
return not b
def __bool__(self):
"""
Tell whether the form is zero or not.
OUTPUT:
Boolean. ``True`` if self is nonzero, ``False`` otherwise.
EXAMPLES::
sage: X = BruhatTitsQuotient(5,23)
sage: H = X.harmonic_cocycles(4,prec = 20)
sage: A = X.padic_automorphic_forms(4,prec = 20)
sage: v1 = A(H.basis()[1])
sage: bool(v1)
True
sage: v2 = v1-v1
sage: bool(v2)
False
"""
return any(not o.is_zero() for o in self._value)
__nonzero__ = __bool__
def __getitem__(self, e1):
r"""
Evaluate a `p`-adic automorphic form on a matrix in `GL_2(\QQ_p)`.
INPUT:
- ``e1`` - a matrix in `GL_2(\QQ_p)`
OUTPUT:
- the value of self evaluated on ``e1``
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: M = X.harmonic_cocycles(2,prec=5)
sage: A = X.padic_automorphic_forms(2,prec=5)
sage: a = A(M.gen(0))
sage: a[Matrix(ZZ,2,2,[1,2,3,4])]
8 + 8*17 + 8*17^2 + 8*17^3 + 8*17^4 + O(17^5)
"""
return self.evaluate(e1)
def evaluate(self, e1):
r"""
Evaluate a `p`-adic automorphic form on a matrix in `GL_2(\QQ_p)`.
INPUT:
- ``e1`` - a matrix in `GL_2(\QQ_p)`
OUTPUT:
- the value of self evaluated on ``e1``
EXAMPLES::
sage: X = BruhatTitsQuotient(7,5)
sage: M = X.harmonic_cocycles(2,prec=5)
sage: A = X.padic_automorphic_forms(2,prec=5)
sage: a = A(M.basis()[0])
sage: a.evaluate(Matrix(ZZ,2,2,[1,2,3,1]))
4 + 6*7 + 6*7^2 + 6*7^3 + 6*7^4 + O(7^5)
sage: a.evaluate(Matrix(ZZ,2,2,[17,0,0,1]))
1 + O(7^5)
"""
X = self.parent()._source
p = self.parent().prime()
u = DoubleCosetReduction(X, e1)
tmp = ((u.t(self.parent()._U.base_ring().precision_cap())) * p ** (u.power)).adjoint()
S0 = self.parent()._Sigma0
return S0(tmp, check=False) * self._value[u.label]
# Warning! Should remove check=False...
def _lmul_(self, a):
r"""
Multiply the automorphic form by a scalar.
INPUT:
- a scalar
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: M = X.harmonic_cocycles(2,prec=5)
sage: A = X.padic_automorphic_forms(2,prec=5)
sage: a = A(M.basis()[0])
sage: a.evaluate(Matrix(ZZ,2,2,[1,2,3,4]))
8 + 8*17 + 8*17^2 + 8*17^3 + 8*17^4 + O(17^5)
sage: b = 2*a # indirect doctest
sage: b.evaluate(Matrix(ZZ,2,2,[1,2,3,4]))
16 + 16*17 + 16*17^2 + 16*17^3 + 16*17^4 + O(17^5)
"""
# Should ensure that 'a' is a scalar
return self.parent()([a * self._value[e]
for e in range(self._num_generators)])
def _repr_(self):
r"""
This returns the representation of self as a string.
If self corresponds to a modular form of weight `k`, then the
cohomological weight is `k-2`.
OUTPUT:
A string.
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: a = A.an_element()
sage: a # indirect doctest
p-adic automorphic form of cohomological weight 0
"""
return 'p-adic automorphic form of cohomological weight %s' % self.parent()._U.weight()
def valuation(self):
r"""
The valuation of ``self``, defined as the minimum of the
valuations of the values that it takes on a set of edge
representatives.
OUTPUT:
An integer.
EXAMPLES::
sage: X = BruhatTitsQuotient(17,3)
sage: M = X.harmonic_cocycles(2,prec=10)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: a = A(M.gen(0))
sage: a.valuation()
0
sage: (17*a).valuation()
1
"""
return min(self._value[e].valuation()
for e in range(self._num_generators))
def _improve(self, hc):
r"""
Repeatedly apply the `U_p` operator to a `p`-adic
automorphic form. This is used to compute moments of a measure
associated to a rigid modular form in the following way: lift
a rigid modular form to an overconvergent `p`-adic
automorphic form in any way, and then repeatedly apply `U_p`
to project to the ordinary part. The resulting form encodes
the moments of the measure of the original rigid modular form
(assuming it is ordinary).
EXAMPLES::
sage: X = BruhatTitsQuotient(7,2)
sage: H = X.harmonic_cocycles(2,prec = 10)
sage: h = H.gen(0)
sage: A = X.padic_automorphic_forms(2,prec = 10,overconvergent=True)
sage: a = A.lift(h) # indirect doctest
REFERENCES:
For details see [G]_. Alternatively, one can look at
[DP]_ for the analogous algorithm in the case of modular symbols.
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu
"""
MMM = self.parent()
U = MMM._U
S0 = MMM._Sigma0
h1 = MMM([o.lift(M=MMM.precision_cap()) for o in self._value])
h2 = MMM._apply_Up_operator(h1, True)
verbose("Applied Up once")
ii = 0
current_val = 0
init_val = self.valuation()
old_val = init_val - 1
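# Keep applying U_p until the valuation of the difference between successive
# iterates stops increasing; at that point the sequence has converged (to
# working precision) to the ordinary projection.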
while current_val > old_val:
old_val = current_val
ii += 1
h1._value = [U(c) for c in h2._value]
h2 = MMM._apply_Up_operator(h1, True)
current_val = (h2 - h1).valuation() - init_val
verbose('val = %s' % current_val)
if current_val is Infinity:
break
verbose('Applied Up %s times' % (ii + 1))
return h2
def integrate(self, f, center=1, level=0, method='moments'):
r"""
Calculate
.. MATH::
\int_{\mathbf{P}^1(\QQ_p)} f(x)d\mu(x)
where `\mu` is the measure associated to ``self``.
INPUT:
- ``f`` - An analytic function.
- ``center`` - 2x2 matrix over `\QQ_p` (default: 1)
- ``level`` - integer (default: 0)
- ``method`` - string (default: 'moments'). Which method of
integration to use. Either 'moments' or 'riemann_sum'.
EXAMPLES:
Integrating the Poisson kernel against a measure yields a
value of the associated modular form. Such values can be
computed efficiently using the overconvergent method, as long
as one starts with an ordinary form::
sage: X = BruhatTitsQuotient(7,2)
sage: X.genus()
1
Since the genus is 1, the space of weight 2 forms is 1
dimensional. Hence any nonzero form will be a `U_7`
eigenvector. By Jacquet-Langlands and Cerednik-Drinfeld, in
this case the Hecke eigenvalues correspond to that of any
nonzero form on `\Gamma_0(14)` of weight `2`. Such a form is
ordinary at `7`, and so we can apply the overconvergent method
directly to this form without `p`-stabilizing::
sage: H = X.harmonic_cocycles(2,prec = 5)
sage: h = H.gen(0)
sage: A = X.padic_automorphic_forms(2,prec = 5,overconvergent=True)
sage: a = A.lift(h)
sage: a._value[0].moment(2)
2 + 6*7 + 4*7^2 + 4*7^3 + 6*7^4 + O(7^5)
Now that we've lifted our harmonic cocycle to an
overconvergent automorphic form we simply need to define the
Teitelbaum-Poisson Kernel, and then integrate::
sage: Kp.<x> = Qq(49,prec = 5)
sage: z = Kp['z'].gen()
sage: f = 1/(z-x)
sage: a.integrate(f)
(5*x + 5) + (4*x + 4)*7 + (5*x + 5)*7^2 + (5*x + 6)*7^3 + O(7^5)
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu (2012-02-20)
"""
E = self.parent()._source._BT.get_balls(center, level)
R1 = LaurentSeriesRing(f.base_ring(), 'r1', default_prec = self.parent()._U.base_ring().precision_cap() + 1)
R2 = PolynomialRing(f.base_ring(), 'x')
x = R2.gen()
value = 0
ii = 0
if method == 'riemann_sum':
for e in E:
ii += 1
#print(ii,"/",len(E))
exp = ((R1([e[1, 1], e[1, 0]])) ** (self.parent()._U.weight()) * e.determinant() ** (-(self.parent()._U.weight()) / 2)) * f(R1([e[0, 1], e[0, 0]]) / R1([e[1, 1], e[1, 0]]))
#exp = R2([tmp[jj] for jj in range(self.parent()._k-1)])
new = eval_dist_at_powseries(self.evaluate(e), exp.truncate(self.parent()._U.weight() + 1))
value += new
elif method == 'moments':
n = self.parent()._U.weight()
for e in E:
ii += 1
#print(ii,"/",len(E))
a, b, c, d = e.list()
delta = e.determinant()
verbose('%s' % (R2([e[0, 1], e[0, 0]])
/ R2([e[1, 1], e[1, 0]])))
tmp = ((c * x + d) ** n * delta ** -ZZ(n / 2)) * f((a * x + b) / (c * x + d))
exp = R1(tmp.numerator()) / R1(tmp.denominator())
new = eval_dist_at_powseries(self.evaluate(e), exp)
value += new
else:
print('The available methods are either "moments" or "riemann_sum". The latter is only provided for consistency check, and should never be used.')
return False
return value
def modular_form(self, z=None, level=0, method='moments'):
r"""
Return the modular form corresponding to ``self``.
INPUT:
- ``z`` - (default: None). If specified, returns the value of
the form at the point ``z`` in the `p`-adic upper half
plane.
- ``level`` - integer (default: 0). If ``method`` is
'riemann_sum', will use a covering of `P^1(\QQ_p)` with
balls of size `p^{-\mbox{level}}`.
- ``method`` - string (default: ``moments``). It must be
either ``moments`` or ``riemann_sum``.
OUTPUT:
- A function from the `p`-adic upper half plane to `\CC_p`. If
an argument ``z`` was passed, returns instead the value at
that point.
EXAMPLES:
Integrating the Poisson kernel against a measure yields a
value of the associated modular form. Such values can be
computed efficiently using the overconvergent method, as long
as one starts with an ordinary form::
sage: X = BruhatTitsQuotient(7, 2)
sage: X.genus()
1
Since the genus is 1, the space of weight 2 forms is 1
dimensional. Hence any nonzero form will be a `U_7`
eigenvector. By Jacquet-Langlands and Cerednik-Drinfeld, in
this case the Hecke eigenvalues correspond to that of any
nonzero form on `\Gamma_0(14)` of weight `2`. Such a form is
ordinary at `7`, and so we can apply the overconvergent method
directly to this form without `p`-stabilizing::
sage: H = X.harmonic_cocycles(2,prec = 5)
sage: A = X.padic_automorphic_forms(2,prec = 5,overconvergent=True)
sage: f0 = A.lift(H.basis()[0])
Now that we've lifted our harmonic cocycle to an
overconvergent automorphic form, we extract the associated
modular form as a function and test the modular property::
sage: T.<x> = Qq(7^2,prec = 5)
sage: f = f0.modular_form(method = 'moments')
sage: a,b,c,d = X.embed_quaternion(X.get_units_of_order()[1]).change_ring(T.base_ring()).list()
sage: ((c*x + d)^2*f(x)-f((a*x + b)/(c*x + d))).valuation()
5
"""
return self.derivative(z, level, method, order=0)
def derivative(self, z=None, level=0, method='moments', order=1):
r"""
Return the derivative of the modular form corresponding to
``self``.
INPUT:
- ``z`` - (default: None). If specified, evaluates the derivative
at the point ``z`` in the `p`-adic upper half plane.
- ``level`` - integer (default: 0). If ``method`` is
'riemann_sum', will use a covering of `P^1(\QQ_p)` with
balls of size `p^{-\mbox{level}}`.
- ``method`` - string (default: ``moments``). It must be
either ``moments`` or ``riemann_sum``.
- ``order`` - integer (default: 1). The order of the
derivative to be computed.
OUTPUT:
- A function from the `p`-adic upper half plane to `\CC_p`. If
an argument ``z`` was passed, returns instead the value of
the derivative at that point.
EXAMPLES:
Integrating the Poisson kernel against a measure yields a
value of the associated modular form. Such values can be
computed efficiently using the overconvergent method, as long
as one starts with an ordinary form::
sage: X = BruhatTitsQuotient(7, 2)
sage: X.genus()
1
Since the genus is 1, the space of weight 2 forms is 1
dimensional. Hence any nonzero form will be a `U_7`
eigenvector. By Jacquet-Langlands and Cerednik-Drinfeld, in
this case the Hecke eigenvalues correspond to that of any
nonzero form on `\Gamma_0(14)` of weight `2`. Such a form is
ordinary at `7`, and so we can apply the overconvergent method
directly to this form without `p`-stabilizing::
sage: H = X.harmonic_cocycles(2,prec=5)
sage: h = H.gen(0)
sage: A = X.padic_automorphic_forms(2,prec=5,overconvergent=True)
sage: f0 = A.lift(h)
Now that we've lifted our harmonic cocycle to an
overconvergent automorphic form, we extract the associated
modular form as a function and test the modular property::
sage: T.<x> = Qq(49,prec=10)
sage: f = f0.modular_form()
sage: g = X.get_embedding_matrix()*X.get_units_of_order()[1]
sage: a,b,c,d = g.change_ring(T).list()
sage: (c*x +d)^2*f(x)-f((a*x + b)/(c*x + d))
O(7^5)
We can also compute the Shimura-Maass derivative, which is a
        nearly rigid analytic modular form of weight 4::
sage: f = f0.derivative()
sage: (c*x + d)^4*f(x)-f((a*x + b)/(c*x + d))
O(7^5)
"""
def F(z, level=level, method=method):
R = PolynomialRing(z.parent(), 'x,y').fraction_field()
Rx = PolynomialRing(z.parent(), 'x1').fraction_field()
x1 = Rx.gen()
subst = R.hom([x1, z], codomain=Rx)
x, y = R.gens()
center = self.parent()._source._BT.find_containing_affinoid(z)
zbar = z.trace() - z
f = R(1) / (x - y)
k = self.parent()._n + 2
V = [f]
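            # Apply the Shimura-Maass operator `order` times to the Poisson kernel
            # 1/(x - y): each pass replaces every term by its y-derivative together
            # with the weight correction k/(y - zbar), then raises k by 2.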
for ii in range(order):
V = [v.derivative(y) for v in V] + [k / (y - zbar) * v
for v in V]
k += 2
return sum(self.integrate(subst(v), center, level, method)
for v in V)
if z is None:
return F
return F(z, level, method)
# So far we cannot break it into two integrals because of the pole
# at infinity.
def coleman(self, t1, t2, E=None, method='moments', mult=False,
delta=-1):
r"""
If ``self`` is a `p`-adic automorphic form that
corresponds to a rigid modular form, then this computes the
Coleman integral of this form between two points on the
boundary `P^1(\QQ_p)` of the `p`-adic upper half plane.
INPUT:
- ``t1``, ``t2`` - elements of `P^1(\QQ_p)` (the endpoints
of integration)
- ``E`` - (default: None). If specified, will not compute the
covering adapted to ``t1`` and ``t2`` and instead use the
given one. In that case, ``E`` should be a list of matrices
corresponding to edges describing the open balls to be
considered.
- ``method`` - string (default: 'moments'). Tells which
algorithm to use (alternative is 'riemann_sum', which is
unsuitable for computations requiring high precision)
- ``mult`` - boolean (default: False). Whether to compute the
multiplicative version.
OUTPUT:
The result of the Coleman integral
EXAMPLES::
sage: p = 7
sage: lev = 2
sage: prec = 10
sage: X = BruhatTitsQuotient(p,lev, use_magma = True) # optional - magma
sage: k = 2 # optional - magma
sage: M = X.harmonic_cocycles(k,prec) # optional - magma
sage: B = M.basis() # optional - magma
sage: f = 3*B[0] # optional - magma
sage: MM = X.padic_automorphic_forms(k,prec,overconvergent = True) # optional - magma
sage: D = -11 # optional - magma
sage: X.is_admissible(D) # optional - magma
True
sage: K.<a> = QuadraticField(D) # optional - magma
sage: Kp.<g> = Qq(p**2,prec) # optional - magma
sage: P = Kp.gen() # optional - magma
sage: Q = 2+Kp.gen()+ p*(Kp.gen() +1) # optional - magma
sage: F = MM.lift(f) # long time, optional - magma
sage: J0 = F.coleman(P,Q,mult = True) # long time, optional - magma
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu (2012-02-20)
"""
p = self.parent().prime()
K = t1.parent()
R = PolynomialRing(K, 'x')
x = R.gen()
R1 = LaurentSeriesRing(K, 'r1', default_prec=self.parent()._U.base_ring().precision_cap())
r1 = R1.gen()
if E is None:
E = self.parent()._source._BT.find_covering(t1, t2)
# print('Got ', len(E), ' open balls.')
value = 0
ii = 0
value_exp = K(1)
if method == 'riemann_sum':
for e in E:
ii += 1
b = e[0, 1]
d = e[1, 1]
y = (b - d * t1) / (b - d * t2)
poly = R1(y.log()) # R1(our_log(y))
c_e = self.evaluate(e)
new = eval_dist_at_powseries(c_e, poly)
value += new
if mult:
value_exp *= K.teichmuller(y) ** Integer(c_e.moment(0).rational_reconstruction())
elif method == 'moments':
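            # 'moments' method: for each ball, expand the logarithm of the
            # cross-ratio (x - t1)/(x - t2), pulled back by the edge matrix, as a
            # power series and pair it against the stored distribution.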
for e in E:
ii += 1
f = (x - t1) / (x - t2)
a, b, c, d = e.list()
y0 = f(R1([b, a]) / R1([d, c])) # f( (ax+b)/(cx+d) )
y0 = p ** (-y0(ZZ(0)).valuation()) * y0
mu = K.teichmuller(y0(ZZ(0)))
y = y0 / mu - 1
poly = R1(0)
ypow = y
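                # Expand log(y0/mu) = log(1 + y) = sum_{j>=1} (-1)^(j+1) y^j / j
                # as a truncated power series.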
for jj in range(1, R1.default_prec() + 10):
poly += (-1) ** (jj + 1) * ypow / jj
ypow *= y
c_e = self.evaluate(e)
new = eval_dist_at_powseries(c_e, poly)
if hasattr(new, 'degree'):
assert 0
value += new
if mult:
value_exp *= K.teichmuller(((b - d * t1) / (b - d * t2))) ** Integer(c_e.moment(0).rational_reconstruction())
else:
print('The available methods are either "moments" or "riemann_sum". The latter is only provided for consistency check, and should not be used in practice.')
return False
if mult:
return K.teichmuller(value_exp) * value.exp()
return value
class pAdicAutomorphicForms(Module, UniqueRepresentation):
Element = pAdicAutomorphicFormElement
@staticmethod
def __classcall__(cls, domain, U, prec=None, t=None, R=None,
overconvergent=False):
r"""
The module of (quaternionic) `p`-adic automorphic forms.
INPUT:
- ``domain`` - A BruhatTitsQuotient.
- ``U`` -- A distributions module or an integer. If ``U`` is a
distributions module then this creates the relevant space of
automorphic forms. If ``U`` is an integer then the coefficients
are the (`U-2`)nd power of the symmetric representation of
`GL_2(\QQ_p)`.
- ``prec`` -- A precision (default : None). If not None should
be a positive integer.
- ``t`` -- (default : None). The number of additional moments to store. If None, determine
it automatically from ``prec``, ``U`` and the ``overconvergent`` flag.
- ``R`` -- (default : None). If specified, coefficient field of the automorphic forms.
If not specified it defaults to the base ring of the distributions ``U``, or to `Q_p`
with the working precision ``prec``.
- ``overconvergent`` -- Boolean (default = False). If True, will construct overconvergent
`p`-adic automorphic forms. Otherwise it constructs the finite dimensional space of
`p`-adic automorphic forms which is isomorphic to the space of harmonic cocycles.
EXAMPLES:
        The space of weight 2 p-automorphic forms is isomorphic to
the space of scalar valued invariant harmonic cocycles::
sage: X = BruhatTitsQuotient(11,5)
sage: H0 = X.padic_automorphic_forms(2,10)
sage: H1 = X.padic_automorphic_forms(2,prec = 10)
sage: H0 == H1
True
AUTHORS:
- Cameron Franc (2012-02-20)
- Marc Masdeu (2012-02-20)
"""
return super(pAdicAutomorphicForms, cls).__classcall__(cls, domain, U,
prec, t, R,
overconvergent)
def __init__(self, domain, U, prec=None, t=None, R=None,
overconvergent=False):
"""
Create a space of `p`-automorphic forms
EXAMPLES::
sage: X = BruhatTitsQuotient(11,5)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: TestSuite(A).run()
"""
if R is None:
if not isinstance(U, Integer):
self._R = U.base_ring()
else:
if prec is None:
prec = 100
self._R = Qp(domain._p, prec)
else:
self._R = R
#U is a CoefficientModuleSpace
if isinstance(U, Integer):
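            # U is an integer giving the weight: build either an overconvergent
            # distribution module (with t extra moments) or the classical
            # Sym^(U-2) coefficient module.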
if t is None:
if overconvergent:
t = prec - U + 1
else:
t = 0
if overconvergent:
self._U = OverconvergentDistributions(U - 2, base=self._R,
prec_cap=U - 1 + t,
act_on_left=True,
adjuster=_btquot_adjuster(),
dettwist=-ZZ((U - 2) / 2),
act_padic=True)
else:
self._U = Symk(U - 2, base=self._R, act_on_left=True,
adjuster=_btquot_adjuster(),
dettwist=-ZZ((U - 2) / 2),
act_padic=True)
else:
self._U = U
self._source = domain
self._list = self._source.get_list() # Contains also the opposite edges
self._prec = self._R.precision_cap()
self._n = self._U.weight()
self._p = self._source._p
self._Sigma0 = self._U._act._Sigma0
Module.__init__(self, base=self._R)
self._populate_coercion_lists_()
def prime(self):
"""
Return the underlying prime.
OUTPUT:
- ``p`` - a prime integer
EXAMPLES::
sage: X = BruhatTitsQuotient(11,5)
sage: H = X.harmonic_cocycles(2,prec = 10)
sage: A = X.padic_automorphic_forms(2,prec = 10)
sage: A.prime()
11
"""
return self._p
def zero_element(self):
r"""
Return the zero element of ``self``.
EXAMPLES::
sage: X = BruhatTitsQuotient(5, 7)
sage: H1 = X.padic_automorphic_forms( 2, prec=10)
sage: H1.zero_element() == 0
True
"""
return self.element_class(self, [self._U(0) for o in self._list])
def __eq__(self, other):
r"""
Test whether two pAdicAutomorphicForm spaces are equal.
INPUT:
- ``other`` -- another space of `p`-automorphic forms.
OUTPUT:
A boolean value
EXAMPLES::
sage: X = BruhatTitsQuotient(5,7)
sage: H1 = X.padic_automorphic_forms(2,prec = 10)
sage: H2 = X.padic_automorphic_forms(2,prec = 10)
sage: H1 == H2
True
"""
if not isinstance(other, pAdicAutomorphicForms):
return False
return (self.base_ring() == other.base_ring() and
self._source == other._source and
self._U == other._U)
def __ne__(self, other):
r"""
Test whether two pAdicAutomorphicForm spaces are not equal.
INPUT:
- ``other`` -- another space of `p`-automorphic forms.
OUTPUT:
A boolean value
EXAMPLES::
sage: X = BruhatTitsQuotient(5,7)
sage: H1 = X.padic_automorphic_forms(2,prec = 10)
sage: H2 = X.padic_automorphic_forms(2,prec = 10)
            sage: H1 != H2
            False
"""
return not self.__eq__(other)
def _repr_(self):
r"""
Return the representation of self as a string.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,7)
sage: A = X.padic_automorphic_forms(2,prec = 10)
sage: A # indirect doctest
Space of automorphic forms on Quotient of the Bruhat Tits tree of GL_2(QQ_3) with discriminant 7 and level 1 with values in Sym^0 Q_3^2
"""
s = 'Space of automorphic forms on '
s += str(self._source)
s += ' with values in ' + str(self._U)
return s
def _coerce_map_from_(self, S):
r"""
Can coerce from other BruhatTitsHarmonicCocycles or from pAdicAutomorphicForms
INPUT:
- ``S`` - a BruhatTitsHarmonicCocycle or pAdicAutomorphicForm
OUTPUT:
        A boolean value. True if and only if ``S`` is coercible into self.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,7)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: A._coerce_map_from_(H)
True
"""
if isinstance(S, BruhatTitsHarmonicCocycles):
if S.weight() - 2 != self._n:
return False
if S._X != self._source:
return False
return True
if isinstance(S, pAdicAutomorphicForms):
if S._n != self._n:
return False
if S._source != self._source:
return False
return True
return False
def _element_constructor_(self, data):
r"""
Construct a `p`-automorphic form.
INPUT:
- ``data`` - defining data. Can be either a harmonic cocycle, or a `p`-adic automorphic form,
or a list of elements coercible into the module of coefficients of ``self``.
OUTPUT:
A `p`-adic automorphic form.
EXAMPLES::
sage: X = BruhatTitsQuotient(13,5)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: h=H.an_element() # indirect doctest
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: A(h)
p-adic automorphic form of cohomological weight 0
"""
# Code how to coerce x into the space
# Admissible values of x?
if type(data) is list:
return self.element_class(self, [self._U(o, normalize=False) for o in data])
if isinstance(data, pAdicAutomorphicFormElement):
vals = [self._U(o, normalize=False) for o in data._value]
return self.element_class(self, vals)
if isinstance(data, BruhatTitsHarmonicCocycleElement):
E = self._list
tmp = []
F = []
Uold = data.parent()._U
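            # Transport the cocycle values to the chosen edge representatives; the
            # values on the opposite edges (also present in self._list) are obtained
            # by acting with A = [[0, 1/p], [1, 0]] and changing sign.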
for ii in range(len(data._F)):
newtmp = data.parent()._Sigma0(E[ii].rep.inverse(), check=False) * Uold(data._F[ii],normalize=False)
tmp.append(newtmp)
F.append(newtmp)
A = data.parent()._Sigma0(Matrix(QQ,2,2,[0,1/self.prime(),1,0]),check=False)
for ii in range(len(data._F)):
F.append(-(A * tmp[ii]))
vals = self._make_invariant([self._U(o,normalize=False) for o in F])
return self.element_class(self, vals)
if data == 0:
return self.zero_element()
def _an_element_(self):
r"""
Return an element of the module.
OUTPUT:
        A `p`-adic automorphic form.
EXAMPLES::
sage: X = BruhatTitsQuotient(13,5)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: A.an_element() # indirect doctest
p-adic automorphic form of cohomological weight 0
"""
return self(0)
def precision_cap(self):
"""
Return the precision of self.
OUTPUT:
An integer.
EXAMPLES::
sage: X = BruhatTitsQuotient(13,11)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: A.precision_cap()
10
"""
return self._prec
def lift(self, f):
r"""
Lift the harmonic cocycle ``f`` to a p-automorphic form.
If one is using overconvergent coefficients, then this will
compute all of the moments of the measure associated to ``f``.
INPUT:
- ``f`` - a harmonic cocycle
OUTPUT:
A `p`-adic automorphic form
EXAMPLES:
If one does not work with an overconvergent form then lift
does nothing::
sage: X = BruhatTitsQuotient(13,5)
sage: H = X.harmonic_cocycles(2,prec=10)
sage: h = H.gen(0)
sage: A = X.padic_automorphic_forms(2,prec=10)
sage: A.lift(h) # long time
p-adic automorphic form of cohomological weight 0
With overconvergent forms, the input is lifted naively and its
moments are computed::
sage: X = BruhatTitsQuotient(13,11)
sage: H = X.harmonic_cocycles(2,prec=5)
sage: A2 = X.padic_automorphic_forms(2,prec=5,overconvergent=True)
sage: a = H.gen(0)
sage: A2.lift(a) # long time
p-adic automorphic form of cohomological weight 0
"""
return self(f)._improve(f)
def _make_invariant(self, F):
r"""
Naively lift a ``classical`` automorphic form to an
overconvergent form.
INPUT:
- ``F`` - a classical (nonoverconvergent) pAdicAutomorphicForm or
BruhatTitsHarmonicCocycle.
OUTPUT:
An overconvergent pAdicAutomorphicForm
EXAMPLES::
sage: X = BruhatTitsQuotient(13,11)
sage: H = X.harmonic_cocycles(2,prec = 5)
sage: A = X.padic_automorphic_forms(2,prec = 5)
sage: h = H.basis()[0]
sage: A.lift(h) # indirect doctest long time
p-adic automorphic form of cohomological weight 0
"""
S = self._source.get_stabilizers()
M = [e.rep for e in self._list]
newF = []
for ii in range(len(S)):
Si = S[ii]
x = self._U(F[ii], normalize=False)
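            # If the edge has a nontrivial stabilizer, average x over the
            # stabilizer elements so that the resulting value is invariant.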
if any(v[2] for v in Si):
newFi = self._U(0)
s = QQ(0)
m = M[ii]
for v in Si:
s += 1
g = self._Sigma0(m.adjoint() * self._source.embed_quaternion(v[0], prec=self._prec).adjoint() * m,check = False)
newFi += g * x
newF.append((QQ(1) / s) * newFi)
else:
newF.append(self._U(x,normalize=False))
return newF
def _apply_Up_operator(self, f, scale=False, original_moments=None):
r"""
Apply the Up operator to ``f``.
INPUT:
- f -- a `p`-adic automorphic form.
        - scale -- (default: False) whether to scale by the appropriate power of `p`
at each iteration.
EXAMPLES::
sage: X = BruhatTitsQuotient(3,11)
sage: M = X.harmonic_cocycles(4,10)
sage: A = X.padic_automorphic_forms(4,10, overconvergent = True)
sage: F = A.lift(M.basis()[0]); F # indirect doctest
p-adic automorphic form of cohomological weight 2
"""
HeckeData = self._source._get_Up_data()
S0 = f._value[0].parent()._act._Sigma0
prec_cap = self._U.base_ring().precision_cap()
if not scale:
factor = self._p ** (self._U.weight() // 2)
else:
factor = 1
# Save original moments
if original_moments is None:
original_moments = [[fval._moments[ii] for ii in range(self._n + 1)]
for fval in f._value]
Tf = []
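        # Apply U_p edge by edge: each new value is a sum of translates of the
        # values lying over the edge, after which the first n+1 moments are reset
        # to their original values (only the higher moments get improved).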
for jj in range(len(self._list)):
tmp = self._U(0,normalize=False)
for gg, edge_list in HeckeData:
u = edge_list[jj]
tprec = 2 * (prec_cap + u.power) + 1
r = S0(self._p ** -u.power * (u.t(tprec) * gg).adjoint(),check=False)
tmp += r * f._value[u.label]
tmp *= factor
for ii in range(self._n + 1):
tmp._moments[ii] = original_moments[jj][ii]
Tf.append(tmp)
return self(Tf)
|
py | 1a318cc1218bff537b590adfb7bcf41f02a0a9de | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.interfaces.connections.requests.base_request import BaseRequest
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
from ask_sdk_model.interfaces.connections.entities.restaurant import Restaurant
class ScheduleFoodEstablishmentReservationRequest(BaseRequest):
"""
ScheduleFoodEstablishmentReservationRequest for booking restaurant reservation
:param version: version of the request
:type version: (optional) str
:param start_time: start time of the reservation
:type start_time: (optional) str
:param party_size: party size
:type party_size: (optional) str
:param restaurant: restaurant
:type restaurant: (optional) ask_sdk_model.interfaces.connections.entities.restaurant.Restaurant
"""
deserialized_types = {
'object_type': 'str',
'version': 'str',
'start_time': 'str',
'party_size': 'str',
'restaurant': 'ask_sdk_model.interfaces.connections.entities.restaurant.Restaurant'
} # type: Dict
attribute_map = {
'object_type': '@type',
'version': '@version',
'start_time': 'startTime',
'party_size': 'partySize',
'restaurant': 'restaurant'
} # type: Dict
def __init__(self, version=None, start_time=None, party_size=None, restaurant=None):
# type: (Optional[str], Optional[str], Optional[str], Optional[Restaurant]) -> None
"""ScheduleFoodEstablishmentReservationRequest for booking restaurant reservation
:param version: version of the request
:type version: (optional) str
:param start_time: start time of the reservation
:type start_time: (optional) str
:param party_size: party size
:type party_size: (optional) str
:param restaurant: restaurant
:type restaurant: (optional) ask_sdk_model.interfaces.connections.entities.restaurant.Restaurant
"""
self.__discriminator_value = "ScheduleFoodEstablishmentReservationRequest" # type: str
self.object_type = self.__discriminator_value
super(ScheduleFoodEstablishmentReservationRequest, self).__init__(object_type=self.__discriminator_value, version=version)
self.start_time = start_time
self.party_size = party_size
self.restaurant = restaurant
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
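        # Serialize each declared attribute, unwrapping Enum values and calling
        # to_dict() on nested model objects inside lists and dicts.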
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, ScheduleFoodEstablishmentReservationRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a318cffcaa9c95920f766a683b2d49c2a007457 | #!/usr/bin/env python
"""
Copyright (c) 2017, Lars Niklasson
Copyright (c) 2017, Filip Slottner Seholm
Copyright (c) 2017, Fanny Sandblom
Copyright (c) 2017, Kevin Hoogendijk
Copyright (c) 2017, Nils Andren
Copyright (c) 2017, Alicia Gil Martin
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Chalmers University of Technology nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import rospy
from ackermann_msgs.msg import AckermannDrive
#from truck_hw_api import interpolate
from sensor_msgs.msg import Joy
from std_msgs.msg import Bool
from math import *
from converter import *
from controls import *
# Subscribes to joy messages and publishes appropriate steering and speed commands,
# based on a control scheme. Also handles safety buttons and a switch for automatic driving.
class GamepadNode:
def __init__(self):
self.no_dms_count = 0
print "sleeping for 1 sec"
rospy.sleep(1)
min_angle = rospy.get_param('min_angle', -21)
max_angle = rospy.get_param('max_angle', 16)
min_speed = rospy.get_param('min_speed', -1)
max_speed = rospy.get_param('max_speed', 1.4)
gamepad_rate = rospy.get_param('gamepad/rate', 50)
self.converter = Converter(gamepad_rate, min_angle, max_angle, min_speed, max_speed)
self.gamepad = rospy.get_param('gamepad/type', DEFAULT_GAMEPAD).lower()
if not self.gamepad in gamepads.keys():
self.gamepad = DEFAULT_GAMEPAD
self.manualDrivePublisher = rospy.Publisher('man_drive', AckermannDrive, queue_size=10)
self.autoCtrlPublisher = rospy.Publisher('auto_ctrl', Bool, queue_size=10)
self.dmsPublisher = rospy.Publisher('dead_mans_switch', Bool, queue_size=10)
self.journeyStartPublisher = rospy.Publisher('start_journey', Bool, queue_size=10)
rospy.init_node('gamepad', anonymous=False)
rospy.Subscriber('joy', Joy, self.callback)
rospy.loginfo("init done, subscribed to /joy and publishes to several topics")
def callback(self,data):
#dict with key = button, value = input
try:
#raises gamepad map format error
buttons = getButtons(data, self.gamepad)
#convert button input to driving commands, etc
commands = self.converter.getDriveCommands(buttons)
js_ret = commands['journey_start']
newSpeed = commands['speed']
newAngle = commands['angle']
deadMansSwitch = commands['dms']
autoCtrl = commands['auto_mode']
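            # Always publish the dead man's switch and auto/manual state; only
            # publish manual driving commands when the dead man's switch is held
            # and automatic mode is off.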
if js_ret:
js_msg = Bool()
js_msg.data = True
self.journeyStartPublisher.publish(js_msg)
dms = Bool()
dms.data = deadMansSwitch
self.dmsPublisher.publish(dms)
ac = Bool()
ac.data = autoCtrl
self.autoCtrlPublisher.publish(ac)
#only publish if needed
if deadMansSwitch and (not autoCtrl):
ack = AckermannDrive()
ack.steering_angle = newAngle
ack.speed = newSpeed
self.manualDrivePublisher.publish(ack)
except GamepadMapFormatError:
rospy.logfatal("%s, shutting down", GamepadMapFormatError.message)
exit(0)
if __name__ == '__main__':
j = GamepadNode()
rospy.spin()
|
py | 1a318edbe9a091de9e97e94324768401d9cc60bc | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Matplotlib classes for pulse visualization."""
import collections
import numpy as np
try:
from matplotlib import pyplot as plt, gridspec
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
from qiskit.visualization.pulse.qcstyle import PulseStyle, SchedStyle
from qiskit.visualization.pulse import interpolation
from qiskit.pulse.channels import (DriveChannel, ControlChannel,
MeasureChannel, AcquireChannel,
SnapshotChannel)
from qiskit.pulse import (SamplePulse, FrameChange, PersistentValue, Snapshot,
Acquire, PulseError)
class EventsOutputChannels:
"""Pulse dataset for channel."""
def __init__(self, t0, tf):
"""Create new channel dataset.
Args:
t0 (int): starting time of plot
tf (int): ending time of plot
"""
self.pulses = {}
self.t0 = t0
self.tf = tf
self._waveform = None
self._framechanges = None
self._conditionals = None
self._snapshots = None
self._labels = None
self.enable = False
def add_instruction(self, start_time, pulse):
"""Add new pulse instruction to channel.
Args:
start_time (int): Starting time of instruction
pulse (Instruction): Instruction object to be added
"""
if start_time in self.pulses.keys():
self.pulses[start_time].append(pulse.command)
else:
self.pulses[start_time] = [pulse.command]
@property
def waveform(self):
"""Get waveform."""
if self._waveform is None:
self._build_waveform()
return self._waveform[self.t0:self.tf]
@property
def framechanges(self):
"""Get frame changes."""
if self._framechanges is None:
self._build_waveform()
return self._trim(self._framechanges)
@property
def conditionals(self):
"""Get conditionals."""
if self._conditionals is None:
self._build_waveform()
return self._trim(self._conditionals)
@property
def snapshots(self):
"""Get snapshots."""
if self._snapshots is None:
self._build_waveform()
return self._trim(self._snapshots)
@property
def labels(self):
"""Get labels."""
if self._labels is None:
self._build_waveform()
return self._trim(self._labels)
def is_empty(self):
"""Return if pulse is empty.
Returns:
bool: if the channel has nothing to plot
"""
if any(self.waveform) or self.framechanges or self.conditionals or self.snapshots:
return False
return True
def to_table(self, name):
"""Get table contains.
Args:
name (str): name of channel
Returns:
            list: list of events in the channel
"""
time_event = []
framechanges = self.framechanges
conditionals = self.conditionals
snapshots = self.snapshots
for key, val in framechanges.items():
data_str = 'framechange: %.2f' % val
time_event.append((key, name, data_str))
for key, val in conditionals.items():
data_str = 'conditional, %s' % val
time_event.append((key, name, data_str))
for key, val in snapshots.items():
data_str = 'snapshot: %s' % val
time_event.append((key, name, data_str))
return time_event
def _build_waveform(self):
"""Create waveform from stored pulses.
"""
self._framechanges = {}
self._conditionals = {}
self._snapshots = {}
self._labels = {}
fc = 0
pv = np.zeros(self.tf + 1, dtype=np.complex128)
wf = np.zeros(self.tf + 1, dtype=np.complex128)
last_pv = None
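        # Walk through the stored commands in time order, accumulating the frame
        # change phase `fc` and filling the sample (wf) and persistent-value (pv)
        # arrays; labels and snapshots are recorded along the way.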
for time, commands in sorted(self.pulses.items()):
if time > self.tf:
break
tmp_fc = 0
for command in commands:
if isinstance(command, FrameChange):
tmp_fc += command.phase
pv[time:] = 0
elif isinstance(command, Snapshot):
self._snapshots[time] = command.name
if tmp_fc != 0:
self._framechanges[time] = tmp_fc
fc += tmp_fc
for command in commands:
if isinstance(command, PersistentValue):
pv[time:] = np.exp(1j*fc) * command.value
last_pv = (time, command)
break
for command in commands:
duration = command.duration
tf = min(time + duration, self.tf)
if isinstance(command, SamplePulse):
wf[time:tf] = np.exp(1j*fc) * command.samples[:tf-time]
pv[time:] = 0
self._labels[time] = (tf, command)
if last_pv is not None:
pv_cmd = last_pv[1]
self._labels[last_pv[0]] = (time, pv_cmd)
last_pv = None
elif isinstance(command, Acquire):
wf[time:tf] = np.ones(tf - time)
self._labels[time] = (tf, command)
self._waveform = wf + pv
def _trim(self, events):
"""Return events during given `time_range`.
Args:
events (dict): time and operation of events
Returns:
dict: dictionary of events within the time
"""
events_in_time_range = {}
for k, v in events.items():
if self.t0 <= k <= self.tf:
events_in_time_range[k] = v
return events_in_time_range
class SamplePulseDrawer:
"""A class to create figure for sample pulse."""
def __init__(self, style):
"""Create new figure.
Args:
style (PulseStyle): style sheet
"""
self.style = style or PulseStyle()
def draw(self, pulse, dt, interp_method, scaling=1):
"""Draw figure.
Args:
pulse (SamplePulse): SamplePulse to draw
dt (float): time interval
interp_method (Callable): interpolation function
See `qiskit.visualization.interpolation` for more information
scaling (float): Relative visual scaling of waveform amplitudes
Returns:
matplotlib.figure: A matplotlib figure object of the pulse envelope
"""
figure = plt.figure()
interp_method = interp_method or interpolation.step_wise
figure.set_size_inches(self.style.figsize[0], self.style.figsize[1])
ax = figure.add_subplot(111)
ax.set_facecolor(self.style.bg_color)
samples = pulse.samples
time = np.arange(0, len(samples) + 1, dtype=float) * dt
time, re, im = interp_method(time, samples, self.style.num_points)
# plot
ax.fill_between(x=time, y1=re, y2=np.zeros_like(time),
facecolor=self.style.wave_color[0], alpha=0.3,
edgecolor=self.style.wave_color[0], linewidth=1.5,
label='real part')
ax.fill_between(x=time, y1=im, y2=np.zeros_like(time),
facecolor=self.style.wave_color[1], alpha=0.3,
edgecolor=self.style.wave_color[1], linewidth=1.5,
label='imaginary part')
ax.set_xlim(0, pulse.duration * dt)
if scaling:
ax.set_ylim(-scaling, scaling)
else:
v_max = max(max(np.abs(re)), max(np.abs(im)))
ax.set_ylim(-1.2 * v_max, 1.2 * v_max)
return figure
class ScheduleDrawer:
"""A class to create figure for schedule and channel."""
def __init__(self, style):
"""Create new figure.
Args:
style (SchedStyle): style sheet
"""
self.style = style or SchedStyle()
def _build_channels(self, schedule, channels_to_plot, t0, tf):
# prepare waveform channels
drive_channels = collections.OrderedDict()
measure_channels = collections.OrderedDict()
control_channels = collections.OrderedDict()
acquire_channels = collections.OrderedDict()
snapshot_channels = collections.OrderedDict()
_channels = list(schedule.channels) + channels_to_plot
_channels = list(set(_channels))
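        # Group every channel appearing in the schedule (plus any explicitly
        # requested ones) by type, each with an empty event container over [t0, tf].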
for chan in _channels:
if isinstance(chan, DriveChannel):
try:
drive_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, MeasureChannel):
try:
measure_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, ControlChannel):
try:
control_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, AcquireChannel):
try:
acquire_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, SnapshotChannel):
try:
snapshot_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
output_channels = {**drive_channels, **measure_channels,
**control_channels, **acquire_channels}
channels = {**output_channels, **acquire_channels, **snapshot_channels}
# sort by index then name to group qubits together.
output_channels = collections.OrderedDict(sorted(output_channels.items(),
key=lambda x: (x[0].index, x[0].name)))
channels = collections.OrderedDict(sorted(channels.items(),
key=lambda x: (x[0].index, x[0].name)))
for start_time, instruction in schedule.instructions:
for channel in instruction.channels:
if channel in output_channels:
output_channels[channel].add_instruction(start_time, instruction)
elif channel in snapshot_channels:
snapshot_channels[channel].add_instruction(start_time, instruction)
return channels, output_channels, snapshot_channels
def _count_valid_waveforms(self, channels, scaling=1, channels_to_plot=None,
plot_all=False):
# count numbers of valid waveform
n_valid_waveform = 0
v_max = 0
for channel, events in channels.items():
if channels_to_plot:
if channel in channels_to_plot:
waveform = events.waveform
v_max = max(v_max,
max(np.abs(np.real(waveform))),
max(np.abs(np.imag(waveform))))
n_valid_waveform += 1
events.enable = True
else:
if not events.is_empty() or plot_all:
waveform = events.waveform
v_max = max(v_max,
max(np.abs(np.real(waveform))),
max(np.abs(np.imag(waveform))))
n_valid_waveform += 1
events.enable = True
# when input schedule is empty or comprises only frame changes,
# we need to overwrite maximum amplitude by a value greater than zero,
# otherwise auto axis scaling will fail with zero division.
v_max = v_max or 1
if scaling:
v_max = 0.5 * scaling
else:
v_max = 0.5 / (1.2 * v_max)
return n_valid_waveform, v_max
# pylint: disable=unused-argument
def _draw_table(self, figure, channels, dt, n_valid_waveform):
# create table
table_data = []
if self.style.use_table:
for channel, events in channels.items():
if events.enable:
table_data.extend(events.to_table(channel.name))
table_data = sorted(table_data, key=lambda x: x[0])
# plot table
if table_data:
# table area size
ncols = self.style.table_columns
nrows = int(np.ceil(len(table_data)/ncols))
# fig size
h_table = nrows * self.style.fig_unit_h_table
h_waves = (self.style.figsize[1] - h_table)
# create subplots
gs = gridspec.GridSpec(2, 1, height_ratios=[h_table, h_waves], hspace=0)
tb = plt.subplot(gs[0])
ax = plt.subplot(gs[1])
# configure each cell
tb.axis('off')
cell_value = [['' for _kk in range(ncols * 3)] for _jj in range(nrows)]
cell_color = [self.style.table_color * ncols for _jj in range(nrows)]
cell_width = [*([0.2, 0.2, 0.5] * ncols)]
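            # Fill the table in column-major order; each event occupies three
            # cells: time, channel name and description.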
for ii, data in enumerate(table_data):
# pylint: disable=unbalanced-tuple-unpacking
r, c = np.unravel_index(ii, (nrows, ncols), order='f')
# pylint: enable=unbalanced-tuple-unpacking
time, ch_name, data_str = data
# item
                cell_value[r][3 * c + 0] = 't = %s' % (time * dt)
cell_value[r][3 * c + 1] = 'ch %s' % ch_name
cell_value[r][3 * c + 2] = data_str
table = tb.table(cellText=cell_value,
cellLoc='left',
rowLoc='center',
colWidths=cell_width,
bbox=[0, 0, 1, 1],
cellColours=cell_color)
table.auto_set_font_size(False)
            table.set_fontsize(self.style.table_font_size)
else:
ax = figure.add_subplot(111)
figure.set_size_inches(self.style.figsize[0], self.style.figsize[1])
return ax
def _draw_snapshots(self, ax, snapshot_channels, dt, y0):
for events in snapshot_channels.values():
snapshots = events.snapshots
if snapshots:
for time in snapshots:
ax.annotate(s=u"\u25D8", xy=(time*dt, y0), xytext=(time*dt, y0+0.08),
arrowprops={'arrowstyle': 'wedge'}, ha='center')
def _draw_framechanges(self, ax, fcs, dt, y0):
framechanges_present = True
for time in fcs.keys():
ax.text(x=time*dt, y=y0, s=r'$\circlearrowleft$',
fontsize=self.style.icon_font_size,
ha='center', va='center')
return framechanges_present
def _get_channel_color(self, channel):
# choose color
if isinstance(channel, DriveChannel):
color = self.style.d_ch_color
elif isinstance(channel, ControlChannel):
color = self.style.u_ch_color
elif isinstance(channel, MeasureChannel):
color = self.style.m_ch_color
elif isinstance(channel, AcquireChannel):
color = self.style.a_ch_color
else:
color = 'black'
return color
def _prev_label_at_time(self, prev_labels, time):
for _, labels in enumerate(prev_labels):
for t0, (tf, _) in labels.items():
if time in (t0, tf):
return True
return False
def _draw_labels(self, ax, labels, prev_labels, dt, y0):
for t0, (tf, cmd) in labels.items():
if isinstance(cmd, PersistentValue):
name = cmd.name if cmd.name else 'pv'
elif isinstance(cmd, Acquire):
name = cmd.name if cmd.name else 'acquire'
else:
name = cmd.name
ax.annotate(r'%s' % name,
xy=((t0+tf)//2*dt, y0),
xytext=((t0+tf)//2*dt, y0-0.07),
fontsize=self.style.label_font_size,
ha='center', va='center')
linestyle = self.style.label_ch_linestyle
alpha = self.style.label_ch_alpha
color = self.style.label_ch_color
if not self._prev_label_at_time(prev_labels, t0):
ax.axvline(t0*dt, -1, 1, color=color,
linestyle=linestyle, alpha=alpha)
if not (self._prev_label_at_time(prev_labels, tf) or tf in labels):
ax.axvline(tf*dt, -1, 1, color=color,
linestyle=linestyle, alpha=alpha)
def _draw_channels(self, ax, output_channels, interp_method, t0, tf, dt, v_max,
label=False, framechange=True):
y0 = 0
prev_labels = []
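        # Draw every enabled channel on its own horizontal baseline, stepping y0
        # down by one unit per channel.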
for channel, events in output_channels.items():
if events.enable:
# plot waveform
waveform = events.waveform
time = np.arange(t0, tf + 1, dtype=float) * dt
if waveform.any():
time, re, im = interp_method(time, waveform, self.style.num_points)
else:
# when input schedule is empty or comprises only frame changes,
# we should avoid interpolation due to lack of data points.
# instead, it just returns vector of zero.
re, im = np.zeros_like(time), np.zeros_like(time)
color = self._get_channel_color(channel)
# scaling and offset
re = v_max * re + y0
im = v_max * im + y0
offset = np.zeros_like(time) + y0
# plot
ax.fill_between(x=time, y1=re, y2=offset,
facecolor=color[0], alpha=0.3,
edgecolor=color[0], linewidth=1.5,
label='real part')
ax.fill_between(x=time, y1=im, y2=offset,
facecolor=color[1], alpha=0.3,
edgecolor=color[1], linewidth=1.5,
label='imaginary part')
ax.plot((t0, tf), (y0, y0), color='#000000', linewidth=1.0)
# plot frame changes
fcs = events.framechanges
if fcs and framechange:
self._draw_framechanges(ax, fcs, dt, y0)
# plot labels
labels = events.labels
if labels and label:
self._draw_labels(ax, labels, prev_labels, dt, y0)
prev_labels.append(labels)
else:
continue
# plot label
ax.text(x=0, y=y0, s=channel.name,
fontsize=self.style.axis_font_size,
ha='right', va='center')
y0 -= 1
return y0
def draw(self, schedule, dt, interp_method, plot_range,
scaling=1, channels_to_plot=None, plot_all=True,
table=True, label=False, framechange=True):
"""Draw figure.
Args:
schedule (ScheduleComponent): Schedule to draw
dt (float): time interval
interp_method (Callable): interpolation function
See `qiskit.visualization.interpolation` for more information
plot_range (tuple[float]): plot range
scaling (float): Relative visual scaling of waveform amplitudes
channels_to_plot (list[OutputChannel]): channels to draw
            plot_all (bool): whether to plot all channels, even if they are empty
table (bool): Draw event table
label (bool): Label individual instructions
framechange (bool): Add framechange indicators
Returns:
matplotlib.figure: A matplotlib figure object for the pulse schedule
Raises:
VisualizationError: when schedule cannot be drawn
"""
figure = plt.figure()
if not channels_to_plot:
channels_to_plot = []
interp_method = interp_method or interpolation.step_wise
# setup plot range
if plot_range:
t0 = int(np.floor(plot_range[0]/dt))
tf = int(np.floor(plot_range[1]/dt))
else:
t0 = 0
# when input schedule is empty or comprises only frame changes,
# we need to overwrite pulse duration by an integer greater than zero,
            # otherwise the waveform would be an empty array and matplotlib would crash.
tf = schedule.stop_time or 1
# prepare waveform channels
(channels, output_channels,
snapshot_channels) = self._build_channels(schedule, channels_to_plot, t0, tf)
# count numbers of valid waveform
n_valid_waveform, v_max = self._count_valid_waveforms(output_channels, scaling=scaling,
channels_to_plot=channels_to_plot,
plot_all=plot_all)
if table:
ax = self._draw_table(figure, channels, dt, n_valid_waveform)
else:
ax = figure.add_subplot(111)
figure.set_size_inches(self.style.figsize[0], self.style.figsize[1])
ax.set_facecolor(self.style.bg_color)
y0 = self._draw_channels(ax, output_channels, interp_method,
t0, tf, dt, v_max, label=label,
framechange=framechange)
self._draw_snapshots(ax, snapshot_channels, dt, y0)
ax.set_xlim(t0 * dt, tf * dt)
ax.set_ylim(y0, 1)
ax.set_yticklabels([])
return figure
|
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.