"""
key_krumhansl.py
Key analysis: Basic Krumhansl
"""
import music21
from leadreader.analyses.base import BaseAnalysis
class KeyKrumhansl(BaseAnalysis):
"""
http://web.mit.edu/music21/doc/moduleReference/moduleAnalysisDiscrete.html
"""
def name(self):
return 'key_krumhansl'
def description(self):
return ("Determine key using Krumhansl's algorithm and "
"Krumhansl-Shmuckler weighting")
def analyze(self):
""" Run the analysis. """
# using music21 for this for now, but I can see us switching away from
# it when we want to do things it won't let us do
score = music21.converter.parse(self.composition.path)
key = score.analyze('KrumhanslSchmuckler')
self.composition.key_krumhansl = {
'name': key.tonic.name,
'mode': key.mode,
'correlationCoefficient': key.correlationCoefficient
}
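# A minimal usage sketch (an assumption -- the constructor and the `composition`
# object come from BaseAnalysis, which is not shown here):
#
#   analysis = KeyKrumhansl(composition)   # composition.path points at a score file
#   analysis.analyze()
#   composition.key_krumhansl  # e.g. {'name': 'G', 'mode': 'major', 'correlationCoefficient': 0.97}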
|
#!/usr/bin/env python
#-------------------------------------------------------
# Translates between lat/long and the slippy-map tile
# numbering scheme
#
# http://wiki.openstreetmap.org/index.php/Slippy_map_tilenames
#
# Written by Oliver White, 2007
# This file is public-domain
#-------------------------------------------------------
from math import *
from optparse import OptionParser
import json
def numTiles(z):
return(pow(2,z))
def sec(x):
return(1/cos(x))
def latlon2relativeXY(lat,lon):
x = (lon + 180) / 360
y = (1 - log(tan(radians(lat)) + sec(radians(lat))) / pi) / 2
return(x,y)
def latlon2xy(lat,lon,z):
n = numTiles(z)
x,y = latlon2relativeXY(lat,lon)
return(n*x, n*y)
def tileXY(lat, lon, z):
x,y = latlon2xy(lat,lon,z)
return(int(x),int(y))
def xy2latlon(x,y,z):
n = numTiles(z)
relY = y / n
lat = mercatorToLat(pi * (1 - 2 * relY))
lon = -180.0 + 360.0 * x / n
return(lat,lon)
def latEdges(y,z):
n = numTiles(z)
unit = 1 / n
relY1 = y * unit
relY2 = relY1 + unit
lat1 = mercatorToLat(pi * (1 - 2 * relY1))
lat2 = mercatorToLat(pi * (1 - 2 * relY2))
return(lat1,lat2)
def lonEdges(x,z):
n = numTiles(z)
unit = 360 / n
lon1 = -180 + x * unit
lon2 = lon1 + unit
return(lon1,lon2)
def tileEdges(x,y,z):
lat1,lat2 = latEdges(y,z)
lon1,lon2 = lonEdges(x,z)
return((lat2, lon1, lat1, lon2)) # S,W,N,E
def mercatorToLat(mercatorY):
return(degrees(atan(sinh(mercatorY))))
def tileSizePixels():
return(256)
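# Worked example (approximate values): central London, (lat, lon) = (51.5, -0.12),
# at zoom 10 gives x = (179.88 / 360) * 2**10 ~= 511.7 and y ~= 340.5, which
# truncate to the OSM tile 10/511/340:
#
#   >>> tileXY(51.5, -0.12, 10)
#   (511, 340)
#   >>> tileEdges(511, 340, 10)  # (S, W, N, E) edges of that tile in degrees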
def print_pyramid(lat, lon, flip_y=False):
    print("calculating tiles for point %f,%f" % (lat, lon))
    for z in range(0, 21):
        x, y = tileXY(lat, lon, z)
        s, w, n, e = tileEdges(x, y, z)
        if flip_y:
            y = (2 ** z) - y - 1
        print("%d/%d/%d --> %1.5f,%1.5f,%1.5f,%1.5f - %1.5f*%1.5f" % (z, x, y, w, s, e, n, abs(w - e), abs(n - s)))
def print_bbox_pyramid(w, s, e, n, flip_y=False):
    print("calculating tiles for bbox %f,%f,%f,%f" % (w, s, e, n))
    for z in range(0, 21):
        x1, y1 = tileXY(s, w, z)
        x2, y2 = tileXY(n, e, z)
        if flip_y:
            y1 = (2 ** z) - y1 - 1
            y2 = (2 ** z) - y2 - 1
            y1, y2 = y2, y1
        print("z:%d x:%d-%d y:%d-%d %d tiles" % (z, x1, x2, y2, y1, (x2 - x1 + 1) * (y1 - y2 + 1)))
if __name__ == "__main__":
usage = "usage: %prog "
parser = OptionParser(usage=usage,
description="")
parser.add_option("-l", "--latlon", action="store", dest="latlon")
parser.add_option("-t", "--tile", action="store", dest="tile")
parser.add_option("-b", "--bbox", action="store", dest="bbox")
parser.add_option("-y", "--flip-y", action="store_true", dest="flip_y", help="use TMS y origin, not OSM/google")
parser.add_option("-g", "--geojson", action="store_true", dest="geojson", help="Only output geojson")
(options, args) = parser.parse_args()
if options.latlon:
lat, lon = options.latlon.split(',')
print_pyramid(float(lat), float(lon), flip_y=options.flip_y)
elif options.bbox:
w, s, e, n = [float(f) for f in options.bbox.split(",")]
print_bbox_pyramid(w, s, e, n, flip_y=options.flip_y)
elif options.tile:
z, x, y = options.tile.split('/')
z = int(z)
x = int(x)
y = int(y)
if options.flip_y:
y = (2 ** z) - y - 1
s,w,n,e = tileEdges(x,y,z)
geojson_feature = {
"type": "Feature",
"properties": {},
"geometry": {
"type": "Polygon",
"coordinates": [[[w,n], [e,n], [e,s], [w,s], [w, n]]]
}
}
        if options.geojson:
            print(json.dumps(geojson_feature))
        else:
            print("%d/%d/%d --> sw:%1.5f,%1.5f, ne:%1.5f,%1.5f" % (z, x, y, s, w, n, e))
            print("BBOX: (%1.5f,%1.5f,%1.5f,%1.5f)" % (w, s, e, n))
            print("Centroid: %1.5f, %1.5f" % ((w + e) / 2.0, (n + s) / 2.0))
            print("Geojson: " + json.dumps(geojson_feature))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# test_unique_id.py
"""Tests the unique_id function of mop_tinyDB."""
def test_return_unique_id(db_with_3_mops):
"""Test unique_id() returns correct id.""" # noqa
db = db_with_3_mops
new_id = db.unique_id()
assert new_id == 4 # nosec
|
import numpy as np
from lib.explainers import (
get_n_best_features
)
def test_get_n_best_features():
global_shaps = np.array([0.1, 0.5, 1, 7, 8])
n = 5
assert get_n_best_features(n, global_shaps) == [4, 3, 2, 1, 0]
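# A reference sketch of the helper under test (an assumption, not the actual
# lib.explainers implementation): return the indices of the n largest global
# SHAP values, ordered from most to least important.
def _reference_get_n_best_features(n, global_shaps):
    # np.argsort sorts ascending, so reverse it and keep the first n indices
    return list(np.argsort(global_shaps)[::-1][:n])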
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os, time, datetime, re, argparse, textwrap, subprocess
from datetime import date, timedelta
from time import mktime
from os.path import expanduser
import shutil
from distutils.dir_util import copy_tree
import json
import glob
import codecs
import fnmatch
import collections
parser = argparse.ArgumentParser(
    description='Extract localizable strings from Swift code into the .strings file.')
parser.add_argument('src_paths', help='Main swift source path, default="./")',
default='./', type=str, nargs='?')
parser.add_argument('dest_l10n_base_path', help='Target Base Localizable.strings path. (default=./)',
default='./', nargs='?')
parser.add_argument('-k', '--split-key',
help='Splitting identifier to extract strings from Swift code. (e.g. "This is string".localized )',
default='.localized', required=False)
args = vars(parser.parse_args())
src_paths = [expanduser(path) for path in args['src_paths'].split(" ")]
dest_l10n_base_path = expanduser(args['dest_l10n_base_path'])
split_key = args['split_key']
__GEN_FLAG__ = "Generated from genl10n"
compiled_patterns_by_priority = [
re.compile(r'((\"\b.*\b\")' + split_key + ')', re.I|re.U|re.MULTILINE|re.X)
, re.compile(r'((\".*\")' + split_key + ')', re.I|re.U|re.MULTILINE|re.X)
]
# for excluding Swift string-interpolation literals -> \(value)
qs = re.compile(r'\\\((.+)\)', re.I|re.U)
# for excluding code comments
cs = re.compile(r'\/\/.*', re.I|re.U)
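# Example of the extraction (with the default split key): a Swift line such as
#     let title = "Hello".localized
# is split on ".localized", the quoted literal '"Hello"' is captured by one of the
# patterns above, and the key is later appended to Localizable.strings as
#     "Hello" = "Hello";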
swift_files = []
for src_path in src_paths:
for root, dirnames, filenames in os.walk(src_path):
for filename in fnmatch.filter(filenames, '*.swift'):
swift_files.append(os.path.join(root, filename))
gened_strs = collections.OrderedDict()
for code_file in swift_files:
rcur = codecs.open(code_file, "r", "utf-8")
wlines = []
for i, line in enumerate(rcur.readlines()):
if cs.search(line):
line = cs.sub("", line)
for line_sp in line.split(split_key):
            for p in compiled_patterns_by_priority:
loc_strs = p.search(line_sp + split_key)
if loc_strs:
str = loc_strs.group()
#TODO: fix for a case "Videos from \"Screen Recording\""
# unwrap overlapped quote "
str = '"'+str.split('"')[-2]+'"'
if qs.search(str):
# for excluding literal format e.g. -> %d \(pluralizedString)
continue
if not str in gened_strs:
gened_strs[str] = set()
gened_strs[str].add((code_file, i+1))
break # if pattern was found by one of them, exit loop
rcur = codecs.open(dest_l10n_base_path, "r", "utf-8")
rlines = rcur.readlines()
rcur.close()
wlines = []
met_gen_flag = False
for line in rlines:
if __GEN_FLAG__ in line:
met_gen_flag = True
continue
if met_gen_flag:
met_gen_flag = False
continue
wlines.append(line)
keys_in_l10n_file = [line.split("=")[0].strip() for line in wlines]  # a list, so membership tests below can run repeatedly
keys_in_gened_strs = sorted(gened_strs.keys())
# NOTE: sort the keys explicitly -- dict key ordering differs between Python 2.7 and Python 3.
if keys_in_gened_strs and len(wlines[-1].strip()) > 0:
wlines.append('\n')
for new_key in keys_in_gened_strs:
if new_key in keys_in_l10n_file:
continue
new_line = u'{0} = {0};'.format(new_key)
# gened_strs[new_key][0] : code file path as string
# gened_strs[new_key][1] : line as int
# from_files = ", ".join(map(lambda s: "{}#{}".format(os.path.basename(s[0]), s[1]), gened_strs[new_key]))
from_files = ", ".join(map(lambda s: "{}".format(os.path.basename(s[0])), gened_strs[new_key]))
wlines.append("/* {}: {} */".format(__GEN_FLAG__, from_files))
wlines.append('\n')
wlines.append(new_line)
wlines.append('\n')
wcur = codecs.open(dest_l10n_base_path, "w", "utf-8")
wcur.writelines(wlines)
wcur.close()
|
#!/usr/bin/python
import socket
import fcntl
import struct
import os
def hosts():
"slcli"
slcli = "slcli vs list | awk '{ print $3 }' > ./ip-hosts.txt"
os.system(slcli)
return
hosts()
with open( "./ip-hosts.txt", 'r') as fin:
    print(fin.read())
|
from django.conf import settings
from social_django.models import UserSocialAuth
def append_social_info_to_context(request):
return_content = {}
try:
user = request.user
    except AttributeError:
user = None
try:
return_content["GITHUB_LOGIN"] = user.social_auth.get(provider="github")
except (UserSocialAuth.DoesNotExist, AttributeError):
return_content["GITHUB_LOGIN"] = False
try:
return_content["LINKEDIN_LOGIN"] = user.social_auth.get(
provider="linkedin-oauth2"
)
except (UserSocialAuth.DoesNotExist, AttributeError):
return_content["LINKEDIN_LOGIN"] = False
return_content["SOCIAL_AUTH"] = (
return_content["GITHUB_LOGIN"] or return_content["LINKEDIN_LOGIN"]
)
return_content["EMPTY_PROFILE"] = return_content["SOCIAL_AUTH"] and not (
request.user.profile.linkedin
or request.user.profile.github
or request.user.profile.portfolio
or request.user.profile.cellphone
)
return return_content
def global_vars(request):
return {
"GA_CODE": settings.GA_CODE,
"WEBSITE_NAME": settings.WEBSITE_NAME,
"WEBSITE_URL": settings.WEBSITE_URL,
"WEBSITE_SLOGAN": settings.WEBSITE_SLOGAN,
"WEBSITE_OWNER_EMAIL": settings.WEBSITE_OWNER_EMAIL,
"WEBSITE_GENERAL_EMAIL": settings.WEBSITE_GENERAL_EMAIL,
"WEBSITE_WORKING_LANGUAGE": settings.WEBSITE_WORKING_LANGUAGE,
"WEBSITE_MAILINGLIST_LINK": settings.WEBSITE_MAILINGLIST_LINK,
"USER_SUBSTANTIVE": settings.USER_SUBSTANTIVE,
"VAPID_PUBLIC_KEY": settings.WEBPUSH_SETTINGS["VAPID_PUBLIC_KEY"],
**append_social_info_to_context(request),
}
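# Usage note (the dotted path below is a hypothetical example): global_vars is a
# Django context processor, so it is enabled by adding it to
# TEMPLATES[0]["OPTIONS"]["context_processors"] in settings.py, e.g.
#
#   "context_processors": [
#       "django.template.context_processors.request",
#       "myapp.context_processors.global_vars",
#   ]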
|
import os
import numpy as np
import subprocess
from deepethogram import utils
from setup_data import get_testing_directory
testing_directory = get_testing_directory()
config_path = os.path.join(testing_directory, 'project_config.yaml')
BATCH_SIZE = 4 # small but not too small
# if less than 10, might have bugs with visualization
STEPS_PER_EPOCH = 20
NUM_EPOCHS = 2
def command_from_string(string):
command = string.split(' ')
if command[-1] == '':
command = command[:-1]
return command
def add_default_arguments(string, train=True):
string += f'project.config_file={config_path} '
string += f'compute.batch_size={BATCH_SIZE} '
if train:
string += f'train.steps_per_epoch.train={STEPS_PER_EPOCH} train.steps_per_epoch.val={STEPS_PER_EPOCH} '
string += f'train.steps_per_epoch.test={STEPS_PER_EPOCH} '
string += f'train.num_epochs={NUM_EPOCHS} '
return string
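# For example, add_default_arguments('python -m deepethogram.flow_generator.train preset=deg_f ')
# returns the same string with
#   'project.config_file=<testing_directory>/project_config.yaml compute.batch_size=4 '
#   'train.steps_per_epoch.train=20 train.steps_per_epoch.val=20 train.steps_per_epoch.test=20 train.num_epochs=2 '
# appended, and command_from_string() then splits it on spaces, dropping the trailing empty token.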
def test_flow():
string = (f'python -m deepethogram.flow_generator.train preset=deg_f ')
string = add_default_arguments(string)
command = command_from_string(string)
ret = subprocess.run(command)
assert ret.returncode == 0
string = (f'python -m deepethogram.flow_generator.train preset=deg_m ')
string = add_default_arguments(string)
command = command_from_string(string)
ret = subprocess.run(command)
assert ret.returncode == 0
string = (f'python -m deepethogram.flow_generator.train preset=deg_s ')
string = add_default_arguments(string)
command = command_from_string(string)
ret = subprocess.run(command)
assert ret.returncode == 0
def test_feature_extractor():
string = (f'python -m deepethogram.feature_extractor.train preset=deg_f flow_generator.weights=latest ')
string = add_default_arguments(string)
command = command_from_string(string)
ret = subprocess.run(command)
assert ret.returncode == 0
string = (f'python -m deepethogram.feature_extractor.train preset=deg_m flow_generator.weights=latest ')
string = add_default_arguments(string)
command = command_from_string(string)
ret = subprocess.run(command)
assert ret.returncode == 0
# for resnet3d, must specify weights, because we can't just download them from the torchvision repo
string = (f'python -m deepethogram.feature_extractor.train preset=deg_s flow_generator.weights=latest '
f'feature_extractor.weights=latest ')
string = add_default_arguments(string)
command = command_from_string(string)
ret = subprocess.run(command)
assert ret.returncode == 0
def test_feature_extraction():
# the reason for this complexity is that I don't want to run inference on all directories
string = (f'python -m deepethogram.feature_extractor.inference preset=deg_f reload.latest=True ')
datadir = os.path.join(testing_directory, 'DATA')
subdirs = utils.get_subfiles(datadir, 'directory')
np.random.seed(42)
subdirs = np.random.choice(subdirs, size=100, replace=False)
dir_string = ','.join([str(i) for i in subdirs])
dir_string = '[' + dir_string + ']'
string += f'inference.directory_list={dir_string} inference.overwrite=True '
string = add_default_arguments(string, train=False)
command = command_from_string(string)
ret = subprocess.run(command)
assert ret.returncode == 0
# string += 'inference.directory_list=[]'
def test_sequence_train():
string = (f'python -m deepethogram.sequence.train ')
string = add_default_arguments(string)
command = command_from_string(string)
print(command)
ret = subprocess.run(command)
assert ret.returncode == 0
|
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Dataset to distilled models
adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
"""
import numpy as np
import torch
import os
from torch.utils.data import Dataset
from utils import logger
import json
class LazyLmSeqsDataset(Dataset):
"""Custom Dataset wrapping language modeling sequences.
Each sample will be retrieved by indexing the list of token_ids and their corresponding lengths.
Input:
------
params: `NameSpace` parameters
    data: `List[np.array[int]]`
"""
def __init__(self, params):
self.params = params
self.train_data_info = {}
self.sampleid2fileid = []
self.create_dataset_metadata()
self.total_samples = len(self.sampleid2fileid)
self.print_statistics()
def __getitem__(self, index):
id_info = self.sampleid2fileid[index]
file_id = id_info["file_id"]
local_sample_id = id_info["local_sample_id"]
file_name = self.train_data_info["info"][file_id]["file_name"]
data = self.memmap_read_npy(os.path.join(self.params.data_file, file_name))
sample = data[local_sample_id]
return (sample, sample.shape[0])
def __len__(self):
return self.total_samples
def print_statistics(self):
"""
Print some statistics on the corpus. Only the master process.
"""
if not self.params.is_master:
return
logger.info(f"{len(self)} sequences")
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def memmap_read_npy(self, npy_file):
return np.lib.format.open_memmap(npy_file,
mode='r+', dtype=np.uint16,
shape=None, fortran_order=False, version=None)
def create_dataset_metadata(self):
train_npy_path = self.params.data_file
        if not os.path.isdir(train_npy_path):
            raise IOError("Please provide the path to the folder containing the .npy files, not a single file.")
train_npy_files = os.listdir(train_npy_path)
self.train_data_info["info"] = []
file_id = len(self.train_data_info["info"])
for npy_file in train_npy_files:
if npy_file.endswith("npy"):
data = self.memmap_read_npy(os.path.join(train_npy_path, npy_file))
data_shape = data.shape
info = {
"file_id" : file_id,
"file_name" : npy_file,
"n_samples" : data_shape[0],
"sample_size" : data_shape[1],
"dtype" : data.dtype
}
self.sampleid2fileid += [{"file_id":file_id, "local_sample_id":local_sample_id}
for local_sample_id in range(data_shape[0])]
self.train_data_info["info"].append(info)
# with open("train_data_info.json", "w") as f:
# f.write(json.dumps(self.train_data_info))
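    # Usage sketch (an assumption, not part of the original file): batch_sequences
    # below is intended as a DataLoader collate_fn, e.g.
    #
    #   dataset = LazyLmSeqsDataset(params)
    #   loader = torch.utils.data.DataLoader(dataset, batch_size=32,
    #                                        collate_fn=dataset.batch_sequences)
    #
    # so every batch comes back as (token_ids, lengths) tensors padded to the
    # longest sequence in that batch.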
def batch_sequences(self, batch):
"""
Do the padding and transform into torch.tensor.
"""
token_ids = [t[0] for t in batch]
lengths = [t[1] for t in batch]
assert len(token_ids) == len(lengths)
# Max for paddings
max_seq_len_ = max(lengths)
# Pad token ids
if self.params.mlm:
pad_idx = self.params.special_tok_ids["pad_token"]
else:
pad_idx = self.params.special_tok_ids["unk_token"]
tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
assert len(tk_) == len(token_ids)
assert all(len(t) == max_seq_len_ for t in tk_)
tk_t = torch.tensor(tk_) # (bs, max_seq_len_)
lg_t = torch.tensor(lengths) # (bs)
        return tk_t, lg_t
|
"""
Given a string, S, of length N that is indexed from 0 to N-1, print its even-indexed
and odd-indexed characters as two space-separated strings on a single line
(see the Sample below for more detail).
Note: 0 is considered to be an even index.
"""
import sys
if __name__=="__main__":
string_list=[]
T=int(raw_input())
for i in range (0,T):
inputstring=raw_input()
string_list.append(inputstring)
for i in range (0,T):
even=""
odd=""
#print i
#print string_list[i]
for j, char in enumerate(string_list[i]):
print j
if j % 2 == 0:
#print char
even=even+char
else:
#print char
odd=odd+char
print even, odd |
# coding: utf-8
"""
IOInterfaceTypeData.py
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from pprint import pformat
from six import iteritems
class IOInterfaceTypeData(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
IOInterfaceTypeData - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'interface_type': 'str', # (required parameter)
'fibre': 'FibreInterface',
'ib': 'IbInterface',
'iscsi': 'IscsiInterface',
'sas': 'SasInterface',
'sata': 'SATAInterface',
'scsi': 'SCSIInterface'
}
self.attribute_map = {
'interface_type': 'interfaceType', # (required parameter)
'fibre': 'fibre',
'ib': 'ib',
'iscsi': 'iscsi',
'sas': 'sas',
'sata': 'sata',
'scsi': 'scsi'
}
self._interface_type = None
self._fibre = None
self._ib = None
self._iscsi = None
self._sas = None
self._sata = None
self._scsi = None
@property
def interface_type(self):
"""
Gets the interface_type of this IOInterfaceTypeData.
This enumeration defines the different I/O interface types that may be reported as part of the configuration information associated with a controller.
:return: The interface_type of this IOInterfaceTypeData.
:rtype: str
:required/optional: required
"""
return self._interface_type
@interface_type.setter
def interface_type(self, interface_type):
"""
Sets the interface_type of this IOInterfaceTypeData.
This enumeration defines the different I/O interface types that may be reported as part of the configuration information associated with a controller.
:param interface_type: The interface_type of this IOInterfaceTypeData.
:type: str
"""
allowed_values = ["notImplemented", "scsi", "fc", "sata", "sas", "iscsi", "ib", "fcoe", "nvmeof", "__UNDEFINED"]
if interface_type not in allowed_values:
raise ValueError(
"Invalid value for `interface_type`, must be one of {0}"
.format(allowed_values)
)
self._interface_type = interface_type
@property
def fibre(self):
"""
Gets the fibre of this IOInterfaceTypeData.
This field is present only if the interfaceType value is equal to IO_IF_FC. It contains the detailed interface information for a Fibre Channel I/O interface.
:return: The fibre of this IOInterfaceTypeData.
:rtype: FibreInterface
:required/optional: optional
"""
return self._fibre
@fibre.setter
def fibre(self, fibre):
"""
Sets the fibre of this IOInterfaceTypeData.
This field is present only if the interfaceType value is equal to IO_IF_FC. It contains the detailed interface information for a Fibre Channel I/O interface.
:param fibre: The fibre of this IOInterfaceTypeData.
:type: FibreInterface
"""
self._fibre = fibre
@property
def ib(self):
"""
Gets the ib of this IOInterfaceTypeData.
This field is present only if the interfaceType value is equal to IO_IF_IB. It contains the detailed interface information for an Infiniband interface.
:return: The ib of this IOInterfaceTypeData.
:rtype: IbInterface
:required/optional: optional
"""
return self._ib
@ib.setter
def ib(self, ib):
"""
Sets the ib of this IOInterfaceTypeData.
This field is present only if the interfaceType value is equal to IO_IF_IB. It contains the detailed interface information for an Infiniband interface.
:param ib: The ib of this IOInterfaceTypeData.
:type: IbInterface
"""
self._ib = ib
@property
def iscsi(self):
"""
Gets the iscsi of this IOInterfaceTypeData.
This field is present only if the interface type value is equal to IO_IF_ISCSI. It contains the detailed interface information for an iSCSI I/O interface.
:return: The iscsi of this IOInterfaceTypeData.
:rtype: IscsiInterface
:required/optional: optional
"""
return self._iscsi
@iscsi.setter
def iscsi(self, iscsi):
"""
Sets the iscsi of this IOInterfaceTypeData.
This field is present only if the interface type value is equal to IO_IF_ISCSI. It contains the detailed interface information for an iSCSI I/O interface.
:param iscsi: The iscsi of this IOInterfaceTypeData.
:type: IscsiInterface
"""
self._iscsi = iscsi
@property
def sas(self):
"""
Gets the sas of this IOInterfaceTypeData.
This field is present only if the interfaceType value is equal to IO_IF_SAS. It contains the detailed interface information for a serial-attached SCSI I/O interface.
:return: The sas of this IOInterfaceTypeData.
:rtype: SasInterface
:required/optional: optional
"""
return self._sas
@sas.setter
def sas(self, sas):
"""
Sets the sas of this IOInterfaceTypeData.
This field is present only if the interfaceType value is equal to IO_IF_SAS. It contains the detailed interface information for a serial-attached SCSI I/O interface.
:param sas: The sas of this IOInterfaceTypeData.
:type: SasInterface
"""
self._sas = sas
@property
def sata(self):
"""
Gets the sata of this IOInterfaceTypeData.
This field is present only if the interfaceType value is equal to IO_IF_SATA. It contains the detailed interface information for a SATA I/O interface.
:return: The sata of this IOInterfaceTypeData.
:rtype: SATAInterface
:required/optional: optional
"""
return self._sata
@sata.setter
def sata(self, sata):
"""
Sets the sata of this IOInterfaceTypeData.
This field is present only if the interfaceType value is equal to IO_IF_SATA. It contains the detailed interface information for a SATA I/O interface.
:param sata: The sata of this IOInterfaceTypeData.
:type: SATAInterface
"""
self._sata = sata
@property
def scsi(self):
"""
Gets the scsi of this IOInterfaceTypeData.
This field is present only if the interfaceType value is equal to IO_IF_SCSI. It contains the detailed interface information for a SCSI I/O interface.
:return: The scsi of this IOInterfaceTypeData.
:rtype: SCSIInterface
:required/optional: optional
"""
return self._scsi
@scsi.setter
def scsi(self, scsi):
"""
Sets the scsi of this IOInterfaceTypeData.
This field is present only if the interfaceType value is equal to IO_IF_SCSI. It contains the detailed interface information for a SCSI I/O interface.
:param scsi: The scsi of this IOInterfaceTypeData.
:type: SCSIInterface
"""
self._scsi = scsi
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
if self is None:
return None
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if self is None or other is None:
return None
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import hashlib
import json
import datetime
import pandas as pd
import os
import numpy as np
from CountingGridsPy.models import CountingGridModel
import traceback
from browseCloudServiceAuthorizer import BrowseCloudServiceAuthorizer
from countingGridsHeartBeater import CountingGridsHeartBeater
from jobStatus import JobStatus
from batchJob import BatchJob
import azure.storage.blob as azureblob
from browseCloudAzureUtilities import upload_file_to_container, download_file_from_container
import matplotlib.pyplot as plt
from CountingGridsPy.EngineToBrowseCloudPipeline import BrowseCloudArtifactGenerator, CGEngineWrapper, NLPCleaner, PipelineTimer
import sys
sys.path.append("../../..")
# CLI: python generateCountingGridsFromAzure.py <input_containername> <extent_size_of_grid_hyperparameter>
# <window_size_of_grid_hyperparameter> <engine_type> <inputfile_type> <inputfile_name> <output_containername>
#
# Example CLI: python generateCountingGridsFromAzure.py trainingdata 24 5 numpyEngine simpleInput dictionaryVerySmallSample.txt bighash
# Assumes: input_containername and output_containername must be <64 characters long.
if __name__ == "__main__":
HEART_BEATER = None
try:
# ---------------------------------------------------------------------------------------
# Input
# ---------------------------------------------------------------------------------------
errStr = "Please give valid command-line arguments."
if len(sys.argv) != 8:
raise ValueError(errStr)
containerNameIn = sys.argv[1]
EXTENT_SIZE = int(sys.argv[2])
WINDOW_SIZE = int(sys.argv[3])
engine_type = sys.argv[4]
inputfile_type = sys.argv[5]
blobName = sys.argv[6]
containerNameOut = sys.argv[7]
if engine_type != "numpyEngine" and engine_type != "matlabEngine":
raise ValueError(
"The {0} engine does not exist. Please use 'matlabEngine' or 'numpyEngine'.".format(engine_type))
engine_type = engine_type[:-6] # 6 characters in the word "Engine"
if inputfile_type != "metadataInput" and inputfile_type != "simpleInput":
raise ValueError(
"The {0} input type does not exist. Please use 'simpleInput' or 'metadataInput'.".format(inputfile_type))
inputfile_type = inputfile_type[:-5] # remove "Input"
# ---------------------------------------------------------------------------------------
# Authentication
# ---------------------------------------------------------------------------------------
AUTH_URL = ""
SERVICE_URL = ""
_STORAGE_ACCOUNT_NAME_IN = 'browsecloudtrainingdata'
_STORAGE_ACCOUNT_KEY_IN = ""
_STORAGE_ACCOUNT_NAME_OUT = 'browsecloudmodelfiles'
_STORAGE_ACCOUNT_KEY_OUT = ""
jobId = containerNameOut
docId = containerNameIn
with open('metadata.json', 'r') as fMeta:
dataMeta = json.load(fMeta)
AUTH_URL = dataMeta["AUTH_URL"]
_STORAGE_ACCOUNT_NAME_IN = dataMeta["_STORAGE_ACCOUNT_NAME_TRAININGDATA"]
            _STORAGE_ACCOUNT_NAME_OUT = dataMeta["_STORAGE_ACCOUNT_NAME_MODELS"]
if dataMeta['ENV'] == 'DEV':
# TODO: Use key vault and certificate
                # to retrieve that information instead of a temp file for keys.
# Note that keys.json is not checked in.
with open("keys.json", "r") as f:
data = json.load(f)
_STORAGE_ACCOUNT_KEY_IN = data['_STORAGE_ACCOUNT_KEY_DEV']
_STORAGE_ACCOUNT_KEY_OUT = data['_STORAGE_ACCOUNT_KEY_OUT_DEV']
_STORAGE_ACCOUNT_NAME_OUT = data['_STORAGE_ACCOUNT_NAME_OUT_DEV'] if (
'_STORAGE_ACCOUNT_NAME_OUT_DEV' in data
) else _STORAGE_ACCOUNT_NAME_OUT
_STORAGE_ACCOUNT_NAME_IN = data['_STORAGE_ACCOUNT_NAME_IN_DEV'] if (
'_STORAGE_ACCOUNT_NAME_IN_DEV' in data
) else _STORAGE_ACCOUNT_NAME_IN
SERVICE_URL = dataMeta["SERVICE_URL_DEV"] + "/api/v1/jobs/" + \
jobId if "SERVICE_URL_DEV" in dataMeta else SERVICE_URL
elif dataMeta['ENV'] == 'PROD':
with open("keys.json", "r") as f:
data = json.load(f)
_STORAGE_ACCOUNT_KEY_IN = data['_STORAGE_ACCOUNT_KEY_PROD']
_STORAGE_ACCOUNT_KEY_OUT = data['_STORAGE_ACCOUNT_KEY_OUT_PROD']
_STORAGE_ACCOUNT_NAME_OUT = data['_STORAGE_ACCOUNT_NAME_OUT_PROD'] if (
'_STORAGE_ACCOUNT_NAME_OUT_PROD' in data
) else _STORAGE_ACCOUNT_NAME_OUT
_STORAGE_ACCOUNT_NAME_IN = data['_STORAGE_ACCOUNT_NAME_IN_PROD'] if (
'_STORAGE_ACCOUNT_NAME_IN_PROD' in data
) else _STORAGE_ACCOUNT_NAME_IN
SERVICE_URL = dataMeta["SERVICE_URL_PROD"] + "/api/v1/jobs/" + \
jobId if "SERVICE_URL_PROD" in dataMeta else SERVICE_URL
else:
raise ValueError(
"Environment type in metadata.json is invalid.")
BATCH_JOB = BatchJob(jobId, JobStatus.NotStarted, 0)
SERVICE_AUTHORIZER = BrowseCloudServiceAuthorizer(AUTH_URL)
HEART_BEATER = CountingGridsHeartBeater(
SERVICE_URL, BATCH_JOB, SERVICE_AUTHORIZER)
DIRECTORY_SUFFIX = hashlib.sha3_256(
(docId+blobName+str(datetime.datetime.now())).encode()).hexdigest()
DIRECTORY_DATA = blobName.split(".")[0] + "_" + DIRECTORY_SUFFIX
if not os.path.isdir(DIRECTORY_DATA):
os.mkdir(DIRECTORY_DATA)
'''
Algorithm:
1. Get training data from Azure.
2. Run learning code.
3. Write model files to Azure.
Changes between this and dumpCountingGrids.py:
1. DIRECTORY_DIR must be unique.
2. Fetching and writing to Azure. Idea is to fetch into directory and then write it to Azure
'''
blob_client = azureblob.BlockBlobService(
account_name=_STORAGE_ACCOUNT_NAME_IN,
account_key=_STORAGE_ACCOUNT_KEY_IN)
download_file_from_container(
blob_client, containerNameIn, DIRECTORY_DATA+"/"+blobName, blobName)
FILE_NAME = DIRECTORY_DATA + "\\" + blobName
CLEAN_DATA_FILE_NAME, MIN_FREQUENCY, MIN_WORDS = [
"\cg-processed.tsv", 2, 5]
# ---------------------------------------------------------------------------------------
# Data Cleaning
# ---------------------------------------------------------------------------------------
cleaner = NLPCleaner()
HEART_BEATER.next()
correspondences = None
CACHED_CORRESPONDENCES_FILE_NAME = "\cached_correspondences.tsv"
pTimer = PipelineTimer()
pTimer("Reading data file.")
df, keep = cleaner.read(
FILE_NAME, inputfile_type, MIN_FREQUENCY, MIN_WORDS)
if not (os.path.exists(DIRECTORY_DATA + CACHED_CORRESPONDENCES_FILE_NAME) and os.path.exists(DIRECTORY_DATA + CLEAN_DATA_FILE_NAME)):
pTimer("Starting data cleaning.")
cleaner.handle_negation_tokens()
cleaner.removePunctuation()
HEART_BEATER.makeProgress(50)
correspondences = cleaner.lemmatize()
cleaner.write_cached_correspondences(
DIRECTORY_DATA, CACHED_CORRESPONDENCES_FILE_NAME)
cleaner.write(DIRECTORY_DATA, CLEAN_DATA_FILE_NAME)
else:
pTimer("Skipping data cleaning.")
correspondences = cleaner.read_cached_correspondences(
DIRECTORY_DATA, CACHED_CORRESPONDENCES_FILE_NAME)
pTimer("Learning counting grid.")
LEARNED_GRID_FILE_NAME = "/CountingGridDataMatrices.mat"
# ---------------------------------------------------------------------------------------
# Learning
# ---------------------------------------------------------------------------------------
engine = CGEngineWrapper(
extent_size=EXTENT_SIZE, window_size=WINDOW_SIZE, heartBeaters=[HEART_BEATER])
HEART_BEATER.next()
vocabulary = None
if not os.path.exists(DIRECTORY_DATA + LEARNED_GRID_FILE_NAME):
vocabulary, keep = engine.fit(
DIRECTORY_DATA, CLEAN_DATA_FILE_NAME, cleaner.labelsS, MIN_FREQUENCY, keep, engine=engine_type)
else:
vocabulary, keep = engine.get_vocab(
DIRECTORY_DATA, CLEAN_DATA_FILE_NAME, cleaner.labelsS, MIN_FREQUENCY, keep)
# ---------------------------------------------------------------------------------------
# Output
# ---------------------------------------------------------------------------------------
pTimer("Generating counting grid artifacts.")
LINK_FILE_NAME = ""
bcag = BrowseCloudArtifactGenerator(DIRECTORY_DATA)
bcag.read(LEARNED_GRID_FILE_NAME)
bcag.write_docmap(engine.wd_size, engine=engine_type)
bcag.write_counts()
bcag.write_vocabulary(vocabulary)
bcag.write_top_pi()
bcag.write_top_pi_layers()
bcag.write_colors() # write default blue colors
bcag.write_database(df, keep)
bcag.write_correspondences(correspondences, vocabulary)
bcag.write_keep(keep)
pTimer("Done.")
blob_client = azureblob.BlockBlobService(
account_name=_STORAGE_ACCOUNT_NAME_OUT,
account_key=_STORAGE_ACCOUNT_KEY_OUT)
        # TODO: apply some recursion to create additional containers if container-name uniqueness becomes a problem
blob_client.create_container(containerNameOut)
        # upload each generated model file into the output container, skipping the original input file
FILES = [f.path for f in os.scandir(DIRECTORY_DATA) if not f.is_dir()]
for modelfile in FILES:
if not modelfile.endswith(blobName):
upload_file_to_container(
blob_client, containerNameOut, modelfile)
    except BaseException:
        # catch everything so the heartbeat service is always told the job failed
        if HEART_BEATER is not None:
            HEART_BEATER.done(success=False)
        print("Script failed.")
        print(traceback.format_exc())
else:
HEART_BEATER.done(success=True)
print("Script succeeded.")
|
# encoding: utf-8
r"""
A full summary of all nodes.
+---------------------+--------------------+----------------+------------------------------+
| Name | Children | Example | Description |
+=====================+====================+================+==============================+
| All | | `:` | Colon operator w/o range |
+---------------------+--------------------+----------------+------------------------------+
| Assign | `Expr Expr` | `a=b` | Assignment one var |
+---------------------+--------------------+----------------+------------------------------+
| Assigns | `Expr Expr+` | `[a,b]=c` | Assignment multi vars |
+---------------------+--------------------+----------------+------------------------------+
| Band | `Expr Expr+` | `a&b` | Binary AND operator |
+---------------------+--------------------+----------------+------------------------------+
| Bcomment | | `%{ . %}` | Block comment |
+---------------------+--------------------+----------------+------------------------------+
| Block | `Line*` | `a` | Code block |
+---------------------+--------------------+----------------+------------------------------+
| Bor | `Expr Expr+` | `a|b` | Binary OR operator |
+---------------------+--------------------+----------------+------------------------------+
| Branch | `If Ifse* Else?` | `if a; end` | If chain container |
+---------------------+--------------------+----------------+------------------------------+
| Break | | `break` | Break statement |
+---------------------+--------------------+----------------+------------------------------+
| Case | `Var Block` | `case a` | Case part of Switch |
+---------------------+--------------------+----------------+------------------------------+
| Catch | `Block` | `catch a` | Catch part of Tryblock |
+---------------------+--------------------+----------------+------------------------------+
| Cell | `Expr*` | `{a}` | Cell array |
+---------------------+--------------------+----------------+------------------------------+
| Cget                | `Expr+`            | `a{b}(c)`      | Cell retrieval               |
+---------------------+--------------------+----------------+------------------------------+
| Colon | `Expr Expr Expr?` | `a:b` | Colon operator w range |
+---------------------+--------------------+----------------+------------------------------+
| Counter | | | Struct array size |
+---------------------+--------------------+----------------+------------------------------+
| Cset | `Expr+` | `a{b}(c)=d` | Cell array assignment |
+---------------------+--------------------+----------------+------------------------------+
| Ctranspose          | `Expr`             | `a'`           | Complex conjugate transpose  |
+---------------------+--------------------+----------------+------------------------------+
| Cvar | `Expr+` | `a{b}` | Cell variable |
+---------------------+--------------------+----------------+------------------------------+
| Declares | `Var*` | | Declared variable list |
+---------------------+--------------------+----------------+------------------------------+
| Ecomment | | `a%b` | End-of-line comment |
+---------------------+--------------------+----------------+------------------------------+
| Elementdivision     | `Expr Expr+`       | `a./b`         | Element-wise division        |
+---------------------+--------------------+----------------+------------------------------+
| Elexp | `Expr Expr+` | `a.^b` | Element-wise exponent |
+---------------------+--------------------+----------------+------------------------------+
| Elif | `Expr Block` | `elseif a` | Else-if part of Branch |
+---------------------+--------------------+----------------+------------------------------+
| Elmul | `Expr Expr+` | `a.*b` | Element-wise multiplication |
+---------------------+--------------------+----------------+------------------------------+
| Else | `Block` | `else` | Else part of Branch |
+---------------------+--------------------+----------------+------------------------------+
| End | | `end` | End-expression |
+---------------------+--------------------+----------------+------------------------------+
| Eq                  | `Expr Expr`        | `a==b`         | Equality operator            |
+---------------------+--------------------+----------------+------------------------------+
| Error | | | Error node |
+---------------------+--------------------+----------------+------------------------------+
| Exp | `Expr Expr+` | `a^b` | Exponential operator |
+---------------------+--------------------+----------------+------------------------------+
| Fget                | `Expr*`            | `a.b(c)`       | Fieldarray retrieval         |
+---------------------+--------------------+----------------+------------------------------+
| Float | | `4.` | Float-point number |
+---------------------+--------------------+----------------+------------------------------+
| For | `Var Expr Block` | `for a=b;end` | For-loop container |
+---------------------+--------------------+----------------+------------------------------+
| Fset | `Expr Expr+` | `a.b(c)=d` | Fieldname assignment |
+---------------------+--------------------+----------------+------------------------------+
| Func | `Declares Returns` | `function f()` | Function container |
| | `Params Block` | `end` | |
+---------------------+--------------------+----------------+------------------------------+
| Funcs | `[Main Func+]` | | Root of all functions |
+---------------------+--------------------+----------------+------------------------------+
| Fvar | | `a.b` | Fieldname variable |
+---------------------+--------------------+----------------+------------------------------+
| Ge | `Expr Expr` | `a>=b` | Greater-or-equal operator |
+---------------------+--------------------+----------------+------------------------------+
| Get                 | `Expr*`            | `a(b)`         | Function call or retrieval   |
+---------------------+--------------------+----------------+------------------------------+
| Gt | `Expr Expr` | `a>b` | Greater operator |
+---------------------+--------------------+----------------+------------------------------+
| Header | | | File header element |
+---------------------+--------------------+----------------+------------------------------+
| Headers | | | Collection header lines |
+---------------------+--------------------+----------------+------------------------------+
| If | `Expr Block` | `if a` | If part of Branch |
+---------------------+--------------------+----------------+------------------------------+
| Imag | | `i` | Imaginary unit |
+---------------------+--------------------+----------------+------------------------------+
| Include | | | Include statement |
+---------------------+--------------------+----------------+------------------------------+
| Includes | | | Collection of includes |
+---------------------+--------------------+----------------+------------------------------+
| Int | | `1` | Integer value |
+---------------------+--------------------+----------------+------------------------------+
| Lambda | | `f=@()1` | Lambda function expression |
+---------------------+--------------------+----------------+------------------------------+
| Land | `Expr Expr+` | `a&&b` | Logical AND operator |
+---------------------+--------------------+----------------+------------------------------+
| Lcomment | | `%a` | Line-comment |
+---------------------+--------------------+----------------+------------------------------+
| Le | `Expr Expr` | `a<=b` | Less-or-equal operator |
+---------------------+--------------------+----------------+------------------------------+
| Leftelementdivision | `Expr Expr+`       | `a.\b`         | Left element-wise division   |
+---------------------+--------------------+----------------+------------------------------+
| Leftmatrixdivision | `Expr Expr+` | `a\b` | Left matrix division |
+---------------------+--------------------+----------------+------------------------------+
| Log | `[Error Warning]+` | | Collection of Errors |
+---------------------+--------------------+----------------+------------------------------+
| Lor | `Expr Expr` | `a||b` | Logical OR operator |
+---------------------+--------------------+----------------+------------------------------+
| Lt                  | `Expr Expr`        | `a<b`          | Less-than operator           |
+---------------------+--------------------+----------------+------------------------------+
| Main | `Declares Returns` | `function f()` | Container for |
| | `Params Block` | `end` | main function |
+---------------------+--------------------+----------------+------------------------------+
| Matrix | `Vector*` | `[a]` | Matrix container |
+---------------------+--------------------+----------------+------------------------------+
| Matrixdivision | `Expr Expr+` | `a/b` | Matrix division |
+---------------------+--------------------+----------------+------------------------------+
| Minus | `Expr Expr+` | `a-b` | Minus operator |
+---------------------+--------------------+----------------+------------------------------+
| Mul | `Expr Expr+` | `a*b` | Multiplication operator |
+---------------------+--------------------+----------------+------------------------------+
| Ne | `Expr Expr` | `a~=b` | Not-equal operator |
+---------------------+--------------------+----------------+------------------------------+
| Neg | `Expr` | `-a` | Unary negative sign |
+---------------------+--------------------+----------------+------------------------------+
| Nget                | `Expr`             | `a.(b)`        | Namefield retrieval          |
+---------------------+--------------------+----------------+------------------------------+
| Not | `Expr` | `~a` | Not operator |
+---------------------+--------------------+----------------+------------------------------+
| Nset | `Expr` | `a.(b)=c` | Namefield assignment |
+---------------------+--------------------+----------------+------------------------------+
| Otherwise | `Block` | `otherwise` | Otherwise part of Switch |
+---------------------+--------------------+----------------+------------------------------+
| Params | `Var*` | | Function parameter container |
+---------------------+--------------------+----------------+------------------------------+
| Parfor | `Var Expr Block` | `parfor a=b;end`| Parallel for-loop container |
+---------------------+--------------------+----------------+------------------------------+
| Plus | `Expr Expr+` | `a+b` | Addition operator |
+---------------------+--------------------+----------------+------------------------------+
| Pragma_for | | `%%PARFOR str` | For-loop pragma |
+---------------------+--------------------+----------------+------------------------------+
| Program | `Includes Funcs` | | Program root |
| | `Inlines Structs` | | |
| | `Headers Log` | | |
+---------------------+--------------------+----------------+------------------------------+
| Project | `Program+` | | Root of all programs |
+---------------------+--------------------+----------------+------------------------------+
| Return | | `return` | Return statement |
+---------------------+--------------------+----------------+------------------------------+
| Returns | `Var*` | | Return value collection |
+---------------------+--------------------+----------------+------------------------------+
| Set | `Expr*` | `a(b)=c` | Array value assignment |
+---------------------+--------------------+----------------+------------------------------+
| Sget                | `Expr+`            | `a.b(c)`       | Submodule function/retrieval |
+---------------------+--------------------+----------------+------------------------------+
| Sset | `Expr+` | `a.b(c)=d` | Submodule assignment |
+---------------------+--------------------+----------------+------------------------------+
| Statement | `Expr` | `a` | Stand alone statement |
+---------------------+--------------------+----------------+------------------------------+
| String | | `'a'` | String representation |
+---------------------+--------------------+----------------+------------------------------+
| Struct | | | Struct container |
+---------------------+--------------------+----------------+------------------------------+
| Structs | | | Container for structs |
+---------------------+--------------------+----------------+------------------------------+
| Switch | `Var Case+ Other` | `case a; end` | Container for Switch branch |
+---------------------+--------------------+----------------+------------------------------+
| Transpose | `Expr` | `a'` | Transpose operator |
+---------------------+--------------------+----------------+------------------------------+
| Try | `Block` | `try` | Try part of Tryblock |
+---------------------+--------------------+----------------+------------------------------+
| Tryblock | `Try Catch` | `try; end` | Container for try-blocks |
+---------------------+--------------------+----------------+------------------------------+
| Var | | `a` | Variable |
+---------------------+--------------------+----------------+------------------------------+
| Vector | `Expr*` | `[a]` | Row-vector part of Matrix |
+---------------------+--------------------+----------------+------------------------------+
| Warning | | | Element in Log |
+---------------------+--------------------+----------------+------------------------------+
| While | `Expr Block` | `while a;end` | While-loop container |
+---------------------+--------------------+----------------+------------------------------+
"""
from node import Node
__all__ = [
"All", "Assign", "Assigns", "Band", "Bcomment", "Block", "Bor", "Branch",
"Break", "Case", "Catch", "Cell", "Cget", "Colon",
"Counter", "Cset", "Ctranspose", "Cvar",
"Declares", "Ecomment",
"Elementdivision", "Elexp", "Elif", "Elmul", "Else", "End", "Eq", "Error",
"Exp", "Expr", "Fget", "Float", "Parfor", "Pragma_for", "For", "Fset", "Func", "Funcs", "Fvar", "Ge",
"Get", "Gt", "Header", "Headers", "If", "Imag", "Include", "Includes", "Inline",
"Inlines", "Int", "Lambda", "Land", "Lcomment", "Le", "Leftelementdivision",
"Leftmatrixdivision", "Log", "Lor", "Lt", "Main", "Matrix", "Matrixdivision",
"Minus", "Mul", "Ne", "Neg", "Nget", "Not", "Nset", "Opr", "Otherwise",
"Params", "Paren", "Plus", "Program", "Project", "Resize", "Return", "Returns",
"Set", "Sget", "Sset", "Statement", "String", "Struct", "Structs", "Switch",
"Transpose", "Try", "Tryblock", "Var", "Vector", "Warning", "While"
]
class Project(Node):
def __init__(self, name="", cur=0, line=0, code="", **kws):
"""
        Root of the node tree. Every other node should inherit from this one.
        This node should not receive a `parent` argument during construction.
Children:
`Program+`
All keyword arguments are passed to `mc.Node.__init__`.
"""
assert "parent" not in kws
self.parent = self
self._program = self
Node.__init__(self, self, name=name, cur=cur,
line=line, code=code, **kws)
class Program(Node):
def __init__(self, parent, name, **kws):
"""
Represents one stand-alone script or program. Each child represents the various
aspects of script/program.
Children:
`Includes Funcs Inlines Structs Headers Log`
All keyword arguments are passed to `mc.Node.__init__`.
"""
self._program = self
Node.__init__(self, parent, name=name, **kws)
class Includes(Node):
def __init__(self, parent, **kws):
Node.__init__(self, parent, **kws)
class Funcs(Node):
def __init__(self, parent, line=1, **kws):
Node.__init__(self, parent, line=line, **kws)
class Inlines(Node):
def __init__(self, parent, **kws):
Node.__init__(self, parent, **kws)
class Structs(Node):
def __init__(self, parent, **kws):
Node.__init__(self, parent, **kws)
class Headers(Node):
def __init__(self, parent, **kws):
Node.__init__(self, parent, **kws)
class Log(Node):
def __init__(self, parent, **kws):
Node.__init__(self, parent, **kws)
class Header(Node):
def __init__(self, parent, name, **kws):
Node.__init__(self, parent, name=name, **kws)
class Main(Node):
def __init__(self, parent, name="main", **kws):
Node.__init__(self, parent, name=name, **kws)
class Error(Node):
def __init__(self, parent, name, value, **kws):
Node.__init__(self, parent, name, value=value, **kws)
self.prop["cls"] = name[10:]
class Warning(Node):
def __init__(self, parent, name, value, **kws):
Node.__init__(self, parent, name, value=value, **kws)
self.prop["cls"] = name[10:]
class Counter(Node):
def __init__(self, parent, name, value, **kws):
Node.__init__(self, parent, name,
value=value, **kws)
class Inline(Node):
def __init__(self, parent, name, **kws):
Node.__init__(self, parent, name, **kws)
class Include(Includes):
def __init__(self, parent, name, **kws):
Node.__init__(self, parent, name=name, **kws)
class Struct(Structs): pass
class Func(Node): pass
class Returns(Node): pass
class Params(Node): pass
class Declares(Node): pass
class Block(Node):
def __init__(self, parent, **kws):
Node.__init__(self, parent, **kws)
class Parfor(Block): pass
class For(Block): pass
class While(Block): pass
class Switch(Block): pass
class Case(Block): pass
class Otherwise(Block): pass
class Branch(Block): pass
class If(Block): pass
class Elif(Block): pass
class Else(Block): pass
class Tryblock(Block): pass
class Try(Block): pass
class Catch(Block): pass
class Statement(Block): pass
class Assign(Node): pass
class Assigns(Node):
def __init__(self, parent, **kws):
Node.__init__(self, parent, **kws)
class Expr(Node):
def __init__(self, parent, **kws):
Node.__init__(self, parent, **kws)
class Opr(Expr): pass
class Exp(Opr): pass
class Elexp(Opr): pass
class Mul(Opr): pass
class Minus(Opr): pass
class Elmul(Opr): pass
class Matrixdivision(Opr): pass
class Elementdivision(Opr): pass
class Leftmatrixdivision(Opr): pass
class Leftelementdivision(Opr): pass
class Plus(Opr): pass
class Colon(Opr): pass
class Gt(Opr): pass
class Ge(Opr): pass
class Lt(Opr): pass
class Le(Opr): pass
class Ne(Opr): pass
class Eq(Opr): pass
class Band(Opr): pass
class Bor(Opr): pass
class Land(Opr): pass
class Lor(Opr): pass
class Matrix(Node):
def __init__(self, parent, **kws):
Node.__init__(self, parent, **kws)
class Vector(Matrix): pass
class Cell(Node):
def __init__(self, parent, **kws):
Node.__init__(self, parent, **kws)
class Paren(Expr): pass
class Neg(Expr): pass
class Not(Expr): pass
class Ctranspose(Expr): pass
class Transpose(Expr): pass
class All(Expr): pass
class End(Expr): pass
class Break(Expr): pass
class Return(Expr): pass
class Int(Node):
def __init__(self, parent, value, **kws):
Node.__init__(self, parent, value=value, **kws)
class Float(Node):
def __init__(self, parent, value, **kws):
if value[0] == ".": value = "0" + value
Node.__init__(self, parent, value=value, **kws)
class Imag(Node):
def __init__(self, parent, value, **kws):
Node.__init__(self, parent, value=value, **kws)
class String(Node):
def __init__(self, parent, value, **kws):
value = value.replace("%", "__percent__")
Node.__init__(self, parent, value=value, **kws)
class Lambda(Node):
def __init__(self, parent, name="", **kws):
Node.__init__(self, parent, name=name, **kws)
class Pragma_for(Node):
def __init__(self, parent, value, **kws):
Node.__init__(self, parent, value=value, **kws)
class Lcomment(Node):
def __init__(self, parent, value, **kws):
value = value.replace("%", "__percent__")
Node.__init__(self, parent, value=value, **kws)
class Bcomment(Node):
def __init__(self, parent, value, **kws):
value = value.replace("%", "__percent__")
Node.__init__(self, parent, value=value, **kws)
class Ecomment(Node):
def __init__(self, parent, value, **kws):
value = value.replace("%", "__percent__")
Node.__init__(self, parent, value=value, **kws)
class Var(Node):
def __init__(self, parent, name, **kws):
Node.__init__(self, parent, name=name, **kws)
class Get(Var): pass
class Set(Var): pass
class Fvar(Node):
def __init__(self, parent, name, value, **kws):
Node.__init__(self, parent, name=name, value=value, **kws)
class Cvar(Node):
def __init__(self, parent, name, **kws):
Node.__init__(self, parent, name, **kws)
class Cget(Node):
def __init__(self, parent, name, **kws):
Node.__init__(self, parent, name=name, **kws)
class Fget(Node):
def __init__(self, parent, name, value, **kws):
Node.__init__(self, parent, name=name, value=value, **kws)
class Sget(Node):
def __init__(self, parent, name, value, **kws):
Node.__init__(self, parent, name=name, value=value, **kws)
class Nget(Node):
def __init__(self, parent, name, **kws):
Node.__init__(self, parent, name=name, **kws)
class Cset(Node):
def __init__(self, parent, name, **kws):
Node.__init__(self, parent, name=name, **kws)
class Fset(Node):
def __init__(self, parent, name, value, **kws):
Node.__init__(self, parent, name=name, value=value, **kws)
class Sset(Node):
def __init__(self, parent, name, value, **kws):
Node.__init__(self, parent, name=name, value=value, **kws)
class Nset(Node):
def __init__(self, parent, name, **kws):
Node.__init__(self, parent, name=name, **kws)
class Resize(Node):
def __init__(self, parent, **kws):
Node.__init__(self, parent, **kws)
class Verbatim(Node):
def __init__(self, parent, name, value, **kws):
Node.__init__(self, parent, name=name, value=value, **kws)
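# A minimal construction sketch (an assumption based on the constructors above,
# not part of the original module): every node is created with its parent, so a
# tiny tree for a one-statement script could be assembled roughly like
#
#   project = Project(name="example")
#   program = Program(project, name="script.m")
#   funcs = Funcs(program)
#   main = Main(funcs)
#   block = Block(main)
#   statement = Statement(block)
#   Var(statement, name="a")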
|
"""
These test cases can be used to test-drive a solution to the diamond kata, in an incremental manner.
To run the tests, use 'py.test' - see http://pytest.org
Instructions:
1. Make the first test case for Diamond A pass
2. change the 'ignore_' to 'test_' for the next test case. Make it pass.
3. Uncomment the next line of the test case. Make it pass
4. When all the lines of code in the test case are passing, continue to the next test case.
5. When all the test cases in this file are uncommented and passing, you should have a full working solution.
"""
import diamond
def test_diamondA_has_one_line_containing_a():
assert diamond.Diamond('A').print_diamond() == "A"
def ignore_letter_sequence_is_list_of_letters_on_each_line_of_the_diamond():
assert diamond.Diamond('A').letter_sequence == ['A']
#assert diamond.Diamond('B').letter_sequence == ['A', 'B', 'A']
#assert diamond.Diamond('C').letter_sequence == ['A', 'B', 'C', 'B', 'A']
#assert diamond.Diamond('D').letter_sequence == ['A', 'B', 'C', 'D', 'C', 'B', 'A']
def ignore_indents_is_list_of_indentation_for_each_line_of_the_diamond():
assert diamond.Diamond('A').indents == [0]
#assert diamond.Diamond('B').indents == [1,0,1]
#assert diamond.Diamond('C').indents == [2,1,0,1,2]
#assert diamond.Diamond('D').indents == [3,2,1,0,1,2,3]
def ignore_between_is_list_of_how_many_middle_spaces_between_the_repeated_letter_for_each_line_of_the_diamond():
assert diamond.Diamond('A').between == [0]
#assert diamond.Diamond('B').between == [0,1,0]
#assert diamond.Diamond('C').between == [0,1,3,1,0]
#assert diamond.Diamond('D').between == [0,1,3,5,3,1,0]
def ignore_one_row_is_a_list_representing_one_diamond_row():
assert diamond.Diamond('A').one_row('A', indent=0, between=0) == "A"
#assert diamond.Diamond('B').one_row('A', indent=1, between=0) == " A"
#assert diamond.Diamond('B').one_row('B', indent=0, between=1) == "B B"
#assert diamond.Diamond('D').one_row('C', indent=1, between=3) == " C C"
def ignore_rows_is_a_list_of_all_diamond_rows():
assert diamond.Diamond('A').rows() == ["A"]
#assert diamond.Diamond('B').rows() == [" A", "B B", " A"]
def ignore_DiamondC_prints_correctly():
assert diamond.Diamond('C').print_diamond() == """\
A
B B
C C
B B
A"""
def ignore_DiamondD_is_correct():
assert diamond.Diamond('D').print_diamond() == """\
A
B B
C C
D D
C C
B B
A"""
|
import numpy as np
import os
import sys
import time
import platform
import pyvisa.errors as VisaError
from visa import constants
path = os.path.realpath('../')
if not path in sys.path:
sys.path.insert(0, path)
from pyDecorators import InOut, ChangeState, Catch
try:
import visa
except Exception as e:
print('\033[93m' + '-'*10 + 'EXCEPTION:')
print(__file__)
print(e)
print('-'*10 + 'end exception' + '\033[0m')
class ThorlabsP1xx(object):
#USB0::0x1313::0x807B::17121241::INSTR
def __init__(self,address='USB0::0x1313::0x807B::190218320::INSTR'):
try:
self._rm = visa.ResourceManager()
except:
            # Fall back to the pure-Python backend of pyvisa (pyvisa-py)
self._rm = visa.ResourceManager('@py')
self._address = address
self._open = False
def isOpen(fun):
def wrapper(*args, **kwargs):
self_app = args[0]
if self_app._open:
out = fun(*args, **kwargs)
return out
return wrapper
def waiter(fun):
def wrapper(*args, **kwargs):
out = fun(*args, **kwargs)
time.sleep(0.2)
return out
return wrapper
def Query(self, word):
return self._instr.query(word).strip()
def Write(self, word):
return self._instr.write(word)
@property
def connected(self):
return self._open
@connected.setter
def connected(self, val):
if val:
if not self._open:
if self._address in self._rm.list_resources():
self._instr = self._rm.open_resource(self._address,timeout = 10)
self._instr.write_termination = '\r\n'
self._instr.read_termination = '\n'
self._instr.timeout = 10000
self._open = True
else:
print('Please connect or provide the correct address for the powermeter')
self._open = False
else:
if self._open:
self._instr.close()
self._open = False
@property
@InOut.output(float)
@waiter
def power(self):
self._instr.write('Measure:Power?')
try:
data = self._instr.read()
return data.strip()
except Exception as err:
print(err)
self._instr.write('*RST')
self._instr.write('*CLS')
self._instr.close()
self._instr = self._rm.open_resource(self._address,timeout = 10)
self._instr.timeout = 10000
# return self.Query('Measure:Power?')
@property
@isOpen
@waiter
def identity(self):
word = "*IDN?"
return self.Query(word)
@property
@isOpen
@InOut.output(float)
@waiter
def lbd(self):
word = 'SENSE:CORRECTION:WAVELENGTH?'
return self.Query(word)
@lbd.setter
@InOut.accepts(float)
@waiter
def lbd(self, val):
word = 'SENSE:CORRECTION:WAVELENGTH {}'.format(val)
self.Write(word)
def __repr__(self):
s = ['Thorlabs Power Meter Class']
s += ['Use the self.power, self.lbd properties to use the pmeter']
s += ['---------------------------------------------------------']
s += ['Detector head:']
try:
s += ['\t' + self.identity]
except:
s += ['\tConnect to the detector using self.connected = True']
return '\n'.join(s)
if __name__ == "__main__":
P = ThorlabsP1xx()
P.connected = True
P.lbd = 1550
while True:
print("Power Read: {:.3f}uW".format(P.power*1e6 /(0.02)), end = "\r")
|
class Solution:
def isPalindrome(self, x):
if x < 0:
return False
if 0 <= x < 10:
return True
if x%10 == 0:
return False
recv = 0
while recv < x:
p = x % 10
x = int(x / 10)
recv = recv * 10 + p
if x == recv:
return True
if int(recv / 10) == x and x != 0:
return True
return False
solution = Solution()
print(solution.isPalindrome(2112))
|
from collections import OrderedDict
from math import log2, ceil
def get_data_types(data):
"""
returns a dictionary with column names and their respective data types
:param pandas.DataFrame data: a dataframe
:rtype: dict[str,str]
"""
dtypes = data.dtypes
return OrderedDict(zip(dtypes.index, dtypes.astype(str)))
def get_redshift_data_types(data):
"""
returns a dictionary with column names and their respective redshift data types
:param pandas.DataFrame data: a dataframe
:rtype: dict[str,str]
"""
data_types = get_data_types(data)
redshift_data_types = OrderedDict()
for column, data_type in data_types.items():
if data_type.startswith('int'):
redshift_data_types[column] = 'INTEGER'
elif data_type.startswith('float'):
redshift_data_types[column] = 'REAL'
elif data_type.startswith('datetime'):
redshift_data_types[column] = 'TIMESTAMP'
elif data_type.startswith('bool'):
redshift_data_types[column] = 'BOOLEAN'
        else:
            # Size VARCHAR columns to the byte length of the longest value,
            # rounded up to the next power of two minus one (e.g. 5 -> 7, 9 -> 15).
            max_length = int(max([len(bytes(str(x), 'utf-8')) for x in data[column].values]))
            varchar_length = 2 ** ceil(log2(max_length+1)) - 1
            redshift_data_types[column] = f'VARCHAR({varchar_length})'
return redshift_data_types
def get_redshift_create_table_query(database, schema, table, data, data_types=None):
"""
:param str database: name of database
:param str schema: name of schema
:param str table: name of table
:param pandas.DataFrame data: data to be uploaded
:param dict[str,str] data_types: a dictionary of the redshift data types
:rtype: str
"""
data_types = data_types or get_redshift_data_types(data=data)
data_types_str = ', \n'.join([f'"{col}" {dtype}' for col, dtype in data_types.items()])
return f"""
CREATE TABLE IF NOT EXISTS {database}.{schema}.{table} (
{data_types_str}
)
"""
|
import scripts.clausecat.clausecat_component
import scripts.clausecat.clause_segmentation
import scripts.clausecat.clausecat_reader
import scripts.clausecat.clausecat_model
import scripts.clausecat.clause_aggregation
import benepar
|
from pathlib import Path
def read(filename='in'):
file_path = Path(__file__).parent / filename
with file_path.open('r') as file:
return read_lines(file.readlines())
def read_lines(lines):
passports = []
passport = {}
for line in lines:
if not line.strip():
passports.append(passport)
passport = {}
for value in [v for v in line.strip().split(' ') if v]:
n, v = value.split(':')
passport[n] = v
passports.append(passport)
return passports
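# A minimal usage sketch (the input lines below are hypothetical and follow the
# blank-line-separated "key:value" record format that read_lines() expects):
if __name__ == '__main__':
    sample = [
        "ecl:gry pid:860033327 eyr:2020\n",
        "hcl:#fffffd byr:1937\n",
        "\n",
        "iyr:2013 ecl:amb cid:350\n",
    ]
    # Expected: two passports, the first with 5 fields and the second with 3
    print(read_lines(sample))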
|
#coding=utf-8
from dirbot.items import User
from user import UserSpider
from scrapy import Request, Selector
from urlparse import urlparse, parse_qs
import logging
import json
class UserFanSpider(UserSpider):
    """Docstring for UserFanSpider."""
    name = 'user_fan'  # naming convention: user_{channel from which the users were obtained}
def query_some_records(self, start_index = 0, num = 50):
"""TODO: Docstring for query_some_records.
:start_index: TODO
:num: TODO
:returns: TODO
"""
cursor = self.conn.cursor()
cursor.execute("""
SELECT name from fan limit %s, %s
""", (
start_index,
num
        ))  # deduplicate
return cursor.fetchall()
|
'''Test warnings replacement in PyShell.py and run.py.
This file could be expanded to include traceback overrides
(in same two modules). If so, change name.
Revise if output destination changes (http://bugs.python.org/issue18318).
Make sure warnings module is left unaltered (http://bugs.python.org/issue18081).
'''
import unittest
from test.support import captured_stderr
import warnings
# Try to capture default showwarning before Idle modules are imported.
showwarning = warnings.showwarning
# But if we run this file within idle, we are in the middle of the run.main loop
# and default showwarnings has already been replaced.
running_in_idle = 'idle' in showwarning.__name__
from idlelib import run
from idlelib import PyShell as shell
# The following was generated from PyShell.idle_formatwarning
# and checked as matching expectation.
idlemsg = '''
Warning (from warnings module):
  File "test_warning.py", line 99
    Line of code
UserWarning: Test
'''
shellmsg = idlemsg + ">>> "
class RunWarnTest(unittest.TestCase):
    @unittest.skipIf(running_in_idle, "Does not work when run within Idle.")
    def test_showwarnings(self):
        self.assertIs(warnings.showwarning, showwarning)
        run.capture_warnings(True)
        self.assertIs(warnings.showwarning, run.idle_showwarning_subproc)
        run.capture_warnings(False)
        self.assertIs(warnings.showwarning, showwarning)
    def test_run_show(self):
        with captured_stderr() as f:
            run.idle_showwarning_subproc(
                'Test', UserWarning, 'test_warning.py', 99, f, 'Line of code')
            # The following uses .splitlines to erase line-ending differences
            self.assertEqual(idlemsg.splitlines(), f.getvalue().splitlines())
class ShellWarnTest(unittest.TestCase):
    @unittest.skipIf(running_in_idle, "Does not work when run within Idle.")
    def test_showwarnings(self):
        self.assertIs(warnings.showwarning, showwarning)
        shell.capture_warnings(True)
        self.assertIs(warnings.showwarning, shell.idle_showwarning)
        shell.capture_warnings(False)
        self.assertIs(warnings.showwarning, showwarning)
    def test_idle_formatter(self):
        # Will fail if format changed without regenerating idlemsg
        s = shell.idle_formatwarning(
            'Test', UserWarning, 'test_warning.py', 99, 'Line of code')
        self.assertEqual(idlemsg, s)
    def test_shell_show(self):
        with captured_stderr() as f:
            shell.idle_showwarning(
                'Test', UserWarning, 'test_warning.py', 99, f, 'Line of code')
            self.assertEqual(shellmsg.splitlines(), f.getvalue().splitlines())
if __name__ == '__main__':
    unittest.main(verbosity=2, exit=False)
|
from typing import Tuple
import os
def validate_update_todo(todo_path: str, todo_name: str, todo_file_name: str) -> Tuple[bool, str]:
"""
TODOファイルの名前を更新する際に実行するチェック関数
Parameters
----------
todo_path: str
todo_name: str
todo_file_name: str
Returns
-------
is_validate, error_msg: Tuple[bool, str]
"""
is_validate_name_empty, error_msg_empty = validate_todo_name_empty(todo_name)
is_validate_name, error_msg_name = validate_todo_name(todo_name)
is_validate_double_name, error_msg_double_name = validate_double_todo_name(todo_file_name, todo_path)
is_validate: bool = is_validate_name_empty and is_validate_name and is_validate_double_name
error_msg: str = ""
if not is_validate:
error_msg = "\n".join([error_msg_empty, error_msg_name, error_msg_double_name])
return is_validate, error_msg
def validate_add_todo(todo_path: str, todo_name: str, todo_file_name: str) -> Tuple[bool, str]:
is_validate_name_empty, error_msg_empty = validate_todo_name_empty(todo_name)
is_validate_name, error_msg_name = validate_todo_name(todo_name)
is_validate_double_name, error_msg_double_name = validate_double_todo_name(todo_file_name, todo_path)
is_validate: bool = is_validate_name_empty and is_validate_name and is_validate_double_name
error_msg: str = "\n".join([error_msg_empty, error_msg_name, error_msg_double_name])
if is_validate:
return True, ""
else:
return False, error_msg
def validate_todo_name_empty(todo_name: str) -> Tuple[bool, str]:
    """
    Checks whether the TODO file name is an empty string.
    Parameters
    ----------
    todo_name: str
        Name of the todo file.
    Returns
    -------
    is_validate: bool, error_msg: str
        Whether the validation check passed, and the error message when it did not.
    """
    if todo_name == "":
        return False, "Please enter a name."
    return True, ""
def validate_todo_name(todo_name: str) -> Tuple[bool, str]:
    """
    Checks the TODO file name for characters that are not allowed.
    Parameters
    ----------
    todo_name: str
        Name of the todo file.
    Returns
    -------
    is_validate: bool, error_msg: str
        Whether the validation check passed, and the error message when it did not.
    """
    not_use_strings: list = ["/", ".", "\\"]
    for not_use_string in not_use_strings:
        if not_use_string in set(todo_name):
            return False, f"'{not_use_string}' cannot be used in a name."
    return True, ""
def validate_double_todo_name(todo_name: str, todo_path: str) -> Tuple[bool, str]:
    """
    Checks whether the TODO file name duplicates an existing one.
    Parameters
    ----------
    todo_name: str
        Name of the todo file.
    todo_path: str
        Path of the todo file.
    Returns
    -------
    is_validate: bool, error_msg: str
        Whether the validation check passed, and the error message when it did not.
    """
    if os.path.lexists(os.path.join(todo_path, todo_name)):
        return False, f"'{todo_name}' duplicates an existing TODO name."
    return True, ""
|
import clipboard
from twisted.internet import task
class Clipboard(object):
def __init__(self, clipboard, clipboard_polling_interval=1.0):
self._clipboard = clipboard
self._clipboard_callbacks = []
self._last_clipboard_value_on_poll = self.value
self._polling_is_active = False
task.LoopingCall(self._clipboard_listener).start(clipboard_polling_interval)
@property
def value(self):
try:
return self._clipboard.paste()
except:
return ''
def copy(self, value):
self._clipboard.copy(value)
self._last_clipboard_value_on_poll = value
def on_clipboard_change(self, callback):
self._clipboard_callbacks.append(callback)
def _clipboard_listener(self):
current_clipboard_value = self.value
if current_clipboard_value != self._last_clipboard_value_on_poll:
self._last_clipboard_value_on_poll = current_clipboard_value
for callback in self._clipboard_callbacks:
callback(current_clipboard_value)
@staticmethod
def create(args):
return Clipboard(clipboard)
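# A minimal usage sketch: poll the system clipboard once per second (the default
# polling interval) and print any change. The twisted reactor must be running for
# the LoopingCall started in __init__ to fire; the callback below is illustrative.
if __name__ == '__main__':
    from twisted.internet import reactor
    monitor = Clipboard.create(args=None)
    monitor.on_clipboard_change(lambda value: print("clipboard changed:", value))
    reactor.run()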
|
from __future__ import absolute_import
from widgy.contrib.review_queue.site import ReviewedWidgySite
class DemoWidgySite(ReviewedWidgySite):
pass
widgy_site = DemoWidgySite()
|
import logging
from typing import Dict, List, Any
from mlflow import pyfunc
class PyFuncEnsemblerRunner:
"""
PyFunc ensembler runner used for real-time outputs
"""
def __init__(self, artifact_dir: str):
self.artifact_dir = artifact_dir
self._ensembler = None
def load(self):
self._ensembler = pyfunc.load_model(self.artifact_dir)
def predict(self, inputs: Dict[str, Any]) -> List[Any]:
logging.info(f"Input request payload: {inputs}")
output = self._ensembler.predict(inputs)
logging.info(f"Output response: {output}")
return output
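# A minimal usage sketch (the artifact directory and request payload below are
# hypothetical; pyfunc.load_model() expects a directory containing an MLflow
# pyfunc model):
if __name__ == '__main__':
    runner = PyFuncEnsemblerRunner(artifact_dir="/path/to/ensembler/artifacts")
    runner.load()
    print(runner.predict({"instances": [[1.0, 2.0], [3.0, 4.0]]}))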
|
import os
from a_d.ny_AD import detect as nyImDetect
from a_d.absZ_AD_1 import detect as absZDetect
from a_d.phase_AD_1 import detect as phaseDetect
from data_processor.GOA_preprocessor.goa_data_wrapper import load_Lai_EIS_data
from IS.IS import IS_0
from goa.integration.goa_intergration import goa_fitter_1
from playground.laiZhaoGui.getLaiVogitAddC import getLaiVogitAddCResDict
from playground.laiZhaoGui.goa.GOAs_fit_EIS_0 import get_para_range
from playground.laiZhaoGui.goa.GOAs_fit_EIS_1 import load_eis_ECM_dict
from data_processor.GOA_preprocessor.goa_data_wrapper import load_lai_manual_fitting_res
from utils.visualize_utils.IS_plots.ny import nyquist_plot
from utils.visualize_utils.IS_plots.bd import bode_one_plot
"""
Module Function
1- 先去除异常点
2- 使用一个合适的GOA拟合ECM参数
"""
# Import EIS
# Load Lai's normalized EIS data (impedance multiplied by the experimental area 1.01 * 1e-6 cm^2)
"""
lai_normed_eis_dict_list[
dict0{
'file_name': '1-1',
'ecm_num': 9,
'f': [100078.1, 63140.62, ..., 0.1588983, 0.1001603],
'z_raw': [(0.005566658429999999-0.0112022736j),
(0.006214947129999999-0.0172324988j),
...,
(285.52881799999994-486.4391289999999j),
(370.64242699999994-661.259928j)]
}
dict1,
...
]
"""
lai_normed_eis_dict_list = load_Lai_EIS_data(file_path='../../../datasets/goa_datasets/normed',
file_name='2020_08_22_goa_lai_normed_dataset_pickle.file')
# --------------------------------- Remove outliers from the raw EIS data ---------------------------------
# --------------- First, try removing outliers from a single raw EIS spectrum ---------------
def tryOneEIS_AD(eisIndex=1):
    # read a single raw EIS spectrum
normedRawEIS = IS_0()
print(lai_normed_eis_dict_list[eisIndex])
print('----------------------------------------')
normedRawEIS.readFromLaiPickle(laiNormedEisDict=lai_normed_eis_dict_list[eisIndex], limitList=None)
# plot Raw-EIS Nyquist and Bode for visual inspection
nyquist_plot(z_list=normedRawEIS.z_arr,
grid_flag=False, fig_title='Nyquist-NormedRawEIS-i={}'.format(eisIndex))
bode_one_plot(fre_list=normedRawEIS.fre_arr, z_list=normedRawEIS.z_arr,
fig_title='Bode-NormedRawEIS-i={}'.format(eisIndex))
# Remove Outlier
deletedPointIndex_list = nyImDetect(eis_source=normedRawEIS,
vogitAddC=True,
pointNum=10,
chiSquareLimit=2.5*1e-2,
printFlag=True)
print(deletedPointIndex_list)
# ------------- Check the Nyquist and Bode plots of EIS after deleted possible outliers -------------
for dpi in deletedPointIndex_list:
normedRawEIS.removeZByIndex(index=dpi)
# plot Raw-EIS and AD-EIS to compare (Nyquist and Bode)
nyquist_plot(z_list=normedRawEIS.z_arr,
grid_flag=False, fig_title='Nyquist-NormedRawEIS-delete:{}'.format(deletedPointIndex_list))
bode_one_plot(fre_list=normedRawEIS.fre_arr, z_list=normedRawEIS.z_arr,
fig_title='Bode-NormedRawEIS-delete:{}'.format(deletedPointIndex_list))
# ------------- Check the Nyquist and Bode plots of EIS after deleted possible outliers -------------
return normedRawEIS
# EIS after outlier removal
# eis_AD = tryOneEIS_AD(eisIndex=17)
# --------------- First, try removing outliers from a single raw EIS spectrum ---------------
# --------------------------------- Remove outliers from the raw EIS data ---------------------------------
"""
Set parameters' search range according to Lai's manual fitting results
lai_manual_fit_res_dict{
'1-14':{
'para': [0.01839, 0.006388, 0.8688, 1.175, 0.002783, 0.798, 1371.0],
'limit': [[0.0001, 1], [1e-05, 0.1], [0.3, 1.0], [0.01, 100], [1e-05, 0.1], [0.3, 1.0], [10, 100000]],
'chi_square': 0.001314
}
'2-13':{},
...
}
"""
lai_manual_fit_res_dict = load_lai_manual_fitting_res(file_path='../../../datasets/goa_datasets/Lai_manual_fitting_res',
file_names=['2020_07_22_lai_ecm2_fitting_res.CSV',
'2020_07_22_lai_ecm9_fitting_res.CSV'])
laiVogitAddCResDict = getLaiVogitAddCResDict(fp='../', fn='laiAddVogitCRes.txt')
def packEisDict(detectType):
global lai_normed_eis_dict_list
global lai_manual_fit_res_dict
eisDictList = []
    # For a quick code test:
    # for lai_normed_eis_dict in lai_normed_eis_dict_list[:3]:
    # Full run:
for lai_normed_eis_dict in lai_normed_eis_dict_list:
eisDict = {}
eisDict['exp_fn'] = lai_normed_eis_dict['file_name']
print(eisDict['exp_fn'])
eisDict['ecm_num'] = lai_normed_eis_dict['ecm_num']
eisDict['limit'] = lai_manual_fit_res_dict[lai_normed_eis_dict['file_name']]['limit']
# ------------ delete Outlier ------------
normedRawEIS = IS_0()
normedRawEIS.readFromLaiPickle(laiNormedEisDict=lai_normed_eis_dict, limitList=None)
if detectType == 'nyIm':
deletedPointIndex_list = nyImDetect(eis_source=normedRawEIS,
# vogitAddC=True,
vogitAddC=laiVogitAddCResDict[eisDict['exp_fn']],
pointNum=10,
chiSquareLimit=2.5 * 1e-2,
printFlag=False)
elif detectType == 'absZ':
deletedPointIndex_list = absZDetect(eis_source=normedRawEIS,
# vogitAddC=True,
vogitAddC=laiVogitAddCResDict[eisDict['exp_fn']],
pointNum=10,
chiSquareLimit=2.5 * 1e-2,
printFlag=False)
elif detectType == 'phase':
deletedPointIndex_list = phaseDetect(eis_source=normedRawEIS,
# vogitAddC=True,
vogitAddC=laiVogitAddCResDict[eisDict['exp_fn']],
pointNum=10,
chiSquareLimit=2.5 * 1e-2,
printFlag=False)
for dpi in deletedPointIndex_list:
normedRawEIS.removeZByIndex(index=dpi)
# ------------ delete Outlier ------------
        # ------------ Replace fre and z_raw with the values remaining after outlier removal ------------
eisDict['f'] = normedRawEIS.fre_arr.tolist()
eisDict['z_raw'] = normedRawEIS.z_arr.tolist()
        # ------------ Replace fre and z_raw with the values remaining after outlier removal ------------
eisDictList.append(eisDict)
return eisDictList
# eisDictList = packEisDict(detectType='nyIm')
# eisDictList = packEisDict(detectType='absZ')
eisDictList = packEisDict(detectType='phase')
# 2 - Fit the ECM parameters with a suitable GOA
for eisDict in eisDictList:
goa_fitter_1(ecm_para_config_dict=eisDict, repeat_time=1) |
from pygarl.data_readers import SerialDataReader
from pygarl.mocks import VerboseTestSampleManager
# This example uses a SerialDataReader to read data from a serial port
# and uses a VerboseTestSampleManager to print the received data and signals
def run_example(*args, **kwargs):
# Create the SerialDataReader
sdr = SerialDataReader(kwargs['port'], expected_axis=6, verbose=False)
# Create a simple SampleManager that only prints the received data and signals
manager = VerboseTestSampleManager()
# Attach the manager
sdr.attach_manager(manager)
# Open the serial connection
sdr.open()
print("Opened!")
# Start the main loop
sdr.mainloop()
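# A minimal way to run this example (the serial port name below is hypothetical;
# replace it with the port your device is actually attached to):
if __name__ == "__main__":
    run_example(port="/dev/ttyACM0")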
|
#! /usr/bin/env python3
import argparse
from contextlib import contextmanager, closing
import os
import sys
import tempfile
import pysam
import pbio.misc.parallel as parallel
import pbio.misc.shell_utils as shell_utils
import pbio.misc.slurm as slurm
import logging
import pbio.misc.logging_utils as logging_utils
logger = logging.getLogger(__name__)
default_chrom = 'all'
default_start = 0
default_end = None
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="This script converts bam files to bigWig files. It is mostly "
"a port of this script: https://github.com/chapmanb/bcbb/blob/master/nextgen/scripts/bam_to_wiggle.py "
"by Brad Chapman which avoids a few dependencies.\n\nThe wigToBigWig "
"program (from UCSC tools) must be in the path.\n\nN.B. If given, the "
"start and end coordinates must be base-0.")
parser.add_argument('bam', help="The bam file", nargs='+')
parser.add_argument('-o', '--overwrite', help="If this flag is given, then "
"the bigWig file will be created whether it exists or not",
action='store_true')
parser.add_argument('-c', '--chrom', help="If specified, only alignments "
"from this chromosome will be in the output", default=default_chrom)
    parser.add_argument('-s', '--start', help="If specified, only alignments "
        "from this position will be in the output", default=default_start)
    parser.add_argument('-e', '--end', help="If specified, only alignments "
        "up to this position will be in the output", default=default_end)
parser.add_argument('-n', '--normalize', help="If this flag is given, "
"then values will be normalized to reads per million", action='store_true')
parser.add_argument('-t', '--use-tempfile', help="If this flag is given, "
"then a temp file will be used to avoid permission issues",
action='store_true')
parser.add_argument('-k', '--keep-wig', help="If this flag is given, then "
"the wiggle file will not be deleted", action='store_true')
slurm.add_sbatch_options(parser)
logging_utils.add_logging_options(parser)
args = parser.parse_args()
logging_utils.update_logging(args)
programs = ['wigToBigWig']
shell_utils.check_programs_exist(programs)
if args.use_slurm:
cmd = ' '.join(sys.argv)
slurm.check_sbatch(cmd, args=args)
return
parallel.apply_parallel_iter(args.bam, args.num_cpus, bam_to_wiggle, args,
progress_bar=True)
def bam_to_wiggle(bam, args):
out = "{}.bigWig".format(bam)
regions = [(args.chrom, args.start, args.end)]
bigWig_exists = (os.path.exists(out) and os.path.getsize(out) > 0)
if args.overwrite or not bigWig_exists:
if args.use_tempfile:
            # Use a temp file to avoid any possibility of not having write permission
out_handle = tempfile.NamedTemporaryFile(delete=False)
wig_file = out_handle.name
else:
out_base = os.path.splitext(out)[0]
wig_file = "{}.wig".format(out_base)
out_handle = open(wig_file, "w")
msg = "Writing bam to wig"
logger.info(msg)
with closing(out_handle):
chr_sizes, wig_valid = write_bam_track(bam, regions, out_handle,
args.normalize)
try:
msg = "Converting wig to bigWig"
logger.info(msg)
if wig_valid:
convert_to_bigwig(wig_file, out, chr_sizes, args)
finally:
if not args.keep_wig:
msg = "Removing wig file"
logger.info(msg)
os.remove(wig_file)
else:
msg = "Keeping wig file"
logger.info(msg)
else:
msg = "The bigWig file already exists. Quitting."
logger.warning(msg)
@contextmanager
def indexed_bam(bam_file):
if not os.path.exists(bam_file + ".bai"):
pysam.index(bam_file)
sam_reader = pysam.Samfile(bam_file, "rb")
yield sam_reader
sam_reader.close()
def write_bam_track(bam_file, regions, out_handle, normalize):
out_handle.write("track %s\n" % " ".join(["type=wiggle_0",
"name=%s" % os.path.splitext(os.path.split(bam_file)[-1])[0],
"visibility=full",
]))
normal_scale = 1e6
is_valid = False
with indexed_bam(bam_file) as work_bam:
total = sum(1 for r in work_bam.fetch() if not r.is_unmapped) if normalize else None
sizes = list(zip(work_bam.references, work_bam.lengths))
if len(regions) == 1 and regions[0][0] == "all":
regions = [(name, 0, length) for name, length in sizes]
for chrom, start, end in regions:
if end is None and chrom in work_bam.references:
end = work_bam.lengths[work_bam.references.index(chrom)]
assert end is not None, "Could not find %s in header" % chrom
out_handle.write("variableStep chrom=%s\n" % chrom)
for col in work_bam.pileup(chrom, start, end):
if normalize:
n = float(col.n) / total * normal_scale
else:
n = col.n
out_handle.write("%s %.1f\n" % (col.pos+1, n))
is_valid = True
return sizes, is_valid
def convert_to_bigwig(wig_file, out, chr_sizes, args):
size_file = "%s-sizes.txt" % (os.path.splitext(wig_file)[0])
with open(size_file, "w") as out_handle:
for chrom, size in chr_sizes:
out_handle.write("%s\t%s\n" % (chrom, size))
try:
msg = "Calling wigToBigWig"
logger.info(msg)
cmd = "wigToBigWig {} {} {}".format(wig_file, size_file, out)
shell_utils.check_call(cmd)
finally:
if args.keep_wig:
msg = "Keeping size file"
logger.info(msg)
else:
msg = "Removing size file"
logger.info(msg)
os.remove(size_file)
return out
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import codecs
import fnmatch
import os
import subprocess
import sys
import tarfile
import unicodedata
import pandas
import progressbar
from sox import Transformer
from tensorflow.python.platform import gfile
from deepspeech_training.util.downloader import maybe_download
SAMPLE_RATE = 16000
def _download_and_preprocess_data(data_dir):
# Conditionally download data to data_dir
print(
"Downloading Librivox data set (55GB) into {} if not already present...".format(
data_dir
)
)
with progressbar.ProgressBar(max_value=7, widget=progressbar.AdaptiveETA) as bar:
TRAIN_CLEAN_100_URL = (
"http://www.openslr.org/resources/12/train-clean-100.tar.gz"
)
TRAIN_CLEAN_360_URL = (
"http://www.openslr.org/resources/12/train-clean-360.tar.gz"
)
TRAIN_OTHER_500_URL = (
"http://www.openslr.org/resources/12/train-other-500.tar.gz"
)
DEV_CLEAN_URL = "http://www.openslr.org/resources/12/dev-clean.tar.gz"
DEV_OTHER_URL = "http://www.openslr.org/resources/12/dev-other.tar.gz"
TEST_CLEAN_URL = "http://www.openslr.org/resources/12/test-clean.tar.gz"
TEST_OTHER_URL = "http://www.openslr.org/resources/12/test-other.tar.gz"
def filename_of(x):
return os.path.split(x)[1]
train_clean_100 = maybe_download(
filename_of(TRAIN_CLEAN_100_URL), data_dir, TRAIN_CLEAN_100_URL
)
bar.update(0)
train_clean_360 = maybe_download(
filename_of(TRAIN_CLEAN_360_URL), data_dir, TRAIN_CLEAN_360_URL
)
bar.update(1)
train_other_500 = maybe_download(
filename_of(TRAIN_OTHER_500_URL), data_dir, TRAIN_OTHER_500_URL
)
bar.update(2)
dev_clean = maybe_download(filename_of(DEV_CLEAN_URL), data_dir, DEV_CLEAN_URL)
bar.update(3)
dev_other = maybe_download(filename_of(DEV_OTHER_URL), data_dir, DEV_OTHER_URL)
bar.update(4)
test_clean = maybe_download(
filename_of(TEST_CLEAN_URL), data_dir, TEST_CLEAN_URL
)
bar.update(5)
test_other = maybe_download(
filename_of(TEST_OTHER_URL), data_dir, TEST_OTHER_URL
)
bar.update(6)
# Conditionally extract LibriSpeech data
# We extract each archive into data_dir, but test for existence in
# data_dir/LibriSpeech because the archives share that root.
print("Extracting librivox data if not already extracted...")
with progressbar.ProgressBar(max_value=7, widget=progressbar.AdaptiveETA) as bar:
LIBRIVOX_DIR = "LibriSpeech"
work_dir = os.path.join(data_dir, LIBRIVOX_DIR)
_maybe_extract(
data_dir, os.path.join(LIBRIVOX_DIR, "train-clean-100"), train_clean_100
)
bar.update(0)
_maybe_extract(
data_dir, os.path.join(LIBRIVOX_DIR, "train-clean-360"), train_clean_360
)
bar.update(1)
_maybe_extract(
data_dir, os.path.join(LIBRIVOX_DIR, "train-other-500"), train_other_500
)
bar.update(2)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "dev-clean"), dev_clean)
bar.update(3)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "dev-other"), dev_other)
bar.update(4)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "test-clean"), test_clean)
bar.update(5)
_maybe_extract(data_dir, os.path.join(LIBRIVOX_DIR, "test-other"), test_other)
bar.update(6)
# Convert FLAC data to wav, from:
# data_dir/LibriSpeech/split/1/2/1-2-3.flac
# to:
# data_dir/LibriSpeech/split-wav/1-2-3.wav
#
# And split LibriSpeech transcriptions, from:
# data_dir/LibriSpeech/split/1/2/1-2.trans.txt
# to:
# data_dir/LibriSpeech/split-wav/1-2-0.txt
# data_dir/LibriSpeech/split-wav/1-2-1.txt
# data_dir/LibriSpeech/split-wav/1-2-2.txt
# ...
print("Converting FLAC to WAV and splitting transcriptions...")
with progressbar.ProgressBar(max_value=7, widget=progressbar.AdaptiveETA) as bar:
train_100 = _convert_audio_and_split_sentences(
work_dir, "train-clean-100", "train-clean-100-wav"
)
bar.update(0)
train_360 = _convert_audio_and_split_sentences(
work_dir, "train-clean-360", "train-clean-360-wav"
)
bar.update(1)
train_500 = _convert_audio_and_split_sentences(
work_dir, "train-other-500", "train-other-500-wav"
)
bar.update(2)
dev_clean = _convert_audio_and_split_sentences(
work_dir, "dev-clean", "dev-clean-wav"
)
bar.update(3)
dev_other = _convert_audio_and_split_sentences(
work_dir, "dev-other", "dev-other-wav"
)
bar.update(4)
test_clean = _convert_audio_and_split_sentences(
work_dir, "test-clean", "test-clean-wav"
)
bar.update(5)
test_other = _convert_audio_and_split_sentences(
work_dir, "test-other", "test-other-wav"
)
bar.update(6)
# Write sets to disk as CSV files
train_100.to_csv(
os.path.join(data_dir, "librivox-train-clean-100.csv"), index=False
)
train_360.to_csv(
os.path.join(data_dir, "librivox-train-clean-360.csv"), index=False
)
train_500.to_csv(
os.path.join(data_dir, "librivox-train-other-500.csv"), index=False
)
dev_clean.to_csv(os.path.join(data_dir, "librivox-dev-clean.csv"), index=False)
dev_other.to_csv(os.path.join(data_dir, "librivox-dev-other.csv"), index=False)
test_clean.to_csv(os.path.join(data_dir, "librivox-test-clean.csv"), index=False)
test_other.to_csv(os.path.join(data_dir, "librivox-test-other.csv"), index=False)
def _maybe_extract(data_dir, extracted_data, archive):
# If data_dir/extracted_data does not exist, extract archive in data_dir
if not gfile.Exists(os.path.join(data_dir, extracted_data)):
tar = tarfile.open(archive)
tar.extractall(data_dir)
tar.close()
def _convert_audio_and_split_sentences(extracted_dir, data_set, dest_dir):
source_dir = os.path.join(extracted_dir, data_set)
target_dir = os.path.join(extracted_dir, dest_dir)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# Loop over transcription files and split each one
#
# The format for each file 1-2.trans.txt is:
# 1-2-0 transcription of 1-2-0.flac
# 1-2-1 transcription of 1-2-1.flac
# ...
#
# Each file is then split into several files:
# 1-2-0.txt (contains transcription of 1-2-0.flac)
# 1-2-1.txt (contains transcription of 1-2-1.flac)
# ...
#
# We also convert the corresponding FLACs to WAV in the same pass
files = []
for root, dirnames, filenames in os.walk(source_dir):
for filename in fnmatch.filter(filenames, "*.trans.txt"):
trans_filename = os.path.join(root, filename)
with codecs.open(trans_filename, "r", "utf-8") as fin:
for line in fin:
# Parse each segment line
first_space = line.find(" ")
seqid, transcript = line[:first_space], line[first_space + 1 :]
# We need to do the encode-decode dance here because encode
# returns a bytes() object on Python 3, and text_to_char_array
# expects a string.
transcript = (
unicodedata.normalize("NFKD", transcript)
.encode("ascii", "ignore")
.decode("ascii", "ignore")
)
transcript = transcript.lower().strip()
# Convert corresponding FLAC to a WAV
flac_file = os.path.join(root, seqid + ".flac")
wav_file = os.path.join(target_dir, seqid + ".wav")
if not os.path.exists(wav_file):
tfm = Transformer()
tfm.set_output_format(rate=SAMPLE_RATE)
tfm.build(flac_file, wav_file)
wav_filesize = os.path.getsize(wav_file)
files.append((os.path.abspath(wav_file), wav_filesize, transcript))
return pandas.DataFrame(
data=files, columns=["wav_filename", "wav_filesize", "transcript"]
)
if __name__ == "__main__":
_download_and_preprocess_data(sys.argv[1]) |
#!/usr/bin/env python
# coding: utf-8
OR_train_data = [[0,0,0], [0,1,1], [1,0,1], [1,1,1]]
AND_train_data = [[0,0,0], [0,1,0], [1,0,0], [1,1,1]]
NOR_train_data = [[0,0,1], [0,1,0], [1,0,0], [1,1,0]]
NAND_train_data = [[0,0,1], [0,1,1], [1,0,1], [1,1,0]]
w1 = 0.5
w2 = 0.2
b = -2
eta = 0.7
for epoch in range(1, 10):
    print("Epoch: " + str(epoch))
    # iterate over every datapoint in the training set (no random sampling is done here)
    for data in NAND_train_data:
x1 = data[0]
x2 = data[1]
label = data[2]
# prediction
pred = w1*x1 + w2*x2 - b
# if mistake in prediction; correct
if label == 1 and pred <= 0:
w1 = w1 + eta*x1
w2 = w2 + eta*x2
b = b - eta
if label == 0 and pred > 0:
w1 = w1 - eta*x1
w2 = w2 - eta*x2
b = b + eta
print(label, 1 if pred>0 else 0, w1, w2, b)
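# After training, verify the learned weights on all four NAND inputs
# (a small check added for illustration; it uses the same decision rule as above):
for x1, x2, label in NAND_train_data:
    pred = 1 if (w1*x1 + w2*x2 - b) > 0 else 0
    print("input=({}, {}) expected={} got={}".format(x1, x2, label, pred))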
|
#!/usr/bin/python3
r'''Tests special-case projection functions
Simple things like project_lonlat(), project_stereographic(), etc.
I do 3 things:
1. I make sure the projection functions return the correct values. This is a
   regression test, so the "right" values were recorded at some point, and any
   deviation is flagged.
2. I make sure that project(unproject(x)) == x
3. I run a gradient check. I do these for the simple project_...()
   function AND the generic project() function.
'''
import sys
import numpy as np
import numpysane as nps
import os
testdir = os.path.dirname(os.path.realpath(__file__))
# I import the LOCAL mrcal since that's what I'm testing
sys.path[:0] = f"{testdir}/..",
import mrcal
import testutils
from test_calibration_helpers import grad
if len(sys.argv) != 2:
raise Exception("Need one argument on the commandline: the projection type. Currently I support 'pinhole','latlon','lonlat','stereographic'")
if sys.argv[1] == 'pinhole' or \
sys.argv[1] == 'latlon' or \
sys.argv[1] == 'lonlat':
# pixels/rad
fx,fy = 3000., 2000.
# pixel where latlon = (0,0) projects to. May be negative
cx,cy = (-10000., 4000.)
# a few points, some wide, some not. Some behind the camera
p = np.array(((1.0, 2.0, 10.0),
(-1.1, 0.3, -1.0),
(-0.9, -1.5, -1.0)))
if sys.argv[1] == 'pinhole': unproject_is_normalized = False
else: unproject_is_normalized = True
if sys.argv[1] == 'pinhole':
# pinhole projects ahead only
p[:,2] = abs(p[:,2])
if sys.argv[1] == 'pinhole':
lensmodel = 'LENSMODEL_PINHOLE'
func_project = mrcal.project_pinhole
func_unproject = mrcal.unproject_pinhole
name = 'pinhole'
q_projected_ref = np.array([[ -9700., 4400.],
[ -13300., 4600.],
[ -12700., 1000.]])
elif sys.argv[1] == 'lonlat':
lensmodel = 'LENSMODEL_LONLAT'
func_project = mrcal.project_lonlat
func_unproject = mrcal.unproject_lonlat
name = 'lonlat'
q_projected_ref = np.array([[ -9700.99404253, 4392.88198287],
[-16925.83416075, 4398.25498944],
[-17226.33265541, 2320.61601685]])
elif sys.argv[1] == 'latlon':
lensmodel = 'LENSMODEL_LATLON'
func_project = mrcal.project_latlon
func_unproject = mrcal.unproject_latlon
name = 'latlon'
q_projected_ref = np.array([[ -9706.7632608 , 4394.7911197 ],
[-12434.4909092 , 9700.27171822],
[-11389.09468198, -317.59786068]])
elif sys.argv[1] == 'stereographic':
lensmodel = 'LENSMODEL_STEREOGRAPHIC'
func_project = mrcal.project_stereographic
func_unproject = mrcal.unproject_stereographic
name = 'stereographic'
fx,fy,cx,cy = 1512., 1112, 500., 333.
# a few points, some wide, some not. Some behind the camera
p = np.array(((1.0, 2.0, 10.0),
(-1.1, 0.3, -1.0),
(-0.9, -1.5, -1.0)))
q_projected_ref = np.array([[ 649.35582325, 552.6874014 ],
[-5939.33490417, 1624.58376866],
[-2181.52681292, -2953.8803086 ]])
unproject_is_normalized = False
else:
raise Exception("Unknown projection type. Currently I support 'lonlat','stereographic'")
intrinsics = (lensmodel, np.array((fx,fy,cx,cy)))
q_projected = func_project(p, intrinsics[1])
testutils.confirm_equal(q_projected,
q_projected_ref,
msg = f"project_{name}()",
worstcase = True,
relative = True)
testutils.confirm_equal(mrcal.project(p, *intrinsics),
q_projected,
msg = f"project({name}) returns the same as project_{name}()",
worstcase = True,
relative = True)
v_unprojected = func_unproject(q_projected, intrinsics[1])
if unproject_is_normalized:
testutils.confirm_equal( nps.mag(v_unprojected),
1.,
msg = f"unproject_{name}() returns normalized vectors",
worstcase = True,
relative = True)
testutils.confirm_equal( v_unprojected,
p / nps.dummy(nps.mag(p), axis=-1),
msg = f"unproject_{name}()",
worstcase = True,
relative = True)
else:
cos = nps.inner(v_unprojected, p) / (nps.mag(p)*nps.mag(v_unprojected))
cos = np.clip(cos, -1, 1)
testutils.confirm_equal( np.arccos(cos),
np.zeros((p.shape[0],), dtype=float),
msg = f"unproject_{name}()",
worstcase = True)
# Not normalized by default. Make sure that if I ask for it to be
# normalized, that it is
testutils.confirm_equal( nps.mag( mrcal.unproject(q_projected, *intrinsics, normalize = True) ),
1.,
msg = f"unproject({name},normalize = True) returns normalized vectors",
worstcase = True,
relative = True)
testutils.confirm_equal( nps.mag( mrcal.unproject(q_projected, *intrinsics, normalize = True, get_gradients = True)[0] ),
1.,
msg = f"unproject({name},normalize = True, get_gradients=True) returns normalized vectors",
worstcase = True,
relative = True)
testutils.confirm_equal( mrcal.unproject(q_projected, *intrinsics),
v_unprojected,
msg = f"unproject({name}) returns the same as unproject_{name}()",
worstcase = True,
relative = True)
testutils.confirm_equal( mrcal.project(mrcal.unproject(q_projected, *intrinsics),*intrinsics),
q_projected,
msg = f"project(unproject()) is an identity",
worstcase = True,
relative = True)
testutils.confirm_equal( func_project(func_unproject(q_projected,intrinsics[1]),intrinsics[1]),
q_projected,
msg = f"project_{name}(unproject_{name}()) is an identity",
worstcase = True,
relative = True)
# Now gradients for project()
ipt = 1
_,dq_dp_reported = func_project(p[ipt], intrinsics[1], get_gradients=True)
dq_dp_observed = grad(lambda p: func_project(p, intrinsics[1]),
p[ipt])
testutils.confirm_equal(dq_dp_reported,
dq_dp_observed,
msg = f"project_{name}() dq/dp",
worstcase = True,
relative = True)
_,dq_dp_reported,dq_di_reported = mrcal.project(p[ipt], *intrinsics, get_gradients=True)
dq_dp_observed = grad(lambda p: mrcal.project(p, *intrinsics),
p[ipt])
dq_di_observed = grad(lambda intrinsics_data: mrcal.project(p[ipt], intrinsics[0],intrinsics_data),
intrinsics[1])
testutils.confirm_equal(dq_dp_reported,
dq_dp_observed,
msg = f"project({name}) dq/dp",
worstcase = True,
relative = True)
testutils.confirm_equal(dq_di_reported,
dq_di_observed,
msg = f"project({name}) dq/di",
worstcase = True,
relative = True,
eps = 1e-5)
# Now gradients for unproject()
ipt = 1
_,dv_dq_reported = func_unproject(q_projected[ipt], intrinsics[1], get_gradients=True)
dv_dq_observed = grad(lambda q: func_unproject(q, intrinsics[1]),
q_projected[ipt])
testutils.confirm_equal(dv_dq_reported,
dv_dq_observed,
msg = f"unproject_{name}() dv/dq",
worstcase = True,
relative = True,
eps = 2e-6)
for normalize in (False, True):
v_unprojected,dv_dq_reported,dv_di_reported = \
mrcal.unproject(q_projected[ipt], *intrinsics,
get_gradients = True,
normalize = normalize)
dv_dq_observed = grad(lambda q: mrcal.unproject(q, *intrinsics, normalize=normalize),
q_projected[ipt])
dv_di_observed = grad(lambda intrinsics_data: mrcal.unproject(q_projected[ipt], intrinsics[0],intrinsics_data, normalize=normalize),
intrinsics[1])
testutils.confirm_equal(dv_dq_reported,
dv_dq_observed,
msg = f"unproject({name}, normalize={normalize}) dv/dq",
worstcase = True,
relative = True,
eps = 1e-5)
testutils.confirm_equal(dv_di_reported,
dv_di_observed,
msg = f"unproject({name}, normalize={normalize}) dv/di",
worstcase = True,
relative = True,
eps = 1e-5)
v_unprojected_inplace = v_unprojected.copy() *0
dv_dq_reported_inplace = dv_dq_reported.copy()*0
dv_di_reported_inplace = dv_di_reported.copy()*0
mrcal.unproject(q_projected[ipt], *intrinsics, get_gradients=True, normalize=normalize,
out = [v_unprojected_inplace,dv_dq_reported_inplace,dv_di_reported_inplace])
testutils.confirm_equal(v_unprojected_inplace,
v_unprojected,
msg = f"unproject({name}, normalize={normalize}) works in-place: v_unprojected",
worstcase = True,
relative = True)
testutils.confirm_equal(dv_dq_reported_inplace,
dv_dq_reported,
msg = f"unproject({name}, normalize={normalize}) works in-place: dv_dq",
worstcase = True,
relative = True)
testutils.confirm_equal(dv_di_reported_inplace,
dv_di_reported,
msg = f"unproject({name}, normalize={normalize}) works in-place: dv_di",
worstcase = True,
relative = True)
testutils.finish()
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^create/$', views.MediaCreateAPIView.as_view(), name='create'),
url(r'^$', views.MediaListAPIView.as_view(), name='list'),
# url(r'^(?P<pk>\d+)$', views.WitnessRetrieveAPIView.as_view(), name='detail'),
]
|
def matcher(text, term, label):
    # Find all occurrences of a string pattern in a larger string and return
    # (start, end, label) tuples. Starting the search index at -1 ensures a
    # match at position 0 is not skipped.
    index = -1
    matches = []
    while True:
        index = text.find(term, index + 1)
        if index == -1:
            break
        matches.append((index, index + len(term), label))
    return matches
def update_sentences(text, sentences):
# open text of sentences.py
# find start and end of `sentences = [ ]`
# format sentences into valid list
# replace new sentences in sentences.py
pass
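# A small illustrative check of matcher() (not part of the original module):
if __name__ == '__main__':
    print(matcher("the cat sat on the mat", "the", "DET"))
    # expected: [(0, 3, 'DET'), (15, 18, 'DET')]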
|
# Generated by Django 4.0 on 2021-12-22 15:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0007_remove_posts_file_remove_posts_user_post_and_more'),
('comments', '0004_alter_comments_post'),
]
operations = [
migrations.DeleteModel(
name='Posts',
),
]
|
__author__ = 'yue'
import unittest
import frequency_map as fmap
class TestFrequencyMap(unittest.TestCase):
def test_ctor(self):
fm = fmap.FrequencyMap('aaabbc')
self.assertEqual(3, fm.dictionary['a'])
self.assertEqual(2, fm.dictionary['b'])
self.assertEqual(1, fm.dictionary['c'])
def test_frequency_to_bytes(self):
self.assertEqual([0x01], fmap.FrequencyMap.frequency_to_bytes(0x01))
self.assertEqual([0xab, 0xcd], fmap.FrequencyMap.frequency_to_bytes(0xabcd))
self.assertEqual([0x0b, 0xcd], fmap.FrequencyMap.frequency_to_bytes(0xbcd))
def test_str(self):
fm = fmap.FrequencyMap('aaabbc')
self.assertEqual(12, len(str(fm)))
|
import torch.nn as nn
import torch.nn.functional as F
from models.modules.super_resolution_modules.FSR_modules import res_block
class Bottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=True)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 2, kernel_size=1, bias=True)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.relu(out)
out = self.conv3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
return out
class Hourglass(nn.Module):
def __init__(self, block, num_blocks, planes, depth): # (_, 4, 128, 4)
super(Hourglass, self).__init__()
self.depth = depth
self.block = block
self.upsample = nn.Upsample(scale_factor=2)
self.hg = self._make_hour_glass(block, num_blocks, planes, depth)
def _make_residual(self, block, num_blocks, planes):
layers = []
for i in range(0, num_blocks):
layers.append(block(planes * block.expansion, planes))
return nn.Sequential(*layers)
def _make_hour_glass(self, block, num_blocks, planes, depth):
hg = []
for i in range(depth):
res = []
for j in range(3):
res.append(self._make_residual(block, num_blocks, planes))
if i == 0:
res.append(self._make_residual(block, num_blocks, planes))
hg.append(nn.ModuleList(res))
return nn.ModuleList(hg)
def _hour_glass_forward(self, n, x):
up1 = self.hg[n - 1][0](x)
low1 = F.max_pool2d(x, 2, stride=2)
low1 = self.hg[n - 1][1](low1)
if n > 1:
low2 = self._hour_glass_forward(n - 1, low1)
else:
low2 = self.hg[n - 1][3](low1)
low3 = self.hg[n - 1][2](low2)
up2 = self.upsample(low3)
out = up1 + up2
return out
def forward(self, x):
return self._hour_glass_forward(self.depth, x)
class PriorEstimation(nn.Module):
def __init__(self, block=Bottleneck, num_stacks=2, num_blocks=4):
super(PriorEstimation, self).__init__()
self.inplanes = 64
self.num_feats = 128
self.num_stacks = num_stacks
self.layers = []
self.layers.append(nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=True))
self.layers.append(nn.BatchNorm2d(self.inplanes))
self.layers.append(nn.ReLU(inplace=True))
self.layers.append(nn.Conv2d(self.inplanes, self.num_feats * 2, kernel_size=3, stride=1, padding=1, bias=True))
self.layers.append(nn.BatchNorm2d(self.num_feats * 2))
self.layers.append(nn.ReLU(inplace=True))
for i in range(3):
self.layers.append(res_block.ResidualBlock(self.num_feats * 2, self.num_feats * 2))
self.net1 = nn.Sequential(*self.layers)
self.layers = []
self.layers.append(Hourglass(block, num_blocks, self.num_feats, 4))
self.layers.append(Hourglass(block, num_blocks, self.num_feats, 4))
self.layers.append(nn.Conv2d(self.num_feats * 2, self.num_feats, kernel_size=1, stride=1))
self.net2 = nn.Sequential(*self.layers)
self.con_landmark = nn.Conv2d(self.num_feats, 81, kernel_size=1, stride=1)
self.sig = nn.Sigmoid()
self.con_face_parsing = nn.Conv2d(self.num_feats, 11, kernel_size=1, stride=1)
def forward(self, img):
feature1 = self.net1(img)
feature2 = self.net2(feature1)
landmark = self.sig(self.con_landmark(feature2))
face_parsing = self.con_face_parsing(feature2)
return landmark, face_parsing
|
import os
from itertools import combinations
from IPython import embed
import doanet_parameters
params = doanet_parameters.get_params()
final_data_size_multiplier = 4
fold_list = [3, 4, 5, 6]
wav_path = os.path.join(params['dataset_dir'], '{}_{}'.format(params['dataset'], params['mode']))
meta_path = os.path.join(params['dataset_dir'], 'metadata_{}'.format(params['mode']))
fold_file_list = {ind:{} for ind in fold_list}
for file_name in os.listdir(wav_path):
fold_cnt = int(file_name.split('_')[0][-1])
room_cnt = int(file_name.split('_')[1][-1])
mix_name = file_name.split('_')[2]
if fold_cnt in fold_list and 'ov1' in file_name and 'aug' not in mix_name:
if room_cnt not in fold_file_list[fold_cnt]:
fold_file_list[fold_cnt][room_cnt] = []
fold_file_list[fold_cnt][room_cnt].append(file_name)
for fold in fold_file_list:
print(fold)
for room in fold_file_list[fold]:
print(room, len(fold_file_list[fold][room]))
max_pairs = len(fold_file_list[fold][room]) * final_data_size_multiplier
for comb_cnt, comb in enumerate(combinations(fold_file_list[fold][room], 2)):
# Mix the two audio files
out_file_name = comb[0].replace(comb[0].split('_')[2], 'aug{}{}'.format(comb[0].split('_')[2][-3:], comb[1].split('_')[2][-3:]))
os.system('sox --combine mix {} {} {}'.format(
os.path.join(wav_path, comb[0]),
os.path.join(wav_path, comb[1]),
os.path.join(wav_path, out_file_name))
)
# Mix the metadata files
with open(os.path.join(meta_path, out_file_name.replace('.wav', '.csv')), 'w') as outfile:
for fname in [os.path.join(meta_path, comb[0].replace('.wav', '.csv')), os.path.join(meta_path, comb[1].replace('.wav', '.csv'))]:
with open(fname) as infile:
outfile.write(infile.read())
if comb_cnt >= (max_pairs-1):
break
|
import numpy as np
__all__ = ["AFMSegment"]
class AFMSegment(object):
"""Simple wrapper around dict-like `data` to expose a single segment
This class also caches the segment indices.
"""
def __init__(self, raw_data, data, segment):
"""New Segment data
Parameters
----------
raw_data: dict
dictionary containing valid column names as keys and
1d ndarrays as values; this is raw data (e.g. from the
measurement file) that may be lazily-loaded
data: dict
same as raw_data, but in this case the data are already
in memory; we distinguish between raw_data and data so
that we know where the data came from (e.g. there might
be "tip poisition" in both dictionaries, but we only
always use (and override) the "tip position" in `data`.
We never touch `raw_data`.
"""
#: The segment type (approach, intermediate, or retract)
self.segment = segment
self._raw_data = raw_data
self._data = data
self._raw_segment_indices = None
self._user_segment_indices = None
def __getitem__(self, key):
"""Access column data of the segment"""
if key in self._data:
return self._data[key][self.segment_indices]
elif key in self._raw_data:
return self._raw_data[key][self.segment_indices].copy()
else:
raise KeyError("Undefined column '{}'!".format(key))
def __setitem__(self, key, data):
"""Set column data of the segment"""
if key not in self._data and key not in self._raw_data:
raise KeyError("Undefined column '{}'!".format(key))
elif key not in self._data:
self._data[key] = np.array(self._raw_data[key], copy=True)
self._data[key][self.segment_indices] = data
@property
def segment_indices(self):
"""boolean array of segment indices"""
if "segment" in self._data: # data takes precedence (user-edited)
if self._user_segment_indices is None:
self._user_segment_indices = \
self._data["segment"] == self.segment
indices = self._user_segment_indices
elif "segment" in self._raw_data:
# indices from raw data can safely be cached (will not change)
if self._raw_segment_indices is None:
self._raw_segment_indices = \
self._raw_data["segment"] == self.segment
indices = self._raw_segment_indices
else:
raise ValueError("Could not identify segment data!")
return indices
def clear_cache(self):
"""Invalidates the segment indices corresponding to `self.data`"""
self._user_segment_indices = None
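# A minimal usage sketch (the column data below are hypothetical; the "segment"
# column assigns each sample to a segment index, here 0 or 1):
if __name__ == '__main__':
    raw = {"force": np.array([0.1, 0.2, 0.3, 0.4]),
           "segment": np.array([0, 0, 1, 1])}
    seg0 = AFMSegment(raw_data=raw, data={}, segment=0)
    print(seg0["force"])                  # -> [0.1 0.2]
    seg0["force"] = np.array([1.0, 2.0])  # copies "force" into `data`, then edits it
    print(seg0["force"])                  # -> [1. 2.]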
|
from copy import deepcopy
from django.forms import ChoiceField, ValidationError
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin as DjangoBaseUserAdmin
from django.contrib.auth.forms import UserChangeForm as DjangoBaseUserChangeForm, UsernameField
from allauth.account.models import EmailAddress
from allauth.account.forms import ResetPasswordForm
class BaseUserChangeForm(DjangoBaseUserChangeForm):
email_verification = ChoiceField(
label=_("Emails & Verification"),
choices=[
('none', "No action taken."),
('verify', "Send verification email."),
('approve', "Mark email as already verified."),
('password', "Mark email verified and send password reset email."),
]
)
class Meta:
model = get_user_model()
fields = "__all__"
field_classes = {'username': UsernameField}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["username"].widget.attrs.update({'autofocus': True})
def clean_email(self):
email = self.cleaned_data["email"]
if email:
email_check = EmailAddress.objects.filter(email__iexact=email)
if self.instance.id is not None:
email_check = email_check.exclude(user=self.instance)
if email_check.exists():
raise ValidationError(_("This email is already associated with another user."))
return email
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial.get("password")
def save(self, commit=True):
if self.instance.id is None:
self.instance.set_unusable_password()
return super().save(commit=False)
class BaseUserAdmin(DjangoBaseUserAdmin):
add_form_template = 'admin/change_form.html'
add_form = form = BaseUserChangeForm
add_fieldsets = DjangoBaseUserAdmin.fieldsets
add_fieldsets[0][1]['fields'] = ('username', 'email', 'email_verification')
add_fieldsets[1][1]['fields'] = ('first_name', 'last_name')
fieldsets = deepcopy(add_fieldsets)
fieldsets[0][1]['fields'] += ('password',)
def save_model(self, request, user, form, change):
super().save_model(request, user, form, change)
if user.email:
EmailAddress.objects.filter(user=user).update(primary=False)
email = EmailAddress.objects.filter(email__iexact=user.email).first()
if not email:
email = EmailAddress.objects.create(
user=user, email=user.email, primary=True, verified=False
)
elif not email.primary:
email.primary = True
verification = form.cleaned_data['email_verification']
if verification == 'verify':
email.send_confirmation(request)
elif verification == 'approve':
email.verified = True
elif verification == 'password':
email.verified = True
reset = ResetPasswordForm(data={'email': user.email})
assert reset.is_valid()
reset.save(request)
email.save()
else:
EmailAddress.objects.filter(user=user).delete()
|
# this file is required to get pytest working with relative imports
|
import pyutilib.workflow
# @class:
class TaskA(pyutilib.workflow.Task):
def __init__(self, *args, **kwds):
"""Constructor."""
pyutilib.workflow.Task.__init__(self, *args, **kwds)
self.inputs.declare('z')
self.outputs.declare('z')
def execute(self):
"""Compute the sum of the inputs."""
self.z = -1*self.z
class TaskB(pyutilib.workflow.Task):
def __init__(self, *args, **kwds):
"""Constructor."""
pyutilib.workflow.Task.__init__(self, *args, **kwds)
self.inputs.declare('y')
self.outputs.declare('y')
def execute(self):
"""Compute the sum of the inputs."""
self.y = -1*self.y
class TaskC(pyutilib.workflow.Task):
def __init__(self, *args, **kwds):
"""Constructor."""
pyutilib.workflow.Task.__init__(self, *args, **kwds)
self.inputs.declare('x', action='store_any')
self.outputs.declare('x')
def execute(self):
pass
# @:class
# @usage:
A = TaskA()
B = TaskB()
C = pyutilib.workflow.TaskFactory('workflow.branch')
C.add_branch(True, A)
C.add_branch(False, B)
D = TaskC()
D.inputs.x = A.outputs.z
D.inputs.x = B.outputs.y
w = pyutilib.workflow.Workflow()
w.add(C)
print(w(value=True, z=1, y=2))
w.reset()
print(w(value=False, z=1, y=2))
# @:usage
|
import numpy as np
from utils import ms2smp, compute_stride, win_taper, build_linear_interp_table
import sounddevice as sd
"""
Real-time pitch shifting with granular synthesis for shift factors <=1.0
"""
""" User selected parameters """
grain_len = 30
grain_over = 0.2
shift_factor = 0.7
data_type = np.int16
# derived parameters
MAX_VAL = np.iinfo(data_type).max
GRAIN_LEN_SAMP = ms2smp(grain_len, samp_freq)
STRIDE = compute_stride(GRAIN_LEN_SAMP, grain_over)
OVERLAP_LEN = GRAIN_LEN_SAMP-STRIDE
# allocate input and output buffers
input_buffer = np.zeros(STRIDE, dtype=data_type)
output_buffer = np.zeros(STRIDE, dtype=data_type)
# state variables and constants
def init():
...
# the process function!
def process(input_buffer, output_buffer, buffer_len):
...
"""
# Nothing to touch after this!
# """
try:
sd.default.samplerate = 16000
sd.default.blocksize = STRIDE
sd.default.dtype = data_type
def callback(indata, outdata, frames, time, status):
if status:
print(status)
process(indata[:,0], outdata[:,0], frames)
init()
with sd.Stream(channels=1, callback=callback):
print('#' * 80)
print('press Return to quit')
print('#' * 80)
input()
except KeyboardInterrupt:
    # there is no argparse parser in this script, so just report the interruption
    print('\nInterrupted by user')
|
import SortTestHelper
def __merge(arr , l, mid, r):
    # Merge step: arr is the original array, l..r are the index bounds of the part being sorted.
    # k is the position in the original array currently being filled.
    # i and j track the positions being compared in the left and right halves of the auxiliary array.
assist = arr[l:r+1]
k = l
i = l
j = mid+1
for k in range(l, r+1):
        # First check the index ranges while the merge has not finished sweeping k over l..r:
        # if i or j has run past its half, copy the remainder of the other half into the original array.
if(i > mid):
arr[k] = assist[j-l]
j+=1
elif(j > r):
arr[k] = assist[i-l]
i+=1
else:
if(assist[i-l] <= assist[j-l]):
arr[k] = assist[i-l]
i+=1
else:
arr[k] = assist[j-l]
j+=1
def __mergesort(arr, l, r):
    # Recursive call: sort the two halves, then merge them into one.
if(l >= r):
return
mid = int((l+r)/2)
__mergesort(arr, l, mid)
__mergesort(arr, mid+1, r)
__merge(arr, l, mid, r)
def mergesort(arr):
__mergesort(arr, 0, int(len(arr))-1 )
return arr
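# A small illustrative check (SortTestHelper is imported above but its API is not
# shown here, so this demo just uses a plain list):
if __name__ == '__main__':
    print(mergesort([5, 2, 4, 7, 1, 3, 2, 6]))
    # expected: [1, 2, 2, 3, 4, 5, 6, 7]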
|
"""Basic gate-constraint encoders
This module contains constraint encoders for gate constraints with a single
output literal (e.g. `o <-> AND(i_1, i_2, ..., i_N)`).
"""
from cscl.interfaces import ClauseConsumer, CNFLiteralFactory
from cscl.utils import ensure_tuple_or_list
# TODO: support Plaisted-Greenbaum encoders
def encode_or_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory, input_lits, output_lit=None):
"""
Creates an OR gate.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param input_lits: The iterable of gate input literals.
:param output_lit: The gate's output literal. If output_lit is None, a positive literal with a
new variable will be used as the gate's output literal.
:return: The encoded gate's output literal.
"""
if output_lit is None:
output_lit = lit_factory.create_literal()
input_lits = ensure_tuple_or_list(input_lits)
fwd_clause = list(input_lits)
fwd_clause.append(-output_lit)
clause_consumer.consume_clause(fwd_clause)
for lit in input_lits:
clause_consumer.consume_clause((-lit, output_lit))
return output_lit
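# For reference, with inputs (a, b) and output o the encoder above emits the standard
# Tseitin clauses for o <-> OR(a, b): (a, b, -o), (-a, o) and (-b, o).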
def encode_and_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory, input_lits, output_lit=None):
"""
Creates an AND gate.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param input_lits: The iterable of gate input literals.
:param output_lit: The gate's output literal. If output_lit is None, a positive literal with a
new variable will be used as the gate's output literal.
:return: The encoded gate's output literal.
"""
if output_lit is None:
output_lit = lit_factory.create_literal()
input_lits = ensure_tuple_or_list(input_lits)
fwd_clause = list(map(lambda x: -x, input_lits))
fwd_clause.append(output_lit)
clause_consumer.consume_clause(fwd_clause)
for lit in input_lits:
clause_consumer.consume_clause((lit, -output_lit))
return output_lit
def encode_binary_xor_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
input_lits, output_lit=None):
"""
Creates a binary XOR gate.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param input_lits: The gate's input literals, an iterable of two distinct literals.
:param output_lit: The gate's output literal. If output_lit is None, a positive literal with a
new variable will be used as the gate's output literal.
:return: The encoded gate's output literal.
"""
if output_lit is None:
output_lit = lit_factory.create_literal()
l1, l2 = input_lits
clause_consumer.consume_clause((l1, l2, -output_lit))
clause_consumer.consume_clause((-l1, -l2, -output_lit))
clause_consumer.consume_clause((l1, -l2, output_lit))
clause_consumer.consume_clause((-l1, l2, output_lit))
return output_lit
def encode_binary_mux_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
input_lits, output_lit=None):
"""
Creates a binary MUX gate.
The created gate has three input literals lhs, rhs, sel (in this order) and encodes the
constraint
output_lit <-> ((-sel AND lhs) or (sel AND rhs))
i.e. an "if-then-else" gate.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param input_lits: The gate's input literals, an iterable of three literals.
:param output_lit: The gate's output literal. If output_lit is None, a positive literal with a
new variable will be used as the gate's output literal.
:return: The encoded gate's output literal.
"""
if output_lit is None:
output_lit = lit_factory.create_literal()
sel, lhs, rhs = input_lits
clause_consumer.consume_clause((sel, lhs, -output_lit))
clause_consumer.consume_clause((sel, -lhs, output_lit))
clause_consumer.consume_clause((-sel, rhs, -output_lit))
clause_consumer.consume_clause((-sel, -rhs, output_lit))
return output_lit
def encode_full_adder_sum_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
input_lits, output_lit=None):
"""
Creates the sum gate of a full adder.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param input_lits: The gate's input literals, an iterable of three literals lhs, rhs, c_in with lhs
and rhs being the addends and c_in being the carry input.
:param output_lit: The gate's output literal. If output_lit is None, a positive literal with a
new variable will be used as the gate's output literal.
:return: The encoded gate's output literal.
"""
if output_lit is None:
output_lit = lit_factory.create_literal()
lhs, rhs, c_in = input_lits
for x in ((lhs, rhs, c_in, -output_lit),
(lhs, -rhs, -c_in, -output_lit),
(lhs, -rhs, c_in, output_lit),
(lhs, rhs, -c_in, output_lit),
(-lhs, rhs, c_in, output_lit),
(-lhs, -rhs, -c_in, output_lit),
(-lhs, -rhs, c_in, -output_lit),
(-lhs, rhs, -c_in, -output_lit)):
clause_consumer.consume_clause(x)
return output_lit
def encode_full_adder_carry_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
input_lits, output_lit=None):
"""
Creates the carry gate of a full adder.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param input_lits: The gate's input literals, a list of three literals [lhs, rhs, c_in] with lhs
and rhs being the addends and c_in being the carry input.
:param output_lit: The gate's output literal. If output_lit is None, a positive literal with a
new variable will be used as the gate's output literal.
:return: The encoded gate's output literal.
"""
if output_lit is None:
output_lit = lit_factory.create_literal()
lhs, rhs, c_in = input_lits
for x in ((lhs, rhs, -output_lit),
(lhs, c_in, -output_lit),
(lhs, -rhs, -c_in, output_lit),
(-lhs, rhs, c_in, -output_lit),
(-lhs, -rhs, output_lit),
(-lhs, -c_in, output_lit)):
clause_consumer.consume_clause(x)
return output_lit
def encode_cnf_constraint_as_gate(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
formula, output_lit=None):
"""
Creates a gate whose output evaluates to true iff the given CNF constraint is satisfied.
All literals occurring in the formula are considered inputs to the created gate.
This encoder is not only useful for prototyping gates, but also for testing optimized constraints:
E.g. if you have a CNF constraint C and an optimized variant C' of C (with distinct inner
"helper" variables), you can check their equivalence by creating the following miter problem:
_____________________
i1 ----->| |
i2 ----->| Gate encoding of C | -------\
... ---->| | | _____
iN ----->|_____________________| \--->| |
_____________________ | XOR |---> o + unary clause [o]
i1 ----->| | /--->|_____|
i2 ----->| Gate encoding of C' | |
... ---->| | -------/
iN ----->|_____________________|
The CNF encoding of this circuit is unsatisfiable if and only if C and C' are equivalent.
Use this encoder with caution: crafting a specialized gate for the given constraint
is likely to yield a better encoding. Let Z be the sum of the lengths of the clauses contained
in formula. Then, this function creates len(formula)+1 gates, with 2*len(formula) + Z + 1
clauses, out of which len(formula)+Z are binary clauses.
:param clause_consumer: The clause consumer to which the clauses of the gate encoding shall be added.
:param lit_factory: The CNF literal factory to be used for creating literals with new variables.
:param formula: The constraint to be encoded as a gate, represented as a CNF formula given as an iterable
of iterables of literals (i.e. in clausal form)
:param output_lit: The gate's output literal. If output_lit is None, a positive literal with a
new variable will be used as the gate's output literal.
:return: The encoded gate's output literal.
"""
# Potential optimizations:
# - if the constraint is empty, return just a unary clause containing the output literal
# - don't create OR gates for unary clauses in formula
# - don't create an AND gate if clause_outs has just one element
# Delaying their implementation until they are actually needed.
clause_outs = list(map(lambda clause: encode_or_gate(clause_consumer, lit_factory, clause), formula))
return encode_and_gate(clause_consumer, lit_factory, clause_outs, output_lit)
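# Hedged sketch (not part of the original module): wiring the miter described in the
# docstring above, assuming the caller provides a ClauseConsumer and CNFLiteralFactory
# implementation plus two CNF constraints `cnf_c` and `cnf_c_prime` over the same inputs.
def _example_miter(clause_consumer: ClauseConsumer, lit_factory: CNFLiteralFactory,
                   cnf_c, cnf_c_prime):
    out_c = encode_cnf_constraint_as_gate(clause_consumer, lit_factory, cnf_c)
    out_c_prime = encode_cnf_constraint_as_gate(clause_consumer, lit_factory, cnf_c_prime)
    miter_out = encode_binary_xor_gate(clause_consumer, lit_factory, (out_c, out_c_prime))
    # The unary clause [o] from the diagram: the resulting CNF is unsatisfiable
    # if and only if C and C' are equivalent.
    clause_consumer.consume_clause((miter_out,))
    return miter_out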
|
name: Testing
on:
push:
branches:
- master
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [ '3.7' ]
name: Python ${{ matrix.python-version }} sample
steps:
- uses: actions/checkout@v2
- name: Setup python
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
architecture: x64
- run: python setup.py test
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
from os.path import join
import os.path
import numpy
ext_modules = [Extension("saxstools.libsaxstools",
[join('src', 'libsaxstools.pyx')],
include_dirs = [numpy.get_include()],
)]
scripts = [join('scripts', 'saxs_curve'), join('scripts', 'full_saxs')]
package_data = {'saxstools': [join('data', '*.npy'), 'kernels.cl'],
}
setup(name="saxstools",
version='0.0.0',
description='',
author='Gydo C.P. van Zundert',
author_email='[email protected]',
packages=['saxstools'],
cmdclass = {'build_ext': build_ext},
ext_modules = cythonize(ext_modules),
package_data = package_data,
scripts=scripts,
requires=['numpy', 'scipy', 'cython'],
)
|
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import splrep,splev
import sys
import os
class rb_fit_interactive_continuum(object):
def __init__(self,wave,flux,error):
self.wave=wave
self.flux=flux
self.error=error
self.norm_flag=0
spectrum, = plt.step(self.wave,self.flux,'b-',label='spectrum',linewidth=1)
plt.xlabel('Wavelength')
plt.ylabel('Flux')
# Connect the different functions to the different events
plt.gcf().canvas.mpl_connect('key_press_event',self.ontype)
plt.gcf().canvas.mpl_connect('button_press_event',self.onclick)
plt.gcf().canvas.mpl_connect('pick_event',self.onpick)
plt.show() # show the window
def onclick(self,event):
        # When none of the toolbar buttons is activated and the user clicks in the
        # plot somewhere, compute the median value of the spectrum in a 5 angstrom
        # window around the x-coordinate of the clicked point. The y-coordinate
        # of the clicked point is not important. Make sure the continuum points
        # `feel` it when they get clicked: set the `feel-radius` (picker) to 5 points.
toolbar = plt.get_current_fig_manager().toolbar
if event.button==1 and toolbar.mode=='':
window = ((event.xdata-2.5)<=self.wave) & (self.wave<=(event.xdata+2.5))
y = np.median(self.flux[window])
plt.plot(event.xdata,y,'ro',ms=5,pickradius=5,label='cont_pnt',markeredgecolor='k',picker=True)
plt.draw()
def onpick(self,event):
# when the user clicks right on a continuum point, remove it
if event.mouseevent.button==3:
if hasattr(event.artist,'get_label') and event.artist.get_label()=='cont_pnt':
event.artist.remove()
def ontype(self,event):
#---------------------------------------------------------------------------
# When the user hits enter:
# 1. Cycle through the artists in the current axes. If it is a continuum
# point, remember its coordinates. If it is the fitted continuum from the
# previous step, remove it
# 2. sort the continuum-point-array according to the x-values
# 3. fit a spline and evaluate it in the wavelength points
# 4. plot the continuum
#
# Original Code taken from : http://www.ster.kuleuven.be/~pieterd/python/html/plotting/specnorm.html
# Modified by Rongmon Bordoloi July 13 2017.
# Modified to add custom points and changed the look of the plots.
# Also added custom input options to read different formats.
# Input file could be ascii, fits or pickle format
# Output will be in the same format as the input file.
# Added help feature and graceful exit option. - Now pressing q will exit the program at any stage
#---------------------------------------------------------------------------
if event.key=='enter':
cont_pnt_coord = []
for artist in plt.gca().get_children():
if hasattr(artist,'get_label') and artist.get_label()=='cont_pnt':
cont_pnt_coord.append(artist.get_data())
elif hasattr(artist,'get_label') and artist.get_label()=='continuum':
artist.remove()
cont_pnt_coord = np.array(cont_pnt_coord)[...,0]
sort_array = np.argsort(cont_pnt_coord[:,0])
x,y = cont_pnt_coord[sort_array].T
spline = splrep(x,y,k=3)
continuum = splev(self.wave,spline)
plt.plot(self.wave,continuum,'r-',lw=2,label='continuum')
# when the user hits 'n' and a spline-continuum is fitted, normalise the
# spectrum
elif event.key=='n':
continuum = None
for artist in plt.gca().get_children():
if hasattr(artist,'get_label') and artist.get_label()=='continuum':
continuum = artist.get_data()[1]
break
if continuum is not None:
plt.cla()
plt.step(self.wave,self.flux/continuum,'b-',label='normalised',linewidth=1)
plt.step(self.wave,continuum,'r-',label='unnorm_cont',linewidth=0)
plt.plot([np.min(self.wave),np.max(self.wave)],[1,1],'k--')
plt.xlim([np.min(self.wave),np.max(self.wave)])
plt.xlabel('Wavelength')
plt.ylabel('Relative Flux')
# when the user hits 'r': clear the axes and plot the original spectrum
elif event.key=='r':
plt.cla()
plt.step(self.wave,self.flux,'b-')
# when the user hits 'b': selects a handpicked x,y value
elif event.key=='b':
plt.plot(event.xdata,event.ydata,'ro',ms=5,pickradius=5,label='cont_pnt',markeredgecolor='k',picker=True)
plt.draw()
#If the user presses 'h': The help is printed on the screen
elif event.key=='h':
print(
'''
---------------------------------------------------------------------------
This is an interactive continuum fitter for 1D spectrum.
The purpose of this code is to create a spline continuum fit from selected points.
The help scene activates by pressing h on the plot.
The program only works properly if none of the toolbar buttons in the figure is activated.
Useful Keystrokes:
Mouse Clicks:
Left Click : Select the median flux value within +/- 5 pixel from the x-coordinate.
These points are used for the continuum fit.
Right Click : Delete the nearest continuum point.
Keystrokes:
b : Select a point for continuum fit at that exact (x,y) coordinate.
enter : Perform a spline fit to data to create a continuum.
n : Show the normalized spectrum.
        w : Only after pressing n: This will output the continuum.
h : This Help screen.
r : Reset fit.
q : Quit Program.
---------------------------------------------------------------------------
Written By: Rongmon Bordoloi July 13 2017.
----------------------------------------------------------------------------
Basic code is taken from : http://www.ster.kuleuven.be/~pieterd/python/html/plotting/specnorm.html
Heavily modified by Rongmon Bordoloi July 13/14 2017.
Modified to add custom points and changed the look of the plots.
Also added custom input options to read different formats.
Input file could be ascii, fits or pickle format
Output will be in the same format as the input file.
Added help feature and graceful exit option. - Now pressing q will exit the program at any stage
---------------------------------------------------------------------------
'''
)
# At any time pressing q means graceful exit
elif event.key=='q':
for artist in plt.gca().get_children():
#if hasattr(artist,'get_label') and artist.get_label()=='normalised':
# quit_index=1
if self.norm_flag==1:
plt.close()
                    print('Interactive Continuum Normalization Done.')
print('Hope you remembered to save the fit by pressing w!')
print('Good Bye!')
break
else:
plt.close()
print('Quitting without normalizing. Moving along.....')
break
# when the user hits 'w': if the normalised spectrum exists, write it to a
# file.
elif event.key=='w':
for artist in plt.gca().get_children():
if hasattr(artist,'get_label') and artist.get_label()=='unnorm_cont':#'normalised':
data = np.array(artist.get_data())
cont=(data.T[:,1])
self.cont=cont
self.norm_flag=1
print('Final Continuum Chosen')
                    return self.cont
plt.draw()
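# Minimal usage sketch (synthetic spectrum, illustrative only; the original module does not
# ship an entry point):
if __name__ == "__main__":
    wave = np.linspace(4000., 5000., 2000)
    flux = 1. + 0.05 * np.random.randn(wave.size)
    error = 0.05 * np.ones_like(wave)
    rb_fit_interactive_continuum(wave, flux, error)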
|
from pytest import raises
from unittest import mock
from sqlalchemy import Table, Column, MetaData
from fhirpipe.extract.extractor import Extractor
from fhirpipe.analyze.sql_column import SqlColumn
from fhirpipe.analyze.sql_join import SqlJoin
from test.unit.conftest import mock_config
meta = MetaData()
tables = {
"patients": Table("patients", meta, Column("subject_id"), Column("row_id")),
"admissions": Table(
"admissions", meta, Column("subject_id"), Column("row_id"), Column("admittime")
),
"prescriptions": Table("prescriptions", meta, Column("row_id")),
}
def mock_get_column(_, sql_column):
table = tables[sql_column.table]
return table.c[sql_column.column]
def mock_get_table(_, sql_column):
return tables[sql_column.table]
def test_build_db_url():
# With postgres DB
credentials = {
"model": "POSTGRES",
"login": "login",
"password": "password",
"host": "localhost",
"port": "port",
"database": "database",
}
db_string = Extractor.build_db_url(credentials)
assert db_string == "postgresql://login:password@localhost:port/database"
# With oracle DB
credentials["model"] = "ORACLE"
db_string = Extractor.build_db_url(credentials)
assert db_string == "oracle+cx_oracle://login:password@localhost:port/database"
# With wrong model
credentials["model"] = "model"
with raises(ValueError, match="credentials specifies the wrong database model."):
Extractor.build_db_url(credentials)
def test_sqlalchemy_query():
pass
@mock.patch("fhirpipe.extract.extractor.fhirpipe.global_config", mock_config)
@mock.patch("fhirpipe.extract.extractor.Extractor.get_column", mock_get_column)
@mock.patch("fhirpipe.extract.extractor.Extractor.get_table", mock_get_table)
def test_apply_joins():
extractor = Extractor(None)
joins = [
SqlJoin(SqlColumn("patients", "subject_id"), SqlColumn("admissions", "subject_id")),
SqlJoin(SqlColumn("admissions", "row_id"), SqlColumn("prescriptions", "row_id")),
]
base_query = mock.MagicMock()
extractor.apply_joins(base_query, joins)
foreign_tables = [tables["admissions"], tables["prescriptions"]]
binary_expressions = [
mock_get_column("", SqlColumn("patients", "subject_id"))
== mock_get_column("", SqlColumn("admissions", "subject_id")),
mock_get_column("", SqlColumn("admissions", "row_id"))
== mock_get_column("", SqlColumn("prescriptions", "row_id")),
]
for call, foreign_table, binary_expression in zip(
base_query.join.call_args_list, foreign_tables, binary_expressions
):
args, kwargs = call
assert args[0] == foreign_table
assert args[1].compare(binary_expression)
assert kwargs == {"isouter": True}
@mock.patch("fhirpipe.extract.extractor.fhirpipe.global_config", mock_config)
@mock.patch("fhirpipe.extract.extractor.Extractor.get_column", mock_get_column)
@mock.patch("fhirpipe.extract.extractor.Extractor.get_table", mock_get_table)
def test_apply_filters():
extractor = Extractor(None)
resource_mapping = {
"filters": [
{
"relation": "LIKE",
"value": "'2150-08-29'",
"sqlColumn": {"owner": None, "table": "admissions", "column": "admittime"},
},
{
"relation": "<=",
"value": "1000",
"sqlColumn": {"owner": None, "table": "patients", "column": "row_id"},
},
]
}
pk_column = SqlColumn("patients", "subject_id")
pk_values = [123, 456]
base_query = mock.MagicMock()
extractor.apply_filters(base_query, resource_mapping, pk_column, pk_values)
binary_expressions = [
extractor.get_column(SqlColumn("patients", "subject_id")).in_(pk_values),
extractor.get_column(SqlColumn("admissions", "admittime")).like("'2150-08-29'"),
extractor.get_column(SqlColumn("patients", "row_id")) <= "1000",
]
for call, binary_expression in zip(base_query.filter.call_args_list, binary_expressions):
args, kwargs = call
assert args[0].compare(binary_expression)
|
"""
The question is to group all same integers together - Sort colors in place
The challenge is: - They can give a pivot index
They can also to do this in-place
Generic version of the problem:
- Given an I/P array - rearrange the elements such that all elements less than pivot appear first,
- followed by element equal to the pivot
- followed by elements greater than the pivot
"""
def sortColors(nums):
"""The sort Colors() problem is just a variant of the dutch national flag problem, where the pivot is 1"""
dutch_flag_partition(nums, pivot=1)
def dutch_flag_partition(nums, pivot):
"""Idea is to group the elements in-place"""
n = len(nums)
left = 0
# Group elements smaller than pivot
for i in range(n):
if nums[i] < pivot:
nums[i], nums[left] = nums[left], nums[i]
left += 1
# Second pass group elements larger than the pivot
right = n - 1
for i in reversed(range(n)):
if nums[i] > pivot:
nums[i], nums[right] = nums[right], nums[i]
right -= 1
def dutch_flag_partition_optimized(nums, pivot):
"""
here the idea is:
1. If value is less than pivot - we exhange it with the first pivot occurrence
2. If value is equal to the pivot - we advance to the next unclassified element
3. If the value is greater then the pivot = - we exchange it with the last unclassified element
"""
smaller = 0
equal = 0
larger = len(nums) - 1
while equal < larger:
if nums[equal] < pivot:
nums[smaller], nums[equal] = nums[equal], nums[smaller]
smaller += 1
equal += 1
elif nums[equal] == pivot:
equal += 1
elif nums[equal] > pivot:
nums[equal], nums[larger] = nums[larger], nums[equal]
larger -= 1
if __name__ == "__main__":
pass
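    # Illustrative check (hypothetical input, not from the source): after the call the
    # zeros, ones and twos end up grouped together.
    colors = [2, 0, 2, 1, 1, 0]
    sortColors(colors)
    print(colors)  # prints [0, 0, 1, 1, 2, 2]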
|
# Gem shopping: find the shortest subarray that contains every kind of gem (two-pointer sliding window)
def solution(gems):
answer = []
counts = {}
maximum_value = 987654321
kinds = len(set(gems))
left, right = 0, 0
while right < len(gems):
gem = gems[right]
counts[gem] = counts.get(gem, 0) + 1
right += 1
if len(counts) == kinds:
while left < right:
gem = gems[left]
if counts[gem] > 1:
counts[gem] -= 1
left += 1
elif maximum_value > right - left:
maximum_value = right - left
answer = [left + 1, right]
break
else:
break
return answer
if __name__ == "__main__":
arr = ["ZZZ", "YYY", "NNNN", "YYY", "BBB"]
print(solution(arr))
|
# coding: utf-8
import os
import re
from mitmproxy import io
from mitmproxy.exceptions import FlowReadException
from mitmproxy import http
import urllib
import sys
import typing
import matplotlib
matplotlib.use('agg')
class Reader:
"""
    Run mitmproxy and extract the cookie and appmsg_token from the captured traffic; the values are raw bytes, so they need to be decoded.
command: python get_params outfile
"""
def __init__(self):
"""
        No extra parameters are required.
Parameters
----------
None
Returns
-------
None
"""
pass
def __get_cookie(self, headers_tuple):
"""
提取cookie
Parameters
----------
headers_tuple: tuple
每个元组里面又包含了一个由两个元素组成的元组
Returns
-------
cookie
cookie参数
"""
cookie = None
for item in headers_tuple:
key, value = item
            # find the first tuple whose key is Cookie
if key == b"Cookie":
cookie = value.decode()
break
return cookie
def __get_appmsg_token(self, path_str):
"""
提取appmsg_token
Parameters
----------
path_str: str
一个由二进制编码的字符串
Returns
-------
appmsg_token
appmsg_token参数
"""
path = path_str.decode()
        # filter with a regular expression
appmsg_token_string = re.findall("appmsg_token.+?&", path)
        # the matched result looks like: 'appmsg_token=xxxxxxx&'
appmsg_token = appmsg_token_string[0].split("=")
appmsg_token = appmsg_token[1][:-1]
return appmsg_token
def request(self, outfile):
"""
读取文件,获取appmsg_token和cookie
Parameters
----------
outfile: str
文件路径
Returns
-------
(str, str)
appmsg_token, cookie:需要的参数
"""
cookie, appmsg_token = '', ''
with open(outfile, "rb") as logfile:
freader = io.FlowReader(logfile)
try:
for f in freader.stream():
                    # get the full request state
state = f.get_state()
                    # try to extract cookie and appmsg_token; stop once both are found
try:
                        # take the request part of the state
request = state["request"]
                        # extract the Cookie
cookie = self.__get_cookie(request["headers"])
                        # extract the appmsg_token
appmsg_token = self.__get_appmsg_token(request["path"])
except Exception:
continue
except FlowReadException as e:
print("Flow file corrupted: {}".format(e))
        # return directly on success; otherwise capture the traffic again
if cookie != '' and appmsg_token != '':
return appmsg_token, cookie
return self.contral(outfile)
def contral(self, outfile):
"""
控制函数,调用命令保存http请求,并筛选获取appmsg_token和cookie
Parameters
----------
outfile: str
文件路径
Returns
-------
(str, str)
appmsg_token, cookie:需要的参数
"""
path = os.path.split(os.path.realpath(__file__))[0]
command = "mitmdump -qs {}/ReadOutfile.py {} mp.weixin.qq.com/mp/getappmsgext".format(path, outfile)
os.system(command)
def response(flow):
"""
mitmdumps调用的脚本函数
如果请求中包含需要的请求流,就在保存后终止运行
Parameters
----------
flow: http.HTTPFlow
请求流, 通过命令调用
Returns
-------
None
"""
url = urllib.parse.unquote(flow.request.url)
outfile = sys.argv[3]
    f: typing.IO[bytes] = open(outfile, 'wb')
w = io.FlowWriter(f)
if "mp.weixin.qq.com/mp/getappmsgext" in url:
w.add(flow)
f.close()
exit()
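# Hedged usage sketch (illustrative only): from another script the parameters could be
# recovered roughly like this, assuming `outfile` points at a mitmproxy capture file:
#     reader = Reader()
#     appmsg_token, cookie = reader.request("outfile")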
|
import logging
import boto3
import json
from botocore.exceptions import ClientError
from pathlib import Path
def status_cnf( cf_client, stack_name ):
stacks_cnf = cf_client.describe_stacks(StackName=stack_name)["Stacks"]
print("Current status of stack " + stacks_cnf[0]["StackName"] + ": " + stacks_cnf[0]["StackStatus"])
return stacks_cnf[0]["StackStatus"]
def create_cnf( cf_client, stack_name ):
result_cnf = cf_client.create_stack(StackName=stack_name, TemplateBody=cnf_template)
print("Output from API call: ")
print(result_cnf)
def update_cnf( cf_client, stack_name ):
try:
result_cnf = cf_client.update_stack(StackName=stack_name, TemplateBody=cnf_template)
    except ClientError as e:
        print("something happened with the update: " + str(e))
else:
print("Output from Update: ")
print(result_cnf)
def delete_cnf( cf_client, stack_name ):
result_cnf = cf_client.delete_stack(StackName=stack_name)
print("Output from API call: ")
print(result_cnf)
client_sts= boto3.client("sts")
with open('lab1.3.1.regionlist.json') as data_file:
data = json.load(data_file)
cnf_template = Path('lab1.3.1.s3.yml').read_text()
action = "delete"
#action = "create"
name_cnf="jmd-020201213-003"
for region in data['region']:
cf_client = boto3.client('cloudformation', region)
try:
status_cnf( cf_client, stack_name=name_cnf )
    except ClientError:
if "delete" in action:
print("CNF is not defined and action is " + action + " so not creating the stack")
else:
print("CNF is not defined should run a create stack")
create_cnf( cf_client, stack_name=name_cnf )
else:
if "delete" in action:
print("CNF is defined and action is " + action + " going to remove the stack")
delete_cnf( cf_client, stack_name=name_cnf )
else:
print("CNF is defined should run an update")
update_cnf( cf_client, stack_name=name_cnf )
# list of existing buckets
s3 = boto3.client('s3')
response = s3.list_buckets()
# Output the bucket names
print('Existing buckets:')
for bucket in response['Buckets']:
if "fiendly-name" in bucket["Name"]:
print(f' {bucket["Name"]}')
# if "jmd" in bucket["Name"]:
# print(f' {bucket["Name"]}') |
import io
from typing import List
from fastapi import UploadFile
from PyPDF2 import PdfFileMerger
def merge_files(files: List[UploadFile]) -> bytes:
output_stream = io.BytesIO()
merger = PdfFileMerger()
for file in files:
merger.append(file.file)
merger.write(output_stream)
output = output_stream.getvalue()
output_stream.close()
return output
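# Hedged usage sketch (hypothetical FastAPI endpoint, not part of the original module):
#
#     from fastapi import FastAPI, File
#     from fastapi.responses import Response
#
#     app = FastAPI()
#
#     @app.post("/merge")
#     async def merge(files: List[UploadFile] = File(...)):
#         return Response(content=merge_files(files), media_type="application/pdf")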
|
#!/usr/bin/env python
# Reference: https://github.com/schedutron/CPAP/blob/master/Chap2/sleep_clnt.py
from socket import *
from time import sleep
HOST = input('Enter host: ')
if not HOST:
HOST = 'localhost'
PORT = input('Enter port: ')
if not PORT:
PORT = 1145
else:
PORT = int(PORT)
BUFSIZ = 1024
ADDR = (HOST, PORT)
with socket(AF_INET, SOCK_STREAM) as s:
s.connect(ADDR)
    print('Connection established. Enter a number of seconds to sleep as your message. It can be small.')
while True:
msg = input("睡眠秒数:")
if not msg: break
s.send(msg.encode())
command = s.recv(BUFSIZ)
if not command: break
        print(f'Sleeping for {msg} seconds.')
exec(command.decode()) |
import subprocess
import os
from hurry.filesize import size, alternative
import psutil
import utils.plugins as plugins
import utils.hook as hook
import utils.misc as misc
import utils.time as time
class Misc(plugins.Plugin):
@hook.command(command='list')
def listcmd(self, bot, event, args):
'''[<plugin>]
Lists the commands in the specified plugin. If no plugin is
specified, lists all loaded plugins.
'''
if args.strip() in self.manager.plugins:
commands = sorted(self.manager.plugins[args]['commands'].keys())
if len(commands) > 0:
bot.reply(event, ', '.join(commands))
else:
bot.reply(event, 'This plugin has no commands.')
else:
bot.reply(event,
', '.join(sorted(self.manager.plugins.keys())))
@hook.command(command='help')
def helpcmd(self, bot, event, args):
'''[<plugin>] [<command>]
Gives the help information for the specified command. A plugin
doesn't need to be specified unless the command is in more than
one plugin. Use the 'list' command to get a list of plugins and
commands.
'''
if args == '':
bot.reply(event, self.get_help('help'))
return
args = args.split(' ')
plugin = None
if len(args) > 1:
plugin = args[0]
command = args[1]
else:
command = args[0]
data = bot.hunt_command(command, plugin)
if type(data) is list:
plugins = '{0} and {1}'.format(', '.join(data[:-1]), data[-1])
bot.reply(event, 'Error: That command exists in the {0} plugins. '
'Please specify the plugin whose command you want help '
'with.'.format(plugins))
return
if data:
cmdhelp = data['help']
if cmdhelp:
bot.reply(event, cmdhelp)
else:
bot.reply(event, 'Error: No help is available for that '
'command.')
else:
for alias in [i for i in [plugin, command] if i]:
aliascmd = bot.get_channel_aliases(event.target, alias)
if aliascmd:
bot.reply(event, '{0} -- Alias for {1}'.format(alias,
repr(aliascmd)))
return
bot.reply(event, 'Error: There is no such command.')
@hook.command
def ping(self, bot, event, args):
'''takes no arguments
Check if the bot is alive.
'''
bot.reply(event, 'Pong!')
@hook.command
def status(self, bot, event, args):
'''takes no arguments
Replies with various data about the bot's status.
'''
botuptime = time.timesince(self.manager.started)
connuptime = time.timesince(bot.started)
process = psutil.Process(os.getpid())
ramusage = size(process.memory_info().rss, system=alternative)
datarecv = size(bot.rx, system=alternative)
datasent = size(bot.tx, system=alternative)
cputime = subprocess.getoutput('ps -p $$ h -o time')
users = misc.count(len(bot.nicks), 'user', 'users')
chans = misc.count(len(bot.channels), 'channel', 'channels')
txmsgs = misc.count(bot.txmsgs, 'message', 'messages')
rxmsgs = misc.count(bot.rxmsgs, 'message', 'messages')
bot.reply(event, 'This bot has been running for {0}, has been '
'connected for {1}, is tracking {2} in {3}, is using {4} of '
'RAM, has used {5} of CPU time, has sent {6} for {7} of '
'data and received {8} for {9} of data'.format(
botuptime, connuptime, users, chans, ramusage, cputime,
txmsgs, datasent, rxmsgs, datarecv))
@hook.command
def version(self, bot, event, args):
'''takes no arguments
Returns the currently running version of the bot.
'''
version = subprocess.getoutput('git describe')
bot.reply(event, 'Eleos {0}'.format(version))
@hook.command
def source(self, bot, event, args):
        '''takes no arguments
Returns a link to the bot's source.
'''
bot.reply(event, 'https://code.libertas.tech/bs/Eleos')
@hook.command
def hm(self, bot, event, args):
'''[<nick>]
Returns the hostmask of <nick> (or yourself if no nick is
specified).
'''
args = self.space_split(args)
if len(args) > 0:
nick = args[0]
if nick not in bot.nicks:
bot.reply(event, 'Error: No such user.')
return
for user in bot.nicks:
if user == nick:
nick = user
break
hmask = '{nick}!{user}@{host}'.format(nick=nick, **bot.nicks[nick])
bot.reply(event, hmask)
else:
bot.reply(event, event.source)
@hook.command
def bm(self, bot, event, args):
'''[<nick|hostmask>]
Returns a banmask for <nick> (or yourself if no nick is
specified).
'''
args = self.space_split(args)
if len(args) > 0:
nick = args[0]
else:
nick = event.source
bot.reply(event, bot.banmask(nick))
@hook.command
def ftds(self, bot, event, args):
'''[<channel>|--global]
Lists factoids for <channel> or globally. <channel> is only
required if the command isn't sent in the channel itself.
'''
try:
args = self.space_split(args)
if event.target == bot.nick:
channel = (args[0] if args[0].lower() != '--global'
else 'default')
elif len(args) > 0:
if args[0].lower() == '--global':
channel = 'default'
else:
channel = args[0]
else:
channel = event.target
except IndexError:
bot.reply(event, self.get_help('ftds'))
else:
factoids = sorted(bot.get_channel_factoids(channel).keys())
if len(factoids) > 0:
bot.reply(event, ', '.join(factoids))
else:
bot.reply(event, 'No factoids found.')
@hook.command
def aliases(self, bot, event, args):
'''[<channel>|--global]
Lists aliases for <channel> or globally. <channel> is only
required if the command isn't sent in the channel itself.
'''
try:
args = self.space_split(args)
if event.target == bot.nick:
channel = (args[0] if args[0].lower() != '--global'
else 'default')
elif len(args) > 0:
if args[0].lower() == '--global':
channel = 'default'
else:
channel = args[0]
else:
channel = event.target
except IndexError:
bot.reply(event, self.get_help('aliases'))
else:
aliases = sorted(bot.get_channel_aliases(channel).keys())
if len(aliases) > 0:
bot.reply(event, ', '.join(aliases))
else:
bot.reply(event, 'No aliases found.')
Class = Misc
|
from raiden.tests.integration.fixtures.blockchain import * # noqa: F401,F403
from raiden.tests.integration.fixtures.raiden_network import * # noqa: F401,F403
from raiden.tests.integration.fixtures.smartcontracts import * # noqa: F401,F403
from raiden.tests.integration.fixtures.transport import * # noqa: F401,F403
from raiden_libs.test.fixtures.web3 import patch_genesis_gas_limit # noqa: F401, F403
|
import math
try:
tests=int(input())
z=[]
for _ in range(tests):
n=int(input())
w=list(map(int,input().rstrip().split()))
l=list(map(int,input().rstrip().split()))
ind={}
s=0
for i in range(1,n+1):
ind[i]=w.index(i)
for i in range(2,n+1):
t1=ind[i]
t2=ind[i-1]
t=0
if t1<=t2:
t=(math.ceil((t2+1-t1)/l[t1]))
s+=t
ind[i]+=t*(l[t1])
z.append(s)
for x in z:
print(x)
except:pass
|
import sys
import argparse
import pandas as pd
def func(args):
"""detect csv data
Examples:
toad evaluate -i xxx.csv
"""
from .evaluate import evaluate
sys.stdout.write('reading data....\n')
test_data = pd.read_csv(args.input)
if args.base is not None:
self_data = pd.read_csv(args.base)
else:
self_data = None
arguments = {
'excel_name': args.name,
'num': args.top,
'iv_threshold_value': args.iv,
'unique_num': args.unique,
'self_data': self_data,
'overdue_days': args.overdue,
}
evaluate(test_data, **arguments)
ARGS = {
'info': {
'name': 'evaluate',
        'description': 'third-party data evaluation',
},
'defaults': {
'func': func,
},
'args': [
{
'flag': ('-i', '--input'),
'type': argparse.FileType('r', encoding='utf-8'),
            'help': 'the csv file to evaluate',
'required': True,
},
{
'flag': ('--base',),
'type': argparse.FileType('r', encoding='utf-8'),
            'help': 'baseline csv data file used to measure uplift',
'default': None,
},
{
'flag': ('--overdue',),
            'help': 'enable overdue-days analysis',
'action': 'store_true',
},
{
'flag': ('--top',),
'type': int,
            'help': 'analyse the top n variables ranked by IV',
'default': 10,
},
{
'flag': ('--iv',),
'type': float,
            'help': 'analyse variables whose IV exceeds this threshold',
'default': 0.02,
},
{
'flag': ('--unique',),
'type': int,
            'help': 'bin continuous variables into n groups for analysis',
'default': 10,
},
{
'flag': ('--name',),
'type': str,
            'help': 'file name of the generated report',
'default': 'report.xlsx',
},
]
}
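# Hedged usage sketch (illustrative invocation, assuming the package exposes this module
# through a `toad evaluate` sub-command as the docstring above suggests):
#     toad evaluate -i data.csv --base base.csv --top 10 --iv 0.02 --name report.xlsx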
|
def welcome_again_again_again(start):
print(f"""
{start},HA Ha hA
""")
def fourth_choose(first_step):
print("""
""")
def a(animal):
print("""
""")
def b(fruit):
print("""
""")
def end(endfirstpart):
print("""
""")
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = 'Jeffrey'
import random
import time
def doing(activity, use_time):
    print(activity, int(use_time) * 5 + 1, "minutes")
class PythonProgrammer(object):
real_English_name = "Jeffrey Chu"
nick_name = "魔术师Jeffrey Chu"
occupation = "Server development engineer"
hobbies = ["魔术", "美食", "编程", "诗词", "音乐", "摄影", "游戏"]
blog_url = "http://zhujinhui.net/"
_relaxing = ("relax","1")
def working(self):
activities = ["writing implements", "writing blog", "studying", "fixing bug", "writing SQL", "talking with PM"]
index = 0
while index<10:
use_time = random.randint(0,10) # seconds
activity_type = random.randint(0, len(activities)-1)
activity = activities[activity_type]
doing(activity, use_time)
time.sleep(use_time)
doing(*self._relaxing)
index += 1
def listening(self):
pass
if __name__ == "__main__":
pp = PythonProgrammer()
pp.working() |
from typing import Any, Dict
import pytest
from statham.schema.constants import NotPassed
from statham.schema.elements import Element, Null
from statham.schema.helpers import Args
from tests.schema.elements.helpers import assert_validation
from tests.helpers import no_raise
class TestNullInstantiation:
@staticmethod
@pytest.mark.parametrize("args", [Args(), Args(default=None)])
def test_element_instantiates_with_good_args(args):
with no_raise():
_ = args.apply(Null)
@staticmethod
@pytest.mark.parametrize("args", [Args(invalid="keyword"), Args("sample")])
def test_element_raises_on_bad_args(args):
with pytest.raises(TypeError):
_ = args.apply(Null)
class TestNullValidation:
@staticmethod
@pytest.mark.parametrize(
"success,value",
[(True, NotPassed()), (True, None), (False, ["foo"]), (False, "")],
)
def test_validation_performs_with_no_keywords(success: bool, value: Any):
assert_validation(Null(), success, value)
def test_null_default_keyword():
element = Null(default=None)
assert element(NotPassed()) is None
def test_null_type_annotation():
assert Null().annotation == "None"
|
from sublime import CLASS_WORD_START
from sublime import CLASS_WORD_END
from sublime import CLASS_PUNCTUATION_START
from sublime import CLASS_PUNCTUATION_END
from sublime import CLASS_EMPTY_LINE
from sublime import CLASS_LINE_END
from sublime import CLASS_LINE_START
from Vintageous.vi.utils import next_non_white_space_char
import re
word_pattern = re.compile(r'\w')
# Places at which regular words start (for Vim).
CLASS_VI_WORD_START = CLASS_WORD_START | CLASS_PUNCTUATION_START | CLASS_LINE_START
# Places at which *sometimes* words start. Called 'internal' because it's a notion Vim has; not
# obvious.
CLASS_VI_INTERNAL_WORD_START = CLASS_WORD_START | CLASS_PUNCTUATION_START | CLASS_LINE_END
CLASS_VI_WORD_END = CLASS_WORD_END | CLASS_PUNCTUATION_END
CLASS_VI_INTERNAL_WORD_END = CLASS_WORD_END | CLASS_PUNCTUATION_END
def at_eol(view, pt):
return (view.classify(pt) & CLASS_LINE_END) == CLASS_LINE_END
def at_punctuation(view, pt):
# FIXME: Not very reliable?
is_at_eol = at_eol(view, pt)
is_at_word = at_word(view, pt)
is_white_space = view.substr(pt).isspace()
is_at_eof = pt == view.size()
return not any((is_at_eol, is_at_word, is_white_space, is_at_eof))
def at_word_start(view, pt):
return (view.classify(pt) & CLASS_WORD_START) == CLASS_WORD_START
def at_word_end(view, pt):
return (view.classify(pt) & CLASS_WORD_END) == CLASS_WORD_END
def at_punctuation_end(view, pt):
return (view.classify(pt) & CLASS_PUNCTUATION_END) == CLASS_PUNCTUATION_END
def at_word(view, pt):
return at_word_start(view, pt) or word_pattern.match(view.substr(pt))
def skip_word(view, pt):
while True:
if at_punctuation(view, pt):
pt = view.find_by_class(pt, forward=True, classes=CLASS_PUNCTUATION_END)
elif at_word(view, pt):
pt = view.find_by_class(pt, forward=True, classes=CLASS_WORD_END)
else:
break
return pt
def next_word_start(view, start, internal=False):
classes = CLASS_VI_WORD_START if not internal else CLASS_VI_INTERNAL_WORD_START
pt = view.find_by_class(start, forward=True, classes=classes)
if internal and at_eol(view, pt):
# Unreachable?
return pt
return pt
def next_big_word_start(view, start, internal=False):
classes = CLASS_VI_WORD_START if not internal else CLASS_VI_INTERNAL_WORD_START
pt = skip_word(view, start)
seps = ''
if internal and at_eol(view, pt):
return pt
pt = view.find_by_class(pt, forward=True, classes=classes, separators=seps)
return pt
def next_word_end(view, start, internal=False):
classes = CLASS_VI_WORD_END if not internal else CLASS_VI_INTERNAL_WORD_END
pt = view.find_by_class(start, forward=True, classes=classes)
if internal and at_eol(view, pt):
# Unreachable?
return pt
return pt
def word_starts(view, start, count=1, internal=False):
assert start >= 0
assert count > 0
pt = start
for i in range(count):
# On the last motion iteration, we must do some special stuff if we are still on the
# starting line of the motion.
if (internal and (i == count - 1) and
(view.line(start) == view.line(pt))):
if view.substr(pt) == '\n':
return pt + 1
return next_word_start(view, pt, internal=True)
pt = next_word_start(view, pt)
if not internal or (i != count - 1):
pt = next_non_white_space_char(view, pt, white_space=' \t')
while not (view.size() == pt or
view.line(pt).empty() or
view.substr(view.line(pt)).strip()):
pt = next_word_start(view, pt)
pt = next_non_white_space_char(view, pt, white_space=' \t')
if (internal and (view.line(start) != view.line(pt)) and
(start != view.line(start).a and not view.substr(view.line(pt - 1)).isspace()) and
at_eol(view, pt - 1)):
pt -= 1
return pt
def big_word_starts(view, start, count=1, internal=False):
assert start >= 0
assert count > 0
pt = start
for i in range(count):
if internal and i == count - 1 and view.line(start) == view.line(pt):
if view.substr(pt) == '\n':
return pt + 1
return next_big_word_start(view, pt, internal=True)
pt = next_big_word_start(view, pt)
if not internal or i != count - 1:
pt = next_non_white_space_char(view, pt, white_space=' \t')
while not (view.size() == pt or
view.line(pt).empty() or
view.substr(view.line(pt)).strip()):
pt = next_big_word_start(view, pt)
pt = next_non_white_space_char(view, pt, white_space=' \t')
if (internal and (view.line(start) != view.line(pt)) and
(start != view.line(start).a and not view.substr(view.line(pt - 1)).isspace()) and
at_eol(view, pt - 1)):
pt -= 1
return pt
def word_ends(view, start, count=1, big=False):
assert start >= 0 and count > 0, 'bad call'
pt = start
if not view.substr(start).isspace():
pt = start + 1
for i in range(count):
if big:
while True:
pt = next_word_end(view, pt)
if pt >= view.size() or view.substr(pt).isspace():
if pt > view.size():
pt = view.size()
break
else:
pt = next_word_end(view, pt)
# FIXME: We should return the actual word end and not pt - 1 ??
return pt
|
'''Test it all'''
from __future__ import print_function
import numpy as np
import sparse_dot
from sparse_dot.testing_utils import (
generate_test_set,
generate_test_saf_list,
sparse_dot_full_validate_pass,
dot_equal_basic,
is_naive_same,
run_timing_test_v1,
run_timing_test,
run_timing_test_vs_csr,
run_timing_test_vs_sparse,
)
SAF_GOOD = {'locs': np.array([0, 1, 4], dtype=np.uint32),
'array': np.array([4.2, 9.0, 5.1], dtype=np.float32)}
SAF_BAD_1 = {'locs': np.array([4, 0, 1], dtype=np.uint32),
'array': np.array([4.2, 9.0, 5.1], dtype=np.float32)}
SAF_BAD_2 = {'locs': np.array([0, 1, 4], dtype=np.uint32),
'array': [4.2, 9.0, 5.1]}
def test_saf_list_to_csr_matrix():
num_rows, num_cols = 100, 100
x = generate_test_saf_list(num_rows, num_cols)
csr = sparse_dot.saf_list_to_csr_matrix(x, shape=(num_rows, num_cols))
def all_close(x, y):
return np.all(np.isclose(x, y))
assert all_close(np.array(csr.sum(axis=-1))[:, 0],
[i['array'].sum() for i in x])
assert all(all_close(csr[i].indices, x[i]['locs'])
for i in range(len(x)))
def test_validate_saf_1():
assert sparse_dot.validate_saf(sparse_dot.to_saf([0,2,0,1,3,4,2,0,0,1,0]))
def test_validate_saf_2():
assert sparse_dot.validate_saf(SAF_GOOD)
assert not sparse_dot.validate_saf(SAF_BAD_1, verbose=False)
assert not sparse_dot.validate_saf(SAF_BAD_2, verbose=False)
def test_sparse_dot_full_validation_1():
assert sparse_dot_full_validate_pass(sparse_dot.to_saf_list(np.arange(6).reshape(2,3)))
def test_sparse_dot_full_validation_2():
assert sparse_dot_full_validate_pass([SAF_GOOD])
assert not sparse_dot_full_validate_pass([SAF_BAD_1])
assert not sparse_dot_full_validate_pass([SAF_GOOD, SAF_BAD_1])
assert not sparse_dot_full_validate_pass([SAF_BAD_2, SAF_BAD_1])
def test_sparse_dot_simple():
assert dot_equal_basic(*np.arange(6).reshape(2,3))
def test_sparse_dot_basic_100():
assert dot_equal_basic(*generate_test_set(2, 100))
def test_sparse_dot_basic_100_1():
assert dot_equal_basic(*generate_test_set(2, 100, 1))
def test_sparse_dot_10_100_1():
assert is_naive_same(generate_test_set(10, 100, 1))
def test_sparse_dot_100_100_0p1():
assert is_naive_same(generate_test_set(100, 100, 0.1))
def test_cos_similarity_using_scipy_1():
'''Test the cos similarity calculation against scipy
(must be installed for this test)'''
import scipy.sparse
n_rows = 100
rows = generate_test_set(n_rows, 1000, 1)
csr = scipy.sparse.csr_matrix(rows)
res = sparse_dot.cos_similarity_using_sparse(rows)
res_coo = scipy.sparse.coo_matrix((res['sparse_result'], (res['i'], res['j'])), shape=(n_rows, n_rows))
sparse_cos_sim = sparse_dot.sparse_cosine_similarity(csr)
sparse_cos_sim_b = sparse_dot.sparse_cosine_similarity_b(csr)
    assert np.all(np.isclose(np.triu(res_coo.toarray(), 1),
                             np.triu(sparse_cos_sim.toarray(), 1)))
    assert np.all(np.isclose(np.triu(res_coo.toarray(), 1),
                             np.triu(sparse_cos_sim_b.toarray(), 1)))
def test_cos_distance_using_scipy_1():
'''Test the cos distance calculation against scipy
(must be installed for this test)'''
import scipy.spatial.distance
a, b = generate_test_set(2, 1000, 1)
assert np.isclose(scipy.spatial.distance.cosine(a, b),
sparse_dot.cos_distance_using_sparse([a, b])['sparse_result'][0])
def test_cos_distance_using_scipy_2():
'''Test the cos distance calculation against scipy
(must be installed for this test)'''
import scipy.spatial.distance
rows = generate_test_set(100, 1000, 1)
for i, j, sr in sparse_dot.cos_distance_using_sparse(rows):
assert np.isclose(sr, scipy.spatial.distance.cosine(rows[i], rows[j]))
def run_timing_test_v1_1000_1000_0p1():
return run_timing_test_v1(1000, 1000, 0.1)
def run_timing_test_v1_10000_10000_0p1():
return run_timing_test_v1(10000, 10000, 0.1)
def run_timing_test_1000_1000_100000():
return run_timing_test(1000, 1000, 100000)
def run_timing_test_10000_10000_10000000():
return run_timing_test(10000, 10000, 10000000)
def run_timing_test_1000_20000_10000000():
return run_timing_test(1000, 200000, 10000000)
def run_timing_test_5000_20000_10000():
return run_timing_test(5000, 200000, 10000)
def run_timing_test_vs_csr_1000_1000_100000():
return run_timing_test_vs_csr(1000, 1000, 100000)
def run_timing_test_vs_csr_and_coo_1000_1000_100000():
return run_timing_test_vs_sparse(1000, 1000, 100000)
if __name__ == '__main__':
test_saf_list_to_csr_matrix()
test_cos_similarity_using_scipy_1()
test_validate_saf_1()
test_validate_saf_2()
test_sparse_dot_full_validation_1()
    test_sparse_dot_full_validation_2()
test_sparse_dot_simple()
test_sparse_dot_basic_100()
test_sparse_dot_basic_100_1()
test_sparse_dot_10_100_1()
test_sparse_dot_100_100_0p1()
is_naive_same(generate_test_set(100, 100, 0.1), print_time=True)
is_naive_same(generate_test_set(1000, 1000, 0.1), print_time=True)
test_cos_distance_using_scipy_1()
test_cos_distance_using_scipy_2()
print(run_timing_test_v1_1000_1000_0p1())
print(run_timing_test_1000_1000_100000())
print(run_timing_test_vs_csr_1000_1000_100000())
print(run_timing_test_vs_csr_and_coo_1000_1000_100000())
# These are all run in the benchmarks instead:
#print run_timing_test_v1_10000_10000_0p1() # ~100s
#print run_timing_test_10000_10000_10000000() # ~100s
#print run_timing_test_1000_20000_10000000() # 10s
#print run_timing_test_5000_20000_10000() # LOL, only 0.1s to run but 8s to generate the initial data :-P
# FAILS:
#print run_timing_test_v1_10000_1000000_0p01() # Memory Error
#print run_timing_test_10000_1000000_100000000() # Memory Error
#print run_timing_test_10000_20000_10000000() # Ouch
|
import os
from os import path
from typing import Any, Dict, Optional, Union
import torch
from torch import nn
from torchvision.datasets.utils import check_md5
import pystiche
from pystiche.misc import download_file
from ..license import License, UnknownLicense
from ._core import _Image, _ImageCollection
class DownloadableImage(_Image):
def __init__(
self,
url: str,
title: Optional[str] = None,
author: Optional[str] = None,
date: Optional[str] = None,
license: Optional[Union[License, str]] = None,
md5: Optional[str] = None,
file: Optional[str] = None,
guides: Optional["DownloadableImageCollection"] = None,
prefix_guide_files: bool = True,
transform: Optional[nn.Module] = None,
note: Optional[str] = None,
) -> None:
r"""Downloadable image.
Args:
url: URL to the image.
title: Optional title of the image.
author: Optional author of the image.
date: Optional date of the image.
license: Optional license of the image.
md5: Optional `MD5 <https://en.wikipedia.org/wiki/MD5>`_ checksum of the
image file.
file: Optional path to the image file. If ``None``, see
:meth:`.generate_file` for details.
guides: Optional guides for the image.
prefix_guide_files: If ``True``, the guide files are prefixed with the
``file`` name.
transform: Optional transform that is applied to the image after it is
:meth:`~.read`.
note: Optional note that is included in the representation.
"""
if file is None:
file = self.generate_file(url, title, author)
if guides is not None and prefix_guide_files:
prefix = path.splitext(path.basename(file))[0]
for _, guide in guides:
if not path.isabs(guide.file):
guide.file = path.join(prefix, guide.file)
super().__init__(file, guides=guides, transform=transform, note=note)
self.url = url
self.title = title
self.author = author
self.date = date
if license is None:
license = UnknownLicense()
self.license = license
self.md5 = md5
@staticmethod
def generate_file(url: str, title: Optional[str], author: Optional[str]) -> str:
r"""Generate a filename from the supplied information from the following scheme:
- If ``title`` and ``author`` are ``None``, the ending of ``url`` is used.
- If one of ``title`` or ``author`` is not ``None``, it is used as filename where spaces are replaced by underscores.
        - If ``title`` and ``author`` are both not ``None``, the filename is generated as above, separating both parts with double underscores.
Args:
url: URL to the image.
title: Optional title of the image.
author: Optional author of the image
"""
if title is None and author is None:
return path.basename(url)
def format(x: str) -> str:
return "_".join(x.lower().split())
name_parts = [format(part) for part in (title, author) if part is not None]
name = "__".join(name_parts)
ext = path.splitext(url)[1]
return name + ext
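    # Example of the scheme above (illustrative values):
    #   generate_file("https://example.com/starry-night.jpg", "Starry Night", "Vincent van Gogh")
    #   returns "starry_night__vincent_van_gogh.jpg"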
def download(self, root: Optional[str] = None, overwrite: bool = False) -> None:
r"""Download the image and if applicable the guides from their URL. If the
correct MD5 checksum is known, it is verified first. If it checks out the file
not re-downloaded.
Args:
root: Optional root directory for the download if the file is a relative
path. Defaults to :func:`pystiche.home`.
            overwrite: Overwrites files if they already exist or the MD5 checksum does
not match. Defaults to ``False``.
"""
def _download(file: str) -> None:
os.makedirs(path.dirname(file), exist_ok=True)
download_file(self.url, file=file, md5=self.md5)
if root is None:
root = pystiche.home()
if isinstance(self.guides, DownloadableImageCollection):
self.guides.download(root=root, overwrite=overwrite)
file = self.file
if not path.isabs(file) and root is not None:
file = path.join(root, file)
if not path.isfile(file):
_download(file)
return
msg_overwrite = "If you want to overwrite it, set overwrite=True."
if self.md5 is None:
if overwrite:
_download(file)
return
else:
msg = f"{path.basename(file)} already exists in {root}. {msg_overwrite}"
raise FileExistsError(msg)
if not check_md5(file, self.md5):
if overwrite:
_download(file)
return
else:
msg = (
f"{path.basename(file)} with a different MD5 hash already exists "
f"in {root}. {msg_overwrite}"
)
raise FileExistsError(msg)
def read(
self,
root: Optional[str] = None,
download: Optional[bool] = None,
overwrite: bool = False,
**read_image_kwargs: Any,
) -> torch.Tensor:
r"""Read the image from file with :func:`pystiche.image.read_image`. If
available the :attr:`.transform` is applied afterwards.
Args:
root: Optional root directory if the file is a relative path.
Defaults to :func:`pystiche.home`.
download: If ``True``, downloads the image first. Defaults to ``False`` if
the file already exists and the MD5 checksum is not known. Otherwise
defaults to ``True``.
            overwrite: If downloaded, overwrites files if they already exist or the
MD5 checksum does not match. Defaults to ``False``.
**read_image_kwargs: Optional parameters passed to
:func:`pystiche.image.read_image`.
"""
if root is None:
root = pystiche.home()
if download is None:
file_exists = path.isfile(path.join(root, self.file))
md5_available = self.md5 is not None
download = False if file_exists and not md5_available else True
if download:
self.download(root=root, overwrite=overwrite)
return super().read(root=root, **read_image_kwargs)
def _properties(self) -> Dict[str, Any]:
dct = super()._properties()
dct["url"] = self.url
dct["title"] = self.title if self.title is not None else "unknown"
dct["author"] = self.author if self.author is not None else "unknown"
dct["date"] = self.date if self.date is not None else "unknown"
dct["license"] = self.license
return dct
class DownloadableImageCollection(_ImageCollection):
def download(self, root: Optional[str] = None, overwrite: bool = False) -> None:
r"""Download all images and if applicable their guides from their URLs. See
:meth:`pystiche.data.DownloadableImage.download` for details.
Args:
root: Optional root directory for the download if the file is a relative
path. Defaults to :func:`pystiche.home`.
            overwrite: Overwrites files if they already exist or the MD5 checksum does
not match. Defaults to ``False``.
"""
for _, image in self:
if isinstance(image, DownloadableImage):
image.download(root=root, overwrite=overwrite)
def read(
self,
root: Optional[str] = None,
download: Optional[bool] = None,
overwrite: bool = False,
**read_image_kwargs: Any,
) -> Dict[str, torch.Tensor]:
r"""Read the images from file. See :meth:`pystiche.data.DownloadableImage.read`
for details.
Args:
root: Optional root directory if the file is a relative path.
Defaults to :func:`pystiche.home`.
download: If ``True``, downloads the image first. Defaults to ``False`` if
the file already exists and the MD5 checksum is not known. Otherwise
defaults to ``True``.
            overwrite: If downloaded, overwrites files if they already exist or the
MD5 checksum does not match. Defaults to ``False``.
**read_image_kwargs: Optional parameters passed to
:func:`pystiche.image.read_image`.
Returns:
            Dictionary with name-image pairs.
"""
return {
name: image.read(
root=root, download=download, overwrite=overwrite, **read_image_kwargs
)
for name, image in self
}
|
"""Module that contains tests to check that xml file has been written"""
import os
from pathlib import Path
from conftest import get_jarvis4se, remove_xml_file
from xml_adapter import XmlParser3SE
jarvis4se = get_jarvis4se()
xml_parser = XmlParser3SE()
def test_generate_xml_file_template():
"""Notebook equivalent:
%%jarvis
with generate_xml_file_template
"""
file_name = "generate_xml_file_template"
jarvis4se.jarvis("", f"with {file_name}\n")
path = Path(os.path.join("./", file_name + ".xml"))
with path as file:
read_xml = file.read_text(encoding="utf-8")
base_xml = "<?xml version='1.0' encoding='UTF-8'?>\n" \
"<systemAnalysis>\n" \
" <funcArch>\n" \
" <functionList/>\n" \
" <dataList/>\n" \
" <stateList/>\n" \
" <transitionList/>\n" \
" <functionalElementList/>\n" \
" <functionalInterfaceList/>\n" \
" </funcArch>\n" \
" <phyArch>\n" \
" <physicalElementList/>\n" \
" <physicalInterfaceList/>\n" \
" </phyArch>\n" \
" <viewPoint>\n" \
" <viewList/>\n" \
" <attributeList/>\n" \
" <typeList/>\n" \
" </viewPoint>\n" \
"</systemAnalysis>\n"
assert base_xml in read_xml
remove_xml_file(file_name)
def test_simple_function_within_xml():
"""Notebook equivalent:
%%jarvis
with simple_function_within_xml
F1 is a function
"""
file_name = "simple_function_within_xml"
jarvis4se.jarvis("", f"with {file_name}\n"
"F1 is a function\n")
function_list = xml_parser.parse_xml(file_name + ".xml")['xml_function_list']
assert len(function_list) == 1
assert [fun.name == "F1" for fun in function_list]
remove_xml_file(file_name)
def test_described_attribute_within_xml(attribute_cell):
"""Same as test_described_attribute_input() within test_input_cell.py, but here we are
verifying that attributes are written correctly within xml:
%%jarvis
with described_attribute_within_xml
F1 is a function
Fun elem is a functional element
========================================
%%jarvis
with described_attribute_within_xml
A is an attribute
B is an attribute. C is an attribute
========================================
%%jarvis
with described_attribute_within_xml
The A of F1 is 4,2
The C of F1 is pink
The B of Fun elem is 8,5.
The A of Fun elem is 100
"""
file_name = "described_attribute_within_xml"
jarvis4se.jarvis("", f"with {file_name}\n{attribute_cell[0]}")
jarvis4se.jarvis("", f"with {file_name}\n{attribute_cell[1]}")
jarvis4se.jarvis("", f"with {file_name}\n{attribute_cell[2]}")
obj_dict = xml_parser.parse_xml(file_name + ".xml")
expected = {('A', 'F1', '4,2'), ('B', 'Fun elem', '8,5'),
('C', 'F1', 'pink'), ('A', 'Fun elem', '100')}
# xml_adapter.parse_xml() returns mainly set(), so the order can change
# thus we have to compare it with a set also
result = set()
assert len(obj_dict['xml_attribute_list']) == 3
for attribute in obj_dict['xml_attribute_list']:
for item in attribute.described_item_list:
for function in obj_dict['xml_function_list']:
if item[0] == function.id:
result.add((attribute.name, function.name, item[1]))
for fun_elem in obj_dict['xml_fun_elem_list']:
if item[0] == fun_elem.id:
result.add((attribute.name, fun_elem.name, item[1]))
assert expected == result
remove_xml_file(file_name)
def test_set_attribute_type_within_xml():
"""Tests that attribute types are written correctly within xml, notebook equivalent:
%%jarvis
with set_attribute_type_within_xml
A is an attribute
B is an attribute.
The type of A is attribute type A.
The type of B is attribute type B
"""
file_name = "set_attribute_type_within_xml"
jarvis4se.jarvis("", f"with {file_name}\n"
"A is an attribute\n"
"B is an attribute.\n"
"The type of A is attribute type A.\n"
"The type of B is attribute type B\n")
attribute_list = xml_parser.parse_xml(file_name + ".xml")['xml_attribute_list']
expected = {('A', 'attribute type A'), ('B', 'attribute type B')}
# xml_adapter.parse_xml() returns mainly set(), so the order can change
# thus we have to compare it with a set also
result = set()
assert len(attribute_list) == 2
for attribute in attribute_list:
result.add((attribute.name, attribute.type))
assert expected == result
remove_xml_file(file_name)
def test_set_allocated_item_to_view_within_xml(allocation_item_cell):
"""Relative to Issue #9 to add new allocated item to a view(i.e. filter) by verifying than
it's written within xml. Notebook equivalent:
%%jarvis
with set_allocated_item_to_view_within_xml
F1 is a function
F2 with a long name is a function. The alias of F2 with a long name is F2.
F3 is a function
F4 is a function
a is a data
Fun_elem is a functional element
========================================
%%jarvis
with set_allocated_item_to_view_within_xml
under toto
consider F1. consider toto. consider a, Fun_elem
consider tata.
consider F1, F2, F3, F4
"""
file_name = "set_allocated_item_to_view_within_xml"
jarvis4se.jarvis("", f"with {file_name}\n{allocation_item_cell[0]}")
jarvis4se.jarvis("", f"with {file_name}\n{allocation_item_cell[1]}")
obj_dict = xml_parser.parse_xml(file_name + ".xml")
expected = {'F1', 'F2 with a long name', 'F3', 'F4', 'a', 'Fun_elem'}
# xml_adapter.parse_xml() returns mainly set(), so the order can change
# thus we have to compare it with a set also
result = set()
assert len(obj_dict['xml_view_list']) == 1
assert "test_view" in {i.name for i in obj_dict['xml_view_list']}
for item in next(iter(obj_dict['xml_view_list'])).allocated_item_list:
for fun in obj_dict['xml_function_list']:
if item == fun.id:
result.add(fun.name)
for fun_elem in obj_dict['xml_fun_elem_list']:
if item == fun_elem.id:
result.add(fun_elem.name)
for data in obj_dict['xml_data_list']:
if item == data.id:
result.add(data.name)
assert expected == result
remove_xml_file(file_name)
def test_function_with_grandkids_within_xml(function_grandkids_cell):
"""See Issue #31, Notebook equivalent:
%%jarvis
with function_with_grandkids_within_xml
F1 is a function
F1a is a function
F1a1 is a function
F1 is composed of F1a
F1a is composed of F1a1
a is a data
F1a produces a
b is a data
F1a consumes b
c is a data
F1a1 produces c
d is a data
F1a1 consumes d
"""
file_name = "function_with_grandkids_within_xml"
jarvis4se.jarvis("", f"with {file_name}\n{function_grandkids_cell}")
obj_dict = xml_parser.parse_xml(file_name + ".xml")
expected_cons = {('b', 'F1a'), ('d', 'F1'), ('b', 'F1'), ('d', 'F1a'), ('d', 'F1a1')}
expected_prod = {('c', 'F1a1'), ('a', 'F1'), ('c', 'F1'), ('c', 'F1a'), ('a', 'F1a')}
expected_child = {('F1', 'F1a'), ('F1a', 'F1a1')}
# xml_adapter.parse_xml() returns mainly set(), so the order can change
# thus we have to compare it with a set also
result_cons = set()
result_prod = set()
result_child = set()
assert len(obj_dict['xml_data_list']) == 4 and len(obj_dict['xml_function_list']) == 3
    assert len(obj_dict['xml_consumer_function_list']) == 5
    assert len(obj_dict['xml_producer_function_list']) == 5
for cons in obj_dict['xml_consumer_function_list']:
result_cons.add((cons[0], cons[1].name))
for prod in obj_dict['xml_producer_function_list']:
result_prod.add((prod[0], prod[1].name))
for fun in obj_dict['xml_function_list']:
if fun.child_list:
for child in fun.child_list:
result_child.add((fun.name, child.name))
assert expected_cons == result_cons
assert expected_prod == result_prod
assert expected_child == result_child
remove_xml_file(file_name)
def test_function_childs_cons_prod_within_xml(function_with_childs_cell):
"""See Issue #5, Notebook equivalent:
%%jarvis
with function_childs_cons_prod_within_xml
F1 is a function
F1a is a function
F1b is a function
F1c is a function
F1d is a function
F1e is a function
F2 is a function
F3 is a function
F1 is composed of F1a
F1 is composed of F1b
F1 is composed of F1c
F1 is composed of F1d
F1 is composed of F1e
a is a data
F1 produces a
F2 consumes a
F1a produces a
F1b consumes a
b is a data
F1c produces b
F1d consumes b
c is a data
F3 produces c
F1e consumes c
"""
file_name = "function_childs_cons_prod_within_xml"
jarvis4se.jarvis("", f"with {file_name}\n"
f"{function_with_childs_cell}")
obj_dict = xml_parser.parse_xml(file_name + ".xml")
expected_cons = {('a', 'F1b'), ('b', 'F1d'), ('a', 'F2'), ('c', 'F1e'), ('c', 'F1')}
expected_prod = {('b', 'F1c'), ('c', 'F3'), ('a', 'F1a'), ('a', 'F1')}
expected_child = {('F1', 'F1e'), ('F1', 'F1d'), ('F1', 'F1c'), ('F1', 'F1b'), ('F1', 'F1a')}
# xml_adapter.parse_xml() returns mainly set(), so the order can change
# thus we have to compare it with a set also
result_cons = set()
result_prod = set()
result_child = set()
assert len(obj_dict['xml_data_list']) == 3 and len(obj_dict['xml_function_list']) == 8
assert len(obj_dict['xml_consumer_function_list']) == 5 and \
len(obj_dict['xml_producer_function_list']) == 4
for cons in obj_dict['xml_consumer_function_list']:
result_cons.add((cons[0], cons[1].name))
for prod in obj_dict['xml_producer_function_list']:
result_prod.add((prod[0], prod[1].name))
for fun in obj_dict['xml_function_list']:
if fun.child_list:
for child in fun.child_list:
result_child.add((fun.name, child.name))
assert expected_cons == result_cons
assert expected_prod == result_prod
assert expected_child == result_child
remove_xml_file(file_name)
def test_functional_interface_within_xml():
"""Notebook equivalent:
%%jarvis
with functional_interface_within_xml
Color is an attribute
A is a data
F1 is a function
F2 is a function
Fun_elem_1 is a functional element
Fun_elem_2 is a functional element
F1 produces A
F2 consumes A
Fun_elem_1 allocates F1
Fun_elem_2 allocates F2
Fun_inter is a functional interface.
    The type of Fun_inter is functional interface
The alias of Fun_inter is FI
The Color of Fun_inter is pink
Fun_elem_1 exposes Fun_inter
Fun_elem_2 exposes Fun_inter
Fun_inter allocates A.
"""
file_name = "functional_interface_within_xml"
jarvis4se.jarvis("", f"with {file_name}\n"
"Color is an attribute\n"
"A is a data\n"
"F1 is a function\n"
"F2 is a function\n"
"Fun_elem_1 is a functional element\n"
"Fun_elem_2 is a functional element\n"
"F1 produces A\n"
"F2 consumes A\n"
"Fun_elem_1 allocates F1\n"
"Fun_elem_2 allocates F2\n"
"Fun_inter is a functional interface.\n"
"The type of Fun_inter is functional interface\n"
"The alias of Fun_inter is FI\n"
"The Color of Fun_inter is pink\n"
"Fun_elem_1 exposes Fun_inter\n"
"Fun_elem_2 exposes Fun_inter\n"
"Fun_inter allocates A.\n")
obj_dict = xml_parser.parse_xml(file_name + ".xml")
    assert (len(obj_dict['xml_data_list']) == len(obj_dict['xml_attribute_list']) ==
            len(obj_dict['xml_fun_inter_list']) == 1)
data = obj_dict['xml_data_list'].pop()
fun_inter = obj_dict['xml_fun_inter_list'].pop()
attribute = obj_dict['xml_attribute_list'].pop()
assert data.name == 'A'
assert fun_inter.name == 'Fun_inter'
assert fun_inter.alias == 'FI'
assert fun_inter.type == 'Functional interface'
assert attribute.name == 'Color'
described_item = attribute.described_item_list.pop()
assert described_item[0] == fun_inter.id and described_item[1] == 'pink'
assert fun_inter.allocated_data_list.pop() == data.id
remove_xml_file(file_name)
def test_fun_elem_exposes_interface_within_xml(fun_elem_exposing_cell):
"""Notebook equivalent:
%%jarvis
with fun_elem_exposes_interface_within_xml
Fun_inter is a functional interface
Fun_elem is a functional element
Fun_elem_2 is a functional element
Fun_elem_3 is a functional element
Fun_elem_4 is a functional element
Fun_elem_5 is a functional element
Fun_elem_6 is a functional element
Fun_elem_ext is a functional element
Fun_elem is composed of Fun_elem_2
Fun_elem_2 is composed of Fun_elem_3
Fun_elem_3 is composed of Fun_elem_4
Fun_elem_4 is composed of Fun_elem_5
Fun_elem_5 is composed of Fun_elem_6
Fun_elem exposes Fun_inter
Fun_elem_6 exposes Fun_inter
Fun_elem_ext exposes Fun_inter
toto exposes Fun_inter
tata exposes titi
Fun_elem exposes coco
"""
file_name = "fun_elem_exposes_interface_within_xml"
jarvis4se.jarvis("", f"with {file_name}\n{fun_elem_exposing_cell}")
obj_dict = xml_parser.parse_xml(file_name + ".xml")
expected_child = {('Fun_elem', 'Fun_elem_2'), ('Fun_elem_2', 'Fun_elem_3'),
('Fun_elem_3', 'Fun_elem_4'), ('Fun_elem_4', 'Fun_elem_5'),
('Fun_elem_5', 'Fun_elem_6')}
expected_exposed = {('Fun_elem', 'Fun_inter'), ('Fun_elem_6', 'Fun_inter'),
('Fun_elem_ext', 'Fun_inter')}
assert len(obj_dict['xml_fun_inter_list']) == 1 and len(obj_dict['xml_fun_elem_list']) == 8
fun_inter = obj_dict['xml_fun_inter_list'].pop()
assert fun_inter.name == 'Fun_inter'
result_exposed = set()
result_child = set()
for fun_elem in obj_dict['xml_fun_elem_list']:
for child in fun_elem.child_list:
result_child.add((fun_elem.name, child.name))
if fun_inter.id in fun_elem.exposed_interface_list:
result_exposed.add((fun_elem.name, fun_inter.name))
assert expected_child == result_child
assert expected_exposed == result_exposed
remove_xml_file(file_name)
def test_type_within_xml(extends_and_set_type_cell):
""" Issue #56 Notebook equivalent:
%%jarvis
with type_within_xml
Safety interface extends functional interface
The alias of Safety interface is sf
========================================
%%jarvis
sf_a extends sf
sf_a_b extends sf_a
final one extends sf_a_b
Fun_inter is a functional interface
The type of Fun_inter is final one
"""
file_name = "type_within_xml"
jarvis4se.jarvis("", f"with {file_name}\n{extends_and_set_type_cell[0]}")
jarvis4se.jarvis("", f"with {file_name}\n{extends_and_set_type_cell[1]}")
obj_dict = xml_parser.parse_xml(file_name + ".xml")
assert len([x for x in obj_dict.values() if x]) == 2
assert len(obj_dict['xml_type_list']) == 4
assert len(obj_dict['xml_fun_inter_list']) == 1
expected_type = {('sf_a', 'Safety interface'), ('sf_a_b', 'sf_a'),
('Safety interface', 'Functional interface'), ('final one', 'sf_a_b')}
captured_type = set()
for type_elem in obj_dict['xml_type_list']:
if type_elem.name == 'Safety interface':
assert type_elem.alias == 'sf'
if isinstance(type_elem.base, str):
base_type = type_elem.base
else:
base_type = type_elem.base.name
captured_type.add((type_elem.name, base_type))
assert expected_type == captured_type
assert obj_dict['xml_fun_inter_list'].pop().type == "final one"
remove_xml_file(file_name)
def test_extends_and_create_object_within_xml(capsys, extends_and_create_object_cell):
""" Issue #62 Notebook equivalent:
%%jarvis
with extends_and_create_object_input
"High level function" extends function
"High high level function" extends "High level function"
"High high high level function" extends "High high level function"
3High is a "High high high level function"
"""
file_name = "extends_and_create_object_within_xml"
jarvis4se.jarvis("", f"with {file_name}\n{extends_and_create_object_cell}")
obj_dict = xml_parser.parse_xml(file_name + ".xml")
assert len([x for x in obj_dict.values() if x]) == 2
assert len(obj_dict['xml_type_list']) == 3
assert len(obj_dict['xml_function_list']) == 1
expected_type = {('High level function', 'Function'),
('High high level function', 'High level function'),
('High high high level function', 'High high level function')}
captured_type = set()
for type_elem in obj_dict['xml_type_list']:
if isinstance(type_elem.base, str):
base_type = type_elem.base
else:
base_type = type_elem.base.name
captured_type.add((type_elem.name, base_type))
assert expected_type == captured_type
assert obj_dict['xml_function_list'].pop().type == "High high high level function"
remove_xml_file(file_name)
|
"""DNS Authenticator for OVH DNS."""
import logging
from typing import Any
from typing import Callable
from typing import Optional
from lexicon.providers import ovh
from requests import HTTPError
from certbot import errors
from certbot.plugins import dns_common
from certbot.plugins import dns_common_lexicon
from certbot.plugins.dns_common import CredentialsConfiguration
logger = logging.getLogger(__name__)
TOKEN_URL = 'https://eu.api.ovh.com/createToken/ or https://ca.api.ovh.com/createToken/'
class Authenticator(dns_common.DNSAuthenticator):
"""DNS Authenticator for OVH
This Authenticator uses the OVH API to fulfill a dns-01 challenge.
"""
description = 'Obtain certificates using a DNS TXT record (if you are using OVH for DNS).'
ttl = 60
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.credentials: Optional[CredentialsConfiguration] = None
@classmethod
def add_parser_arguments(cls, add: Callable[..., None],
default_propagation_seconds: int = 30) -> None:
super().add_parser_arguments(add, default_propagation_seconds)
add('credentials', help='OVH credentials INI file.')
def more_info(self) -> str:
return 'This plugin configures a DNS TXT record to respond to a dns-01 challenge using ' + \
'the OVH API.'
def _setup_credentials(self) -> None:
self.credentials = self._configure_credentials(
'credentials',
'OVH credentials INI file',
{
'endpoint': 'OVH API endpoint (ovh-eu or ovh-ca)',
'application-key': 'Application key for OVH API, obtained from {0}'
.format(TOKEN_URL),
'application-secret': 'Application secret for OVH API, obtained from {0}'
.format(TOKEN_URL),
'consumer-key': 'Consumer key for OVH API, obtained from {0}'
.format(TOKEN_URL),
}
)
def _perform(self, domain: str, validation_name: str, validation: str) -> None:
self._get_ovh_client().add_txt_record(domain, validation_name, validation)
def _cleanup(self, domain: str, validation_name: str, validation: str) -> None:
self._get_ovh_client().del_txt_record(domain, validation_name, validation)
def _get_ovh_client(self) -> "_OVHLexiconClient":
if not self.credentials: # pragma: no cover
raise errors.Error("Plugin has not been prepared.")
return _OVHLexiconClient(
self.credentials.conf('endpoint'),
self.credentials.conf('application-key'),
self.credentials.conf('application-secret'),
self.credentials.conf('consumer-key'),
self.ttl
)
class _OVHLexiconClient(dns_common_lexicon.LexiconClient):
"""
Encapsulates all communication with the OVH API via Lexicon.
"""
def __init__(self, endpoint: str, application_key: str, application_secret: str,
consumer_key: str, ttl: int) -> None:
super().__init__()
config = dns_common_lexicon.build_lexicon_config('ovh', {
'ttl': ttl,
}, {
'auth_entrypoint': endpoint,
'auth_application_key': application_key,
'auth_application_secret': application_secret,
'auth_consumer_key': consumer_key,
})
self.provider = ovh.Provider(config)
def _handle_http_error(self, e: HTTPError, domain_name: str) -> errors.PluginError:
hint = None
if str(e).startswith('400 Client Error:'):
hint = 'Is your Application Secret value correct?'
if str(e).startswith('403 Client Error:'):
hint = 'Are your Application Key and Consumer Key values correct?'
hint_disp = f' ({hint})' if hint else ''
return errors.PluginError(f'Error determining zone identifier for {domain_name}: '
f'{e}.{hint_disp}')
def _handle_general_error(self, e: Exception, domain_name: str) -> Optional[errors.PluginError]:
if domain_name in str(e) and str(e).endswith('not found'):
return None
return super()._handle_general_error(e, domain_name)
|
# Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from future.utils import raise_
from builtins import range
from uuid import uuid4
import os
import random
import tempfile
import logging
import inspect
import sys
from toil.lib.exceptions import panic
from toil.common import getNodeID
from toil.lib.misc import atomic_tmp_file, atomic_install, AtomicFileCreate
from toil.lib.misc import CalledProcessErrorStderr, call_command
from toil.test import ToilTest, slow, travis_test
log = logging.getLogger(__name__)
logging.basicConfig()
class MiscTests(ToilTest):
"""
This class contains miscellaneous tests that don't have enough content to be their own test
file, and that don't logically fit in with any of the other test suites.
"""
def setUp(self):
super(MiscTests, self).setUp()
self.testDir = self._createTempDir()
@travis_test
def testIDStability(self):
prevNodeID = None
        for i in range(10):
            nodeID = getNodeID()
            if prevNodeID is not None:
                self.assertEqual(nodeID, prevNodeID)
            prevNodeID = nodeID
@slow
def testGetSizeOfDirectoryWorks(self):
'''A test to make sure toil.common.getDirSizeRecursively does not
underestimate the amount of disk space needed.
Disk space allocation varies from system to system. The computed value
should always be equal to or slightly greater than the creation value.
This test generates a number of random directories and randomly sized
files to test this using getDirSizeRecursively.
'''
from toil.common import getDirSizeRecursively
# a list of the directories used in the test
directories = [self.testDir]
# A dict of {FILENAME: FILESIZE} for all files used in the test
files = {}
# Create a random directory structure
for i in range(0,10):
directories.append(tempfile.mkdtemp(dir=random.choice(directories), prefix='test'))
# Create 50 random file entries in different locations in the directories. 75% of the time
        # these are fresh files of size [1, 10] MB and 25% of the time they are hard links to old
# files.
while len(files) <= 50:
fileName = os.path.join(random.choice(directories), self._getRandomName())
if random.randint(0,100) < 75:
# Create a fresh file in the range of 1-10 MB
fileSize = int(round(random.random(), 2) * 10 * 1024 * 1024)
with open(fileName, 'wb') as fileHandle:
fileHandle.write(os.urandom(fileSize))
files[fileName] = fileSize
else:
# Link to one of the previous files
if len(files) == 0:
continue
linkSrc = random.choice(list(files.keys()))
os.link(linkSrc, fileName)
files[fileName] = 'Link to %s' % linkSrc
computedDirectorySize = getDirSizeRecursively(self.testDir)
totalExpectedSize = sum([x for x in list(files.values()) if isinstance(x, int)])
self.assertGreaterEqual(computedDirectorySize, totalExpectedSize)
@staticmethod
def _getRandomName():
return uuid4().hex
def _get_test_out_file(self, tail):
outf = os.path.join(self.testDir, self.id() + "." + tail)
if os.path.exists(outf):
os.unlink(outf)
return outf
def _write_test_file(self, outf_tmp):
with open(outf_tmp, "w") as fh:
fh.write(self.id() + '\n')
def test_atomic_install(self):
outf = self._get_test_out_file(".foo.gz")
outf_tmp = atomic_tmp_file(outf)
self._write_test_file(outf_tmp)
atomic_install(outf_tmp, outf)
self.assertTrue(os.path.exists(outf))
def test_atomic_install_dev(self):
devn = '/dev/null'
tmp = atomic_tmp_file(devn)
self.assertEqual(tmp, devn)
atomic_install(tmp, devn)
def test_atomic_context_ok(self):
outf = self._get_test_out_file(".tar")
with AtomicFileCreate(outf) as outf_tmp:
self._write_test_file(outf_tmp)
self.assertTrue(os.path.exists(outf))
def test_atomic_context_error(self):
outf = self._get_test_out_file(".tar")
try:
with AtomicFileCreate(outf) as outf_tmp:
self._write_test_file(outf_tmp)
raise Exception("stop!")
except Exception as ex:
self.assertEqual(str(ex), "stop!")
self.assertFalse(os.path.exists(outf))
def test_call_command_ok(self):
o = call_command(["echo", "Fred"])
self.assertEqual("Fred\n", o)
self.assertTrue(isinstance(o, str), str(type(o)))
def test_call_command_err(self):
with self.assertRaisesRegex(CalledProcessErrorStderr,
"^Command '\\['cat', '/dev/Frankenheimer']' exit status 1: cat: /dev/Frankenheimer: No such file or directory\n$"):
call_command(["cat", "/dev/Frankenheimer"])
class TestPanic(ToilTest):
@travis_test
def test_panic_by_hand(self):
try:
self.try_and_panic_by_hand()
except:
self.__assert_raised_exception_is_primary()
@travis_test
def test_panic(self):
try:
self.try_and_panic()
except:
self.__assert_raised_exception_is_primary()
@travis_test
def test_panic_with_secondary(self):
try:
self.try_and_panic_with_secondary()
except:
self.__assert_raised_exception_is_primary()
@travis_test
def test_nested_panic(self):
try:
self.try_and_nested_panic_with_secondary()
except:
self.__assert_raised_exception_is_primary()
def try_and_panic_by_hand(self):
try:
self.line_of_primary_exc = inspect.currentframe().f_lineno + 1
raise ValueError("primary")
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
try:
raise RuntimeError("secondary")
except Exception:
pass
raise_(exc_type, exc_value, exc_traceback)
def try_and_panic(self):
try:
self.line_of_primary_exc = inspect.currentframe().f_lineno + 1
raise ValueError("primary")
except:
with panic(log):
pass
def try_and_panic_with_secondary(self):
try:
self.line_of_primary_exc = inspect.currentframe().f_lineno + 1
raise ValueError("primary")
except:
with panic( log ):
raise RuntimeError("secondary")
def try_and_nested_panic_with_secondary(self):
try:
self.line_of_primary_exc = inspect.currentframe().f_lineno + 1
raise ValueError("primary")
except:
with panic( log ):
with panic( log ):
raise RuntimeError("secondary")
def __assert_raised_exception_is_primary(self):
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertEqual(exc_type, ValueError)
self.assertEqual(str(exc_value), "primary")
while exc_traceback.tb_next is not None:
exc_traceback = exc_traceback.tb_next
self.assertEqual(exc_traceback.tb_lineno, self.line_of_primary_exc)
|
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from cStringIO import StringIO
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
def pdf_to_text(s):
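    # Wrap the raw PDF bytes in a file-like object, run every page through
    # pdfminer's layout-aware TextConverter, and return the accumulated text.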
infile = StringIO(s)
output = StringIO()
manager = PDFResourceManager()
converter = TextConverter(manager, output, laparams=LAParams())
interpreter = PDFPageInterpreter(manager, converter)
pagenums = set()
for page in PDFPage.get_pages(infile, pagenums):
interpreter.process_page(page)
infile.close()
converter.close()
text = output.getvalue()
output.close()
return text
if __name__ == '__main__':
    pdf = open('test.pdf', 'rb').read()
print pdf_to_text(pdf)
|
from django.urls import reverse_lazy
from django.http import HttpResponseRedirect
def index_dashboard(request):
return HttpResponseRedirect(reverse_lazy("budget:dashboard"))
|
from __future__ import annotations
import sys
import numpy as np
import numpy.ma as ma
def iterate(drawn: list[int], boards: ma.core.MaskedArray, need_to_win: int):
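    # Mark each drawn number on every board via the mask and return the score of
    # the need_to_win-th board to complete a row or column:
    # last drawn number * sum of that board's unmarked cells.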
_, _, board_size = boards.shape
idx_won = []
for num in drawn:
# mark current number
boards.mask[boards.data == num] = True
# check if there is a board with a fully marked row/col
win = np.argwhere(
np.logical_or(
boards.mask.sum(axis=1) == board_size,
boards.mask.sum(axis=2) == board_size,
)
)
# get the index of the completed boards
idx = np.setdiff1d(win[:, 0], idx_won)
if idx.size == 0:
continue
idx_won.extend(idx)
if len(idx_won) == need_to_win:
# sum of all unmarked numbers is given w.r.t. mask
return num * boards[idx_won[-1]].sum()
def main():
with open(sys.argv[1]) as fp:
drawn = [int(x) for x in fp.readline().strip().split(",")]
boards = np.loadtxt(fp, dtype=int).reshape(-1, 5, 5)
boards = ma.masked_array(boards, mask=np.zeros_like(boards))
print("Part 1:", iterate(drawn, boards.copy(), need_to_win=1))
print("Part 2:", iterate(drawn, boards.copy(), need_to_win=len(boards)))
if __name__ == "__main__":
main()
|
import asyncio
import aiohttp
from time import perf_counter
import timeit
import string
import random
sample = 10_000
errors = dict()
measure = list()
result = list()
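# measure collects a timestamp per completed request; result later holds the gaps
# between consecutive responses, and errors tallies responses by HTTP status
# (despite the name, every status code is counted).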
def id_generator(size=2, chars=string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
async def test(session, host):
host, port, request = host, 80, f'cached/xx-{id_generator()}'
url = f'http://{host}:{port}/{request}'
try:
resp = await session.get(url)
measure.append(perf_counter())
if resp.status >=400:
response = await resp.text()
else:
response = await resp.json()
if resp.status in errors:
errors[resp.status] += 1
else:
errors[resp.status] = 1
except Exception as ex:
print(f'... {ex}')
async def main():
tasks = list()
conn = aiohttp.TCPConnector(ssl=False)
headers = {'content-type': 'application/json'}
async with aiohttp.ClientSession(connector=conn, headers=headers) as session:
for i in range(int(sample)):
await asyncio.sleep(0.0001)
task = asyncio.create_task(test(session, '192.168.49.2'))
tasks.append(task)
for i in tasks:
await i
def avg_response_time(data):
return round(sum(data)/len(data)*1000,2)
if __name__ == '__main__':
start = perf_counter()
asyncio.run(main())
res = perf_counter()-start
for i in range(len(measure)-1):
result.append(measure[i+1]-measure[i])
result.sort(reverse=True)
    print('rps:', int(sample / res))
    print('avg response:', avg_response_time(result))
    print('worst 10%:', avg_response_time(result[:int(sample / 10)]))
    print('worst 1%:', avg_response_time(result[:int(sample / 100)]))
print(errors)
|
#-------------------------------------------------------------------------------
# Name: Single Band Gap Filler For Landsat 7
# Purpose: To use cloud masks for three separate scenes to fill gaps in data
# due to SLC-induced gaps and clouds
# Author: Quinten Geddes [email protected]
# NASA DEVELOP Program
# Created: 12/04/2013
#-------------------------------------------------------------------------------
import arcpy
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput=True
#Registering the scenes of interest
Scene1 = arcpy.Raster(arcpy.GetParameterAsText(0))
Scene2 = arcpy.Raster(arcpy.GetParameterAsText(1))
Scene3 = arcpy.Raster(arcpy.GetParameterAsText(2))
#establishing
CloudMaskpath1= arcpy.GetParameterAsText(3)
CloudMaskpath2= arcpy.GetParameterAsText(4)
CloudMaskpath3= arcpy.GetParameterAsText(5)
OutputFolder= arcpy.GetParameterAsText(6)
OutputFile= arcpy.GetParameterAsText(7)
#preempting scratch workspace errors
arcpy.env.scratchWorkspace=OutputFolder
#establishing gaps in each image
Mask1=Scene1>0
Mask2=Scene2>0
Mask3=Scene3>0
#Applying the Cloud mask if provided
for scene in [1,2,3]:
    try:
        exec("CloudMask{0}=arcpy.Raster(CloudMaskpath{0})".format(scene))
        exec("Mask{0}=Mask{0}*CloudMask{0}".format(scene))
    except Exception:
        # no cloud mask provided for this scene; keep the gap mask as-is
        pass
#keeping all good pixels for the first scene
Scene1Fill=Mask1*Scene1
#keeping good pixels for the 2nd scene where 1st pixels are bad
Scene2Fill=((Mask1==0)*Mask2)*Scene2
#keeping good pixels for the 3rd scene where 2nd and 1st pixels are bad
Scene3Fill=((Mask1==0)*(Mask2==0)*Mask3)*Scene3
#combining the kept pixels from each scene
FinalImage=Scene1Fill+Scene2Fill+Scene3Fill
FinalImage.save(OutputFolder+"\\"+OutputFile)
|
__source__ = 'https://leetcode.com/problems/find-minimum-in-rotated-sorted-array/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/find-minimum-in-rotated-sorted-array.py
# Time: O(logn)
# Space: O(1)
# Binary Search
#
# Description: Leetcode # 153. Find Minimum in Rotated Sorted Array
#
# Suppose a sorted array is rotated at some pivot unknown to you beforehand.
#
# (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
#
# Find the minimum element.
#
# You may assume no duplicate exists in the array.
#
# Companies
# Microsoft
# Related Topics
# Array Binary Search
# Similar Questions
# Search in Rotated Sorted Array Find Minimum in Rotated Sorted Array II
#
import unittest
class Solution2(object):
def findMin(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
start = 0
end = len(nums) - 1
res = 0x7FFFFFFF
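        # If nums[start] <= nums[mid] the left half is sorted, so nums[start] is
        # its minimum and the search continues on whichever side still contains
        # the rotation point; otherwise the minimum lies in (start, mid].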
while start <= end:
mid = start + (end - start ) / 2
if nums[start] <= nums[mid]:
res = min(res, nums[start])
if nums[mid] < nums[end]:
end = mid - 1;
else:
start = mid + 1;
else:
res = min(res, nums[mid])
end = mid - 1;
return res
class Solution(object):
def findMin(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums or len(nums) == 0:
return 0
start = 0
end = len(nums) - 1
        # does not work with an increasing array if the loop condition is only start + 1 < end
while start + 1 < end and nums[start] > nums[end]:
mid = start + (end - start) / 2
if nums[mid] >= nums[start]:
start = mid
else:
end = mid
return min(nums[start], nums[end])
class SolutionOther(object):
def findMin(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return self.dfs(nums, 0, len(nums) - 1)
def dfs(self, nums, start, end):
if start == end:
return nums[start]
if nums[start] < nums[end]:
return nums[start]
mid = (start + end) / 2
left = min(nums[start], nums[mid])
right = min(nums[mid+1], nums[end])
if left < right:
return self.dfs(nums, start, mid)
elif left > right:
return self.dfs(nums, mid+1, end)
else:
return self.dfs(nums, start+1, end)
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
self.assertEqual(1, Solution().findMin([3, 1, 2]))
self.assertEqual(0, Solution2().findMin([4, 5 ,6, 7, 0 ,1 ,2]))
arr = [4, 5, 6, 7, 0, 1, 2]
#print Solution().findMin([1])
#print Solution().findMin([1, 2])
#print Solution().findMin([2, 1])
print Solution().findMin([10,1,10,10,10])
#print Solution().findMin([2, 3, 1])
#print SolutionPrac().findMin(arr)
#print Solution().findMin(arr)
#print Solution2().findMin(arr)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/find-minimum-in-rotated-sorted-array/solution/
# 0ms 100%
class Solution {
public int findMin(int[] nums) {
int start = 0;
int end = nums.length - 1;
int min = Integer.MAX_VALUE;
while (start <= end) {
int mid = start + (end - start) / 2;
if (nums[start] <= nums[mid]) {
min = Math.min(min, nums[start]);
if (nums[mid] < nums[end]) {
end = mid - 1;
} else {
start = mid + 1;
}
} else {
min = Math.min(min, nums[mid]);
end = mid - 1;
}
}
return min;
}
}
# 0ms 100%
class Solution {
public int findMin(int[] nums) {
int start = 0;
int end = nums.length - 1;
while (start + 1 < end && nums[start] >= nums[end]){
int mid = start + (end - start ) / 2;
if (nums[mid] > nums[start]) {
start = mid + 1;
} else if (nums[mid] < nums[start]){
end = mid;
} else {
start++;
}
}
return Math.min(nums[start], nums[end]);
}
}
# 0ms 100%
class Solution {
public int findMin(int[] nums) {
return dfs(nums, 0, nums.length - 1);
}
private int dfs(int[] nums, int start, int end ){
if(start == end){
return nums[start];
}
if(nums[start] < nums[end]){
return nums[start];
}
int mid = start + (end - start) / 2;
int left = Math.min(nums[start], nums[mid]);
int right = Math.min(nums[mid+1], nums[end]);
if(left < right){
return dfs(nums, start, mid);
}else if(left > right){
return dfs(nums, mid+1, end);
}else{
return dfs(nums, start+1, end);
}
}
}
'''
|
# Copyright 2021 code-injection-example contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line application for running user code with the interceptor."""
from contextlib import nullcontext, redirect_stderr, redirect_stdout
from json import JSONDecodeError, loads
from os import devnull
from typing import Any, Dict, Optional
import click as c
from qiskit_interceptor.interceptor import BaseInterceptor
from . import run
class JSONCallArgumentsParam(c.ParamType):
name="json-call-arguments"
def convert(self, value: str, param: Optional[c.Parameter], ctx: Optional[c.Context]) -> Optional[Dict[str, Any]]:
if not value:
return None
try:
decoded = loads(value)
if isinstance(decoded, list):
# use decoded list as list of positional arguments
return {"args": decoded}
elif isinstance(decoded, dict):
if set(decoded.keys()) <= {"args", "kwargs"}:
# decoded dict specifies positional and keyword arguments
return decoded
# decoded dict only specifies keyword arguments
return {"kwargs": decoded}
else:
# decoded is a single value, use as single positional argument
return {"args": [decoded]}
except JSONDecodeError as err:
self.fail(f"Value '{value}' is not a valid json string!", param=param, ctx=ctx)
JSON_CALL_ARGUMENTS = JSONCallArgumentsParam()
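# For example (illustrative values): '[1, 2]' becomes {"args": [1, 2]},
# '{"x": 1}' becomes {"kwargs": {"x": 1}}, and '42' becomes {"args": [42]}.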
class JSONObjectParam(c.ParamType):
name="json-object"
def convert(self, value: str, param: Optional[c.Parameter], ctx: Optional[c.Context]) -> Optional[Dict[str, Any]]:
if not value:
return None
try:
decoded = loads(value)
if not isinstance(decoded, dict):
self.fail(f"Value '{value}' is not a valid json object!", param=param, ctx=ctx)
return decoded
except JSONDecodeError as err:
self.fail(f"Value '{value}' is not a valid json string!", param=param, ctx=ctx)
JSON_OBJECT = JSONObjectParam()
@c.command("main")
@c.option("--entry-point", help="The entry point of the user code to run. (Format: './path/to/code.py' or './path/to/package.relative.subpackage:method')")
@c.option("--entry-point-arguments", type=JSON_CALL_ARGUMENTS, default=None, help="Arguments for the entry point as a json list or a json object. Only used if the entry point is a function!")
@c.option("--interceptor-arguments", type=JSON_OBJECT, default=None, help="Arguments for the interceptors as a json object.")
@c.option("--no-intercept", type=bool, default=False, is_flag=True, help="Switch off interception of the qiskit.execute method.")
@c.option("--dry-run", type=bool, default=False, is_flag=True, help="Dry run without executing any quantum circuit.")
@c.option("--quiet", type=bool, default=False, is_flag=True, help="Suppress all console output of the quiskit runner.")
def main(entry_point: str, entry_point_arguments: Optional[Dict[str, Any]]=None, interceptor_arguments: Optional[Dict[str, Any]]=None, framework: str="qiskit", no_intercept: bool=False, dry_run: bool=False, quiet: bool=False):
redirect_out, redirect_err = (redirect_stdout, redirect_stderr) if quiet else (nullcontext, nullcontext)
if interceptor_arguments:
BaseInterceptor.set_interceptor_arguments(interceptor_arguments)
    try:
        BaseInterceptor.get_intereceptor_for_framework(framework=framework)
    except KeyError:
        c.echo(f"No interceptor for Framework {framework} found. Available Frameworks are: {BaseInterceptor.get_supported_frameworks()}")
        return
with open(devnull, mode="w") as dev_null_file:
with redirect_out(dev_null_file), redirect_err(dev_null_file):
if no_intercept:
c.echo("Running without intercepting framework function.")
else:
c.echo("Intercepting framework function.")
if dry_run:
c.echo("Performing dry run")
c.echo("Running user code '{}'.".format(entry_point))
run(entry_point, entry_point_arguments=entry_point_arguments, framework=framework, intercept=(not no_intercept), dry_run=dry_run, quiet=quiet)
# start cli
main()
|
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
from datetime import datetime
from flask import current_app
from randomarchive import db, login_manager
from flask_login import UserMixin
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
password = db.Column(db.String(60), nullable=False)
def __repr__(self):
return f"User('{self.username}', '{self.email}', '{self.image_file}')"
|
from dnnsvg.svgeables.variables.tensor_3d import Tensor3D
from dnnsvg.svgeables.variables.tensor_2d import Tensor2D
from dnnsvg.svgeables.shapes.line import Line
from dnnsvg.svgeables.shapes.text import Text
from dnnsvg.svgeables.shapes.arc_arrow import ArcArrow
from dnnsvg.svgeables.shapes.arrow import Arrow |
from RedWebhook import RedWebhook
webhook = RedWebhook(url="", content="Webhook Message")
response = webhook.execute() |
import json
from flask import Flask, jsonify, request
from flask_cors import CORS
from datetime import datetime
import pytz
app = Flask(__name__)
cors = CORS(app)
@app.route("/positions")
def positions():
    with open('positions.json', "r") as f:
        r = json.loads(f.read())
    return jsonify(r)
@app.route("/accountdetails", methods=['GET'])
def details():
tokenId = request.args.get('tokenId')
    with open('voter.json', "r") as f:
        r = json.loads(f.read())
    return jsonify(r)
@app.route("/results", methods=['GET'])
def results():
    with open('results.json', "r") as f:
        r = json.loads(f.read())
    return jsonify(r)
@app.route("/userType", methods=['GET'])
def userType():
tokenId = request.args.get('tokenId')
return jsonify(id = tokenId, type='adminANDvoter')
@app.route("/changeImpDates", methods=['GET','POST'])
def changeImpDates():
print(request.method)
print(request.form)
return jsonify(responseMessage='Dates Changed Successfully')
@app.route("/getimportantDates", methods=['GET'])
def getimportantDates():
tz_IN = pytz.timezone('Asia/Kolkata')
datetime_IN = datetime.now(tz_IN)
return jsonify(resultsDate="Mon Nov 24 2020 17:28:11 GMT+0530 (India Standard Time)",
electionDate="Mon Nov 23 2020 10:30:00 GMT+0530 (India Standard Time)",
now=str(datetime_IN))
app.run(debug=True, host='0.0.0.0')
|
from . import (
cmdline_parser,
manga_to_json,
bakaupdates
)
def _get_id(args: cmdline_parser.Args) -> str:
if args.id_url:
if args.id_url.isdigit():
id_ = args.id_url
else:
try:
id_ = bakaupdates.utils.get_id_from_url(args.id_url)
except ValueError as ex:
cmdline_parser.exit_program(ex)
else:
try:
id_ = bakaupdates.searcher.search(args.search_string,
args.max_search_results,
args.auto_first_result)
except ValueError as ex:
print(ex)
raise SystemExit from ex
print(f"\nRetrieving id: {id_}...")
return id_
def main(argv=None) -> None:
args = cmdline_parser.handle_args(argv)
id_ = _get_id(args)
try:
manga = bakaupdates.scraper.get_manga(id_)
except ValueError as ex:
raise SystemExit(ex) from ex
if args.output_filename:
manga_to_json.make_json(manga, args.output_filename, args.keep_status_values)
print(f'Successfully wrote to "{args.output_filename}"')
else:
print(manga_to_json.make_jsons(manga, args.keep_status_values))
|
def fib(n):
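    # Build the Fibonacci sequence iteratively up to index n and return the
    # n-th value (0-indexed): fib(0) == 0, fib(1) == 1, fib(10) == 55.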
sequence_list = []
current_number = 0
next_number = 1
for i in range(n + 1):
sequence_list.append(current_number)
current_number = next_number
if i > 0:
# next = sequence_list[i] + current
next_number = sequence_list[i] + current_number
else:
next_number = 1
return sequence_list[n]
print(fib(10))
print(fib(40))
print(fib(1))
print(fib(0))
|
import numpy as np
class DataProcessor():
def __init__(self, means=(), stds=()):
self.means = means
self.stds = stds
def format_x(self, x, size=-1):
_x = x
if isinstance(x, (tuple, list)):
_x = np.array([x])
        if size > 0 and _x.shape[1] != size:
            _x = self.adjust(_x, size)
_x = _x.astype(np.float32, copy=False)
if len(self.means) > 0 and len(self.stds) > 0:
return (_x - self.means) / self.stds
else:
return _x
def adjust(self, x, size):
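        # Downsample each flattened square image to `size` values by max-pooling
        # non-overlapping blocks; assumes both the input and target lengths are
        # (near) perfect squares.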
def max_pooling(v):
sqrt = lambda _x: int(np.ceil(np.sqrt(_x)))
_target_square_size = sqrt(size)
square_size = sqrt(len(v))
conv_size = int(square_size // _target_square_size)
image = np.reshape(v, (square_size, square_size))
_pooled = []
for i in range(size):
row, col = int(i // _target_square_size * conv_size), int(i % _target_square_size * conv_size)
mp = np.max(image[row:row + conv_size, col: col + conv_size])
_pooled.append(mp)
return np.array(_pooled)
x = np.array([max_pooling(_v) for _v in x])
return x
def format_y(self, y):
_y = y
if isinstance(y , int):
_y = np.array([y])
_y = _y.astype(np.int32, copy=False)
return _y
def set_normalization_params(self, x):
self.means = np.mean(x, axis=0, dtype=np.float32)
self.stds = np.std(x, axis=0, dtype=np.float32)
        # simple trick to avoid division by zero: compute the mask once so the
        # same near-constant features are patched in both arrays
        mask = self.stds < 1.0e-6
        self.stds[mask] = np.max(x) - np.min(x)
        self.means[mask] = np.min(x)
def batch_iter(self, X, y, batch_size, epoch=1):
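        # Shuffle the indices each epoch and pad them with a few reused indices so
        # every batch has exactly batch_size elements; the yielded epoch_end flag
        # marks the last batch of an epoch.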
indices = np.array(range(len(y)))
appendix = batch_size - len(y) % batch_size
for e in range(epoch):
np.random.shuffle(indices)
batch_indices = np.concatenate([indices, indices[:appendix]])
batch_count = len(batch_indices) // batch_size
for b in range(batch_count):
elements = batch_indices[b * batch_size:(b + 1) * batch_size]
x_batch = X[elements]
y_batch = y[elements]
epoch_end = True if b == batch_count - 1 else False
yield x_batch, y_batch, epoch_end
|
# %%
import shortuuid
from ctrace import PROJECT_ROOT
from ctrace.exec.param import GraphParam, SIRParam, FileParam, ParamBase
from pathlib import Path, PurePath
import multiprocessing as mp
import concurrent.futures
from typing import List, Tuple, Dict, Any, Callable
from zipfile import ZipFile
import itertools
import pandas as pd
import numpy as np
import tqdm
import csv
import random
import time
import pickle
import json
import shutil
class MultiExecutor():
INIT = 0
EXEC = 1
def __init__(
self,
runner: Callable,
schema: List[Tuple[str, type]],
post_execution: Callable = lambda self: self,
output_id: str = None,
seed: bool = True,
validation: bool = True,
name_prefix = 'run',
num_process=50,
):
self.runner = runner
self.schema = schema
# Multiexecutor state
self.tasks: List[Dict[str, Any]] = [] # store expanded tasks
self.signatures = {} # store signatures of any FileParam
self.stage: int = MultiExecutor.INIT # Track the state of executor
self.workers = {}
self.queues = {}
self.manager = mp.Manager()
# Filter FileParams from schema
self.file_params = [l for (l, t) in schema if issubclass(t, ParamBase)]
# Executor Parameters
self.seed: bool = seed
self.validation = validation
self._schema = self.schema[:]
self._schema.insert(0, ('id', int))
if self.seed:
self._schema.append(('seed', int))
self.num_process = num_process
self.name_prefix = name_prefix
# Initialize functions
self.output_id = output_id
self.init_output_directory()
self.post_execution = post_execution
def init_output_directory(self):
if self.output_id is None:
self.output_id = f"{self.name_prefix}_{shortuuid.uuid()[:5]}"
# Setup output directories
self.output_directory = PROJECT_ROOT / "output" / self.output_id
self.output_directory.mkdir(parents=True, exist_ok=True)
@staticmethod
def cartesian_product(dicts):
"""Expands an dictionary of lists into a list of dictionaries through a cartesian product"""
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))
def add_cartesian(self, config: Dict[str, List[Any]]):
if self.validation:
# check stage
if self.stage != MultiExecutor.INIT:
raise Exception(
f"Adding entries allowed during INIT stage. Current stage: {self.stage}")
# labels must match
config_attr = set(config.keys())
schema_attr = set(x[0] for x in self.schema)
if config_attr != schema_attr:
                raise ValueError(
                    f"Given config labels {config_attr} do not match specified schema labels {schema_attr}")
# assert schema types
for tup in self.schema:
prop_name, prop_type = tup
for item in config[prop_name]:
if not isinstance(item, prop_type):
raise ValueError(
f"Property [{prop_name}]: item [{item}] is not a [{prop_type}]")
# Collect signatures from FileParams
new_tasks = list(MultiExecutor.cartesian_product(config))
self.tasks.extend(new_tasks)
print(f"Added {len(new_tasks)} cartesian tasks.")
def add_collection(self, collection: List[Dict[str, Any]]):
if self.validation:
# check stage
if self.stage != MultiExecutor.INIT:
raise Exception(
f"Adding entries allowed during INIT stage. Current stage: {self.stage}")
# assert types
schema_attr = set(x[0] for x in self.schema)
for i, task in enumerate(collection):
task_attr = set(task.keys())
if task_attr != schema_attr:
                    raise ValueError(
                        f"task[{i}]: {task_attr} does not match specified schema [{schema_attr}]")
for tup in self.schema:
prop_name, prop_type = tup
item = task[prop_name]
if not isinstance(item, prop_type):
raise ValueError(
f"task[{i}] [{prop_name}]: item [{item}] is not a [{prop_type}]")
self.tasks.extend(collection)
def attach(self, worker):
# Inject dependencies
name = worker.name
worker.run_root = self.output_directory
worker.queue = self.manager.Queue()
self.queues[name] = worker.queue
self.workers[name] = worker
def exec(self):
self.stage = MultiExecutor.EXEC
# Clean Up and pre-exec initialization
# Attach seeds
df = pd.DataFrame.from_dict(self.tasks, orient='columns')
        df["seed"] = np.random.randint(0, 100000, size=len(df))
df.insert(0, "id", range(len(df)))
# Label receives name (string), data receives data object
label_df = df.copy()
data_df = df.copy()
for label in self.file_params:
label_df[label] = label_df[label].map(lambda x: x.name)
data_df[label] = data_df[label].map(lambda x: x.data)
label_df.to_csv(self.output_directory / "input.csv", index=False)
self.tasks = data_df.to_dict('records')
processes = []
# Start workers and attach worker queues
for (_, worker) in self.workers.items():
p = mp.Process(target=worker.start)
p.start()
processes.append(p)
# Start and attach loggers
self.loggers = {} # TODO: Implement loggers
for item in self.tasks:
item["queues"] = self.queues
item["loggers"] = self.loggers
item["path"] = self.output_directory
with mp.Pool(self.num_process) as pool:
list(tqdm.tqdm(pool.imap_unordered(
self.runner, self.tasks), total=len(self.tasks)))
# Clean up workers
for (_, q) in self.queues.items():
q.put("done")
for p in processes:
p.join()
# TODO: Join the main and auxillary csvs???
self.post_execution(self)
class Worker():
def __init__(self):
self.queue = mp.Queue()
def start():
raise NotImplementedError()
class CsvSchemaWorker(Worker):
# TODO: Inject destination?
def __init__(self, name, schema, relpath: PurePath, run_root: Path = None, queue=None):
"""
Listens on created queue for dicts. Worker will extract only data specified from dicts
and fills with self.NA if any attribute doesn't exist.
Dicts should contain id, preferrably as first element of schema
Will stop if dict is passed a terminating object (self.term).
"""
# Should be unique within a collection!
self.name = name
self.schema = schema
self.queue = queue
self.relpath = relpath
self.run_root = run_root
# Default value
self.default = None
# Terminating value
self.terminator = "done"
def start(self):
"""
"""
# TODO: Replace prints with logging
if self.run_root is None:
raise ValueError('run_root needs to be a path')
if self.queue is None:
raise ValueError('need to pass a queue to run')
self.path = self.run_root / self.relpath
with open(self.path, 'w') as f:
writer = csv.DictWriter(
f, self.schema, restval=self.default, extrasaction='raise')
writer.writeheader()
print(f'INFO: CsvSchemaWorker {self.name} initialized @ {self.path}')
while True:
msg = self.queue.get()
if (msg == self.terminator):
print(f"INFO: Worker {self.name} finished")
break
# Filter for default
# data = {l: (msg.get(l, self.default)) for l in self.schema}
writer.writerow(msg)
f.flush()
# print(f'DEBUG: Worker {self.name} writes entry {msg.get("id")}')
class CsvWorker(Worker):
# TODO: Inject destination?
def __init__(self, name, relpath: PurePath, run_root: Path = None, queue=None):
"""
Listens on created queue for dicts. Worker will extract only data specified from dicts
and fills with self.NA if any attribute doesn't exist.
Dicts should contain id, preferrably as first element of schema
Will stop if dict is passed a terminating object (self.term).
"""
# Should be unique within a collection!
self.name = name
self.queue = queue
self.relpath = relpath
self.run_root = run_root
# Default value
self.default = None
# Terminating value
self.terminator = "done"
def start(self):
"""
Each object piped to queue must be in form [id, val1, val2 ...]
"""
# TODO: Replace prints with logging
if self.run_root is None:
            raise ValueError('run_root needs to be a path')
if self.queue is None:
raise ValueError('need to pass a queue to run')
self.path = self.run_root / self.relpath
with open(self.path, 'w') as f:
writer = csv.writer(f)
writer.writerow(["id", "vals"])
print(f'INFO: CsvWorker {self.name} initialized @ {self.path}')
while True:
msg = self.queue.get()
if (msg == self.terminator):
print(f"INFO: Worker {self.name} finished")
break
# Filter for default
# data = {l: (msg.get(l, self.default)) for l in self.schema}
writer.writerow(msg)
f.flush()
# print(f'DEBUG: Worker {self.name} writes entry {msg.get("id")}')
# %%
if __name__ == '__main__':
# Example Usage
in_schema = [
('graph', GraphParam),
('sir', SIRParam),
('transmission_rate', float),
('compliance_rate', float),
('method', str),
]
main_out_schema = ["id", "out_method"]
aux_out_schema = ["id", "runtime"]
main_handler = CsvSchemaWorker(
name="csv_main", schema=main_out_schema, relpath=PurePath('main.csv'))
aux_handler = CsvSchemaWorker(
name="csv_aux", schema=aux_out_schema, relpath=PurePath('aux.csv'))
def runner(data):
queues = data["queues"]
instance_id = data["id"]
method = data["method"]
graph = data["graph"]
compliance_rate = data["compliance_rate"]
sir = data["sir"]
# Execute logic here ...
# Output data to workers and folders
main_obj = {
"id": instance_id,
"out_method": method,
}
aux_obj = {
"id": instance_id,
"runtime": random.random()
}
queues["csv_main"].put(main_obj)
queues["csv_aux"].put(aux_obj)
# Save large checkpoint data ("High data usage")
save_extra = False
if save_extra:
path = data["path"] / "data" / str(data["id"])
path.mkdir(parents=True, exist_ok=True)
with open(path / "sir_dump.json", "w") as f:
json.dump(sir, f)
def post_execution(self):
compress = False
        if compress and (self.output_directory / "data").exists():
print("Compressing files ...")
shutil.make_archive(
str(self.output_directory / "data"), 'zip', base_dir="data")
shutil.rmtree(self.output_directory / "data")
run = MultiExecutor(runner, in_schema,
post_execution=post_execution, seed=True)
# Add compact tasks (expand using cartesian)
mont = GraphParam('montgomery')
run.add_cartesian({
'graph': [mont],
'sir': [SIRParam(f't{i}', parent=mont) for i in range(7, 10)],
'transmission_rate': [0.1, 0.2, 0.3],
'compliance_rate': [.8, .9, 1.0],
'method': ["robust"]
})
run.add_cartesian({
'graph': [mont],
'sir': [SIRParam(f't{i}', parent=mont) for i in range(7, 10)],
'transmission_rate': [0.1, 0.2, 0.3],
'compliance_rate': [.8, .9, 1.0],
'method': ["none"]
})
# Add lists of tasks
run.add_collection([{
'graph': mont,
'sir': SIRParam('t7', parent=mont),
'transmission_rate': 0.1,
'compliance_rate': 1.0,
'method': "greedy"
}])
run.add_collection([{
'graph': mont,
'sir': SIRParam('t7', parent=mont),
'transmission_rate': 0.1,
'compliance_rate': 0.5,
'method': "greedy"
}])
# main_out_schema = ["mean_objective_value", "max_objective_value", "std_objective_value"]
run.attach(main_handler)
run.attach(aux_handler)
run.exec()
# %%
|
# Author: Abhishek Sharma
# Reference: Element of Programming Interview in Python
def count_bits(num):
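    # Count set bits by repeatedly testing the lowest bit and shifting right,
    # e.g. count_bits(0b1011) == 3.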
bit_count = 0
while num:
bit_count += num & 1
num >>= 1
return bit_count
|
import datetime
from typing import List
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.event import listens_for
from sqlalchemy import Column, ForeignKey, String, Integer, BLOB, JSON, DateTime
from sqlalchemy.dialects.postgresql import BYTEA, JSONB
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy.ext.mutable import MutableDict
Base = declarative_base()
class WithTimestamps:
created_date = Column(DateTime, nullable=False, default=lambda: datetime.datetime.now())
class Node(WithTimestamps, Base):
__tablename__ = 'node'
id = Column('node_id', Integer, primary_key=True)
filename = Column(String, nullable=False)
stored_path = Column(String, nullable=False)
parent_id = Column(Integer, ForeignKey('node.node_id'))
parent: 'Node' = relationship('Node', back_populates="children", remote_side=[id])
type = Column(String)
@property
def calculated_path(self):
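        # Walk up the parent chain to build the node's full path, stripping the
        # leading slash for nodes that sit at the root.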
return ((self.parent.calculated_path + '/' + self.filename) if self.parent else self.filename).lstrip('/')
@property
def content(self):
return None
def serialize(self, content: bool):
return dict(
type=self.type,
name=self.filename,
path= self.calculated_path,
#
mimetype=None,
writable=True,
created=1,
last_modified=1,
#
format='json' if content else None,
content=self.content if content else None
)
__mapper_args__ = {
'polymorphic_on': 'type',
'polymorphic_identity':'node'
}
Node.children = relationship('Node', order_by=Node.filename, back_populates='parent')
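# Denormalize the computed path into stored_path whenever a node is inserted or
# updated, so the full path can be queried without walking the parent chain.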
@listens_for(Node, 'before_insert',propagate=True)
@listens_for(Node, 'before_update',propagate=True)
def node_before_update(mapper, connection, target: Node):
target.stored_path = target.calculated_path
class Directory(Node):
__tablename__ = 'directory'
id = Column('directory_id', Integer,ForeignKey('node.node_id'), primary_key=True)
__mapper_args__ = {
'polymorphic_identity':'directory'
}
@property
def content(self):
return [_.serialize(content=False) for _ in self.children]
class File(Node):
__tablename__ = 'file'
id = Column('file_id', Integer,ForeignKey('node.node_id'), primary_key=True)
data = Column(BLOB().with_variant(BYTEA, 'postgresql'))
__mapper_args__ = {
'polymorphic_identity':'file'
}
@property
def content(self):
return self.data
class Notebook(Node):
__tablename__ = 'notebook'
id = Column('notebook_id', Integer,ForeignKey('node.node_id'), primary_key=True)
data = Column(JSON().with_variant(JSONB, 'postgresql'), nullable=False)
__mapper_args__ = {
'polymorphic_identity':'notebook'
}
@property
def content(self):
return self.data
|
import csv
from datetime import datetime
from decimal import Decimal
from pathlib import Path
from typing import List, Optional, Tuple
from exceptions import ParsingError, UnexpectedColumnCountError
from model import ActionType, BrokerTransaction
columns = [
"Action",
"Time",
"ISIN",
"Ticker",
"Name",
"No. of shares",
"Price / share",
"Currency (Price / share)",
"Exchange rate",
"Result (GBP)",
"Total (GBP)",
"Withholding tax",
"Currency (Withholding tax)",
"Charge amount (GBP)",
"Transaction fee (GBP)",
"Finra fee (GBP)",
"Notes",
"ID",
]
def decimal_or_none(val: str) -> Optional[Decimal]:
return Decimal(val) if val not in ["", "Not available"] else None
def action_from_str(label: str, filename: str) -> ActionType:
if label in [
"Market buy",
"Limit buy",
]:
return ActionType.BUY
elif label in [
"Market sell",
"Limit sell",
]:
return ActionType.SELL
elif label in [
"Deposit",
"Withdrawal",
]:
return ActionType.TRANSFER
elif label in ["Dividend (Ordinary)"]:
return ActionType.DIVIDEND
else:
raise ParsingError(filename, f"Unknown action: {label}")
class Trading212Transaction(BrokerTransaction):
def __init__(self, row_ints: List[str], filename: str):
if len(columns) != len(row_ints):
raise UnexpectedColumnCountError(len(columns), row_ints, filename)
row = {col: row_ints[i] for i, col in enumerate(columns)}
time_str = row["Time"]
self.datetime = datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S")
date = self.datetime.date()
self.raw_action = row["Action"]
action = action_from_str(self.raw_action, filename)
symbol = row["Ticker"] if row["Ticker"] != "" else None
description = row["Name"]
quantity = decimal_or_none(row["No. of shares"])
self.price_foreign = decimal_or_none(row["Price / share"])
self.currency_foreign = row["Currency (Price / share)"]
self.exchange_rate = decimal_or_none(row["Exchange rate"])
self.transaction_fee = decimal_or_none(row["Transaction fee (GBP)"])
self.finra_fee = decimal_or_none(row["Finra fee (GBP)"])
fees = (self.transaction_fee or Decimal(0)) + (self.finra_fee or Decimal(0))
amount = decimal_or_none(row["Total (GBP)"])
price = (
abs(amount / quantity)
if amount is not None and quantity is not None
else None
)
if amount is not None:
if action == ActionType.BUY or self.raw_action == "Withdrawal":
amount *= -1
amount -= fees
self.isin = row["ISIN"]
self.id = row["ID"]
self.notes = row["Notes"]
broker = "Trading212"
super().__init__(
date,
action,
symbol,
description,
quantity,
price,
fees,
amount,
"GBP",
broker,
)
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
def validate_header(header: List[str], filename: str):
if len(columns) != len(header):
raise UnexpectedColumnCountError(len(columns), header, filename)
for i, (expected, actual) in enumerate(zip(columns, header)):
if expected != actual:
msg = f"Expected column {i+1} to be {expected} but found {actual}"
            raise ParsingError(filename, msg)
# if there's a deposit in the same second as a buy
# (happens with the referral award at least)
# we want to put the buy last to avoid negative balance errors
def by_date_and_action(transaction: Trading212Transaction) -> Tuple[datetime, bool]:
return (transaction.datetime, transaction.action == ActionType.BUY)
def read_trading212_transactions(transactions_folder: str) -> List[BrokerTransaction]:
transactions = []
for file in Path(transactions_folder).glob("*.csv"):
with open(file) as csv_file:
print(f"Parsing {file}")
lines = [line for line in csv.reader(csv_file)]
validate_header(lines[0], str(file))
lines = lines[1:]
cur_transactions = [Trading212Transaction(row, str(file)) for row in lines]
if len(cur_transactions) == 0:
print(f"WARNING: no transactions detected in file {file}")
transactions += cur_transactions
# remove duplicates
transactions = list(set(transactions))
transactions.sort(key=by_date_and_action)
return list(transactions)
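if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not from the original module):
    # parse every Trading212 CSV export found in a folder and print a short
    # summary. "exports" is a placeholder folder name.
    parsed = read_trading212_transactions("exports")
    print(f"Parsed {len(parsed)} transactions")
    for transaction in parsed[:5]:
        print(transaction.datetime, transaction.raw_action, transaction.isin)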
|
import os
import re
import azure.batch.batch_auth as batchauth
import azure.batch.batch_service_client as batch
import azure.storage.blob as blob
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.batch import BatchManagementClient
from azure.mgmt.storage import StorageManagementClient
from azure.storage.common import CloudStorageAccount
from aztk.node_scripts.core import log
from aztk.spark import Client, models
RESOURCE_ID_PATTERN = re.compile("^/subscriptions/(?P<subscription>[^/]+)"
"/resourceGroups/(?P<resourcegroup>[^/]+)"
"/providers/[^/]+"
"/[^/]+Accounts/(?P<account>[^/]+)$")
batch_account_name = os.environ.get("AZ_BATCH_ACCOUNT_NAME")
batch_account_key = os.environ.get("BATCH_ACCOUNT_KEY")
batch_service_url = os.environ.get("BATCH_SERVICE_URL")
tenant_id = os.environ.get("SP_TENANT_ID")
client_id = os.environ.get("SP_CLIENT_ID")
credential = os.environ.get("SP_CREDENTIAL")
batch_resource_id = os.environ.get("SP_BATCH_RESOURCE_ID")
storage_resource_id = os.environ.get("SP_STORAGE_RESOURCE_ID")
cluster_id = os.environ.get("AZTK_CLUSTER_ID")
pool_id = os.environ["AZ_BATCH_POOL_ID"]
node_id = os.environ["AZ_BATCH_NODE_ID"]
is_dedicated = os.environ["AZ_BATCH_NODE_IS_DEDICATED"] == "true"
spark_web_ui_port = os.environ["SPARK_WEB_UI_PORT"]
spark_worker_ui_port = os.environ["SPARK_WORKER_UI_PORT"]
spark_job_ui_port = os.environ["SPARK_JOB_UI_PORT"]
storage_account_name = os.environ.get("STORAGE_ACCOUNT_NAME")
storage_account_key = os.environ.get("STORAGE_ACCOUNT_KEY")
storage_account_suffix = os.environ.get("STORAGE_ACCOUNT_SUFFIX")
def get_blob_client() -> blob.BlockBlobService:
if not storage_resource_id:
return blob.BlockBlobService(
account_name=storage_account_name, account_key=storage_account_key, endpoint_suffix=storage_account_suffix)
else:
credentials = ServicePrincipalCredentials(
client_id=client_id, secret=credential, tenant=tenant_id, resource="https://management.core.windows.net/")
m = RESOURCE_ID_PATTERN.match(storage_resource_id)
accountname = m.group("account")
subscription = m.group("subscription")
resourcegroup = m.group("resourcegroup")
mgmt_client = StorageManagementClient(credentials, subscription)
key = (mgmt_client.storage_accounts.list_keys(resource_group_name=resourcegroup, account_name=accountname)
.keys[0].value)
storage_client = CloudStorageAccount(accountname, key)
return storage_client.create_block_blob_service()
def get_batch_client() -> batch.BatchServiceClient:
if not batch_resource_id:
base_url = batch_service_url
credentials = batchauth.SharedKeyCredentials(batch_account_name, batch_account_key)
else:
credentials = ServicePrincipalCredentials(
client_id=client_id, secret=credential, tenant=tenant_id, resource="https://management.core.windows.net/")
m = RESOURCE_ID_PATTERN.match(batch_resource_id)
batch_client = BatchManagementClient(credentials, m.group("subscription"))
account = batch_client.batch_account.get(m.group("resourcegroup"), m.group("account"))
base_url = "https://%s/" % account.account_endpoint
credentials = ServicePrincipalCredentials(
client_id=client_id, secret=credential, tenant=tenant_id, resource="https://batch.core.windows.net/")
return batch.BatchServiceClient(credentials, base_url=base_url)
def get_spark_client():
if all([batch_resource_id, client_id, credential, storage_resource_id, tenant_id]):
        service_principal_configuration = models.ServicePrincipalConfiguration(
tenant_id=tenant_id,
client_id=client_id,
credential=credential,
batch_account_resource_id=batch_resource_id,
storage_account_resource_id=storage_resource_id,
)
return Client(
            secrets_configuration=models.SecretsConfiguration(service_principal=service_principal_configuration))
else:
        # these must all be set if the service principal configuration keys were not
assert (all([
batch_account_name, batch_account_key, batch_service_url, storage_account_name, storage_account_key,
storage_account_suffix
]))
shared_key_configuration = models.SharedKeyConfiguration(
batch_account_name=batch_account_name,
batch_account_key=batch_account_key,
batch_service_url=batch_service_url,
storage_account_name=storage_account_name,
storage_account_key=storage_account_key,
storage_account_suffix=storage_account_suffix,
)
return Client(secrets_configuration=models.SecretsConfiguration(shared_key=shared_key_configuration))
spark_client = get_spark_client()
# note: the batch_client and blob_client in _core_cluster_operations
# is the same as in _core_job_operations
batch_client = spark_client.cluster._core_cluster_operations.batch_client
blob_client = spark_client.cluster._core_cluster_operations.blob_client
log.info("Pool id is %s", pool_id)
log.info("Node id is %s", node_id)
log.info("Batch account name %s", batch_account_name)
log.info("Is dedicated %s", is_dedicated)
|
import os, subprocess, shutil
class kh2fmtoolkit:
def __init__(self, workdir='.', binary_name='KH2FM_Toolkit.exe'):
self.workdir = workdir
self.author = 'kh2lib'
self.binary_name = binary_name
self.version = '1'
def _check_binary(self):
if not os.path.isfile(os.path.join(self.workdir, self.binary_name)):
raise Exception("{} not found".format(self.binary_name))
    def _run_binary(self, args=None, inp=b'', debug=True):
        args = list(args) if args is not None else []
        self._check_binary()
        proc = subprocess.Popen([self.binary_name] + args, cwd=self.workdir, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
output = proc.communicate(input=inp)
# if inp == '':
# # Using check_output to perform a patch will crash with calledprocesserror at the end
# # although the iso does get successfully patched
# subprocess.call(["KH2FM_Toolkit.exe"] + args, cwd=self.workdir)
# else:
# output = subprocess.check_output(["KH2FM_Toolkit.exe"] + args, input=inp, cwd=self.workdir)
if debug:
print(output)
def extract_game(self, outdir):
self._check_binary()
print("extracting the game to {}".format(outdir))
self._run_binary(['-extractor'])
shutil.move(os.path.join(self.workdir,'export'), outdir)
def patch_game(self, patches, fn, movefile=False, debug=False, game='kh2'):
self._check_binary()
print("patching the game")
args = patches
        inp = b''
if game == 'kh1':
args = ['-patch'] + patches
inp = '\n\n'.encode('utf-8')
args = [i.replace(".kh2patch",".kh2patch.kh1patch") for i in args]
self._run_binary(args,inp, debug=debug)
if movefile:
print("moving iso")
            # I don't see a way to name the output iso, so use shutil to move it
shutil.move(os.path.join(self.workdir, 'KH2FM.NEW.ISO'), fn)
print("all done")
def create_patch(self, files, fn, game="kh2"):
self._check_binary()
if game == "kh2":
inp = b'\n'
elif game == "kh1":
inp = b'\n\n'
files = [f.replace("/KINGDOM/","") for f in files]
else:
raise Exception("Unknown game")
for f in files:
if game == "kh2":
inp += '{}\n\nY\n\nN\n'.format(f).encode('utf-8')
elif game == "kh1":
inp += '{}\n\nY\n\nN\n'.format(f).encode('utf-8')
inp += b'\n'
args = ['-patchmaker', '-output', fn, '-author', self.author, '-version', self.version, '-skipchangelog', '-skipcredits']
self._run_binary(args, inp=inp)
print("patch created")
return fn |
import numpy as np
def circular(diameter):
"""
Calculates the equivalent diameter and projected capture area of a
circular turbine
Parameters
------------
diameter : int/float
Turbine diameter [m]
Returns
---------
equivalent_diameter : float
Equivalent diameter [m]
projected_capture_area : float
Projected capture area [m^2]
"""
assert isinstance(diameter, (int,float)), 'diameter must be of type int or float'
equivalent_diameter = diameter
    projected_capture_area = 0.25*np.pi*(equivalent_diameter**2.)
return equivalent_diameter, projected_capture_area
def ducted(duct_diameter):
"""
Calculates the equivalent diameter and projected capture area of a
ducted turbine
Parameters
------------
duct_diameter : int/float
Duct diameter [m]
Returns
---------
equivalent_diameter : float
Equivalent diameter [m]
projected_capture_area : float
Projected capture area [m^2]
"""
assert isinstance(duct_diameter, (int,float)), 'duct_diameter must be of type int or float'
equivalent_diameter = duct_diameter
    projected_capture_area = 0.25*np.pi*(equivalent_diameter**2.)
return equivalent_diameter, projected_capture_area
def rectangular(h, w):
"""
Calculates the equivalent diameter and projected capture area of a
    rectangular turbine
Parameters
------------
h : int/float
Turbine height [m]
w : int/float
Turbine width [m]
Returns
---------
equivalent_diameter : float
Equivalent diameter [m]
projected_capture_area : float
Projected capture area [m^2]
"""
assert isinstance(h, (int,float)), 'h must be of type int or float'
assert isinstance(w, (int,float)), 'w must be of type int or float'
equivalent_diameter = np.sqrt(4.*h*w / np.pi)
projected_capture_area = h*w
return equivalent_diameter, projected_capture_area
def multiple_circular(diameters):
"""
Calculates the equivalent diameter and projected capture area of a
multiple circular turbine
Parameters
------------
diameters: list
List of device diameters [m]
Returns
---------
equivalent_diameter : float
Equivalent diameter [m]
projected_capture_area : float
Projected capture area [m^2]
"""
assert isinstance(diameters, list), 'diameters must be of type list'
diameters_squared = [x**2 for x in diameters]
equivalent_diameter = np.sqrt(sum(diameters_squared))
projected_capture_area = 0.25*np.pi*sum(diameters_squared)
return equivalent_diameter, projected_capture_area
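if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not from the original module):
    # compare a single 20 m rotor with an array of four 10 m rotors; both give
    # the same projected capture area and equivalent diameter.
    d_eq, area = circular(20.0)
    print("single 20 m rotor: D_eq = %.2f m, A = %.2f m^2" % (d_eq, area))
    d_eq, area = multiple_circular([10.0, 10.0, 10.0, 10.0])
    print("four 10 m rotors:  D_eq = %.2f m, A = %.2f m^2" % (d_eq, area))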
|
#!/bin/python
import math
import numpy
import simple_error_distribution
import somewhat_homomorphic_keygen
import rvg
class BootstrappableKeygen(object):
def __init__(self, short_dimension, long_dimension, multiplicative_depth, short_odd_modulus, long_odd_modulus, matrix_rows, short_seed=1, long_seed=1):
self.__short_dimension = short_dimension
self.__long_dimension = long_dimension
self.__multiplicative_depth = multiplicative_depth
self.__short_odd_modulus = short_odd_modulus
self.__long_odd_modulus = long_odd_modulus
self.__short_error_distribution = simple_error_distribution.SimpleErrorDistribution(short_odd_modulus, short_seed)
self.__short_random_vector_generator = rvg.RVG(short_dimension, short_odd_modulus, short_seed)
self.__sh_keygen = somewhat_homomorphic_keygen.SomewhatHomomorphicKeygen(long_dimension, multiplicative_depth, long_odd_modulus, matrix_rows, long_seed)
def set_short_error_distribution(self, new_short_error_distribution):
self.__short_error_distribution = new_short_error_distribution
def set_short_random_vector_generator(self, new_short_random_vector_generator):
self.__short_random_vector_generator = new_short_random_vector_generator
def set_sh_keygen(self, sh_keygen):
self.__sh_keygen = sh_keygen
def generate_keys(self):
secret_key, eval_key, public_key = self.__sh_keygen.generate_keys()
short_secret_key, short_eval_key = self.__generate_short_keys(secret_key)
return short_secret_key, (eval_key, short_eval_key), public_key
def __generate_short_keys(self, secret_key):
short_secret_key = [vector for vector in self.__short_random_vector_generator.generate()][0]
short_eval_key = {}
for i in range(self.__long_dimension+1):
tau = 0
for coefficient_vector in self.__short_random_vector_generator.generate(int(math.log(self.__long_odd_modulus, 2)) + 1):
error = self.__short_error_distribution.sample_distribution()
key_element = 1
if i != 0:
key_element = secret_key[i-1]
b = (coefficient_vector.dot(short_secret_key) + error + int(round(float(self.__short_odd_modulus)/self.__long_odd_modulus) * 2 ** tau * key_element)) % self.__short_odd_modulus
short_eval_key[(i, tau)] = (coefficient_vector, b)
tau += 1
return short_secret_key, short_eval_key
if __name__=="__main__":
    print("BootstrappableKeygen") |
import logging
import logging.config
import time
import traceback
from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler
from pathlib import Path
from typing import Any, Dict, List
import ruamel.yaml
import HABApp
from HABApp import __version__
from . import CONFIG
from .default_logfile import get_default_logfile
_yaml_param = ruamel.yaml.YAML(typ='safe')
_yaml_param.default_flow_style = False
_yaml_param.default_style = False # type: ignore
_yaml_param.width = 1000000 # type: ignore
_yaml_param.allow_unicode = True
_yaml_param.sort_base_mapping_type_on_output = False # type: ignore
log = logging.getLogger('HABApp.Config')
class AbsolutePathExpected(Exception):
pass
class InvalidConfigException(Exception):
pass
class HABAppConfigLoader:
def __init__(self, config_folder: Path):
assert isinstance(config_folder, Path)
assert config_folder.is_dir(), config_folder
self.folder_conf = config_folder
self.file_conf_habapp = self.folder_conf / 'config.yml'
self.file_conf_logging = self.folder_conf / 'logging.yml'
# if the config does not exist it will be created
self.__check_create_logging()
# Load Config initially
self.first_start = True
try:
self.load_cfg()
load_cfg = False
except Exception:
load_cfg = True
# Load logging configuration.
try:
self.load_log()
except AbsolutePathExpected:
# This error only occurs when the config was not loaded because of an exception.
# Since we crash in load_cfg again we'll show that error because it's the root cause.
pass
# If there was an error reload the config again so we hopefully can log the error message
if load_cfg:
self.load_cfg()
self.first_start = False
# Watch folders so we can reload the config on the fly
filter = HABApp.core.files.watcher.FileEndingFilter('.yml')
watcher = HABApp.core.files.watcher.AggregatingAsyncEventHandler(
self.folder_conf, self.files_changed, filter, watch_subfolders=False
)
HABApp.core.files.watcher.add_folder_watch(watcher)
async def files_changed(self, paths: List[Path]):
for path in paths:
if path.name == 'config.yml':
self.load_cfg()
if path.name == 'logging.yml':
self.load_log()
def __check_create_logging(self):
if self.file_conf_logging.is_file():
return None
print(f'Creating {self.file_conf_logging.name} in {self.file_conf_logging.parent}')
with self.file_conf_logging.open('w', encoding='utf-8') as file:
file.write(get_default_logfile())
time.sleep(0.1)
return None
def load_cfg(self):
CONFIG.load(self.file_conf_habapp)
# check if folders exist and print warnings, maybe because of missing permissions
if not CONFIG.directories.rules.is_dir():
log.warning(f'Folder for rules files does not exist: {CONFIG.directories.rules}')
log.debug('Loaded HABApp config')
return None
def load_log(self):
# config gets created on startup - if it gets deleted we do nothing here
if not self.file_conf_logging.is_file():
return None
with self.file_conf_logging.open('r', encoding='utf-8') as file:
cfg = _yaml_param.load(file) # type: Dict[str, Any]
# fix filenames
for handler, handler_cfg in cfg.get('handlers', {}).items():
# fix encoding for FileHandlers - we always log utf-8
if 'file' in handler_cfg.get('class', '').lower():
enc = handler_cfg.get('encoding', '')
if enc != 'utf-8':
handler_cfg['encoding'] = 'utf-8'
if 'filename' not in handler_cfg:
continue
# make Filenames absolute path in the log folder if not specified
p = Path(handler_cfg['filename'])
if not p.is_absolute():
                # Our log folder is not yet converted to path -> it is not loaded
if not isinstance(CONFIG.directories.logging, Path):
raise AbsolutePathExpected()
# Use defined parent folder
p = (CONFIG.directories.logging / p).resolve()
handler_cfg['filename'] = str(p)
# make file version optional for config file
log_version_info = True # todo: remove this 06.2021
if 'version' not in cfg:
cfg['version'] = 1
log_version_info = False
# Allow the user to set his own logging levels (with aliases)
for level, alias in cfg.pop('levels', {}).items():
if not isinstance(level, int):
level = logging._nameToLevel[level]
logging.addLevelName(level, str(alias))
# load prepared logging
try:
logging.config.dictConfig(cfg)
except Exception as e:
print(f'Error loading logging config: {e}')
log.error(f'Error loading logging config: {e}')
return None
# Try rotating the logs on first start
if self.first_start:
for wr in reversed(logging._handlerList[:]):
handler = wr() # weakref -> call it to get object
# only rotate these types
if not isinstance(handler, (RotatingFileHandler, TimedRotatingFileHandler)):
continue
# Rotate only if files have content
logfile = Path(handler.baseFilename)
if not logfile.is_file() or logfile.stat().st_size <= 0:
continue
try:
handler.acquire()
handler.close()
handler.doRollover()
except Exception:
lines = traceback.format_exc().splitlines()
# cut away AbsolutePathExpected Exception from log output
start = 0
for i, line in enumerate(lines):
if line.startswith('Traceback'):
start = i
for line in lines[start:]:
log.error(line)
finally:
handler.release()
logging.getLogger('HABApp').info(f'HABApp Version {__version__}')
if log_version_info:
log.info('Entry "version" is no longer required in the logging configuration file')
return None
|
#!/usr/bin/env python3
import copy
# Function: print_sudoku
# This function is already implemented in the file lab20_main.py.
# It prints the current sudoku board in an animated way, i.e. it prints the
# board and waits 0.1s before making another modification.
# Call it after every modification to the answer matrix to get an animation
# similar to the one shown in the assignment statement.
# The function has no effect when running on Susy, so there is no need to
# remove the calls before submission.
from lab20_main import print_sudoku
boxes = [
[1, 1, 1, 2, 2, 2, 3, 3, 3],
[1, 1, 1, 2, 2, 2, 3, 3, 3],
[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 6, 6, 6],
[7, 7, 7, 8, 8, 8, 9, 9, 9],
[7, 7, 7, 8, 8, 8, 9, 9, 9],
[7, 7, 7, 8, 8, 8, 9, 9, 9]
]
def n_of_box(i, j):
x = boxes[i][j]
return [[n, m] for n in range(9) for m in range(9) if boxes[n][m] == x]
# Function: resolve
# Solves the Sudoku stored in the answer matrix (resposta).
# Returns True if a solution is found, False otherwise.
def resolve(resposta):
# print_sudoku(resposta)
for i in range(9):
for j in range(9):
if resposta[i][j] == 0:
ok = [n for n in range(1, 10)
if (n not in [resposta[l][j] for l in range(9)])
and n not in resposta[i]
and n not in [resposta[t][v]
for (t, v) in n_of_box(i, j)]]
for k in ok:
resposta[i][j] = k
nu = copy.deepcopy(resposta)
if (resolve(nu)):
for l in range(9):
resposta[l] = nu[l]
return True
return False
return True
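if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not from the original lab
    # file): solve a sample board where 0 marks an empty cell, then print the
    # completed grid row by row.
    board = [
        [5, 3, 0, 0, 7, 0, 0, 0, 0],
        [6, 0, 0, 1, 9, 5, 0, 0, 0],
        [0, 9, 8, 0, 0, 0, 0, 6, 0],
        [8, 0, 0, 0, 6, 0, 0, 0, 3],
        [4, 0, 0, 8, 0, 3, 0, 0, 1],
        [7, 0, 0, 0, 2, 0, 0, 0, 6],
        [0, 6, 0, 0, 0, 0, 2, 8, 0],
        [0, 0, 0, 4, 1, 9, 0, 0, 5],
        [0, 0, 0, 0, 8, 0, 0, 7, 9],
    ]
    if resolve(board):
        for row in board:
            print(row)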
|
import unittest
import frappe
from frappe.desk.reportview import get_stats
from frappe.desk.doctype.tag.tag import add_tag
class TestTag(unittest.TestCase):
def setUp(self) -> None:
frappe.db.delete("Tag")
frappe.db.sql("UPDATE `tabDocType` set _user_tags=''")
def test_tag_count_query(self):
self.assertDictEqual(get_stats('["_user_tags"]', 'DocType'),
{'_user_tags': [['No Tags', frappe.db.count('DocType')]]})
add_tag('Standard', 'DocType', 'User')
add_tag('Standard', 'DocType', 'ToDo')
# count with no filter
self.assertDictEqual(get_stats('["_user_tags"]', 'DocType'),
{'_user_tags': [['Standard', 2], ['No Tags', frappe.db.count('DocType') - 2]]})
# count with child table field filter
self.assertDictEqual(get_stats('["_user_tags"]',
'DocType',
filters='[["DocField", "fieldname", "like", "%last_name%"], ["DocType", "name", "like", "%use%"]]'),
{'_user_tags': [['Standard', 1], ['No Tags', 0]]}) |
"""
Minimum jerk trajectory for 6DOF robot
Latest update: 14.12.2020
Written by Daniel Stankowski
"""
import numpy as np
import robosuite.utils.angle_transformation as at
class PathPlan(object):
"""
IMPORTANT: When the pose is passed [x,y,z,Rx,Ry,Rz] one has to convert the orientation
part from axis-angle representation to the Euler before running this script.
"""
def __init__(self, initial_pos, initial_ori, target_pos, target_ori, total_time):
self.initial_position = initial_pos
self.target_position = target_pos
self.initial_orientation = initial_ori
self.target_orientation = target_ori
self.tfinal = total_time
def trajectory_planning(self, t):
X_init = self.initial_position[0]
Y_init = self.initial_position[1]
Z_init = self.initial_position[2]
X_final = self.target_position[0]
Y_final = self.target_position[1]
Z_final = self.target_position[2]
# position
x_traj = (X_final - X_init) / (self.tfinal ** 3) * (6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + X_init
y_traj = (Y_final - Y_init) / (self.tfinal ** 3) * (6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + Y_init
z_traj = (Z_final - Z_init) / (self.tfinal ** 3) * (6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + Z_init
position = np.array([x_traj, y_traj, z_traj])
# velocities
vx = (X_final - X_init) / (self.tfinal ** 3) * (30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
vy = (Y_final - Y_init) / (self.tfinal ** 3) * (30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
vz = (Z_final - Z_init) / (self.tfinal ** 3) * (30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
velocity = np.array([vx, vy, vz])
# acceleration
ax = (X_final - X_init) / (self.tfinal ** 3) * (120 * (t ** 3) / (self.tfinal ** 2) - 180 * (t ** 2) / self.tfinal + 60 * t)
ay = (Y_final - Y_init) / (self.tfinal ** 3) * (120 * (t ** 3) / (self.tfinal ** 2) - 180 * (t ** 2) / self.tfinal + 60 * t)
az = (Z_final - Z_init) / (self.tfinal ** 3) * (120 * (t ** 3) / (self.tfinal ** 2) - 180 * (t ** 2) / self.tfinal + 60 * t)
acceleration = np.array([ax, ay, az])
# -----------------------------------rotation (based on rotation matrices) ---------------------------------------
vec_x = self.initial_orientation[0]
vec_y = self.initial_orientation[1]
vec_z = self.initial_orientation[2]
# alpha_final = self.target_orientation[0]
# beta_final = self.target_orientation[1]
# gamma_final = self.target_orientation[2]
Vrot = np.array([vec_x, vec_y, vec_z])
magnitude, direction = at.Axis2Vector(Vrot)
# In case of lack of rotation:
lower_bound = 10e-6
if magnitude < lower_bound:
magnitude = 0.0
direction = np.array([0.0, 0.0, 0.0])
magnitude_traj = (0 - magnitude) / (self.tfinal ** 3) * (6 * (t ** 5) / (self.tfinal ** 2) - 15 * (t ** 4) / self.tfinal + 10 * (t ** 3)) + magnitude
# we want to decrease the magnitude of the rotation from some initial value to 0
vec_x_traj = magnitude_traj*direction[0]
vec_y_traj = magnitude_traj*direction[1]
vec_z_traj = magnitude_traj*direction[2]
orientation = np.array([vec_x_traj, vec_y_traj, vec_z_traj])
# angular velocities
# alpha_d_traj = (alpha_final - vec_x) / (self.tfinal ** 3) * (30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
# beta_d_traj = (beta_final - beta_init) / (self.tfinal ** 3) * (30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
# gamma_d_traj = (gamma_final - gamma_init) / (self.tfinal ** 3) * (30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
magnitude_vel_traj = (0 - magnitude) / (self.tfinal ** 3) * (30 * (t ** 4) / (self.tfinal ** 2) - 60 * (t ** 3) / self.tfinal + 30 * (t ** 2))
vec_x_d_traj = magnitude_vel_traj * direction[0]
vec_y_d_traj = magnitude_vel_traj * direction[1]
vec_z_d_traj = magnitude_vel_traj * direction[2]
ang_vel = np.array([vec_x_d_traj, vec_y_d_traj, vec_z_d_traj])
return [position, orientation, velocity, ang_vel, acceleration]
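if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not from the original module):
    # sample a 2 s minimum-jerk trajectory between two poses. It assumes the
    # robosuite fork providing utils.angle_transformation is installed; the
    # orientation argument follows whatever convention Axis2Vector expects.
    planner = PathPlan(
        initial_pos=np.array([0.0, 0.0, 0.0]),
        initial_ori=np.array([0.0, 0.0, 0.1]),
        target_pos=np.array([0.2, 0.1, 0.3]),
        target_ori=np.array([0.0, 0.0, 0.0]),
        total_time=2.0,
    )
    for t in np.linspace(0.0, 2.0, 5):
        position, orientation, velocity, ang_vel, acceleration = planner.trajectory_planning(t)
        print("t=%.2f s, position=%s, |w|=%.4f" % (t, position, np.linalg.norm(ang_vel)))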
|
# Count the pairs of contiguous segments, one taken from each list, whose sums
# add up to exactly T, using prefix sums and dictionaries of segment-sum counts.
T = int(input())
N = int(input())
L1 = list(map(int,input().split()))
M = int(input())
L2 = list(map(int,input().split()))
d1 = dict()  # segment sum -> number of contiguous segments of L1 with that sum
d2 = dict()  # segment sum -> number of contiguous segments of L2 with that sum
D1 = [0,L1[0]]  # prefix sums of L1
D2 = [0,L2[0]]  # prefix sums of L2
for i in range(1,N):
D1.append(D1[-1]+L1[i])
for i in range(1,M):
D2.append(D2[-1]+L2[i])
for i in range(N+1):
for j in range(i+1,N+1):
try:
d1[D1[j]-D1[i]] +=1
except:
d1[D1[j]-D1[i]] = 1
for i in range(M+1):
for j in range(i+1,M+1):
try:
d2[D2[j]-D2[i]] +=1
except:
d2[D2[j]-D2[i]] = 1
ret = sorted(list(d1.items()),key = lambda t:t[0])
cnt = 0
# for each segment sum s from the first list, add how many segments of the
# second list sum to T - s, weighted by the number of occurrences of s
for i in ret:
try:
cnt += d2[T-i[0]] * i[1]
except:
continue
print(cnt) |
from airflow.hooks.postgres_hook import PostgresHook
from sqlalchemy.orm.session import sessionmaker
def get_db_session():
engine = PostgresHook(postgres_conn_id='huey_dev').get_sqlalchemy_engine()
Session = sessionmaker()
Session.configure(bind=engine)
db_session = Session()
return db_session
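if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not from the original module):
    # run a throwaway query through the session. It requires an Airflow
    # connection named "huey_dev" to exist.
    from sqlalchemy import text

    session = get_db_session()
    try:
        print(session.execute(text("SELECT 1")).scalar())
    finally:
        session.close()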
|
from pathlib import Path
from typing import Dict, List, Optional, cast
import pytest
from darwin.datatypes import (
Annotation,
AnnotationClass,
AnnotationFile,
EllipseData,
Point,
SubAnnotation,
)
from darwin.importer.formats.superannotate import parse_path
from jsonschema import ValidationError
def describe_parse_path():
@pytest.fixture
def classes_file_path(tmp_path: Path):
path = tmp_path / "classes.json"
yield path
path.unlink()
@pytest.fixture
def annotations_file_path(tmp_path: Path):
path = tmp_path / "annotation.json"
yield path
path.unlink()
def it_returns_none_if_file_is_not_json():
bad_path = Path("/tmp/annotation.xml")
assert parse_path(bad_path) is None
def it_returns_none_if_file_is_classes():
bad_path = Path("/tmp/classes.json")
assert parse_path(bad_path) is None
def it_raises_if_folder_has_no_classes_file(annotations_file_path: Path):
annotations_json: str = """
{
"instances": [],
"metadata": {
"name": "demo-image-0.jpg"
}
}
"""
annotations_file_path.write_text(annotations_json)
with pytest.raises(ValueError) as error:
parse_path(annotations_file_path)
assert "Folder must contain a 'classes.json'" in str(error.value)
def it_returns_empty_file_if_there_are_no_annotations(annotations_file_path: Path, classes_file_path: Path):
annotations_json: str = """
{
"instances": [],
"metadata": {
"name": "demo-image-0.jpg"
}
}
"""
classes_json: str = """[]"""
annotations_file_path.write_text(annotations_json)
classes_file_path.write_text(classes_json)
assert parse_path(annotations_file_path) == AnnotationFile(
annotations=[],
path=annotations_file_path,
filename="demo-image-0.jpg",
annotation_classes=set(),
remote_path="/",
)
def it_raises_if_annotation_has_no_type(annotations_file_path: Path, classes_file_path: Path):
annotations_json: str = """
{
"instances": [
{
"x": 1,
"y": 0
}
],
"metadata": {
"name": "demo-image-0.jpg"
}
}
"""
classes_json: str = """[]"""
annotations_file_path.write_text(annotations_json)
classes_file_path.write_text(classes_json)
with pytest.raises(ValidationError) as error:
parse_path(annotations_file_path)
assert "'type' is a required property" in str(error.value)
def it_raises_if_annotation_has_no_class_id(annotations_file_path: Path, classes_file_path: Path):
annotations_json: str = """
{
"instances": [
{
"type": "point",
"x": 1,
"y": 0
}
],
"metadata": {
"name": "demo-image-0.jpg"
}
}
"""
classes_json: str = """[]"""
annotations_file_path.write_text(annotations_json)
classes_file_path.write_text(classes_json)
with pytest.raises(ValidationError) as error:
parse_path(annotations_file_path)
assert "'classId' is a required property" in str(error.value)
def it_raises_if_metadata_is_missing(annotations_file_path: Path, classes_file_path: Path):
annotations_json: str = """
{
"instances": [
{
"type": "point",
"x": 1,
"y": 0
}
]
}
"""
classes_json: str = """[]"""
annotations_file_path.write_text(annotations_json)
classes_file_path.write_text(classes_json)
with pytest.raises(ValidationError) as error:
parse_path(annotations_file_path)
assert "'metadata' is a required property" in str(error.value)
def it_raises_if_metadata_is_missing_name(annotations_file_path: Path, classes_file_path: Path):
annotations_json: str = """
{
"instances": [
{
"type": "point",
"x": 1,
"y": 0
}
],
"metadata": { }
}
"""
classes_json: str = """[]"""
annotations_file_path.write_text(annotations_json)
classes_file_path.write_text(classes_json)
with pytest.raises(ValidationError) as error:
parse_path(annotations_file_path)
assert "'name' is a required property" in str(error.value)
def it_raises_if_point_has_missing_coordinate(annotations_file_path: Path, classes_file_path: Path):
annotations_json: str = """
{
"instances": [
{
"type": "point",
"y": 0
}
],
"metadata": {
"name": "demo-image-0.jpg"
}
}
"""
classes_json: str = """[]"""
annotations_file_path.write_text(annotations_json)
classes_file_path.write_text(classes_json)
with pytest.raises(ValidationError) as error:
parse_path(annotations_file_path)
assert "'x' is a required property" in str(error.value)
def it_imports_point_vectors(annotations_file_path: Path, classes_file_path: Path):
annotations_json: str = """
{
"instances": [
{
"type": "point",
"x": 1.93,
"y": 0.233,
"classId": 1
}
],
"metadata": {
"name": "demo-image-0.jpg"
}
}
"""
classes_json: str = """
[
{"name": "Person", "id": 1}
]
"""
annotations_file_path.write_text(annotations_json)
classes_file_path.write_text(classes_json)
annotation_file: Optional[AnnotationFile] = parse_path(annotations_file_path)
assert annotation_file is not None
assert annotation_file.path == annotations_file_path
assert annotation_file.filename == "demo-image-0.jpg"
assert annotation_file.annotation_classes
assert annotation_file.remote_path == "/"
assert annotation_file.annotations
point_annotation: Annotation = cast(Annotation, annotation_file.annotations.pop())
assert_point(point_annotation, {"x": 1.93, "y": 0.233})
annotation_class = point_annotation.annotation_class
assert_annotation_class(annotation_class, "Person", "keypoint")
def it_raises_if_ellipse_has_missing_coordinate(annotations_file_path: Path, classes_file_path: Path):
annotations_json: str = """
{
"instances": [
{
"type": "ellipse",
"cy": 0,
"cx": 0,
"rx": 0,
"angle": 0
}
],
"metadata": {
"name": "demo-image-0.jpg"
}
}
"""
classes_json: str = """[]"""
annotations_file_path.write_text(annotations_json)
classes_file_path.write_text(classes_json)
with pytest.raises(ValidationError) as error:
parse_path(annotations_file_path)
assert "'x' is a required property" in str(error.value)
def it_imports_ellipse_vectors(annotations_file_path: Path, classes_file_path: Path):
annotations_json: str = """
{
"instances": [
{
"type": "ellipse",
"classId": 1,
"cx": 922.1,
"cy": 475.8,
"rx": 205.4,
"ry": 275.7,
"angle": 0
}
],
"metadata": {
"name": "demo-image-0.jpg"
}
}
"""
classes_json: str = """
[
{"name": "Person", "id": 1}
]
"""
annotations_file_path.write_text(annotations_json)
classes_file_path.write_text(classes_json)
annotation_file: Optional[AnnotationFile] = parse_path(annotations_file_path)
assert annotation_file is not None
assert annotation_file.path == annotations_file_path
assert annotation_file.filename == "demo-image-0.jpg"
assert annotation_file.annotation_classes
assert annotation_file.remote_path == "/"
assert annotation_file.annotations
ellipse_annotation: Annotation = cast(Annotation, annotation_file.annotations.pop())
assert_ellipse(
ellipse_annotation, {"angle": 0, "center": {"x": 922.1, "y": 475.8}, "radius": {"x": 205.4, "y": 275.7}}
)
annotation_class = ellipse_annotation.annotation_class
assert_annotation_class(annotation_class, "Person", "ellipse")
def assert_bbox(annotation: Annotation, x: float, y: float, h: float, w: float) -> None:
data = annotation.data
assert data
assert data.get("x") == x
assert data.get("y") == y
assert data.get("w") == w
assert data.get("h") == h
def assert_polygon(annotation: Annotation, points: List[Point]) -> None:
actual_points = annotation.data.get("path")
assert actual_points
assert actual_points == points
def assert_point(annotation: Annotation, point: Point) -> None:
data = annotation.data
assert data
assert data.get("x") == point.get("x")
assert data.get("y") == point.get("y")
def assert_ellipse(annotation: Annotation, ellipse: EllipseData) -> None:
ellipse_center: Dict[str, float] = cast(Dict[str, float], ellipse.get("center"))
ellipse_radius: Dict[str, float] = cast(Dict[str, float], ellipse.get("radius"))
data = annotation.data
assert data
assert data.get("angle") == ellipse.get("angle")
center = data.get("center")
assert center
assert center.get("x") == ellipse_center.get("x")
assert center.get("y") == ellipse_center.get("y")
radius = data.get("radius")
assert radius
assert radius.get("x") == ellipse_radius.get("x")
assert radius.get("y") == ellipse_radius.get("y")
def assert_line(annotation: Annotation, line: List[Point]) -> None:
actual_line = annotation.data.get("path")
assert actual_line
assert actual_line == line
def assert_annotation_class(
annotation_class: AnnotationClass, name: str, type: str, internal_type: Optional[str] = None
) -> None:
assert annotation_class
assert annotation_class.name == name
assert annotation_class.annotation_type == type
assert annotation_class.annotation_internal_type == internal_type
def assert_subannotations(actual_subs: List[SubAnnotation], expected_subs: List[SubAnnotation]) -> None:
assert actual_subs
for actual_sub in actual_subs:
for expected_sub in expected_subs:
assert actual_sub.annotation_type == expected_sub.annotation_type
assert actual_sub.data == expected_sub.data
|
# Generated by Django 3.0.2 on 2020-01-24 15:24
import common.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
replaces = [('shop', '0001_initial'), ('shop', '0002_auto_20200120_1458'), ('shop', '0003_productcategory_main_photo'), ('shop', '0004_auto_20200120_1514'), ('shop', '0005_auto_20200121_1904'), ('shop', '0006_auto_20200121_2138'), ('shop', '0007_auto_20200124_1520')]
initial = True
dependencies = [
('catalogue', '0001_initial'),
('catalogue', '0002_auto_20200120_1458'),
('common', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProductCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=250, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('logo', models.OneToOneField(default='', on_delete=django.db.models.deletion.CASCADE, related_name='category_logo', to='catalogue.Photo')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
('price', models.BigIntegerField()),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('slug', models.SlugField(blank=True, max_length=200, unique=True)),
('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='products', to='shop.ProductCategory')),
('currency', models.ForeignKey(default=840, on_delete=django.db.models.deletion.CASCADE, to='catalogue.Currency')),
('main_photo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='product_main_photo', to='catalogue.Photo')),
('photos', models.ManyToManyField(related_name='products', to='catalogue.Photo')),
],
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('customer_data', common.fields.JsonField(null=True)),
('products_data', common.fields.JsonField(default=[])),
('details', models.TextField(blank=True, null=True)),
('products_price', models.BigIntegerField(default=0)),
('total_price', models.BigIntegerField(default=0)),
('delivery_data', common.fields.JsonField(null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('currency', models.ForeignKey(default=840, on_delete=django.db.models.deletion.CASCADE, to='catalogue.Currency')),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='orders', to='common.Customer')),
('delivery_address', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='common.DeliveryAddress')),
('payment_method', models.PositiveSmallIntegerField(choices=[(0, 'Cash'), (1, 'Card')], default=1)),
],
),
migrations.CreateModel(
name='ProductOrder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveIntegerField(default=1)),
('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='products', to='shop.Order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to='shop.Product')),
],
),
]
|
from pythonforandroid.recipe import CythonRecipe
from os.path import join
#import sh
"""
Note for future recipe creation.
The best way to structure a project is to push add-on modules to github,
and place setup.py in the very top directory. Setup.py should have build
instructions for different architectures.
"""
class SpaceGuppySfxRecipe(CythonRecipe):
version = '0.4.1-alpha'
url = 'https://github.com/LarsDu/SpaceGuppySfx/archive/{version}.zip'
name = 'spaceguppy_sfx'
    depends = ['kivent_core']
    cythonize = True
def get_recipe_env(self, arch, with_flags_in_cc=True):
env = super(SpaceGuppySfxRecipe, self).get_recipe_env(
arch, with_flags_in_cc=with_flags_in_cc)
cymunk = self.get_recipe('cymunk', self.ctx).get_build_dir(arch.arch)
env['PYTHONPATH'] = join(cymunk, 'cymunk', 'python')
kivy = self.get_recipe('kivy', self.ctx).get_build_dir(arch.arch)
kivent = self.get_recipe('kivent_core',
self.ctx).get_build_dir(arch.arch, sub=True)
env['CYTHONPATH'] = ':'.join((kivy, kivent))
return env
recipe = SpaceGuppySfxRecipe()
|
# Databricks notebook source
import boto3
import yaml
import os
import re
# COMMAND ----------
def parse_source_location(arg_source_location):
pattern = 's3:\/\/([^\/]*)\/(.*)'
    # this regex will split the source location into two parts:
    # (1) bucket name - s3://<all characters up to the next forward slash>
    # (2) prefix - all characters after the trailing forward slash following the bucket name
re_result = re.match(pattern, arg_source_location)
bucket_name = re_result.group(1)
prefix = re_result.group(2)
return bucket_name, prefix
# COMMAND ----------
# Here we create a configuration dictionary ... Need to determine how to manage configurations
def get_config_file(config_location):
#s3://pbshaw-emr-exploration/scripts/resources/confirmed-cases-config.yml
#bucket = "pbshaw-emr-exploration"
#config_file_key = "scripts/resources/confirmed-cases-config.yml"
if config_location.startswith('s3'):
bucket, config_file_key = parse_source_location(config_location)
s3_client = boto3.client('s3')
response = s3_client.get_object(Bucket=bucket, Key=config_file_key)
try:
configfile = yaml.safe_load(response["Body"])
except yaml.YAMLError as exc:
return exc
else:
print('bad config file: {}'.format(config_location))
configfile = yaml.safe_load(open(os.path.join(os.path.dirname(os.path.realpath(__file__)), config_location), 'r'))
return configfile
#return {'bucketName': 'pbs-databricks-data-lake', 'stageDir': 's3://{}/output/databricks/transform/baseball_stats',
# 'prefix_filter': 'input/retro-stats', 'output_format': 'json'} |
# define a large classification dataset
from sklearn.datasets import make_classification
# define dataset
X, y = make_classification(n_samples=10000, n_features=500, n_informative=10, n_redundant=490, random_state=1)
# summarize the shape of the dataset
print(X.shape, y.shape)
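# hedged follow-up sketch (added for illustration, not from the original snippet):
# one plausible next step is to project the 500 mostly-redundant features down
# to 10 components before fitting a simple classifier; the choice of
# TruncatedSVD and LogisticRegression here is illustrative, not prescribed
from sklearn.decomposition import TruncatedSVD
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
# evaluate the reduced-dimension pipeline with 3-fold cross-validation
model = make_pipeline(TruncatedSVD(n_components=10), LogisticRegression(max_iter=1000))
scores = cross_val_score(model, X, y, cv=3, n_jobs=-1)
print('Mean accuracy: %.3f' % scores.mean())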
|