filename | text
---|---
the-stack_0_27554
|
'''
# tpls_server.py
functional implementation of the
'transport layer security server' or tpls_server
when i wrote this only God and I understood it.
now only God does
'''
import logging
import datetime as dt
import sys
import socketserver
logging.basicConfig(level=logging.DEBUG,
format='%(name)s: %(message)s',
)
local_ip = "192.168.1.3"
n_port = 1234
tw = ['nighthawk','22670bf1b10545e46f7d797c8e4bd7a77af1bb667f7e864aaca07fad65439f84','951c49bc88ce9a2cc31d4470423590a7c0bedbef953a06a72e6b5d4f74731ed6', '56b8ba882b6aeeb7fa43f9125d8d2909b8a734f82b46b67b3809105a28cfb05d']
trusted_wallet_hash = tw
handshake = []
run = True
def chash_0(input_string):
    handshake.clear()
    autolog('chash_0: input')
    packet = input_string
    # accept the packet only if it matches one of the trusted wallet hashes
    if packet in tw:
        cs = 'CONNECTION SUCCESSFUL'
        handshake.append(1)
    else:
        cs = 'ERROR: CONNECTION UNSUCCESSFUL'
        handshake.append(0)
    autolog(cs)
    autolog(handshake)
    return cs
def client_thread(conn, ip, port, MAX_BUFFER_SIZE = 4096):
# incoming user wallet hash, id of user/node
init_chash_b = conn.recv(MAX_BUFFER_SIZE)
autolog('client_thread: ')
# MAX_BUFFER_SIZE is how big the message can be
    siz = sys.getsizeof(init_chash_b)
if siz >= MAX_BUFFER_SIZE:
print("The length of input is probably too long: {}".format(siz))
autolog('client_thread: ')
# decode incoming user hash
chash_0_r = init_chash_b.decode("utf8")
autolog(chash_0_r)
# analyze incoming user hash
res = chash_0(chash_0_r)
    autolog('chash -> analyzer')
vysl = res.encode("utf8") # encode the result string
conn.sendall(vysl) # send it to client
if handshake[0] == 1:
# fid tx
autolog('FID INCOMING')
data_bytes = conn.recv(MAX_BUFFER_SIZE)
        siz = sys.getsizeof(data_bytes)
if siz >= MAX_BUFFER_SIZE:
print("The length of input is probably too long: {}".format(siz))
# decode the FID
data = data_bytes.decode('utf-8')
autolog(data)
fid_analyze(data)
        # response after fid execute
replyb = 'DATA TRANSFER COMPLETE'
conn.sendall(replyb.encode('utf-8'))
else:
conn.close() # close connection
arnold = 'CONNECTION ' + ip + ':' + port + " TERMINATED"
autolog(arnold)
start_handshake()
def fid_analyze(fid):
    global run
    autolog(type(fid))
    if fid == '99':
        autolog(fid)
        run = False
elif fid == '0':
# pipe to execute function
autolog('0_NETWORK_PROTOCOL')
elif fid == '1':
autolog('1_np')
elif fid == 'msg':
incoming_msg(fid)
else:
        autolog('NO MATCHING FID EXECUTABLES')
def post_fid_anal():
pass
def incoming_msg(msg):
autolog(msg)
def start_handshake():
import socket
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# this is for easy starting/killing the app
soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
autolog('SOCKET CREATED')
try:
soc.bind((local_ip, n_port))
autolog('SOCKET BIND COMPLETE')
except socket.error as msg:
import sys
print(dt.datetime.now(), 'BIND_FAIL_ERROR: ' + str(sys.exc_info()))
sys.exit()
#Start listening on socket
soc.listen(10)
autolog('SOCKET LISTENING')
    # to handle each connection in a separate job we need threading
    from threading import Thread
    # accept a single connection here; client_thread() calls start_handshake()
    # again, so the server does not need to be restarted for every client
conn, addr = soc.accept()
ip, port = str(addr[0]), str(addr[1])
d = 'loop_0 > ACCEPTING CONNECTIONS FROM ' + ip + ':' + port
autolog(d)
try:
Thread(target=client_thread, args=(conn, ip, port)).start()
except:
        print(dt.datetime.now(), "Terrible error!")
import traceback
autolog('loop: ERROR')
traceback.print_exc()
soc.close()
def autolog(message):
import inspect, logging
# Get the previous frame in the stack, otherwise it would
# be this function!!!
func = inspect.currentframe().f_back.f_code
# Dump the message + the name of this function to the log.
logging.debug("{}\t{}\t{}\t{}".format(
dt.datetime.now(),
func.co_filename,
func.co_name,
message
))
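# --- Hedged client-side sketch (illustrative, not part of the original server) ---
# The handshake above expects a trusted wallet hash first, answers with a status
# string, and then expects a FID ('0', '1', '99' or 'msg'). A minimal client
# could look roughly like this; the host and port mirror the constants defined
# at the top of this file.
#
# import socket
# client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# client.connect(("192.168.1.3", 1234))
# client.sendall("nighthawk".encode("utf8"))     # trusted wallet id
# print(client.recv(4096).decode("utf8"))        # handshake status
# client.sendall("msg".encode("utf8"))           # FID to execute
# print(client.recv(4096).decode("utf8"))        # 'DATA TRANSFER COMPLETE'
# client.close()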
|
the-stack_0_27555
|
"""
@file util.py
@author Tej Sukhatme
helper functions for loading feature matrices and 'incidence' labels from CSV files
"""
import pandas as pd
def load_features(path):
if path.exists() and path.is_file():
df = pd.read_csv(path)
features = df.drop(columns=['incidence'])
return features.values
def load_labels(path):
if path.exists() and path.is_file():
df = pd.read_csv(path)
labels = pd.Series(df['incidence'])
return labels.values
def load(path, is_labels=False):
if path.exists() and path.is_file():
df = pd.read_csv(path)
if is_labels:
df = pd.Series(df['incidence'])
return df.values
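# Hedged usage sketch (the CSV path below is hypothetical): loading the feature
# matrix and the matching 'incidence' label vector from the same file.
if __name__ == '__main__':
    from pathlib import Path

    csv_path = Path('data/train.csv')  # assumed location of a CSV with an 'incidence' column
    X = load_features(csv_path)
    y = load_labels(csv_path)
    if X is not None and y is not None:
        print(X.shape, y.shape)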
|
the-stack_0_27556
|
__all__: list = ['inherit_docstrings']
from collections import OrderedDict
from inspect import Signature
from io import StringIO
from textwrap import indent
from typing import Any, Dict, List, TypeVar, Type, Collection, Optional
import docstring_parser
from ..util import get_signature
def _get_docstring_params_dict(obj: Any) -> Dict[str, docstring_parser.DocstringParam]:
params = docstring_parser.google.parse(obj.__doc__).params
params_dict = OrderedDict((param.arg_name, param) for param in params)
return params_dict
def _replace_params_in_docstring(docstring: docstring_parser.Docstring,
new_params: Collection[docstring_parser.DocstringParam]) -> None:
# erase old params from docstring
new_meta = [meta for meta in docstring.meta
if not isinstance(meta, docstring_parser.DocstringParam)]
# replace them by new_params
new_meta.extend(new_params)
docstring.meta = new_meta
T = TypeVar('T')
def inherit_docstrings(cls: Type[T]) -> Type[T]:
"""Modifies docstring parameters list of class and nested members updating it with docstring parameters from parent
classes and parent classes members.
    All args and attributes from all parent classes are added to the decorated class docstring. If any parameter is
    present in more than one class, it is updated with the most recent version (according to MRO order). The same
    applies to all nested members: a nested member is considered to be a parent of another nested member if and only
    if the corresponding outer classes have the same relationship and the nested members have the same name.
Args:
cls: target class.
"""
cls_docstring = docstring_parser.google.parse(cls.__doc__)
nested_docstrings = {}
inherited_params = OrderedDict()
nested_inherited_params = {}
for member_name in dir(cls):
if member_name.startswith('_'):
continue
nested_docstring = getattr(getattr(cls, member_name), '__doc__')
if nested_docstring:
nested_docstrings[member_name] = docstring_parser.google.parse(nested_docstring)
nested_inherited_params[member_name] = OrderedDict()
traverse_order = list(cls.__mro__[::-1])
traverse_order.append(cls)
for parent in traverse_order:
inherited_params.update(_get_docstring_params_dict(parent))
for member_name in dir(parent):
if member_name in nested_inherited_params:
nested_inherited_params[member_name].update(_get_docstring_params_dict(getattr(parent, member_name)))
_replace_params_in_docstring(cls_docstring, inherited_params.values())
cls.__doc__ = _construct_docstring(cls_docstring, get_signature(cls))
for member_name in nested_docstrings:
if nested_inherited_params[member_name]:
_replace_params_in_docstring(nested_docstrings[member_name], nested_inherited_params[member_name].values())
member = getattr(cls, member_name)
member_sig = None
if isinstance(member, type):
member_sig = get_signature(member)
getattr(cls, member_name).__doc__ = _construct_docstring(nested_docstrings[member_name], member_sig)
return cls
def _indent_all_lines_except_first(text: str) -> str:
first_line_end = text.find('\n')
if first_line_end == -1:
return text
return text[:first_line_end + 1] + indent(text[first_line_end + 1:], ' ' * 4)
def _get_key_value_chunk(title: str, keys: List[str], values: List[str]) -> str:
io = StringIO()
io.write(f'{title}:\n')
for key, value in zip(keys, values):
io.write(indent(f'{key}: {_indent_all_lines_except_first(value)}\n', ' ' * 4))
return io.getvalue().rstrip('\n')
def _construct_docstring(parsed_docstring: docstring_parser.Docstring, sig: Optional[Signature] = None) -> str:
chunks = []
docstring_prefix = ''
if parsed_docstring.short_description:
chunks.append(parsed_docstring.short_description)
else:
docstring_prefix = '\n'
if parsed_docstring.long_description:
chunks.append(parsed_docstring.long_description)
arg_params = []
attr_params = []
params_dict = {param.arg_name: param for param in parsed_docstring.params}
if sig:
for sig_param_name in sig.parameters.keys():
param = params_dict.pop(sig_param_name, None)
if not param:
continue
if param.args[0] == 'attribute':
attr_params.append(param)
elif param.args[0] == 'param':
arg_params.append(param)
for param in parsed_docstring.params:
if param.arg_name not in params_dict:
continue
if param.args[0] == 'attribute':
attr_params.append(param)
elif param.args[0] == 'param':
arg_params.append(param)
if arg_params:
chunk = _get_key_value_chunk(title='Args',
keys=[param.arg_name for param in arg_params],
values=[param.description for param in arg_params])
chunks.append(chunk)
if attr_params:
chunk = _get_key_value_chunk(title='Attributes',
keys=[param.arg_name for param in attr_params],
values=[param.description for param in attr_params])
chunks.append(chunk)
returns = parsed_docstring.returns
if returns:
chunk = _get_key_value_chunk(title='Yields' if returns.is_generator else 'Returns',
keys=[returns.type_name],
values=[returns.description])
chunks.append(chunk)
raises = parsed_docstring.raises
if raises:
chunk = _get_key_value_chunk(title='Raises',
keys=[exception.type_name for exception in raises],
values=[exception.description for exception in raises])
chunks.append(chunk)
for meta in parsed_docstring.meta:
if type(meta) not in [docstring_parser.DocstringParam,
docstring_parser.DocstringReturns,
docstring_parser.DocstringRaises]:
io = StringIO()
io.write(f'{meta.args[0].title()}:\n')
io.write(indent(meta.description, ' ' * 4))
chunks.append(io.getvalue())
docstring = docstring_prefix + '\n\n'.join(chunks)
return docstring
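# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Applying `inherit_docstrings` so that a subclass docstring picks up the "Args:"
# entries documented on its parent; the class names below are hypothetical.
#
# @inherit_docstrings
# class DerivedWidget(BaseWidget):
#     """A widget with one extra option.
#
#     Args:
#         extra: option specific to DerivedWidget (BaseWidget's args are merged in).
#     """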
|
the-stack_0_27557
|
from __future__ import print_function
import sys
import os
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torch.autograd import Variable
from data import VOCroot, VOC_CLASSES as labelmap
from PIL import Image
from data import AnnotationTransform, VOCDetection, BaseTransform, VOC_CLASSES
import torch.utils.data as data
from ssd import build_ssd
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detection')
parser.add_argument('--trained_model', default='weights/v2.pth',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
help='Dir to save results')
parser.add_argument('--visual_threshold', default=0.6, type=float,
help='Final confidence threshold')
parser.add_argument('--cuda', default=True, type=bool,
help='Use cuda to train model')
parser.add_argument('--voc_root', default=VOCroot, help='Location of VOC root directory')
args = parser.parse_args()
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
def test_net(save_folder, net, cuda, testset, transform, thresh):
# dump predictions and assoc. ground truth to text file for now
filename = save_folder+'test1.txt'
num_images = len(testset)
for i in range(num_images):
print('Testing image {:d}/{:d}....'.format(i+1, num_images))
img = testset.pull_image(i)
img_id, annotation = testset.pull_anno(i)
x = torch.from_numpy(transform(img)[0]).permute(2, 0, 1)
x = Variable(x.unsqueeze(0))
with open(filename, mode='a') as f:
f.write('\nGROUND TRUTH FOR: '+img_id+'\n')
for box in annotation:
f.write('label: '+' || '.join(str(b) for b in box)+'\n')
if cuda:
x = x.cuda()
y = net(x) # forward pass
detections = y.data
# scale each detection back up to the image
scale = torch.Tensor([img.shape[1], img.shape[0],
img.shape[1], img.shape[0]])
pred_num = 0
for i in range(detections.size(1)):
j = 0
            while detections[0, i, j, 0] >= thresh:
if pred_num == 0:
with open(filename, mode='a') as f:
f.write('PREDICTIONS: '+'\n')
score = detections[0, i, j, 0]
label_name = labelmap[i-1]
pt = (detections[0, i, j, 1:]*scale).cpu().numpy()
coords = (pt[0], pt[1], pt[2], pt[3])
pred_num += 1
with open(filename, mode='a') as f:
f.write(str(pred_num)+' label: '+label_name+' score: ' +
str(score) + ' '+' || '.join(str(c) for c in coords) + '\n')
j += 1
if __name__ == '__main__':
# load net
num_classes = len(VOC_CLASSES) + 1 # +1 background
net = build_ssd('test', 300, num_classes) # initialize SSD
net.load_state_dict(torch.load(args.trained_model))
net.eval()
print('Finished loading model!')
# load data
testset = VOCDetection(args.voc_root, [('2007', 'test')], None, AnnotationTransform())
if args.cuda:
net = net.cuda()
cudnn.benchmark = True
# evaluation
test_net(args.save_folder, net, args.cuda, testset,
BaseTransform(net.size, (104, 117, 123)),
thresh=args.visual_threshold)
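# Hedged invocation sketch (the script filename is assumed): running the
# evaluation with the command-line arguments defined by the parser above.
#
#   python eval.py --trained_model weights/v2.pth --save_folder eval/ \
#       --visual_threshold 0.6 --voc_root /path/to/VOCdevkit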
|
the-stack_0_27558
|
from telethon import events
import subprocess
from telethon.errors import MessageEmptyError, MessageTooLongError, MessageNotModifiedError
import io
import asyncio
import time
from userbot.utils import admin_cmd
import glob
import os
try:
    import instantmusic
except ImportError:
    os.system("pip install instantmusic")
os.system("rm -rf *.mp3")
def bruh(name):
os.system("instantmusic -q -s "+name)
@borg.on(admin_cmd(pattern="song ?(.*)"))
async def _(event):
if event.fwd_from:
return
DELAY_BETWEEN_EDITS = 0.5
PROCESS_RUN_TIME = 100
cmd = event.pattern_match.group(1)
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
await event.edit("ok finding the song")
bruh(str(cmd))
l = glob.glob("*.mp3")
loa = l[0]
await event.edit("Trying To send This Song")
await borg.send_file(
event.chat_id,
loa,
force_document=True,
allow_cache=False,
caption=cmd,
reply_to=reply_to_id
)
os.system("rm -rf *.mp3")
subprocess.check_output("rm -rf *.mp3",shell=True)
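# Hedged usage note (the command prefix depends on the userbot configuration):
# in chat, the handler above would be triggered by something like
#     .song never gonna give you up
# which downloads the first matching mp3 via instantmusic and replies with it
# as a document.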
|
the-stack_0_27559
|
from urllib.parse import urlencode
from allauth.account.adapter import DefaultAccountAdapter
from allauth.exceptions import ImmediateHttpResponse
from django.http import HttpResponseRedirect
from django.urls import reverse
from allauth_2fa.utils import user_has_valid_totp_device
class OTPAdapter(DefaultAccountAdapter):
def has_2fa_enabled(self, user):
"""Returns True if the user has 2FA configured."""
return user_has_valid_totp_device(user)
def login(self, request, user):
# Require two-factor authentication if it has been configured.
if self.has_2fa_enabled(user):
# Cast to string for the case when this is not a JSON serializable
# object, e.g. a UUID.
request.session['allauth_2fa_user_id'] = str(user.id)
redirect_url = reverse('two-factor-authenticate')
# Add "next" parameter to the URL.
view = request.resolver_match.func.view_class()
view.request = request
success_url = view.get_success_url()
query_params = request.GET.copy()
if success_url:
query_params[view.redirect_field_name] = success_url
if query_params:
redirect_url += '?' + urlencode(query_params)
raise ImmediateHttpResponse(
response=HttpResponseRedirect(redirect_url)
)
# Otherwise defer to the original allauth adapter.
return super(OTPAdapter, self).login(request, user)
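# Hedged configuration sketch (the dotted path is hypothetical): wiring this
# adapter into a Django project through django-allauth's ACCOUNT_ADAPTER setting
# so that login() above can intercept users with 2FA enabled.
#
# # settings.py
# ACCOUNT_ADAPTER = 'myproject.adapters.OTPAdapter'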
|
the-stack_0_27560
|
# Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by John Krist
# Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri
#
# Modified by J. Krist - 19 April 2019 - added DFTI_THREAD_LIMIT
import ctypes as _ctypes
# enum DFTI_CONFIG_PARAM from mkl_dfti.h
DFTI_FORWARD_DOMAIN = _ctypes.c_int(0) # Domain for forward transform, no default
DFTI_DIMENSION = _ctypes.c_int(1) # Dimension, no default
DFTI_LENGTHS = _ctypes.c_int(2) # length(s) of transform, no default
DFTI_PRECISION = _ctypes.c_int(3) # Precision of computation, no default
DFTI_FORWARD_SCALE = _ctypes.c_int(4) # Scale factor for forward transform, default = 1.0
DFTI_BACKWARD_SCALE = _ctypes.c_int(5) # Scale factor for backward transform, default = 1.0
DFTI_FORWARD_SIGN = _ctypes.c_int(6) # Default for forward transform = DFTI_NEGATIVE
DFTI_NUMBER_OF_TRANSFORMS = _ctypes.c_int(7) # Number of data sets to be transformed, default = 1
DFTI_COMPLEX_STORAGE = _ctypes.c_int(8) # Representation for complex domain, default = DFTI_COMPLEX_COMPLEX
DFTI_REAL_STORAGE = _ctypes.c_int(9) # Rep. for real domain, default = DFTI_REAL_REAL
DFTI_CONJUGATE_EVEN_STORAGE = _ctypes.c_int(10) # Rep. for conjugate even domain, default = DFTI_COMPLEX_REAL
DFTI_PLACEMENT = _ctypes.c_int(11) # Placement of result, default = DFTI_INPLACE
DFTI_INPUT_STRIDES = _ctypes.c_int(12) # Stride information of input data, default = tightly
DFTI_OUTPUT_STRIDES = _ctypes.c_int(13) # Stride information of output data, default = tightly
DFTI_INPUT_DISTANCE = _ctypes.c_int(14) # Distance information of input data, default = 0
DFTI_OUTPUT_DISTANCE = _ctypes.c_int(15) # Distance information of output data, default = 0
DFTI_INITIALIZATION_EFFORT = _ctypes.c_int(16) # Effort spent in initialization, default = DFTI_MEDIUM
DFTI_WORKSPACE = _ctypes.c_int(17) # Use of workspace during computation, default = DFTI_ALLOW
DFTI_ORDERING = _ctypes.c_int(18) # Possible out of order computation, default = DFTI_ORDERED
DFTI_TRANSPOSE = _ctypes.c_int(19) # Possible transposition of result, default = DFTI_NONE
DFTI_DESCRIPTOR_NAME = _ctypes.c_int(20) # name of descriptor, default = string of zero length
DFTI_PACKED_FORMAT = _ctypes.c_int(21) # packed format for real transform, default = DFTI_CCS_FORMAT
# below 4 parameters for get_value functions only
DFTI_COMMIT_STATUS = _ctypes.c_int(22) # Whether descriptor has been committed
DFTI_VERSION = _ctypes.c_int(23) # DFTI implementation version number
DFTI_FORWARD_ORDERING = _ctypes.c_int(24) # The ordering of forward transform
DFTI_BACKWARD_ORDERING = _ctypes.c_int(25) # The ordering of backward transform
# below for set_value and get_value functions
DFTI_NUMBER_OF_USER_THREADS = _ctypes.c_int(26) # number of user's threads, default = 1
DFTI_THREAD_LIMIT = _ctypes.c_int(27) # limit on number of threads, default = 1
# DFTI options values
DFTI_COMMITTED = _ctypes.c_int(30) # status - commit
DFTI_UNCOMMITTED = _ctypes.c_int(31) # status - uncommit
DFTI_COMPLEX = _ctypes.c_int(32) # General domain
DFTI_REAL = _ctypes.c_int(33) # Real domain
DFTI_CONJUGATE_EVEN = _ctypes.c_int(34) # Conjugate even domain
DFTI_SINGLE = _ctypes.c_int(35) # Single precision
DFTI_DOUBLE = _ctypes.c_int(36) # Double precision
DFTI_NEGATIVE = _ctypes.c_int(37) # -i, for setting definition of transform
DFTI_POSITIVE = _ctypes.c_int(38) # +i, for setting definition of transform
DFTI_COMPLEX_COMPLEX = _ctypes.c_int(39) # Representation method for domain
DFTI_COMPLEX_REAL = _ctypes.c_int(40) # Representation method for domain
DFTI_REAL_COMPLEX = _ctypes.c_int(41) # Representation method for domain
DFTI_REAL_REAL = _ctypes.c_int(42) # Representation method for domain
DFTI_INPLACE = _ctypes.c_int(43) # Result overwrites input
DFTI_NOT_INPLACE = _ctypes.c_int(44) # Result placed differently than input
DFTI_LOW = _ctypes.c_int(45) # A low setting
DFTI_MEDIUM = _ctypes.c_int(46) # A medium setting
DFTI_HIGH = _ctypes.c_int(47) # A high setting
DFTI_ORDERED = _ctypes.c_int(48) # Data on forward and backward domain ordered
DFTI_BACKWARD_SCRAMBLED = _ctypes.c_int(49) # Data on forward ordered and backward domain scrambled
DFTI_FORWARD_SCRAMBLED = _ctypes.c_int(50) # Data on forward scrambled and backward domain ordered
DFTI_ALLOW = _ctypes.c_int(51) # Allow certain request or usage
DFTI_AVOID = _ctypes.c_int(52) # Avoid certain request or usage
DFTI_NONE = _ctypes.c_int(53) # none certain request or usage
DFTI_CCS_FORMAT = _ctypes.c_int(54) # ccs format for real DFT
DFTI_PACK_FORMAT = _ctypes.c_int(55) # pack format for real DFT
DFTI_PERM_FORMAT = _ctypes.c_int(56) # perm format for real DFT
DFTI_CCE_FORMAT = _ctypes.c_int(57) # cce format for real DFT
# and not scrambled:
# error values:
DFTI_NO_ERROR = _ctypes.c_int(0)
DFTI_MEMORY_ERROR = _ctypes.c_int(1)
DFTI_INVALID_CONFIGURATION = _ctypes.c_int(2)
DFTI_INCONSISTENT_CONFIGURATION = _ctypes.c_int(3)
DFTI_MULTITHREADED_ERROR = _ctypes.c_int(4)
DFTI_BAD_DESCRIPTOR = _ctypes.c_int(5)
DFTI_UNIMPLEMENTED = _ctypes.c_int(6)
DFTI_MKL_INTERNAL_ERROR = _ctypes.c_int(7)
DFTI_NUMBER_OF_THREADS_ERROR = _ctypes.c_int(8)
DFTI_1D_LENGTH_EXCEEDS_INT32 = _ctypes.c_int(9)
def DftiErrorMessage(e):
""" Intel MKL DFT error messages.
Parameters
----------
e : int
DFT error code
Returns
-------
None
"""
if e == DFTI_NO_ERROR.value:
pass
elif e == DFTI_MEMORY_ERROR.value:
print("DFTI Error : Memory error")
elif e == DFTI_INVALID_CONFIGURATION.value:
print("DFTI Error : Invalid configuration")
elif e == DFTI_INCONSISTENT_CONFIGURATION.value:
print("DFTI Error : Inconsistent configuration")
elif e == DFTI_MULTITHREADED_ERROR.value:
print("DFTI Error : Multithreaded error")
elif e == DFTI_BAD_DESCRIPTOR.value:
print("DFTI Error : Bad descriptor")
elif e == DFTI_UNIMPLEMENTED.value:
print("DFTI Error : Unimplemented")
elif e == DFTI_MKL_INTERNAL_ERROR.value:
print("DFTI Error : MKL internal error")
elif e == DFTI_NUMBER_OF_THREADS_ERROR.value:
print("DFTI Error : Number of threads error")
elif e == DFTI_1D_LENGTH_EXCEEDS_INT32.value:
print("DFTI Error : 1D length exceeds int32")
else:
print("Unknown error code")
|
the-stack_0_27561
|
# coding: utf-8
"""
Swagger Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class HasOnlyReadOnly(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
HasOnlyReadOnly - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'bar': 'str',
'foo': 'str'
}
self.attribute_map = {
'bar': 'bar',
'foo': 'foo'
}
self._bar = None
self._foo = None
@property
def bar(self):
"""
Gets the bar of this HasOnlyReadOnly.
:return: The bar of this HasOnlyReadOnly.
:rtype: str
"""
return self._bar
@property
def foo(self):
"""
Gets the foo of this HasOnlyReadOnly.
:return: The foo of this HasOnlyReadOnly.
:rtype: str
"""
return self._foo
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
the-stack_0_27562
|
import logging
import subprocess

log = logging.getLogger(__name__)


class ExecErrorException(Exception):
    """Raised when a local command exits with a non-zero status.

    Minimal local definition; the original exception class is not shown in this snippet.
    """


# Execute local command
def run_command(command, no_wrapper=False, silent=False):
    """exec command
    """
    if no_wrapper:
        proc = subprocess.Popen(command, bufsize=0, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        if stderr:
            log.error("Error running command %s %s", ' '.join(command), stderr)
        return stdout, stderr, proc.returncode
    else:
        # subprocess.getstatusoutput replaces the legacy Python 2 'commands' module
        (ret, output) = subprocess.getstatusoutput(command)
        if ret:
            log.error("Executing command %s: %s", command, output)
            if not silent:
                raise ExecErrorException
        return output, None
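# Hedged usage sketch: the no_wrapper branch returns (stdout, stderr, returncode),
# while the default branch returns (output, None).
if __name__ == '__main__':
    out, err, rc = run_command(['echo', 'hello'], no_wrapper=True)
    print(out, err, rc)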
|
the-stack_0_27563
|
import json
import os
import random
import subprocess
from enum import Enum
from typing import List, Set
import numpy as np
from tqdm import tqdm
import torch
from torchvision import transforms
from torchvision.transforms import Compose
import h5py
import nltk
import torchmeta.datasets.helpers as datasets
from gensim import corpora
from gensim.utils import tokenize
from nltk.corpus import stopwords
from torchmeta.transforms import Categorical, ClassSplitter
from torchmeta.utils.data import (BatchMetaDataLoader, ClassDataset,
CombinationMetaDataset, Dataset)
from transformers import BertTokenizer, BertModel
from torch.utils.data import DataLoader
import torch
def get_dataset(args):
"""Return the appropriate dataset, with preprocessing transforms
Returns:
- train_loader (BatchMetaDataLoader): Train dataloader
- val_loader (BatchMetaDataLoader): Validation dataloader
- test_loader (BatchMetaDataLoader): Test dataloader
- dictionary: token2id dict for word tokenisation if not BERT (else None)
"""
dataset = args.dataset
data_dir = args.data_dir
json_path = args.json_path
num_way = args.num_ways
num_shots = args.num_shots
num_shots_test = args.num_shots_test
text_encoder = args.text_encoder
text_type = args.text_type
remove_stop_words = args.remove_stop_words
if dataset == "cub":
train, val, test, dictionary = get_CUB(data_dir, num_way, num_shots,
num_shots_test)
elif dataset == "zanim":
train, val, test, dictionary = get_zanim(data_dir, json_path, num_way,
num_shots, num_shots_test,
text_encoder, text_type,
remove_stop_words,
args.image_embedding_model)
elif dataset == "supervised-zanim":
train, val, test = get_supervised_zanim(data_dir, json_path,
text_encoder, text_type,
remove_stop_words,
args.image_embedding_model,
args.device)
if text_encoder != 'BERT':
raise NotImplementedError()
dictionary = {}
dataloaders = tuple(
DataLoader(d,
batch_size=args.batch_size,
num_workers=args.num_workers,
shuffle=True) for d in [train, val, test])
return tuple(x for x in [*dataloaders, dictionary])
else:
raise NotImplementedError()
train_loader = BatchMetaDataLoader(train,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers)
val_loader = BatchMetaDataLoader(val,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers)
test_loader = BatchMetaDataLoader(test,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers)
return train_loader, val_loader, test_loader, dictionary
def _convert_zanim_arguments(text_encoder: str, text_type: List[str]):
token_mode = TokenisationMode.BERT if text_encoder == "BERT" else TokenisationMode.STANDARD
modes = {
"description": DescriptionMode.FULL_DESCRIPTION,
"label": DescriptionMode.LABEL,
"common_name": DescriptionMode.COMMON_NAME
}
try:
description_mode = set([modes[l] for l in text_type])
except KeyError:
raise NameError(f"Invalid text type used")
return (token_mode, description_mode)
def get_supervised_zanim(data_dir: str, json_path: str, text_encoder: str,
text_type: str, remove_stop_words: bool,
image_embedding_model: str, device: str):
splits = []
_, description_mode = _convert_zanim_arguments(text_encoder, text_type)
for (train, val, test) in [(True, False, False), (False, True, False),
(False, False, True)]:
splits.append(
SupervisedZanim(root=data_dir,
json_path=json_path,
train=train,
val=val,
test=test,
description_mode=description_mode,
remove_stop_words=remove_stop_words,
image_embedding_model=image_embedding_model,
device=device))
return tuple(splits)
def get_zanim(data_dir: str, json_path: str, num_way: int, num_shots: int,
num_shots_test: int, text_encoder: str, text_type: str,
remove_stop_words: bool, image_embedding_model: str):
token_mode, description_mode = _convert_zanim_arguments(
text_encoder,
text_type,
)
train = Zanim(root=data_dir,
json_path=json_path,
num_classes_per_task=num_way,
meta_train=True,
tokenisation_mode=token_mode,
description_mode=description_mode,
remove_stop_words=remove_stop_words,
image_embedding_model=image_embedding_model)
train_split = ClassSplitter(train,
shuffle=True,
num_test_per_class=num_shots_test,
num_train_per_class=num_shots)
train_split.seed(0)
val = Zanim(root=data_dir,
json_path=json_path,
num_classes_per_task=num_way,
meta_val=True,
tokenisation_mode=token_mode,
description_mode=description_mode,
remove_stop_words=remove_stop_words,
image_embedding_model=image_embedding_model)
val_split = ClassSplitter(val,
shuffle=True,
num_test_per_class=int(100 / num_way),
num_train_per_class=num_shots)
val_split.seed(0)
test = Zanim(root=data_dir,
json_path=json_path,
num_classes_per_task=num_way,
meta_test=True,
tokenisation_mode=token_mode,
description_mode=description_mode,
remove_stop_words=remove_stop_words,
image_embedding_model=image_embedding_model)
test_split = ClassSplitter(test,
shuffle=True,
num_test_per_class=int(100 / num_way),
num_train_per_class=num_shots)
test_split.seed(0)
dictionary = {} if text_encoder == "BERT" else train.dictionary
return train_split, val_split, test_split, dictionary
def get_CUB(data_dir: str, num_way: int, num_shots: int, num_shots_test: int):
"""Need to fix to get text as well
"""
train = datasets.cub(data_dir,
ways=num_way,
shots=num_shots,
test_shots=num_shots_test,
meta_split="train",
download=True)
val = datasets.cub(data_dir,
ways=num_way,
shots=num_shots,
test_shots=int(100 / num_shots),
meta_split="val",
download=True)
    test = datasets.cub(data_dir,
ways=num_way,
shots=num_shots,
test_shots=int(100 / num_shots),
meta_split="test",
download=True)
dictionary = {}
return train, val, test, dictionary
class TokenisationMode(Enum):
BERT = 1
STANDARD = 2
class DescriptionMode(Enum):
FULL_DESCRIPTION = 1
LABEL = 2
COMMON_NAME = 3
class SupervisedZanim(torch.utils.data.Dataset):
def __init__(self,
root,
json_path="train.json",
train=True,
val=False,
test=False,
description_mode=[DescriptionMode.FULL_DESCRIPTION],
remove_stop_words=False,
image_embedding_model="resnet-152",
device=None,
pooling=lambda x: torch.mean(x, dim=1)):
super().__init__()
if (train + val + test > 1) or (train + val + test == 0):
raise ValueError(
"Only a single value of train, val, test can be true")
self._zcd = ZanimClassDataset(
root,
json_path,
meta_train=train,
meta_val=val,
meta_test=test,
tokenisation_mode=TokenisationMode.BERT,
description_mode=description_mode,
remove_stop_words=remove_stop_words,
image_embedding_model=image_embedding_model)
self.model = BertModel.from_pretrained('bert-base-uncased')
print("Precomputing BERT embeddings")
if device is not None:
self.model.to(device)
batch_size = 64
self._bert_embeddings = torch.zeros(len(self._zcd.descriptions),
self.model.config.hidden_size)
for start in range(0, len(self._zcd.descriptions), batch_size):
with torch.no_grad():
end = min(len(self._zcd.descriptions), start + batch_size)
des, mas = (self._zcd.descriptions[start:end].to(device),
self._zcd.mask[start:end].to(device)
) if device is not None else (
self._zcd.descriptions[start:end],
self._zcd.mask[start:end])
self._bert_embeddings[start:end] = pooling(
self.model(input_ids=des,
attention_mask=mas,
output_attentions=False).last_hidden_state)
print("Completed embedding computation")
self._bert_embeddings = self._bert_embeddings.cpu()
def __len__(self):
return len(self._zcd.category_id)
def __getitem__(self, index):
category_id = self._zcd.category_id[index]
image_id = self._zcd.image_ids[index]
bert_index = np.where(self._zcd.categories == category_id)[0][0]
return self._zcd.image_embeddings[image_id], self._bert_embeddings[
bert_index], category_id
class Zanim(CombinationMetaDataset):
def __init__(self,
root,
json_path="train.json",
num_classes_per_task=None,
meta_train=False,
meta_val=False,
meta_test=False,
tokenisation_mode: TokenisationMode = TokenisationMode.BERT,
description_mode: Set[DescriptionMode] = [
DescriptionMode.FULL_DESCRIPTION
],
remove_stop_words=True,
image_embedding_model='resnet-152',
target_transform=None,
categories=None):
"""
:param root: the path to the root directory of the dataset
:param json_path: the path to the json file containing the annotations
"""
if target_transform is None:
target_transform = Categorical(num_classes_per_task)
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
self.dataset = ZanimClassDataset(
root,
json_path,
meta_train=meta_train,
meta_val=meta_val,
meta_test=meta_test,
tokenisation_mode=tokenisation_mode,
description_mode=description_mode,
image_embedding_model=image_embedding_model,
remove_stop_words=remove_stop_words,
categories=categories)
super().__init__(self.dataset,
num_classes_per_task,
target_transform=target_transform)
@property
def dictionary(self):
return self.dataset.dictionary.token2id
class ZanimClassDataset(ClassDataset):
def __init__(self,
root: str,
json_path: str,
meta_train=False,
meta_val=False,
meta_test=False,
tokenisation_mode=TokenisationMode.BERT,
description_mode: Set[DescriptionMode] = [
DescriptionMode.FULL_DESCRIPTION
],
remove_stop_words=True,
image_embedding_model: str = "resnet-152",
categories=None):
super().__init__(meta_train=meta_train,
meta_val=meta_val,
meta_test=meta_test)
if not (root in json_path):
json_path = os.path.join(root, json_path)
self.root = root
self.tokenisation_mode = tokenisation_mode
with open(json_path) as annotations:
annotations = json.load(annotations)
self.annotations = annotations
N = len(annotations['categories'])
self.categories = np.arange(N)
np.random.shuffle(self.categories)
if categories is None:
if meta_train:
self.categories = self.categories[:int(0.6 * N)]
elif meta_val:
self.categories = self.categories[int(0.6 * N):int(0.8 * N)]
elif meta_test:
self.categories = self.categories[int(0.8 * N):]
else:
raise ValueError(
"One of meta_train, meta_val, meta_test must be true")
else:
self.categories = categories
        self.categories = np.sort(self.categories)
self.image_ids = [
i['id'] for i in annotations['images']
if annotations['annotations'][i['id']]['category_id'] in
self.categories
]
self.category_id = [
annotations['annotations'][id]['category_id']
for id in self.image_ids
]
self.category_id_map = {}
for id in range(len(self.image_ids)):
cat_id = self.category_id[id]
image_id = self.image_ids[id]
if cat_id in self.category_id_map:
self.category_id_map[cat_id].append(image_id)
else:
self.category_id_map[cat_id] = [image_id]
for cat_id in self.category_id_map.keys():
self.category_id_map[cat_id] = np.array(
self.category_id_map[cat_id])
self.descriptions = self._get_descriptions(self.annotations,
self.categories,
description_mode)
print("Copying image embeddings to local disk")
image_embedding_file = f"image-embedding-{image_embedding_model}.hdf5"
local_image_embedding_path = os.path.join('/content',
image_embedding_file)
if not os.path.exists(local_image_embedding_path):
self._copy_image_embeddings(image_embedding_file)
self.image_embeddings = h5py.File(local_image_embedding_path,
'r')['images']
self._num_classes = len(self.categories)
if remove_stop_words:
nltk.download('stopwords')
stop_words = stopwords.words('english')
self.descriptions = [
" ".join([w for w in s.split() if not (w in stop_words)])
for s in self.descriptions
]
if tokenisation_mode == TokenisationMode.BERT:
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
tokens = tokenizer(self.descriptions,
return_token_type_ids=False,
return_tensors="pt",
padding=True,
truncation=True)
self.descriptions = tokens['input_ids']
self.mask = tokens['attention_mask']
elif tokenisation_mode == TokenisationMode.STANDARD:
            # tokenize() returns a generator, so len(tokenize(d)) cannot be used directly
lengths = [
sum([1 for w in tokenize(d)]) for d in self.descriptions
]
max_length = max(lengths)
self.descriptions = [
d.lower() + " " +
" ".join(["<PAD>" for _ in range(max_length - lengths[i])])
for (i, d) in enumerate(self.descriptions)
]
# the dictionary should be formed across all folds, so recompute set of 'descriptions' across all categories
full_set_of_descriptions = self._get_descriptions(
self.annotations, np.arange(N), description_mode)
self.dictionary = corpora.Dictionary(
[tokenize(d.lower()) for d in full_set_of_descriptions])
self.dictionary.add_documents([tokenize("<PAD>")])
self.descriptions = [[
self.dictionary.token2id[z] for z in tokenize(d)
] for d in self.descriptions]
print("Completed tokenisation")
def _get_descriptions(self, annotations, categories, description_mode):
descriptions = ["" for i in categories]
description_json_key_map = {
DescriptionMode.FULL_DESCRIPTION: 'description',
DescriptionMode.LABEL: 'name',
DescriptionMode.COMMON_NAME: 'common_name'
}
description_mode = [
description_json_key_map[d] for d in description_mode
]
descriptions = [
" ".join(
[annotations['categories'][i][d] for d in description_mode])
for i in categories
]
return descriptions
def _copy_image_embeddings(self, image_file):
self._run_command(
["cp", os.path.join(self.root, image_file), "/content/"])
def _run_command(self, command):
pipes = subprocess.Popen(command, stderr=subprocess.PIPE)
_, err = pipes.communicate()
if pipes.returncode != 0:
raise Exception(
f"Error in running custom command {' '.join(command)}: {err.strip()}"
)
def __len__(self):
return self._num_classes
@property
def num_classes(self):
return self._num_classes
def __getitem__(self, index):
indices = self.category_id_map[self.categories[index %
self.num_classes]]
mask = self.mask[
index] if self.tokenisation_mode == TokenisationMode.BERT else None
return ZanimDataset(index,
indices,
self.image_embeddings[indices],
self.descriptions[index],
index % self.num_classes,
attention_mask=mask,
target_transform=self.get_target_transform(index))
class ZanimDataset(Dataset):
def __init__(self,
index,
image_ids,
data,
description,
category_id,
attention_mask=None,
target_transform=None):
super().__init__(index, target_transform=target_transform)
self.data = data
self.category_id = category_id
self.description = description
self.image_ids = image_ids
self.attention_mask = attention_mask
def __len__(self):
return len(self.data)
def __getitem__(self, index):
target = self.category_id
if self.target_transform is not None:
target = self.target_transform(target)
if self.attention_mask is None:
return (self.image_ids[index], torch.tensor(self.description),
self.data[index]), target
else:
return (self.image_ids[index], torch.tensor(self.description),
torch.tensor(self.attention_mask),
self.data[index]), target
if __name__ == "__main__":
import sys
import argparse
parser = argparse.ArgumentParser(description="data module test")
parser.add_argument("--text_type", type=str, default="label")
parser.add_argument("--json_path", type=str, default="train.json")
parser.add_argument("--text_encoder", type=str, default="BERT")
parser.add_argument(
"--data_dir",
type=str,
default="/content/drive/My Drive/MSc ML/NLP/NLP project/Dataset")
parser.add_argument('--remove_stop_words',
action='store_true',
help="whether to remove stop words")
args = parser.parse_args(sys.argv[1:])
args.device = torch.device("cuda")
text_type = args.text_type
text_encoder = args.text_encoder
num_way = 3
num_shots = 5
num_shots_test = 32
batch_size = 5
remove_stop_words = True if args.remove_stop_words else False
data_dir = args.data_dir
train, val, test = get_supervised_zanim(data_dir,
args.json_path,
text_encoder,
text_type,
remove_stop_words,
image_embedding_model='resnet-152',
device=args.device)
for batch_idx, batch in enumerate(DataLoader(train, batch_size=10)):
image, text, cat = batch
print(image.shape)
print(text.shape)
print(cat)
if batch_idx > 10:
break
train, val, test, dictionary = get_zanim(
data_dir,
args.json_path,
num_way,
num_shots,
num_shots_test,
text_encoder,
text_type,
remove_stop_words,
image_embedding_model="resnet-152")
print("dictionary", len(dictionary), dictionary)
train_loader = BatchMetaDataLoader(train,
batch_size=batch_size,
shuffle=True,
num_workers=0)
# check first couple batches
for batch_idx, batch in enumerate(train_loader):
train_inputs, train_targets = batch['train']
print("train targets")
print(train_targets.shape, train_targets)
test_inputs, test_targets = batch['test']
if text_encoder == "BERT":
idx, text, attn_mask, im = train_inputs
print("idx")
print(idx.shape, idx)
print("text")
print(text.shape, text)
print("attn_mask")
print(attn_mask.shape, attn_mask)
print("im")
print(im.shape, im)
else:
idx, text, im = train_inputs
print("idx")
print(idx.shape, idx)
print("text")
print(text.shape, text)
print("im")
print(im.shape, im)
if batch_idx > 1:
break
|
the-stack_0_27564
|
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import torch_glow
import torch
import unittest
from collections import OrderedDict
def create_model(x, relu):
""" x is an example input, relu is whether or not to include a fused relu"""
with torch.no_grad():
x_size = len(x.size())
conv_op = None
if x_size == 4:
conv_op = torch.nn.Conv2d(3, 10, 3)
elif x_size == 5:
conv_op = torch.nn.Conv3d(3, 10, 3)
else:
print(f"Only 2d and 3d conv supported, got {x_size}d inputs")
exit(1)
conv_op.weight.random_(-1, 1)
conv_op.bias.data.random_(-1, 1)
model = None
if relu:
model = torch.nn.Sequential(
OrderedDict([("conv", conv_op), ("relu", torch.nn.ReLU())])
)
model = torch.quantization.fuse_modules(model, [["conv", "relu"]])
else:
model = torch.nn.Sequential(OrderedDict([("conv", conv_op)]))
model = torch.quantization.QuantWrapper(model)
model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
torch.quantization.prepare(model, inplace=True)
model(x)
torch.quantization.convert(model, inplace=True)
return model
def run_to_glow(m, x):
"""Trace the model m with input x and call to_glow"""
traced_m = torch.jit.trace(m, (x))
spec = torch.classes.glow.GlowCompileSpec()
spec.setBackend("Interpreter")
sim = torch.classes.glow.SpecInputMeta()
sim.set(x.size(), torch.float32)
inputs = [sim]
spec.addInputs(inputs)
lowered_module = torch_glow.to_glow(traced_m, {"forward": spec})
return lowered_module
class TestConvToGlow(unittest.TestCase):
def test_conv2d_to_glow(self):
x = torch.randn([1, 3, 30, 30])
m = create_model(x, False)
run_to_glow(m, x)
def test_conv2d_relu_to_glow(self):
x = torch.randn([1, 3, 30, 30])
m = create_model(x, True)
run_to_glow(m, x)
def test_conv3d_to_glow(self):
x = torch.randn([1, 3, 30, 30, 30])
m = create_model(x, False)
run_to_glow(m, x)
def test_conv3d_relu_to_glow(self):
x = torch.randn([1, 3, 30, 30, 30])
m = create_model(x, True)
run_to_glow(m, x)
|
the-stack_0_27565
|
import os
import time
from selenium.webdriver.support.select import Select
from Data.parameters import Data
from filenames import file_extention
from get_dir import pwd
from reuse_func import GetData
class download_clusterwise_csv():
def __init__(self, driver):
self.driver = driver
def test_clusterwise(self):
p = pwd()
self.cal = GetData()
self.fname = file_extention()
self.driver.find_element_by_xpath(Data.hyper).click()
self.cal.page_loading(self.driver)
self.driver.find_element_by_id(Data.sr_cluster_btn).click()
self.cal.page_loading(self.driver)
self.driver.find_element_by_id(Data.Download).click()
time.sleep(15)
self.filename = p.get_download_dir() + "/" + self.fname.composite_cluster()
self.cal.page_loading(self.driver)
return os.path.isfile(self.filename)
def remove_file(self):
os.remove(self.filename)
|
the-stack_0_27568
|
# -*- coding: utf-8 -*-
# @File : SimplePostPowershellModule.py
# @Date : 2019/1/12
# @Desc :
from Lib.ModuleAPI import *
class PostModule(PostMSFPowershellFunctionModule):
    NAME = "Get hostnames in the domain"
    DESC = "The module retrieves all hostnames of the domain the host belongs to; if the host is not in a domain, the script may throw an error."
    MODULETYPE = TAG2CH.Discovery
    PLATFORM = ["Windows"]  # platform
    PERMISSIONS = ["Administrator", "SYSTEM", ]  # required permissions
    ATTCK = ["T1018"]  # ATT&CK technique
README = ["https://www.yuque.com/funnywolfdoc/viperdoc/sp72lr"]
REFERENCES = ["https://attack.mitre.org/techniques/T1018/"]
AUTHOR = "Viper"
def __init__(self, sessionid, hid, custom_param):
super().__init__(sessionid, hid, custom_param)
        self.set_script("PowerView.ps1")  # set the PowerShell script to run on the target
        self.set_execute_string('Get-NetComputer')
def check(self):
"""执行前的检查函数"""
session = Session(self._sessionid)
if session.is_in_domain:
self.set_execute_string('Get-NetComputer')
return True, None
else:
            return False, "This module only supports Windows Meterpreter sessions and the host must be in a domain"
def callback(self, status, message, data):
if status:
powershell_json_output = data.split("\n")
if isinstance(powershell_json_output, list) and len(powershell_json_output) > 0:
try:
for one in powershell_json_output:
if one is None or len(one) == 0:
continue
else:
                            outputstr = "Hostname: {}".format(one)
                            self.log_good(outputstr)
except Exception as E:
pass
else:
self.log_error("脚本无有效输出")
self.log_error(powershell_json_output)
else:
self.log_error("模块执行失败")
self.log_error(message)
|
the-stack_0_27570
|
import time
import os
import logging
import json
from smac.scenario.scenario import Scenario
__author__ = "Marius Lindauer"
__copyright__ = "Copyright 2016, ML4AAD"
__license__ = "3-clause BSD"
__maintainer__ = "Marius Lindauer"
__email__ = "[email protected]"
__version__ = "0.0.1"
class Stats(object):
"""
All statistics collected during configuration run.
Written to output-directory to be restored
Attributes
----------
ta_runs
wallclock_time_used
ta_time_used
inc_changed
"""
    def __init__(self, scenario: Scenario):
        """Constructor
        Parameters
        ----------
        scenario : Scenario
        """
self.__scenario = scenario
self.ta_runs = 0
self.n_configs = 0
self.wallclock_time_used = 0
self.ta_time_used = 0
self.inc_changed = 0
# debug stats
self._n_configs_per_intensify = 0
self._n_calls_of_intensify = 0
## exponential moving average
self._ema_n_configs_per_intensifiy = 0
self._EMA_ALPHA = 0.2
self._start_time = None
self._logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
def save(self):
"""
Save all relevant attributes to json-dictionary.
"""
if not self.__scenario.output_dir_for_this_run:
self._logger.debug("No scenario.output_dir: not saving stats!")
return
# Set used_wallclock_time
self.wallclock_time_used = self.get_used_wallclock_time()
data = {}
for v in vars(self):
if not v in ['_Stats__scenario', '_logger', '_start_time']:
data[v] = getattr(self, v)
path = os.path.join(
self.__scenario.output_dir_for_this_run, "stats.json"
)
self._logger.debug("Saving stats to %s", path)
with open(path, 'w') as fh:
json.dump(data, fh)
def load(self, fn=None):
"""
Load all attributes from dictionary in file into stats-object.
Parameters
----------
fn: string or None
Path to file to load stats from. If no path is given, the path given
in the current scenario is used.
"""
if not fn:
fn = os.path.join(
self.__scenario.output_dir_for_this_run, "stats.json"
)
with open(fn, 'r') as fh:
data = json.load(fh)
# Set attributes
for key in data:
if hasattr(self, key):
setattr(self, key, data[key])
else:
raise ValueError("Stats does not recognize {}".format(key))
def start_timing(self):
"""
Starts the timer (for the runtime configuration budget).
        Subtracting wallclock time used so we can continue loaded Stats.
"""
if self.__scenario:
self._start_time = time.time() - self.wallclock_time_used
else:
raise ValueError("Scenario is missing")
def get_used_wallclock_time(self):
"""Returns used wallclock time
Returns
-------
wallclock_time : int
used wallclock time in sec
"""
return time.time() - self._start_time
def get_remaing_time_budget(self):
"""Subtracts the runtime configuration budget with the used wallclock
time"""
if self.__scenario:
return self.__scenario.wallclock_limit - (time.time() - self._start_time)
else:
raise "Scenario is missing"
def get_remaining_ta_runs(self):
"""Subtract the target algorithm runs in the scenario with the used ta
runs"""
if self.__scenario:
return self.__scenario.ta_run_limit - self.ta_runs
else:
raise "Scenario is missing"
def get_remaining_ta_budget(self):
"""Subtracts the ta running budget with the used time"""
if self.__scenario:
return self.__scenario.algo_runs_timelimit - self.ta_time_used
def is_budget_exhausted(self):
"""Check whether the configuration budget for time budget, ta_budget
and ta_runs is empty
Returns
-------
exhaustedness: boolean
true if one of the budgets is exhausted
"""
return self.get_remaing_time_budget() < 0 or \
self.get_remaining_ta_budget() < 0 or \
self.get_remaining_ta_runs() <= 0
def update_average_configs_per_intensify(self, n_configs: int):
"""Updates statistics how many configurations on average per used in
intensify
Parameters
----------
n_configs: int
number of configurations in current intensify
"""
self._n_calls_of_intensify += 1
self._n_configs_per_intensify += n_configs
if self._n_calls_of_intensify == 1:
self._ema_n_configs_per_intensifiy = n_configs
else:
self._ema_n_configs_per_intensifiy = (1 - self._EMA_ALPHA) * self._ema_n_configs_per_intensifiy \
+ self._EMA_ALPHA * n_configs
def print_stats(self, debug_out:bool=False):
"""Prints all statistics
Parameters
---------
        debug_out: bool
            use logging.debug instead of logging.info if set to true
"""
log_func = self._logger.info
if debug_out:
log_func = self._logger.debug
log_func("##########################################################")
log_func("Statistics:")
log_func("#Incumbent changed: %d" %(self.inc_changed - 1)) # first change is default conf
log_func("#Target algorithm runs: %d / %s" %(self.ta_runs, str(self.__scenario.ta_run_limit)))
log_func("#Configurations: %d" %(self.n_configs))
log_func("Used wallclock time: %.2f / %.2f sec " %(time.time() - self._start_time, self.__scenario.wallclock_limit))
log_func("Used target algorithm runtime: %.2f / %.2f sec" %(self.ta_time_used, self.__scenario.algo_runs_timelimit))
self._logger.debug("Debug Statistics:")
if self._n_calls_of_intensify > 0:
self._logger.debug("Average Configurations per Intensify: %.2f" %(self._n_configs_per_intensify / self._n_calls_of_intensify))
self._logger.debug("Exponential Moving Average of Configurations per Intensify: %.2f" %(self._ema_n_configs_per_intensifiy))
log_func("##########################################################")
|
the-stack_0_27571
|
#following tutorial: https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.html
import numpy as np
import cv2
scalar1 = np.uint8([250])
scalar2 = np.uint8([10])
print (cv2.add(scalar1,scalar2)) #cv addition: 250 + 10 = 260 => 255
print (scalar1 + scalar2) #numpy addition: 250 + 10 = 260 % 256 = 4
#since they are 8 bit arrays, values max out at 2^8 - 1 = 255 (OpenCV saturates, NumPy wraps around)
#Both images should be of same depth and type, or second image can just be a scalar value.
#openCV provides better results when adding images
img1 = cv2.imread("frame0.jpg")
img2 = cv2.imread("opencv-logo-white.png")
#need same sized image in order to add them
img1_resized = cv2.resize(img1, (img2.shape[1], img2.shape[0]))
addedImg = cv2.add(img1_resized,img2)
#inputs: img1, alpha, img2, beta, gamma
#equation: dst = alpha * img1 + beta * img2 + gamma
blendedImg = cv2.addWeighted(img1_resized, 0.7, img2, 0.3, 0)
cv2.imshow("added Image",addedImg)
cv2.imshow("blended Image",blendedImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
#selecting region of image (roi)
rows,cols,channels = img2.shape
roi = img1[0:rows,0:cols]
#creates mask and inverse mask
img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
#blacks out area of roi
img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)
#get only the logo
img2_fg = cv2.bitwise_and(img2,img2,mask=mask)
dst = cv2.add(img1_bg,img2_fg)
img1[0:rows,0:cols] = dst
cv2.imshow("grayed image",img2gray)
cv2.imshow("mask",mask)
cv2.imshow("inverse mask",mask_inv)
cv2.imshow("background image",img1_bg)
cv2.imshow("foreground image",img2_fg)
cv2.imshow("destination roi image",dst)
cv2.imshow("full result image" , img1)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
the-stack_0_27572
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for TF-Agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as cs
import contextlib
import distutils.version
import functools
import importlib
import os
from typing import Dict, Optional, Text
from absl import logging
import numpy as np
import tensorflow as tf
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts
from tf_agents.typing import types
from tf_agents.utils import nest_utils
from tf_agents.utils import object_identity
# pylint:disable=g-direct-tensorflow-import
from tensorflow.core.protobuf import struct_pb2 # TF internal
from tensorflow.python import tf2 as tf2_checker # TF internal
from tensorflow.python.eager import monitoring # TF internal
from tensorflow.python.saved_model import nested_structure_coder # TF internal
# pylint:enable=g-direct-tensorflow-import
try:
importlib.import_module('tf_agents.utils.allow_tf1')
except ImportError:
_TF1_MODE_ALLOWED = False
else:
_TF1_MODE_ALLOWED = True
tf_agents_gauge = monitoring.BoolGauge('/tensorflow/agents/agents',
'TF-Agents usage', 'method')
MISSING_RESOURCE_VARIABLES_ERROR = """
Resource variables are not enabled. Please enable them by adding the following
code to your main() method:
tf.compat.v1.enable_resource_variables()
For unit tests, subclass `tf_agents.utils.test_utils.TestCase`.
"""
def check_tf1_allowed():
"""Raises an error if running in TF1 (non-eager) mode and this is disabled."""
if _TF1_MODE_ALLOWED:
return
if not tf2_checker.enabled():
raise RuntimeError(
'You are using TF1 or running TF with eager mode disabled. '
'TF-Agents no longer supports TF1 mode (except for a shrinking list of '
'internal allowed users). If this negatively affects you, please '
'reach out to the TF-Agents team. Otherwise please use TF2.')
def resource_variables_enabled():
return tf.compat.v1.resource_variables_enabled()
_IN_LEGACY_TF1 = (
tf.__git_version__ != 'unknown'
and tf.__version__ != '1.15.0'
and (distutils.version.LooseVersion(tf.__version__) <=
distutils.version.LooseVersion('1.15.0.dev20190821')))
def in_legacy_tf1():
return _IN_LEGACY_TF1
def set_default_tf_function_parameters(*args, **kwargs):
"""Generates a decorator that sets default parameters for `tf.function`.
Args:
*args: default arguments for the `tf.function`.
**kwargs: default keyword arguments for the `tf.function`.
Returns:
Function decorator with preconfigured defaults for `tf.function`.
"""
def maybe_wrap(fn):
"""Helper function."""
wrapped = [None]
@functools.wraps(fn)
def preconfigured_function(*fn_args, **fn_kwargs):
if tf.executing_eagerly():
return fn(*fn_args, **fn_kwargs)
if wrapped[0] is None:
wrapped[0] = function(*((fn,) + args), **kwargs)
return wrapped[0](*fn_args, **fn_kwargs) # pylint: disable=not-callable
return preconfigured_function
return maybe_wrap
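# Hedged usage sketch (illustrative, not part of the original module): the
# decorator returned by `set_default_tf_function_parameters` wraps `my_fn` in
# `common.function` with the given defaults the first time it is called outside
# eager mode; `my_fn` below is a hypothetical example.
#
# @set_default_tf_function_parameters(autograph=False)
# def my_fn(x):
#   return x + 1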
def function(*args, **kwargs):
"""Wrapper for tf.function with TF Agents-specific customizations.
Example:
```python
@common.function()
def my_eager_code(x, y):
...
```
Args:
*args: Args for tf.function.
**kwargs: Keyword args for tf.function.
Returns:
A tf.function wrapper.
"""
autograph = kwargs.pop('autograph', False)
experimental_relax_shapes = kwargs.pop('experimental_relax_shapes', True)
return tf.function( # allow-tf-function
*args,
autograph=autograph,
experimental_relax_shapes=experimental_relax_shapes,
**kwargs)
def has_eager_been_enabled():
"""Returns true iff in TF2 or in TF1 with eager execution enabled."""
with tf.init_scope():
return tf.executing_eagerly()
def function_in_tf1(*args, **kwargs):
"""Wrapper that returns common.function if using TF1.
This allows for code that assumes autodeps is available to be written once,
in the same way, for both TF1 and TF2.
Usage:
```python
train = function_in_tf1()(agent.train)
loss = train(experience)
```
Args:
*args: Arguments for common.function.
**kwargs: Keyword arguments for common.function.
Returns:
A callable that wraps a function.
"""
def maybe_wrap(fn):
"""Helper function."""
# We're in TF1 mode and want to wrap in common.function to get autodeps.
wrapped = [None]
@functools.wraps(fn)
def with_check_resource_vars(*fn_args, **fn_kwargs):
"""Helper function for calling common.function."""
check_tf1_allowed()
if has_eager_been_enabled():
# We're either in eager mode or in tf.function mode (no in-between); so
# autodep-like behavior is already expected of fn.
return fn(*fn_args, **fn_kwargs)
if not resource_variables_enabled():
raise RuntimeError(MISSING_RESOURCE_VARIABLES_ERROR)
if wrapped[0] is None:
wrapped[0] = function(*((fn,) + args), **kwargs)
return wrapped[0](*fn_args, **fn_kwargs) # pylint: disable=not-callable
return with_check_resource_vars
return maybe_wrap
def create_variable(name,
initial_value=0,
shape=(),
dtype=tf.int64,
use_local_variable=False,
trainable=False,
initializer=None,
unique_name=True):
"""Create a variable."""
check_tf1_allowed()
if has_eager_been_enabled():
if initializer is None:
if shape:
initial_value = tf.constant(initial_value, shape=shape, dtype=dtype)
else:
initial_value = tf.convert_to_tensor(initial_value, dtype=dtype)
else:
if callable(initializer):
initial_value = lambda: initializer(shape, dtype)
else:
initial_value = initializer
return tf.compat.v2.Variable(
initial_value, trainable=trainable, dtype=dtype, name=name)
collections = [tf.compat.v1.GraphKeys.GLOBAL_VARIABLES]
if use_local_variable:
collections = [tf.compat.v1.GraphKeys.LOCAL_VARIABLES]
if initializer is None:
initializer = tf.compat.v1.initializers.constant(initial_value, dtype=dtype)
if shape is None:
shape = tf.convert_to_tensor(initial_value).shape
if unique_name:
name = tf.compat.v1.get_default_graph().unique_name(name)
return tf.compat.v1.get_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
collections=collections,
use_resource=True,
trainable=trainable)
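# Hedged usage sketch (the name `train_step_counter` is illustrative, not part of
# this module): `create_variable` is commonly used for step counters.
#   train_step_counter = create_variable('train_step_counter', initial_value=0,
#                                        dtype=tf.int64, trainable=False)
#   train_step_counter.assign_add(1)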
def soft_variables_update(source_variables,
target_variables,
tau=1.0,
tau_non_trainable=None,
sort_variables_by_name=False):
"""Performs a soft/hard update of variables from the source to the target.
Note: **when using this function with TF DistributionStrategy**, the
`strategy.extended.update` call (below) needs to be done in a cross-replica
context, i.e. inside a merge_call. Please use the Periodically class defined in
this module, which provides this wrapper for you.
For each variable v_t in target variables and its corresponding variable v_s
in source variables, a soft update is:
v_t = (1 - tau) * v_t + tau * v_s
When tau is 1.0 (the default), then it does a hard update:
v_t = v_s
Args:
source_variables: list of source variables.
target_variables: list of target variables.
tau: A float scalar in [0, 1]. When tau is 1.0 (the default), we do a hard
update. This is used for trainable variables.
tau_non_trainable: A float scalar in [0, 1] for non_trainable variables. If
None, will copy from tau.
sort_variables_by_name: A bool, when True would sort the variables by name
before doing the update.
Returns:
An operation that updates target variables from source variables.
Raises:
ValueError: if `tau not in [0, 1]`.
ValueError: if `len(source_variables) != len(target_variables)`.
ValueError: "Method requires being in cross-replica context,
use get_replica_context().merge_call()" if used inside replica context.
"""
if tau < 0 or tau > 1:
raise ValueError('Input `tau` should be in [0, 1].')
if tau_non_trainable is None:
tau_non_trainable = tau
if tau_non_trainable < 0 or tau_non_trainable > 1:
raise ValueError('Input `tau_non_trainable` should be in [0, 1].')
updates = []
op_name = 'soft_variables_update'
if tau == 0.0 or not source_variables or not target_variables:
return tf.no_op(name=op_name)
if len(source_variables) != len(target_variables):
raise ValueError(
'Source and target variable lists have different lengths: '
'{} vs. {}'.format(len(source_variables), len(target_variables)))
if sort_variables_by_name:
source_variables = sorted(source_variables, key=lambda x: x.name)
target_variables = sorted(target_variables, key=lambda x: x.name)
strategy = tf.distribute.get_strategy()
for (v_s, v_t) in zip(source_variables, target_variables):
v_t.shape.assert_is_compatible_with(v_s.shape)
def update_fn(v1, v2):
"""Update variables."""
# For not trainable variables do hard updates.
# This helps stabilize BatchNorm moving averages. TODO(b/144455039)
if not v1.trainable:
current_tau = tau_non_trainable
else:
current_tau = tau
if current_tau == 1.0:
return v1.assign(v2)
else:
return v1.assign((1 - current_tau) * v1 + current_tau * v2)
# TODO(b/142508640): remove this when b/142802462 is fixed.
# Workaround for b/142508640, only use extended.update for
# MirroredVariable variables (which are trainable variables).
# For other types of variables (i.e. SyncOnReadVariables, for example
# batch norm stats) do a regular assign, which will cause a sync and
# broadcast from replica 0, so will have slower performance but will be
# correct and not cause a failure.
if tf.distribute.has_strategy() and v_t.trainable:
# Assignment happens independently on each replica,
# see b/140690837 #46.
update = strategy.extended.update(v_t, update_fn, args=(v_s,))
else:
update = update_fn(v_t, v_s)
updates.append(update)
return tf.group(*updates, name=op_name)
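# Hedged usage sketch (`q_net` and `target_q_net` are assumed networks, not
# defined in this module): a soft target-network update with tau=0.005.
#   update_op = soft_variables_update(
#       q_net.trainable_variables, target_q_net.trainable_variables, tau=0.005)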
def join_scope(parent_scope, child_scope):
"""Joins a parent and child scope using `/`, checking for empty/none.
Args:
parent_scope: (string) parent/prefix scope.
child_scope: (string) child/suffix scope.
Returns:
joined scope: (string) parent and child scopes joined by /.
"""
if not parent_scope:
return child_scope
if not child_scope:
return parent_scope
return '/'.join([parent_scope, child_scope])
# TODO(b/138322868): Add an optional action_spec for validation.
def index_with_actions(q_values, actions, multi_dim_actions=False):
"""Index into q_values using actions.
Note: this supports multiple outer dimensions (e.g. time, batch etc).
Args:
q_values: A float tensor of shape [outer_dim1, ... outer_dimK, action_dim1,
..., action_dimJ].
actions: An int tensor of shape [outer_dim1, ... outer_dimK] if
multi_dim_actions=False [outer_dim1, ... outer_dimK, J] if
multi_dim_actions=True I.e. in the multidimensional case,
actions[outer_dim1, ... outer_dimK] is a vector [actions_1, ...,
actions_J] where each element actions_j is an action in the range [0,
num_actions_j). While in the single dimensional case, actions[outer_dim1,
... outer_dimK] is a scalar.
multi_dim_actions: whether the actions are multidimensional.
Returns:
A [outer_dim1, ... outer_dimK] tensor of q_values for the given actions.
Raises:
ValueError: If actions have unknown rank.
"""
if actions.shape.rank is None:
raise ValueError('actions should have known rank.')
batch_dims = actions.shape.rank
if multi_dim_actions:
# In the multidimensional case, the last dimension of actions indexes the
# vector of actions for each batch, so exclude it from the batch dimensions.
batch_dims -= 1
outer_shape = tf.shape(input=actions)
batch_indices = tf.meshgrid(
*[tf.range(outer_shape[i]) for i in range(batch_dims)], indexing='ij')
batch_indices = [tf.cast(tf.expand_dims(batch_index, -1), dtype=tf.int32)
for batch_index in batch_indices]
if not multi_dim_actions:
actions = tf.expand_dims(actions, -1)
# Cast actions to tf.int32 in order to avoid a TypeError in tf.concat.
actions = tf.cast(actions, dtype=tf.int32)
action_indices = tf.concat(batch_indices + [actions], -1)
return tf.gather_nd(q_values, action_indices)
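# Hedged worked example (values chosen purely for illustration):
#   q_values = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])  # [batch=2, num_actions=3]
#   actions = tf.constant([2, 0])
#   index_with_actions(q_values, actions)  # -> [3.0, 4.0]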
def periodically(body, period, name='periodically'):
"""Periodically performs the tensorflow op in `body`.
The body tensorflow op will be executed every `period` times the periodically
op is executed. More specifically, with `n` the number of times the op has
been executed, the body will be executed when `n` is a positive
multiple of `period` (i.e. there exists an integer `k > 0` such that
`k * period == n`).
If `period` is `None`, it will not perform any op and will return a
`tf.no_op()`.
If `period` is 1, it will just execute the body, and not create any counters
or conditionals.
Args:
body: callable that returns the tensorflow op to be performed every time an
internal counter is divisible by the period. The op must have no output
(for example, a tf.group()).
period: inverse frequency with which to perform the op.
name: name of the variable_scope.
Raises:
TypeError: if body is not a callable.
Returns:
An op that periodically performs the specified op.
"""
if tf.executing_eagerly():
if isinstance(period, tf.Variable):
return Periodically(body, period, name)
return EagerPeriodically(body, period)
else:
return Periodically(body, period, name)()
class Periodically(tf.Module):
"""Periodically performs the ops defined in `body`."""
def __init__(self, body, period, name='periodically'):
"""Periodically performs the ops defined in `body`.
The body tensorflow op will be executed every `period` times the
periodically op is executed. More specifically, with `n` the number of times
the op has been executed, the body will be executed when `n` is a positive
multiple of `period` (i.e. there exists an integer `k > 0` such that
`k * period == n`).
If `period` is `None`, it will not perform any op and will return a
`tf.no_op()`.
If `period` is 1, it will just execute the body, and not create any counters
or conditionals.
Args:
body: callable that returns the tensorflow op to be performed every time
an internal counter is divisible by the period. The op must have no
output (for example, a tf.group()).
period: inverse frequency with which to perform the op. It can be a Tensor
or a Variable.
name: name of the object.
Raises:
TypeError: if body is not a callable.
Returns:
An op that periodically performs the specified op.
"""
super(Periodically, self).__init__(name=name)
if not callable(body):
raise TypeError('body must be callable.')
self._body = body
self._period = period
self._counter = create_variable(self.name + '/counter', 0)
def __call__(self):
def call(strategy=None):
del strategy # unused
if self._period is None:
return tf.no_op()
if self._period == 1:
return self._body()
period = tf.cast(self._period, self._counter.dtype)
remainder = tf.math.mod(self._counter.assign_add(1), period)
return tf.cond(
pred=tf.equal(remainder, 0), true_fn=self._body, false_fn=tf.no_op)
# TODO(b/129083817) add an explicit unit test to ensure correct behavior
ctx = tf.distribute.get_replica_context()
if ctx:
return tf.distribute.get_replica_context().merge_call(call)
else:
return call()
class EagerPeriodically(object):
"""EagerPeriodically performs the ops defined in `body`.
Only works in Eager mode.
"""
def __init__(self, body, period):
"""EagerPeriodically performs the ops defined in `body`.
Args:
body: callable that returns the tensorflow op to be performed every time
an internal counter is divisible by the period. The op must have no
output (for example, a tf.group()).
period: inverse frequency with which to perform the op. Must be a simple
python int/long.
Raises:
TypeError: if body is not a callable.
Returns:
An op that periodically performs the specified op.
"""
if not callable(body):
raise TypeError('body must be callable.')
self._body = body
self._period = period
self._counter = 0
def __call__(self):
if self._period is None:
return tf.no_op()
if self._period == 1:
return self._body()
self._counter += 1
if self._counter % self._period == 0:
self._body()
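# Hedged usage sketch (`update_target_fn` is an assumed callable returning a TF
# op, not defined here): run a target update once every 100 calls.
#   update_target = Periodically(update_target_fn, period=100)
#   update_target()  # executes `update_target_fn` only when the internal
#                    # counter reaches a multiple of `period`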
def clip_to_spec(value, spec):
"""Clips value to a given bounded tensor spec.
Args:
value: (tensor) value to be clipped.
spec: (BoundedTensorSpec) spec containing min. and max. values for clipping.
Returns:
clipped_value: (tensor) `value` clipped to be compatible with `spec`.
"""
return tf.clip_by_value(value, spec.minimum, spec.maximum)
def spec_means_and_magnitudes(action_spec):
"""Get the center and magnitude of the ranges in action spec."""
action_means = tf.nest.map_structure(
lambda spec: (spec.maximum + spec.minimum) / 2.0, action_spec)
action_magnitudes = tf.nest.map_structure(
lambda spec: (spec.maximum - spec.minimum) / 2.0, action_spec)
return np.array(
action_means, dtype=np.float32), np.array(
action_magnitudes, dtype=np.float32)
def scale_to_spec(tensor, spec):
"""Shapes and scales a batch into the given spec bounds.
Args:
tensor: A [batch x n] tensor with values in the range of [-1, 1].
spec: (BoundedTensorSpec) to use for scaling the action.
Returns:
A batch scaled the given spec bounds.
"""
tensor = tf.reshape(tensor, [-1] + spec.shape.as_list())
# Scale the tensor.
means, magnitudes = spec_means_and_magnitudes(spec)
tensor = means + magnitudes * tensor
# Set type.
return tf.cast(tensor, spec.dtype)
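# Hedged usage sketch (`network_output` and `action_spec` are assumed objects;
# `action_spec` would be a BoundedTensorSpec): map tanh-squashed outputs in
# [-1, 1] onto the spec's [minimum, maximum] range.
#   raw_actions = tf.tanh(network_output)
#   actions = clip_to_spec(scale_to_spec(raw_actions, action_spec), action_spec)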
def ornstein_uhlenbeck_process(initial_value,
damping=0.15,
stddev=0.2,
seed=None,
scope='ornstein_uhlenbeck_noise'):
"""An op for generating noise from a zero-mean Ornstein-Uhlenbeck process.
The Ornstein-Uhlenbeck process is a process that generates temporally
correlated noise via a random walk with damping. This process describes
the velocity of a particle undergoing brownian motion in the presence of
friction. This can be useful for exploration in continuous action environments
with momentum.
The temporal update equation is:
`x_next = (1 - damping) * x + N(0, std_dev)`
Args:
initial_value: Initial value of the process.
damping: The rate at which the noise trajectory is damped towards the mean.
We must have 0 <= damping <= 1, where a value of 0 gives an undamped
random walk and a value of 1 gives uncorrelated Gaussian noise. Hence in
most applications a small non-zero value is appropriate.
stddev: Standard deviation of the Gaussian component.
seed: Seed for random number generation.
scope: Scope of the variables.
Returns:
An op that generates noise.
"""
if tf.executing_eagerly():
return OUProcess(initial_value, damping, stddev, seed, scope)
else:
return OUProcess(initial_value, damping, stddev, seed, scope)()
class OUProcess(tf.Module):
"""A zero-mean Ornstein-Uhlenbeck process."""
def __init__(self,
initial_value,
damping=0.15,
stddev=0.2,
seed=None,
scope='ornstein_uhlenbeck_noise'):
"""A Class for generating noise from a zero-mean Ornstein-Uhlenbeck process.
The Ornstein-Uhlenbeck process is a process that generates temporally
correlated noise via a random walk with damping. This process describes
the velocity of a particle undergoing brownian motion in the presence of
friction. This can be useful for exploration in continuous action
environments with momentum.
The temporal update equation is:
`x_next = (1 - damping) * x + N(0, std_dev)`
Args:
initial_value: Initial value of the process.
damping: The rate at which the noise trajectory is damped towards the
mean. We must have 0 <= damping <= 1, where a value of 0 gives an
undamped random walk and a value of 1 gives uncorrelated Gaussian noise.
Hence in most applications a small non-zero value is appropriate.
stddev: Standard deviation of the Gaussian component.
seed: Seed for random number generation.
scope: Scope of the variables.
"""
super(OUProcess, self).__init__()
self._damping = damping
self._stddev = stddev
self._seed = seed
with tf.name_scope(scope):
self._x = tf.compat.v2.Variable(
initial_value=initial_value, trainable=False)
def __call__(self):
noise = tf.random.normal(
shape=self._x.shape,
stddev=self._stddev,
dtype=self._x.dtype,
seed=self._seed)
return self._x.assign((1. - self._damping) * self._x + noise)
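# Hedged usage sketch: temporally correlated exploration noise for a
# 2-dimensional continuous action (shape and parameters are illustrative).
#   ou_noise = OUProcess(tf.zeros([2]), damping=0.15, stddev=0.2)
#   noisy_action = action + ou_noise()  # `action` is an assumed policy output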
def log_probability(distributions, actions, action_spec):
"""Computes log probability of actions given distribution.
Args:
distributions: A possibly batched tuple of distributions.
actions: A possibly batched action tuple.
action_spec: A nested tuple representing the action spec.
Returns:
A Tensor representing the log probability of each action in the batch.
"""
outer_rank = nest_utils.get_outer_rank(actions, action_spec)
def _compute_log_prob(single_distribution, single_action):
# sum log-probs over everything but the batch
single_log_prob = single_distribution.log_prob(single_action)
rank = single_log_prob.shape.rank
reduce_dims = list(range(outer_rank, rank))
return tf.reduce_sum(
input_tensor=single_log_prob,
axis=reduce_dims)
nest_utils.assert_same_structure(distributions, actions)
log_probs = [
_compute_log_prob(dist, action)
for (dist, action
) in zip(tf.nest.flatten(distributions), tf.nest.flatten(actions))
]
# sum log-probs over action tuple
total_log_probs = tf.add_n(log_probs)
return total_log_probs
# TODO(ofirnachum): Move to distribution utils.
def entropy(distributions, action_spec):
"""Computes total entropy of distribution.
Args:
distributions: A possibly batched tuple of distributions.
action_spec: A nested tuple representing the action spec.
Returns:
A Tensor representing the entropy of each distribution in the batch.
Assumes actions are independent, so that marginal entropies of each action
may be summed.
"""
nested_modes = tf.nest.map_structure(lambda d: d.mode(), distributions)
outer_rank = nest_utils.get_outer_rank(nested_modes, action_spec)
def _compute_entropy(single_distribution):
entropies = single_distribution.entropy()
# Sum entropies over everything but the batch.
rank = entropies.shape.rank
reduce_dims = list(range(outer_rank, rank))
return tf.reduce_sum(input_tensor=entropies, axis=reduce_dims)
entropies = [
_compute_entropy(dist) for dist in tf.nest.flatten(distributions)
]
# Sum entropies over action tuple.
total_entropies = tf.add_n(entropies)
return total_entropies
def discounted_future_sum(values, gamma, num_steps):
"""Discounted future sum of batch-major values.
Args:
values: A Tensor of shape [batch_size, total_steps] and dtype float32.
gamma: A float discount value.
num_steps: A positive integer number of future steps to sum.
Returns:
A Tensor of shape [batch_size, total_steps], where each entry `(i, j)` is
the result of summing the entries of values starting from
`gamma^0 * values[i, j]` to
`gamma^(num_steps - 1) * values[i, j + num_steps - 1]`,
with zeros padded to values.
For example, values=[5, 6, 7] and gamma=0.9 result in the sequence:
```python
[(5 * 0.9^0 + 6 * 0.9^1 + 7 * 0.9^2), (6 * 0.9^0 + 7 * 0.9^1), 7 * 0.9^0]
```
Raises:
ValueError: If values is not of rank 2.
"""
if values.get_shape().rank != 2:
raise ValueError('Input must be rank 2 tensor. Got %d.' %
values.get_shape().rank)
(batch_size, total_steps) = values.get_shape().as_list()
num_steps = tf.minimum(num_steps, total_steps)
discount_filter = tf.reshape(gamma**tf.cast(tf.range(num_steps), tf.float32),
[-1, 1, 1])
padded_values = tf.concat([values, tf.zeros([batch_size, num_steps - 1])], 1)
convolved_values = tf.squeeze(
tf.nn.conv1d(
input=tf.expand_dims(padded_values, -1),
filters=discount_filter,
stride=1,
padding='VALID'), -1)
return convolved_values
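# Hedged worked example matching the numbers in the docstring:
#   values = tf.constant([[5.0, 6.0, 7.0]])
#   discounted_future_sum(values, gamma=0.9, num_steps=3)
#   # -> [[5 + 0.9*6 + 0.81*7, 6 + 0.9*7, 7.0]] == [[16.07, 12.3, 7.0]]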
def discounted_future_sum_masked(values, gamma, num_steps, episode_lengths):
"""Discounted future sum of batch-major values.
Args:
values: A Tensor of shape [batch_size, total_steps] and dtype float32.
gamma: A float discount value.
num_steps: A positive integer number of future steps to sum.
episode_lengths: A vector shape [batch_size] with num_steps per episode.
Returns:
A Tensor of shape [batch_size, total_steps], where each entry is the
discounted sum as in discounted_future_sum, except with values after
the end of episode_lengths masked to 0.
Raises:
ValueError: If values is not of rank 2, or if total_steps is not defined.
"""
if values.shape.rank != 2:
raise ValueError('Input must be a rank 2 tensor. Got %d.' % values.shape.rank)
total_steps = tf.compat.dimension_value(values.shape[1])
if total_steps is None:
raise ValueError('total_steps dimension in input '
'values[batch_size, total_steps] must be fully defined.')
episode_mask = tf.cast(
tf.sequence_mask(episode_lengths, total_steps), tf.float32)
values *= episode_mask
return discounted_future_sum(values, gamma, num_steps)
def shift_values(values, gamma, num_steps, final_values=None):
"""Shifts batch-major values in time by some amount.
Args:
values: A Tensor of shape [batch_size, total_steps] and dtype float32.
gamma: A float discount value.
num_steps: A nonnegative integer amount to shift values by.
final_values: A float32 Tensor of shape [batch_size] corresponding to the
values at step num_steps + 1. Defaults to None (all zeros).
Returns:
A Tensor of shape [batch_size, total_steps], where each entry (i, j) is
gamma^num_steps * values[i, j + num_steps] if j + num_steps < total_steps;
gamma^(total_steps - j) * final_values[i] otherwise.
Raises:
ValueError: If values is not of rank 2.
"""
if values.get_shape().rank != 2:
raise ValueError('Input must be rank 2 tensor. Got %d.' %
values.get_shape().rank)
(batch_size, total_steps) = values.get_shape().as_list()
num_steps = tf.minimum(num_steps, total_steps)
if final_values is None:
final_values = tf.zeros([batch_size])
padding_exponent = tf.expand_dims(
tf.cast(tf.range(num_steps, 0, -1), tf.float32), 0)
final_pad = tf.expand_dims(final_values, 1) * gamma**padding_exponent
return tf.concat([
gamma**tf.cast(num_steps, tf.float32) * values[:, num_steps:], final_pad
], 1)
def get_episode_mask(time_steps):
"""Create a mask that is 0.0 for all final steps, 1.0 elsewhere.
Args:
time_steps: A TimeStep namedtuple representing a batch of steps.
Returns:
A float32 Tensor with 0s where step_type == LAST and 1s otherwise.
"""
episode_mask = tf.cast(
tf.not_equal(time_steps.step_type, ts.StepType.LAST), tf.float32)
return episode_mask
def get_contiguous_sub_episodes(next_time_steps_discount):
"""Computes mask on sub-episodes which includes only contiguous components.
Args:
next_time_steps_discount: Tensor of shape [batch_size, total_steps]
corresponding to environment discounts on next time steps (i.e.
next_time_steps.discount).
Returns:
A float Tensor of shape [batch_size, total_steps] specifying mask including
only contiguous components. Each row will be of the form
[1.0] * a + [0.0] * b, where a >= 1 and b >= 0, and in which the initial
sequence of ones corresponds to a contiguous sub-episode.
"""
episode_end = tf.equal(next_time_steps_discount,
tf.constant(0, dtype=next_time_steps_discount.dtype))
mask = tf.math.cumprod(
1.0 - tf.cast(episode_end, tf.float32), axis=1, exclusive=True)
return mask
def convert_q_logits_to_values(logits, support):
"""Converts a set of Q-value logits into Q-values using the provided support.
Args:
logits: A Tensor representing the Q-value logits.
support: The support of the underlying distribution.
Returns:
A Tensor containing the expected Q-values.
"""
probabilities = tf.nn.softmax(logits)
return tf.reduce_sum(input_tensor=support * probabilities, axis=-1)
def generate_tensor_summaries(tag, tensor, step):
"""Generates various summaries of `tensor` such as histogram, max, min, etc.
Args:
tag: A namescope tag for the summaries.
tensor: The tensor to generate summaries of.
step: Variable to use for summaries.
"""
with tf.name_scope(tag):
tf.compat.v2.summary.histogram(name='histogram', data=tensor, step=step)
tf.compat.v2.summary.scalar(
name='mean', data=tf.reduce_mean(input_tensor=tensor), step=step)
tf.compat.v2.summary.scalar(
name='mean_abs',
data=tf.reduce_mean(input_tensor=tf.abs(tensor)),
step=step)
tf.compat.v2.summary.scalar(
name='max', data=tf.reduce_max(input_tensor=tensor), step=step)
tf.compat.v2.summary.scalar(
name='min', data=tf.reduce_min(input_tensor=tensor), step=step)
def summarize_tensor_dict(tensor_dict: Dict[Text, types.Tensor],
step: Optional[types.Tensor]):
"""Generates summaries of all tensors in `tensor_dict`.
Args:
tensor_dict: A dictionary {name, tensor} to summarize.
step: The global step to use for the summaries.
"""
for tag in tensor_dict:
generate_tensor_summaries(tag, tensor_dict[tag], step)
# TODO(kbanoop): Support batch mode
def compute_returns(rewards, discounts):
"""Compute the return from each index in an episode.
Args:
rewards: Tensor of per-timestep reward in the episode.
discounts: Tensor of per-timestep discount factor. Should be 0 for final
step of each episode.
Returns:
Tensor of per-timestep cumulative returns.
"""
rewards.shape.assert_is_compatible_with(discounts.shape)
if (not rewards.shape.is_fully_defined() or
not discounts.shape.is_fully_defined()):
check_shape = tf.compat.v1.assert_equal(
tf.shape(input=rewards), tf.shape(input=discounts))
else:
check_shape = tf.no_op()
with tf.control_dependencies([check_shape]):
# Reverse the rewards and discounting for accumulation.
rewards, discounts = tf.reverse(rewards, [0]), tf.reverse(discounts, [0])
def discounted_accumulate_rewards(next_step_return, reward_and_discount):
reward, discount = reward_and_discount
return next_step_return * discount + reward
# Cumulatively sum discounted reward R_t.
# R_t = r_t + discount * (r_t+1 + discount * (r_t+2 * discount( ...
# As discount is 0 for terminal states, ends of episode will not include
# reward from subsequent timesteps.
returns = tf.scan(
discounted_accumulate_rewards, [rewards, discounts],
initializer=tf.constant(0, dtype=discounts.dtype))
returns = tf.reverse(returns, [0])
return returns
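# Hedged worked example: with rewards [1, 1, 1] and discounts [0.9, 0.9, 0.0]
# (a 0 discount marks the episode end), the per-timestep returns are
# [1 + 0.9*(1 + 0.9*1), 1 + 0.9*1, 1] = [2.71, 1.9, 1.0].
#   compute_returns(tf.constant([1.0, 1.0, 1.0]), tf.constant([0.9, 0.9, 0.0]))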
def initialize_uninitialized_variables(session, var_list=None):
"""Initialize any pending variables that are uninitialized."""
if var_list is None:
var_list = tf.compat.v1.global_variables() + tf.compat.v1.local_variables()
is_initialized = session.run(
[tf.compat.v1.is_variable_initialized(v) for v in var_list])
uninitialized_vars = []
for flag, v in zip(is_initialized, var_list):
if not flag:
uninitialized_vars.append(v)
if uninitialized_vars:
logging.info('uninitialized_vars: %s',
', '.join([str(x) for x in uninitialized_vars]))
session.run(tf.compat.v1.variables_initializer(uninitialized_vars))
class Checkpointer(object):
"""Checkpoints training state, policy state, and replay_buffer state."""
def __init__(self, ckpt_dir, max_to_keep=20, **kwargs):
"""A class for making checkpoints.
If `ckpt_dir` doesn't exist, it is created.
Args:
ckpt_dir: The directory to save checkpoints.
max_to_keep: Maximum number of checkpoints to keep (once more than
`max_to_keep` checkpoints have been saved, the oldest ones are deleted).
**kwargs: Items to include in the checkpoint.
"""
self._checkpoint = tf.train.Checkpoint(**kwargs)
if not tf.io.gfile.exists(ckpt_dir):
tf.io.gfile.makedirs(ckpt_dir)
self._manager = tf.train.CheckpointManager(
self._checkpoint, directory=ckpt_dir, max_to_keep=max_to_keep)
if self._manager.latest_checkpoint is not None:
logging.info('Checkpoint available: %s', self._manager.latest_checkpoint)
self._checkpoint_exists = True
else:
logging.info('No checkpoint available at %s', ckpt_dir)
self._checkpoint_exists = False
self._load_status = self._checkpoint.restore(
self._manager.latest_checkpoint)
@property
def checkpoint_exists(self):
return self._checkpoint_exists
@property
def manager(self):
"""Returns the underlying tf.train.CheckpointManager."""
return self._manager
def initialize_or_restore(self, session=None):
"""Initialize or restore graph (based on checkpoint if exists)."""
self._load_status.initialize_or_restore(session)
return self._load_status
def save(self, global_step: tf.Tensor,
options: tf.train.CheckpointOptions = None):
"""Save state to checkpoint."""
saved_checkpoint = self._manager.save(
checkpoint_number=global_step, options=options)
self._checkpoint_exists = True
logging.info('%s', 'Saved checkpoint: {}'.format(saved_checkpoint))
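# Hedged usage sketch (`agent` and `global_step` are assumed objects; the
# checkpoint directory is illustrative):
#   train_checkpointer = Checkpointer(
#       ckpt_dir='/tmp/train', max_to_keep=3,
#       agent=agent, global_step=global_step)
#   train_checkpointer.initialize_or_restore()
#   train_checkpointer.save(global_step)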
def replicate(tensor, outer_shape):
"""Replicates a tensor so as to match the given outer shape.
Example:
- t = [[1, 2, 3], [4, 5, 6]] (shape = [2, 3])
- outer_shape = [2, 1]
The shape of the resulting tensor is: [2, 1, 2, 3]
and its content is: [[t], [t]]
Args:
tensor: A tf.Tensor.
outer_shape: Outer shape given as a 1D tensor of type list, numpy or
tf.Tensor.
Returns:
The replicated tensor.
Raises:
ValueError: when the outer shape is incorrect.
"""
outer_shape = tf.convert_to_tensor(value=outer_shape)
if len(outer_shape.shape) != 1:
raise ValueError('The outer shape must be a 1D tensor')
outer_ndims = int(outer_shape.shape[0])
tensor_ndims = len(tensor.shape)
# No need to replicate anything if there is no outer dim to add.
if outer_ndims == 0:
return tensor
# Calculate target shape of replicated tensor
target_shape = tf.concat([outer_shape, tf.shape(input=tensor)], axis=0)
# tf.tile expects `tensor` to be at least 1D
if tensor_ndims == 0:
tensor = tensor[None]
# Replicate tensor "t" along the 1st dimension.
tiled_tensor = tf.tile(tensor, [tf.reduce_prod(input_tensor=outer_shape)] +
[1] * (tensor_ndims - 1))
# Reshape to match outer_shape.
return tf.reshape(tiled_tensor, target_shape)
def assert_members_are_not_overridden(base_cls,
instance,
allowlist=(),
denylist=()):
"""Asserts public members of `base_cls` are not overridden in `instance`.
If both `allowlist` and `denylist` are empty, no public member of
`base_cls` can be overridden. If a `allowlist` is provided, only public
members in `allowlist` can be overridden. If a `denylist` is provided,
all public members except those in `denylist` can be overridden. Both
`allowlist` and `denylist` cannot be provided at the same time; if both are
given, a ValueError will be raised.
Args:
base_cls: A Base class.
instance: An instance of a subclass of `base_cls`.
allowlist: Optional list of `base_cls` members that can be overridden.
denylist: Optional list of `base_cls` members that cannot be overridden.
Raises:
ValueError if both allowlist and denylist are provided.
"""
if denylist and allowlist:
raise ValueError('Both `denylist` and `allowlist` cannot be provided.')
instance_type = type(instance)
subclass_members = set(instance_type.__dict__.keys())
public_members = set(
[m for m in base_cls.__dict__.keys() if not m.startswith('_')])
common_members = public_members & subclass_members
if allowlist:
common_members = common_members - set(allowlist)
elif denylist:
common_members = common_members & set(denylist)
overridden_members = [
m for m in common_members
if base_cls.__dict__[m] != instance_type.__dict__[m]
]
if overridden_members:
raise ValueError(
'Subclasses of {} cannot override most of its base members, but '
'{} overrides: {}'.format(base_cls, instance_type, overridden_members))
def element_wise_squared_loss(x, y):
return tf.compat.v1.losses.mean_squared_error(
x, y, reduction=tf.compat.v1.losses.Reduction.NONE)
def element_wise_huber_loss(x, y):
return tf.compat.v1.losses.huber_loss(
x, y, reduction=tf.compat.v1.losses.Reduction.NONE)
def transpose_batch_time(x):
"""Transposes the batch and time dimensions of a Tensor.
If the input tensor has rank < 2 it returns the original tensor. Retains as
much of the static shape information as possible.
Args:
x: A Tensor.
Returns:
x transposed along the first two dimensions.
"""
x_static_shape = x.get_shape()
if x_static_shape.rank is not None and x_static_shape.rank < 2:
return x
x_rank = tf.rank(x)
x_t = tf.transpose(a=x, perm=tf.concat(([1, 0], tf.range(2, x_rank)), axis=0))
x_t.set_shape(
tf.TensorShape(
[x_static_shape.dims[1].value,
x_static_shape.dims[0].value]).concatenate(x_static_shape[2:]))
return x_t
def save_spec(spec, file_path):
"""Saves the given spec nest as a StructProto.
**Note**: Currently this will convert BoundedTensorSpecs into regular
TensorSpecs.
Args:
spec: A nested structure of TensorSpecs.
file_path: Path to save the encoded spec to.
"""
signature_encoder = nested_structure_coder.StructureCoder()
spec = tensor_spec.from_spec(spec)
spec_proto = signature_encoder.encode_structure(spec)
dir_path = os.path.dirname(file_path)
if not tf.io.gfile.exists(dir_path):
tf.io.gfile.makedirs(dir_path)
with tf.compat.v2.io.gfile.GFile(file_path, 'wb') as gfile:
gfile.write(spec_proto.SerializeToString())
def load_spec(file_path):
"""Loads a data spec from a file.
**Note**: Types for Named tuple classes will not match. Users need to convert
to these manually:
# Convert from:
# 'tensorflow.python.saved_model.nested_structure_coder.Trajectory'
# to proper TrajectorySpec.
# trajectory_spec = trajectory.Trajectory(*spec)
Args:
file_path: Path to the saved data spec.
Returns:
A nested structure of TensorSpecs.
"""
with tf.compat.v2.io.gfile.GFile(file_path, 'rb') as gfile:
signature_proto = struct_pb2.StructuredValue.FromString(gfile.read())
signature_encoder = nested_structure_coder.StructureCoder()
return signature_encoder.decode_proto(signature_proto)
def extract_shared_variables(variables_1, variables_2):
"""Separates shared variables from the given collections.
Args:
variables_1: An iterable of Variables
variables_2: An iterable of Variables
Returns:
A Tuple of ObjectIdentitySets described by the set operations
```
(variables_1 - variables_2,
variables_2 - variables_1,
variables_1 & variables_2)
```
"""
var_refs1 = object_identity.ObjectIdentitySet(variables_1)
var_refs2 = object_identity.ObjectIdentitySet(variables_2)
shared_vars = var_refs1.intersection(var_refs2)
return (var_refs1.difference(shared_vars), var_refs2.difference(shared_vars),
shared_vars)
def check_no_shared_variables(network_1, network_2):
"""Checks that there are no shared trainable variables in the two networks.
Args:
network_1: A network.Network.
network_2: A network.Network.
Raises:
ValueError: if there are any common trainable variables.
ValueError: if one of the networks has not yet been built
(e.g. user must call `create_variables`).
"""
variables_1 = object_identity.ObjectIdentitySet(network_1.trainable_variables)
variables_2 = object_identity.ObjectIdentitySet(network_2.trainable_variables)
shared_variables = variables_1 & variables_2
if shared_variables:
raise ValueError(
'After making a copy of network \'{}\' to create a target '
'network \'{}\', the target network shares weights with '
'the original network. This is not allowed. If '
'you want to explicitly share weights with the target network, or '
'if your input network shares weights with others, please '
'provide a target network which explicitly, selectively, shares '
'layers/weights with the input network. If you are not intending to '
'share weights make sure all the weights are created inside the Network'
' since a copy will be created by creating a new Network with the same '
'args but a new name. Shared variables found: '
'\'{}\'.'.format(
network_1.name, network_2.name,
[x.name for x in shared_variables]))
def check_matching_networks(network_1, network_2):
"""Check that two networks have matching input specs and variables.
Args:
network_1: A network.Network.
network_2: A network.Network.
Raises:
ValueError: if the networks differ in input_spec, variables (number, dtype,
or shape).
ValueError: if either of the networks has not been built yet
(e.g. user must call `create_variables`).
"""
if network_1.input_tensor_spec != network_2.input_tensor_spec:
raise ValueError('Input tensor specs of network and target network '
'do not match: {} vs. {}.'.format(
network_1.input_tensor_spec,
network_2.input_tensor_spec))
if len(network_1.variables) != len(network_2.variables):
raise ValueError(
'Variables lengths do not match between Q network and target network: '
'{} vs. {}'.format(network_1.variables, network_2.variables))
for v1, v2 in zip(network_1.variables, network_2.variables):
if v1.dtype != v2.dtype or v1.shape != v2.shape:
raise ValueError(
'Variable dtypes or shapes do not match: {} vs. {}'.format(v1, v2))
def maybe_copy_target_network_with_checks(network, target_network=None,
name=None,
input_spec=None):
"""Copies the network into target if None and checks for shared variables."""
if target_network is None:
target_network = network.copy(name=name)
target_network.create_variables(input_spec)
# Copy may have been shallow, and variables may inadvertently be shared
# between the target and the original networks. This would be an unusual
# setup, so we throw an error to protect users from accidentally doing so.
# If you explicitly want this to be enabled, please open a feature request
# with the team.
check_no_shared_variables(network, target_network)
check_matching_networks(network, target_network)
return target_network
AggregatedLosses = cs.namedtuple(
'AggregatedLosses',
['total_loss', # Total loss = weighted + regularization
'weighted', # Weighted sum of per_example_loss by sample_weight.
'regularization', # Total of regularization losses.
])
def aggregate_losses(per_example_loss=None,
sample_weight=None,
global_batch_size=None,
regularization_loss=None):
"""Aggregates and scales per example loss and regularization losses.
If `global_batch_size` is given it would be used for scaling, otherwise it
would use the batch_dim of per_example_loss and number of replicas.
Args:
per_example_loss: Per-example loss [B] or [B, T, ...].
sample_weight: Optional weighting for each example, Tensor shaped [B] or
[B, T, ...], or a scalar float.
global_batch_size: Optional global batch size value. Defaults to (size of
the first dimension of `per_example_loss`) * (number of replicas).
regularization_loss: Regularization loss.
Returns:
An AggregatedLosses named tuple with scalar losses to optimize.
"""
total_loss, weighted_loss, reg_loss = None, None, None
if sample_weight is not None and not isinstance(sample_weight, tf.Tensor):
sample_weight = tf.convert_to_tensor(sample_weight, dtype=tf.float32)
# Compute loss that is scaled by global batch size.
if per_example_loss is not None:
loss_rank = per_example_loss.shape.rank
if sample_weight is not None:
weight_rank = sample_weight.shape.rank
# Expand `sample_weight` to be broadcastable to the shape of
# `per_example_loss`, to ensure that multiplication works properly.
if weight_rank > 0 and loss_rank > weight_rank:
for dim in range(weight_rank, loss_rank):
sample_weight = tf.expand_dims(sample_weight, dim)
# Sometimes we have an episode boundary or similar, and at this location
# the loss is nonsensical (i.e., inf or nan); and sample_weight is zero.
# In this case, we should respect the zero sample_weight and ignore the
# frame.
per_example_loss = tf.math.multiply_no_nan(
per_example_loss, sample_weight)
if loss_rank is not None and loss_rank == 0:
err_msg = (
'Need to use a loss function that computes losses per sample, ex: '
'replace losses.mean_squared_error with tf.math.squared_difference. '
'Invalid value passed for `per_example_loss`. Expected a tensor '
'with rank of at least 1, received: {}'.format(per_example_loss))
if tf.distribute.has_strategy():
raise ValueError(err_msg)
else:
logging.warning(err_msg)
# Add extra dimension to prevent error in compute_average_loss.
per_example_loss = tf.expand_dims(per_example_loss, 0)
elif loss_rank > 1:
# If per_example_loss is shaped [B, T, ...], we need to compute the mean
# across the extra dimensions, ex. time, as well.
per_example_loss = tf.reduce_mean(per_example_loss, range(1, loss_rank))
global_batch_size = global_batch_size and tf.cast(global_batch_size,
per_example_loss.dtype)
weighted_loss = tf.nn.compute_average_loss(
per_example_loss,
global_batch_size=global_batch_size)
total_loss = weighted_loss
# Add scaled regularization losses.
if regularization_loss is not None:
reg_loss = tf.nn.scale_regularization_loss(regularization_loss)
if total_loss is None:
total_loss = reg_loss
else:
total_loss += reg_loss
return AggregatedLosses(total_loss, weighted_loss, reg_loss)
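# Hedged usage sketch (`per_example_td_loss` and `weights` are assumed tensors):
#   losses = aggregate_losses(per_example_loss=per_example_td_loss,  # [B] or [B, T]
#                             sample_weight=weights)
#   total_loss = losses.total_loss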
def summarize_scalar_dict(name_data, step, name_scope='Losses/'):
if name_data:
with tf.name_scope(name_scope):
for name, data in name_data.items():
if data is not None:
tf.compat.v2.summary.scalar(
name=name, data=data, step=step)
@contextlib.contextmanager
def soft_device_placement():
"""Context manager for soft device placement, allowing summaries on CPU.
Eager and graph contexts have different default device placements. See
b/148408921 for details. This context manager should be used whenever using
summary writers contexts to make sure summaries work when executing on TPUs.
Yields:
None. Within the context, `tf.config.set_soft_device_placement(True)` is in effect.
"""
original_setting = tf.config.get_soft_device_placement()
try:
tf.config.set_soft_device_placement(True)
yield
finally:
tf.config.set_soft_device_placement(original_setting)
def deduped_network_variables(network, *args):
"""Returns a list of variables in net1 that are not in any other nets.
Args:
network: A Keras network.
*args: other networks to check for duplicate variables.
"""
other_vars = object_identity.ObjectIdentitySet(
[v for n in args for v in n.variables]) # pylint:disable=g-complex-comprehension
return [v for v in network.variables if v not in other_vars]
def safe_has_state(state):
"""Safely checks `state not in (None, (), [])`."""
# TODO(b/158804957): tf.function changes "s in ((),)" to a tensor bool expr.
# pylint: disable=literal-comparison
return state is not None and state is not () and state is not []
# pylint: enable=literal-comparison
|
the-stack_0_27574
|
from concurrent.futures import ThreadPoolExecutor
import numpy as np
from scipy.sparse import csr_matrix, vstack
def parallel_argsort(matrix: np.ndarray, n_jobs: int = 4) -> np.ndarray:
"""
Parallel argsort: the matrix is split into row batches and each batch is
argsorted (in descending order) by a separate worker.
Args:
matrix: matrix to sort along the last axis.
n_jobs: number of workers.
Returns:
for each row, the indices of its elements ordered from largest to smallest.
"""
def task(batch):
return np.argsort(batch, axis=-1)[:, ::-1]
sorted_ids = np.zeros(matrix.shape, dtype=int)
batch_size = int(np.ceil(matrix.shape[0] / n_jobs))
with ThreadPoolExecutor(max_workers=n_jobs) as pool:
res_iter = pool.map(
task, [matrix[i * batch_size : (i + 1) * batch_size] for i in range(n_jobs)]
)
for i, ids in enumerate(res_iter):
s, e = i * batch_size, (i + 1) * batch_size
sorted_ids[s:e] = ids
return sorted_ids
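# Hedged usage sketch: descending argsort of a [batch, vocab] score matrix.
#   scores = np.random.rand(8, 1000)
#   top_ids = parallel_argsort(scores, n_jobs=4)  # top_ids[:, 0] is the argmax per row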
def fast_np_sparse_batch_combine_two_dists(
batch_fwd_dist: np.ndarray, batch_bwd_dist: np.ndarray
) -> np.ndarray:
"""
Performs parallel combination of two distributions coming from backward and forward passes.
Used to combine forward and backward passes of recurrent neural networks.
Args:
batch_fwd_dist: distribution coming from the forward pass.
batch_bwd_dist: distribution coming from the backward pass.
Returns:
`numpy.ndarray` - combination of distributions.
"""
vs = batch_fwd_dist.shape[-1]
q_sparse = csr_matrix(
(np.logspace(0.1, 100, num=vs, base=1.057)[::-1], (range(vs), range(vs))),
shape=(vs, vs),
)
fwd_sorted_ids = parallel_argsort(batch_fwd_dist, n_jobs=20)
bwd_sorted_ids = parallel_argsort(batch_bwd_dist, n_jobs=20)
matrices = []
for sample_num, (fwd_ids, bwd_ids) in enumerate(
zip(fwd_sorted_ids, bwd_sorted_ids)
):
rows = np.hstack([fwd_ids, bwd_ids])
cols = np.hstack([np.arange(vs), np.arange(vs)])
sparse_matrix = csr_matrix(
(np.ones(2 * vs, dtype=bool), (rows, cols)), shape=(vs, vs)
).astype(np.int8)
matrices.append(sparse_matrix)
big_sparse_matrix = vstack(matrices)
batch_logits = (big_sparse_matrix * q_sparse).max(axis=-1).toarray()
return batch_logits.reshape(batch_fwd_dist.shape)
|
the-stack_0_27576
|
# 2019-11-12 01:01:32(JST)
import sys
# import collections
# import math
# from string import ascii_lowercase, ascii_uppercase, digits
# from bisect import bisect_left as bi_l, bisect_right as bi_r
# import itertools
# from functools import reduce
# import operator as op
# from scipy.misc import comb # float
# import numpy as np
def main():
n, *h = [int(x) for x in sys.stdin.read().split()]
# Collect peak heights (including boundary peaks) in `mountain` and interior
# valley heights in `valley`; the answer is sum(mountain) - sum(valley).
mountain = []
valley = []
flat = 0  # +1/-1 while walking a plateau that was entered from below/above
i = 0
while True:
if h[i] == h[i+1]:
i += 1
continue
elif h[i] < h[i+1]:
break
else:
mountain.append(h[i])
break
i = -1
while True:
if h[i-1] == h[i]:
i -= 1
continue
elif h[i-1] > h[i]:
break
else:
mountain.append(h[i])
break
for i in range(1, n-1):
if flat == 0:
if h[i-1] < h[i] > h[i+1]:
mountain.append(h[i])
elif h[i-1] > h[i] < h[i+1]:
valley.append(h[i])
elif h[i-1] < h[i] == h[i+1]:
flat = 1
elif h[i-1] > h[i] == h[i+1]:
flat = -1
else:
if flat == 1:
if h[i] > h[i+1]:
mountain.append(h[i])
flat = 0
elif h[i] < h[i+1]:
flat = 0
else:
continue
elif flat == -1:
if h[i] < h[i+1]:
valley.append(h[i])
flat = 0
elif h[i] > h[i+1]:
flat = 0
else:
continue
ans = sum(mountain) - sum(valley)
print(ans)
if __name__ == "__main__":
main()
|
the-stack_0_27577
|
import scrapy
import json
import pickle
import os
import ast
from urllib import parse
from scrapy.selector import Selector
class HainanSpider(scrapy.Spider):
name = "Hainan"
if not os.path.exists("../../data/HTML_pk/%s" % name):
os.makedirs("../../data/HTML_pk/%s" % name)
if not os.path.exists("../../data/text/%s" % name):
os.makedirs("../../data/text/%s" % name)
def start_requests(self):
total_page = 486
# total_page = 3
url_base = "http://www.hainan.gov.cn/u/search/wjk/rs?keywords=&docYear=&docName=&fwzh=&column=undefined&curPage={0}&PageSize=15"
for i in range(total_page):
yield scrapy.Request(url=url_base.format(i + 1), callback=self.parse)
def parse(self, response):
detail_page_links = []
for item in json.loads(response.text)["page"]["list"]:
UID = item["url"].split("/")[-1].split(".")[0]
item["date"] = None
item["url"] = response.urljoin(item["url"])
item["UID"] = UID
date = item["pubDate"]
if date and len(date) > 10:
date = date[:10]
item["date"] = date
item["FileNumber"] = item["c_wjbh"]
if "?" not in UID:
detail_page_links.append(item["url"])
item["crawl state"] = "half"
item["text length"] = 0
yield item
yield from response.follow_all(detail_page_links, callback=self.parse_content)
def parse_content(self, response):
UID = response.url.split("/")[-1].split(".")[0]
paragraph_list = response.css("div#zoom p *::text").getall()
attachment_link = response.css("div#zoom p a::attr(href)").getall()
if len(paragraph_list) == 0:
paragraph_list = response.css("table p *::text").getall()
if len(paragraph_list) == 0:
paragraph_list = response.css("p *::text").getall()
length = len("".join(paragraph_list))
if length > 0:
with open("../../data/text/%s/%s.txt" % (self.name, UID), "w") as f:
f.write("\n".join(paragraph_list))
with open("../../data/HTML_pk/%s/%s.pkl" % (self.name, UID), "wb") as f:
pickle.dump(response.text, f)
state = "full"
else:
state = "empty"
return {
"UID": UID,
"mainText": paragraph_list,
"attachment_link": attachment_link,
"crawl state": state,
"text length": length,
}
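# Hedged usage note: from inside a Scrapy project this spider would typically be
# launched with the standard CLI, e.g. `scrapy crawl Hainan -o hainan.json`
# (the output path is illustrative).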
|
the-stack_0_27578
|
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import lib
from pandas._libs.tslib import iNaT
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
IntervalIndex,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "John P. Doe"], [2, "Jane Dove"], [1, "John P. Doe"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected, check_categorical=True)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
            # changes to the original do not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._data.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
        # incorrectly inferring datetimelike-looking data when object dtype is
        # specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
        # even though the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
        # in theory this should be all nulls, but since
        # we are not specifying a dtype it is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
        # invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp("20130101"), 1], index=["a", "b"])
assert result["a"] == Timestamp("20130101")
assert result["b"] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M")
values2 = dates.view(np.ndarray).astype("datetime64[ns]")
expected = Series(values2, index=dates)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, dates)
tm.assert_series_equal(result, expected)
# GH 13876
        # coerce non-ns datetime64 to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, index=dates, dtype=object)
tm.assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([np.nan, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
assert str(Series(dr).iloc[0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
assert str(Series(dr).iloc[0].tz) == "US/Eastern"
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == "object"
assert s[2] is np.nan
assert "NaN" in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr)
assert s.dtype.name == "datetime64[ns, US/Eastern]"
assert s.dtype == "datetime64[ns, US/Eastern]"
assert is_datetime64tz_dtype(s.dtype)
assert "datetime64[ns, US/Eastern]" in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == "datetime64[ns]"
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[Series([True, True, False], index=s.index)]
tm.assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
tm.assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
tm.assert_series_equal(result, s)
# short str
assert "datetime64[ns, US/Eastern]" in str(s)
# formatting with NaT
result = s.shift()
assert "datetime64[ns, US/Eastern]" in str(result)
assert "NaT" in str(result)
# long str
t = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
assert "datetime64[ns, US/Eastern]" in str(t)
result = pd.DatetimeIndex(s, freq="infer")
tm.assert_index_equal(result, dr)
# inference
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
assert s.dtype == "datetime64[ns, US/Pacific]"
assert lib.infer_dtype(s, skipna=True) == "datetime64"
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
assert s.dtype == "object"
assert lib.infer_dtype(s, skipna=True) == "datetime"
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series(pd.DatetimeIndex(["NaT", "NaT"], tz="US/Eastern"))
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = f"{dtype}[{unit}]"
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = Series([arg], dtype="datetime64[ns, CET]")
expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET")
tm.assert_series_equal(result, expected)
def test_constructor_datetime64_bigendian(self):
# GH#30976
ms = np.datetime64(1, "ms")
arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
result = Series(arr)
expected = Series([Timestamp(ms)])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])
def test_construction_interval(self, interval_constructor):
# construction from interval & array of intervals
intervals = interval_constructor.from_breaks(np.arange(3), closed="right")
result = Series(intervals)
assert result.dtype == "interval[int64]"
tm.assert_index_equal(Index(result.values), Index(intervals))
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_interval(self, data_constructor):
# GH 23563: consistent closed results in interval dtype
data = [pd.Interval(0, 1), pd.Interval(0, 2), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(IntervalArray(data))
assert result.dtype == "interval[float64]"
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_interval_mixed_closed(self, data_constructor):
# GH 23563: mixed closed results in object dtype (not interval dtype)
data = [pd.Interval(0, 1, closed="both"), pd.Interval(0, 2, closed="neither")]
result = Series(data_constructor(data))
assert result.dtype == object
assert result.tolist() == data
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert("UTC"), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_period(self, data_constructor):
data = [pd.Period("2000", "D"), pd.Period("2001", "D"), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(period_array(data))
tm.assert_series_equal(result, expected)
assert result.dtype == "Period[D]"
def test_constructor_period_incompatible_frequency(self):
data = [pd.Period("2000", "D"), pd.Period("2001", "A")]
result = pd.Series(data)
assert result.dtype == object
assert result.tolist() == data
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range("20130101", periods=5, freq="D")
s = Series(pi)
assert s.dtype == "Period[D]"
expected = Series(pi.astype(object))
tm.assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx, dtype=np.float64)
expected.iloc[0] = 0
expected.iloc[1] = 1
tm.assert_series_equal(result, expected)
def test_constructor_dict_list_value_explicit_dtype(self):
# GH 18625
d = {"a": [[2], [3], [4]]}
result = Series(d, index=["a"], dtype="object")
expected = Series(d, index=["a"])
tm.assert_series_equal(result, expected)
def test_constructor_dict_order(self):
# GH19018
        # initialization ordering: by insertion order if python >= 3.6, else
        # order by value
d = {"b": 1, "a": 0, "c": 2}
result = Series(d)
expected = Series([1, 0, 2], index=list("bac"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])
def test_constructor_dict_nan_key(self, value):
# GH 18480
d = {1: "a", value: "b", float("nan"): "c", 4: "d"}
result = Series(d).sort_values()
expected = Series(["a", "b", "c", "d"], index=[1, value, np.nan, 4])
tm.assert_series_equal(result, expected)
# MultiIndex:
d = {(1, 1): "a", (2, np.nan): "b", (3, value): "c"}
result = Series(d).sort_values()
expected = Series(
["a", "b", "c"], index=Index([(1, 1), (2, np.nan), (3, value)])
)
tm.assert_series_equal(result, expected)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d"))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
tm.assert_series_equal(result_datetime64, expected)
tm.assert_series_equal(result_datetime, expected)
tm.assert_series_equal(result_Timestamp, expected)
    def test_constructor_dict_tuple_indexer(self):
# GH 12948
data = {(1, 1, None): -1.0}
result = Series(data)
expected = Series(
-1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]])
)
tm.assert_series_equal(result, expected)
def test_constructor_mapping(self, non_mapping_dict_subclass):
# GH 29788
ndm = non_mapping_dict_subclass({3: "three"})
result = Series(ndm)
expected = Series(["three"], index=[3])
tm.assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
assert list(s) == data
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
assert tuple(s) == data
def test_constructor_dict_of_tuples(self):
data = {(1, 2): 3, (None, 5): 6}
result = Series(data).sort_values()
expected = Series([3, 6], index=MultiIndex.from_tuples([(1, 2), (None, 5)]))
tm.assert_series_equal(result, expected)
def test_constructor_set(self):
values = {1, 2, 3, 4, 5}
with pytest.raises(TypeError, match="'set' type is unordered"):
Series(values)
values = frozenset(values)
with pytest.raises(TypeError, match="'frozenset' type is unordered"):
Series(values)
# https://github.com/pandas-dev/pandas/issues/22698
@pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning")
def test_fromDict(self):
data = {"a": 0, "b": 1, "c": 2, "d": 3}
series = Series(data)
tm.assert_is_sorted(series.index)
data = {"a": 0, "b": "1", "c": "2", "d": datetime.now()}
series = Series(data)
assert series.dtype == np.object_
data = {"a": 0, "b": "1", "c": "2", "d": "3"}
series = Series(data)
assert series.dtype == np.object_
data = {"a": "0", "b": "1"}
series = Series(data, dtype=float)
assert series.dtype == np.float64
def test_fromValue(self, datetime_series):
nans = Series(np.NaN, index=datetime_series.index, dtype=np.float64)
assert nans.dtype == np.float_
assert len(nans) == len(datetime_series)
strings = Series("foo", index=datetime_series.index)
assert strings.dtype == np.object_
assert len(strings) == len(datetime_series)
d = datetime.now()
dates = Series(d, index=datetime_series.index)
assert dates.dtype == "M8[ns]"
assert len(dates) == len(datetime_series)
# GH12336
# Test construction of categorical series from value
categorical = Series(0, index=datetime_series.index, dtype="category")
expected = Series(0, index=datetime_series.index).astype("category")
assert categorical.dtype == "category"
assert len(categorical) == len(datetime_series)
tm.assert_series_equal(categorical, expected)
def test_constructor_dtype_timedelta64(self):
# basic
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# mixed with NaT
td = Series([timedelta(days=1), NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), np.nan], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(300000000), pd.NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
# improved inference
# GH5689
td = Series([np.timedelta64(300000000), NaT])
assert td.dtype == "timedelta64[ns]"
# because iNaT is int, not coerced to timedelta
td = Series([np.timedelta64(300000000), iNaT])
assert td.dtype == "object"
td = Series([np.timedelta64(300000000), np.nan])
assert td.dtype == "timedelta64[ns]"
td = Series([pd.NaT, np.timedelta64(300000000)])
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# these are frequency conversion astypes
# for t in ['s', 'D', 'us', 'ms']:
# with pytest.raises(TypeError):
# td.astype('m8[%s]' % t)
# valid astype
td.astype("int64")
# invalid casting
msg = r"cannot astype a timedelta from \[timedelta64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
td.astype("int32")
# this is an invalid casting
msg = "Could not convert object to NumPy timedelta"
with pytest.raises(ValueError, match=msg):
Series([timedelta(days=1), "foo"], dtype="m8[ns]")
# leave as object here
td = Series([timedelta(days=i) for i in range(3)] + ["foo"])
assert td.dtype == "object"
# these will correctly infer a timedelta
s = Series([None, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([np.nan, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, None, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, np.nan, "1 Day"])
assert s.dtype == "timedelta64[ns]"
# GH 16406
def test_constructor_mixed_tz(self):
s = Series([Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")])
expected = Series(
[Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(s, expected)
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype="M8[ns]")
val = series[3]
assert isna(val)
series[2] = val
assert isna(series[2])
def test_NaT_cast(self):
# GH10747
result = Series([np.nan]).astype("M8[ns]")
expected = Series([NaT])
tm.assert_series_equal(result, expected)
def test_constructor_name_hashable(self):
for n in [777, 777.0, "name", datetime(2001, 11, 11), (1,), "\u05D0"]:
for data in [[1, 2, 3], np.ones(3), {"a": 0, "b": 1}]:
s = Series(data, name=n)
assert s.name == n
def test_constructor_name_unhashable(self):
msg = r"Series\.name must be a hashable type"
for n in [["name_list"], np.ones(2), {1: 2}]:
for data in [["name_list"], np.ones(2), {1: 2}]:
with pytest.raises(TypeError, match=msg):
Series(data, name=n)
def test_auto_conversion(self):
series = Series(list(date_range("1/1/2000", periods=10)))
assert series.dtype == "M8[ns]"
def test_convert_non_ns(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype="timedelta64[s]")
s = Series(arr)
expected = Series(pd.timedelta_range("00:00:01", periods=3, freq="s"))
tm.assert_series_equal(s, expected)
# convert from a numpy array of non-ns datetime64
# note that creating a numpy datetime64 is in LOCAL time!!!!
# seems to work for M8[D], but not for M8[s]
s = Series(
np.array(["2013-01-01", "2013-01-02", "2013-01-03"], dtype="datetime64[D]")
)
tm.assert_series_equal(s, Series(date_range("20130101", periods=3, freq="D")))
# s = Series(np.array(['2013-01-01 00:00:01','2013-01-01
# 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]'))
# tm.assert_series_equal(s,date_range('20130101
# 00:00:01',period=3,freq='s'))
@pytest.mark.parametrize(
"index",
[
date_range("1/1/2000", periods=10),
timedelta_range("1 day", periods=10),
period_range("2000-Q1", periods=10, freq="Q"),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_cant_cast_datetimelike(self, index):
# floats are not ok
# strip Index to convert PeriodIndex -> Period
# We don't care whether the error message says
# PeriodIndex or PeriodArray
msg = f"Cannot cast {type(index).__name__.rstrip('Index')}.*? to "
with pytest.raises(TypeError, match=msg):
Series(index, dtype=float)
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(index, dtype=np.int64)
expected = Series(index.astype(np.int64))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index",
[
date_range("1/1/2000", periods=10),
timedelta_range("1 day", periods=10),
period_range("2000-Q1", periods=10, freq="Q"),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_cast_object(self, index):
s = Series(index, dtype=object)
exp = Series(index).astype(object)
tm.assert_series_equal(s, exp)
s = Series(pd.Index(index, dtype=object), dtype=object)
exp = Series(index).astype(object)
tm.assert_series_equal(s, exp)
s = Series(index.astype(object), dtype=object)
exp = Series(index).astype(object)
tm.assert_series_equal(s, exp)
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_constructor_generic_timestamp_no_frequency(self, dtype):
# see gh-15524, gh-15987
msg = "dtype has no unit. Please pass in"
with pytest.raises(ValueError, match=msg):
Series([], dtype=dtype)
@pytest.mark.parametrize(
"dtype,msg",
[
("m8[ps]", "cannot convert timedeltalike"),
("M8[ps]", "cannot convert datetimelike"),
],
)
def test_constructor_generic_timestamp_bad_frequency(self, dtype, msg):
# see gh-15524, gh-15987
with pytest.raises(TypeError, match=msg):
Series([], dtype=dtype)
@pytest.mark.parametrize("dtype", [None, "uint8", "category"])
def test_constructor_range_dtype(self, dtype):
# GH 16804
expected = Series([0, 1, 2, 3, 4], dtype=dtype or "int64")
result = Series(range(5), dtype=dtype)
tm.assert_series_equal(result, expected)
def test_constructor_tz_mixed_data(self):
# GH 13051
dt_list = [
Timestamp("2016-05-01 02:03:37"),
Timestamp("2016-04-30 19:03:37-0700", tz="US/Pacific"),
]
result = Series(dt_list)
expected = Series(dt_list, dtype=object)
tm.assert_series_equal(result, expected)
def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture):
# GH#25843
tz = tz_aware_fixture
result = Series([Timestamp("2019", tz=tz)], dtype="datetime64[ns]")
expected = Series([Timestamp("2019")])
tm.assert_series_equal(result, expected)
|
the-stack_0_27580
|
import inspect
from typing import TypeVar, Optional, Type, List, Dict
from warnings import warn
from cynergy.attributes import LifeCycle
from cynergy.config import ConfigProvider, Plain, Config, ServiceByName
from cynergy import attributes
from cynergy.errors.ContainerException import ConfigProviderRequiredException
T = TypeVar('T')
class IocContainer(object):
def __init__(self, config_provider: Optional[ConfigProvider]):
self.__multiple_class_mapping = {}
self.__instances = {}
self.__class_mapping = {}
self.__config_cache = {}
self.__config = config_provider
self.__primitives = (str, int, dict, list, set, float)
self.__BY_NAME_FORMAT = "by_name|{}"
def __resolve_argument(self, argument):
if type(argument) in self.__primitives:
return argument
if type(argument) is Plain:
return argument.value
if type(argument) is Config:
if self.__config is None:
raise ValueError("Tried to access config while ConfigProvider is not initialized")
if argument.default is None:
return self.__config.get(argument.value)
else:
try:
return self.__config.get(argument.value)
except KeyError:
return argument.default
if type(argument) is ServiceByName:
return self.get_by_name(argument.value)
raise NotImplementedError(
'Argument type "{}" is not supported (did you pass argument type?)'.format(type(argument)))
def __create_instance_for_single(self, class_to_init: Type, original: Type):
arguments_mapping = {}
if hasattr(class_to_init, attributes.IOC_ARGUMENTS_NAME):
arguments_mapping = getattr(class_to_init, attributes.IOC_ARGUMENTS_NAME)
try:
arguments = inspect.signature(class_to_init)
except Exception:
raise Exception("Error while trying to access class signature [{}]. Maybe you tried to register a module "
"instead of a class?".format(class_to_init.__name__))
initiated_arguments = {}
for argument in arguments.parameters.values():
if argument.name in arguments_mapping:
try:
initiated_argument = self.__resolve_argument(arguments_mapping[argument.name])
except ValueError:
raise ConfigProviderRequiredException(class_to_init, argument)
            else:
                # no explicit argument mapping: use the declared default if there
                # is one, otherwise resolve the dependency from the annotation
                if not argument.default == inspect._empty:
                    initiated_argument = argument.default
                else:
                    if argument.annotation in self.__primitives:
                        raise TypeError("Could not initialize primitive argument [{}] for class [{}] without "
                                        "argument mapping ".format(argument.name, class_to_init.__name__))
                    initiated_argument = self.get(argument.annotation)
initiated_arguments[argument.name] = initiated_argument
self.__set_instance(original, class_to_init(**initiated_arguments))
def __set_instance(self, cls: Type, instance):
self.__instances[self._get_class_name(cls)] = instance
def __get_instance(self, cls: Type):
return self.__instances[self._get_class_name(cls)]
def __create_instance_for_list_new(self, original, classes: List[Type]):
self.__set_instance(original, [self.get(cls) for cls in classes])
def __create_instance(self, cls: Type):
cls_name = self._get_class_name(cls)
class_to_init = cls if cls_name not in self.__class_mapping else self.__class_mapping[cls_name]
if isinstance(class_to_init, list):
self.__create_instance_for_list_new(cls, class_to_init)
else:
self.__create_instance_for_single(class_to_init, cls)
return self.__get_instance(cls)
def register_instance(self, cls, instance):
self.__register_instance_by_name(self._get_class_name(cls), instance)
def register_class(self, cls, assign_to: Type):
self.__class_mapping[self._get_class_name(cls)] = assign_to
def register_many(self, cls, new_classes: List[Type]):
self.__class_mapping[self._get_class_name(List[cls])] = new_classes
@staticmethod
def _get_class_name(cls: Type):
if type(cls) == type(List):
return 'List[{}]'.format(cls.__args__[0].__name__)
return cls.__name__
def __register_instance_by_name(self, name, instance):
self.__instances[name] = instance
def register_instance_by_name(self, name, instance):
self.__register_instance_by_name(self.__BY_NAME_FORMAT.format(name), instance)
def get_by_name(self, name):
key = self.__BY_NAME_FORMAT.format(name)
if key not in self.__instances:
            raise KeyError('The service "{}" is not registered'.format(key))
return self.__instances[key]
def get(self, cls: Type[T], life_cycle=LifeCycle.SINGLETON) -> T:
if not life_cycle:
life_cycle = attributes.LifeCycle.SINGLETON
if hasattr(cls, attributes.LIFECYCLE_ARGUMENT_NAME):
life_cycle = getattr(cls, attributes.LIFECYCLE_ARGUMENT_NAME)
if life_cycle == attributes.LifeCycle.SINGLETON:
if self._get_class_name(cls) not in self.__instances:
self.__create_instance(cls)
return self.__get_instance(cls)
elif life_cycle == attributes.LifeCycle.MULTI_INSTANCE:
return self.__create_instance(cls)
raise NotImplementedError("Not implemented lifecycle", life_cycle)
def clear_all(self):
self.__instances = {}
def register(self, cls: Type, class_or_instance):
if isinstance(class_or_instance, list):
self.register_many(cls, class_or_instance)
elif inspect.isclass(class_or_instance):
self.register_class(cls, class_or_instance)
else:
self.register_instance(cls, class_or_instance)
def get_config(self, key: str):
if key not in self.__config_cache:
self.__config_cache[key] = self.__config.get(key)
return self.__config_cache[key]
__instance = None
def __get_instance() -> IocContainer:
global __instance
if __instance is None:
__instance = IocContainer(None)
return __instance
def get_config(key: str):
return __get_instance().get_config(key)
def initialize(config_provider: Optional[ConfigProvider] = None,
class_mapping: Dict[Type, Type] = None):
global __instance
if __instance is not None:
warn("Container already initialized. If you need multiple instances consider not use the container statically",
UserWarning)
__instance.clear_all()
__instance = IocContainer(config_provider)
if class_mapping is None:
return
for source_class, new_class in class_mapping.items():
__instance.register_class(source_class, new_class)
def _clear_all():
return __get_instance().clear_all()
def register_instance_by_name(name, instance):
return __get_instance().register_instance_by_name(name, instance)
def get(cls: Type[T], life_cycle=LifeCycle.SINGLETON) -> T:
return __get_instance().get(cls, life_cycle)
def register_instance(cls, instance):
warn("This function is deprecated, can be now used as register(cls, obj)")
return __get_instance().register_instance(cls, instance)
def register_class(cls, assign_to):
"""
Register type (cls) to type assign_to - meaning everytime you'll ask the type cls you'll receive assign_to
:param cls: Class to map from
:param assign_to: Class to map to
"""
return __get_instance().register_class(cls, assign_to)
def register(cls, class_or_instance):
return __get_instance().register(cls, class_or_instance)
def get_by_name(name):
return __get_instance().get_by_name(name)
def register_many(cls: Type, types: List[Type]):
return __get_instance().register_many(cls, types)
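if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: the example classes
    # below are made up for illustration. Constructor dependencies are resolved
    # from type annotations, so Service receives whatever implementation is
    # registered for Repository.
    class Repository:
        def __init__(self):
            pass
    class SqlRepository(Repository):
        def __init__(self):
            pass
    class Service:
        def __init__(self, repository: Repository):
            self.repository = repository
    register(Repository, SqlRepository)  # map the abstract type to a concrete implementation
    service = get(Service)               # resolved from annotations, singleton by default
    assert isinstance(service.repository, SqlRepository)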
|
the-stack_0_27581
|
# Copyright 2016 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Top main script of the test harness.
If writing your own top level script to run the test harness,
look here how the setup is performed.
It is particularly important to correctly set the geo drivers
and multiprocessing pool for best performance
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import sys
import unittest
from reference_models.common import mpool
from reference_models.geo import drive
#----------------------------------------
# Setup the command line arguments
parser = argparse.ArgumentParser(description='Test Main')
# - Generic config.
parser.add_argument('--log_level', type=str, default='info',
help='Logging level: debug, info, warning or error')
# Setup the logger
logger = logging.getLogger()
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter(
'[%(levelname)s] %(asctime)s %(filename)s:%(lineno)d %(message)s'))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
_LOGGER_MAP = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR
}
# Multi-processing
# Number of worker processes allocated for heavy duty calculation,
# typically DPA, PPA creation and IAP for PPA/GWPZ.
# Values:
# -1: Use half of the cpu available
# -2: Use all available cpu minus one (one process should be reserved for
# the IAP shared objects manager process).
# Other: the number of process to allocate
NUM_PROCESSES = -2
# Memory allocation management
# A simple strategy is proposed by default, knowing the test harness usage:
# - NLCD only used for PPA and GWPZ for deriving the Land Cover. Area size
# is limited in general. LandCover extraction is done in main thread only
# (except for PPA tests where worker processes check the CBSD landcover)
# ==> Allocate most of the cache to main processes
# - NED used by all others, with heavy terrain profile reading by the main
# and worker threads.
# ==> Allocate equal memory to worker processes, and twice to main process
# This strategy could be tuned for best performance.
#
# A 2 level cache could also be used (with memmap or otherwise) in order to share
# most of the in-memory tiles across processes. Not implemented.
#
# The memory allocated for geo cache across all sub-processes (in MB).
# -1: automatic allocation
# otherwise this number (in MB) is used for setting up the cache size
MEM_ALLOCATION_GEO_CACHE_MB = -1
# When 'automatic allocation', the ratio of total physical memory
# dedicated to the geo cache
MEM_RATIO_FOR_GEO_CACHE = 0.5
# The weighting factor of the main processes vs worker processes. Use > 0
MEM_NED_WEIGHT_MASTER = 2.0
MEM_NLCD_WEIGHT_MASTER = 2.0
MEM_NLCD_CACHE_WORKERS = 6
def GetAvailableMemoryMb():
"""Returns the available physical memory."""
import psutil
return int(psutil.virtual_memory().total / 1e6)
def GetGeoAllocatedMemory():
"""Returns the memory available for Geo cache."""
if MEM_ALLOCATION_GEO_CACHE_MB > 0:
return MEM_ALLOCATION_GEO_CACHE_MB
else:
return GetAvailableMemoryMb() * MEM_RATIO_FOR_GEO_CACHE
def GetGeoCacheSize(num_workers):
"""Compute the number of geo tiles to cache for a given number of workers.
This is derived according to a strategy taking into account respective use
of the geo in master and workers processes.
Args:
num_workers: The number of workers.
Returns:
A tuple (n_master_ned, n_worker_ned, n_master_nlcd, n_worker_nlcd), holding
the cache size to use for resp master/workers, and NED/NLCD.
"""
geo_mem = GetGeoAllocatedMemory()
num_ned_work = num_workers + MEM_NED_WEIGHT_MASTER
num_nlcd_work = MEM_NLCD_WEIGHT_MASTER
NED_TILE_MB = 52
NLCD_TILE_MB = 13
num_tiles = float(geo_mem - MEM_NLCD_CACHE_WORKERS*num_workers*NLCD_TILE_MB) / (
num_ned_work * NED_TILE_MB + num_nlcd_work * NLCD_TILE_MB)
num_tiles = max(4, int(round(num_tiles)))
num_tiles_master_ned = int(round(num_tiles * MEM_NED_WEIGHT_MASTER))
num_tiles_worker_ned = num_tiles
num_tiles_master_nlcd = int(round(num_tiles * MEM_NLCD_WEIGHT_MASTER))
num_tiles_worker_nlcd = MEM_NLCD_CACHE_WORKERS
return (num_tiles_master_ned, num_tiles_worker_ned,
num_tiles_master_nlcd, num_tiles_worker_nlcd)
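# Hedged worked example (machine size assumed, not from the original script):
# with 8 GB of RAM and MEM_RATIO_FOR_GEO_CACHE = 0.5, roughly 4000 MB go to the
# geo cache. For num_workers = 4:
#   NLCD reservation for workers = 6 * 4 * 13 = 312 MB
#   num_tiles = (4000 - 312) / ((4 + 2.0) * 52 + 2.0 * 13) ~= 10.9 -> 11
# so GetGeoCacheSize(4) would return about (22, 11, 22, 6) tiles for
# (master NED, worker NED, master NLCD, worker NLCD).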
if __name__ == '__main__':
  # Process the command line arguments.
options = parser.parse_args()
logging.getLogger().setLevel(_LOGGER_MAP[options.log_level.lower()])
# Configure the multiprocessing worker pool.
# Your options are:
# 0: single process (default if not called)
# -1: use half of the cpus
# -2: use all cpus (minus one)
# a specific number of cpus
# Or your own `pool`.
logging.info('Start Worker processes')
mpool.Configure(num_processes=NUM_PROCESSES)
num_workers = mpool.GetNumWorkerProcesses()
logging.info(' ... %d workers started' % num_workers)
# Configure geo drivers
logging.info('Configure geo drivers')
(num_tiles_master_ned, num_tiles_worker_ned,
num_tiles_master_nlcd, num_tiles_worker_nlcd) = GetGeoCacheSize(num_workers)
if num_tiles_master_ned < 16:
logging.warning('Required geo cache size %d (for master) is low'
                    ' - too little memory or too many workers'
% num_tiles_master_ned)
logging.info(' ... NED: cache size: %d per master, %d for workers'
% (num_tiles_master_ned, num_tiles_worker_ned))
logging.info(' ... NLCD: cache size: %d per master, %d for workers'
               % (num_tiles_master_nlcd, num_tiles_worker_nlcd))
# - for main process
drive.ConfigureTerrainDriver(cache_size=num_tiles_master_ned)
drive.ConfigureNlcdDriver(cache_size=num_tiles_master_nlcd)
# - for worker processes
mpool.RunOnEachWorkerProcess(drive.ConfigureTerrainDriver,
terrain_dir=None, cache_size=num_tiles_worker_ned)
mpool.RunOnEachWorkerProcess(drive.ConfigureNlcdDriver,
nlcd_dir=None, cache_size=num_tiles_worker_nlcd)
# Run the tests
tests = unittest.TestLoader().discover('testcases', '*_testcase.py')
unittest.TextTestRunner(verbosity=2).run(tests)
|
the-stack_0_27585
|
#!/usr/bin/env python
#
# Copyright 2017-2018 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import platform
from Pegasus.DAX3 import *
__author__ = 'Rafael Ferreira da Silva'
class OSType:
LINUX = 'LINUX'
SUNOS = 'SUNOS'
AIX = 'AIX'
MACOSX = 'MACOSX'
WINDOWS = 'WINDOWS'
class DirectoryType:
SHARED_SCRATCH = 'shared-scratch'
SHARED_STORAGE = 'shared-storage'
LOCAL_SCRATCH = 'local-scratch'
LOCAL_STORAGE = 'local-storage'
class JobType:
COMPUTE = 'compute'
AUXILLARY = 'auxillary'
TRANSFER = 'transfer'
REGISTER = 'register'
CLEANUP = 'cleanup'
class GridType:
GT2 = 'gt2'
GT4 = 'gt4'
GT5 = 'gt5'
CONDOR = 'condor'
CREAM = 'cream'
BATCH = 'batch'
PBS = 'pbs'
LSF = 'lsf'
SGE = 'sge'
NORDUGRID = 'nordugrid'
UNICORE = 'unicore'
EC2 = 'ec2'
DELTACLOUD = 'deltacloud'
class SchedulerType:
FORK = 'Fork'
PBS = 'PBS'
LSF = 'LSF'
CONDOR = 'Condor'
SGE = 'SGE'
UNKNOWN = 'unknown'
class SitesCatalog:
def __init__(self, workflow_dir, filename='sites.xml'):
"""
Create a Pegasus site catalog.
:param workflow_dir: Path to the workflow directory
:param filename: sites catalog filename (default: sites.xml)
"""
self.workflow_dir = workflow_dir
self.filename = filename
self._create_local_site()
def add_site(self, handle, arch=Arch.X86_64, os=OSType.LINUX):
"""
Add a site to the sites catalog
:param handle: Site name
:param arch: Site architecture (default: x86_64)
:param os: Site OS (default: LINUX)
"""
if not handle:
raise Exception('A site handle should be provided.')
if handle in self._sites:
raise Exception('Site "%s" already exists.' % handle)
self._sites.update(self._create_site(handle, arch, os))
def add_site_profile(self, handle, namespace, key, value=''):
"""
Add a profile to a specific site.
:param handle: Site name
:param namespace: Namespace values recognized by Pegasus
:param key: Profile key
:param value: Profile value (default: '')
"""
if not handle or not namespace or not key:
raise Exception(
'A site handle, a namespace, and a key should be provided.'
)
if handle not in self._sites:
raise Exception('There are no entries for site "%s".' % handle)
profile = {'namespace': namespace, 'key': key, 'value': value}
self._sites[handle]['profiles'].append(profile)
def add_job_manager(self, handle, type, contact, scheduler, jobtype=None):
"""
Add a job manager to a specific site.
:param handle: Site name
:param type: The universe name is actually the primary key for the jobmanager identification
:param contact: The contact string is the secondary key for any job manager
:param scheduler: Grid scheduler
:param jobtype: Type of Jobs in the executable workflow the grid supports
"""
if not handle or not type or not contact or not scheduler:
raise Exception(
'A site handle, and a jobmanager type, contact, and scheduler should be provided.'
)
if handle not in self._sites:
raise Exception('There are no entries for site "%s".' % handle)
grid = {'type': type, 'contact': contact, 'scheduler': scheduler}
if jobtype:
grid['jobtype'] = jobtype
self._sites[handle]['grids'].append(grid)
def write(self, force=False):
"""
Write the sites catalog to a file.
:param force: whether to overwrite the catalog file
"""
sites_catalog_file = self.workflow_dir + '/' + self.filename
if not os.path.isfile(sites_catalog_file) or force:
with open(sites_catalog_file, 'w') as ppf:
ppf.write('<?xml version="1.0" encoding="UTF-8"?>\n')
ppf.write(
'<sitecatalog xmlns="http://pegasus.isi.edu/schema/sitecatalog" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
'xsi:schemaLocation="http://pegasus.isi.edu/schema/sitecatalog '
'http://pegasus.isi.edu/schema/sc-4.1.xsd" version="4.1">\n'
)
# writing sites
for handle in self._sites:
ppf.write(
'\t<site handle="%s" arch="%s" os="%s">\n' % (
handle, self._sites[handle]['arch'],
self._sites[handle]['os']
)
)
# directories
dirs = self._sites[handle]['directories']
for dir in dirs:
ppf.write(
'\t\t<directory type="%s" path="%s">\n' %
(dir, dirs[dir]['path'])
)
ppf.write(
'\t\t\t<file-server operation="all" url="file://%s"/>\n'
% dirs[dir]['path']
)
ppf.write('\t\t</directory>\n')
# grids
for grid in self._sites[handle]['grids']:
ppf.write(
'\t\t<grid type="%s" contact="%s" scheduler="%s" '
%
(grid['type'], grid['contact'], grid['scheduler'])
)
if 'jobtype' in grid:
ppf.write('jobtype="%s" ' % grid['jobtype'])
ppf.write('/>\n')
# site profiles
for p in self._sites[handle]['profiles']:
ppf.write(
'\t\t<profile namespace="%s" key="%s">%s</profile>\n'
% (p['namespace'], p['key'], p['value'])
)
ppf.write('\t</site>\n')
ppf.write('</sitecatalog>\n')
else:
print(
'\x1b[0;35mWARNING: Sites Catalog (%s) already exists. Use "force=True" '
'to overwrite it.\n\x1b[0m' % sites_catalog_file
)
def _create_local_site(self):
"""
Create a local site for the workflow
"""
        # use a local name so the imported os module is not shadowed
        os_name = platform.system()
        if os_name.lower() == 'linux':
            os_name = OSType.LINUX
        elif os_name.lower() == 'windows':
            os_name = OSType.WINDOWS
        else:
            os_name = OSType.MACOSX
        # create local site
        self._sites = self._create_site('local', platform.machine(), os_name)
self._sites['local']['directories'] = {
DirectoryType.SHARED_SCRATCH:
{
'path': self.workflow_dir + '/scratch'
},
DirectoryType.SHARED_STORAGE:
{
'path': self.workflow_dir + '/output'
}
}
def _create_site(self, handle, arch, os):
"""
Create a general site.
:param handle: Site name
:param arch: Site architecture
:param os: Site operational system
:return: The dictionary object of the site
"""
return {
handle:
{
'arch': arch,
'os': os,
'directories': {},
'grids': [],
'profiles': []
}
}
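# --- Hedged usage sketch (not part of the original module) ---
# The handle, contact string and paths below are made up; only methods defined
# above are used.
#
#   sc = SitesCatalog('/tmp/my-workflow')
#   sc.add_site('condorpool', arch=Arch.X86_64, os=OSType.LINUX)
#   sc.add_site_profile('condorpool', namespace='pegasus', key='style', value='condor')
#   sc.add_job_manager('condorpool', type=GridType.CONDOR,
#                      contact='pool.example.org', scheduler=SchedulerType.CONDOR,
#                      jobtype=JobType.COMPUTE)
#   sc.write()  # writes /tmp/my-workflow/sites.xml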
|
the-stack_0_27586
|
from setuptools import find_packages, setup
from os import path
def get_content_from_readme(file_name: str = 'README.md') -> str:
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, file_name), encoding='utf-8') as file:
return file.read()
url = "https://github.com/LostInDarkMath/pedantic-python-decorators"
author = "Willi Sontopski"
setup(
name="pedantic",
version="1.4.10",
python_requires='>=3.6.1',
packages=find_packages(),
install_requires=['docstring_parser'],
author=author,
author_email="[email protected]",
license="Apache-2.0 License",
maintainer=author,
description="Some useful Python decorators for cleaner software development.",
long_description=get_content_from_readme(),
long_description_content_type='text/markdown',
keywords="decorators tools helpers type-checking pedantic type annotations",
url=url,
project_urls={
"Bug Tracker": f'{url}/issues',
"Documentation": 'https://lostindarkmath.github.io/pedantic-python-decorators/pedantic/',
"Source Code": url,
},
include_package_data=False,
zip_safe=True,
)
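# Hedged usage note (not part of the original file): typical ways to exercise
# this setup script, assuming standard pip/setuptools tooling is installed.
#
#   pip install .                       # install from a source checkout
#   python setup.py sdist bdist_wheel   # build source and wheel archives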
|
the-stack_0_27588
|
from keras.layers import Dense, Embedding, Input, Lambda
from keras.models import Model
import tensorflow as tf
from doc2vec.model import lambdas, model
class DBOW(model.Doc2VecModel):
def build(self):
        # each training sample is a single document id
        doc_input = Input(shape=(1,))
        # look up the learned document vector for that id
        embedded_doc = Embedding(input_dim=self._num_docs,
                                 output_dim=self._embedding_size,
                                 input_length=1,
                                 name=model.DOC_EMBEDDINGS_LAYER_NAME)(doc_input)
        # drop the length-1 sequence axis: (batch, 1, emb) -> (batch, emb)
        embedded_doc = Lambda(lambdas.squeeze(axis=1))(embedded_doc)
        # stack copies of the document vector, one per position in the window
        # (see lambdas.stack)
        stack = Lambda(lambdas.stack(self._window_size))(embedded_doc)
        # predict a distribution over the vocabulary for each window position
        softmax = Dense(self._vocab_size, activation='softmax')(stack)
        self._model = Model(inputs=doc_input, outputs=softmax)
the-stack_0_27591
|
import os
from django.utils.deprecation import MiddlewareMixin
class RangesMiddleware(MiddlewareMixin):
def process_response(self, request, response):
if response.status_code != 200 or not hasattr(response, 'file_to_stream'):
return response
http_range = request.META.get('HTTP_RANGE')
if not (http_range and http_range.startswith('bytes=') and http_range.count('-') == 1):
return response
if_range = request.META.get('HTTP_IF_RANGE')
if if_range and if_range != response.get('Last-Modified') and if_range != response.get('ETag'):
return response
f = response.file_to_stream
statobj = os.fstat(f.fileno())
start, end = http_range.split('=')[1].split('-')
if not start: # requesting the last N bytes
start = max(0, statobj.st_size - int(end))
end = ''
start, end = int(start or 0), int(end or statobj.st_size - 1)
assert 0 <= start < statobj.st_size, (start, statobj.st_size)
end = min(end, statobj.st_size - 1)
f.seek(start)
old_read = f.read
f.read = lambda n: old_read(min(n, end + 1 - f.tell()))
response.status_code = 206
response['Content-Length'] = end + 1 - start
response['Content-Range'] = 'bytes %d-%d/%d' % (start, end, statobj.st_size)
return response
# "GET /static/image/Wild_Crane_Andy.mp4 HTTP/1.1" 200 417792
|
the-stack_0_27592
|
# This script is borrowed from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
# Adhere to their licence to use this script.
import torch
import torch.nn as nn
from torchvision.models.utils import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
# x = self.fc(x)
return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict, strict=False)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>'_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except that the number of channels in the
    bottleneck is twice as large in every block. The number of channels in the
    outer 1x1 convolutions is unchanged, e.g. the last block in ResNet-50 has
    2048-512-2048 channels, while in Wide ResNet-50-2 it has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
    The model is the same as ResNet except that the number of channels in the
    bottleneck is twice as large in every block. The number of channels in the
    outer 1x1 convolutions is unchanged, e.g. the last block in ResNet-50 has
    2048-512-2048 channels, while in Wide ResNet-50-2 it has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
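# Minimal usage sketch (not part of the original torchvision code): build the
# backbone above (the final fc layer is commented out, so forward() returns
# pooled features) and run a dummy batch through it. The input size and
# pretrained=False are chosen here purely for illustration.
if __name__ == "__main__":
    model = resnet50(pretrained=False)
    model.eval()
    with torch.no_grad():
        feats = model(torch.randn(2, 3, 224, 224))
    # Bottleneck.expansion is 4, so the feature dimension is 512 * 4 = 2048.
    print(feats.shape)  # expected: torch.Size([2, 2048])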
|
the-stack_0_27593
|
import logging
import fabric.api
import fabric.operations
import cloudenvy.envy
class Ssh(cloudenvy.envy.Command):
def _build_subparser(self, subparsers):
help_str = 'SSH into your Envy.'
subparser = subparsers.add_parser('ssh', help=help_str,
description=help_str)
subparser.set_defaults(func=self.run)
subparser.add_argument('-n', '--name', action='store', default='',
help='Specify custom name for an Envy.')
return subparser
def run(self, config, args):
envy = cloudenvy.envy.Envy(config)
if envy.ip():
disable_known_hosts = ('-o UserKnownHostsFile=/dev/null'
' -o StrictHostKeyChecking=no')
forward_agent = '-o ForwardAgent=yes'
options = [disable_known_hosts]
if envy.forward_agent:
options.append(forward_agent)
fabric.operations.local('ssh %s %s@%s' % (' '.join(options),
envy.remote_user,
envy.ip()))
else:
logging.error('Could not determine IP.')
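# Illustrative sketch only (not used by the command above): the shell command
# assembled in run() for a reachable Envy would look roughly like the string
# built below. The IP address and user name are placeholders.
def _example_ssh_command(ip='203.0.113.10', user='ubuntu', forward_agent=True):
    options = ['-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no']
    if forward_agent:
        options.append('-o ForwardAgent=yes')
    return 'ssh %s %s@%s' % (' '.join(options), user, ip)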
|
the-stack_0_27594
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2019 David Lundgren <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
author:
- David Lundgren (@dlundgren)
module: sysrc
short_description: Manage FreeBSD using sysrc
version_added: '2.0.0'
description:
- Manages C(/etc/rc.conf) for FreeBSD.
options:
name:
description:
- Name of variable in C(/etc/rc.conf) to manage.
type: str
required: true
value:
description:
- The value to set when I(state=present).
- The value to add when I(state=value_present).
- The value to remove when I(state=value_absent).
type: str
state:
description:
- Use I(present) to add the variable.
- Use I(absent) to remove the variable.
- Use I(value_present) to add the value to the existing variable.
- Use I(value_absent) to remove the value from the existing variable.
type: str
default: "present"
choices: [ absent, present, value_present, value_absent ]
path:
description:
- Path to file to use instead of C(/etc/rc.conf).
type: str
default: "/etc/rc.conf"
delim:
description:
- Delimiter to be used instead of C( ).
- Only used when I(state=value_present) or I(state=value_absent).
default: " "
type: str
jail:
description:
- Name or ID of the jail to operate on.
type: str
notes:
- The C(name) cannot contain periods as sysrc does not support OID style names.
'''
EXAMPLES = r'''
---
# enable mysql in the /etc/rc.conf
- name: Configure mysql pid file
community.general.sysrc:
name: mysql_pidfile
value: "/var/run/mysqld/mysqld.pid"
# enable accf_http kld in the boot loader
- name: Enable accf_http kld
community.general.sysrc:
name: accf_http_load
state: present
value: "YES"
path: /boot/loader.conf
# add gif0 to cloned_interfaces
- name: Add gif0 interface
community.general.sysrc:
name: cloned_interfaces
state: value_present
value: "gif0"
# enable nginx on a jail
- name: Enable nginx in test jail
community.general.sysrc:
name: nginx_enable
value: "YES"
jail: testjail
'''
RETURN = r'''
changed:
description: Return changed for sysrc actions.
returned: always
type: bool
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
import re
class Sysrc(object):
def __init__(self, module, name, value, path, delim, jail):
self.module = module
self.name = name
self.changed = False
self.value = value
self.path = path
self.delim = delim
self.jail = jail
self.sysrc = module.get_bin_path('sysrc', True)
def has_unknown_variable(self, out, err):
# newer versions of sysrc use stderr instead of stdout
return err.find("unknown variable") > 0 or out.find("unknown variable") > 0
def exists(self):
# sysrc doesn't really use exit codes
(rc, out, err) = self.run_sysrc(self.name)
if self.value is None:
regex = "%s: " % re.escape(self.name)
else:
regex = "%s: %s$" % (re.escape(self.name), re.escape(self.value))
return not self.has_unknown_variable(out, err) and re.match(regex, out) is not None
def contains(self):
(rc, out, err) = self.run_sysrc('-n', self.name)
if self.has_unknown_variable(out, err):
return False
return self.value in out.strip().split(self.delim)
def present(self):
if self.exists():
return
if self.module.check_mode:
self.changed = True
return
(rc, out, err) = self.run_sysrc("%s=%s" % (self.name, self.value))
if out.find("%s:" % self.name) == 0 and re.search("-> %s$" % re.escape(self.value), out) is not None:
self.changed = True
def absent(self):
if not self.exists():
return
        # the check_mode test is inverted here because we still need to mark the result as changed
if not self.module.check_mode:
(rc, out, err) = self.run_sysrc('-x', self.name)
if self.has_unknown_variable(out, err):
return
self.changed = True
def value_present(self):
if self.contains():
return
if self.module.check_mode:
self.changed = True
return
setstring = '%s+=%s%s' % (self.name, self.delim, self.value)
(rc, out, err) = self.run_sysrc(setstring)
if out.find("%s:" % self.name) == 0:
values = out.split(' -> ')[1].strip().split(self.delim)
if self.value in values:
self.changed = True
def value_absent(self):
if not self.contains():
return
if self.module.check_mode:
self.changed = True
return
setstring = '%s-=%s%s' % (self.name, self.delim, self.value)
(rc, out, err) = self.run_sysrc(setstring)
if out.find("%s:" % self.name) == 0:
values = out.split(' -> ')[1].strip().split(self.delim)
if self.value not in values:
self.changed = True
def run_sysrc(self, *args):
cmd = [self.sysrc, '-f', self.path]
if self.jail:
cmd += ['-j', self.jail]
cmd.extend(args)
(rc, out, err) = self.module.run_command(cmd)
return (rc, out, err)
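# Illustrative sketch only (not part of the module): the argv that
# Sysrc.run_sysrc() would assemble for the "Enable nginx in test jail" example
# from EXAMPLES above, assuming sysrc resolves to /usr/sbin/sysrc.
def _example_run_sysrc_argv():
    return ['/usr/sbin/sysrc', '-f', '/etc/rc.conf', '-j', 'testjail', 'nginx_enable=YES']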
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
value=dict(type='str', default=None),
state=dict(type='str', default='present', choices=['absent', 'present', 'value_present', 'value_absent']),
path=dict(type='str', default='/etc/rc.conf'),
delim=dict(type='str', default=' '),
jail=dict(type='str', default=None),
),
supports_check_mode=True,
)
name = module.params.pop('name')
# OID style names are not supported
if not re.match('^[a-zA-Z0-9_]+$', name):
module.fail_json(
msg="Name may only contain alpha-numeric and underscore characters"
)
value = module.params.pop('value')
state = module.params.pop('state')
path = module.params.pop('path')
delim = module.params.pop('delim')
jail = module.params.pop('jail')
result = dict(
name=name,
state=state,
value=value,
path=path,
delim=delim,
jail=jail
)
rc_value = Sysrc(module, name, value, path, delim, jail)
if state == 'present':
rc_value.present()
elif state == 'absent':
rc_value.absent()
elif state == 'value_present':
rc_value.value_present()
elif state == 'value_absent':
rc_value.value_absent()
result['changed'] = rc_value.changed
module.exit_json(**result)
if __name__ == '__main__':
main()
|
the-stack_0_27596
|
# Copyright (c) 2021, Frappe Technologies and contributors
# License: MIT. See LICENSE
import json
from datetime import datetime
from typing import Dict, List
from croniter import croniter
import frappe
from frappe.model.document import Document
from frappe.utils import get_datetime, now_datetime
from frappe.utils.background_jobs import enqueue, get_jobs
class ScheduledJobType(Document):
def autoname(self):
self.name = ".".join(self.method.split(".")[-2:])
def validate(self):
if self.frequency != "All":
# force logging for all events other than continuous ones (ALL)
self.create_log = 1
def enqueue(self, force=False):
# enqueue event if last execution is done
if self.is_event_due() or force:
if frappe.flags.enqueued_jobs:
frappe.flags.enqueued_jobs.append(self.method)
if frappe.flags.execute_job:
self.execute()
else:
if not self.is_job_in_queue():
enqueue('frappe.core.doctype.scheduled_job_type.scheduled_job_type.run_scheduled_job',
queue = self.get_queue_name(), job_type=self.method)
return True
return False
def is_event_due(self, current_time = None):
		'''Return true if the event is due based on time elapsed since the last execution'''
		# if the next scheduled event is before NOW, then it's due!
return self.get_next_execution() <= (current_time or now_datetime())
def is_job_in_queue(self):
queued_jobs = get_jobs(site=frappe.local.site, key='job_type')[frappe.local.site]
return self.method in queued_jobs
def get_next_execution(self):
CRON_MAP = {
"Yearly": "0 0 1 1 *",
"Annual": "0 0 1 1 *",
"Monthly": "0 0 1 * *",
"Monthly Long": "0 0 1 * *",
"Weekly": "0 0 * * 0",
"Weekly Long": "0 0 * * 0",
"Daily": "0 0 * * *",
"Daily Long": "0 0 * * *",
"Hourly": "0 * * * *",
"Hourly Long": "0 * * * *",
"All": "0/" + str((frappe.get_conf().scheduler_interval or 240) // 60) + " * * * *",
}
if not self.cron_format:
self.cron_format = CRON_MAP[self.frequency]
return croniter(self.cron_format,
get_datetime(self.last_execution or datetime(2000, 1, 1))).get_next(datetime)
def execute(self):
self.scheduler_log = None
try:
self.log_status('Start')
if self.server_script:
script_name = frappe.db.get_value("Server Script", self.server_script)
if script_name:
frappe.get_doc('Server Script', script_name).execute_scheduled_method()
else:
frappe.get_attr(self.method)()
frappe.db.commit()
self.log_status('Complete')
except Exception:
frappe.db.rollback()
self.log_status('Failed')
def log_status(self, status):
# log file
frappe.logger("scheduler").info(f"Scheduled Job {status}: {self.method} for {frappe.local.site}")
self.update_scheduler_log(status)
def update_scheduler_log(self, status):
if not self.create_log:
# self.get_next_execution will work properly iff self.last_execution is properly set
if self.frequency == "All" and status == 'Start':
self.db_set('last_execution', now_datetime(), update_modified=False)
frappe.db.commit()
return
if not self.scheduler_log:
self.scheduler_log = frappe.get_doc(dict(doctype = 'Scheduled Job Log', scheduled_job_type=self.name)).insert(ignore_permissions=True)
self.scheduler_log.db_set('status', status)
if status == 'Failed':
self.scheduler_log.db_set('details', frappe.get_traceback())
if status == 'Start':
self.db_set('last_execution', now_datetime(), update_modified=False)
frappe.db.commit()
def get_queue_name(self):
return 'long' if ('Long' in self.frequency) else 'default'
def on_trash(self):
frappe.db.delete("Scheduled Job Log", {"scheduled_job_type": self.name})
@frappe.whitelist()
def execute_event(doc: str):
frappe.only_for("System Manager")
doc = json.loads(doc)
frappe.get_doc("Scheduled Job Type", doc.get("name")).enqueue(force=True)
return doc
def run_scheduled_job(job_type: str):
"""This is a wrapper function that runs a hooks.scheduler_events method"""
try:
frappe.get_doc("Scheduled Job Type", dict(method=job_type)).execute()
except Exception:
print(frappe.get_traceback())
def sync_jobs(hooks: Dict = None):
frappe.reload_doc("core", "doctype", "scheduled_job_type")
scheduler_events = hooks or frappe.get_hooks("scheduler_events")
all_events = insert_events(scheduler_events)
clear_events(all_events)
def insert_events(scheduler_events: Dict) -> List:
cron_jobs, event_jobs = [], []
for event_type in scheduler_events:
events = scheduler_events.get(event_type)
if isinstance(events, dict):
cron_jobs += insert_cron_jobs(events)
else:
# hourly, daily etc
event_jobs += insert_event_jobs(events, event_type)
return cron_jobs + event_jobs
def insert_cron_jobs(events: Dict) -> List:
cron_jobs = []
for cron_format in events:
for event in events.get(cron_format):
cron_jobs.append(event)
insert_single_event("Cron", event, cron_format)
return cron_jobs
def insert_event_jobs(events: List, event_type: str) -> List:
event_jobs = []
for event in events:
event_jobs.append(event)
frequency = event_type.replace("_", " ").title()
insert_single_event(frequency, event)
return event_jobs
def insert_single_event(frequency: str, event: str, cron_format: str = None):
cron_expr = {"cron_format": cron_format} if cron_format else {}
doc = frappe.get_doc(
{
"doctype": "Scheduled Job Type",
"method": event,
"cron_format": cron_format,
"frequency": frequency,
}
)
if not frappe.db.exists(
"Scheduled Job Type", {"method": event, "frequency": frequency, **cron_expr}
):
try:
doc.insert()
except frappe.DuplicateEntryError:
doc.delete()
doc.insert()
def clear_events(all_events: List):
for event in frappe.get_all(
"Scheduled Job Type", fields=["name", "method", "server_script"]
):
is_server_script = event.server_script
is_defined_in_hooks = event.method in all_events
if not (is_defined_in_hooks or is_server_script):
frappe.delete_doc("Scheduled Job Type", event.name)
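# Illustrative sketch only (not part of the original file): the shape of the
# "scheduler_events" mapping that sync_jobs()/insert_events() expect from
# hooks. The method paths and cron expression below are placeholders.
def _example_scheduler_events():
	return {
		"cron": {
			# cron_format -> list of dotted method paths
			"0/15 * * * *": ["myapp.tasks.refresh_cache"],
		},
		# frequency key -> list of dotted method paths (mapped to "Daily", "Hourly Long", ...)
		"daily": ["myapp.tasks.send_daily_digest"],
		"hourly_long": ["myapp.tasks.rebuild_search_index"],
	}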
|
the-stack_0_27597
|
"""
Core components of Home Assistant.
Home Assistant is a Home Automation framework for observing the state
of entities and react to changes.
"""
from __future__ import annotations
import asyncio
from collections.abc import Awaitable, Collection, Coroutine, Iterable, Mapping
import datetime
import enum
import functools
import logging
import os
import pathlib
import re
import threading
from time import monotonic
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, cast
from urllib.parse import urlparse
import attr
import voluptuous as vol
import yarl
from homeassistant import block_async_io, loader, util
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_FRIENDLY_NAME,
ATTR_NOW,
ATTR_SECONDS,
ATTR_SERVICE,
ATTR_SERVICE_DATA,
CONF_UNIT_SYSTEM_IMPERIAL,
EVENT_CALL_SERVICE,
EVENT_CORE_CONFIG_UPDATE,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_HOMEASSISTANT_FINAL_WRITE,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
EVENT_SERVICE_REGISTERED,
EVENT_SERVICE_REMOVED,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
EVENT_TIMER_OUT_OF_SYNC,
LENGTH_METERS,
MATCH_ALL,
MAX_LENGTH_EVENT_EVENT_TYPE,
MAX_LENGTH_STATE_STATE,
__version__,
)
from homeassistant.exceptions import (
HomeAssistantError,
InvalidEntityFormatError,
InvalidStateError,
MaxLengthExceeded,
ServiceNotFound,
Unauthorized,
)
from homeassistant.util import location
from homeassistant.util.async_ import (
fire_coroutine_threadsafe,
run_callback_threadsafe,
shutdown_run_callback_threadsafe,
)
import homeassistant.util.dt as dt_util
from homeassistant.util.timeout import TimeoutManager
from homeassistant.util.unit_system import IMPERIAL_SYSTEM, METRIC_SYSTEM, UnitSystem
import homeassistant.util.uuid as uuid_util
# Typing imports that create a circular dependency
if TYPE_CHECKING:
from homeassistant.auth import AuthManager
from homeassistant.components.http import HomeAssistantHTTP
from homeassistant.config_entries import ConfigEntries
STAGE_1_SHUTDOWN_TIMEOUT = 100
STAGE_2_SHUTDOWN_TIMEOUT = 60
STAGE_3_SHUTDOWN_TIMEOUT = 30
block_async_io.enable()
T = TypeVar("T")
_UNDEF: dict = {} # Internal; not helpers.typing.UNDEFINED due to circular dependency
# pylint: disable=invalid-name
CALLABLE_T = TypeVar("CALLABLE_T", bound=Callable)
CALLBACK_TYPE = Callable[[], None]
# pylint: enable=invalid-name
CORE_STORAGE_KEY = "core.config"
CORE_STORAGE_VERSION = 1
DOMAIN = "homeassistant"
# How long to wait to log tasks that are blocking
BLOCK_LOG_TIMEOUT = 60
# How long we wait for the result of a service call
SERVICE_CALL_LIMIT = 10 # seconds
# Source of core configuration
SOURCE_DISCOVERED = "discovered"
SOURCE_STORAGE = "storage"
SOURCE_YAML = "yaml"
# How long to wait until things that run on startup have to finish.
TIMEOUT_EVENT_START = 15
_LOGGER = logging.getLogger(__name__)
def split_entity_id(entity_id: str) -> list[str]:
"""Split a state entity ID into domain and object ID."""
return entity_id.split(".", 1)
VALID_ENTITY_ID = re.compile(r"^(?!.+__)(?!_)[\da-z_]+(?<!_)\.(?!_)[\da-z_]+(?<!_)$")
def valid_entity_id(entity_id: str) -> bool:
"""Test if an entity ID is a valid format.
Format: <domain>.<entity> where both are slugs.
"""
return VALID_ENTITY_ID.match(entity_id) is not None
def valid_state(state: str) -> bool:
"""Test if a state is valid."""
return len(state) <= MAX_LENGTH_STATE_STATE
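# Illustrative examples (not part of Home Assistant itself) of the two
# validators above; the entity IDs and state value are placeholders.
def _example_validity_checks() -> tuple[bool, bool, bool, bool]:
    return (
        valid_entity_id("light.kitchen"),   # True: <domain>.<object_id>, both slugs
        valid_entity_id("light.Kitchen"),   # False: upper case is not allowed in a slug
        valid_entity_id("kitchen"),         # False: the domain part is missing
        valid_state("on"),                  # True: within the 255 character limit
    )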
def callback(func: CALLABLE_T) -> CALLABLE_T:
"""Annotation to mark method as safe to call from within the event loop."""
setattr(func, "_hass_callback", True)
return func
def is_callback(func: Callable[..., Any]) -> bool:
"""Check if function is safe to be called in the event loop."""
return getattr(func, "_hass_callback", False) is True
@enum.unique
class HassJobType(enum.Enum):
"""Represent a job type."""
Coroutinefunction = 1
Callback = 2
Executor = 3
class HassJob:
"""Represent a job to be run later.
We check the callable type in advance
so we can avoid checking it every time
we run the job.
"""
__slots__ = ("job_type", "target")
def __init__(self, target: Callable) -> None:
"""Create a job object."""
if asyncio.iscoroutine(target):
raise ValueError("Coroutine not allowed to be passed to HassJob")
self.target = target
self.job_type = _get_callable_job_type(target)
def __repr__(self) -> str:
"""Return the job."""
return f"<Job {self.job_type} {self.target}>"
def _get_callable_job_type(target: Callable) -> HassJobType:
"""Determine the job type from the callable."""
# Check for partials to properly determine if coroutine function
check_target = target
while isinstance(check_target, functools.partial):
check_target = check_target.func
if asyncio.iscoroutinefunction(check_target):
return HassJobType.Coroutinefunction
if is_callback(check_target):
return HassJobType.Callback
return HassJobType.Executor
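# Illustrative sketch (not part of Home Assistant itself): how the helper
# above classifies different callables. The example callables below are
# placeholders defined only for this demonstration.
def _example_job_type_classification() -> list[HassJobType]:
    async def _coro_func() -> None:
        """A coroutine function -> HassJobType.Coroutinefunction."""
    @callback
    def _cb() -> None:
        """Marked safe for the event loop -> HassJobType.Callback."""
    def _blocking() -> None:
        """A plain function -> HassJobType.Executor (run in the executor pool)."""
    return [_get_callable_job_type(func) for func in (_coro_func, _cb, _blocking)]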
class CoreState(enum.Enum):
"""Represent the current state of Home Assistant."""
not_running = "NOT_RUNNING"
starting = "STARTING"
running = "RUNNING"
stopping = "STOPPING"
final_write = "FINAL_WRITE"
stopped = "STOPPED"
def __str__(self) -> str:
"""Return the event."""
return self.value
class HomeAssistant:
"""Root object of the Home Assistant home automation."""
auth: AuthManager
http: HomeAssistantHTTP = None # type: ignore
config_entries: ConfigEntries = None # type: ignore
def __init__(self) -> None:
"""Initialize new Home Assistant object."""
self.loop = asyncio.get_running_loop()
self._pending_tasks: list = []
self._track_task = True
self.bus = EventBus(self)
self.services = ServiceRegistry(self)
self.states = StateMachine(self.bus, self.loop)
self.config = Config(self)
self.components = loader.Components(self)
self.helpers = loader.Helpers(self)
# This is a dictionary that any component can store any data on.
self.data: dict = {}
self.state: CoreState = CoreState.not_running
self.exit_code: int = 0
# If not None, use to signal end-of-loop
self._stopped: asyncio.Event | None = None
# Timeout handler for Core/Helper namespace
self.timeout: TimeoutManager = TimeoutManager()
@property
def is_running(self) -> bool:
"""Return if Home Assistant is running."""
return self.state in (CoreState.starting, CoreState.running)
@property
def is_stopping(self) -> bool:
"""Return if Home Assistant is stopping."""
return self.state in (CoreState.stopping, CoreState.final_write)
def start(self) -> int:
"""Start Home Assistant.
Note: This function is only used for testing.
For regular use, use "await hass.run()".
"""
# Register the async start
fire_coroutine_threadsafe(self.async_start(), self.loop)
# Run forever
# Block until stopped
_LOGGER.info("Starting Home Assistant core loop")
self.loop.run_forever()
return self.exit_code
async def async_run(self, *, attach_signals: bool = True) -> int:
"""Home Assistant main entry point.
Start Home Assistant and block until stopped.
This method is a coroutine.
"""
if self.state != CoreState.not_running:
raise RuntimeError("Home Assistant is already running")
# _async_stop will set this instead of stopping the loop
self._stopped = asyncio.Event()
await self.async_start()
if attach_signals:
# pylint: disable=import-outside-toplevel
from homeassistant.helpers.signal import async_register_signal_handling
async_register_signal_handling(self)
await self._stopped.wait()
return self.exit_code
async def async_start(self) -> None:
"""Finalize startup from inside the event loop.
This method is a coroutine.
"""
_LOGGER.info("Starting Home Assistant")
setattr(self.loop, "_thread_ident", threading.get_ident())
self.state = CoreState.starting
self.bus.async_fire(EVENT_CORE_CONFIG_UPDATE)
self.bus.async_fire(EVENT_HOMEASSISTANT_START)
try:
# Only block for EVENT_HOMEASSISTANT_START listener
self.async_stop_track_tasks()
async with self.timeout.async_timeout(TIMEOUT_EVENT_START):
await self.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning(
"Something is blocking Home Assistant from wrapping up the "
"start up phase. We're going to continue anyway. Please "
"report the following info at https://github.com/home-assistant/core/issues: %s",
", ".join(self.config.components),
)
# Allow automations to set up the start triggers before changing state
await asyncio.sleep(0)
if self.state != CoreState.starting:
_LOGGER.warning(
"Home Assistant startup has been interrupted. "
"Its state may be inconsistent"
)
return
self.state = CoreState.running
self.bus.async_fire(EVENT_CORE_CONFIG_UPDATE)
self.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
_async_create_timer(self)
def add_job(self, target: Callable[..., Any], *args: Any) -> None:
"""Add job to the executor pool.
target: target to call.
args: parameters for method to call.
"""
if target is None:
raise ValueError("Don't call add_job with None")
self.loop.call_soon_threadsafe(self.async_add_job, target, *args)
@callback
def async_add_job(
self, target: Callable[..., Any], *args: Any
) -> asyncio.Future | None:
"""Add a job from within the event loop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
if target is None:
raise ValueError("Don't call async_add_job with None")
if asyncio.iscoroutine(target):
return self.async_create_task(cast(Coroutine, target))
return self.async_add_hass_job(HassJob(target), *args)
@callback
def async_add_hass_job(self, hassjob: HassJob, *args: Any) -> asyncio.Future | None:
"""Add a HassJob from within the event loop.
This method must be run in the event loop.
hassjob: HassJob to call.
args: parameters for method to call.
"""
if hassjob.job_type == HassJobType.Coroutinefunction:
task = self.loop.create_task(hassjob.target(*args))
elif hassjob.job_type == HassJobType.Callback:
self.loop.call_soon(hassjob.target, *args)
return None
else:
task = self.loop.run_in_executor( # type: ignore
None, hassjob.target, *args
)
# If a task is scheduled
if self._track_task:
self._pending_tasks.append(task)
return task
def create_task(self, target: Awaitable) -> None:
"""Add task to the executor pool.
target: target to call.
"""
self.loop.call_soon_threadsafe(self.async_create_task, target)
@callback
def async_create_task(self, target: Awaitable) -> asyncio.Task:
"""Create a task from within the eventloop.
This method must be run in the event loop.
target: target to call.
"""
task: asyncio.Task = self.loop.create_task(target)
if self._track_task:
self._pending_tasks.append(task)
return task
@callback
def async_add_executor_job(
self, target: Callable[..., T], *args: Any
) -> Awaitable[T]:
"""Add an executor job from within the event loop."""
task = self.loop.run_in_executor(None, target, *args)
# If a task is scheduled
if self._track_task:
self._pending_tasks.append(task)
return task
@callback
def async_track_tasks(self) -> None:
"""Track tasks so you can wait for all tasks to be done."""
self._track_task = True
@callback
def async_stop_track_tasks(self) -> None:
"""Stop track tasks so you can't wait for all tasks to be done."""
self._track_task = False
@callback
def async_run_hass_job(self, hassjob: HassJob, *args: Any) -> asyncio.Future | None:
"""Run a HassJob from within the event loop.
This method must be run in the event loop.
hassjob: HassJob
args: parameters for method to call.
"""
if hassjob.job_type == HassJobType.Callback:
hassjob.target(*args)
return None
return self.async_add_hass_job(hassjob, *args)
@callback
def async_run_job(
self, target: Callable[..., None | Awaitable], *args: Any
) -> asyncio.Future | None:
"""Run a job from within the event loop.
This method must be run in the event loop.
target: target to call.
args: parameters for method to call.
"""
if asyncio.iscoroutine(target):
return self.async_create_task(cast(Coroutine, target))
return self.async_run_hass_job(HassJob(target), *args)
def block_till_done(self) -> None:
"""Block until all pending work is done."""
asyncio.run_coroutine_threadsafe(
self.async_block_till_done(), self.loop
).result()
async def async_block_till_done(self) -> None:
"""Block until all pending work is done."""
# To flush out any call_soon_threadsafe
await asyncio.sleep(0)
start_time: float | None = None
while self._pending_tasks:
pending = [task for task in self._pending_tasks if not task.done()]
self._pending_tasks.clear()
if pending:
await self._await_and_log_pending(pending)
if start_time is None:
# Avoid calling monotonic() until we know
# we may need to start logging blocked tasks.
start_time = 0
elif start_time == 0:
# If we have waited twice then we set the start
# time
start_time = monotonic()
elif monotonic() - start_time > BLOCK_LOG_TIMEOUT:
# We have waited at least three loops and new tasks
# continue to block. At this point we start
# logging all waiting tasks.
for task in pending:
_LOGGER.debug("Waiting for task: %s", task)
else:
await asyncio.sleep(0)
async def _await_and_log_pending(self, pending: Iterable[Awaitable[Any]]) -> None:
"""Await and log tasks that take a long time."""
# pylint: disable=no-self-use
wait_time = 0
while pending:
_, pending = await asyncio.wait(pending, timeout=BLOCK_LOG_TIMEOUT)
if not pending:
return
wait_time += BLOCK_LOG_TIMEOUT
for task in pending:
_LOGGER.debug("Waited %s seconds for task: %s", wait_time, task)
def stop(self) -> None:
"""Stop Home Assistant and shuts down all threads."""
if self.state == CoreState.not_running: # just ignore
return
fire_coroutine_threadsafe(self.async_stop(), self.loop)
async def async_stop(self, exit_code: int = 0, *, force: bool = False) -> None:
"""Stop Home Assistant and shuts down all threads.
The "force" flag commands async_stop to proceed regardless of
        Home Assistant's current state. You should not set this flag
unless you're testing.
This method is a coroutine.
"""
if not force:
# Some tests require async_stop to run,
# regardless of the state of the loop.
if self.state == CoreState.not_running: # just ignore
return
if self.state in [CoreState.stopping, CoreState.final_write]:
_LOGGER.info("Additional call to async_stop was ignored")
return
if self.state == CoreState.starting:
# This may not work
_LOGGER.warning(
"Stopping Home Assistant before startup has completed may fail"
)
# stage 1
self.state = CoreState.stopping
self.async_track_tasks()
self.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
try:
async with self.timeout.async_timeout(STAGE_1_SHUTDOWN_TIMEOUT):
await self.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning(
"Timed out waiting for shutdown stage 1 to complete, the shutdown will continue"
)
# stage 2
self.state = CoreState.final_write
self.bus.async_fire(EVENT_HOMEASSISTANT_FINAL_WRITE)
try:
async with self.timeout.async_timeout(STAGE_2_SHUTDOWN_TIMEOUT):
await self.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning(
"Timed out waiting for shutdown stage 2 to complete, the shutdown will continue"
)
# stage 3
self.state = CoreState.not_running
self.bus.async_fire(EVENT_HOMEASSISTANT_CLOSE)
# Prevent run_callback_threadsafe from scheduling any additional
# callbacks in the event loop as callbacks created on the futures
# it returns will never run after the final `self.async_block_till_done`
# which will cause the futures to block forever when waiting for
# the `result()` which will cause a deadlock when shutting down the executor.
shutdown_run_callback_threadsafe(self.loop)
try:
async with self.timeout.async_timeout(STAGE_3_SHUTDOWN_TIMEOUT):
await self.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning(
"Timed out waiting for shutdown stage 3 to complete, the shutdown will continue"
)
self.exit_code = exit_code
self.state = CoreState.stopped
if self._stopped is not None:
self._stopped.set()
@attr.s(slots=True, frozen=True)
class Context:
"""The context that triggered something."""
user_id: str = attr.ib(default=None)
parent_id: str | None = attr.ib(default=None)
id: str = attr.ib(factory=uuid_util.random_uuid_hex)
def as_dict(self) -> dict[str, str | None]:
"""Return a dictionary representation of the context."""
return {"id": self.id, "parent_id": self.parent_id, "user_id": self.user_id}
class EventOrigin(enum.Enum):
"""Represent the origin of an event."""
local = "LOCAL"
remote = "REMOTE"
def __str__(self) -> str:
"""Return the event."""
return self.value
class Event:
"""Representation of an event within the bus."""
__slots__ = ["event_type", "data", "origin", "time_fired", "context"]
def __init__(
self,
event_type: str,
data: dict[str, Any] | None = None,
origin: EventOrigin = EventOrigin.local,
time_fired: datetime.datetime | None = None,
context: Context | None = None,
) -> None:
"""Initialize a new event."""
self.event_type = event_type
self.data = data or {}
self.origin = origin
self.time_fired = time_fired or dt_util.utcnow()
self.context: Context = context or Context()
def __hash__(self) -> int:
"""Make hashable."""
        # The only event type that shares context is TIME_CHANGED
return hash((self.event_type, self.context.id, self.time_fired))
def as_dict(self) -> dict[str, Any]:
"""Create a dict representation of this Event.
Async friendly.
"""
return {
"event_type": self.event_type,
"data": dict(self.data),
"origin": str(self.origin.value),
"time_fired": self.time_fired.isoformat(),
"context": self.context.as_dict(),
}
def __repr__(self) -> str:
"""Return the representation."""
if self.data:
return f"<Event {self.event_type}[{str(self.origin)[0]}]: {util.repr_helper(self.data)}>"
return f"<Event {self.event_type}[{str(self.origin)[0]}]>"
def __eq__(self, other: Any) -> bool:
"""Return the comparison."""
return ( # type: ignore
self.__class__ == other.__class__
and self.event_type == other.event_type
and self.data == other.data
and self.origin == other.origin
and self.time_fired == other.time_fired
and self.context == other.context
)
class EventBus:
"""Allow the firing of and listening for events."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize a new event bus."""
self._listeners: dict[str, list[tuple[HassJob, Callable | None]]] = {}
self._hass = hass
@callback
def async_listeners(self) -> dict[str, int]:
"""Return dictionary with events and the number of listeners.
This method must be run in the event loop.
"""
return {key: len(listeners) for key, listeners in self._listeners.items()}
@property
def listeners(self) -> dict[str, int]:
"""Return dictionary with events and the number of listeners."""
return run_callback_threadsafe(self._hass.loop, self.async_listeners).result()
def fire(
self,
event_type: str,
event_data: dict | None = None,
origin: EventOrigin = EventOrigin.local,
context: Context | None = None,
) -> None:
"""Fire an event."""
self._hass.loop.call_soon_threadsafe(
self.async_fire, event_type, event_data, origin, context
)
@callback
def async_fire(
self,
event_type: str,
event_data: dict[str, Any] | None = None,
origin: EventOrigin = EventOrigin.local,
context: Context | None = None,
time_fired: datetime.datetime | None = None,
) -> None:
"""Fire an event.
This method must be run in the event loop.
"""
if len(event_type) > MAX_LENGTH_EVENT_EVENT_TYPE:
raise MaxLengthExceeded(
event_type, "event_type", MAX_LENGTH_EVENT_EVENT_TYPE
)
listeners = self._listeners.get(event_type, [])
        # EVENT_HOMEASSISTANT_CLOSE should go only to its own listeners
match_all_listeners = self._listeners.get(MATCH_ALL)
if match_all_listeners is not None and event_type != EVENT_HOMEASSISTANT_CLOSE:
listeners = match_all_listeners + listeners
event = Event(event_type, event_data, origin, time_fired, context)
if event_type != EVENT_TIME_CHANGED:
_LOGGER.debug("Bus:Handling %s", event)
if not listeners:
return
for job, event_filter in listeners:
if event_filter is not None:
try:
if not event_filter(event):
continue
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error in event filter")
continue
self._hass.async_add_hass_job(job, event)
def listen(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
"""
async_remove_listener = run_callback_threadsafe(
self._hass.loop, self.async_listen, event_type, listener
).result()
def remove_listener() -> None:
"""Remove the listener."""
run_callback_threadsafe(self._hass.loop, async_remove_listener).result()
return remove_listener
@callback
def async_listen(
self,
event_type: str,
listener: Callable,
event_filter: Callable | None = None,
) -> CALLBACK_TYPE:
"""Listen for all events or events of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
An optional event_filter, which must be a callable decorated with
@callback that returns a boolean value, determines if the
listener callable should run.
This method must be run in the event loop.
"""
if event_filter is not None and not is_callback(event_filter):
raise HomeAssistantError(f"Event filter {event_filter} is not a callback")
return self._async_listen_filterable_job(
event_type, (HassJob(listener), event_filter)
)
@callback
def _async_listen_filterable_job(
self, event_type: str, filterable_job: tuple[HassJob, Callable | None]
) -> CALLBACK_TYPE:
self._listeners.setdefault(event_type, []).append(filterable_job)
def remove_listener() -> None:
"""Remove the listener."""
self._async_remove_listener(event_type, filterable_job)
return remove_listener
def listen_once(
self, event_type: str, listener: Callable[[Event], None]
) -> CALLBACK_TYPE:
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns function to unsubscribe the listener.
"""
async_remove_listener = run_callback_threadsafe(
self._hass.loop, self.async_listen_once, event_type, listener
).result()
def remove_listener() -> None:
"""Remove the listener."""
run_callback_threadsafe(self._hass.loop, async_remove_listener).result()
return remove_listener
@callback
def async_listen_once(self, event_type: str, listener: Callable) -> CALLBACK_TYPE:
"""Listen once for event of a specific type.
To listen to all events specify the constant ``MATCH_ALL``
as event_type.
Returns registered listener that can be used with remove_listener.
This method must be run in the event loop.
"""
filterable_job: tuple[HassJob, Callable | None] | None = None
@callback
def _onetime_listener(event: Event) -> None:
"""Remove listener from event bus and then fire listener."""
nonlocal filterable_job
if hasattr(_onetime_listener, "run"):
return
# Set variable so that we will never run twice.
# Because the event bus loop might have async_fire queued multiple
            # times, it's possible this listener may already be lined up
# multiple times as well.
# This will make sure the second time it does nothing.
setattr(_onetime_listener, "run", True)
assert filterable_job is not None
self._async_remove_listener(event_type, filterable_job)
self._hass.async_run_job(listener, event)
filterable_job = (HassJob(_onetime_listener), None)
return self._async_listen_filterable_job(event_type, filterable_job)
@callback
def _async_remove_listener(
self, event_type: str, filterable_job: tuple[HassJob, Callable | None]
) -> None:
"""Remove a listener of a specific event_type.
This method must be run in the event loop.
"""
try:
self._listeners[event_type].remove(filterable_job)
# delete event_type list if empty
if not self._listeners[event_type]:
self._listeners.pop(event_type)
except (KeyError, ValueError):
            # KeyError if the event_type key did not exist
# ValueError if listener did not exist within event_type
_LOGGER.exception(
"Unable to remove unknown job listener %s", filterable_job
)
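# Illustrative sketch (not part of Home Assistant itself): registering and
# removing a listener on the bus above. The listener body is a placeholder.
def _example_listen_for_all_events(hass: HomeAssistant) -> CALLBACK_TYPE:
    @callback
    def _log_event(event: Event) -> None:
        """Log every event; MATCH_ALL subscribes to all event types."""
        _LOGGER.debug("Saw event: %s", event)
    # async_listen returns a function that unsubscribes the listener again.
    return hass.bus.async_listen(MATCH_ALL, _log_event)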
class State:
"""Object to represent a state within the state machine.
entity_id: the entity that is represented.
state: the state of the entity
attributes: extra information on entity and state
last_changed: last time the state was changed, not the attributes.
last_updated: last time this object was updated.
context: Context in which it was created
domain: Domain of this state.
object_id: Object id of this state.
"""
__slots__ = [
"entity_id",
"state",
"attributes",
"last_changed",
"last_updated",
"context",
"domain",
"object_id",
"_as_dict",
]
def __init__(
self,
entity_id: str,
state: str,
attributes: Mapping[str, Any] | None = None,
last_changed: datetime.datetime | None = None,
last_updated: datetime.datetime | None = None,
context: Context | None = None,
validate_entity_id: bool | None = True,
) -> None:
"""Initialize a new state."""
state = str(state)
if validate_entity_id and not valid_entity_id(entity_id):
raise InvalidEntityFormatError(
f"Invalid entity id encountered: {entity_id}. "
"Format should be <domain>.<object_id>"
)
if not valid_state(state):
raise InvalidStateError(
f"Invalid state encountered for entity ID: {entity_id}. "
"State max length is 255 characters."
)
self.entity_id = entity_id.lower()
self.state = state
self.attributes = MappingProxyType(attributes or {})
self.last_updated = last_updated or dt_util.utcnow()
self.last_changed = last_changed or self.last_updated
self.context = context or Context()
self.domain, self.object_id = split_entity_id(self.entity_id)
self._as_dict: dict[str, Collection[Any]] | None = None
@property
def name(self) -> str:
"""Name of this state."""
return self.attributes.get(ATTR_FRIENDLY_NAME) or self.object_id.replace(
"_", " "
)
def as_dict(self) -> dict:
"""Return a dict representation of the State.
Async friendly.
To be used for JSON serialization.
Ensures: state == State.from_dict(state.as_dict())
"""
if not self._as_dict:
last_changed_isoformat = self.last_changed.isoformat()
if self.last_changed == self.last_updated:
last_updated_isoformat = last_changed_isoformat
else:
last_updated_isoformat = self.last_updated.isoformat()
self._as_dict = {
"entity_id": self.entity_id,
"state": self.state,
"attributes": dict(self.attributes),
"last_changed": last_changed_isoformat,
"last_updated": last_updated_isoformat,
"context": self.context.as_dict(),
}
return self._as_dict
@classmethod
def from_dict(cls, json_dict: dict) -> Any:
"""Initialize a state from a dict.
Async friendly.
        Ensures: state == State.from_dict(state.as_dict())
"""
if not (json_dict and "entity_id" in json_dict and "state" in json_dict):
return None
last_changed = json_dict.get("last_changed")
if isinstance(last_changed, str):
last_changed = dt_util.parse_datetime(last_changed)
last_updated = json_dict.get("last_updated")
if isinstance(last_updated, str):
last_updated = dt_util.parse_datetime(last_updated)
if context := json_dict.get("context"):
context = Context(id=context.get("id"), user_id=context.get("user_id"))
return cls(
json_dict["entity_id"],
json_dict["state"],
json_dict.get("attributes"),
last_changed,
last_updated,
context,
)
def __eq__(self, other: Any) -> bool:
"""Return the comparison of the state."""
return ( # type: ignore
self.__class__ == other.__class__
and self.entity_id == other.entity_id
and self.state == other.state
and self.attributes == other.attributes
and self.context == other.context
)
def __repr__(self) -> str:
"""Return the representation of the states."""
attrs = f"; {util.repr_helper(self.attributes)}" if self.attributes else ""
return (
f"<state {self.entity_id}={self.state}{attrs}"
f" @ {dt_util.as_local(self.last_changed).isoformat()}>"
)
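# Illustrative sketch (not part of Home Assistant itself) of the round-trip
# promised by State.as_dict()/State.from_dict(); the entity id, state and
# attributes below are placeholders.
def _example_state_roundtrip() -> bool:
    original = State("sensor.outside_temperature", "21.5", {"unit_of_measurement": "°C"})
    restored = State.from_dict(original.as_dict())
    # last_changed/last_updated are not part of __eq__, so this compares
    # entity_id, state, attributes and context only.
    return original == restored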
class StateMachine:
"""Helper class that tracks the state of different entities."""
def __init__(self, bus: EventBus, loop: asyncio.events.AbstractEventLoop) -> None:
"""Initialize state machine."""
self._states: dict[str, State] = {}
self._reservations: set[str] = set()
self._bus = bus
self._loop = loop
def entity_ids(self, domain_filter: str | None = None) -> list[str]:
"""List of entity ids that are being tracked."""
future = run_callback_threadsafe(
self._loop, self.async_entity_ids, domain_filter
)
return future.result()
@callback
def async_entity_ids(
self, domain_filter: str | Iterable | None = None
) -> list[str]:
"""List of entity ids that are being tracked.
This method must be run in the event loop.
"""
if domain_filter is None:
return list(self._states)
if isinstance(domain_filter, str):
domain_filter = (domain_filter.lower(),)
return [
state.entity_id
for state in self._states.values()
if state.domain in domain_filter
]
@callback
def async_entity_ids_count(
self, domain_filter: str | Iterable | None = None
) -> int:
"""Count the entity ids that are being tracked.
This method must be run in the event loop.
"""
if domain_filter is None:
return len(self._states)
if isinstance(domain_filter, str):
domain_filter = (domain_filter.lower(),)
return len(
[None for state in self._states.values() if state.domain in domain_filter]
)
def all(self, domain_filter: str | Iterable | None = None) -> list[State]:
"""Create a list of all states."""
return run_callback_threadsafe(
self._loop, self.async_all, domain_filter
).result()
@callback
def async_all(self, domain_filter: str | Iterable | None = None) -> list[State]:
"""Create a list of all states matching the filter.
This method must be run in the event loop.
"""
if domain_filter is None:
return list(self._states.values())
if isinstance(domain_filter, str):
domain_filter = (domain_filter.lower(),)
return [
state for state in self._states.values() if state.domain in domain_filter
]
def get(self, entity_id: str) -> State | None:
"""Retrieve state of entity_id or None if not found.
Async friendly.
"""
return self._states.get(entity_id.lower())
def is_state(self, entity_id: str, state: str) -> bool:
"""Test if entity exists and is in specified state.
Async friendly.
"""
state_obj = self.get(entity_id)
return state_obj is not None and state_obj.state == state
def remove(self, entity_id: str) -> bool:
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
"""
return run_callback_threadsafe(
self._loop, self.async_remove, entity_id
).result()
@callback
def async_remove(self, entity_id: str, context: Context | None = None) -> bool:
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
This method must be run in the event loop.
"""
entity_id = entity_id.lower()
old_state = self._states.pop(entity_id, None)
if entity_id in self._reservations:
self._reservations.remove(entity_id)
if old_state is None:
return False
self._bus.async_fire(
EVENT_STATE_CHANGED,
{"entity_id": entity_id, "old_state": old_state, "new_state": None},
EventOrigin.local,
context=context,
)
return True
def set(
self,
entity_id: str,
new_state: str,
attributes: Mapping[str, Any] | None = None,
force_update: bool = False,
context: Context | None = None,
) -> None:
"""Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
"""
run_callback_threadsafe(
self._loop,
self.async_set,
entity_id,
new_state,
attributes,
force_update,
context,
).result()
@callback
def async_reserve(self, entity_id: str) -> None:
"""Reserve a state in the state machine for an entity being added.
This must not fire an event when the state is reserved.
This avoids a race condition where multiple entities with the same
entity_id are added.
"""
entity_id = entity_id.lower()
if entity_id in self._states or entity_id in self._reservations:
raise HomeAssistantError(
"async_reserve must not be called once the state is in the state machine."
)
self._reservations.add(entity_id)
@callback
def async_available(self, entity_id: str) -> bool:
"""Check to see if an entity_id is available to be used."""
entity_id = entity_id.lower()
return entity_id not in self._states and entity_id not in self._reservations
@callback
def async_set(
self,
entity_id: str,
new_state: str,
attributes: Mapping[str, Any] | None = None,
force_update: bool = False,
context: Context | None = None,
) -> None:
"""Set the state of an entity, add entity if it does not exist.
Attributes is an optional dict to specify attributes of this state.
If you just update the attributes and not the state, last changed will
not be affected.
This method must be run in the event loop.
"""
entity_id = entity_id.lower()
new_state = str(new_state)
attributes = attributes or {}
if (old_state := self._states.get(entity_id)) is None:
same_state = False
same_attr = False
last_changed = None
else:
same_state = old_state.state == new_state and not force_update
same_attr = old_state.attributes == MappingProxyType(attributes)
last_changed = old_state.last_changed if same_state else None
if same_state and same_attr:
return
if context is None:
context = Context()
now = dt_util.utcnow()
state = State(
entity_id,
new_state,
attributes,
last_changed,
now,
context,
old_state is None,
)
self._states[entity_id] = state
self._bus.async_fire(
EVENT_STATE_CHANGED,
{"entity_id": entity_id, "old_state": old_state, "new_state": state},
EventOrigin.local,
context,
time_fired=now,
)
class Service:
"""Representation of a callable service."""
__slots__ = ["job", "schema"]
def __init__(
self,
func: Callable,
schema: vol.Schema | None,
context: Context | None = None,
) -> None:
"""Initialize a service."""
self.job = HassJob(func)
self.schema = schema
class ServiceCall:
"""Representation of a call to a service."""
__slots__ = ["domain", "service", "data", "context"]
def __init__(
self,
domain: str,
service: str,
data: dict | None = None,
context: Context | None = None,
) -> None:
"""Initialize a service call."""
self.domain = domain.lower()
self.service = service.lower()
self.data = MappingProxyType(data or {})
self.context = context or Context()
def __repr__(self) -> str:
"""Return the representation of the service."""
if self.data:
return (
f"<ServiceCall {self.domain}.{self.service} "
f"(c:{self.context.id}): {util.repr_helper(self.data)}>"
)
return f"<ServiceCall {self.domain}.{self.service} (c:{self.context.id})>"
class ServiceRegistry:
"""Offer the services over the eventbus."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize a service registry."""
self._services: dict[str, dict[str, Service]] = {}
self._hass = hass
@property
def services(self) -> dict[str, dict[str, Service]]:
"""Return dictionary with per domain a list of available services."""
return run_callback_threadsafe(self._hass.loop, self.async_services).result()
@callback
def async_services(self) -> dict[str, dict[str, Service]]:
"""Return dictionary with per domain a list of available services.
This method must be run in the event loop.
"""
return {domain: service.copy() for domain, service in self._services.items()}
def has_service(self, domain: str, service: str) -> bool:
"""Test if specified service exists.
Async friendly.
"""
return service.lower() in self._services.get(domain.lower(), [])
def register(
self,
domain: str,
service: str,
service_func: Callable,
schema: vol.Schema | None = None,
) -> None:
"""
Register a service.
Schema is called to coerce and validate the service data.
"""
run_callback_threadsafe(
self._hass.loop, self.async_register, domain, service, service_func, schema
).result()
@callback
def async_register(
self,
domain: str,
service: str,
service_func: Callable,
schema: vol.Schema | None = None,
) -> None:
"""
Register a service.
Schema is called to coerce and validate the service data.
This method must be run in the event loop.
"""
domain = domain.lower()
service = service.lower()
service_obj = Service(service_func, schema)
if domain in self._services:
self._services[domain][service] = service_obj
else:
self._services[domain] = {service: service_obj}
self._hass.bus.async_fire(
EVENT_SERVICE_REGISTERED, {ATTR_DOMAIN: domain, ATTR_SERVICE: service}
)
def remove(self, domain: str, service: str) -> None:
"""Remove a registered service from service handler."""
run_callback_threadsafe(
self._hass.loop, self.async_remove, domain, service
).result()
@callback
def async_remove(self, domain: str, service: str) -> None:
"""Remove a registered service from service handler.
This method must be run in the event loop.
"""
domain = domain.lower()
service = service.lower()
if service not in self._services.get(domain, {}):
_LOGGER.warning("Unable to remove unknown service %s/%s", domain, service)
return
self._services[domain].pop(service)
if not self._services[domain]:
self._services.pop(domain)
self._hass.bus.async_fire(
EVENT_SERVICE_REMOVED, {ATTR_DOMAIN: domain, ATTR_SERVICE: service}
)
def call(
self,
domain: str,
service: str,
service_data: dict | None = None,
blocking: bool = False,
context: Context | None = None,
limit: float | None = SERVICE_CALL_LIMIT,
target: dict | None = None,
) -> bool | None:
"""
Call a service.
See description of async_call for details.
"""
return asyncio.run_coroutine_threadsafe(
self.async_call(
domain, service, service_data, blocking, context, limit, target
),
self._hass.loop,
).result()
async def async_call(
self,
domain: str,
service: str,
service_data: dict | None = None,
blocking: bool = False,
context: Context | None = None,
limit: float | None = SERVICE_CALL_LIMIT,
target: dict | None = None,
) -> bool | None:
"""
Call a service.
Specify blocking=True to wait until service is executed.
Waits a maximum of limit, which may be None for no timeout.
If blocking = True, will return boolean if service executed
successfully within limit.
This method will fire an event to indicate the service has been called.
Because the service is sent as an event you are not allowed to use
the keys ATTR_DOMAIN and ATTR_SERVICE in your service_data.
This method is a coroutine.
"""
domain = domain.lower()
service = service.lower()
context = context or Context()
service_data = service_data or {}
try:
handler = self._services[domain][service]
except KeyError:
raise ServiceNotFound(domain, service) from None
if target:
service_data.update(target)
if handler.schema:
try:
processed_data = handler.schema(service_data)
except vol.Invalid:
_LOGGER.debug(
"Invalid data for service call %s.%s: %s",
domain,
service,
service_data,
)
raise
else:
processed_data = service_data
service_call = ServiceCall(domain, service, processed_data, context)
self._hass.bus.async_fire(
EVENT_CALL_SERVICE,
{
ATTR_DOMAIN: domain.lower(),
ATTR_SERVICE: service.lower(),
ATTR_SERVICE_DATA: service_data,
},
context=context,
)
coro = self._execute_service(handler, service_call)
if not blocking:
self._run_service_in_background(coro, service_call)
return None
task = self._hass.async_create_task(coro)
try:
await asyncio.wait({task}, timeout=limit)
except asyncio.CancelledError:
# Task calling us was cancelled, so cancel service call task, and wait for
# it to be cancelled, within reason, before leaving.
_LOGGER.debug("Service call was cancelled: %s", service_call)
task.cancel()
await asyncio.wait({task}, timeout=SERVICE_CALL_LIMIT)
raise
if task.cancelled():
# Service call task was cancelled some other way, such as during shutdown.
_LOGGER.debug("Service was cancelled: %s", service_call)
raise asyncio.CancelledError
if task.done():
# Propagate any exceptions that might have happened during service call.
task.result()
# Service call completed successfully!
return True
# Service call task did not complete before timeout expired.
# Let it keep running in background.
self._run_service_in_background(task, service_call)
_LOGGER.debug("Service did not complete before timeout: %s", service_call)
return False
def _run_service_in_background(
self, coro_or_task: Coroutine | asyncio.Task, service_call: ServiceCall
) -> None:
"""Run service call in background, catching and logging any exceptions."""
async def catch_exceptions() -> None:
try:
await coro_or_task
except Unauthorized:
_LOGGER.warning(
"Unauthorized service called %s/%s",
service_call.domain,
service_call.service,
)
except asyncio.CancelledError:
_LOGGER.debug("Service was cancelled: %s", service_call)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error executing service: %s", service_call)
self._hass.async_create_task(catch_exceptions())
async def _execute_service(
self, handler: Service, service_call: ServiceCall
) -> None:
"""Execute a service."""
if handler.job.job_type == HassJobType.Coroutinefunction:
await handler.job.target(service_call)
elif handler.job.job_type == HassJobType.Callback:
handler.job.target(service_call)
else:
await self._hass.async_add_executor_job(handler.job.target, service_call)
class Config:
"""Configuration settings for Home Assistant."""
def __init__(self, hass: HomeAssistant) -> None:
"""Initialize a new config object."""
self.hass = hass
self.latitude: float = 0
self.longitude: float = 0
self.elevation: int = 0
self.location_name: str = "Home"
self.time_zone: str = "UTC"
self.units: UnitSystem = METRIC_SYSTEM
self.internal_url: str | None = None
self.external_url: str | None = None
self.currency: str = "EUR"
self.config_source: str = "default"
# If True, pip install is skipped for requirements on startup
self.skip_pip: bool = False
# List of loaded components
self.components: set[str] = set()
# API (HTTP) server configuration, see components.http.ApiConfig
self.api: Any | None = None
# Directory that holds the configuration
self.config_dir: str | None = None
# List of allowed external dirs to access
self.allowlist_external_dirs: set[str] = set()
# List of allowed external URLs that integrations may use
self.allowlist_external_urls: set[str] = set()
# Dictionary of Media folders that integrations may use
self.media_dirs: dict[str, str] = {}
# If Home Assistant is running in safe mode
self.safe_mode: bool = False
# Use legacy template behavior
self.legacy_templates: bool = False
def distance(self, lat: float, lon: float) -> float | None:
"""Calculate distance from Home Assistant.
Async friendly.
"""
return self.units.length(
location.distance(self.latitude, self.longitude, lat, lon), LENGTH_METERS
)
def path(self, *path: str) -> str:
"""Generate path to the file within the configuration directory.
Async friendly.
"""
if self.config_dir is None:
raise HomeAssistantError("config_dir is not set")
return os.path.join(self.config_dir, *path)
def is_allowed_external_url(self, url: str) -> bool:
"""Check if an external URL is allowed."""
parsed_url = f"{str(yarl.URL(url))}/"
return any(
allowed
for allowed in self.allowlist_external_urls
if parsed_url.startswith(allowed)
)
def is_allowed_path(self, path: str) -> bool:
"""Check if the path is valid for access from outside."""
assert path is not None
thepath = pathlib.Path(path)
try:
            # The file path does not have to exist (its parent should)
if thepath.exists():
thepath = thepath.resolve()
else:
thepath = thepath.parent.resolve()
except (FileNotFoundError, RuntimeError, PermissionError):
return False
for allowed_path in self.allowlist_external_dirs:
try:
thepath.relative_to(allowed_path)
return True
except ValueError:
pass
return False
def as_dict(self) -> dict:
"""Create a dictionary representation of the configuration.
Async friendly.
"""
return {
"latitude": self.latitude,
"longitude": self.longitude,
"elevation": self.elevation,
"unit_system": self.units.as_dict(),
"location_name": self.location_name,
"time_zone": self.time_zone,
"components": self.components,
"config_dir": self.config_dir,
# legacy, backwards compat
"whitelist_external_dirs": self.allowlist_external_dirs,
"allowlist_external_dirs": self.allowlist_external_dirs,
"allowlist_external_urls": self.allowlist_external_urls,
"version": __version__,
"config_source": self.config_source,
"safe_mode": self.safe_mode,
"state": self.hass.state.value,
"external_url": self.external_url,
"internal_url": self.internal_url,
"currency": self.currency,
}
def set_time_zone(self, time_zone_str: str) -> None:
"""Help to set the time zone."""
if time_zone := dt_util.get_time_zone(time_zone_str):
self.time_zone = time_zone_str
dt_util.set_default_time_zone(time_zone)
else:
raise ValueError(f"Received invalid time zone {time_zone_str}")
@callback
def _update(
self,
*,
source: str,
latitude: float | None = None,
longitude: float | None = None,
elevation: int | None = None,
unit_system: str | None = None,
location_name: str | None = None,
time_zone: str | None = None,
# pylint: disable=dangerous-default-value # _UNDEFs not modified
external_url: str | dict | None = _UNDEF,
internal_url: str | dict | None = _UNDEF,
currency: str | None = None,
) -> None:
"""Update the configuration from a dictionary."""
self.config_source = source
if latitude is not None:
self.latitude = latitude
if longitude is not None:
self.longitude = longitude
if elevation is not None:
self.elevation = elevation
if unit_system is not None:
if unit_system == CONF_UNIT_SYSTEM_IMPERIAL:
self.units = IMPERIAL_SYSTEM
else:
self.units = METRIC_SYSTEM
if location_name is not None:
self.location_name = location_name
if time_zone is not None:
self.set_time_zone(time_zone)
if external_url is not _UNDEF:
self.external_url = cast(Optional[str], external_url)
if internal_url is not _UNDEF:
self.internal_url = cast(Optional[str], internal_url)
if currency is not None:
self.currency = currency
async def async_update(self, **kwargs: Any) -> None:
"""Update the configuration from a dictionary."""
self._update(source=SOURCE_STORAGE, **kwargs)
await self.async_store()
self.hass.bus.async_fire(EVENT_CORE_CONFIG_UPDATE, kwargs)
async def async_load(self) -> None:
"""Load [homeassistant] core config."""
store = self.hass.helpers.storage.Store(
CORE_STORAGE_VERSION, CORE_STORAGE_KEY, private=True
)
if not (data := await store.async_load()):
return
# In 2021.9 we fixed validation to disallow a path (because that's never correct)
# but this data still lives in storage, so we print a warning.
if data.get("external_url") and urlparse(data["external_url"]).path not in (
"",
"/",
):
_LOGGER.warning("Invalid external_url set. It's not allowed to have a path")
if data.get("internal_url") and urlparse(data["internal_url"]).path not in (
"",
"/",
):
_LOGGER.warning("Invalid internal_url set. It's not allowed to have a path")
self._update(
source=SOURCE_STORAGE,
latitude=data.get("latitude"),
longitude=data.get("longitude"),
elevation=data.get("elevation"),
unit_system=data.get("unit_system"),
location_name=data.get("location_name"),
time_zone=data.get("time_zone"),
external_url=data.get("external_url", _UNDEF),
internal_url=data.get("internal_url", _UNDEF),
currency=data.get("currency"),
)
async def async_store(self) -> None:
"""Store [homeassistant] core config."""
data = {
"latitude": self.latitude,
"longitude": self.longitude,
"elevation": self.elevation,
"unit_system": self.units.name,
"location_name": self.location_name,
"time_zone": self.time_zone,
"external_url": self.external_url,
"internal_url": self.internal_url,
"currency": self.currency,
}
store = self.hass.helpers.storage.Store(
CORE_STORAGE_VERSION, CORE_STORAGE_KEY, private=True
)
await store.async_save(data)
def _async_create_timer(hass: HomeAssistant) -> None:
"""Create a timer that will start on HOMEASSISTANT_START."""
handle = None
timer_context = Context()
def schedule_tick(now: datetime.datetime) -> None:
"""Schedule a timer tick when the next second rolls around."""
nonlocal handle
slp_seconds = 1 - (now.microsecond / 10 ** 6)
target = monotonic() + slp_seconds
handle = hass.loop.call_later(slp_seconds, fire_time_event, target)
@callback
def fire_time_event(target: float) -> None:
"""Fire next time event."""
now = dt_util.utcnow()
hass.bus.async_fire(
EVENT_TIME_CHANGED, {ATTR_NOW: now}, time_fired=now, context=timer_context
)
# If we are more than a second late, a tick was missed
if (late := monotonic() - target) > 1:
hass.bus.async_fire(
EVENT_TIMER_OUT_OF_SYNC,
{ATTR_SECONDS: late},
time_fired=now,
context=timer_context,
)
schedule_tick(now)
@callback
def stop_timer(_: Event) -> None:
"""Stop the timer."""
if handle is not None:
handle.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_timer)
_LOGGER.info("Timer:starting")
schedule_tick(dt_util.utcnow())
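# A minimal usage sketch of the service registry defined above (assuming a
# running HomeAssistant instance `hass` inside the event loop; the
# "demo.say_hello" service and its handler are hypothetical examples):
#
#   async def say_hello(call: ServiceCall) -> None:
#       _LOGGER.info("Hello %s", call.data.get("name", "world"))
#
#   hass.services.async_register("demo", "say_hello", say_hello)
#   await hass.services.async_call("demo", "say_hello", {"name": "dev"}, blocking=True)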
|
the-stack_0_27598
|
# Script to build bootstrapper image
import argparse
import os
import shutil
import tempfile
import yaml
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
REG_FOLDER = "reg_tmp"
# Clone repos to tmp folder and build docker images
def main(unparsed_args=None):
parser = argparse.ArgumentParser(
description="Build bootstrapper image with ksonnet registries specified in config.")
parser.add_argument(
"--image",
default="",
type=str,
help="Image name.")
parser.add_argument(
"--build_args",
default="",
type=str,
help="Docker build args.")
parser.add_argument(
"--build_opts",
default="",
type=str,
help="Docker build opts.")
parser.add_argument(
"--config",
default="image_registries.yaml",
type=str,
help="Relative path to bootstrapper config file specify which registries to include.")
parser.add_argument(
"--test_registry",
default="",
type=str,
help="e2e test target registry, format is <registry name>:<registry path>")
parser.add_argument(
"--target",
default="",
type=str,
help="Docker build target.")
args = parser.parse_args(args=unparsed_args)
tmp_dir = os.path.join(FILE_PATH, REG_FOLDER)
# Make local dir, clone repos into it and Docker build will copy whole folder.
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir)
os_tmp_dir = tempfile.mkdtemp()
print("Using tmp dir: " + os_tmp_dir)
with open(os.path.join(FILE_PATH, args.config), 'r') as conf_input:
conf = yaml.load(conf_input)
test_reg_name, test_reg_path = "", ""
if args.test_registry != "" and len(args.test_registry.split(":")) == 2:
test_reg_name = args.test_registry.split(":")[0]
test_reg_path = args.test_registry.split(":")[1]
for reg in conf['registries']:
if test_reg_name == reg["name"]:
excludes = [".git*", ".idea", "vendor", "node_modules"]
sync_cmd = "rsync -a %s %s %s" % (test_reg_path, os_tmp_dir, " ".join(["--exclude=" + term for term in excludes]))
print(sync_cmd)
os.system(sync_cmd)
else:
reg_path = os.path.join(os_tmp_dir, reg["name"])
print("Adding registry %s from %s %s" % (reg["name"], reg["repo"], reg["version"]))
os.system("git clone --depth 1 --branch %s %s %s" % (reg["version"], reg["repo"], reg_path))
os.system("cp -r %s/* %s" % (os_tmp_dir, tmp_dir))
bargs=""
for buildarg in args.build_args.split(","):
bargs+="--build-arg "+buildarg+" "
print("docker build %s %s -t %s --build-arg registries=%s --target=%s %s" %
(args.build_opts, bargs, args.image, REG_FOLDER, args.target, FILE_PATH))
os.system("docker build %s %s -t %s --build-arg registries=%s --target=%s %s" %
(args.build_opts, bargs, args.image, REG_FOLDER, args.target, FILE_PATH))
if __name__ == '__main__':
main()
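# For reference, the --config file read above is expected to look roughly like
# this (registry name, repo and version below are illustrative values):
#
#   registries:
#     - name: kubeflow
#       repo: https://github.com/kubeflow/kubeflow.git
#       version: master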
|
the-stack_0_27599
|
import os
import shutil
import numpy as np
import pandas as pd
from pathlib import Path
from tqdm import tqdm
from numpy import loadtxt
from sklearn.model_selection import train_test_split
from utils import get_all_files_in_folder
def generate_train_test(data_dir, split):
images = get_all_files_in_folder(data_dir, ['*.jpg'])
with open('data/darknet_prepare_for_train/' + split + '.txt', "w") as outfile:
for image in images:
outfile.write('data/darknet_prepare_for_train/' + split + '/' + image.name)
outfile.write("\n")
outfile.close()
# darknet_path = Path('/home/vid/hdd/projects/darknet/my_data')
root_dir = Path('data/darknet_prepare_for_train/0_dataset')
root_data_jpg_dir = Path('data/darknet_prepare_for_train/data_jpg')
root_data_txt_dir = Path('data/darknet_prepare_for_train/data_txt')
train_dir = Path('data/darknet_prepare_for_train/train')
test_dir = Path('data/darknet_prepare_for_train/test')
backup_dir = Path('data/darknet_prepare_for_train/backup')
dirpath = root_data_jpg_dir
if dirpath.exists() and dirpath.is_dir():
shutil.rmtree(dirpath)
Path(dirpath).mkdir(parents=True, exist_ok=True)
dirpath = root_data_txt_dir
if dirpath.exists() and dirpath.is_dir():
shutil.rmtree(dirpath)
Path(dirpath).mkdir(parents=True, exist_ok=True)
dirpath = train_dir
if dirpath.exists() and dirpath.is_dir():
shutil.rmtree(dirpath)
Path(dirpath).mkdir(parents=True, exist_ok=True)
dirpath = test_dir
if dirpath.exists() and dirpath.is_dir():
shutil.rmtree(dirpath)
Path(dirpath).mkdir(parents=True, exist_ok=True)
dirpath = backup_dir
if dirpath.exists() and dirpath.is_dir():
shutil.rmtree(dirpath)
Path(dirpath).mkdir(parents=True, exist_ok=True)
all_images = get_all_files_in_folder(root_dir, ['*.jpg'])
all_txts = get_all_files_in_folder(root_dir, ['*.txt'])
print(f'Total images: {len(all_images)}')
print(f'Total labels: {len(all_txts)}')
val_part = 0.1
labels = []
images_list = []
for txt in tqdm(all_txts):
lines = loadtxt(str(txt), delimiter=' ', unpack=False).tolist()
if not isinstance(lines[0], list):
lines = [lines]
for line in lines:
labels.append(line[0])
images_list.append(txt.stem)
# classes + counts
labels_dict = pd.DataFrame(labels, columns=["x"]).groupby('x').size().to_dict()
all_labels = sum(labels_dict.values())
print('labels_dict', labels_dict)
labels_parts = []
for key, value in labels_dict.items():
labels_parts.append(value / all_labels)
print('labels_parts', labels_parts)
print('classes ', len(labels_parts))
stratify = False
min_part = 0.1
if np.min(labels_parts) < min_part:
    stratify = True
split_type = 2  # or 1
if stratify:
    if split_type == 1:
# add 0.05 for accuracy stratification
val_part += 0.05
# collect all classes
# stratify
X_train, X_test, y_train, y_test = train_test_split(images_list, labels, test_size=val_part, random_state=42,
stratify=labels, shuffle=True)
        # remove duplicates
X_train = np.unique(X_train).tolist()
X_test = np.unique(X_test).tolist()
# get images that exist in train and test
dublicates = []
for xtr in tqdm(X_train):
for xtt in X_test:
if xtr == xtt:
dublicates.append(xtr)
# delete such images from train and test
for dubl in dublicates:
X_train.remove(dubl)
X_test.remove(dubl)
        # put the duplicated images back into train/test while preserving the split ratio
for i, dubl in tqdm(enumerate(dublicates)):
if i % int((10 - (val_part) * 10)) == 0:
X_test.append(dubl)
else:
X_train.append(dubl)
# copy images and txts
for name in tqdm(X_train):
shutil.copy(root_dir.joinpath(name + '.jpg'), train_dir)
shutil.copy(root_dir.joinpath(name + '.txt'), train_dir)
for name in tqdm(X_test):
shutil.copy(root_dir.joinpath(name + '.jpg'), test_dir)
shutil.copy(root_dir.joinpath(name + '.txt'), test_dir)
# check stratification
all_txt_train = get_all_files_in_folder(train_dir, ['*.txt'])
# collect train classes and compare with all classes
labels_train = []
for txt in tqdm(all_txt_train):
lines = loadtxt(str(txt), delimiter=' ', unpack=False).tolist()
if not isinstance(lines[0], list):
lines = [lines]
for line in lines:
labels_train.append(line[0])
labels_train_dict = pd.DataFrame(labels_train, columns=["x"]).groupby('x').size().to_dict()
st = []
for key, value in labels_dict.items():
val = labels_train_dict[key] / value
st.append(val)
print(f'Class {key} | counts {value} | test_part {val}')
print('Train part:', np.mean(st))
else:
labels_dict[-1] = 99999999
        # assign each image a single class: its rarest one
x_all = []
labels_all = []
for txt in tqdm(all_txts):
lines = loadtxt(str(txt), delimiter=' ', unpack=False).tolist()
if not isinstance(lines[0], list):
lines = [lines]
lab = []
for line in lines:
lab.append(line[0])
best_cat = -1
x_all.append(txt.stem)
for l in lab:
if labels_dict[l] < labels_dict[best_cat]:
best_cat = l
labels_all.append(best_cat)
# stratify
X_train, X_test, y_train, y_test = train_test_split(x_all, labels_all, test_size=val_part, random_state=42,
shuffle=True)
# copy images and txts
for name in tqdm(X_train):
shutil.copy(root_dir.joinpath(name + '.jpg'), train_dir)
shutil.copy(root_dir.joinpath(name + '.txt'), train_dir)
for name in tqdm(X_test):
shutil.copy(root_dir.joinpath(name + '.jpg'), test_dir)
shutil.copy(root_dir.joinpath(name + '.txt'), test_dir)
# check stratification
all_txt_train = get_all_files_in_folder(train_dir, ['*.txt'])
# collect train classes and compare with all classes
labels_train = []
for txt in tqdm(all_txt_train):
lines = loadtxt(str(txt), delimiter=' ', unpack=False).tolist()
if not isinstance(lines[0], list):
lines = [lines]
for line in lines:
labels_train.append(line[0])
labels_train_dict = pd.DataFrame(labels_train, columns=["x"]).groupby('x').size().to_dict()
st = []
labels_dict.pop(-1)
for key, value in labels_dict.items():
val = labels_train_dict[key] / value
st.append(val)
print(f'Class {key} | counts {value} | test_part {val}')
print('Train part:', np.mean(st))
else:
# for img in tqdm(all_images):
# shutil.copy(img, root_data_jpg_dir)
#
# for txt in tqdm(all_txts):
# shutil.copy(txt, root_data_txt_dir)
np.random.shuffle(all_images)
train_FileNames, val_FileNames = np.split(np.array(all_images), [int(len(all_images) * (1 - val_part))])
for name in tqdm(train_FileNames):
shutil.copy(name, train_dir)
shutil.copy(root_dir.joinpath(name.stem + '.txt'), train_dir)
for name in tqdm(val_FileNames):
shutil.copy(name, test_dir)
shutil.copy(root_dir.joinpath(name.stem + '.txt'), test_dir)
generate_train_test(train_dir, 'train')
generate_train_test(test_dir, 'test')
# copy cfg data
# shutil.copy('data/darknet_prepare_for_train/0_cfg/obj.data', darknet_path)
# shutil.copy('data/darknet_prepare_for_train/0_cfg/obj.names', darknet_path)
# shutil.copy('data/darknet_prepare_for_train/0_cfg/yolov4-obj-mycustom.cfg', darknet_path)
# shutil.copy('data/darknet_prepare_for_train/0_weights/yolov4-p5.conv.232', darknet_path)
# os.system("/home/vid/hdd/projects/darknet/darknet detector train "
# "/home/vid/hdd/projects/PycharmProjects/Object-Detection-Metrics/data/darknet_prepare_for_train/0_cfg/obj.data "
# "/home/vid/hdd/projects/PycharmProjects/Object-Detection-Metrics/data/darknet_prepare_for_train/0_cfg/yolov4-obj-mycustom.cfg "
# "/home/vid/hdd/projects/PycharmProjects/Object-Detection-Metrics/data/darknet_prepare_for_train/0_weights/yolov4-p5.conv.232 -map")
# ./darknet detector train my_data/obj.data my_data/yolov4-obj-mycustom.cfg my_data/yolov4-p5.conv.232 -dont_show -map
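# For reference, each label .txt file read above is assumed to be in YOLO
# format, one object per line:
#   <class_id> <x_center> <y_center> <width> <height>
# e.g. "0 0.512 0.430 0.120 0.250" (illustrative values), which is what
# loadtxt(..., delimiter=' ') parses and what line[0] (the class id) relies on
# for stratification.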
|
the-stack_0_27601
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package device_reduce_sum_bench
# Module caffe2.experiments.python.device_reduce_sum_bench
import argparse
import itertools
import logging
import os
from six import add_metaclass
import numpy as np
from caffe2.python import workspace, core
from caffe2.python.hypothesis_test_util import runOpBenchmark, gpu_do
logging.basicConfig()
logger = logging.getLogger(os.path.basename(__file__))
logger.setLevel(logging.INFO)
ALL_BENCHMARKS = {}
class BenchmarkMeta(type):
def __new__(metacls, name, bases, class_dict):
cls = type.__new__(metacls, name, bases, class_dict)
if name != 'Benchmark':
ALL_BENCHMARKS[name] = cls
return cls
@add_metaclass(BenchmarkMeta)
class Benchmark(object):
def __init__(self):
self.results = []
def display(self):
print('Results ({}):'.format(type(self).__name__))
print('input size ms/iter')
print('------------------------------ -----------')
for size, ms in self.results:
print('{!s:<30} {:.4f}'.format(size, ms))
class SumElements(Benchmark):
def run(self):
op = core.CreateOperator(
"SumElements",
["X"],
["y"]
)
        for n in (10 ** exp for exp in range(10)):
X = np.random.rand(n).astype(np.float32)
logger.info('Running benchmark for n = {}'.format(n))
ret = runOpBenchmark(gpu_do, op, inputs=[X])
self.results.append((n, ret[1]))
class SumSqrElements(Benchmark):
def run(self):
op = core.CreateOperator(
"SumSqrElements",
["X"],
["y"]
)
        for n in (10 ** exp for exp in range(10)):
X = np.random.rand(n).astype(np.float32)
logger.info('Running benchmark for n = {}'.format(n))
ret = runOpBenchmark(gpu_do, op, inputs=[X])
self.results.append((n, ret[1]))
class SoftMaxWithLoss(Benchmark):
def run(self):
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label"],
["probs", "avgloss"],
)
        for n in (10 ** exp for exp in range(8)):
            for D in (10 ** exp for exp in range(3)):
X = np.random.rand(n, D).astype(np.float32)
label = (np.random.rand(n) * D).astype(np.int32)
logger.info('Running benchmark for n = {}, D= {}'.format(n, D))
ret = runOpBenchmark(gpu_do, op, inputs=[X, label])
self.results.append(((n, D), ret[1]))
def parse_args():
parser = argparse.ArgumentParser(os.path.basename(__file__))
parser.add_argument('-b', '--benchmarks', nargs='+',
default=ALL_BENCHMARKS.keys(),
help='benchmarks to run (default: %(default)s))')
return parser.parse_args()
def main():
args = parse_args()
benchmarks = [ALL_BENCHMARKS[name]() for name in args.benchmarks]
for bench in benchmarks:
bench.run()
for bench in benchmarks:
bench.display()
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
main()
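# Example invocation (assumes a CUDA-enabled Caffe2 build, since the benchmarks
# run through gpu_do):
#   python device_reduce_sum_bench.py --benchmarks SumElements SumSqrElements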
|
the-stack_0_27602
|
"""
Pedigree-related functions
"""
from abc import ABC, abstractmethod
import math
from typing import Optional
from collections import Counter, OrderedDict, defaultdict
from dataclasses import dataclass
import logging
logger = logging.getLogger(__name__)
class ParseError(Exception):
pass
@dataclass
class RecombinationMapEntry:
position: int
    cum_distance: float
@dataclass(order=True)
class RecombinationEvent:
position1: int
position2: int
transmitted_hap_father1: int
transmitted_hap_father2: int
transmitted_hap_mother1: int
transmitted_hap_mother2: int
recombination_cost: float
def _interpolate(point, start_pos, end_pos, start_value, end_value):
assert start_pos <= point <= end_pos
if start_pos == point == end_pos:
assert start_value == end_value
return start_value
return start_value + ((point - start_pos) * (end_value - start_value) / (end_pos - start_pos))
MINIMUM_GENETIC_DISTANCE = 1e-10 # cM
def recombination_cost_map(genetic_map, positions):
assert len(genetic_map) > 0
# Step 1: compute cumulative genetic distances from start of chromosome
# to each position.
cumulative_distances = []
# i and j are such that genetic_map[i].position <= position <= genetic_map[j].position
# i and j are None if no such values exist (because we are at the end of the list)
i = None
j = 0
for position in positions:
# update i to meet the invariant
if (i is None) and (genetic_map[0].position <= position):
i = 0
while (
(i is not None)
and (i + 1 < len(genetic_map))
and (genetic_map[i + 1].position <= position)
):
i += 1
# update j to meet the invariant
while (j is not None) and (genetic_map[j].position < position):
if j + 1 < len(genetic_map):
j += 1
else:
j = None
# interpolate
if i is None:
assert j is not None
d = _interpolate(position, 0, genetic_map[j].position, 0, genetic_map[j].cum_distance)
elif j is None:
# Point outside the genetic map --> extrapolating using average recombination rate
avg_rate = genetic_map[-1].cum_distance / genetic_map[-1].position
d = genetic_map[-1].cum_distance + (position - genetic_map[-1].position) * avg_rate
else:
assert genetic_map[i].position <= position <= genetic_map[j].position
d = _interpolate(
position,
genetic_map[i].position,
genetic_map[j].position,
genetic_map[i].cum_distance,
genetic_map[j].cum_distance,
)
cumulative_distances.append(d)
# Step 2: compute costs (= phred-scaled recombination probabilities between two positions)
result = [0]
for i in range(1, len(cumulative_distances)):
d = cumulative_distances[i] - cumulative_distances[i - 1]
d = max(d, MINIMUM_GENETIC_DISTANCE)
result.append(round(centimorgen_to_phred(d)))
return result
def centimorgen_to_phred(distance):
assert distance >= 0
if distance == 0:
raise ValueError("Cannot convert genetic distance of zero to phred.")
elif distance < 1e-10:
return -10 * (math.log10(distance) - 2)
else:
p = (1.0 - math.exp(-(2.0 * distance) / 100)) / 2.0
return -10 * math.log10(p)
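# Worked example for the conversion above: a distance of 1 cM gives
# p = (1 - exp(-2 * 1 / 100)) / 2 ≈ 0.0099, so the phred-scaled cost is
# -10 * log10(0.0099) ≈ 20.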
def mendelian_conflict(genotypem, genotypef, genotypec):
# TODO: Maybe inefficient
alleles_m = genotypem.as_vector()
alleles_f = genotypef.as_vector()
alleles_c = genotypec.as_vector()
if alleles_c[0] in alleles_m and alleles_c[1] in alleles_f:
return False
elif alleles_c[1] in alleles_m and alleles_c[0] in alleles_f:
return False
else:
return True
def find_recombination(transmission_vector, components, positions, recombcost):
assert len(transmission_vector) == len(positions) == len(recombcost)
assert set(components.keys()).issubset(set(positions))
position_to_index = {pos: i for i, pos in enumerate(positions)}
blocks = defaultdict(list)
for position, block_id in components.items():
blocks[block_id].append(position)
event_list = []
cum_recomb_cost = 0
for block_id, block in blocks.items():
block.sort()
block_transmission_vector = [transmission_vector[position_to_index[i]] for i in block]
block_recomb_cost = [recombcost[position_to_index[i]] for i in block]
if len(block) <= 2:
continue
for i in range(2, len(block)):
if block_transmission_vector[i - 1] != block_transmission_vector[i]:
event_list.append(
RecombinationEvent(
block[i - 1],
block[i],
block_transmission_vector[i - 1] % 2,
block_transmission_vector[i] % 2,
block_transmission_vector[i - 1] // 2,
block_transmission_vector[i] // 2,
block_recomb_cost[i],
)
)
cum_recomb_cost += block_recomb_cost[i]
logger.info("Cost accounted for by recombination events: %d", cum_recomb_cost)
event_list.sort()
return event_list
class RecombinationCostComputer(ABC):
@abstractmethod
def compute(self, positions):
pass
class GeneticMapRecombinationCostComputer(RecombinationCostComputer):
def __init__(self, genetic_map_path):
self._genetic_map = self.load_genetic_map(genetic_map_path)
@staticmethod
def load_genetic_map(filename):
genetic_map = []
warned_zero_distance = False
with open(filename) as fid:
for line_number, line in enumerate(fid, 1):
if line_number == 1:
continue
fields = line.strip().split()
if not fields:
# Skip empty lines
continue
if len(fields) != 3:
raise ParseError(
"Error at line {} of genetic map file '{}': "
"Found {} fields instead of 3".format(line_number, filename, len(fields))
)
try:
position = int(fields[0])
cum_distance = float(fields[2])
except ValueError as e:
raise ParseError(
"Error at line {} of genetic map file '{}': {}".format(
line_number, filename, e
)
)
genetic_map.append(
RecombinationMapEntry(position=position, cum_distance=cum_distance)
)
if len(genetic_map) >= 2:
if not warned_zero_distance and (
genetic_map[-2].cum_distance == genetic_map[-1].cum_distance
):
logger.warning("Zero genetic distances encountered in %s", filename)
warned_zero_distance = True
return genetic_map
def compute(self, positions):
return recombination_cost_map(self._genetic_map, positions)
class UniformRecombinationCostComputer(RecombinationCostComputer):
def __init__(self, recombination_rate):
self._recombination_rate = recombination_rate
@staticmethod
def uniform_recombination_map(recombrate, positions):
"""
For a list of positions and a constant recombination rate (in cM/Mb),
return a list "results" of the same length as "positions" such that
results[i] is the phred-scaled recombination probability between
positions[i-1] and positions[i].
"""
return [0] + [
round(centimorgen_to_phred((positions[i] - positions[i - 1]) * 1e-6 * recombrate))
for i in range(1, len(positions))
]
def compute(self, positions):
return self.uniform_recombination_map(self._recombination_rate, positions)
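# Illustrative numbers for the uniform map above: with recombrate = 1.26 cM/Mb
# and two positions 1 Mb apart, the step spans about 1.26 cM, which
# centimorgen_to_phred turns into a cost of roughly 19.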
@dataclass
class Trio:
"""Relationships are modelled as a set of trios (mother, father, child)."""
child: Optional[int]
father: Optional[int]
mother: Optional[int]
class PedReader:
"""
A parser for PED/FAM files as used by PLINK and other tools.
According to <http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml>:
The PED file is a white-space (space or tab) delimited file: the first six
columns are mandatory:
* Family ID
* Individual ID
* Paternal ID
* Maternal ID
* Sex (1=male; 2=female; other=unknown)
* Phenotype
All fields except the individual, maternal and paternal ID are ignored by
this class. The entire file is read upon construction.
"""
def __init__(self, file):
if isinstance(file, str):
with open(file) as f:
self.trios = self._parse(f)
else:
self.trios = self._parse(file)
@staticmethod
def _parse_record(line):
"""
Parse a single non-comment line of a PED or FAM file.
"""
fields = line.split()
if len(fields) < 6:
raise ParseError("Less than six fields found in PED/FAM file")
individual_id, paternal_id, maternal_id = fields[1:4]
if paternal_id == "0":
paternal_id = None
if maternal_id == "0":
maternal_id = None
return Trio(child=individual_id, father=paternal_id, mother=maternal_id)
def _parse(self, file):
trios = []
for line in file:
if line.startswith("#") or line == "\n":
continue
trios.append(self._parse_record(line))
self._sanity_check(trios)
return trios
@staticmethod
def _sanity_check(trios):
"""
Ensure that each individual occurs only once in the file.
"""
children = [trio.child for trio in trios]
if not children:
return
id, count = Counter(children).most_common()[0]
if count > 1:
raise ParseError("Individual {!r} occurs more than once in PED file".format(id))
def __iter__(self):
return iter(self.trios)
def samples(self):
"""Return a list of all mentioned individuals"""
samples = set()
for trio in self.trios:
if trio.child is None or trio.mother is None or trio.father is None:
continue
samples.add(trio.father)
samples.add(trio.mother)
samples.add(trio.child)
return list(samples)
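# Example of a PED/FAM line accepted by PedReader above (whitespace separated;
# only the individual, paternal and maternal IDs are used, "0" marks an unknown
# parent; the IDs below are made up):
#   FAM01 child1 father1 mother1 1 -9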
class CyclicGraphError(Exception):
pass
class Graph:
"""Directed graph that can sort topologically"""
def __init__(self):
# map node to a list of neighbors
self._neighbors = OrderedDict()
def add_edge(self, node1, node2):
"""The edge is directed from node1 to node2"""
if node1 not in self._neighbors:
self._neighbors[node1] = []
self._neighbors[node1].append(node2)
if node2 not in self._neighbors:
self._neighbors[node2] = []
def toposorted(self):
"""
Return nodes of the graph sorted topologically.
For all edges u -> v that the graph has, node v will appear
before node u.
"""
order = []
colors = {node: "white" for node in self._neighbors}
def visit(node):
assert colors[node] == "white"
colors[node] = "gray"
for neighbor in self._neighbors[node]:
if colors[neighbor] == "white":
visit(neighbor)
elif colors[neighbor] == "gray":
raise CyclicGraphError(
"Cycle involving {!r} and {!r} detected".format(node, neighbor)
)
order.append(node)
colors[node] = "black"
for node in self._neighbors:
if colors[node] == "white":
visit(node)
return order
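# Minimal sketch of the topological sort above; given the edge direction
# convention documented in toposorted(), parents come out before the child:
#
#   g = Graph()
#   g.add_edge("child", "father")
#   g.add_edge("child", "mother")
#   g.toposorted()  # ["father", "mother", "child"]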
|
the-stack_0_27603
|
# -*- coding: UTF-8 -*-
# version 2 uses a database instead of csv files.
import DBOperation
import StaticUtils
import VerdictAnalyser
import time
def update_case_table(db_conn, case):
fields_list = ['court_level', 'region', 'prosecutor', 'court_procedure', 'verdict']
data_list = list()
for f in fields_list:
data_list.append(case[f])
#print(fields_list, data_list)
#fields_list.append('analysed')
#data_list.append(5)
db_conn.multi_update(StaticUtils.case_table,
fields_list,
data_list,
'doc_id=\'{}\' and name=\'{}\''.format(case['doc_id'],
case['name']))
def update_defendant_table(db_conn, case):
for index, defendant in enumerate(case['defendant']):
fields_list = list()
data_list = list()
for k, v in defendant.items():
# if value is none, do nothing, ignore this field.
if v is not None:
fields_list.append(k)
data_list.append(v)
#print(fields_list, data_list)
defendant_id = case['doc_id'] + '{:0>2}'.format(index)
#print(defendant_id, defendant['name'])
db_defendant_id = db_conn.get(StaticUtils.defendant_table, 'defendant_id', f'defendant_id=\'{defendant_id}\'')
if not db_defendant_id:
add_defendant(db_conn, case['doc_id'], defendant['name'], defendant_id)
db_conn.multi_update(StaticUtils.defendant_table,
fields_list,
data_list,
'doc_id=\'{}\' and name=\'{}\''.format(case['doc_id'], defendant['name']))
def update_to_db(db_conn, case_list):
for case in case_list:
update_case_table(db_conn, case)
update_defendant_table(db_conn, case)
def add_defendant(db_conn, case_doc_id, case_defendant_name, defendant_id):
#INSERT INTO `sichuan_cases`.`defendent_list` (`doc_id`, `name`) VALUES ('0002099c-c066-4f99-a119-a850009ef04b', 'test');
db_conn.insert(StaticUtils.defendant_table, ('`doc_id`, `name`,`defendant_id`'), (case_doc_id, case_defendant_name, defendant_id))
def analyse_case(db_conn):
case_list_out = list()
case_list = list()
case_list = db_conn.get(StaticUtils.case_table, 'name, doc_id, court, YEAR(DATE), content', 'retry=2 and YEAR(DATE)=2017', rows=1000)
#print(case_list)
#case_list = db_conn.get(StaticUtils.case_table, 'name, doc_id, court, YEAR(DATE), content',
# 'doc_id=\'83801051-e508-462b-9602-a8ff00a65e95\'')
#for case_name, case_doc_id, case_court, case_year, case_content in case_list:
for case_info in case_list:
case = dict()
case['name'], case['doc_id'], case['court'], case['year'], case['content'] = case_info
db_conn.update(StaticUtils.case_table, 'retry', 3, 'doc_id=\'{}\''.format(case['doc_id']))
if not case['content'] or len(case['content']) < 100 :
db_conn.update(StaticUtils.case_table, 'analysed', -1, 'doc_id=\'{}\''.format(case['doc_id']))
continue
verdict = VerdictAnalyser.VerdictAnalyser(case['content'], case['year'])
# collect data for case table
case['court_level'] = verdict.get_court_level(case['court'])
case['region'] = verdict.get_region(case['court'])
case['prosecutor'] = verdict.get_prosecutor()
case['court_procedure'] = verdict.get_procedure()
case['verdict'] = verdict.get_verdict_name()
# collect data for defendant table
        defendant_info_list, defendant_name_list = [], []
        try:
            defendant_info_list, defendant_name_list = verdict.get_defendant_name_list(
                verdict.get_defendant_info_list())
        except Exception as e:
            print(e)
if not defendant_name_list:
db_conn.update(StaticUtils.case_table, 'analysed', 0, 'doc_id=\'{}\''.format(case['doc_id']))
continue
else:
db_conn.update(StaticUtils.case_table, 'analysed', len(defendant_name_list), 'doc_id=\'{}\''.format(case['doc_id']))
case['defendant'] = list()
convict_info = verdict.get_defendant_convict_info(defendant_name_list)
for i in range(len(defendant_name_list)):
lawyer_list = verdict.get_defendant_lawyer(defendant_info_list[i])
s_lawyer_list = verdict.get_defendant_s_lawyer(defendant_info_list[i])
case['defendant'].append(dict())
case['defendant'][i]['name'] = defendant_name_list[i]
case['defendant'][i]['nation'] = verdict.get_defendant_nation(defendant_info_list[i])
case['defendant'][i]['age'] = verdict.get_defendant_age(defendant_info_list[i])
case['defendant'][i]['sex'] = verdict.get_defendant_sex(defendant_info_list[i])
case['defendant'][i]['education'] = verdict.get_defendant_education(defendant_info_list[i])
case['defendant'][i]['job'] = verdict.get_defendant_job(defendant_info_list[i])
case['defendant'][i]['charge'] = verdict.get_defendant_charge(defendant_name_list[i], convict_info)
case['defendant'][i]['charge_c'] = verdict.get_charge_class(case['defendant'][i]['charge'])
case['defendant'][i]['prison'] = verdict.get_defendant_prison(defendant_name_list[i], convict_info)
case['defendant'][i]['prison_l'] = verdict.get_prison_len(case['defendant'][i]['prison'])
case['defendant'][i]['probation'] = verdict.get_defendant_probation(defendant_name_list[i], convict_info)
case['defendant'][i]['probation_l'] = verdict.get_prison_len(case['defendant'][i]['probation'])
case['defendant'][i]['fine'] = verdict.get_defendant_fine(defendant_name_list[i], convict_info)
case['defendant'][i]['lawyer_n'] = len(lawyer_list) if lawyer_list else None
case['defendant'][i]['s_lawyer_n'] = len(s_lawyer_list) if s_lawyer_list else None
case['defendant'][i]['has_lawyer'] = 'yes' if case['defendant'][i]['lawyer_n'] or case['defendant'][i]['s_lawyer_n'] else 'no'
case_list_out.append(case)
return case_list_out
def main():
db_sc_cases = DBOperation.MyDatabase('127.0.0.1', 'root', '082666')
t0 = time.time()
cases = analyse_case(db_sc_cases)
t1 = time.time()
update_to_db(db_sc_cases, cases)
t2 = time.time()
db_sc_cases.commit()
t3 = time.time()
print(t1-t0, t2-t1, t3-t2)
db_sc_cases.close()
if __name__ == "__main__":
main()
|
the-stack_0_27604
|
from typing import Optional, List, Tuple, cast
import numpy as np
try:
from xbbo.alg_auxiliary.lamcts import MCTS
from xbbo.alg_auxiliary.lamcts.latent_space import LatentConverterRNVP, LatentConverterVAE, LatentConverterPCA, LatentConverterCNN, LatentConverterIdentity
except Exception as e:
print(repr(e))
from xbbo.initial_design import ALL_avaliable_design
from xbbo.utils.constants import MAXINT
# from xbbo.configspace.feature_space import Uniform2Gaussian
from xbbo.search_algorithm.base import AbstractOptimizer
from xbbo.configspace.space import DenseConfiguration, DenseConfigurationSpace
from xbbo.core.trials import Trial, Trials
from . import alg_register
@alg_register.register('lamcts')
class LaMCTS(AbstractOptimizer):
def __init__(
self,
space: DenseConfigurationSpace,
seed: int = 42,
initial_design: str = 'lh',
init_budget: int = None,
suggest_limit: int = np.inf,
split_latent_model: str = 'identity',
split_latent_dims: int = None,
sample_latent_dims: int = None,
sample_latent_model: str = 'identity',
device: str = "cpu",
leaf_size=20,
kernel_type='rbf',
gamma_type='auto',
C_p: float = 10,
solver="cmaes",
split_metric='max',
cmaes_sigma_mult=1.0,
use_gpr=True,
treeify_freq=1,
init_within_leaf="mean",
splitter_type="kmeans",
normalize=True,
split_use_predict=True, # False->kmeans result; True->svm
**kwargs):
AbstractOptimizer.__init__(self,
space,
encoding_cat='bin',
encoding_ord='bin',
seed=seed,
suggest_limit=suggest_limit,
**kwargs)
# Uniform2Gaussian.__init__(self, )
if self.space.get_conditions():
raise NotImplementedError(
"LaMCTS optimizer currently does not support conditional space!"
)
self.dimension = self.space.get_dimensions()
self.initial_design = ALL_avaliable_design[initial_design](
self.space,
self.rng,
ta_run_limit=suggest_limit,
init_budget=init_budget,
**kwargs)
self.init_budget = self.initial_design.init_budget
self.initial_design_configs = self.initial_design.select_configurations(
)
self.bounds = self.space.get_bounds()
if split_latent_model == 'identity':
self.split_latent_converter = LatentConverterIdentity(
self.bounds,
dim=self.dimension,
latent_dim=split_latent_dims,
device=device,
rng=self.rng,
**kwargs)
else:
raise NotImplementedError
# elif latent_model == 'pca':
# latent_converter = LatentConverterPCA(args, latent_dim, device=device, **kwargs)
# elif latent_model == 'cnn':
# latent_converter = LatentConverterCNN(args, env_info, device=device)
# elif latent_model == 'vae':
# latent_converter = LatentConverterVAE(args, env_info, device=device)
# elif latent_model == 'realnvp':
# latent_converter = LatentConverterRNVP(args, env_info, device=device)
# elif latent_model == 'identity':
# latent_converter = LatentConverterIdentity(args, env_info, device=device)
if sample_latent_model == 'identity':
self.sample_latent_converter = LatentConverterIdentity(
self.bounds,
dim=self.dimension,
latent_dim=sample_latent_dims,
device=device,
rng=self.rng,
**kwargs)
else:
raise NotImplementedError
self.sample_latent_dims = self.sample_latent_converter.latent_dim
self.split_latent_dims = self.split_latent_converter.latent_dim
self.sample_latent_bounds = self.sample_latent_converter.bounds
# self.bounds = self.space.get_bounds()
# self.es = cma.CMAEvolutionStrategy([0.5] * self.dimension,
# 0.1,
# inopts={
# 'seed':
# self.rng.randint(MAXINT),
# 'bounds': [0, 1]
# })
# self.hp_num = len(configs)
self.trials = Trials(space, dim=self.dimension)
self.agent = MCTS(
self.space,
sample_latent_bounds=self.sample_latent_bounds,
dims=self.dimension,
split_latent_converter=self.split_latent_converter,
sample_latent_converter=self.sample_latent_converter,
split_latent_dims=self.split_latent_dims,
sample_latent_dims=self.sample_latent_dims,
solver=solver,
split_metric=split_metric,
cmaes_sigma_mult=cmaes_sigma_mult,
use_gpr=use_gpr,
treeify_freq=treeify_freq,
init_within_leaf=init_within_leaf,
splitter_type=splitter_type,
C_p=C_p,
leaf_size=leaf_size,
kernel_type=kernel_type,
gamma_type=gamma_type,
normalize=normalize,
rng=self.rng,
split_use_predict=split_use_predict,
**kwargs)
# best_x, best_fx = agent.search(iterations = args.iterations, samples_per_iteration=args.samples_per_iteration, treeify_freq=args.treeify_freq)
# assert func.counter == args.iterations
# return best_x.reshape(args.horizon, env_info['action_dims']), agent
def _suggest(self, n_suggestions=1):
trial_list = []
# currently only suggest one
if (self.trials.trials_num) < self.init_budget:
assert self.trials.trials_num % n_suggestions == 0
configs = self.initial_design_configs[
int(n_suggestions *
self.trials.trials_num):int(n_suggestions *
(self.trials.trials_num + 1))]
for config in configs:
trial_list.append(
Trial(configuration=config,
config_dict=config.get_dictionary(),
array=config.get_array(),
_latent_sample=None,
_leaf=None))
else:
# if (self.trials.trials_num) < self.min_sample:
# while len(trial_list) < n_suggestions: # remove history suggest
# config = self.cs.sample_configuration(size=1)[0]
# if not self.trials.is_contain(config):
# trial_list.append(
# Trial(configuration=config,
# config_dict=config.get_dictionary(),
# array=config.get_array()))
# return trial_list
leaf, latent_samples, samples = self.agent.suggest(n_suggestions)
for n in range(n_suggestions):
array = samples[n]
config = DenseConfiguration.from_array(self.space, array)
trial_list.append(
Trial(config,
config_dict=config.get_dictionary(),
array=array,
_latent_sample=latent_samples[n],
_leaf=leaf))
return trial_list
def _observe(self, trial_list):
for trial in trial_list:
self.trials.add_a_trial(trial)
# self.listx.append(self.array_to_feature(trial.array))
# self.listx.append(trial.array)
# self.listy.append(trial.observe_value)
self.agent.observe(trial._leaf, trial._latent_sample, trial.array,
-trial.observe_value) # LaMCTS Maximize
opt_class = LaMCTS
|
the-stack_0_27605
|
#!/usr/bin/env python
from flask import Flask
from flask import request
from flask import render_template
from flask import redirect, url_for
from raspipe import RasPipe
app = Flask(__name__)
rp = RasPipe(None)
rp.input_lines.append('starting up...')
rp.render_frame()
@app.route('/')
def index():
return render_template('index.html', rp=rp)
@app.route('/display', methods=['POST'])
def display():
rp.input_lines.append(request.form['line'])
rp.render_frame()
return redirect(url_for('index'))
@app.route('/quit')
def quit():
    func = request.environ.get('werkzeug.server.shutdown')
    if func is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    func()
return "Quitting..."
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
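# Example interaction once the server is running (Flask's default port 5000 is
# assumed; <pi-address> stands for the Raspberry Pi's IP):
#   curl -X POST -d "line=hello world" http://<pi-address>:5000/display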
|
the-stack_0_27606
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
from odoo.tools import float_compare
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
@api.multi
def _compute_qty_delivered(self):
super(SaleOrderLine, self)._compute_qty_delivered()
for line in self:
if line.qty_delivered_method == 'stock_move':
# In the case of a kit, we need to check if all components are shipped. Since the BOM might
# have changed, we don't compute the quantities but verify the move state.
bom = self.env['mrp.bom']._bom_find(product=line.product_id, company_id=line.company_id.id)
if bom and bom.type == 'phantom':
moves = line.move_ids.filtered(lambda m: m.picking_id and m.picking_id.state != 'cancel')
bom_delivered = moves and all([move.state == 'done' for move in moves])
if bom_delivered:
line.qty_delivered = line.product_uom_qty
else:
line.qty_delivered = 0.0
@api.multi
def _get_bom_component_qty(self, bom):
bom_quantity = self.product_uom._compute_quantity(1, bom.product_uom_id)
boms, lines = bom.explode(self.product_id, bom_quantity)
components = {}
for line, line_data in lines:
product = line.product_id.id
uom = line.product_uom_id
qty = line.product_qty
if components.get(product, False):
if uom.id != components[product]['uom']:
from_uom = uom
to_uom = self.env['uom.uom'].browse(components[product]['uom'])
qty = from_uom._compute_quantity(qty, to_uom)
components[product]['qty'] += qty
else:
# To be in the uom reference of the product
to_uom = self.env['product.product'].browse(product).uom_id
if uom.id != to_uom.id:
from_uom = uom
qty = from_uom._compute_quantity(qty, to_uom)
components[product] = {'qty': qty, 'uom': to_uom.id}
return components
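    # Illustrative shape of the dict returned above (product ids, uom id and
    # quantities are made up): {42: {'qty': 2.0, 'uom': 1}, 7: {'qty': 4.0, 'uom': 1}}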
def _get_qty_procurement(self):
self.ensure_one()
# Specific case when we change the qty on a SO for a kit product.
# We don't try to be too smart and keep a simple approach: we compare the quantity before
# and after update, and return the difference. We don't take into account what was already
# sent, or any other exceptional case.
bom = self.env['mrp.bom']._bom_find(product=self.product_id)
if bom and bom.type == 'phantom' and 'previous_product_uom_qty' in self.env.context:
return self.env.context['previous_product_uom_qty'].get(self.id, 0.0)
return super(SaleOrderLine, self)._get_qty_procurement()
@api.multi
@api.depends('product_id', 'move_ids.state')
def _compute_qty_delivered_method(self):
lines = self.env['sale.order.line']
for line in self:
bom = self.env['mrp.bom']._bom_find(product=line.product_id, company_id=line.company_id.id)
if bom and bom.type == 'phantom' and line.order_id.state == 'sale':
bom_delivered = all([move.state == 'done' for move in line.move_ids])
if not bom_delivered:
line.qty_delivered_method = 'manual'
lines |= line
super(SaleOrderLine, self - lines)._compute_qty_delivered_method()
class AccountInvoiceLine(models.Model):
# TDE FIXME: what is this code ??
_inherit = "account.invoice.line"
def _get_anglo_saxon_price_unit(self):
price_unit = super(AccountInvoiceLine, self)._get_anglo_saxon_price_unit()
# in case of anglo saxon with a product configured as invoiced based on delivery, with perpetual
# valuation and real price costing method, we must find the real price for the cost of good sold
if self.product_id.invoice_policy == "delivery":
for s_line in self.sale_line_ids:
# qtys already invoiced
qty_done = sum([x.uom_id._compute_quantity(x.quantity, x.product_id.uom_id) for x in s_line.invoice_lines if x.invoice_id.state in ('open', 'in_payment', 'paid')])
quantity = self.uom_id._compute_quantity(self.quantity, self.product_id.uom_id)
# Put moves in fixed order by date executed
moves = s_line.move_ids.sorted(lambda x: x.date)
# Go through all the moves and do nothing until you get to qty_done
# Beyond qty_done we need to calculate the average of the price_unit
# on the moves we encounter.
bom = s_line.product_id.product_tmpl_id.bom_ids and s_line.product_id.product_tmpl_id.bom_ids[0]
if bom.type == 'phantom':
average_price_unit = 0
components = s_line._get_bom_component_qty(bom)
for product_id in components:
factor = components[product_id]['qty']
prod_moves = [m for m in moves if m.product_id.id == product_id]
prod_qty_done = factor * qty_done
prod_quantity = factor * quantity
average_price_unit += factor * self._compute_average_price(prod_qty_done, prod_quantity, prod_moves)
price_unit = average_price_unit or price_unit
price_unit = self.product_id.uom_id._compute_price(price_unit, self.uom_id)
return price_unit
|
the-stack_0_27608
|
import socket
HOST = '127.0.0.1'
PORT = 4040
def create_listen_socket(host, port):
""" Setup the sockets our server will receive connection requests on """
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(100)
return sock
def parse_recvd_data(data):
""" Break up raw received data into messages, delimited by null byte """
parts = data.split(b'\0')
msgs = parts[:-1]
rest = parts[-1]
return msgs, rest
def recv_msgs(sock, data=bytes()):
"""
Receive data and break into complete messages on null byte
delimiter. Block until at least one message received, then
return received messages
"""
msgs = []
while not msgs:
recvd = sock.recv(4096) # <-- Blocks
if not recvd:
raise ConnectionError()
data = data + recvd
(msgs, rest) = parse_recvd_data(data)
msgs = [msg.decode('utf-8') for msg in msgs]
return msgs, rest
def recv_msg(sock):
"""
Wait for data to arrive on the socket, then parse into messages using
b'\0' as message delimiter
"""
data = bytearray()
msg = ''
# Repeatedly read 4096 bytes off the socket, storing the bytes
# in data until we see a delimiter
while not msg:
recvd = sock.recv(4096)
if not recvd:
# Socket has been closed prematurely
raise ConnectionError()
data = data + recvd
if b'\0' in recvd:
            # we know from our protocol rules that we only send
# one message per connection, so b'\0' will always be
# the last character
msg = data.rstrip(b'\0')
msg = msg.decode('utf-8')
return msg
def prep_msg(msg):
""" Prepare a string to be sent as a message """
msg += '\0'
return msg.encode('utf-8')
def send_msg(sock, msg):
""" Send a string over a socket, preparing it first """
data = prep_msg(msg)
sock.sendall(data)
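# Minimal client-side sketch using the helpers above (assumes a server built
# around create_listen_socket is accepting connections on HOST/PORT):
#
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   sock.connect((HOST, PORT))
#   send_msg(sock, 'hello')
#   reply = recv_msg(sock)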
|
the-stack_0_27609
|
import pandas as pd
from matplotlib import pyplot
import os
os.chdir('/Desktop/web_scraping/')
data=pd.read_csv('final_movie_details.csv')
# plot
pyplot.scatter(data['imdb'],data['metascore'])
pyplot.show()
# plot
pyplot.scatter(data['metascore'],data['votes'])
pyplot.show()
# We can see a somewhat linear relationship between IMDB score and metascore, so let's try linear regression on it
## ML model
X = data.loc[:, 'metascore'].values
y = data.loc[:, 'imdb'].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33, random_state = 0)
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()  # create the linear regression model
regressor.fit(X_train.reshape(-1,1), y_train.reshape(-1,1))  # fit the regressor to the training data
#predict the test results
y_pred =regressor.predict(X_test.reshape(-1,1))
# Comparing y_test with y_pred shows the actual IMDB scores alongside the model's predictions
pyplot.scatter(X_train, y_train, color = 'red')
pyplot.plot(X_train, regressor.predict(X_train.reshape(-1,1)), color = 'blue')
# the plot shows metascore on the x axis and IMDB score on the y axis; points
# lying close to the regression line mean the predictions are quite accurate, i.e. predicted score is approximately equal to the real score
pyplot.title('IMDB V/S METASCORE (Training set)')
pyplot.xlabel('Metascore')
pyplot.ylabel('IMDB')
pyplot.show()
# Visualising the Test set results
pyplot.scatter(X_test, y_test, color = 'red')
pyplot.plot(X_train, regressor.predict(X_train.reshape(-1,1)), color = 'blue')
pyplot.title('IMDB V/S METASCORE (Training set)')
pyplot.xlabel('Metascore')
pyplot.ylabel('IMDB')
pyplot.show()
from sklearn.metrics import mean_squared_error
mean_squared_error(y_test, y_pred)
# 0.18041462828221905
# That's a good score; it suggests metascore has a fairly strong linear relationship with the IMDB rating
## Let's try predicting IMDB score from metascore and votes together
X1 = data.loc[:, ['metascore','votes']].values
y1 = data.loc[:, 'imdb'].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size = 0.33, random_state = 0)
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()  # create the linear regression model
regressor.fit(X_train, y_train)  # fit the regressor to the training data
#predict the test results
y_pred =regressor.predict(X_test)
from sklearn.metrics import mean_squared_error
mean_squared_error(y_test, y_pred)
# 0.15729132122310804 good score
dur=data['movie duration'].value_counts()
#https://www.analyticsvidhya.com/blog/2017/08/introduction-to-multi-label-classification/
#https://www.analyticsvidhya.com/blog/2019/04/predicting-movie-genres-nlp-multi-label-classification/
### Soon I will build a multi-label text classifier, since each movie description has multiple genre tags (e.g. action plus comedy).
|
the-stack_0_27610
|
# from: https://github.com/m-labs/nmigen/blob/master/examples/basic/uart.py
from nmigen import *
class UART_NMIGEN(Elaboratable):
def __init__(self, divisor, data_bits=8):
assert divisor >= 4
self.data_bits = data_bits
self.divisor = divisor
self.tx_o = Signal()
self.rx_i = Signal()
self.tx_data = Signal(data_bits)
self.tx_rdy = Signal()
self.tx_ack = Signal()
self.rx_data = Signal(data_bits)
self.rx_err = Signal()
self.rx_ovf = Signal()
self.rx_rdy = Signal()
self.rx_ack = Signal()
def elaborate(self, platform):
m = Module()
### m.domains.sync = ClockDomain()
tx_phase = Signal(len(Const(self.divisor)))
tx_shreg = Signal(1 + self.data_bits + 1, reset=-1)
tx_count = Signal(len(Const(len(tx_shreg) + 1)))
m.d.comb += self.tx_o.eq(tx_shreg[0])
with m.If(tx_count == 0):
m.d.comb += self.tx_ack.eq(1)
with m.If(self.tx_rdy):
m.d.sync += [
tx_shreg.eq(Cat(C(0, 1), self.tx_data, C(1, 1))),
tx_count.eq(len(tx_shreg)),
tx_phase.eq(self.divisor - 1),
]
with m.Else():
with m.If(tx_phase != 0):
m.d.sync += tx_phase.eq(tx_phase - 1)
with m.Else():
m.d.sync += [
tx_shreg.eq(Cat(tx_shreg[1:], C(1, 1))),
tx_count.eq(tx_count - 1),
tx_phase.eq(self.divisor - 1),
]
rx_phase = Signal(len(Const(self.divisor)))
rx_shreg = Signal(1 + self.data_bits + 1, reset=-1)
rx_count = Signal(len(Const(len(rx_shreg) + 1)))
m.d.comb += self.rx_data.eq(rx_shreg[1:-1])
with m.If(rx_count == 0):
m.d.comb += self.rx_err.eq(~(~rx_shreg[0] & rx_shreg[-1]))
with m.If(~self.rx_i):
with m.If(self.rx_ack | ~self.rx_rdy):
m.d.sync += [
self.rx_rdy.eq(0),
self.rx_ovf.eq(0),
rx_count.eq(len(rx_shreg)),
rx_phase.eq(self.divisor // 2),
]
with m.Else():
m.d.sync += self.rx_ovf.eq(1)
with m.Else():
with m.If(rx_phase != 0):
m.d.sync += rx_phase.eq(rx_phase - 1)
with m.Else():
m.d.sync += [
rx_shreg.eq(Cat(rx_shreg[1:], self.rx_i)),
rx_count.eq(rx_count - 1),
rx_phase.eq(self.divisor - 1),
]
with m.If(rx_count == 1):
m.d.sync += self.rx_rdy.eq(1)
return m
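# Illustrative instantiation (not part of the original example): the divisor is the
# sync-clock frequency divided by the baud rate, e.g. a 1 MHz clock driving 9600 baud.
if __name__ == "__main__":
    uart = UART_NMIGEN(divisor=1_000_000 // 9600)
    print(uart.divisor, uart.data_bits)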
|
the-stack_0_27614
|
"""
Hexadecimal Notation
====================
Defines the objects for hexadecimal notation:
- :func:`colour.notation.RGB_to_HEX`
- :func:`colour.notation.HEX_to_RGB`
"""
from __future__ import annotations
import numpy as np
from colour.algebra import normalise_maximum
from colour.hints import ArrayLike, List, NDArray, StrOrArrayLike, StrOrNDArray
from colour.models import eotf_inverse_sRGB, eotf_sRGB
from colour.utilities import (
as_float_array,
as_int_array,
from_range_1,
to_domain_1,
usage_warning,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"RGB_to_HEX",
"HEX_to_RGB",
]
def RGB_to_HEX(RGB: ArrayLike) -> StrOrNDArray:
"""
Convert from *RGB* colourspace to hexadecimal representation.
Parameters
----------
RGB
*RGB* colourspace array.
Returns
-------
:class:`str` or :class:`numpy.array`
Hexadecimal representation.
Notes
-----
+------------+-----------------------+---------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+===============+
| ``RGB`` | [0, 1] | [0, 1] |
+------------+-----------------------+---------------+
Examples
--------
>>> RGB = np.array([0.66666667, 0.86666667, 1.00000000])
>>> RGB_to_HEX(RGB)
'#aaddff'
"""
RGB = to_domain_1(RGB)
if np.any(RGB < 0):
usage_warning(
'"RGB" array contains negative values, those will be clipped, '
"unpredictable results may occur!"
)
RGB = as_float_array(np.clip(RGB, 0, np.inf))
if np.any(RGB > 1):
usage_warning(
'"RGB" array contains values over 1 and will be normalised, '
"unpredictable results may occur!"
)
RGB = eotf_inverse_sRGB(normalise_maximum(eotf_sRGB(RGB)))
to_HEX = np.vectorize("{:02x}".format)
HEX = to_HEX(as_int_array(RGB * 255, dtype=np.uint8)).astype(object)
HEX = np.asarray("#") + HEX[..., 0] + HEX[..., 1] + HEX[..., 2]
return HEX
def HEX_to_RGB(HEX: StrOrArrayLike) -> NDArray:
"""
Convert from hexadecimal representation to *RGB* colourspace.
Parameters
----------
HEX
Hexadecimal representation.
Returns
-------
:class:`numpy.array`
*RGB* colourspace array.
Notes
-----
+-----------+-----------------------+---------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+===========+=======================+===============+
| ``RGB`` | [0, 1] | [0, 1] |
+-----------+-----------------------+---------------+
Examples
--------
>>> HEX = '#aaddff'
>>> HEX_to_RGB(HEX) # doctest: +ELLIPSIS
array([ 0.6666666..., 0.8666666..., 1. ])
"""
HEX = np.core.defchararray.lstrip(HEX, "#") # type: ignore[arg-type]
def to_RGB(x: List) -> List:
"""Convert given hexadecimal representation to *RGB*."""
l_x = len(x)
return [
int(x[i : i + l_x // 3], 16) # type: ignore[call-overload]
for i in range(0, l_x, l_x // 3)
]
to_RGB_v = np.vectorize(to_RGB, otypes=[np.ndarray])
RGB = as_float_array(to_RGB_v(HEX).tolist()) / 255
return from_range_1(RGB)
|
the-stack_0_27616
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class WaitCriteriaSummary(object):
"""
Specifies wait criteria for the Wait stage.
"""
#: A constant which can be used with the wait_type property of a WaitCriteriaSummary.
#: This constant has a value of "ABSOLUTE_WAIT"
WAIT_TYPE_ABSOLUTE_WAIT = "ABSOLUTE_WAIT"
def __init__(self, **kwargs):
"""
Initializes a new WaitCriteriaSummary object with values from keyword arguments. This class has the following subclasses and if you are using this class as input
to a service operations then you should favor using a subclass over the base class:
* :class:`~oci.devops.models.AbsoluteWaitCriteriaSummary`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param wait_type:
The value to assign to the wait_type property of this WaitCriteriaSummary.
Allowed values for this property are: "ABSOLUTE_WAIT", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type wait_type: str
"""
self.swagger_types = {
'wait_type': 'str'
}
self.attribute_map = {
'wait_type': 'waitType'
}
self._wait_type = None
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['waitType']
if type == 'ABSOLUTE_WAIT':
return 'AbsoluteWaitCriteriaSummary'
else:
return 'WaitCriteriaSummary'
@property
def wait_type(self):
"""
**[Required]** Gets the wait_type of this WaitCriteriaSummary.
wait criteria type
Allowed values for this property are: "ABSOLUTE_WAIT", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The wait_type of this WaitCriteriaSummary.
:rtype: str
"""
return self._wait_type
@wait_type.setter
def wait_type(self, wait_type):
"""
Sets the wait_type of this WaitCriteriaSummary.
wait criteria type
:param wait_type: The wait_type of this WaitCriteriaSummary.
:type: str
"""
allowed_values = ["ABSOLUTE_WAIT"]
if not value_allowed_none_or_none_sentinel(wait_type, allowed_values):
wait_type = 'UNKNOWN_ENUM_VALUE'
self._wait_type = wait_type
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
the-stack_0_27617
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss utility functions."""
import math
import tensorflow as tf
# Define measure types.
TYPE_MEASURE_GAN = 'GAN' # Vanilla GAN.
TYPE_MEASURE_JSD = 'JSD' # Jensen-Shannon divergence.
TYPE_MEASURE_KL = 'KL' # KL-divergence.
TYPE_MEASURE_RKL = 'RKL' # Reverse KL-divergence.
TYPE_MEASURE_H2 = 'H2' # Squared Hellinger.
TYPE_MEASURE_W1 = 'W1' # Wasserstein distance (1-Lipschitz).
# Define generator loss types.
TYPE_GENERATOR_LOSS_MM = 'MM' # Minimax.
TYPE_GENERATOR_LOSS_NS = 'NS' # Non-saturating.
def compute_positive_expectation(samples, measure, reduce_mean=False):
"""Computes the positive part of a divergence or difference.
Args:
samples: A tensor for the positive samples.
measure: A string for measure to compute. See TYPE_MEASURE_* for supported
measure types.
reduce_mean: A boolean indicating whether to reduce results
Returns:
A tensor (has the same shape as samples) or scalar (if reduced) for the
positive expectation of the inputs.
"""
if measure == TYPE_MEASURE_GAN:
expectation = -tf.math.softplus(-samples)
elif measure == TYPE_MEASURE_JSD:
expectation = math.log(2.) - tf.math.softplus(-samples)
elif measure == TYPE_MEASURE_KL:
expectation = samples
elif measure == TYPE_MEASURE_RKL:
expectation = -tf.math.exp(-samples)
elif measure == TYPE_MEASURE_H2:
expectation = 1. - tf.math.exp(-samples)
elif measure == TYPE_MEASURE_W1:
expectation = samples
else:
raise ValueError('Measure {} not supported'.format(measure))
if reduce_mean:
return tf.math.reduce_mean(expectation)
else:
return expectation
def compute_negative_expectation(samples, measure, reduce_mean=False):
"""Computes the negative part of a divergence or difference.
Args:
samples: A tensor for the negative samples.
measure: A string for measure to compute. See TYPE_MEASURE_* for supported
measure types.
reduce_mean: A boolean indicating whether to reduce results
Returns:
A tensor (has the same shape as samples) or scalar (if reduced) for the
negative expectation of the inputs.
"""
if measure == TYPE_MEASURE_GAN:
expectation = tf.math.softplus(-samples) + samples
elif measure == TYPE_MEASURE_JSD:
expectation = tf.math.softplus(-samples) + samples - math.log(2.)
elif measure == TYPE_MEASURE_KL:
expectation = tf.math.exp(samples - 1.)
elif measure == TYPE_MEASURE_RKL:
expectation = samples - 1.
elif measure == TYPE_MEASURE_H2:
expectation = tf.math.exp(samples) - 1.
elif measure == TYPE_MEASURE_W1:
expectation = samples
else:
raise ValueError('Measure {} not supported'.format(measure))
if reduce_mean:
return tf.math.reduce_mean(expectation)
else:
return expectation
def compute_fenchel_dual_loss(local_features,
global_features,
measure,
positive_indicator_matrix=None):
"""Computes the f-divergence loss.
It is the distance between positive and negative joint distributions.
Divergences (measures) supported are Jensen-Shannon (JSD), GAN (equivalent
to JSD), Squared Hellinger (H2), KL and reverse KL (RKL).
Reference:
Hjelm et al. Learning deep representations by mutual information estimation
and maximization. https://arxiv.org/pdf/1808.06670.pdf.
Args:
local_features: A tensor for local features. Shape = [batch_size,
num_locals, feature_dim].
global_features: A tensor for local features. Shape = [batch_size,
num_globals, feature_dim].
measure: A string for f-divergence measure.
positive_indicator_matrix: A tensor for indicating positive sample pairs.
1.0 means positive and otherwise 0.0. It should be symmetric with 1.0 at
diagonal. Shape = [batch_size, batch_size].
Returns:
A scalar for the computed loss.
"""
batch_size, num_locals, feature_dim = local_features.shape
num_globals = global_features.shape[-2]
# Make the input tensors the right shape.
local_features = tf.reshape(local_features, (-1, feature_dim))
global_features = tf.reshape(global_features, (-1, feature_dim))
# Compute outer product, we want a [batch_size, num_locals, batch_size,
# num_globals] tensor.
product = tf.linalg.matmul(local_features, global_features, transpose_b=True)
product = tf.reshape(
product, (batch_size, num_locals, batch_size, num_globals))
# Compute the indicator_matrix for positive and negative samples.
if positive_indicator_matrix is None:
positive_indicator_matrix = tf.eye(batch_size, dtype=tf.dtypes.float32)
negative_indicator_matrix = 1. - positive_indicator_matrix
# Compute the positive and negative scores, and average the spatial locations.
positive_expectation = compute_positive_expectation(
product, measure, reduce_mean=False)
negative_expectation = compute_negative_expectation(
product, measure, reduce_mean=False)
positive_expectation = tf.math.reduce_mean(positive_expectation, axis=[1, 3])
negative_expectation = tf.math.reduce_mean(negative_expectation, axis=[1, 3])
# Mask positive and negative terms.
positive_expectation = tf.math.reduce_sum(
positive_expectation * positive_indicator_matrix) / tf.math.maximum(
tf.math.reduce_sum(positive_indicator_matrix), 1e-12)
negative_expectation = tf.math.reduce_sum(
negative_expectation * negative_indicator_matrix) / tf.math.maximum(
tf.math.reduce_sum(negative_indicator_matrix), 1e-12)
return negative_expectation - positive_expectation
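# Illustrative usage sketch (shapes are assumed, not taken from the original file).
def _example_fenchel_dual_loss():
  """Runs compute_fenchel_dual_loss on random features as a smoke test."""
  local_features = tf.random.normal([4, 16, 32])  # [batch_size, num_locals, feature_dim]
  global_features = tf.random.normal([4, 1, 32])  # [batch_size, num_globals, feature_dim]
  return compute_fenchel_dual_loss(local_features, global_features, TYPE_MEASURE_JSD)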
def compute_info_nce_loss(local_features,
global_features,
positive_indicator_matrix=None,
temperature=1.0):
"""Computes the InfoNCE (CPC) loss.
Reference:
Oord et al. Representation Learning with Contrastive Predictive Coding.
https://arxiv.org/pdf/1807.03748.pdf.
Args:
local_features: A tensor for local features. Shape = [batch_size,
num_locals, feature_dim].
global_features: A tensor for local features. Shape = [batch_size,
num_globals, feature_dim].
positive_indicator_matrix: A tensor for indicating positive sample pairs.
1.0 means positive and otherwise 0.0. It should be symmetric with 1.0 at
diagonal. Shape = [batch_size, batch_size].
temperature: A float for temperature hyperparameter.
Returns:
A scalar for the computed loss.
"""
batch_size, num_locals, feature_dim = local_features.shape
num_globals = global_features.shape[-2]
# Make the input tensors the right shape.
# -> Shape = [batch_size * num_locals, feature_dim].
local_features_reshaped = tf.reshape(local_features, (-1, feature_dim))
# -> Shape = [batch_size * num_globals, feature_dim].
global_features_reshaped = tf.reshape(global_features, (-1, feature_dim))
# Inner product for positive samples.
# -> Shape = [batch_size, num_locals, num_globals]
positive_expectation = tf.linalg.matmul(
local_features, tf.transpose(global_features, (0, 2, 1)))
if temperature != 1.0:
positive_expectation /= temperature
# -> Shape = [batch_size, num_locals, 1, num_globals]
positive_expectation = tf.expand_dims(positive_expectation, axis=2)
# Outer product for negative. We want a [batch_size, batch_size, num_locals,
# num_globals] tensor.
# -> Shape = [batch_size * num_globals, batch_size * num_locals].
product = tf.linalg.matmul(
global_features_reshaped, local_features_reshaped, transpose_b=True)
if temperature != 1.0:
product /= temperature
product = tf.reshape(product,
(batch_size, num_globals, batch_size, num_locals))
# -> Shape = [batch_size, batch_size, num_locals, num_globals].
product = tf.transpose(product, (0, 2, 3, 1))
# Mask positive part of the negative tensor.
if positive_indicator_matrix is None:
positive_indicator_matrix = tf.eye(batch_size, dtype=tf.dtypes.float32)
# -> Shape = [batch_size, batch_size, 1, 1].
positive_indicator_matrix = positive_indicator_matrix[:, :, tf.newaxis,
tf.newaxis]
# -> Shape = [batch_size, batch_size, 1, 1].
negative_indicator_matrix = 1. - positive_indicator_matrix
# Masking is done by shifting the diagonal before exp.
negative_expectation = (
negative_indicator_matrix * product - 1e12 * positive_indicator_matrix)
negative_expectation = tf.reshape(
negative_expectation, (batch_size, batch_size * num_locals, num_globals))
# -> Shape = [batch_size, 1, batch_size * num_locals, num_globals]
negative_expectation = tf.expand_dims(negative_expectation, axis=1)
# -> Shape = [batch_size, num_locals, batch_size * num_locals, num_globals]
negative_expectation = tf.tile(negative_expectation,
tf.constant((1, num_locals, 1, 1)))
# -> Shape = [batch_size, num_locals, 1 + batch_size * num_locals,
# num_globals].
logits = tf.concat([positive_expectation, negative_expectation], axis=2)
# -> Shape = [batch_size, num_locals, num_globals].
loss = tf.nn.log_softmax(logits, axis=2)
# The positive score is the first element of the log softmax.
# -> Shape = [batch_size, num_locals].
loss = -loss[:, :, 0]
# -> Shape = [].
return tf.reduce_mean(loss)
def compute_log_likelihood(x_mean, x_logvar, y):
"""Computes the log-likelihood of y|x.
Args:
x_mean: A tensor for mean of y estimated from x. Shape = [batch_size,
feature_dim].
x_logvar: A tensor for log variance of y estimated from x. Shape = [
batch_size, feature_dim].
y: A tensor for value of y. Shape = [batch_size, feature_dim].
Returns:
A scalar for the computed log-likelihood of y|x.
"""
likelihood = -(x_mean - y) ** 2 / tf.math.exp(x_logvar) / 2. - x_logvar / 2.
return tf.math.reduce_mean(likelihood)
def compute_contrastive_log_ratio(x_mean,
x_logvar,
y,
positive_indicator_matrix=None):
"""Computes the contrastive log-ratio of y|x.
The result can be used as a variational upper-bound estimation of the mutual
information I(x, y).
Reference:
Cheng et al. CLUB: A Contrastive Log-ratio Upper Bound of Mutual
Information. https://arxiv.org/pdf/2006.12013.pdf.
Args:
x_mean: A tensor for mean of y estimated from x. Shape = [batch_size,
feature_dim].
x_logvar: A tensor for log variance of y estimated from x. Shape = [
batch_size, feature_dim].
y: A tensor for value of y. Shape = [batch_size, feature_dim].
positive_indicator_matrix: A tensor for indicating positive sample pairs.
1.0 means positive and otherwise 0.0. It should be symmetric with 1.0 at
diagonal. Shape = [batch_size, batch_size].
Returns:
A scalar for the contrastive log-ratio of y|x.
"""
batch_size = tf.shape(x_logvar)[0]
# Compute the indicator_matrix for positive and negative samples.
if positive_indicator_matrix is None:
positive_indicator_matrix = tf.eye(batch_size, dtype=tf.dtypes.float32)
negative_indicator_matrix = 1. - positive_indicator_matrix
# Compute the log-likelihood of y|x samples.
y = tf.expand_dims(y, axis=0)
x_mean = tf.expand_dims(x_mean, axis=1)
x_logvar = tf.expand_dims(x_logvar, axis=1)
log_likelihood = -(x_mean - y)**2 / tf.math.exp(x_logvar) / 2. - x_logvar / 2.
log_likelihood = tf.math.reduce_mean(log_likelihood, axis=-1)
# Compute the positive and negative scores.
positive_expectation = tf.math.reduce_sum(
log_likelihood * positive_indicator_matrix) / tf.math.maximum(
tf.math.reduce_sum(positive_indicator_matrix), 1e-12)
negative_expectation = tf.math.reduce_sum(
log_likelihood * negative_indicator_matrix) / tf.math.maximum(
tf.math.reduce_sum(negative_indicator_matrix), 1e-12)
# Clip the loss to be non-negative since mutual information should always be
# a non-negative scalar.
return tf.math.maximum(0., positive_expectation - negative_expectation)
def compute_gradient_penalty(discriminator, inputs, penalty_weight=1.0):
"""Computes the gradient penalty.
Reference:
Mescheder et al. Which Training Methods for GANs do actually Converge?
https://arxiv.org/pdf/1801.04406.pdf.
Args:
discriminator: Network to apply penalty through.
inputs: An input tensor. Shape = [batch_size, ...].
penalty_weight: A float for the weight of penalty.
Returns:
penalty: A scalar for the gradient penalty loss.
outputs: A tensor for the network outputs.
"""
batch_size = tf.shape(inputs)[0]
with tf.GradientTape() as tape:
tape.watch(inputs)
outputs = discriminator(inputs, training=True)
gradients = tape.gradient(outputs, inputs)
gradients = tf.reshape(gradients, (batch_size, -1))
penalty = tf.reduce_sum(tf.square(gradients), axis=-1)
penalty = tf.reduce_mean(penalty) * penalty_weight
return penalty, outputs
def compute_discriminator_loss(discriminator, real_inputs, fake_inputs):
"""Computes the discriminator loss.
Args:
discriminator: The discriminator network.
real_inputs: A tensor for the real inputs. Shape = [batch_size, ...].
fake_inputs: A tensor for the fake inputs. Shape = [batch_size, ...].
Returns:
loss: A scalar for the discriminator loss.
real_outputs: A tensor for the real outputs.
fake_outputs: A tensor for the fake outputs.
"""
real_gradient_penalty, real_outputs = compute_gradient_penalty(
discriminator, real_inputs)
fake_gradient_penalty, fake_outputs = compute_gradient_penalty(
discriminator, fake_inputs)
gradient_penalty_loss = 0.5 * (real_gradient_penalty + fake_gradient_penalty)
positive_expectation = compute_positive_expectation(
real_outputs, TYPE_MEASURE_GAN, reduce_mean=True)
negative_expectation = compute_negative_expectation(
fake_outputs, TYPE_MEASURE_GAN, reduce_mean=True)
expectation = 0.5 * (positive_expectation - negative_expectation)
loss = -expectation + gradient_penalty_loss
return loss, real_outputs, fake_outputs
def compute_generator_loss(fake_samples, loss_type):
"""Computes the generator loss.
Args:
fake_samples: A tensor for the fake samples. Shape = [batch_size, ...].
loss_type: A string for the type of loss. See TYPE_GENERATOR_LOSS_* for
supported loss types.
Returns:
A scalar for the generator loss.
"""
if loss_type == TYPE_GENERATOR_LOSS_MM:
return compute_negative_expectation(
fake_samples, TYPE_MEASURE_GAN, reduce_mean=True)
elif loss_type == TYPE_GENERATOR_LOSS_NS:
return -compute_positive_expectation(
fake_samples, TYPE_MEASURE_GAN, reduce_mean=True)
else:
raise ValueError('Generator loss {} not supported'.format(loss_type))
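# Illustrative usage sketch (the discriminator scores here are randomly generated, not
# taken from the original file).
def _example_generator_loss():
  """Runs compute_generator_loss on random fake-sample scores as a smoke test."""
  fake_scores = tf.random.normal([8, 1])
  return compute_generator_loss(fake_scores, TYPE_GENERATOR_LOSS_NS)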
|
the-stack_0_27619
|
import pylab
import IPython.display
def display_dataframes(dataframes, nColumns=3):
table = "<table style='width:100%; border:0px'>{content}</table>"
row = "<tr style='border:0px'>{content}</tr>"
cell = "<td style='width:{width}%;vertical-align:top;border:0px'>{{content}}</td>"
cell = cell.format(width=100 / nColumns)
cells = [cell.format(content=df.to_html()) for df in dataframes]
    cells += ((-len(dataframes)) % nColumns) * [cell.format(content="")]  # pad the final row (adds nothing when it is already full)
rows = [row.format(content="".join(cells[i:i + nColumns])) for i in range(0, len(cells), nColumns)]
IPython.display.display(IPython.display.HTML(table.format(content="".join(rows))))
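# Illustrative usage (assumes a few pandas DataFrames df1, df2, df3 already exist):
# display_dataframes([df1, df2, df3], nColumns=3)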
def figsize(w, h):
pylab.rcParams['figure.figsize'] = w,h
def linestyle(style, reset_color_counter=True):
pylab.rcParams['lines.linestyle'] = style
if reset_color_counter:
pylab.gca().set_prop_cycle(None) # Reset Colors Counter
|
the-stack_0_27621
|
"""
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
from typing import List
class Solution:
def levelOrder(self, root: 'Node') -> List[List[int]]:
if root is None:
return []
result, previousLayer = [], [root,]
while previousLayer:
currentLayer = []
result.append([])
for node in previousLayer:
result[-1].append(node.val)
currentLayer.extend(node.children)
previousLayer = currentLayer
return result
|
the-stack_0_27623
|
import platform
import os
import json
import sublime
import sublime_plugin
if platform.python_version() == '3.3.6':
from importlib.machinery import SourceFileLoader
def run_module(module_name, user_bindings, **kwargs):
user_module = SourceFileLoader(module_name, user_bindings).load_module()
user_module.keybinding(kwargs.get('key'), command=kwargs.get('command'))
else:
import importlib.util
def run_module(module_name, user_bindings, **kwargs):
spec = importlib.util.spec_from_file_location(module_name, user_bindings)
user_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(user_module)
user_module.keybinding(kwargs.get('key'), command=kwargs.get('command'))
class SpkKeyBinding(sublime_plugin.ApplicationCommand):
def __init__(self):
self.path = '$packages/User/SublimeProKeyBindings/keybindings.py'
self.dest = '$packages/User/SublimeProKeyBindings/Default ($platform).sublime-keymap'
self.module = 'spk_user_bindings'
def run(self, **kwargs):
self.bindings = []
user_bindings = format(kwargs.get('bindings', self.path))
dest = format(kwargs.get('destination', self.dest))
if not os.path.isfile(user_bindings):
sublime.error_message("Couldn't find %s\n\nMake sure the file exists." % user_bindings)
return
try:
run_module(self.module, user_bindings, key=self.key, command=self.command)
except Exception as err:
sublime.error_message("Something went wrong.\nCheck out sublime console to see more information")
self.bindings = []
raise err
if len(self.bindings) == 0:
sublime.error_message("Couldn't find any key bindings")
return
with open(dest, 'w') as file:
json.dump(self.bindings, file, indent=2)
self.bindings = []
sublime.message_dialog("Key bindings updated")
def key(self, keys, command, *context, **args):
if not isinstance(keys, list) or len(keys) < 1:
raise Exception("The first argument needs to be an array of keys")
        if not isinstance(command, (str, list)) or len(command) < 1:
raise Exception("The command needs to be a string or an array")
data = {}
data['keys'] = keys
if isinstance(command, list):
data['command'] = 'spk_multi_cmd'
data['args'] = {"commands": command}
else:
data['command'] = command
if len(args) > 0:
data['args'] = self.set_args(args)
if len(context) > 0:
data['context'] = self.context(context)
self.bindings.append(data)
def set_args(self, args):
data = {}
for arg in args:
data[arg] = args.get(arg)
return data
def context(self, args):
res = []
for a in args:
if isinstance(a, list):
res.extend(a)
else:
res.append(a)
return res
def command(self, name, **kwargs):
data = {}
data['command'] = name
if len(kwargs) > 0:
data['args'] = {}
for k in kwargs:
data['args'][k] = kwargs.get(k)
return data
class SpkMultiCmd(sublime_plugin.TextCommand):
def run(self, edit, **kwargs):
commands = kwargs.get('commands', [])
if len(commands) == 0:
return
window = self.view.window()
for cmd in commands:
name = cmd.get('command', "")
args = cmd.get('args', None)
if len(name) < 1:
return
if args is None:
window.run_command(name)
else:
window.run_command(name, args)
def format(string):
return sublime.expand_variables(string, sublime.active_window().extract_variables())
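# Illustrative user keybindings.py consumed by run_module above (the bindings are hypothetical):
#
#   def keybinding(key, command):
#       key(["ctrl+alt+t"], "toggle_side_bar")
#       key(["ctrl+alt+b"], [command("show_panel", panel="console"), command("build")])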
|
the-stack_0_27624
|
from functools import reduce
from collections import namedtuple, Counter
with open("input.txt", "r") as f:
template = f.readline().strip()
f.readline() # empty line
insertions = dict(line.strip().split(" -> ") for line in f.readlines())
def apply_insertions(template):
Acc = namedtuple("Accumulator", ["last_letter", "new_string"])
def f(acc: Acc, new_letter: str):
code = f"{acc.last_letter}{new_letter}"
return Acc(new_letter, f"{acc.new_string}{insertions.get(code, '')}{new_letter}")
return reduce(f, template, Acc("", "")).new_string
for i in range(10):
template = apply_insertions(template)
# print(template)
c = Counter(template).most_common()
print(c[0][1] - c[-1][1])
|
the-stack_0_27626
|
import pymongo
import os
from pycare_api.data import scrape_data as sdata
from typing import Optional
from pymongo import MongoClient
db_username=os.environ.get('db_username')
db_pass=os.environ.get('db_pass')
url = "mongodb+srv://backend:[email protected]/covid19Report?retryWrites=true&w=majority".format(db_username,db_pass)
client = pymongo.MongoClient(url.format(
os.getenv("username"), os.getenv("password")))
db = client["covid19Report"]
def getData(collectionName: str, fields: Optional[list] = None):
if fields != None:
showOnly = dict(zip(fields, [True]*len(fields)))
showOnly["_id"] = False
return db.get_collection(collectionName).find({}, showOnly)
else:
return db.get_collection(collectionName).find({}, {"_id": False})
def updateHospitalDetailsData():
collection = db["hospitalDetails"]
try:
for i in sdata.hospitalDetails():
collection.update_many({"hospitalName": i.hospitalName},
{"$set": {"isolationBeds.alloted": i.isolationBeds['alloted'],
"isolationBeds.vacant": i.isolationBeds['vacant'],
"oxygenBeds.alloted": i.oxygenBeds['alloted'],
"oxygenBeds.vacant": i.oxygenBeds['vacant'],
"ventilatorBeds.alloted": i.ventilatorBeds['alloted'],
"ventilatorBeds.vacant": i.ventilatorBeds['vacant']}})
return "successfully updated hospitalDetails collection"
except Exception as err:
return str(err)+"failed to update data hospitalDetails collection"
def updateStatusData():
collection = db["status"]
a = sdata.status()[0]
try:
collection.update_one({}, {"$set": {
"total": a["total"], "cured": a["cured"], "active": a["active"], "death": a["death"],"lastUpdatedOn":a["lastUpdatedOn"]}})
return "successfully updated status collection", a
except Exception as err:
return str(err)+"failed to update data status collection"
def getTranslation():
cursor = db.get_collection("translation").find({}, {"_id": False})
cursor = list(cursor)
data = {key: c[key] for c in cursor for key in c.keys()}
return data
def updateDistrictWiseReport():
collection = db["districtWiseReport"]
try:
for i in sdata.districtWiseReport():
collection.update_many({"district": i.district},
{"$set": {
"reported": i.reported,
"active": i.active,
"cured": i.cured,
"death": i.death,
}})
return "successfully updated districtWiseReport collection"
except Exception as err:
return str(err)+"failed to update data districtWiseReport collection"
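# Illustrative usage (collection names as used above):
# current_status = list(getData("status"))
# district_rows = list(getData("districtWiseReport", fields=["district", "active"]))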
|
the-stack_0_27627
|
def movielens_preprocess():
total_data = {} # {user_id: [[interaction item_id, timestamp], [], ...]}
item_cnt = {} # {item_id: count_int}
# ratings.txt -> user_id :: item_id :: rating :: timestamp
with open('ratings.txt', 'r') as f:
while True:
line = f.readline().strip()
if not line: break
tokens = line.split('::')
uid = int(tokens[0]) # user_id
iid = int(tokens[1]) # item_id
if not iid in item_cnt:
item_cnt[iid] = 0
item_cnt[iid] += 1
timestamp = int(tokens[3])
if not uid in total_data:
total_data[uid] = []
total_data[uid].append([iid, timestamp])
tr_data = []
va_data = []
te_data = []
for uid in total_data:
        temp = sorted(total_data[uid], key=lambda x: x[1])  # sort each user's interactions by timestamp
        iids = list(map(lambda x: x[0], temp))  # keep only the interacted item_ids, as a list
        iids = list(filter(lambda x: item_cnt[x] >= 20, iids))  # keep only item_ids seen at least 20 times overall
        if len(iids) < 3:
            continue  # skip users with fewer than 3 interactions
# Leave One Out setting
te_data.append(iids)
va_data.append(iids[:-1])
tr_data.append(iids[:-2])
return tr_data, va_data, te_data
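# Illustrative usage (assumes ratings.txt is present in the working directory):
if __name__ == "__main__":
    tr_data, va_data, te_data = movielens_preprocess()
    print(len(tr_data), len(va_data), len(te_data))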
|
the-stack_0_27631
|
import os
import logging as log
log_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../logs/", 'FeedbackBot.log')
log_level = log.INFO
fh = log.FileHandler(log_file)
fh.setLevel(log_level)
loggers = set()
formatter = log.Formatter(
"%(asctime)s %(name)s[%(lineno)s][%(funcName)s] - %(levelname)s: %(message)s", datefmt="%d/%m/%y %H:%M:%S"
)
fh.setFormatter(formatter)
log.setLoggerClass(log.Logger)
def getlogger(name=None):
logger = log.getLogger(name)
logger.setLevel(log_level)
logger.addHandler(fh)
loggers.add(logger)
return logger
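# Illustrative usage (the logger name is hypothetical):
if __name__ == "__main__":
    logger = getlogger("FeedbackBot.example")
    logger.info("logger configured, writing to %s", log_file)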
|
the-stack_0_27632
|
# -*- coding: utf-8 -*-
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
#
# Copyright (c) 2015 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import os
from pycsw.core import util
from pycsw.core.etree import etree
NAMESPACE = 'http://www.w3.org/2005/Atom'
NAMESPACES = {'atom': NAMESPACE, 'georss': 'http://www.georss.org/georss'}
XPATH_MAPPINGS = {
'pycsw:Identifier': 'atom:id',
'pycsw:Title': 'atom:title',
'pycsw:Creator': 'atom:author',
'pycsw:Abstract': 'atom:summary',
'pycsw:PublicationDate': 'atom:published',
'pycsw:Keywords': 'atom:category',
'pycsw:Contributor': 'atom:contributor',
'pycsw:AccessConstraints': 'atom:rights',
'pycsw:Modified': 'atom:updated',
'pycsw:Source': 'atom:source',
}
def write_record(result, esn, context, url=None):
''' Return csw:SearchResults child as lxml.etree.Element '''
typename = util.getqattr(result, context.md_core_model['mappings']['pycsw:Typename'])
if esn == 'full' and typename == 'atom:entry':
# dump record as is and exit
return etree.fromstring(util.getqattr(result, context.md_core_model['mappings']['pycsw:XML']), context.parser)
node = etree.Element(util.nspath_eval('atom:entry', NAMESPACES), nsmap=NAMESPACES)
node.attrib[util.nspath_eval('xsi:schemaLocation', context.namespaces)] = \
'%s http://www.kbcafe.com/rss/atom.xsd.xml' % NAMESPACES['atom']
# author
val = util.getqattr(result, context.md_core_model['mappings']['pycsw:Creator'])
if val:
author = etree.SubElement(node, util.nspath_eval('atom:author', NAMESPACES))
etree.SubElement(author, util.nspath_eval('atom:name', NAMESPACES)).text = val
# category
val = util.getqattr(result, context.md_core_model['mappings']['pycsw:Keywords'])
if val:
for kw in val.split(','):
etree.SubElement(node, util.nspath_eval('atom:category', NAMESPACES), term=kw)
for qval in ['pycsw:Contributor', 'pycsw:Identifier']:
val = util.getqattr(result, context.md_core_model['mappings'][qval])
if val:
etree.SubElement(node, util.nspath_eval(XPATH_MAPPINGS[qval], NAMESPACES)).text = val
if qval == 'pycsw:Identifier':
etree.SubElement(node, util.nspath_eval('dc:identifier', context.namespaces)).text = val
rlinks = util.getqattr(result, context.md_core_model['mappings']['pycsw:Links'])
if rlinks:
for link in rlinks.split('^'):
linkset = link.split(',')
url2 = etree.SubElement(node, util.nspath_eval('atom:link', NAMESPACES), href=linkset[-1], type=linkset[2], title=linkset[1])
if linkset[2] == 'enclosure':
url2.attrib['rel'] = linkset[2]
etree.SubElement(node, util.nspath_eval('atom:link', NAMESPACES), href='%s?service=CSW&version=2.0.2&request=GetRepositoryItem&id=%s' % (url, util.getqattr(result, context.md_core_model['mappings']['pycsw:Identifier'])))
# atom:title
el = etree.SubElement(node, util.nspath_eval(XPATH_MAPPINGS['pycsw:Title'], NAMESPACES))
val = util.getqattr(result, context.md_core_model['mappings']['pycsw:Title'])
if val:
        el.text = val
# atom:updated
el = etree.SubElement(node, util.nspath_eval(XPATH_MAPPINGS['pycsw:Modified'], NAMESPACES))
val = util.getqattr(result, context.md_core_model['mappings']['pycsw:Modified'])
if val:
        el.text = val
else:
val = util.getqattr(result, context.md_core_model['mappings']['pycsw:InsertDate'])
el.text = val
for qval in ['pycsw:PublicationDate', 'pycsw:AccessConstraints', 'pycsw:Source', 'pycsw:Abstract']:
val = util.getqattr(result, context.md_core_model['mappings'][qval])
if val:
etree.SubElement(node, util.nspath_eval(XPATH_MAPPINGS[qval], NAMESPACES)).text = val
# bbox extent
val = util.getqattr(result, context.md_core_model['mappings']['pycsw:BoundingBox'])
bboxel = write_extent(val, context.namespaces)
if bboxel is not None:
node.append(bboxel)
return node
def write_extent(bbox, nsmap):
''' Generate BBOX extent '''
if bbox is not None:
try:
bbox2 = util.wkt2geom(bbox)
except:
return None
where = etree.Element(util.nspath_eval('georss:where', NAMESPACES))
envelope = etree.SubElement(where, util.nspath_eval('gml:Envelope', nsmap), srsName='http://www.opengis.net/def/crs/EPSG/0/4326')
etree.SubElement(envelope, util.nspath_eval('gml:lowerCorner', nsmap)).text = '%s %s' % (bbox2[1], bbox2[0])
etree.SubElement(envelope, util.nspath_eval('gml:upperCorner', nsmap)).text = '%s %s' % (bbox2[3], bbox2[2])
return where
return None
|
the-stack_0_27633
|
import re
class Symbol(object):
def __init__(self, element):
self.value = element
def __hash__(self):
return hash(self.value)
class InvalidPatternElement(Exception):
pass
class SymbolEncoder(object):
def __init__(self):
self._symbol_hashes = []
self._hash_to_hash_index = {}
def encode(self, symbol):
hash_index = self._hash_to_hash_index.get(hash(symbol), None)
if hash_index is None:
self._hash_to_hash_index[hash(symbol)] = len(self._symbol_hashes)
self._symbol_hashes.append(hash(symbol))
return chr(self._hash_to_hash_index[hash(symbol)])
class Match(object):
def __init__(self, match, start, end):
self.match = match
self.span = (start, end)
def __repr__(self):
return "<pygregex.Match object; span={0}, match='{1}'>".format(self.span, self.match)
def _encode_pattern(pattern, encoder):
encoded_pattern = []
for element_idx, element in enumerate(pattern):
if isinstance(element, Symbol):
encoded_pattern.append(encoder.encode(element))
elif isinstance(element, str):
if len(element) > 1:
raise InvalidPatternElement('Pattern element at index {0} is a string longer than one character. Maybe you meant it to be an instance of Symbol?'.format(element_idx))
else:
encoded_pattern.append(element)
else:
raise InvalidPatternElement('Pattern element at index {0} is not an instance of Symbol.'.format(element_idx))
return ''.join(encoded_pattern)
def _encode_sequence(sequence, encoder):
encoded_sequence = []
for element in sequence:
encoded_sequence.append(encoder.encode(element))
return ''.join(encoded_sequence)
def search(pattern, sequence):
encoder = SymbolEncoder()
encoded_pattern = _encode_pattern(pattern, encoder)
encoded_sequence = _encode_sequence(sequence, encoder)
for match in re.finditer(encoded_pattern, encoded_sequence):
yield Match(match=sequence[match.start(): match.end()], start=match.start(), end=match.end())
print([match for match in search(
pattern=[Symbol('aa'), '.', '*', Symbol('aa')],
sequence=['aa', 'bb', 'cc', 'aa']
)])
|
the-stack_0_27635
|
import csv
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import DCTERMS, RDF, RDFS, SKOS, XSD
input_file = csv.DictReader(open("test_sheet.csv"))
# make a graph
output_graph = Graph()
for row in input_file:
# convert it from an OrderedDict to a regular dict
row = dict(row)
#{'Subject Label': 'Pearl Wilmer Booker', 'Subject URI': 'None', 'Predicate Label': 'Daughter Of', 'Predicate URI': '', 'Predicate Symmetry': 'Asymmetric', 'Object Label': 'Mary Booker', 'Object URI': 'None'}
# make a literal and add it
output_graph.add( (URIRef(row['Subject URI']), RDFS.label, Literal(row['Subject Label'], lang='en')) )
# make a triple with the object as uri
output_graph.add( (URIRef(row['Subject URI']), URIRef(row['Predicate URI']), URIRef(row['Object URI'])) )
output_graph.serialize(destination='my_graph.nt', format='nt')
|
the-stack_0_27636
|
import taichi as ti
from tests import test_utils
@test_utils.test(require=ti.extension.sparse)
def test_pointer():
x = ti.field(ti.f32)
s = ti.field(ti.i32)
n = 128
ti.root.pointer(ti.i, n).dense(ti.i, n).place(x)
ti.root.place(s)
@ti.kernel
def activate():
for i in range(n):
x[i * n] = 0
@ti.kernel
def func():
for i in x:
s[None] += 1
activate()
func()
assert s[None] == n * n
@test_utils.test(require=ti.extension.sparse)
def test_pointer2():
x = ti.field(ti.f32)
s = ti.field(ti.i32)
n = 128
ti.root.pointer(ti.i, n).dense(ti.i, n).place(x)
ti.root.place(s)
@ti.kernel
def activate():
for i in range(n * n):
x[i] = i
@ti.kernel
def func():
for i in x:
s[None] += i
activate()
func()
N = n * n
assert s[None] == N * (N - 1) / 2
@test_utils.test(require=ti.extension.sparse)
def test_nested_struct_fill_and_clear():
a = ti.field(dtype=ti.f32)
N = 512
ti.root.pointer(ti.ij, [N, N]).dense(ti.ij, [8, 8]).place(a)
@ti.kernel
def fill():
for i, j in ti.ndrange(N * 8, N * 8):
a[i, j] = 2.0
@ti.kernel
def clear():
for i, j in a.parent():
ti.deactivate(a.parent().parent(), [i, j])
def task():
fill()
clear()
for i in range(10):
task()
ti.sync()
|
the-stack_0_27638
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import os
import pickle
import tempfile
import time
import torch
from torch import distributed as dist
def get_world_size():
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main_process():
if not dist.is_initialized():
return True
return dist.get_rank() == 0
def synchronize():
"""
Helper function to synchronize between multiple processes when
using distributed training
"""
if not dist.is_initialized():
return
world_size = dist.get_world_size()
rank = dist.get_rank()
if world_size == 1:
return
def _send_and_wait(r):
if rank == r:
tensor = torch.tensor(0, device="cuda")
else:
tensor = torch.tensor(1, device="cuda")
dist.broadcast(tensor, r)
while tensor.item() == 1:
time.sleep(1)
_send_and_wait(0)
# now sync on the main process
_send_and_wait(1)
def _encode(encoded_data, data):
# gets a byte representation for the data
encoded_bytes = pickle.dumps(data)
# convert this byte string into a byte tensor
storage = torch.ByteStorage.from_buffer(encoded_bytes)
tensor = torch.ByteTensor(storage).to("cuda")
# encoding: first byte is the size and then rest is the data
s = tensor.numel()
assert s <= 255, "Can't encode data greater than 255 bytes"
# put the encoded data in encoded_data
encoded_data[0] = s
encoded_data[1:(s + 1)] = tensor
def _decode(encoded_data):
size = encoded_data[0]
encoded_tensor = encoded_data[1:(size + 1)].to("cpu")
return pickle.loads(bytearray(encoded_tensor.tolist()))
# TODO try to use tensor in shared-memory instead of serializing to disk
# this involves getting the all_gather to work
def scatter_gather(data):
"""
This function gathers data from multiple processes, and returns them
in a list, as they were obtained from each process.
This function is useful for retrieving data from multiple processes,
when launching the code with torch.distributed.launch
Note: this function is slow and should not be used in tight loops, i.e.,
do not use it in the training loop.
Arguments:
data: the object to be gathered from multiple processes.
It must be serializable
Returns:
result (list): a list with as many elements as there are processes,
where each element i in the list corresponds to the data that was
gathered from the process of rank i.
"""
# strategy: the main process creates a temporary directory, and communicates
# the location of the temporary directory to all other processes.
# each process will then serialize the data to the folder defined by
# the main process, and then the main process reads all of the serialized
# files and returns them in a list
if not dist.is_initialized():
return [data]
synchronize()
# get rank of the current process
rank = dist.get_rank()
# the data to communicate should be small
data_to_communicate = torch.empty(256, dtype=torch.uint8, device="cuda")
if rank == 0:
# manually creates a temporary directory, that needs to be cleaned
# afterwards
tmp_dir = tempfile.mkdtemp()
_encode(data_to_communicate, tmp_dir)
synchronize()
# the main process (rank=0) communicates the data to all processes
dist.broadcast(data_to_communicate, 0)
# get the data that was communicated
tmp_dir = _decode(data_to_communicate)
# each process serializes to a different file
file_template = "file{}.pth"
tmp_file = os.path.join(tmp_dir, file_template.format(rank))
torch.save(data, tmp_file)
# synchronize before loading the data
synchronize()
# only the master process returns the data
if rank == 0:
data_list = []
world_size = dist.get_world_size()
for r in range(world_size):
file_path = os.path.join(tmp_dir, file_template.format(r))
d = torch.load(file_path)
data_list.append(d)
# cleanup
os.remove(file_path)
# cleanup
os.rmdir(tmp_dir)
return data_list
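# Illustrative usage sketch (assumes torch.distributed has been initialized when more
# than one process is launched):
# per_rank_stats = scatter_gather({"rank": get_rank(), "loss": 0.5})
# if is_main_process():
#     print(per_rank_stats)  # one entry per process on rank 0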
|
the-stack_0_27641
|
# Copyright 2015 Sanghack Lee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shlee.RCDLight
import shlee.RCDLight
from causality.citest.CITest import Oracle
from causality.dseparation.AbstractGroundGraph import AbstractGroundGraph
from causality.learning import ModelEvaluation
from causality.learning.RCD import RCD, SchemaDependencyWrapper
from causality.model.RelationalDependency import RelationalVariable
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.ERROR)
# Parameters
schema, model = shlee.RCDLight.incompleteness_example()
logger.info('Model: %s', model.dependencies)
hopThreshold = max(len(d.relVar1.path) + 1 for d in model.dependencies)
oracle = Oracle(model, 3 * hopThreshold)
rcd = RCD(schema, oracle, hopThreshold, depth=2)
rcd.identifyUndirectedDependencies()
rcd.orientDependencies()
print('Skeleton precision:', ModelEvaluation.skeletonPrecision(model, rcd.undirectedDependencies))
print('Skeleton recall:', ModelEvaluation.skeletonRecall(model, rcd.undirectedDependencies))
precision = ModelEvaluation.orientedPrecision(model, rcd.orientedDependencies)
print('Oriented precision:', precision)
print('Oriented recall:', ModelEvaluation.orientedRecall(model, rcd.orientedDependencies))
rcdl = shlee.RCDLight.RCDLight(schema, oracle, hopThreshold)
rcdl.identifyUndirectedDependencies()
rcdl.orientDependencies()
print('Skeleton precision:', ModelEvaluation.skeletonPrecision(model, rcdl.undirectedDependencies))
print('Skeleton recall:', ModelEvaluation.skeletonRecall(model, rcdl.undirectedDependencies))
precision = ModelEvaluation.orientedPrecision(model, rcdl.orientedDependencies)
print('Oriented precision:', precision)
print('Oriented recall:', ModelEvaluation.orientedRecall(model, rcdl.orientedDependencies))
assert ModelEvaluation.orientedRecall(model, rcdl.orientedDependencies) == \
ModelEvaluation.orientedRecall(model, rcd.orientedDependencies) == \
0.0
# Demonstrate that there is no 'unshielded triple' in AGGs for the counter-example.
schema, model = shlee.RCDLight.incompleteness_example()
hopThreshold = max(len(d.relVar1.path) + 1 for d in model.dependencies)
oracle = Oracle(model, 3 * hopThreshold)
schemaDepWrapper = SchemaDependencyWrapper(schema, model.dependencies)
perspectives = [si.name for si in schema.getSchemaItems()]
perspectiveToAgg = {perspective: AbstractGroundGraph(schemaDepWrapper, perspective, 3 * hopThreshold)
for perspective in perspectives}
for agg in perspectiveToAgg.values():
for node1 in agg.nodes():
neighbors1 = set(agg.predecessors(node1) + agg.successors(node1))
for node2 in neighbors1:
neighbors2 = set(agg.predecessors(node2) + agg.successors(node2)) - {node1}
for node3 in neighbors2:
if node3 not in neighbors1:
if not isinstance(node1, RelationalVariable) or not isinstance(node2, RelationalVariable) or \
not isinstance(node3, RelationalVariable):
continue
print(node1, node2, node3)
assert False
# There is no 'unshielded triple' in AGGs
|
the-stack_0_27642
|
import numpy as np
import gc
import unittest
import geosoft
import geosoft.gxapi as gxapi
import geosoft.gxpy.gx as gx
import geosoft.gxpy.va as gxva
import geosoft.gxpy.vv as gxvv
import geosoft.gxpy.gdb as gxdb
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.gx = gx.GXpy(log=print)
def start(self):
self._func = self.id().split('.')[-1]
gx.gx().log('\n' + self._func)
def test_vv(self):
self.start()
max = gxapi.iMAX // 16
npdata = np.empty(max)
with gxvv.GXvv(npdata) as vv:
self.assertTrue(vv.length, max)
del npdata
gc.collect()
npdata = np.empty(gxapi.iMAX + 1)
self.assertRaises(gxvv.VVException, gxvv.GXvv, npdata)
del npdata
gc.collect()
def test_va(self):
self.start()
max = gxapi.iMAX // 16
print('max', max)
npdata = np.empty(max * 2).reshape((max, 2))
with gxva.GXva(npdata) as va:
self.assertTrue(va.length, gxapi.iMAX)
del npdata
gc.collect()
npdata = np.empty((gxapi.iMAX + 1) * 2).reshape(((gxapi.iMAX + 1), 2))
self.assertRaises(gxva.VAException, gxva.GXva, npdata)
del npdata
gc.collect()
def test_gdb(self):
self.start()
name = None
pagesize = 4096
try:
max_index = 65534 * pagesize // 8
print('maximum index', max_index)
with gxdb.Geosoft_gdb.new('new', overwrite=True, comp=gxdb.COMP_NONE, page_size=pagesize) as gdb:
name = gdb.file_name
line = gdb.new_line('test')
npd = np.zeros(max_index)
npd_size = np.size(npd)
gdb.write_line(line, npd, ['xx'])
del npd
npd2, ch, fid = gdb.read_line(line)
self.assertEqual(len(ch), 1)
self.assertEqual(np.size(npd2), npd_size)
del npd2
finally:
gxdb.delete_files(name)
##############################################################################################
if __name__ == '__main__':
unittest.main()
|
the-stack_0_27643
|
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ethtx import EthTx, EthTxConfig
from flask import Flask
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from ethtx_ce import frontend, api
app = Flask(__name__)
ethtx_config = EthTxConfig(
mongo_connection_string=os.getenv("MONGO_CONNECTION_STRING"),
mongo_database=os.getenv("MONGODB_DB"),
etherscan_api_key=os.getenv("ETHERSCAN_KEY"),
web3nodes={
"mainnet": dict(hook=os.getenv("MAINNET_NODE_URL", ""), poa=False),
"goerli": dict(hook=os.getenv("GOERLI_NODE_URL", ""), poa=True),
"rinkeby": dict(hook=os.getenv("RINKEBY_NODE_URL", ""), poa=True),
},
default_chain="mainnet",
etherscan_urls={
"mainnet": "https://api.etherscan.io/api",
"goerli": "https://api-goerli.etherscan.io/api",
"rinkeby": "https://api-rinkeby.etherscan.io/api",
},
)
ethtx = EthTx.initialize(ethtx_config)
app.wsgi_app = DispatcherMiddleware(
frontend.create_app(engine=ethtx, settings_override=EthTxConfig),
{"/api": api.create_app(engine=ethtx, settings_override=EthTxConfig)},
)
if __name__ == "__main__":
app.run()
|
the-stack_0_27644
|
#!/usr/bin/env python
from sebastian.midi.write_midi import SMF
from sebastian.core import OSequence, DURATION_64
from sebastian.core.notes import Key, major_scale
from sebastian.core.transforms import degree_in_key_with_octave, midi_pitch, transpose
# Hanon 1
up_degrees = [1, 3, 4, 5, 6, 5, 4, 3]
down_degrees = [6, 4, 3, 2, 1, 2, 3, 4]
final_degree = [1]
sections = [
(up_degrees, 4, range(14)),
(down_degrees, 4, range(13, -2, -1)),
(final_degree, 32, range(1)),
]
hanon_1 = OSequence()
for section in sections:
pattern, duration_64, offset = section
for o in offset:
for note in pattern:
hanon_1.append({"degree": note + o, DURATION_64: duration_64})
hanon_1 = hanon_1 | degree_in_key_with_octave(Key("C", major_scale), 4) | midi_pitch()
hanon_rh_1 = hanon_1
hanon_lh_1 = hanon_1 | transpose(-12)
seq = hanon_lh_1 // hanon_rh_1
if __name__ == "__main__":
f = open("hanon.mid", "w")
s = SMF([seq])
s.write(f)
f.close()
|
the-stack_0_27645
|
#!/usr/bin/env python3
from rdt import Rdt
class Server:
def __init__(self, address: str, port: int):
self.__port = port
self.__conn = Rdt.create_server_connection(address, port)
def run(self):
print(f"Server is listening on port: {self.__port}")
while True:
data, client_address = self.__conn.recv()
print(f"Msg received from: {client_address}: {data}")
if __name__ == "__main__":
server = Server("localhost", 12000)
server.run()
|
the-stack_0_27646
|
# Copyright (c) 2018 Pablo Moreno-Munoz
# Universidad Carlos III de Madrid and University of Sheffield
import sys
import numpy as np
from GPy.likelihoods import link_functions
from GPy.likelihoods import Likelihood
from GPy.util.misc import safe_exp, safe_square
from scipy.special import logsumexp  # logsumexp was removed from scipy.misc in newer SciPy versions
class Exponential(Likelihood):
"""
Exponential likelihood with a latent function over its parameter
"""
def __init__(self, gp_link=None):
if gp_link is None:
gp_link = link_functions.Identity()
super(Exponential, self).__init__(gp_link, name='Exponential')
def pdf(self, f, y, Y_metadata=None):
b = safe_exp(-f)
pdf = safe_exp(-y/b) / b
return pdf
def logpdf(self, f, y, Y_metadata=None):
        b = safe_exp(-f)  # can underflow to zero -> np.clip
b = np.clip(b, 1e-9, 1e9) # numerical stability
logpdf = - np.log(b) - (y/b)
return logpdf
def mean(self, f, Y_metadata=None):
b = safe_exp(-f)
b = np.clip(b, 1e-9, 1e9) # numerical stability
mean = b
return mean
def mean_sq(self, f, Y_metadata=None):
b = safe_exp(-f)
b = np.clip(b, 1e-9, 1e9) # numerical stability
mean_sq = safe_square(b)
return mean_sq
def variance(self, f, Y_metadata=None):
b = safe_exp(-f)
b = np.clip(b, 1e-9, 1e9) # numerical stability
var = safe_square(b)
return var
def samples(self, f, num_samples, Y_metadata=None):
b = safe_exp(-f)
b = np.clip(b, 1e-9, 1e9) # numerical stability
samples = np.random.exponential(scale=b)
return samples
def dlogp_df(self, f, y, Y_metadata=None):
b = safe_exp(-f)
b = np.clip(b, 1e-9, 1e9) # numerical stability
dlogp = 1 - (y/b)
return dlogp
def d2logp_df2(self, f, y, Y_metadata=None):
b = safe_exp(-f)
b = np.clip(b, 1e-9, 1e9) # numerical stability
d2logp = - y/b
return d2logp
def var_exp(self, Y, m, v, gh_points=None, Y_metadata=None):
# Variational Expectation
# gh: Gaussian-Hermite quadrature
if gh_points is None:
gh_f, gh_w = self._gh_points()
else:
gh_f, gh_w = gh_points
gh_w = gh_w / np.sqrt(np.pi)
m, v, Y = m.flatten(), v.flatten(), Y.flatten()
f = gh_f[None, :] * np.sqrt(2. * v[:, None]) + m[:, None]
logp = self.logpdf(f, np.tile(Y[:, None], (1, f.shape[1])))
var_exp = logp.dot(gh_w[:, None])
return var_exp
def var_exp_derivatives(self, Y, m, v, gh_points=None, Y_metadata=None):
# Variational Expectations of derivatives
# gh: Gaussian-Hermite quadrature
if gh_points is None:
gh_f, gh_w = self._gh_points()
else:
gh_f, gh_w = gh_points
gh_w = gh_w / np.sqrt(np.pi)
m, v, Y = m.flatten(), v.flatten(), Y.flatten()
f = gh_f[None, :] * np.sqrt(2. * v[:, None]) + m[:, None]
dlogp_df = self.dlogp_df(f, np.tile(Y[:, None], (1, f.shape[1])))
d2logp_df2 = self.d2logp_df2(f, np.tile(Y[:, None], (1, f.shape[1])))
var_exp_dm = dlogp_df.dot(gh_w[:, None])
var_exp_dv = 0.5 * d2logp_df2.dot(gh_w[:, None])
return var_exp_dm, var_exp_dv
def predictive(self, m, v, gh_points=None, Y_metadata=None):
# Variational Expectation
# gh: Gaussian-Hermite quadrature
if gh_points is None:
gh_f, gh_w = self._gh_points()
else:
gh_f, gh_w = gh_points
gh_w = gh_w / np.sqrt(np.pi)
m, v = m.flatten(), v.flatten()
f = gh_f[None, :] * np.sqrt(2. * v[:, None]) + m[:, None]
mean = self.mean(f)
var = self.variance(f).dot(gh_w[:, None]) + self.mean_sq(f).dot(gh_w[:, None]) - np.square(
mean.dot(gh_w[:, None]))
mean_pred = mean.dot(gh_w[:, None])
var_pred = var
return mean_pred, var_pred
def log_predictive(self, Ytest, mu_F_star, v_F_star, num_samples):
Ntest, D = mu_F_star.shape
F_samples = np.empty((Ntest, num_samples, D))
# function samples:
for d in range(D):
mu_fd_star = mu_F_star[:, d][:, None]
var_fd_star = v_F_star[:, d][:, None]
F_samples[:,:,d] = np.random.normal(mu_fd_star, np.sqrt(var_fd_star), size=(Ntest, num_samples))
# monte-carlo:
log_pred = -np.log(num_samples) + logsumexp(self.logpdf(F_samples[:,:,0], Ytest), axis=-1)
log_pred = np.array(log_pred).reshape(*Ytest.shape)
log_predictive = (1/num_samples)*log_pred.sum()
return log_predictive
def get_metadata(self):
dim_y = 1
dim_f = 1
dim_p = 1
return dim_y, dim_f, dim_p
def ismulti(self):
# Returns if the distribution is multivariate
return False
|
the-stack_0_27647
|
import glob
import numpy as np
np.random.seed(1001)
import torch
from torch_geometric.data import Data, Dataset
from sklearn.datasets import make_blobs
class FakeDataset(Dataset):
"""
Random number dataset to test with.
Generates numbers on the fly, but also caches them so .get(i) will return
something consistent
"""
def __init__(self, n_events=100):
super(FakeDataset, self).__init__('nofile')
self.cache = {}
self.n_events = n_events
def get(self, i):
if i >= self.n_events: raise IndexError
if i not in self.cache:
n_hits = np.random.randint(10, 100)
n_clusters = min(np.random.randint(1, 6), n_hits)
x = np.random.rand(n_hits, 5)
y = (np.random.rand(n_hits) * n_clusters).astype(np.int8)
# Also make a cluster 'truth': energy, boundary_x, boundary_y, pid (4)
y_cluster = np.random.rand(n_clusters, 4)
# pid (last column) should be an integer; do 3 particle classes now
y_cluster[:,-1] = np.floor(y_cluster[:,-1] * 3)
self.cache[i] = Data(
x = torch.from_numpy(x).type(torch.float),
y = torch.from_numpy(y),
truth_cluster_props = torch.from_numpy(y_cluster)
)
return self.cache[i]
def __len__(self):
return self.n_events
def len(self):
return self.n_events
class BlobsDataset(Dataset):
"""
Dataset around sklearn.datasets.make_blobs
"""
def __init__(self, n_events=100, seed_offset=0):
super(BlobsDataset, self).__init__('nofile')
self.cache = {}
self.n_events = n_events
self.cluster_space_dim = 2
self.seed_offset = seed_offset
def get(self, i):
if i >= self.n_events: raise IndexError
if i not in self.cache:
n_hits = np.random.randint(50, 70)
n_clusters = min(np.random.randint(2, 4), n_hits)
n_bkg = np.random.randint(10, 20)
# Generate the 'signal'
X, y = make_blobs(
n_samples=n_hits,
centers=n_clusters, n_features=self.cluster_space_dim,
random_state=i+self.seed_offset
)
y += 1 # To reserve index 0 for background
# Add background
cluster_space_min = np.min(X, axis=0)
cluster_space_max = np.max(X, axis=0)
cluster_space_width = cluster_space_max - cluster_space_min
X_bkg = cluster_space_min + np.random.rand(n_bkg, self.cluster_space_dim)*cluster_space_width
y_bkg = np.zeros(n_bkg)
X = np.concatenate((X,X_bkg))
y = np.concatenate((y,y_bkg))
# Calculate geom centers
truth_cluster_props = np.zeros((n_hits+n_bkg,2))
for k in range(1, n_clusters+1):  # separate loop variable so the cache key i is not clobbered
truth_cluster_props[y==k] = np.mean(X[y==k], axis=0)
# shuffle
order = np.random.permutation(n_hits+n_bkg)
X = X[order]
y = y[order]
truth_cluster_props = truth_cluster_props[order]
self.cache[i] = Data(
x = torch.from_numpy(X).float(),
y = torch.from_numpy(y).long(),
truth_cluster_props = torch.from_numpy(truth_cluster_props).float()
)
return self.cache[i]
def __len__(self):
return self.n_events
def len(self):
return self.n_events
class TauDataset(Dataset):
"""Tau dataset.
Features in x:
0 recHitEnergy,
1 recHitEta,
2 zeroFeature, #indicator if it is track or not
3 recHitTheta,
4 recHitR,
5 recHitX,
6 recHitY,
7 recHitZ,
8 recHitTime
(https://github.com/cms-pepr/HGCalML/blob/master/modules/datastructures/TrainData_NanoML.py#L211-L221)
Args:
flip (bool): If True, flips the negative endcap z-values to positive
reduce_noise (float): Randomly delete a fraction of noise. Useful
to speed up training.
"""
def __init__(self, path, flip=True, reduce_noise: float=None):
super(TauDataset, self).__init__(path)
self.npzs = list(sorted(glob.iglob(path + '/*.npz')))
self.flip = flip
self.reduce_noise = reduce_noise
self.noise_index = -1
self.noise_mask_cache = {}
def blacklist(self, npzs):
"""
Remove a list of npzs from the dataset
Useful to remove bad events
"""
for npz in npzs: self.npzs.remove(npz)
def get(self, i):
d = np.load(self.npzs[i])
x = d['recHitFeatures']
y = d['recHitTruthClusterIdx'].squeeze()
if self.flip and np.mean(x[:,7]) < 0:
# Negative endcap: Flip z-dependent coordinates
x[:,1] *= -1 # eta
x[:,7] *= -1 # z
if self.reduce_noise:
# Throw away a fraction of noise
# Have to be careful to throw away to same noise upon
# future calls of this function.
mask = self.noise_mask_cache.setdefault(i, mask_fraction_of_noise(y, self.reduce_noise, self.noise_index))
x = x[mask]
y = y[mask]
cluster_index = incremental_cluster_index_np(y.squeeze(), noise_index=self.noise_index)
if np.all(cluster_index == 0): print('WARNING: No objects in', self.npzs[i])
truth_cluster_props = np.hstack((
d['recHitTruthEnergy'],
d['recHitTruthPosition'],
d['recHitTruthTime'],
d['recHitTruthID'],
))
if self.reduce_noise: truth_cluster_props = truth_cluster_props[mask]
assert truth_cluster_props.shape == (x.shape[0], 5)
order = cluster_index.argsort()
return Data(
x = torch.from_numpy(x[order]).type(torch.float),
y = torch.from_numpy(cluster_index[order]).type(torch.int),
truth_cluster_props = torch.from_numpy(truth_cluster_props[order]).type(torch.float),
inpz = torch.Tensor([i])
)
def __len__(self):
return len(self.npzs)
def len(self):
return len(self.npzs)
def split(self, fraction):
"""
Creates two new instances of TauDataset with a fraction of events split
"""
left = self.__class__(self.root, self.flip, self.reduce_noise)
right = self.__class__(self.root, self.flip, self.reduce_noise)
split_index = int(fraction*len(self))
left.npzs = self.npzs[:split_index]
right.npzs = self.npzs[split_index:]
return left, right
def incremental_cluster_index(input: torch.Tensor, noise_index=None):
"""
Build a map that translates arbitrary indices to ordered starting from zero
By default the first unique index will be 0 in the output, the next 1, etc.
E.g. [13 -1 -1 13 -1 13 13 42 -1 -1] -> [0 1 1 0 1 0 0 2 1 1]
If noise_index is not None, the output will be 0 where input==noise_index:
E.g. noise_index=-1, [13 -1 -1 13 -1 13 13 42 -1 -1] -> [1 0 0 1 0 1 1 2 0 0]
If noise_index is not None but the input does not contain noise_index, 0
will still be reserved for it:
E.g. noise_index=-1, [13 4 4 13 4 13 13 42 4 4] -> [1 2 2 1 2 1 1 3 2 2]
"""
unique_indices, locations = torch.unique(input, return_inverse=True, sorted=True)
cluster_index_map = torch.arange(unique_indices.size(0))
if noise_index is not None:
if noise_index in unique_indices:
# Sort so that 0 aligns with the noise_index
cluster_index_map = cluster_index_map[(unique_indices != noise_index).argsort()]
else:
# Still reserve 0 for noise, even if it's not present
cluster_index_map += 1
return torch.gather(cluster_index_map, 0, locations).long()
def incremental_cluster_index_np(input: np.array, noise_index=None):
"""
Reimplementation of incremental_cluster_index for numpy arrays
"""
unique_indices, locations = np.unique(input, return_inverse=True)
cluster_index_map = np.arange(unique_indices.shape[0])
if noise_index is not None:
if noise_index in unique_indices:
# Sort so that 0 aligns with the noise_index
cluster_index_map = cluster_index_map[(unique_indices != noise_index).argsort()]
else:
# Still reserve 0 for noise, even if it's not present
cluster_index_map += 1
return np.take(cluster_index_map, locations)
def mask_fraction_of_noise(y: np.array, reduce_fraction: float, noise_index: int=-1) -> np.array:
"""Create a mask that throws out a fraction of noise (but keeps all signal)."""
is_noise = y == noise_index
n_noise = is_noise.sum()
n_target_noise = (1.-reduce_fraction) * n_noise
noise_mask = np.random.permutation(n_noise) < n_target_noise
mask = np.ones(y.shape[0], dtype=bool)
mask[is_noise] = noise_mask
return mask
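# Illustrative sketch, not in the original module: a tiny check of the two
# helpers above; it runs only when this file is executed directly.
if __name__ == '__main__':
    y_demo = np.array([13, -1, -1, 13, -1, 13, 13, 42, -1, -1])
    # Noise (-1) maps to 0 and real clusters to 1, 2, ... as in the docstring:
    print(incremental_cluster_index_np(y_demo, noise_index=-1))   # [1 0 0 1 0 1 1 2 0 0]
    # Drop roughly half of the noise hits while keeping every signal hit:
    demo_mask = mask_fraction_of_noise(y_demo, reduce_fraction=0.5, noise_index=-1)
    print(int(demo_mask.sum()), 'of', y_demo.shape[0], 'hits kept')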
|
the-stack_0_27650
|
# coding=utf-8
# Copyright 2014-2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import json
import os
import types
from oslo_log import log as logging
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex
from f5_openstack_agent.lbaasv2.drivers.bigip.resource_helper import \
BigIPResourceHelper
from f5_openstack_agent.lbaasv2.drivers.bigip.resource_helper import \
ResourceType
LOG = logging.getLogger(__name__)
class EsdJSONValidation(object):
"""Class reads the json file(s)
It checks and parses the content of json file(s) to a dictionary
"""
def __init__(self, esddir):
self.esdJSONFileList = glob.glob(os.path.join(esddir, '*.json'))
self.esdJSONDict = {}
def read_json(self):
for fileList in self.esdJSONFileList:
try:
with open(fileList) as json_file:
# Reading each file to a dictionary
fileJSONDict = json.load(json_file)
# Combine all dictionaries to one
self.esdJSONDict.update(fileJSONDict)
except ValueError as err:
LOG.error('ESD JSON File is invalid: %s', err)
raise f5_ex.esdJSONFileInvalidException()
return self.esdJSONDict
class EsdTagProcessor(EsdJSONValidation):
"""Class processes json dictionary
It checks and compares the tags from the ESD JSON dictionary against the list of valid tags
"""
def __init__(self, esddir):
super(EsdTagProcessor, self).__init__(esddir)
# this function will return intersection of known valid esd tags
# and the ones that user provided
def valid_tag_key_subset(self):
self.validtags = list(set(self.esdJSONDict.keys()) &
set(self.valid_esd_tags.keys()))
if not self.validtags:
LOG.error("Intersect of valid esd tags and user esd tags is empty")
if set(self.validtags) != set(self.esdJSONDict.keys()):
LOG.error("invalid tags in the user esd tags")
def process_esd(self, bigips):
try:
dict = self.read_json()
self.esd_dict = self.verify_esd_dict(bigips, dict)
except f5_ex.esdJSONFileInvalidException:
self.esd_dict = {}
raise
def get_esd(self, name):
return self.esd_dict.get(name, None)
def resource_exists(self, bigip, tag_name, resource_type):
helper = BigIPResourceHelper(resource_type)
name = tag_name
# allow user to define chain cert name with or without '.crt'
if resource_type == ResourceType.ssl_cert_file and not \
name.endswith('.crt'):
name += '.crt'
return helper.exists_in_collection(bigip, name)
def get_resource_type(self, bigip, resource_type, value):
if resource_type == ResourceType.persistence:
return self.get_persistence_type(bigip, value)
else:
return resource_type
def get_persistence_type(self, bigip, value):
resource_types = [
ResourceType.cookie_persistence,
ResourceType.dest_addr_persistence,
ResourceType.source_addr_persistence,
ResourceType.hash_persistence,
ResourceType.msrdp_persistence,
ResourceType.sip_persistence,
ResourceType.ssl_persistence,
ResourceType.universal_persistence]
for resource_type in resource_types:
if self.resource_exists(bigip, value, resource_type):
return resource_type
return None
def is_valid_tag(self, tag):
return self.valid_esd_tags.get(tag, None) is not None
def is_valid_value(self, bigip, value, resource_type):
return self.resource_exists(bigip, value, resource_type)
def is_valid_value_list(self, bigip, value, resource_type):
for v in value:
if not self.resource_exists(bigip, v, resource_type):
return False
return True
def verify_esd_dict(self, bigips, esd_dict):
valid_esd_dict = {}
for esd in esd_dict:
# check that ESD is valid for every BIG-IP
valid_esd = True
for bigip in bigips:
valid_esd = self.verify_esd(bigip, esd, esd_dict[esd])
if not valid_esd:
break
if valid_esd:
# add non-empty valid ESD to return dict
valid_esd_dict[esd] = valid_esd
return valid_esd_dict
def verify_esd(self, bigip, name, esd):
valid_esd = {}
for tag in esd:
try:
self.verify_tag(tag)
self.verify_value(bigip, tag, esd[tag])
# add tag to valid ESD
valid_esd[tag] = esd[tag]
LOG.debug("Tag {0} is valid for ESD {1}.".format(tag, name))
except f5_ex.esdJSONFileInvalidException as err:
LOG.info('Tag {0} failed validation for ESD {1} and was not '
'added to ESD. Error: {2}'.
format(tag, name, err.message))
return valid_esd
def verify_value(self, bigip, tag, value):
tag_def = self.valid_esd_tags.get(tag)
# verify resource type
resource_type = self.get_resource_type(
bigip, tag_def['resource_type'], value)
if not resource_type:
msg = 'Unable to determine resource type for tag {0} and ' \
'value {1}'.format(tag, value)
raise f5_ex.esdJSONFileInvalidException(msg)
# verify value type
value_type = tag_def['value_type']
if not isinstance(value, value_type):
msg = 'Invalid value {0} for tag {1}. ' \
'Type must be {2}.'.format(value, tag, value_type)
raise f5_ex.esdJSONFileInvalidException(msg)
# verify value exists on BIG-IP
if isinstance(value, list):
is_valid = self.is_valid_value_list(bigip, value, resource_type)
elif value=='':
# ESD Processing we will handle this as a special case and use this to toggle things like fastl4
is_valid = True
else:
is_valid = self.is_valid_value(bigip, value, resource_type)
if not is_valid:
msg = ("Invalid value {0} for tag {1}".format(value, tag))
raise f5_ex.esdJSONFileInvalidException(msg)
def verify_tag(self, tag):
if not self.is_valid_tag(tag):
msg = 'Tag {0} is not valid.'.format(tag)
raise f5_ex.esdJSONFileInvalidException(msg)
# this dictionary contains all the tags
# that are listed in the esd confluence page:
# https://docs.f5net.com/display/F5OPENSTACKPROJ/Enhanced+Service+Definition
# we are implementing the tags that can be applied only to listeners
valid_esd_tags = {
'lbaas_fastl4': {
'resource_type': ResourceType.fastl4_profile,
'value_type': types.StringTypes},
'lbaas_ctcp': {
'resource_type': ResourceType.tcp_profile,
'value_type': types.StringTypes},
'lbaas_stcp': {
'resource_type': ResourceType.tcp_profile,
'value_type': types.StringTypes},
'lbaas_http': {
'resource_type': ResourceType.http_profile,
'value_type': types.StringTypes},
'lbaas_one_connect': {
'resource_type': ResourceType.one_connect_profile,
'value_type': types.StringTypes},
'lbaas_http_compression': {
'resource_type': ResourceType.http_compression_profile,
'value_type': types.StringTypes},
'lbaas_cssl_profile': {
'resource_type': ResourceType.client_ssl_profile,
'value_type': types.StringTypes},
'lbaas_sssl_profile': {
'resource_type': ResourceType.server_ssl_profile,
'value_type': types.StringTypes},
'lbaas_irule': {
'resource_type': ResourceType.rule,
'value_type': types.ListType},
'lbaas_policy': {
'resource_type': ResourceType.l7policy,
'value_type': types.ListType},
'lbaas_persist': {
'resource_type': ResourceType.persistence,
'value_type': types.StringTypes},
'lbaas_fallback_persist': {
'resource_type': ResourceType.persistence,
'value_type': types.StringTypes}
}
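# Illustrative sketch, not part of the original driver: read_json above simply
# json-decodes every *.json file in esddir and merges the results into a single
# dict, with later files overriding duplicate keys. The same merge is shown here
# on in-memory strings instead of files; the ESD names and values are invented.
if __name__ == '__main__':
    import json
    demo_files = [
        '{"esd_demo_1": {"lbaas_ctcp": "tcp-lan-optimized"}}',
        '{"esd_demo_2": {"lbaas_irule": ["rule_one", "rule_two"]}}',
    ]
    merged = {}
    for text in demo_files:
        merged.update(json.loads(text))
    print(sorted(merged))   # ['esd_demo_1', 'esd_demo_2']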
|
the-stack_0_27651
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from sklearn.metrics import jaccard_score
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import squareform
def plot_sgbars(result_df, size_results ,ylabel="target share", title="Discovered Subgroups", dynamic_widths=False, _suffix=""):
result_df = result_df[:size_results]
shares_sg = result_df["target_share_sg"]
shares_compl = result_df["target_share_complement"]
sg_relative_sizes = result_df["relative_size_sg"]
x = np.arange(len(result_df))
base_width = 0.8
if dynamic_widths:
width_sg = 0.02 + base_width * sg_relative_sizes
width_compl = base_width - width_sg
else:
width_sg = base_width / 2
width_compl = base_width / 2
fig, ax = plt.subplots()
rects1 = ax.bar(x, shares_sg, width_sg, align='edge')
rects2 = ax.bar(x + width_sg, shares_compl, width_compl, align='edge', color='#61b76f')
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xticks(x + base_width / 2)
ax.set_xticklabels(result_df.subgroup, rotation=90)
ax.legend((rects1[0], rects2[0]), ('subgroup', 'complement'))
fig.set_size_inches(len(result_df),5)
def plot_npspace(result_df, size_results, data, annotate=True, fixed_limits=False):
result_df = result_df[:size_results]
fig, ax = plt.subplots()
for i, sg in result_df.iterrows():
target_share_sg = sg['target_share_sg']
size_sg = sg['size_sg']
ax.plot(size_sg, target_share_sg, 'ro', color='b')
ax.set_ylabel('target share')
ax.set_xlabel('size of subgroup')
if annotate:
ax.annotate(str(i), (size_sg, target_share_sg + 0.005))
def jaccard (sg, sg2, data) :
return jaccard_score(sg.covers(data), sg2.covers(data))
def similarity_sgs(results_descriptions, result_size ,data, color=True):
results_descriptions = results_descriptions[:result_size]
sgs = [x[1] for x in results_descriptions]
dists = [[jaccard(sg, sg2, data) for sg2 in sgs] for sg in sgs]
dist_df = pd.DataFrame(dists)
if color:
dist_df = dist_df.style.background_gradient()
return dist_df
def similarity_dendrogram(result_descriptions, result_size, data, truncated = False, p = None):
fig, _ = plt.subplots(figsize=(18, 9))
dist_df = similarity_sgs(result_descriptions, result_size, data, color=False)
sgNames = [str(x[1]) for x in result_descriptions[:result_size]]
mat = 1 - dist_df.values
dists = squareform(mat)
linkage_matrix = linkage(dists, "single")
if truncated == True :
r = dendrogram(linkage_matrix, labels=sgNames, leaf_rotation=90, p = p, truncate_mode='lastp')
count = 0
l_count = []
for sg in r['ivl'] :
if '(' in sg : # number
print(sgNames[count])
l_count.append(count)
count = count + int(sg[sg.find('(') + 1 : sg.find(')')])
else :
print(sg)
l_count.append(count)
count = count + 1
jaccard_threshold = 1- min([j for i in r['dcoord'] for j in i[1:-1]])
print(jaccard_threshold)
return l_count
else :
dendrogram(linkage_matrix, labels=sgNames, leaf_rotation=90)
def greedy_jaccard(results_descriptions, result_size ,data, threshold) :
results_descriptions = results_descriptions[:result_size]
sgs = [x[1] for x in results_descriptions]
sgNames = [str(x[1]) for x in results_descriptions[:result_size]]
dists = pd.DataFrame([[jaccard(sg, sg2, data) for sg2 in sgs] for sg in sgs]).values
d = {}
d_names = {}
l = []
for i in range(dists.shape[0]) :
if i not in l :
d[i] = []
d_names[sgNames[i]] = []
for j in range(i+1,dists.shape[0]) :
if dists[i,j] >= threshold :
l.append(j)
d[i] = d.get(i,[]) + [j]
d_names[sgNames[i]] = d_names.get(sgNames[i],[]) + [sgNames[j]]
return(d,d_names,sgNames)
def plot_distribution_numeric(sg, data, bins, target):
fig, _ = plt.subplots(figsize=(4, 3))
target_values_sg = data[sg.covers(data)][target].values
target_values_data = data[target].values
plt.hist(target_values_sg, bins= 100, range = (np.amin(target_values_data),np.amax(target_values_data)) ,alpha=0.5, label="subgroup(1)", density=True)
plt.hist(target_values_data, bins=100,range = (np.amin(target_values_data),np.amax(target_values_data)) ,alpha=0.5, label="Overall Data", density=True)
plt.xlim(0, np.amax(target_values_sg))
plt.xlabel('time')
#plt.yscale('log')
plt.ticklabel_format(axis="x", style="sci", scilimits=(0,0))
plt.legend(loc='upper right')
def plot_distribution_numeric_sgs(sgs, result_size ,data, bins, target):
fig, _ = plt.subplots(figsize=(4, 2))
target_values_data = data[target].values
maxlim = 0
for i in range(result_size):
sg = sgs[i][1]
target_values_sg = data[sg.covers(data)][target].values
plt.hist(target_values_sg, bins= 20, range = (np.amin(target_values_data),np.amax(target_values_data)),linewidth=1.5,histtype=u'step' ,alpha=0.5, label="subgroup"+str(i+1), density=True)
maxlim = max(maxlim,np.amax(target_values_sg))
plt.hist(target_values_data, bins=20,range = (np.amin(target_values_data),np.amax(target_values_data)),linewidth=1.5 ,alpha=0.5, label="Overall Data", density=True)
plt.xlim(0, maxlim + 42000)
plt.xlabel('time')
#plt.yscale('log')
plt.ticklabel_format(axis="x", style="sci", scilimits=(0,0))
plt.legend(loc='upper right')
def compare_distributions_numeric(sgd_results, data, bins, target):
fig, _ = plt.subplots()
sgs = [x[1] for x in sgd_results]
for sg in sgs:
target_values_sg = data[sg.covers(data)][target].values
plt.hist(target_values_sg, bins, alpha=0.3, label=str(sg), density=True)
plt.legend(loc='upper right')
def plot_npspace_numeric (result_df, size_results, data, annotate=True, fixed_limits=False):
result_df = result_df[:size_results]
print('mean of the dataset :',result_df['mean_dataset'].unique()[0])
fig, ax = plt.subplots()
for i, sg in result_df.iterrows():
mean_sg = sg['mean_sg']
size_sg = sg['size_sg']
ax.plot(size_sg, mean_sg, 'ro', color='b')
ax.set_ylabel('mean_sg')
ax.set_xlabel('size of subgroup')
if annotate:
ax.annotate(str(i), (size_sg, mean_sg + 0.005))
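# Illustrative sketch, not in the original module: similarity_sgs and
# similarity_dendrogram above reduce to jaccard_score on boolean cover vectors,
# turned into a condensed distance matrix for scipy's linkage. A minimal
# self-contained version with two hand-made covers (the arrays are invented).
if __name__ == '__main__':
    cover_a = np.array([1, 1, 0, 0, 1, 0], dtype=bool)
    cover_b = np.array([1, 0, 0, 0, 1, 1], dtype=bool)
    sim = jaccard_score(cover_a, cover_b)        # |A & B| / |A | B| = 2 / 4 = 0.5
    dist_condensed = squareform(1.0 - np.array([[1.0, sim], [sim, 1.0]]))
    print(sim)
    print(linkage(dist_condensed, "single"))     # single-link merge of the two covers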
|
the-stack_0_27652
|
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def print_list(head: ListNode) -> None:
while head:
print(head.val, end=" -> ")
head = head.next
class Solution:
@staticmethod
def has_cycle(head: ListNode) -> bool:
slow, fast = head, head.next
while slow != fast:
if fast is None or fast.next is None:
return False
slow = slow.next
fast = fast.next.next
return True
three = ListNode(3)
# two = ListNode(2)
# zero = ListNode(0)
# neg_four = ListNode(-4)
# three.next = two
# two.next = zero
# zero.next = neg_four
# neg_four.next = two
solution = Solution()
print(solution.has_cycle(three))
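# Illustrative addition, not in the original snippet: the commented-out lines
# above sketch a cyclic list; building one explicitly exercises the True branch.
a, b, c = ListNode(1), ListNode(2), ListNode(3)
a.next, b.next, c.next = b, c, b     # tail points back to b, closing a cycle
print(solution.has_cycle(a))         # True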
|
the-stack_0_27653
|
# Copyright 2018 Neural Networks and Deep Learning lab, MIPT
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from pathlib import Path
from typing import List, Any, Dict, Iterable, Tuple
from deeppavlov.core.common.log import get_logger
from deeppavlov.core.data.dataset_reader import DatasetReader
from deeppavlov.core.common.registry import register
from deeppavlov.core.commands.utils import expand_path
from deeppavlov.core.data.utils import download_decompress, mark_done, is_done
logger = get_logger(__name__)
@register('amazon_ecommerce_reader')
class AmazonEcommerceReader(DatasetReader):
"""Class to download and load ecommerce data catalog"""
def read(self, data_path: str, catalog: list, **kwargs) -> Dict[str, List[Tuple[Any, Any]]]:
"""Load data from specific catalog
Parameters:
data_path: where the dataset is located
catalog: names of the specific subcategories
Returns:
dataset: loaded dataset
"""
logger.info(f"Ecommerce loader is loaded with catalog {catalog}")
if not isinstance(catalog, list):
catalog = [catalog]
ec_data_global: List[Any] = []
data_path = Path(expand_path(data_path))
if not is_done(data_path):
self._download_data(data_path)
if data_path.is_dir():
for fname in data_path.rglob("*.txt"):
if any(cat in fname.name for cat in catalog):
logger.info(f"File {fname.name} is loaded")
ec_data_global += self._load_amazon_ecommerce_file(fname)
dataset = {
'train': [((item['Title'], [], {}), item) for item in ec_data_global],
'valid': [],
'test': []
}
logger.info(f"In total {len(ec_data_global)} items are loaded")
return dataset
def _download_data(self, data_path: str) -> None:
"""Download dataset"""
url = "https://github.com/SamTube405/Amazon-E-commerce-Data-set/archive/master.zip"
download_decompress(url, data_path)
mark_done(data_path)
def _load_amazon_ecommerce_file(self, fname: str) -> List[Dict[Any, Any]]:
"""Parse dataset
Parameters:
fname: catalog file
Returns:
ec_data: parsed catalog data
"""
ec_data = []
item: Dict = {}
new_item_re = re.compile(r"ITEM *\d+")  # raw string avoids the invalid \d escape warning
with open(fname, 'r', encoding='utf-8', errors='ignore') as file:
for line in file:
if new_item_re.match(line):
if len(item.keys()) > 0:
if 'Title' in item and 'Feature' in item:
ec_data.append(item)
item = {'Item': int(line[5:]), 'Category': fname.name.split("_")[1]}
else:
row = line.strip().split("=")
if len(row) == 2:
if row[0] in item:
item[row[0]] += "." + row[1]
else:
item[row[0]] = row[1]
return ec_data
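# Illustrative sketch, not part of the original reader: the catalog files parsed
# above are blocks headed by "ITEM <n>" followed by key=value lines. The same
# parsing loop over a small in-memory sample (the sample lines are invented).
if __name__ == '__main__':
    sample = [
        "ITEM 1",
        "Title=USB cable",
        "Feature=1m braided cable",
        "ITEM 2",
        "Title=Phone case",
        "Feature=Shock absorbing corners",
    ]
    item_re = re.compile(r"ITEM *\d+")
    items, current = [], {}
    for line in sample:
        if item_re.match(line):
            if 'Title' in current and 'Feature' in current:
                items.append(current)
            current = {'Item': int(line[5:])}
        else:
            key, _, value = line.partition("=")
            current[key] = value
    if 'Title' in current and 'Feature' in current:
        items.append(current)
    print(len(items), "items parsed")   # 2 items parsed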
|
the-stack_0_27656
|
import fileinput
from functools import reduce
lines = list(l for l in fileinput.input())
def narrow(nums, i, keep, pref):
ones = [n[i] for n in nums].count('1')
zeros = [n[i] for n in nums].count('0')
if ones > zeros:
pref = keep
elif ones < zeros:
pref = keep ^ 1
return [n for n in nums if int(n[i]) == pref]
oxy, i = lines[:], 0
while len(oxy) > 1:
oxy = narrow(oxy, i, 1, 1)
i += 1
car, i = lines[:], 0
while len(car) > 1:
car = narrow(car, i, 0, 0)
i += 1
print(int(f'0b{oxy[0]}', 2) * int(f'0b{car[0]}', 2))
|
the-stack_0_27658
|
import torch.nn as nn
import torch.nn.functional as F
class SampleNet(nn.Module):
def __init__(self):
super(SampleNet, self).__init__()  # the class is named SampleNet, not Net
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
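# Illustrative sketch, not in the original snippet: a forward-pass shape check
# for the network above. The 32x32 input size is what the 16 * 5 * 5 flatten in
# forward() implies (CIFAR-10 sized RGB images).
if __name__ == '__main__':
    import torch
    net = SampleNet()
    out = net(torch.randn(1, 3, 32, 32))
    print(out.shape)   # torch.Size([1, 10])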
|
the-stack_0_27659
|
#!/usr/bin/env python
import subprocess
import re
import shutil
import os
from os.path import exists, join, split, splitext, normpath, abspath
def ensuredir(path):
if not exists(path):
os.makedirs(path)
def _cmd(*args):
print(' '.join(args))
# universal_newlines=True returns str (not bytes), so the otool output
# can be split on '\n' under Python 3 as well
return subprocess.check_output(args, universal_newlines=True)
def extract_dependent_dylibs(dylib_fpath, filter_regex=None):
'Extracts the dependent libraries of the input dylib'
out = _cmd('otool', '-L', dylib_fpath)
out = [line.strip() for line in out.split('\n')]
if filter_regex is not None:
out = filter(lambda line: re.search(filter_regex, line), out)
dylib_list = [line.split(' ')[0] for line in out]
return dylib_list
def append_suffix(fpath, suffix):
'appends sufix like /some/filename<suffix>.ext'
root, fname = split(fpath)
name, ext = splitext(fname)
new_fname = name + suffix + ext
new_fpath = join(root, new_fname)
return new_fpath
def get_localize_name_cmd(dylib_fpath, fpath_src):
fname = split(fpath_src)[1]
loader_dst = join('@loader_path', fname)
instname_cmd = ['install_name_tool', '-change', fpath_src, loader_dst, dylib_fpath]
return instname_cmd
def inspect_dylib(dylib_fpath):
print(_cmd('otool', '-L', dylib_fpath))
def make_distributable_dylib(dylib_fpath, filter_regex='/opt/local/lib/'):
'removes absolute paths from dylibs on mac using otool'
print('[otool] making distributable: %r' % dylib_fpath)
assert exists(dylib_fpath), 'does not exist dylib_fpath=%r' % dylib_fpath
loader_path = split(dylib_fpath)[0]
depends_list = extract_dependent_dylibs(dylib_fpath, filter_regex=filter_regex)
dependency_moved = False
# Build task list
copy_list = []
instname_list = []
for fpath_src in depends_list:
# Skip depenencies which are relative paths
# they have probably already been fixed
if not exists(fpath_src):
continue
fpath_dst = join(loader_path, split(fpath_src)[1])
# Only copy if the file doesnt already exist
if not exists(fpath_dst):
if re.search(filter_regex, fpath_src):
dependency_moved = True
copy_list.append((fpath_src, fpath_dst))
instname_list.append(get_localize_name_cmd(dylib_fpath, fpath_src))
# Change input name as well
instname_list.append(get_localize_name_cmd(dylib_fpath, dylib_fpath))
# Copy the dependencies to the dylib location
for (fpath_src, fpath_dst) in copy_list:
shutil.copy(fpath_src, fpath_dst)
# Change the dependencies in the dylib
for instname_cmd in instname_list:
_cmd(*instname_cmd)
return dependency_moved
def check_depends_dylib(dylib_fpath, filter_regex='/opt/local/lib/'):
print('[otool] checking dependencies: %r' % dylib_fpath)
assert exists(dylib_fpath), 'does not exist dylib_fpath=%r' % dylib_fpath
depends_list = extract_dependent_dylibs(dylib_fpath, filter_regex=filter_regex)
loader_path = split(dylib_fpath)[0]
exists_list = []
missing_list = []
missing_abs_list = []
for fpath in depends_list:
fixed_fpath = normpath(fpath.replace('@loader_path', loader_path))
absfpath = abspath(fixed_fpath)
if exists(absfpath):
exists_list.append(fpath)
else:
missing_list.append(fpath)
missing_abs_list.append(absfpath)
if len(exists_list) > 0:
print('Verified Dependencies: ')
print('\n'.join(exists_list))
print('----')
else:
print('Nothing exists')
if len(missing_list) > 0:
print('Missing Dependencies: ')
print('\n'.join(missing_list))
print('----')
print('Missing Dependencies: (absolute path)')
print('\n'.join(missing_abs_list))
print('----')
else:
print('Nothing missing')
if __name__ == '__main__':
#from os.path import expanduser
#dylib_fpath = expanduser('~/code/hotspotter/hstpl/extern_feat/libhesaff.dylib')
import sys
if len(sys.argv) == 3:
dylib_fpath = sys.argv[2]
if sys.argv[1] == 'make_distributable':
make_distributable_dylib(dylib_fpath, filter_regex='/opt/local/lib/')
elif sys.argv[1] == 'check_depends':
check_depends_dylib(dylib_fpath, filter_regex='')
else:
print('[otool] unknown command')
else:
print('[otool] not enough arguments')
print(sys.argv)
|
the-stack_0_27661
|
# Script to create file structure based on lists  # by SimpleShell
import os
from countries import * # Imports the list of folders from countries.py file
main_dir = [Africa, Asia, Australia, Europe, NorthAmerica,
SouthAmerica] # Loading the list of sub-directories
root_dir = 'Continents'
# Name of the sub-directories
main_dir_names = ['Africa', 'Asia', 'Australia', 'Europe',
'North America', 'South America']
def main():
# Create directory
for i in range(0, len(main_dir)):
for j in range(0, len(main_dir[i])):
global main_dir_names
dirName = str(root_dir) + '/' + \
str(main_dir_names[i]) + '/' + str(main_dir[i][j])
try:
# Create target Directory
os.makedirs(dirName)
print("Directory ", dirName, " Created ")
except FileExistsError:
print("Directory ", dirName, " already exists")
# Create target Directory if don't exist
if not os.path.exists(dirName):
os.makedirs(dirName)
print("Directory ", dirName, " Created ")
else:
print("Directory ", dirName, " already exists")
for filename in main_dir_names:
filename = str(root_dir) + \
'/' + str(main_dir_names[i]) + '/' + \
str(main_dir_names[i])+'.md'
p = open(filename, 'w')
p.write("Add " + str(main_dir_names[i]) +
"'s Land Sector Data Analysis")
p.close()  # note the call: a bare p.close does not close the file
for j in range(0, len(main_dir[i])):
filename1 = str(root_dir) + '/' + str(main_dir_names[i]) + \
'/' + str(main_dir[i][j]) + '/' + str(main_dir[i][j])+'.md'
f = open(filename1, 'w')
f.write("Add " + str(main_dir[i][j]) +
"'s Land Sector Data Analysis")
f.close()
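# Illustrative sketch, not in the original script: os.makedirs(..., exist_ok=True)
# collapses the try/except above into a single call. The helper name below is
# invented for the example and is not used by main().
def ensure_country_dir(continent, country, root=root_dir):
    dir_name = os.path.join(root, continent, country)
    os.makedirs(dir_name, exist_ok=True)   # no error if the directory already exists
    return dir_name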
if __name__ == '__main__':
main()
|
the-stack_0_27662
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url('', include('goggles.warehouse.urls', namespace='warehouse')),
url('', include('social.apps.django_app.urls', namespace='social'))
)
|
the-stack_0_27664
|
import pathlib, sys
file_path = pathlib.Path(__file__).parent.absolute()
import numpy as np
import time
import matplotlib.pyplot as plt
from numpy import linalg as LA
from matplotlib import cm
import pressiodemoapps as pda
nx=80
ny=40
def makePlot(meshPath, yn):
fomCoords = np.loadtxt(meshPath+"/coordinates.dat", dtype=float)
x_fom, y_fom = fomCoords[:,1], fomCoords[:,2]
x_fom = np.reshape(x_fom, (ny,nx))
y_fom = np.reshape(y_fom, (ny,nx))
fig = plt.figure(1)
fomS = np.reshape(yn, (nx*ny, 4))
rho = fomS[:,0]
rho1 = np.reshape(rho, (ny,nx))
fig = plt.figure(1)
ax = plt.gca()
h = plt.contourf(x_fom, y_fom, rho1)
ax.set_aspect(aspect=1.)
plt.colorbar()
plt.show()
def test_run():
meshPath = str(file_path)
meshObj = pda.load_cellcentered_uniform_mesh(meshPath)
appObj = pda.create_problem(meshObj,
pda.Euler2d.NormalShock,
pda.InviscidFluxReconstruction.Weno3)
yn = appObj.initialCondition()
dt = 0.0005
Nsteps = int(0.1/dt)
pda.advanceSSP3(appObj, yn, dt, Nsteps, showProgress=True)
# makePlot(meshPath, yn)
fomS = np.reshape(yn, (nx*ny, 4))
rho = fomS[:,0]
goldD = np.loadtxt(str(file_path)+"/rho_gold.txt")
assert(np.allclose(rho.shape, goldD.shape))
assert(np.isnan(rho).all() == False)
assert(np.isnan(goldD).all() == False)
assert(np.allclose(rho, goldD,rtol=1e-10, atol=1e-12))
# ---------------------------
if __name__ == '__main__':
# ---------------------------
test_run()
|
the-stack_0_27666
|
#Modified from https://github.com/lernapparat/lernapparat/
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
import pickle
import numpy as np
class MyLinear(nn.Module):
"""Linear layer with equalized learning rate and custom learning rate multiplier."""
def __init__(self, input_size, output_size, gain=2**(0.5), use_wscale=False, lrmul=1, bias=True):
super().__init__()
he_std = gain * input_size**(-0.5) # He init
# Equalized learning rate and custom learning rate multiplier.
if use_wscale:
init_std = 1.0 / lrmul
self.w_mul = he_std * lrmul
else:
init_std = he_std / lrmul
self.w_mul = lrmul
self.weight = torch.nn.Parameter(
torch.randn(output_size, input_size) * init_std)
if bias:
self.bias = torch.nn.Parameter(torch.zeros(output_size))
self.b_mul = lrmul
else:
self.bias = None
def forward(self, x):
bias = self.bias
if bias is not None:
bias = bias * self.b_mul
return F.linear(x, self.weight * self.w_mul, bias)
class MyConv2d(nn.Module):
"""Conv layer with equalized learning rate and custom learning rate multiplier."""
def __init__(self, input_channels, output_channels, kernel_size, gain=2**(0.5), use_wscale=False, lrmul=1, bias=True,
intermediate=None, upscale=False):
super().__init__()
if upscale:
self.upscale = Upscale2d()
else:
self.upscale = None
he_std = gain * (input_channels * kernel_size **
2) ** (-0.5) # He init
self.kernel_size = kernel_size
if use_wscale:
init_std = 1.0 / lrmul
self.w_mul = he_std * lrmul
else:
init_std = he_std / lrmul
self.w_mul = lrmul
self.weight = torch.nn.Parameter(torch.randn(
output_channels, input_channels, kernel_size, kernel_size) * init_std)
if bias:
self.bias = torch.nn.Parameter(torch.zeros(output_channels))
self.b_mul = lrmul
else:
self.bias = None
self.intermediate = intermediate
def forward(self, x):
bias = self.bias
if bias is not None:
bias = bias * self.b_mul
have_convolution = False
if self.upscale is not None and min(x.shape[2:]) * 2 >= 128:
# this is the fused upscale + conv from StyleGAN, sadly this seems incompatible with the non-fused way
# this really needs to be cleaned up and go into the conv...
w = self.weight * self.w_mul
w = w.permute(1, 0, 2, 3)
# probably applying a conv on w would be more efficient. also this quadruples the weight (average)?!
w = F.pad(w, (1, 1, 1, 1))
w = w[:, :, 1:, 1:] + w[:, :, :-1, 1:] + \
w[:, :, 1:, :-1] + w[:, :, :-1, :-1]
x = F.conv_transpose2d(
x, w, stride=2, padding=int((w.size(-1)-1)//2))
have_convolution = True
elif self.upscale is not None:
x = self.upscale(x)
if not have_convolution and self.intermediate is None:
return F.conv2d(x, self.weight * self.w_mul, bias, padding=int(self.kernel_size//2))
elif not have_convolution:
x = F.conv2d(x, self.weight * self.w_mul, None,
padding=int(self.kernel_size//2))
if self.intermediate is not None:
x = self.intermediate(x)
if bias is not None:
x = x + bias.view(1, -1, 1, 1)
return x
class NoiseLayer(nn.Module):
"""adds noise. noise is per pixel (constant over channels) with per-channel weight"""
def __init__(self, channels):
super().__init__()
self.weight = nn.Parameter(torch.zeros(channels))
self.noise = None
def forward(self, x, noise=None):
if noise is None and self.noise is None:
noise = torch.randn(x.size(0), 1, x.size(
2), x.size(3), device=x.device, dtype=x.dtype)
elif noise is None:
# here is a little trick: if you get all the noiselayers and set each
# modules .noise attribute, you can have pre-defined noise.
# Very useful for analysis
noise = self.noise
x = x + self.weight.view(1, -1, 1, 1) * noise
return x
class StyleMod(nn.Module):
def __init__(self, latent_size, channels, use_wscale):
super(StyleMod, self).__init__()
self.lin = MyLinear(latent_size,
channels * 2,
gain=1.0, use_wscale=use_wscale)
def forward(self, x, latent):
style = self.lin(latent) # style => [batch_size, n_channels*2]
shape = [-1, 2, x.size(1)] + (x.dim() - 2) * [1]
style = style.view(shape) # [batch_size, 2, n_channels, ...]
x = x * (style[:, 0] + 1.) + style[:, 1]
return x
class PixelNormLayer(nn.Module):
def __init__(self, epsilon=1e-8):
super().__init__()
self.epsilon = epsilon
def forward(self, x):
return x * torch.rsqrt(torch.mean(x**2, dim=1, keepdim=True) + self.epsilon)
class BlurLayer(nn.Module):
def __init__(self, kernel=[1, 2, 1], normalize=True, flip=False, stride=1):
super(BlurLayer, self).__init__()
kernel = [1, 2, 1]
kernel = torch.tensor(kernel, dtype=torch.float32)
kernel = kernel[:, None] * kernel[None, :]
kernel = kernel[None, None]
if normalize:
kernel = kernel / kernel.sum()
if flip:
kernel = torch.flip(kernel, dims=[2, 3])  # tensors do not support negative-step slicing
self.register_buffer('kernel', kernel)
self.stride = stride
def forward(self, x):
# expand kernel channels
kernel = self.kernel.expand(x.size(1), -1, -1, -1)
x = F.conv2d(
x,
kernel,
stride=self.stride,
padding=int((self.kernel.size(2)-1)/2),
groups=x.size(1)
)
return x
def upscale2d(x, factor=2, gain=1):
assert x.dim() == 4
if gain != 1:
x = x * gain
if factor != 1:
shape = x.shape
x = x.view(shape[0], shape[1], shape[2], 1, shape[3],
1).expand(-1, -1, -1, factor, -1, factor)
x = x.contiguous().view(
shape[0], shape[1], factor * shape[2], factor * shape[3])
return x
class Upscale2d(nn.Module):
def __init__(self, factor=2, gain=1):
super().__init__()
assert isinstance(factor, int) and factor >= 1
self.gain = gain
self.factor = factor
def forward(self, x):
return upscale2d(x, factor=self.factor, gain=self.gain)
class G_mapping(nn.Sequential):
def __init__(self, nonlinearity='lrelu', use_wscale=True):
act, gain = {'relu': (torch.relu, np.sqrt(2)),
'lrelu': (nn.LeakyReLU(negative_slope=0.2), np.sqrt(2))}[nonlinearity]
layers = [
('pixel_norm', PixelNormLayer()),
('dense0', MyLinear(512, 512, gain=gain,
lrmul=0.01, use_wscale=use_wscale)),
('dense0_act', act),
('dense1', MyLinear(512, 512, gain=gain,
lrmul=0.01, use_wscale=use_wscale)),
('dense1_act', act),
('dense2', MyLinear(512, 512, gain=gain,
lrmul=0.01, use_wscale=use_wscale)),
('dense2_act', act),
('dense3', MyLinear(512, 512, gain=gain,
lrmul=0.01, use_wscale=use_wscale)),
('dense3_act', act),
('dense4', MyLinear(512, 512, gain=gain,
lrmul=0.01, use_wscale=use_wscale)),
('dense4_act', act),
('dense5', MyLinear(512, 512, gain=gain,
lrmul=0.01, use_wscale=use_wscale)),
('dense5_act', act),
('dense6', MyLinear(512, 512, gain=gain,
lrmul=0.01, use_wscale=use_wscale)),
('dense6_act', act),
('dense7', MyLinear(512, 512, gain=gain,
lrmul=0.01, use_wscale=use_wscale)),
('dense7_act', act)
]
super().__init__(OrderedDict(layers))
def forward(self, x):
x = super().forward(x)
return x
class Truncation(nn.Module):
def __init__(self, avg_latent, max_layer=8, threshold=0.7):
super().__init__()
self.max_layer = max_layer
self.threshold = threshold
self.register_buffer('avg_latent', avg_latent)
def forward(self, x):
assert x.dim() == 3
interp = torch.lerp(self.avg_latent, x, self.threshold)
do_trunc = (torch.arange(x.size(1), device=x.device) < self.max_layer).view(1, -1, 1)
return torch.where(do_trunc, interp, x)
class LayerEpilogue(nn.Module):
"""Things to do at the end of each layer."""
def __init__(self, channels, dlatent_size, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, activation_layer):
super().__init__()
layers = []
if use_noise:
self.noise = NoiseLayer(channels)
else:
self.noise = None
layers.append(('activation', activation_layer))
if use_pixel_norm:
layers.append(('pixel_norm', PixelNormLayer()))
if use_instance_norm:
layers.append(('instance_norm', nn.InstanceNorm2d(channels)))
self.top_epi = nn.Sequential(OrderedDict(layers))
if use_styles:
self.style_mod = StyleMod(
dlatent_size, channels, use_wscale=use_wscale)
else:
self.style_mod = None
def forward(self, x, dlatents_in_slice=None, noise_in_slice=None):
if(self.noise is not None):
x = self.noise(x, noise=noise_in_slice)
x = self.top_epi(x)
if self.style_mod is not None:
x = self.style_mod(x, dlatents_in_slice)
else:
assert dlatents_in_slice is None
return x
class InputBlock(nn.Module):
def __init__(self, nf, dlatent_size, const_input_layer, gain, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, activation_layer):
super().__init__()
self.const_input_layer = const_input_layer
self.nf = nf
if self.const_input_layer:
# called 'const' in tf
self.const = nn.Parameter(torch.ones(1, nf, 4, 4))
self.bias = nn.Parameter(torch.ones(nf))
else:
# tweak gain to match the official implementation of Progressing GAN
self.dense = MyLinear(dlatent_size, nf*16,
gain=gain/4, use_wscale=use_wscale)
self.epi1 = LayerEpilogue(nf, dlatent_size, use_wscale, use_noise,
use_pixel_norm, use_instance_norm, use_styles, activation_layer)
self.conv = MyConv2d(nf, nf, 3, gain=gain, use_wscale=use_wscale)
self.epi2 = LayerEpilogue(nf, dlatent_size, use_wscale, use_noise,
use_pixel_norm, use_instance_norm, use_styles, activation_layer)
def forward(self, dlatents_in_range, noise_in_range):
batch_size = dlatents_in_range.size(0)
if self.const_input_layer:
x = self.const.expand(batch_size, -1, -1, -1)
x = x + self.bias.view(1, -1, 1, 1)
else:
x = self.dense(dlatents_in_range[:, 0]).view(
batch_size, self.nf, 4, 4)
x = self.epi1(x, dlatents_in_range[:, 0], noise_in_range[0])
x = self.conv(x)
x = self.epi2(x, dlatents_in_range[:, 1], noise_in_range[1])
return x
class GSynthesisBlock(nn.Module):
def __init__(self, in_channels, out_channels, blur_filter, dlatent_size, gain, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, activation_layer):
# 2**res x 2**res # res = 3..resolution_log2
super().__init__()
if blur_filter:
blur = BlurLayer(blur_filter)
else:
blur = None
self.conv0_up = MyConv2d(in_channels, out_channels, kernel_size=3, gain=gain, use_wscale=use_wscale,
intermediate=blur, upscale=True)
self.epi1 = LayerEpilogue(out_channels, dlatent_size, use_wscale, use_noise,
use_pixel_norm, use_instance_norm, use_styles, activation_layer)
self.conv1 = MyConv2d(out_channels, out_channels,
kernel_size=3, gain=gain, use_wscale=use_wscale)
self.epi2 = LayerEpilogue(out_channels, dlatent_size, use_wscale, use_noise,
use_pixel_norm, use_instance_norm, use_styles, activation_layer)
def forward(self, x, dlatents_in_range, noise_in_range):
x = self.conv0_up(x)
x = self.epi1(x, dlatents_in_range[:, 0], noise_in_range[0])
x = self.conv1(x)
x = self.epi2(x, dlatents_in_range[:, 1], noise_in_range[1])
return x
class G_synthesis(nn.Module):
def __init__(self,
# Disentangled latent (W) dimensionality.
dlatent_size=512,
num_channels=3, # Number of output color channels.
resolution=1024, # Output resolution.
# Overall multiplier for the number of feature maps.
fmap_base=8192,
# log2 feature map reduction when doubling the resolution.
fmap_decay=1.0,
# Maximum number of feature maps in any layer.
fmap_max=512,
use_styles=True, # Enable style inputs?
const_input_layer=True, # First layer is a learned constant?
use_noise=True, # Enable noise inputs?
# True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
randomize_noise=True,
nonlinearity='lrelu', # Activation function: 'relu', 'lrelu'
use_wscale=True, # Enable equalized learning rate?
use_pixel_norm=False, # Enable pixelwise feature vector normalization?
use_instance_norm=True, # Enable instance normalization?
# Data type to use for activations and outputs.
dtype=torch.float32,
# Low-pass filter to apply when resampling activations. None = no filtering.
blur_filter=[1, 2, 1],
):
super().__init__()
def nf(stage):
return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
self.dlatent_size = dlatent_size
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
act, gain = {'relu': (torch.relu, np.sqrt(2)),
'lrelu': (nn.LeakyReLU(negative_slope=0.2), np.sqrt(2))}[nonlinearity]
num_layers = resolution_log2 * 2 - 2
num_styles = num_layers if use_styles else 1
torgbs = []
blocks = []
for res in range(2, resolution_log2 + 1):
channels = nf(res-1)
name = '{s}x{s}'.format(s=2**res)
if res == 2:
blocks.append((name,
InputBlock(channels, dlatent_size, const_input_layer, gain, use_wscale,
use_noise, use_pixel_norm, use_instance_norm, use_styles, act)))
else:
blocks.append((name,
GSynthesisBlock(last_channels, channels, blur_filter, dlatent_size, gain, use_wscale, use_noise, use_pixel_norm, use_instance_norm, use_styles, act)))
last_channels = channels
self.torgb = MyConv2d(channels, num_channels, 1,
gain=1, use_wscale=use_wscale)
self.blocks = nn.ModuleDict(OrderedDict(blocks))
def forward(self, dlatents_in, noise_in):
# Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
# lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype)
batch_size = dlatents_in.size(0)
for i, m in enumerate(self.blocks.values()):
if i == 0:
x = m(dlatents_in[:, 2*i:2*i+2], noise_in[2*i:2*i+2])
else:
x = m(x, dlatents_in[:, 2*i:2*i+2], noise_in[2*i:2*i+2])
rgb = self.torgb(x)
return rgb
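# Illustrative sketch, not in the original port: a minimal shape check for the
# mapping network above; it runs only when this file is executed directly. The
# weights are freshly initialised, so only the output shape is meaningful here.
if __name__ == '__main__':
    g_mapping = G_mapping()
    w = g_mapping(torch.randn(2, 512))   # batch of two 512-d latents z
    print(w.shape)                       # torch.Size([2, 512])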
|
the-stack_0_27669
|
import gym
import numpy as np
import matplotlib.pyplot as plt
ACTION_DICT = {0: 'NOOP', 1: 'FIRE', 2:'RIGHT', 3:'LEFT'}
def preprocess(observation):
observation = observation / 255
return np.mean(observation[30:,:], axis=2).reshape(180,160)
def stack_frames(stacked_frames, frame, stack_size, actions, action):
if stacked_frames is None:
stacked_frames = np.zeros((*frame.shape, stack_size))
actions = np.zeros(stack_size)
for idx in range(stack_size):
stacked_frames[:,:,idx] = frame
else:
stacked_frames[:,:,0:stack_size-1] = stacked_frames[:,:,1:]
stacked_frames[:,:,stack_size-1] = frame
actions[0:stack_size-1] = actions[1:]
actions[stack_size-1] = action
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
ax1.imshow(stacked_frames[:,:,0])
ax1.set_title(ACTION_DICT[actions[0]])
ax2.imshow(stacked_frames[:,:,1])
ax2.set_title(ACTION_DICT[actions[1]])
ax3.imshow(stacked_frames[:,:,2])
ax3.set_title(ACTION_DICT[actions[2]])
ax4.imshow(stacked_frames[:,:,3])
ax4.set_title(ACTION_DICT[actions[3]])
plt.show()
return actions, stacked_frames
if __name__ == '__main__':
env = gym.make('Breakout-v0')
stack_size = 4
for i in range(10):
done = False
observation = env.reset()
observation = preprocess(observation)
stacked_frames = None
actions=None
actions, stacked_frames = stack_frames(stacked_frames, observation,
stack_size, actions, 0)
while not done:
action = env.action_space.sample()
observation_, reward, done, info = env.step(action)
actions, stacked_frames_ = stack_frames(stacked_frames,
preprocess(observation_), stack_size,
actions, action)
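# Illustrative sketch, not in the original script: the core of stack_frames is a
# rolling buffer -- shift the most recent stack_size-1 frames one slot to the
# left and write the new frame into the last slot. The same idea on tiny 2x2
# "frames", runnable without gym or matplotlib; the helper name is invented.
def _demo_rolling_buffer(stack_size=4, steps=6):
    stack = np.zeros((2, 2, stack_size))
    for t in range(1, steps + 1):
        frame = np.full((2, 2), t)
        stack[:, :, :-1] = stack[:, :, 1:]   # drop the oldest frame
        stack[:, :, -1] = frame              # append the newest frame
    return stack[0, 0, :]                    # e.g. [3. 4. 5. 6.] after 6 steps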
|
the-stack_0_27671
|
import numpy as np
import cv2
thres = 0.5 # Threshold to detect object
nms_threshold = 0.2 # (0.1 to 1) 1 means no suppress , 0.1 means high suppress
cap = cv2.VideoCapture("Road_traffic_video2.mp4")
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 280) # width
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 120) # height
cap.set(cv2.CAP_PROP_BRIGHTNESS, 150) # brightness
classNames = []
with open("coco.names", "r") as f:
classNames = f.read().splitlines()
print(classNames)
font = cv2.FONT_HERSHEY_PLAIN
# font = cv2.FONT_HERSHEY_COMPLEX
Colors = np.random.uniform(0, 255, size=(len(classNames), 3))
weightsPath = "frozen_inference_graph.pb"
configPath = "ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt"
net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
while True:
success, img = cap.read()
classIds, confs, bbox = net.detect(img, confThreshold=thres)
bbox = list(bbox)
confs = list(np.array(confs).reshape(1, -1)[0])
confs = list(map(float, confs))
# print(type(confs[0]))
# print(confs)
indices = cv2.dnn.NMSBoxes(bbox, confs, thres, nms_threshold)
if len(classIds) != 0:
for i in indices:
i = i[0]
box = bbox[i]
confidence = str(round(confs[i], 2))
color = Colors[classIds[i][0] - 1]
x, y, w, h = box[0], box[1], box[2], box[3]
cv2.rectangle(img, (x, y), (x + w, y + h), color, thickness=2)
cv2.putText(
img,
classNames[classIds[i][0] - 1] + " " + confidence,
(x + 10, y + 20),
font,
1,
color,
2,
)
# cv2.putText(img,str(round(confidence,2)),(box[0]+100,box[1]+30),
# font,1,colors[classId-1],2)
cv2.imshow("Output", img)
cv2.waitKey(1)
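# Illustrative sketch, not in the original script: cv2.dnn.NMSBoxes on synthetic
# boxes, runnable without the video file or the SSD model. Two heavily
# overlapping boxes plus one separate box; with nms_threshold=0.2 only one of
# the overlapping pair survives. Depending on the OpenCV version the returned
# indices are plain ints or one-element arrays (hence the i[0] in the loop above).
def _demo_nms():
    boxes = [[10, 10, 100, 100], [12, 12, 100, 100], [300, 300, 80, 80]]
    scores = [0.9, 0.8, 0.7]
    keep = cv2.dnn.NMSBoxes(boxes, scores, 0.5, 0.2)   # score thresh, NMS thresh
    return keep   # indices of the kept boxes, e.g. 0 and 2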
|
the-stack_0_27675
|
# Copyright (C) 2020-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
from collections import OrderedDict, defaultdict
from enum import Enum, auto
from itertools import chain
import logging as log
import os
import os.path as osp
# Disable B410: import_lxml - the library is used for writing
from lxml import etree as ET # nosec
from datumaro.components.annotation import (
AnnotationType, CompiledMask, LabelCategories,
)
from datumaro.components.converter import Converter
from datumaro.components.dataset import ItemStatus
from datumaro.components.extractor import DatasetItem
from datumaro.util import find, str_to_bool
from datumaro.util.annotation_util import make_label_id_mapping
from datumaro.util.image import save_image
from datumaro.util.mask_tools import paint_mask, remap_mask
from .format import (
VocInstColormap, VocPath, VocTask, make_voc_categories, make_voc_label_map,
parse_label_map, write_label_map,
)
def _convert_attr(name, attributes, type_conv, default=None):
d = object()
value = attributes.get(name, d)
if value is d:
return default
try:
return type_conv(value)
except Exception as e:
log.warning("Failed to convert attribute '%s'='%s': %s" % \
(name, value, e))
return default
def _write_xml_bbox(bbox, parent_elem):
x, y, w, h = bbox
bbox_elem = ET.SubElement(parent_elem, 'bndbox')
ET.SubElement(bbox_elem, 'xmin').text = str(x)
ET.SubElement(bbox_elem, 'ymin').text = str(y)
ET.SubElement(bbox_elem, 'xmax').text = str(x + w)
ET.SubElement(bbox_elem, 'ymax').text = str(y + h)
return bbox_elem
class LabelmapType(Enum):
voc = auto()
source = auto()
class VocConverter(Converter):
DEFAULT_IMAGE_EXT = VocPath.IMAGE_EXT
BUILTIN_ATTRS = {'difficult', 'pose', 'truncated', 'occluded' }
@staticmethod
def _split_tasks_string(s):
return [VocTask[i.strip()] for i in s.split(',')]
@staticmethod
def _get_labelmap(s):
if osp.isfile(s):
return s
try:
return LabelmapType[s].name
except KeyError:
import argparse
raise argparse.ArgumentTypeError()
@classmethod
def build_cmdline_parser(cls, **kwargs):
parser = super().build_cmdline_parser(**kwargs)
parser.add_argument('--apply-colormap', type=str_to_bool, default=True,
help="Use colormap for class and instance masks "
"(default: %(default)s)")
parser.add_argument('--label-map', type=cls._get_labelmap, default=None,
help="Labelmap file path or one of %s" % \
', '.join(t.name for t in LabelmapType))
parser.add_argument('--allow-attributes',
type=str_to_bool, default=True,
help="Allow export of attributes (default: %(default)s)")
parser.add_argument('--keep-empty',
type=str_to_bool, default=False,
help="Write subset lists even if they are empty "
"(default: %(default)s)")
parser.add_argument('--tasks', type=cls._split_tasks_string,
help="VOC task filter, comma-separated list of {%s} "
"(default: all)" % ', '.join(t.name for t in VocTask))
return parser
def __init__(self, extractor, save_dir,
tasks=None, apply_colormap=True, label_map=None,
allow_attributes=True, keep_empty=False, **kwargs):
super().__init__(extractor, save_dir, **kwargs)
assert tasks is None or isinstance(tasks, (VocTask, list, set))
if tasks is None:
tasks = set(VocTask)
elif isinstance(tasks, VocTask):
tasks = {tasks}
else:
tasks = set(t if t in VocTask else VocTask[t] for t in tasks)
self._tasks = tasks
self._apply_colormap = apply_colormap
self._allow_attributes = allow_attributes
self._keep_empty = keep_empty
if label_map is None:
label_map = LabelmapType.source.name
assert isinstance(label_map, (str, dict)), label_map
self._load_categories(label_map)
self._patch = None
def apply(self):
self.make_dirs()
self.save_subsets()
self.save_label_map()
def make_dirs(self):
save_dir = self._save_dir
subsets_dir = osp.join(save_dir, VocPath.SUBSETS_DIR)
cls_subsets_dir = osp.join(subsets_dir,
VocPath.TASK_DIR[VocTask.classification])
action_subsets_dir = osp.join(subsets_dir,
VocPath.TASK_DIR[VocTask.action_classification])
layout_subsets_dir = osp.join(subsets_dir,
VocPath.TASK_DIR[VocTask.person_layout])
segm_subsets_dir = osp.join(subsets_dir,
VocPath.TASK_DIR[VocTask.segmentation])
ann_dir = osp.join(save_dir, VocPath.ANNOTATIONS_DIR)
img_dir = osp.join(save_dir, VocPath.IMAGES_DIR)
segm_dir = osp.join(save_dir, VocPath.SEGMENTATION_DIR)
inst_dir = osp.join(save_dir, VocPath.INSTANCES_DIR)
images_dir = osp.join(save_dir, VocPath.IMAGES_DIR)
os.makedirs(subsets_dir, exist_ok=True)
os.makedirs(ann_dir, exist_ok=True)
os.makedirs(img_dir, exist_ok=True)
os.makedirs(segm_dir, exist_ok=True)
os.makedirs(inst_dir, exist_ok=True)
os.makedirs(images_dir, exist_ok=True)
self._subsets_dir = subsets_dir
self._cls_subsets_dir = cls_subsets_dir
self._action_subsets_dir = action_subsets_dir
self._layout_subsets_dir = layout_subsets_dir
self._segm_subsets_dir = segm_subsets_dir
self._ann_dir = ann_dir
self._img_dir = img_dir
self._segm_dir = segm_dir
self._inst_dir = inst_dir
self._images_dir = images_dir
def get_label(self, label_id):
return self._extractor. \
categories()[AnnotationType.label].items[label_id].name
def save_subsets(self):
for subset_name, subset in self._extractor.subsets().items():
class_lists = OrderedDict()
clsdet_list = OrderedDict()
action_list = OrderedDict()
layout_list = OrderedDict()
segm_list = OrderedDict()
for item in subset:
log.debug("Converting item '%s'", item.id)
image_filename = self._make_image_filename(item)
if self._save_images:
if item.has_image and item.image.has_data:
self._save_image(item,
osp.join(self._images_dir, image_filename))
else:
log.debug("Item '%s' has no image", item.id)
labels = []
bboxes = []
masks = []
for a in item.annotations:
if a.type == AnnotationType.label:
labels.append(a)
elif a.type == AnnotationType.bbox:
bboxes.append(a)
elif a.type == AnnotationType.mask:
masks.append(a)
if self._tasks & {VocTask.detection, VocTask.person_layout,
VocTask.action_classification}:
root_elem = ET.Element('annotation')
if '_' in item.id:
folder = item.id[ : item.id.find('_')]
else:
folder = ''
ET.SubElement(root_elem, 'folder').text = folder
ET.SubElement(root_elem, 'filename').text = image_filename
source_elem = ET.SubElement(root_elem, 'source')
ET.SubElement(source_elem, 'database').text = 'Unknown'
ET.SubElement(source_elem, 'annotation').text = 'Unknown'
ET.SubElement(source_elem, 'image').text = 'Unknown'
if item.has_image and item.image.has_size:
h, w = item.image.size
size_elem = ET.SubElement(root_elem, 'size')
ET.SubElement(size_elem, 'width').text = str(w)
ET.SubElement(size_elem, 'height').text = str(h)
ET.SubElement(size_elem, 'depth').text = ''
item_segmented = 0 < len(masks)
ET.SubElement(root_elem, 'segmented').text = \
str(int(item_segmented))
objects_with_parts = []
objects_with_actions = defaultdict(dict)
main_bboxes = []
layout_bboxes = []
for bbox in bboxes:
label = self.get_label(bbox.label)
if self._is_part(label):
layout_bboxes.append(bbox)
elif self._is_label(label):
main_bboxes.append(bbox)
for new_obj_id, obj in enumerate(main_bboxes):
attr = obj.attributes
obj_elem = ET.SubElement(root_elem, 'object')
obj_label = self.get_label(obj.label)
ET.SubElement(obj_elem, 'name').text = obj_label
if 'pose' in attr:
ET.SubElement(obj_elem, 'pose').text = \
str(attr['pose'])
ET.SubElement(obj_elem, 'truncated').text = \
'%d' % _convert_attr('truncated', attr, int, 0)
ET.SubElement(obj_elem, 'occluded').text = \
'%d' % _convert_attr('occluded', attr, int, 0)
ET.SubElement(obj_elem, 'difficult').text = \
'%d' % _convert_attr('difficult', attr, int, 0)
bbox = obj.get_bbox()
if bbox is not None:
_write_xml_bbox(bbox, obj_elem)
for part_bbox in filter(
lambda x: obj.group and obj.group == x.group,
layout_bboxes):
part_elem = ET.SubElement(obj_elem, 'part')
ET.SubElement(part_elem, 'name').text = \
self.get_label(part_bbox.label)
_write_xml_bbox(part_bbox.get_bbox(), part_elem)
objects_with_parts.append(new_obj_id)
label_actions = self._get_actions(obj_label)
actions_elem = ET.Element('actions')
for action in label_actions:
present = 0
if action in attr:
present = _convert_attr(action, attr,
lambda v: int(v == True), 0)
ET.SubElement(actions_elem, action).text = \
'%d' % present
objects_with_actions[new_obj_id][action] = present
if len(actions_elem) != 0:
obj_elem.append(actions_elem)
if self._allow_attributes:
native_attrs = set(self.BUILTIN_ATTRS)
native_attrs.update(label_actions)
attrs_elem = ET.Element('attributes')
for k, v in attr.items():
if k in native_attrs:
continue
attr_elem = ET.SubElement(attrs_elem, 'attribute')
ET.SubElement(attr_elem, 'name').text = str(k)
ET.SubElement(attr_elem, 'value').text = str(v)
if len(attrs_elem):
obj_elem.append(attrs_elem)
if self._tasks & {VocTask.detection, VocTask.person_layout,
VocTask.action_classification}:
ann_path = osp.join(self._ann_dir, item.id + '.xml')
os.makedirs(osp.dirname(ann_path), exist_ok=True)
with open(ann_path, 'w', encoding='utf-8') as f:
f.write(ET.tostring(root_elem,
encoding='unicode', pretty_print=True))
clsdet_list[item.id] = True
if objects_with_parts:
layout_list[item.id] = objects_with_parts
if objects_with_actions:
action_list[item.id] = objects_with_actions
for label_ann in labels:
label = self.get_label(label_ann.label)
if not self._is_label(label):
continue
class_list = class_lists.get(item.id, set())
class_list.add(label_ann.label)
class_lists[item.id] = class_list
clsdet_list[item.id] = True
if masks and VocTask.segmentation in self._tasks:
compiled_mask = CompiledMask.from_instance_masks(masks,
instance_labels=[self._label_id_mapping(m.label)
for m in masks])
self.save_segm(
osp.join(self._segm_dir, item.id + VocPath.SEGM_EXT),
compiled_mask.class_mask)
self.save_segm(
osp.join(self._inst_dir, item.id + VocPath.SEGM_EXT),
compiled_mask.instance_mask,
colormap=VocInstColormap)
segm_list[item.id] = True
elif not masks and self._patch:
cls_mask_path = osp.join(self._segm_dir,
item.id + VocPath.SEGM_EXT)
if osp.isfile(cls_mask_path):
os.remove(cls_mask_path)
inst_mask_path = osp.join(self._inst_dir,
item.id + VocPath.SEGM_EXT)
if osp.isfile(inst_mask_path):
os.remove(inst_mask_path)
if len(item.annotations) == 0:
clsdet_list[item.id] = None
layout_list[item.id] = None
action_list[item.id] = None
segm_list[item.id] = None
if self._tasks & {VocTask.classification, VocTask.detection,
VocTask.action_classification, VocTask.person_layout}:
self.save_clsdet_lists(subset_name, clsdet_list)
if self._tasks & {VocTask.classification}:
self.save_class_lists(subset_name, class_lists)
if self._tasks & {VocTask.action_classification}:
self.save_action_lists(subset_name, action_list)
if self._tasks & {VocTask.person_layout}:
self.save_layout_lists(subset_name, layout_list)
if self._tasks & {VocTask.segmentation}:
self.save_segm_lists(subset_name, segm_list)
@staticmethod
def _get_filtered_lines(path, patch, subset, items=None):
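        # Read a VOC list file into {item_id: [rest-of-line, ...]}, skipping
        # entries that `patch` marks as removed for this subset; when `items`
        # is given, every surviving id is also recorded in it.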
lines = {}
with open(path, encoding='utf-8') as f:
for line in f:
line = line.strip()
if not line:
continue
line_parts = line.split(maxsplit=1)
if len(line_parts) < 2:
line_parts.append('')
item, text = line_parts
if not patch or patch.updated_items.get((item, subset)) != \
ItemStatus.removed:
lines.setdefault(item, []).append(text)
if items is not None:
items.update((k, True) for k in lines)
return lines
def save_action_lists(self, subset_name, action_list):
os.makedirs(self._action_subsets_dir, exist_ok=True)
ann_file = osp.join(self._action_subsets_dir, subset_name + '.txt')
items = {k: True for k in action_list}
if self._patch and osp.isfile(ann_file):
self._get_filtered_lines(ann_file, self._patch, subset_name, items)
if items or self._keep_empty:
with open(ann_file, 'w', encoding='utf-8') as f:
for item in items:
f.write('%s\n' % item)
elif osp.isfile(ann_file):
os.remove(ann_file)
if not items and not self._patch and not self._keep_empty:
return
def _write_item(f, item, objs, action):
if not objs:
return
for obj_id, obj_actions in objs.items():
presented = obj_actions[action]
f.write('%s %s % d\n' % \
(item, 1 + obj_id, 1 if presented else -1))
all_actions = {
act: osp.join(self._action_subsets_dir,
'%s_%s.txt' % (act, subset_name))
for act in chain(*(self._get_actions(l) for l in self._label_map))
}
for action, ann_file in all_actions.items():
if not items and not self._keep_empty:
if osp.isfile(ann_file):
os.remove(ann_file)
continue
lines = {}
if self._patch and osp.isfile(ann_file):
lines = self._get_filtered_lines(ann_file, None, subset_name)
with open(ann_file, 'w', encoding='utf-8') as f:
for item in items:
if item in action_list:
_write_item(f, item, action_list[item], action)
elif item in lines:
print(item, *lines[item], file=f)
def save_class_lists(self, subset_name, class_lists):
def _write_item(f, item, item_labels):
if not item_labels:
return
item_labels = [self.get_label(l) for l in item_labels]
presented = label in item_labels
f.write('%s % d\n' % (item, 1 if presented else -1))
os.makedirs(self._cls_subsets_dir, exist_ok=True)
for label in self._label_map:
ann_file = osp.join(self._cls_subsets_dir,
'%s_%s.txt' % (label, subset_name))
items = {k: True for k in class_lists}
lines = {}
if self._patch and osp.isfile(ann_file):
lines = self._get_filtered_lines(ann_file, self._patch,
subset_name, items)
if not items and not self._keep_empty:
if osp.isfile(ann_file):
os.remove(ann_file)
continue
with open(ann_file, 'w', encoding='utf-8') as f:
for item in items:
if item in class_lists:
_write_item(f, item, class_lists[item])
elif item in lines:
print(item, *lines[item], file=f)
def save_clsdet_lists(self, subset_name, clsdet_list):
os.makedirs(self._cls_subsets_dir, exist_ok=True)
ann_file = osp.join(self._cls_subsets_dir, subset_name + '.txt')
items = {k: True for k in clsdet_list}
if self._patch and osp.isfile(ann_file):
self._get_filtered_lines(ann_file, self._patch, subset_name, items)
if items or self._keep_empty:
with open(ann_file, 'w', encoding='utf-8') as f:
for item in items:
f.write('%s\n' % item)
elif osp.isfile(ann_file):
os.remove(ann_file)
def save_segm_lists(self, subset_name, segm_list):
os.makedirs(self._segm_subsets_dir, exist_ok=True)
ann_file = osp.join(self._segm_subsets_dir, subset_name + '.txt')
items = {k: True for k in segm_list}
if self._patch and osp.isfile(ann_file):
self._get_filtered_lines(ann_file, self._patch, subset_name, items)
if items or self._keep_empty:
with open(ann_file, 'w', encoding='utf-8') as f:
for item in items:
f.write('%s\n' % item)
elif osp.isfile(ann_file):
os.remove(ann_file)
def save_layout_lists(self, subset_name, layout_list):
def _write_item(f, item, item_layouts):
if 1 < len(item.split()):
item = '\"' + item + '\"'
if item_layouts:
for obj_id in item_layouts:
f.write('%s % d\n' % (item, 1 + obj_id))
else:
f.write('%s\n' % item)
os.makedirs(self._layout_subsets_dir, exist_ok=True)
ann_file = osp.join(self._layout_subsets_dir, subset_name + '.txt')
items = {k: True for k in layout_list}
lines = {}
if self._patch and osp.isfile(ann_file):
self._get_filtered_lines(ann_file, self._patch, subset_name, items)
if not items and not self._keep_empty:
if osp.isfile(ann_file):
os.remove(ann_file)
return
with open(ann_file, 'w', encoding='utf-8') as f:
for item in items:
if item in layout_list:
_write_item(f, item, layout_list[item])
elif item in lines:
print(item, *lines[item], file=f)
def save_segm(self, path, mask, colormap=None):
if self._apply_colormap:
if colormap is None:
colormap = self._categories[AnnotationType.mask].colormap
mask = paint_mask(mask, colormap)
save_image(path, mask, create_dir=True)
def save_label_map(self):
path = osp.join(self._save_dir, VocPath.LABELMAP_FILE)
write_label_map(path, self._label_map)
def _load_categories(self, label_map_source):
if label_map_source == LabelmapType.voc.name:
# use the default VOC colormap
label_map = make_voc_label_map()
elif label_map_source == LabelmapType.source.name and \
AnnotationType.mask not in self._extractor.categories():
# generate colormap for input labels
labels = self._extractor.categories() \
.get(AnnotationType.label, LabelCategories())
label_map = OrderedDict((item.name, [None, [], []])
for item in labels.items)
elif label_map_source == LabelmapType.source.name and \
AnnotationType.mask in self._extractor.categories():
# use source colormap
labels = self._extractor.categories()[AnnotationType.label]
colors = self._extractor.categories()[AnnotationType.mask]
label_map = OrderedDict()
for idx, item in enumerate(labels.items):
color = colors.colormap.get(idx)
if color is not None:
label_map[item.name] = [color, [], []]
elif isinstance(label_map_source, dict):
label_map = OrderedDict(
sorted(label_map_source.items(), key=lambda e: e[0]))
elif isinstance(label_map_source, str) and osp.isfile(label_map_source):
label_map = parse_label_map(label_map_source)
else:
raise Exception("Wrong labelmap specified: '%s', "
"expected one of %s or a file path" % \
(label_map_source, ', '.join(t.name for t in LabelmapType)))
# There must always be a label with color (0, 0, 0) at index 0
bg_label = find(label_map.items(), lambda x: x[1][0] == (0, 0, 0))
if bg_label is not None:
bg_label = bg_label[0]
else:
bg_label = 'background'
if bg_label not in label_map:
has_colors = any(v[0] is not None for v in label_map.values())
color = (0, 0, 0) if has_colors else None
label_map[bg_label] = [color, [], []]
label_map.move_to_end(bg_label, last=False)
self._categories = make_voc_categories(label_map)
# Update colors with assigned values
colormap = self._categories[AnnotationType.mask].colormap
for label_id, color in colormap.items():
label_desc = label_map[
self._categories[AnnotationType.label].items[label_id].name]
label_desc[0] = color
self._label_map = label_map
self._label_id_mapping = self._make_label_id_map()
def _is_label(self, s):
return self._label_map.get(s) is not None
def _is_part(self, s):
for label_desc in self._label_map.values():
if s in label_desc[1]:
return True
return False
def _is_action(self, label, s):
return s in self._get_actions(label)
def _get_actions(self, label):
label_desc = self._label_map.get(label)
if not label_desc:
return []
return label_desc[2]
def _make_label_id_map(self):
map_id, id_mapping, src_labels, dst_labels = make_label_id_mapping(
self._extractor.categories().get(AnnotationType.label),
self._categories[AnnotationType.label])
void_labels = [src_label for src_id, src_label in src_labels.items()
if src_label not in dst_labels]
if void_labels:
log.warning("The following labels are remapped to background: %s" %
', '.join(void_labels))
log.debug("Saving segmentations with the following label mapping: \n%s" %
'\n'.join(["#%s '%s' -> #%s '%s'" %
(
src_id, src_label, id_mapping[src_id],
self._categories[AnnotationType.label] \
.items[id_mapping[src_id]].name
)
for src_id, src_label in src_labels.items()
])
)
return map_id
def _remap_mask(self, mask):
return remap_mask(mask, self._label_id_mapping)
@classmethod
def patch(cls, dataset, patch, save_dir, **kwargs):
conv = cls(patch.as_dataset(dataset), save_dir=save_dir, **kwargs)
conv._patch = patch
conv.apply()
for filename in os.listdir(conv._cls_subsets_dir):
if '_' not in filename or not filename.endswith('.txt'):
continue
label, subset = osp.splitext(filename)[0].split('_', maxsplit=1)
if label not in conv._label_map or subset not in dataset.subsets():
os.remove(osp.join(conv._cls_subsets_dir, filename))
        # Find images that need to be removed.
        # Images from different subsets are stored in a common directory, so
        # avoid situations like:
        #   (a, test): added
        #   (a, train): removed
        # where the second line would remove images added by the first.
ids_to_remove = {}
for (item_id, subset), status in patch.updated_items.items():
if status != ItemStatus.removed:
item = patch.data.get(item_id, subset)
else:
item = DatasetItem(item_id, subset=subset)
if not (status == ItemStatus.removed or not item.has_image):
ids_to_remove[item_id] = (item, False)
else:
ids_to_remove.setdefault(item_id, (item, True))
for item, to_remove in ids_to_remove.values():
if not to_remove:
continue
if conv._tasks & {VocTask.detection,
VocTask.action_classification, VocTask.person_layout}:
ann_path = osp.join(conv._ann_dir, item.id + '.xml')
if osp.isfile(ann_path):
os.remove(ann_path)
image_path = osp.join(conv._images_dir,
conv._make_image_filename(item))
if osp.isfile(image_path):
os.unlink(image_path)
if not [a for a in item.annotations
if a.type is AnnotationType.mask]:
path = osp.join(save_dir, VocPath.SEGMENTATION_DIR,
item.id + VocPath.SEGM_EXT)
if osp.isfile(path):
os.unlink(path)
path = osp.join(save_dir, VocPath.INSTANCES_DIR,
item.id + VocPath.SEGM_EXT)
if osp.isfile(path):
os.unlink(path)
class VocClassificationConverter(VocConverter):
def __init__(self, *args, **kwargs):
kwargs['tasks'] = VocTask.classification
super().__init__(*args, **kwargs)
class VocDetectionConverter(VocConverter):
def __init__(self, *args, **kwargs):
kwargs['tasks'] = VocTask.detection
super().__init__(*args, **kwargs)
class VocLayoutConverter(VocConverter):
def __init__(self, *args, **kwargs):
kwargs['tasks'] = VocTask.person_layout
super().__init__(*args, **kwargs)
class VocActionConverter(VocConverter):
def __init__(self, *args, **kwargs):
kwargs['tasks'] = VocTask.action_classification
super().__init__(*args, **kwargs)
class VocSegmentationConverter(VocConverter):
def __init__(self, *args, **kwargs):
kwargs['tasks'] = VocTask.segmentation
super().__init__(*args, **kwargs)
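# Note: each task-specific converter above only pins the 'tasks' keyword, so
# VocDetectionConverter(**kwargs) behaves exactly like
# VocConverter(tasks=VocTask.detection, **kwargs), and likewise for the other
# subclasses.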
|
the-stack_0_27676
|
# Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FormPost Middleware
Translates a browser form post into a regular Swift object PUT.
The format of the form is::
<form action="<swift-url>" method="POST"
enctype="multipart/form-data">
<input type="hidden" name="redirect" value="<redirect-url>" />
<input type="hidden" name="max_file_size" value="<bytes>" />
<input type="hidden" name="max_file_count" value="<count>" />
<input type="hidden" name="expires" value="<unix-timestamp>" />
<input type="hidden" name="signature" value="<hmac>" />
<input type="file" name="file1" /><br />
<input type="submit" />
</form>
Optionally, if you want the uploaded files to be temporary you can set
x-delete-at or x-delete-after attributes by adding one of these as a
form input::
<input type="hidden" name="x_delete_at" value="<unix-timestamp>" />
<input type="hidden" name="x_delete_after" value="<seconds>" />
The <swift-url> is the URL of the Swift destination, such as::
https://swift-cluster.example.com/v1/AUTH_account/container/object_prefix
The name of each file uploaded will be appended to the <swift-url>
given. So, you can upload directly to the root of the container with a
URL like::
https://swift-cluster.example.com/v1/AUTH_account/container/
Optionally, you can include an object prefix to better separate
different users' uploads, such as::
https://swift-cluster.example.com/v1/AUTH_account/container/object_prefix
Note the form method must be POST and the enctype must be set as
"multipart/form-data".
The redirect attribute is the URL to redirect the browser to after the upload
completes. This is an optional parameter. If you are uploading the form via an
XMLHttpRequest the redirect should not be included. The URL will have status
and message query parameters added to it, indicating the HTTP status code for
the upload (2xx is success) and a possible message for further information if
there was an error (such as "max_file_size exceeded").
The max_file_size attribute must be included and indicates the
largest single file upload that can be done, in bytes.
The max_file_count attribute must be included and indicates the
maximum number of files that can be uploaded with the form. Include
additional ``<input type="file" name="filexx" />`` attributes if
desired.
The expires attribute is the Unix timestamp after which the form is
invalidated; the form must be submitted before that time.
The signature attribute is the HMAC-SHA1 signature of the form. Here is
sample code for computing the signature::
import hmac
from hashlib import sha1
from time import time
path = '/v1/account/container/object_prefix'
redirect = 'https://srv.com/some-page' # set to '' if redirect not in form
max_file_size = 104857600
max_file_count = 10
expires = int(time() + 600)
key = 'mykey'
hmac_body = '%s\\n%s\\n%s\\n%s\\n%s' % (path, redirect,
max_file_size, max_file_count, expires)
signature = hmac.new(key, hmac_body, sha1).hexdigest()
The key is the value of either the X-Account-Meta-Temp-URL-Key or the
X-Account-Meta-Temp-Url-Key-2 header on the account.
Be certain to use the full path, from the /v1/ onward.
Note that x_delete_at and x_delete_after are not used in signature generation
as they are both optional attributes.
The command line tool ``swift-form-signature`` may be used (mostly
just when testing) to compute expires and signature.
Also note that the file attributes must be after the other attributes
in order to be processed correctly. If attributes come after the
file, they won't be sent with the subrequest (there is no way to
parse all the attributes on the server-side without reading the whole
thing into memory -- to service many requests, some with large files,
there just isn't enough memory on the server, so attributes following
the file are simply ignored).
"""
__all__ = ['FormPost', 'filter_factory', 'READ_CHUNK_SIZE', 'MAX_VALUE_LENGTH']
import hmac
import rfc822
from hashlib import sha1
from time import time
from urllib import quote
from swift.common.exceptions import MimeInvalid
from swift.common.middleware.tempurl import get_tempurl_keys_from_metadata
from swift.common.utils import streq_const_time, register_swift_info, \
parse_content_disposition, iter_multipart_mime_documents
from swift.common.wsgi import make_pre_authed_env
from swift.common.swob import HTTPUnauthorized
from swift.proxy.controllers.base import get_account_info
#: The size of data to read from the form at any given time.
READ_CHUNK_SIZE = 4096
#: The maximum size of any attribute's value. Any additional data will be
#: truncated.
MAX_VALUE_LENGTH = 4096
class FormInvalid(Exception):
pass
class FormUnauthorized(Exception):
pass
class _CappedFileLikeObject(object):
"""
A file-like object wrapping another file-like object that raises
an EOFError if the amount of data read exceeds a given
max_file_size.
:param fp: The file-like object to wrap.
:param max_file_size: The maximum bytes to read before raising an
EOFError.
"""
def __init__(self, fp, max_file_size):
self.fp = fp
self.max_file_size = max_file_size
self.amount_read = 0
self.file_size_exceeded = False
def read(self, size=None):
ret = self.fp.read(size)
self.amount_read += len(ret)
if self.amount_read > self.max_file_size:
self.file_size_exceeded = True
raise EOFError('max_file_size exceeded')
return ret
def readline(self):
ret = self.fp.readline()
self.amount_read += len(ret)
if self.amount_read > self.max_file_size:
self.file_size_exceeded = True
raise EOFError('max_file_size exceeded')
return ret
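# A minimal illustrative sketch (not part of the original middleware, never
# called here): wrap a file-like body in _CappedFileLikeObject and any read
# past max_file_size surfaces as an EOFError with file_size_exceeded set.
def _example_capped_read():
    from io import BytesIO
    capped = _CappedFileLikeObject(BytesIO(b'0123456789'), max_file_size=4)
    try:
        capped.read(8)  # 8 bytes read > 4-byte cap, so EOFError is raised
    except EOFError:
        return capped.file_size_exceeded  # True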
class FormPost(object):
"""
FormPost Middleware
See above for a full description.
The proxy logs created for any subrequests made will have swift.source set
to "FP".
:param app: The next WSGI filter or app in the paste.deploy
chain.
:param conf: The configuration dict for the middleware.
"""
def __init__(self, app, conf):
#: The next WSGI application/filter in the paste.deploy pipeline.
self.app = app
#: The filter configuration dict.
self.conf = conf
def __call__(self, env, start_response):
"""
Main hook into the WSGI paste.deploy filter/app pipeline.
:param env: The WSGI environment dict.
:param start_response: The WSGI start_response hook.
:returns: Response as per WSGI.
"""
if env['REQUEST_METHOD'] == 'POST':
try:
content_type, attrs = \
parse_content_disposition(env.get('CONTENT_TYPE') or '')
if content_type == 'multipart/form-data' and \
'boundary' in attrs:
http_user_agent = "%s FormPost" % (
env.get('HTTP_USER_AGENT', ''))
env['HTTP_USER_AGENT'] = http_user_agent.strip()
status, headers, body = self._translate_form(
env, attrs['boundary'])
start_response(status, headers)
return [body]
except (FormInvalid, MimeInvalid, EOFError) as err:
body = 'FormPost: %s' % err
start_response(
'400 Bad Request',
(('Content-Type', 'text/plain'),
('Content-Length', str(len(body)))))
return [body]
except FormUnauthorized as err:
message = 'FormPost: %s' % str(err).title()
return HTTPUnauthorized(body=message)(
env, start_response)
return self.app(env, start_response)
def _translate_form(self, env, boundary):
"""
Translates the form data into subrequests and issues a
response.
:param env: The WSGI environment dict.
:param boundary: The MIME type boundary to look for.
:returns: status_line, headers_list, body
"""
keys = self._get_keys(env)
status = message = ''
attributes = {}
subheaders = []
file_count = 0
for fp in iter_multipart_mime_documents(
env['wsgi.input'], boundary, read_chunk_size=READ_CHUNK_SIZE):
hdrs = rfc822.Message(fp, 0)
disp, attrs = parse_content_disposition(
hdrs.getheader('Content-Disposition', ''))
if disp == 'form-data' and attrs.get('filename'):
file_count += 1
try:
if file_count > int(attributes.get('max_file_count') or 0):
status = '400 Bad Request'
message = 'max file count exceeded'
break
except ValueError:
raise FormInvalid('max_file_count not an integer')
attributes['filename'] = attrs['filename'] or 'filename'
if 'content-type' not in attributes and 'content-type' in hdrs:
attributes['content-type'] = \
hdrs['Content-Type'] or 'application/octet-stream'
status, subheaders, message = \
self._perform_subrequest(env, attributes, fp, keys)
if status[:1] != '2':
break
else:
data = ''
mxln = MAX_VALUE_LENGTH
while mxln:
chunk = fp.read(mxln)
if not chunk:
break
mxln -= len(chunk)
data += chunk
while fp.read(READ_CHUNK_SIZE):
pass
if 'name' in attrs:
attributes[attrs['name'].lower()] = data.rstrip('\r\n--')
if not status:
status = '400 Bad Request'
message = 'no files to process'
headers = [(k, v) for k, v in subheaders
if k.lower().startswith('access-control')]
redirect = attributes.get('redirect')
if not redirect:
body = status
if message:
body = status + '\r\nFormPost: ' + message.title()
headers.extend([('Content-Type', 'text/plain'),
                            ('Content-Length', str(len(body)))])
return status, headers, body
status = status.split(' ', 1)[0]
if '?' in redirect:
redirect += '&'
else:
redirect += '?'
redirect += 'status=%s&message=%s' % (quote(status), quote(message))
body = '<html><body><p><a href="%s">' \
'Click to continue...</a></p></body></html>' % redirect
headers.extend(
[('Location', redirect), ('Content-Length', str(len(body)))])
return '303 See Other', headers, body
def _perform_subrequest(self, orig_env, attributes, fp, keys):
"""
Performs the subrequest and returns the response.
:param orig_env: The WSGI environment dict; will only be used
to form a new env for the subrequest.
:param attributes: dict of the attributes of the form so far.
:param fp: The file-like object containing the request body.
:param keys: The account keys to validate the signature with.
:returns: (status_line, headers_list, message)
"""
if not keys:
raise FormUnauthorized('invalid signature')
try:
max_file_size = int(attributes.get('max_file_size') or 0)
except ValueError:
raise FormInvalid('max_file_size not an integer')
subenv = make_pre_authed_env(orig_env, 'PUT', agent=None,
swift_source='FP')
if 'QUERY_STRING' in subenv:
del subenv['QUERY_STRING']
subenv['HTTP_TRANSFER_ENCODING'] = 'chunked'
subenv['wsgi.input'] = _CappedFileLikeObject(fp, max_file_size)
if subenv['PATH_INFO'][-1] != '/' and \
subenv['PATH_INFO'].count('/') < 4:
subenv['PATH_INFO'] += '/'
subenv['PATH_INFO'] += attributes['filename'] or 'filename'
if 'x_delete_at' in attributes:
try:
subenv['HTTP_X_DELETE_AT'] = int(attributes['x_delete_at'])
except ValueError:
raise FormInvalid('x_delete_at not an integer: '
'Unix timestamp required.')
if 'x_delete_after' in attributes:
try:
subenv['HTTP_X_DELETE_AFTER'] = int(
attributes['x_delete_after'])
except ValueError:
raise FormInvalid('x_delete_after not an integer: '
'Number of seconds required.')
if 'content-type' in attributes:
subenv['CONTENT_TYPE'] = \
attributes['content-type'] or 'application/octet-stream'
elif 'CONTENT_TYPE' in subenv:
del subenv['CONTENT_TYPE']
try:
if int(attributes.get('expires') or 0) < time():
raise FormUnauthorized('form expired')
except ValueError:
raise FormInvalid('expired not an integer')
hmac_body = '%s\n%s\n%s\n%s\n%s' % (
orig_env['PATH_INFO'],
attributes.get('redirect') or '',
attributes.get('max_file_size') or '0',
attributes.get('max_file_count') or '0',
attributes.get('expires') or '0')
has_valid_sig = False
for key in keys:
sig = hmac.new(key, hmac_body, sha1).hexdigest()
if streq_const_time(sig, (attributes.get('signature') or
'invalid')):
has_valid_sig = True
if not has_valid_sig:
raise FormUnauthorized('invalid signature')
substatus = [None]
subheaders = [None]
wsgi_input = subenv['wsgi.input']
def _start_response(status, headers, exc_info=None):
if wsgi_input.file_size_exceeded:
raise EOFError("max_file_size exceeded")
substatus[0] = status
subheaders[0] = headers
i = iter(self.app(subenv, _start_response))
try:
i.next()
except StopIteration:
pass
return substatus[0], subheaders[0], ''
def _get_keys(self, env):
"""
Fetch the tempurl keys for the account. Also validate that the request
path indicates a valid container; if not, no keys will be returned.
:param env: The WSGI environment for the request.
:returns: list of tempurl keys
"""
parts = env['PATH_INFO'].split('/', 4)
if len(parts) < 4 or parts[0] or parts[1] != 'v1' or not parts[2] or \
not parts[3]:
return []
account_info = get_account_info(env, self.app, swift_source='FP')
return get_tempurl_keys_from_metadata(account_info['meta'])
def filter_factory(global_conf, **local_conf):
"""Returns the WSGI filter for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
register_swift_info('formpost')
return lambda app: FormPost(app, conf)
|
the-stack_0_27677
|
#!/usr/bin/env python
"""Simple Bytes Editor / Binary Stream Editor.
Usage:
    sbsed --file source[,target] --edit editor_action [editor_action_1 [editor_action_2]]
    editor_action: target_offset:source_data[:data_length[:operation]]
    operation: can be either "overwrite" or "copy". Default: overwrite
The 2-Clause BSD License:
https://opensource.org/licenses/BSD-2-Clause
Copyright (c) 2018, Timothy Lin
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the "Simple Binary Stream Editor" project."""
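# Illustrative invocation (the file names here are hypothetical):
#
#   ./sbsed.py --file firmware.bin,patched.bin --edit 0x10:int32=0x00C0FFEE
#
# writes the edited copy of firmware.bin to patched.bin, placing the four
# little-endian bytes of 0x00C0FFEE at offset 0x10. Source data may also be
# given as b=0x..., s="...", g=<guid> or from=<offset>; see EditorAction below.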
from __future__ import print_function
import os
import re
import sys
import math
import shlex
import argparse
import traceback
def ShlexSplit(shstr, splitters={','}, escape=''):
"""Syntactically split a string with specified splitters, which honors its sub-strings
in the quotation marks."""
lex = shlex.shlex(shstr)
lex.escape = escape
lex.commenters = ''
parms = ['']
for dp in list(lex):
dp = dp.strip()
if dp not in splitters:
parms[-1] += dp
else:
parms += ['']
return parms
def LengthAdjust(bstr, width=0):
"""Adjust the length of a byte-string to meet the byte-width with leading padding "0" regardless of its endianess.
bstr - the byte string.\n
width - the data length, counted in byte."""
blen = width*2 if width else len(bstr)
if blen%2:
bstr = '0' + bstr
blen += 1
if blen > len(bstr):
bstr = '0' * (blen - len(bstr)) + bstr
return bstr, blen
def Bs2Ba(bstr, width=0):
"""Conversion from a hex-digit byte-string to the network-byte-order hex-digit byte-array.
bstr - the byte string.\n
    width - the data length, counted in bytes."""
bstr, blen = LengthAdjust(bstr, width)
return bytearray([int(bstr[b*2:(b+1)*2], base=0x10) for b in range(int(blen/2))])
def Le2N(bstr, width=0):
"""Conversion from the little-endian hex-digit byte-array to the network-byte-order hex-digit byte-array.
bstr - the byte string.\n
    width - the data length, counted in bytes."""
bstr, blen = LengthAdjust(bstr, width)
return Bs2Ba(''.join(reversed([bstr[b*2:(b+1)*2] for b in range(int(blen/2))])))
def LeInt(source, width=0):
"""Conversion from a normal decimal-digit string to the network-byte-order hex-digit byte-array.
    source - the decimal-digit string.\n
    width - the data length, counted in bytes."""
if sys.byteorder == 'little':
return Le2N(hex(int(source)), width)
else:
return Bs2Ba(hex(int(source)))
def Guid2N(gstr):
"""Conversion from a UUID string to to the network-byte-order hex-digit byte-array."""
# The canonical 8-4-4-4-12 format GUID string:
# 123e4567-e89b-12d3-a456-426655440000
# xxxxxxxx-xxxx-Mxxx-Nxxx-xxxxxxxxxxxx
gs = gstr.split('-')
try:
if len(gs) != 5: raise Exception('')
return Le2N(gs[0], 4) + Le2N(gs[1], 2) + Le2N(gs[2], 2) + Bs2Ba(gs[3], 2) + Bs2Ba(gs[4], 6)
except Exception:
raise Exception('Incorrect Guid format:%s' % gstr)
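# A throwaway self-check sketch (not called anywhere) showing the expected
# behaviour of the conversion helpers above.
def _example_conversions():
    assert Bs2Ba('1234') == bytearray([0x12, 0x34])   # hex string, byte for byte
    assert Le2N('1234') == bytearray([0x34, 0x12])    # little-endian -> network order
    # The first three GUID fields are byte-swapped, the trailing two are not:
    g = Guid2N('123e4567-e89b-12d3-a456-426655440000')
    assert g[:4] == bytearray([0x67, 0x45, 0x3e, 0x12])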
def SafeEval(evalstr, default=None, globalz=None, localz=None):
"""A safer wrapper for eval()."""
if not globalz:
globalz = {}
if not localz:
localz = {}
try:
return eval(evalstr, globalz, localz)
except SyntaxError:
return default if default else evalstr
class EditorAction(object):
"""The editor's action."""
def __init__(self, edstr):
self.target_offset = None
self.source_data = None
self.length = None
self.operation = 'overwrite'
self.hexdata = None
self.from_offset = None
self.encoding = 'utf-8'
eds = ShlexSplit(edstr, {':'})
try:
self.target_offset = SafeEval(eds[0]) if eds[0] else None
self.source_data = eds[1]
self.length = SafeEval(eds[2]) if eds[2] else None
self.operation = eds[3].lower()
except IndexError:
pass
if not self.source_data:
raise Exception('Error')
data_x = self.source_data.lower().split('=')
def strx(data):
if data.startswith('"') and data.endswith('"'):
data = data[1:-1]
return bytearray(data, encoding=self.encoding)
def intx(data, width):
            if data.startswith('0x'):
return Le2N(data[2:], width)
else:
return LeInt(data, width)
if len(data_x) == 1:
self.hexdata = strx(self.source_data)
elif len(data_x) == 2:
data_type, data_data = data_x
if not data_type or not data_data:
raise Exception('Incorrect source data: [%s]' % self.source_data)
intz = re.match('(i|int|integer)(\d+)', data_type)
if intz:
                # Handle these integers: i8, int8, integer8, i16, int16, integer16, ...
# Note: integer9 would be treated as integer16
self.hexdata=intx(data_data, int(math.ceil(int(intz.group(2))/8.)))
elif data_type in {'b', 'bytes'}:
if data_data.startswith('0x'):
self.hexdata = Bs2Ba(data_data[2:])
elif data_type in {'g', 'guid'}:
self.hexdata = Guid2N(data_data)
# NEWREL: Format check?
# NEWREL: Shall we cover GUID with "big-endian"?
# self.hexdata = Bs2Ba(data_data.replace('-', ''))
elif data_type in {'s', 'string'}:
self.hexdata = strx(data_data)
elif data_type in {'from'}:
self.hexdata = strx(data_data)
self.from_offset = int(data_data, base=0)
if not self.hexdata:
raise Exception('Invalid data type or format:%s.' % self.source_data)
if isinstance(self.length, int):
remain_len = self.length - len(self.hexdata)
if remain_len > 0:
self.hexdata += bytearray(remain_len)
            # NOTE: when the length of hexdata > the specified length,
            # it's the editor's choice to either truncate or append the hexdata's content.
elif self.length is None:
self.length = len(self.hexdata)
def __repr__(self):
def _repr(member):
if (member is None) or (not isinstance(member, int)):
return '%s' % str(member)
else:
return '0x%X(%d)' % (member, member)
return '\n'.join([
' target offset : %s' % _repr(self.target_offset),
' source data : %s' % str(self.source_data),
' length : %s' % _repr(self.length),
' operation : %s' % self.operation,
])
class CommandArgument(argparse.ArgumentParser):
def __init__(self, usage=''):
if not usage:
            usage = 'sbsed --file source[,target] --edit editor_action [editor_action_1 [editor_action_2]]'
argparse.ArgumentParser.__init__(self, usage=usage, prefix_chars='-/', fromfile_prefix_chars='@', add_help=False)
self.add_argument('-f', '--file', dest='file', metavar='File', nargs='+', help='Specify the source file and the optional target file.')
self.add_argument('-e', '--edit', dest='edit', metavar='Edit', nargs='+', help='The editor action that consists of offset:data[:length:[operation]]')
args, __unknown = self.parse_known_args(sys.argv[1:])
self.edits = []
self.specific_help = ''
self.need_help = self.format_help()
if not args.file:
self.specific_help = 'No specified file.'
return
if not args.edit:
self.specific_help = 'No specified editor action.'
return
parms = ShlexSplit(','.join(args.file))
if len(parms) > 2:
self.specific_help = 'Too many specified files.'
return
self.input_file = parms[0]
if len(parms) == 2:
self.output_file = parms[1]
else:
self.output_file = ''
for ed in args.edit:
try:
self.edits += [EditorAction(ed)]
except Exception:
self.specific_help = 'Invalid editor action:%s' % ed
raise #Exception('')
self.need_help = ''
def help(self):
if arg.specific_help:
print('Argument error: %s\n' % arg.specific_help)
if arg.need_help:
print('%s' % arg.need_help)
sys.exit(1)
def __repr__(self):
strs = []
strs += ['Source File: %s' % self.input_file]
strs += ['Target File: %s' % self.output_file]
strs += ['Editor Action%s:' % ('s' if len(self.edits)>1 else '')]
for ed in self.edits:
strs += [str(ed), '']
return '\n'.join(strs)
class Editor(object):
"""A UTF-8 editor"""
def __init__(self, input_file, output_file=None):
self.input_file = input_file
self.output_file = output_file if output_file else input_file
with open(input_file, 'rb') as fin:
self.content = bytearray(fin.read())
self.changed = False
self.PreviousAction = None
def overwrite(self, action):
if action.from_offset is not None:
data = self.content[action.from_offset:action.from_offset+action.length]
dlen = len(data)
#elif action.from_file is not None:
# pass
else:
dlen = min(action.length, len(self.content) - action.target_offset)
data = action.hexdata[:dlen]
if dlen < 1:
return # Ignore the boundary-overrun error. Or, what else shall we do?
# BUGBUG. verify this: boundary-hit.
self.content[action.target_offset:action.target_offset+dlen] = data
self.changed = True
    def edit(self, action):
        if action.target_offset in {None, '+'}:
            if not self.PreviousAction:
                raise Exception('Invalid target offset')
            # Continue right after the region written by the previous action.
            action.target_offset = (self.PreviousAction.target_offset
                                    + self.PreviousAction.length)
if action.operation in {'overwrite', 'copy'}:
self.overwrite(action)
else:
print('Unsupported editor action: %s' % action.operation)
self.PreviousAction = action
def commit(self):
"""Apply the changes to the target file."""
if self.changed:
with open(self.output_file, 'wb') as fout:
fout.write(self.content)
if __name__ == '__main__':
arg = CommandArgument()
if arg.need_help:
arg.help()
sys.exit(1)
input_file = arg.input_file
output_file = arg.output_file if arg.output_file else arg.input_file
try:
ed = Editor(input_file, output_file)
except IOError:
traceback.print_exc()
sys.exit(2)
for ar in arg.edits:
ed.edit(ar)
ed.commit()
print('%s' % str(arg))
|
the-stack_0_27678
|
def mergesort(arr):
    # Lists of length 0 or 1 are already sorted; using <= also guards against
    # infinite recursion on empty input.
    if len(arr) <= 1:
        return arr
mid = len(arr) // 2
left = arr[:mid]
right = arr[mid:]
mergesort(left)
mergesort(right)
i = 0
j = 0
k = 0
while i < len(left) and j < len(right):
if left[i] <= right[j]:
arr[k] = left[i]
i += 1
else:
arr[k] = right[j]
j += 1
k += 1
while i < len(left):
arr[k] = left[i]
i += 1
k += 1
while j < len(right):
arr[k] = right[j]
j += 1
k += 1
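# A small smoke test, assuming the module is run directly; mergesort sorts the
# list in place (the return value only matters for the trivial base case).
if __name__ == '__main__':
    data = [5, 2, 9, 1, 5, 6]
    mergesort(data)
    print(data)  # [1, 2, 5, 5, 6, 9]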
|
the-stack_0_27679
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''
Just render something...with trajectories.
This script is stale. Use with caution.
Usage:
./sclrq.py [options] (--show|-s) <i>
./sclrq.py [options] <i> <outname>
Options:
--help -h
--show -s Show
--verbose -v Make some noise.
--nozip -U sclr/flds are NOT gzipped.
--zip -Z sclr/flds are gzipped. If neither of these two are set,
guess based on name.
--log10 -l Log it.
--lims=LIM Set lims [default: (1e2,6e8)]
--highlight=H Set highlight [default: 3e8]
--quantity=Q -Q Q Render this quantity [default: RhoN10]
--dir=D -D D Read from this dir [default: .]
--restrict=R Restrict it.
--title=T Set the title [default: Electron density]
--units=U Set the colorbar units [default: number/cc]
--laser Plot contours of the laser poyting vector.
--intensity=I -I I Make a contour of this intensity [default: 3e18]
--traj=F Plot trajectories from this file. If not used,
will not plot trajectories.
--traj-offset=O Set the offset and factor to get the relevant
trajectory timestep such that i_t = factor*i+offset, where
i is the passed index in <i>. The factor and offset are passed
to this option in the form of a tuple (factor, offset). If not
used, script will search for the closest time in the trajectory
file.
--traj-n=N Plot only first N trajectories.
--traj-energy Color the trajectory lines by their energy.
--traj-E-log Logarithmic color for trajectories.
--traj-maxE=E Set the maximum E in eV explicitly. If set, anything above
will be cut off.
--traj-minE=E Set the minimum E in eV. [default: 1]
--equal -E Make spatial dimensions equal.
--no-ticks Don't include ticks.
'''
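# Illustrative invocation (the index and trajectory file are hypothetical):
#   ./sclrq.py --laser --traj=trajectories.npz --traj-energy -s 10
# renders scalar dump index 10 with laser contours and energy-colored
# trajectories read from trajectories.npz.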
from docopt import docopt;
import numpy as np;
import numpy.linalg as lin;
from pys import parse_ftuple, parse_ituple;
from lspreader.flds import read_indexed, restrict
from lspplot.sclr import S;
from lspplot.pc import pc,highlight,trajectories;
from lspplot.consts import c,mu0,e0;
opts = docopt(__doc__,help=True);
if opts['--nozip']:
gzip = False;
elif opts['--zip']:
gzip = True;
else:
gzip = 'guess';
quantity = opts['--quantity'];
fvar=['E','B'] if opts['--laser'] else None;
titlestr=opts['--title']
units=opts['--units'];
svar=[quantity];
#####################################
#reading data
i = int(opts['<i>']);
d = read_indexed(i,
flds=fvar,sclr=svar,
gzip=gzip,dir=opts['--dir'],
gettime=True,vector_norms=False);
t = d['t'];
if opts['--traj']:
factor, offset = None, None;
if opts['--traj-offset']:
factor, offset = parse_ituple(opts['--traj-offset'],length=2);
with np.load(opts['--traj'], mmap_mode='r') as f:
if factor:
tri = i*factor+offset;
trt = f['time'][tri];
if not np.isclose(trt,t):
import sys
sys.stderr.write(
"warning: time from trajectory is {} while time from sclr is {}\n".format(
trt,t));
else:
tri = np.sum((f['time'] <= t).astype(int));
trt = f['time'][tri];
tr = f['data'][:tri+1,:];
if opts['--traj-n']:
tr = tr[:,:int(opts['--traj-n'])];
if opts['--verbose']:
print("size of trajectories: {}".format(tr.shape));
print("final time is {}".format(trt));
print("with sclr time as {}".format(t));
pass;
if opts['--restrict']:
res = parse_ituple(opts['--restrict'],length=None);
restrict(d,res);
#massaging data
x,y = d['x']*1e4,d['y']*1e4
coords = ['x','y'];
if np.isclose(y.max(),y.min()):
y = d['z']*1e4
coords[1] = 'z';
q = d[quantity];
#####################################
#plotting
#getting options from user
mn,mx = parse_ftuple(opts['--lims'],length=2);
myhi = float(opts['--highlight']);
#plot the density
title="{}\nTime: {:.2f} fs".format(titlestr,t*1e6);
r=pc(
q,(x,y), lims=(mx,mn),log=opts['--log10'],
clabel=units, title=title,
agg=not opts['--show']);
highlight(
r, myhi,
color="lightyellow", alpha=0.5);
if opts['--laser']:
laser = S(d);
I = float(opts['--intensity']);
highlight(r, I, q=laser,
color="red", alpha=0.15);
import matplotlib.pyplot as plt;
gm = lambda itr: np.sqrt(itr['ux']**2+itr['uy']**2+itr['uz']**2+1);
massE = .511e6
if opts['--traj']:
tr[coords[1]]*=1e4;
tr[coords[0]]*=1e4;
if opts['--traj-energy']:
en = lambda itr:np.nan_to_num(massE*(gm(itr)-1));
if opts['--traj-E-log']:
minE = float(opts['--traj-minE']);
def _energy(itr):
E = en(itr);
return np.log10(
np.where(E < minE, minE, E));
energy=_energy;
else:
energy=en;
#find max energy
if opts['--traj-maxE']:
maxE=float(opts['--traj-maxE']);
def _cf(itr):
E = energy(itr);
return np.where(E>maxE, 1.0, E/maxE)
cf = _cf;
else:
maxE=np.max(energy(tr));
cf = lambda itr: energy(itr)/maxE;
else:
cf = None;
#massaging alpha
maxq=np.log10(np.max(np.abs(tr['q'])[0,:]));
alphaf = lambda itr: np.log10(np.abs(itr['q'])[0])/maxq
trajectories(
r, tr,
alpha=alphaf,
lw=0,
coords = list(reversed(coords)),
cmap = 'copper',
color_quantity=cf);
if opts['--equal']:
plt.axis('equal');
r['axes'].autoscale(tight=True);
if opts['--no-ticks']:
plt.tick_params(
axis='both',
which='both',
bottom='off',
top='off',
right='off',
left='off');
if opts['--show']:
plt.show();
else:
plt.savefig(opts['<outname>']);
|
the-stack_0_27683
|
import pytest
from plenum.test.delayers import ppDelay
from plenum.test.freshness.helper import has_freshness_instance_change
from plenum.test.helper import freshness
from plenum.test.node_request.helper import sdk_ensure_pool_functional
from plenum.test.stasher import delay_rules
from stp_core.loop.eventually import eventually
FRESHNESS_TIMEOUT = 5
@pytest.fixture(scope="module")
def tconf(tconf):
with freshness(tconf, enabled=True, timeout=FRESHNESS_TIMEOUT):
yield tconf
def test_view_change_happens_if_ordering_is_halted(looper, tconf, txnPoolNodeSet,
sdk_wallet_client, sdk_pool_handle):
current_view_no = txnPoolNodeSet[0].viewNo
for node in txnPoolNodeSet:
assert node.viewNo == current_view_no
def check_next_view():
for node in txnPoolNodeSet:
assert node.viewNo > current_view_no
stashers = [n.nodeIbStasher for n in txnPoolNodeSet]
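    # Delaying every PrePrepare halts ordering; the freshness checks should
    # then raise instance changes and move the pool to a new view.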
with delay_rules(stashers, ppDelay()):
looper.run(eventually(check_next_view, timeout=FRESHNESS_TIMEOUT * 3))
assert sum(1 for node in txnPoolNodeSet if has_freshness_instance_change(node)) >= 3
sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
|
the-stack_0_27684
|
from io import StringIO
from cms.constants import TEMPLATE_INHERITANCE_MAGIC
from cms.models import Page
from cms.models.pluginmodel import CMSPlugin
from cms.models.static_placeholder import StaticPlaceholder
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.management.base import OutputWrapper
from django.db.models import Count
from django.test import TestCase
from devday.utils.devdata import DevData
from event.models import Event
from speaker.models import Speaker
from talk import COMMITTEE_GROUP
from talk.models import (Room, Talk, TalkFormat, TalkSlot, TimeSlot,
Track, Vote)
from twitterfeed.models import Tweet, TwitterProfileImage
User = get_user_model()
class DevDataTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.stdout = StringIO()
cls.devdata = DevData(stdout=OutputWrapper(cls.stdout))
def setUp(self):
self.stdout.seek(0)
self.stdout.truncate(0)
def test_create_objects_failure(self):
class FooManager:
def count(self):
return 0
class Foo:
objects = FooManager()
def create():
raise Exception('testing')
try:
self.devdata.create_objects('foo', Foo, 1, create)
except Exception:
pass
self.assertTrue('FAILED' in self.stdout.getvalue(),
'should have FAILED, but got: {}'
.format(self.stdout.getvalue()))
def subtest_create_admin_user(self):
self.devdata.create_admin_user()
u = User.objects.get(email=settings.ADMINUSER_EMAIL)
self.assertEquals(u.email, settings.ADMINUSER_EMAIL)
self.assertEquals(u.is_superuser, True)
self.assertEquals(u.is_staff, True)
def subtest_update_site(self):
self.devdata.update_site()
site = Site.objects.get(pk=1)
self.assertEquals(site.domain, 'devday.de')
self.assertEquals(site.name, 'Dev Data')
def get_page(self, title):
return Page.objects.get(
title_set__title=title, title_set__published=True,
publisher_is_draft=False)
def check_plugin(self, page, slot, plugin_type):
placeholder = page.placeholders.get(slot=slot)
plugins = placeholder.get_plugins()
self.assertEquals(len(plugins), 1,
'{} placeholder has exactly one plugin'
.format(slot))
self.assertEquals(plugins[0].plugin_type, plugin_type,
'{} placeholder is of type {}'
.format(slot, plugin_type))
def subtest_create_pages(self):
self.devdata.create_objects('pages', Page, 3,
self.devdata.create_pages)
index = self.get_page('Deutsche Homepage')
self.assertEquals(index.languages, 'de', 'Homepage is German')
self.assertEquals(index.template, 'devday_index.html',
'Homepage uses correct template')
self.assertTrue(index.is_home, 'Homepage is_home')
self.check_plugin(index, 'eventinfo', 'TextPlugin')
self.check_plugin(index, 'cfp_open', 'TextPlugin')
self.check_plugin(index, 'save_the_date', 'TextPlugin')
self.check_plugin(index, 'sign_up', 'TextPlugin')
sponsoring = self.get_page('Sponsoring')
self.assertEquals(sponsoring.languages, 'de', 'Sponsoring is German')
self.assertEquals(sponsoring.template, 'devday_no_cta.html',
'Sponsoring uses correct template')
impress = self.get_page('Impressum')
self.assertEquals(impress.languages, 'de', 'Impress is German')
self.assertEquals(impress.template, 'devday_no_cta.html',
'Impress uses correct template')
def subtest_update_static_placeholders(self):
self.devdata.update_static_placeholders()
name = 'create-talk-introtext'
lang = 'de'
sph = StaticPlaceholder.objects.get(name=name)
ph = sph.draft
np = CMSPlugin.objects.filter(placeholder=ph,
language=lang).count()
self.assertEquals(np, 1, 'Exactly one static placeholder create')
def subtest_create_talk_formats(self):
self.devdata.create_objects('talk formats', TalkFormat, 3,
self.devdata.create_talk_formats)
formats = TalkFormat.objects.all().order_by('name', 'duration')
self.assertEquals(len(formats), 4, 'There are four TalkFormats')
self.assertEquals(formats[0].name, 'Lightning Talk')
self.assertEquals(formats[0].duration, 10)
self.assertEquals(formats[1].name, 'Vortrag')
self.assertEquals(formats[1].duration, 30)
self.assertEquals(formats[2].name, 'Vortrag')
self.assertEquals(formats[2].duration, 60)
self.assertEquals(formats[3].name, 'Workshop')
self.assertEquals(formats[3].duration, 180)
def subtest_update_events(self):
self.devdata.update_events()
events = list(Event.objects.order_by('start_time'))
self.assertEquals(len(events), 3,
'there are three events')
stdformat = TalkFormat.objects.get(name='Vortrag', duration=60)
for e in events[:-1]:
self.assertEquals(e.registration_open, False,
'registration not open')
self.assertEquals(e.submission_open, False,
'submission not open')
tf = e.talkformat.filter(id=stdformat.id)
self.assertTrue(len(tf) == 1,
'standard format assigned')
e = events[-1]
self.assertEquals(e.registration_open, True,
'registration open')
self.assertEquals(e.submission_open, True,
'submission open')
tf = e.talkformat.all()
self.assertTrue(len(tf) == 4,
'all formats assigned')
def subtest_get_committee_members(self):
count = len(self.devdata.get_committee_members()
.strip().split('\n')) - 1 # one extra line
self.assertEquals(count, 7, 'Seven users are committee members')
def subtest_create_users_and_attendees(self):
self.devdata.create_objects(
'users', User, 3, self.devdata.create_attendees,
self.devdata.get_committee_members)
users = len(User.objects.all())
self.assertTrue(520 <= users <= 522, 'About 520 users')
events = Event.objects.annotate(natt=Count('attendee'))
for e in events:
self.assertTrue(users * 0.70 <= e.natt <= users * 0.80,
'about {:d} attend event {}: actual {}'
.format(int(users * 0.8), e.title, e.natt))
self.subtest_get_committee_members()
def subtest_get_speakers(self):
count = len(self.devdata.get_speakers().strip().split(
'\n')) - 1 # one extra line
self.assertEquals(count, 10, 'At least 10 speakers')
def subtest_create_speakers(self):
self.devdata.create_objects(
'speakers', Speaker, 1, self.devdata.create_speakers,
self.devdata.get_speakers)
speakers = 150
number_of_speakers = Speaker.objects.count()
self.assertTrue(
speakers * 0.70 <= number_of_speakers <= speakers * 1.2,
'about {:d} speakers: actual {}'
.format(speakers, number_of_speakers))
self.subtest_get_speakers()
def subtest_create_talks(self):
self.devdata.create_objects(
'talks', Talk, 1, self.devdata.create_talks)
speakers = 50
# With a probability of 10% a speaker will submit 2 talks, and with
        # a probability of 75% will submit one talk. For each event, we will
        # have talks amounting to about 0.95 times the number of speakers.
talks = speakers * 0.95
events = Event.objects.annotate(
ntalk=Count('talk'))
for e in events:
self.assertTrue(talks * 0.75 <= e.ntalk <= talks * 1.25,
'about {:d} talks at event {}: actual {}'
.format(int(talks), e.title, e.ntalk))
def subtest_create_votes(self):
event = Event.objects.current_event()
self.devdata.create_objects(
'votes', Vote, 1, self.devdata.vote_for_talk)
number_of_votes = Vote.objects.exclude(talk__event=event).count()
self.assertEquals(number_of_votes, 0, 'No votes for older events')
number_of_votes = Vote.objects.count()
number_of_talks = Talk.objects.filter(event=event).count()
potential_votes = number_of_talks * User.objects.filter(
groups__name=COMMITTEE_GROUP).count()
self.assertTrue(
potential_votes * 0.7 <= number_of_votes <= potential_votes,
'about {} votes for {} talks: actual {}'.format(
int(potential_votes * 0.8), number_of_talks, number_of_votes))
def subtest_create_tracks(self):
self.devdata.create_objects(
'tracks', Track, 1, self.devdata.create_tracks)
# FIXME implement data checks
ntracks = Track.objects.filter(
event=Event.objects.current_event()).count()
self.assertEquals(ntracks, 0, 'No tracks for current event')
ntracks = Track.objects.filter(
event=Event.objects.get(title='devdata.17')).count()
self.assertEquals(ntracks, 5, '5 tracks for devdata.17')
ntracks = Track.objects.filter(
event=Event.objects.get(title='devdata.18')).count()
self.assertEquals(ntracks, 6, '6 tracks for devdata.18')
def subtest_create_rooms(self):
self.devdata.create_objects('rooms', Room, 1,
self.devdata.create_rooms)
nrooms = Room.objects.filter(
event=Event.objects.get(title='devdata.17')).count()
self.assertEquals(nrooms, 4, 'we have 4 rooms for devdata.17')
nrooms = Room.objects.filter(
event=Event.objects.get(title='devdata.18')).count()
self.assertEquals(nrooms, 4, 'we have 4 rooms for devdata.18')
def subtest_create_time_slots(self):
self.devdata.create_objects('time slots', TimeSlot, 1,
self.devdata.create_time_slots)
events = Event.objects.exclude(
id=Event.objects.current_event_id()).annotate(
ntimeslot=Count('timeslot'))
for e in events:
self.assertEquals(e.ntimeslot, 13, 'we have 13 time slots for {}'
.format(Event))
def subtest_create_talk_slots(self):
self.devdata.create_objects('talk slots', TalkSlot, 1,
self.devdata.create_talk_slots)
events = Event.objects.exclude(
id=Event.objects.current_event_id()).annotate(
ntalkslot=Count('talk__talkslot'))
for e in events:
self.assertEquals(e.ntalkslot, 14, 'we have 14 talk slots for {}'
.format(e))
def subtest_create_twitter_profiles(self):
self.devdata.create_objects('twitter profiles', TwitterProfileImage, 1,
self.devdata.create_twitter_profiles)
ntpp = TwitterProfileImage.objects.count()
self.assertEquals(ntpp, 1, 'we have 1 twitter profile picture')
def subtest_create_tweets(self):
self.devdata.create_objects(
'tweets', Tweet, 1, self.devdata.create_tweets)
number_of_tweets = Tweet.objects.count()
self.assertEquals(number_of_tweets, 7, 'we have 7 tweets')
def test_get_name_from_email(self):
self.assertEquals(
self.devdata.get_name_from_email('[email protected]'),
'[email protected]')
self.assertEquals(
self.devdata.get_name_from_email('[email protected]'),
'First Last')
def test_create_devdata(self):
self.subtest_create_admin_user()
self.subtest_update_site()
self.subtest_create_pages()
self.subtest_update_static_placeholders()
self.subtest_create_talk_formats()
self.subtest_update_events()
self.subtest_create_users_and_attendees()
self.subtest_create_speakers()
self.subtest_create_talks()
self.subtest_create_votes()
self.subtest_create_tracks()
self.subtest_create_rooms()
self.subtest_create_time_slots()
self.subtest_create_talk_slots()
self.subtest_create_twitter_profiles()
self.subtest_create_tweets()
self.stdout.seek(0)
self.stdout.truncate(0)
self.devdata.create_devdata()
self.assertTrue('OK' in self.stdout.getvalue(),
'At least one OK in output')
self.assertTrue('FAILED' not in self.stdout.getvalue(),
'No FAILED in output')
|
the-stack_0_27685
|
#! /usr/bin/env python3
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Author: Giangi Sacco
# Copyright 2012, by the California Institute of Technology. ALL RIGHTS RESERVED. United States Government Sponsorship acknowledged.
#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from builtins import str
import sys
from interferogram.Interferogram import Interferogram as InterferogramBase
from utils.contextUtils import toContext
from interferogram.createPrepareInterferogram import createPrepareInterferogram
class Interferogram(InterferogramBase):
def __init__(self):
super(Interferogram,self).__init__()
def run(self,ops):
filename = ops.inputFile
self._productList.append(filename);
#try:
process = 'InterferogramTrigger'
try:
listMeta = self.createMetadata(filename)
self._sensor = listMeta[0][0].spacecraftName
#db start
#self._sensor = 'CSKS4'
#db end
self._prepareInterferogram = createPrepareInterferogram(self._sensor)
self._inputFile = self.createInputFile(listMeta)
# hack to make isce believe this is the command line
self._insar = self._insarClass(cmdline=self._inputFile)
self._insar._insar.unwrappedIntFilename = self._insar._insar.topophaseFlatFilename.replace('.flat','.unw')
            # these two statements need to be set here, before _configure(), in order to take effect
self._insar._insar.geocode_bbox = self.createGeoBBox(listMeta)
self._insar._insar.geocode_list = self.createGeocodeList(self._insar._insar)
self._insar._configure()
self._insar.run()
#here dump insar object
# delete it and reload from file
self.createPngList(self._insar._insar)
self.createBrowseImages()
self.createProductList()
self.createProductJson(listMeta)
except Exception as e:
print(e)
message = 'InterferogramTrigger.py: run failed with exception ' + str(e)
exit = 1
toContext(process,exit,message)
raise Exception
exit = 0
message = 'InterferogramTrigger: completed'
toContext(process,exit,message)
return 0
|
the-stack_0_27686
|
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""pw_console preferences"""
from dataclasses import dataclass, field
import os
from pathlib import Path
from typing import Any, Dict, List, Union
import yaml
from pw_console.style import get_theme_colors
_DEFAULT_REPL_HISTORY: Path = Path.home() / '.pw_console_history'
_DEFAULT_SEARCH_HISTORY: Path = Path.home() / '.pw_console_search'
_DEFAULT_CONFIG = {
'pw_console': {
# History files
'repl_history': _DEFAULT_REPL_HISTORY,
'search_history': _DEFAULT_SEARCH_HISTORY,
# Appearance
'ui_theme': 'dark',
'code_theme': 'pigweed-code',
'swap_light_and_dark': False,
'spaces_between_columns': 2,
'column_order_omit_unspecified_columns': False,
'column_order': [],
'column_colors': {},
'show_python_file': False,
'show_python_logger': False,
'hide_date_from_log_time': False,
# Window arrangement
'windows': {},
'window_column_split_method': 'vertical',
},
}
class UnknownWindowTitle(Exception):
"""Exception for window titles not present in the window manager layout."""
def error_unknown_window(window_title: str,
existing_pane_titles: List[str]) -> None:
"""Raise an error when the window config has an unknown title.
If a window title does not already exist on startup it must have a loggers:
or duplicate_of: option set."""
pane_title_text = ' ' + '\n '.join(existing_pane_titles)
existing_pane_title_example = 'Window Title'
if existing_pane_titles:
existing_pane_title_example = existing_pane_titles[0]
raise UnknownWindowTitle(
f'\n\n"{window_title}" does not exist.\n'
'Existing windows include:\n'
f'{pane_title_text}\n'
'If this window should be a duplicate of one of the above,\n'
f'add "duplicate_of: {existing_pane_title_example}" to your config.\n'
'If this is a brand new window, include a "loggers:" section.\n'
'See also: '
'https://pigweed.dev/pw_console/docs/user_guide.html#example-config')
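# A minimal sketch (not part of pw_console) of the YAML shape this module
# consumes; the top-level key and 'column_colors' layout mirror _DEFAULT_CONFIG,
# while the column name and style values below are purely illustrative.
_EXAMPLE_USER_CONFIG = """
pw_console:
  ui_theme: dark
  column_colors:
    log_level:
      default: bold
      ERROR: red
"""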
@dataclass
class ConsolePrefs:
"""Pigweed Console preferences storage class."""
project_file: Union[Path, bool] = Path('$PW_PROJECT_ROOT/.pw_console.yaml')
user_file: Union[Path, bool] = Path('$HOME/.pw_console.yaml')
_config: Dict[Any, Any] = field(default_factory=dict)
def __post_init__(self) -> None:
self._update_config(_DEFAULT_CONFIG)
if self.project_file:
assert isinstance(self.project_file, Path)
self.project_file = Path(
os.path.expandvars(str(self.project_file.expanduser())))
self.load_config(self.project_file)
if self.user_file:
assert isinstance(self.user_file, Path)
self.user_file = Path(
os.path.expandvars(str(self.user_file.expanduser())))
self.load_config(self.user_file)
# Check for a config file specified by an environment variable.
environment_config = os.environ.get('PW_CONSOLE_CONFIG_FILE', None)
if environment_config:
env_file_path = Path(environment_config)
if not env_file_path.is_file():
raise FileNotFoundError(
f'Cannot load config file: {env_file_path}')
self.reset_config()
self.load_config(env_file_path)
def _update_config(self, cfg: Dict[Any, Any]) -> None:
assert 'pw_console' in cfg
self._config.update(cfg.get('pw_console', {}))
def reset_config(self) -> None:
self._config = {}
self._update_config(_DEFAULT_CONFIG)
def load_config(self, file_path: Path) -> None:
if not file_path.is_file():
return
cfg = yaml.load(file_path.read_text(), Loader=yaml.Loader)
self._update_config(cfg)
@property
def ui_theme(self) -> str:
return self._config.get('ui_theme', '')
def set_ui_theme(self, theme_name: str):
self._config['ui_theme'] = theme_name
@property
def theme_colors(self):
return get_theme_colors(self.ui_theme)
@property
def code_theme(self) -> str:
return self._config.get('code_theme', '')
@property
def swap_light_and_dark(self) -> bool:
return self._config.get('swap_light_and_dark', False)
@property
def repl_history(self) -> Path:
history = Path(self._config['repl_history'])
history = Path(os.path.expandvars(str(history.expanduser())))
return history
@property
def search_history(self) -> Path:
history = Path(self._config['search_history'])
history = Path(os.path.expandvars(str(history.expanduser())))
return history
@property
def spaces_between_columns(self) -> int:
spaces = self._config.get('spaces_between_columns', 2)
assert isinstance(spaces, int) and spaces > 0
return spaces
@property
def omit_unspecified_columns(self) -> bool:
return self._config.get('column_order_omit_unspecified_columns', False)
@property
def hide_date_from_log_time(self) -> bool:
return self._config.get('hide_date_from_log_time', False)
@property
def show_python_file(self) -> bool:
return self._config.get('show_python_file', False)
@property
def show_python_logger(self) -> bool:
return self._config.get('show_python_logger', False)
def toggle_bool_option(self, name: str):
existing_setting = self._config[name]
assert isinstance(existing_setting, bool)
self._config[name] = not existing_setting
@property
def column_order(self) -> list:
return self._config.get('column_order', [])
def column_style(self,
column_name: str,
column_value: str,
default='') -> str:
column_colors = self._config.get('column_colors', {})
column_style = default
if column_name in column_colors:
# If key exists but doesn't have any values.
if not column_colors[column_name]:
return default
# Check for user supplied default.
column_style = column_colors[column_name].get('default', default)
# Check for value specific color, otherwise use the default.
column_style = column_colors[column_name].get(
column_value, column_style)
return column_style
@property
def window_column_split_method(self) -> str:
return self._config.get('window_column_split_method', 'vertical')
@property
def windows(self) -> dict:
return self._config.get('windows', {})
@property
def window_column_modes(self) -> list:
return list(column_type for column_type in self.windows.keys())
@property
def unique_window_titles(self) -> set:
titles = []
for column in self.windows.values():
for window_key_title, window_dict in column.items():
window_options = window_dict if window_dict else {}
# Use 'duplicate_of: Title' if it exists, otherwise use the key.
titles.append(
window_options.get('duplicate_of', window_key_title))
return set(titles)
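# Minimal usage sketch (not part of the original module): it assumes both
# config-file paths are disabled so only _DEFAULT_CONFIG applies, and that
# the PW_CONSOLE_CONFIG_FILE environment variable is unset.
if __name__ == '__main__':
    prefs = ConsolePrefs(project_file=False, user_file=False)
    print(prefs.ui_theme)           # 'dark', from _DEFAULT_CONFIG
    prefs.toggle_bool_option('show_python_file')
    print(prefs.show_python_file)   # True after toggling the default False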
|
the-stack_0_27692
|
from six import string_types
import unittest
from six.moves import StringIO
from robot.parsing.populators import FromFilePopulator, DataRow, FromDirectoryPopulator
from robot.parsing.model import TestCaseFile
from robot.utils.asserts import assert_equals, assert_true, assert_false
from robot.output import LOGGER
LOGGER.unregister_console_logger()
class _MockLogger(object):
def __init__(self):
self._output = StringIO()
def message(self, msg):
self._output.write(msg.message)
def value(self):
return self._output.getvalue()
class FromDirectoryPopulatorTest(unittest.TestCase):
def test_included_suites_with_dot(self):
create_included_suites = FromDirectoryPopulator()._create_included_suites
for inp, exp in [([], []),
(['foo'], ['foo']),
(['bar.zoo'], ['bar.zoo', 'zoo']),
(['1.2.3', 'x.y', 'z'],
['1.2.3', '2.3', '3', 'x.y', 'y', 'z'])]:
assert_equals(list(create_included_suites(inp)), exp)
class _PopulatorTest(unittest.TestCase):
def setUp(self):
self._datafile = TestCaseFile()
self._datafile.directory = '/path/to'
self._populator = FromFilePopulator(self._datafile)
self._logger = _MockLogger()
LOGGER.disable_message_cache()
LOGGER.register_logger(self._logger)
def tearDown(self):
LOGGER.unregister_logger(self._logger)
def _assert_no_parsing_errors(self):
assert_true(self._logger.value() == '', self._logger.value())
def _start_table(self, name):
if isinstance(name, string_types):
name = [name]
return self._populator.start_table(name)
def _create_table(self, name, rows, eof=True):
self._start_table(name)
for r in rows:
self._populator.add(r)
if eof:
self._populator.eof()
def _assert_setting(self, name, exp_value, exp_comment=None):
setting = self._setting_with(name)
assert_equals(setting.value, exp_value)
self._assert_comment(setting, exp_comment)
def _assert_fixture(self, fixture_name, exp_name, exp_args, exp_comment=None):
fixture = self._setting_with(fixture_name)
self._assert_name_and_args(fixture, exp_name, exp_args)
self._assert_comment(fixture, exp_comment)
def _assert_import(self, index, exp_name, exp_args, exp_comment=None):
imp = self._datafile.setting_table.imports[index]
self._assert_name_and_args(imp, exp_name, exp_args)
self._assert_comment(imp, exp_comment)
def _assert_name_and_args(self, item, exp_name, exp_args):
assert_equals(item.name, exp_name)
assert_equals(item.args, exp_args)
def _assert_meta(self, index, exp_name, exp_value, exp_comment=None):
meta = self._setting_with('metadata')[index]
assert_equals(meta.name, exp_name)
assert_equals(meta.value, exp_value)
self._assert_comment(meta, exp_comment)
def _assert_tags(self, tag_name, exp_value):
tag = self._setting_with(tag_name)
assert_equals(tag.value, exp_value)
def _assert_variable(self, index, exp_name, exp_value, exp_comment=[]):
var = self._datafile.variable_table.variables[index]
assert_equals(var.name, exp_name)
assert_equals(var.value, exp_value)
self._assert_comment(var, exp_comment)
def _assert_comment(self, item, exp_comment):
if exp_comment:
assert_equals(item.comment.as_list(), exp_comment)
def _setting_with(self, name):
return getattr(self._datafile.setting_table, name)
def _nth_test(self, index):
return self._datafile.testcase_table.tests[index-1]
def _first_test(self):
return self._nth_test(1)
def _nth_uk(self, index):
return self._datafile.keyword_table.keywords[index-1]
def _number_of_steps_should_be(self, test, expected_steps):
assert_equals(len(test.steps), expected_steps)
class TablePopulatorTest(_PopulatorTest):
def test_starting_valid_table(self):
for name in ['Test Cases', ' variables ', 'K E Y WO R D S']:
assert_true(self._start_table(name))
def test_table_headers(self):
header_list = ['seTTINGS', 'header', 'again']
self._create_table(header_list,[])
setting_table = self._datafile.setting_table
assert_equals(setting_table.header, header_list)
assert_equals(setting_table.name, header_list[0])
def test_starting_invalid_table(self):
assert_false(self._start_table('Per Se'))
def test_adding_empty_row_should_not_fail(self):
self._create_table('Settings', [[]])
def test_curdir_handling(self):
self._create_table('Test cases', [['My test name'],
['', 'Log', '${CURDIR}']])
assert_equals(self._first_test().steps[0].args,
[self._datafile.directory])
def test_turn_off_curdir_handling(self):
from robot.parsing import populators
populators.PROCESS_CURDIR = False
self.setUp()
self._create_table('Test cases', [['My test name'],
['', 'Log', '${CURDIR}']])
assert_equals(self._first_test().steps[0].args, ['${CURDIR}'])
populators.PROCESS_CURDIR = True
def test_whitespace_is_ignored(self):
self._create_table('Test Cases', [['My test'],
[' ', '[Tags]', 'foo', ' \t '],
[' '],
[ '\t'],
['', 'Log Many', '', 'argh']])
test = self._first_test()
assert_equals(test.name, 'My test')
self._number_of_steps_should_be(test, 1)
assert_equals(test.tags.value, ['foo'])
def test_escaping_empty_cells(self):
self._create_table('Settings', [['Documentation', '\\']],)
self._assert_setting('doc', '')
self._create_table('Test cases', [['test',
'', 'Log Many', 'foo', '\\']],)
assert_equals(self._first_test().steps[0].args, ['Log Many', 'foo', ''])
def test_populator_happy_path_workflow(self):
self._create_table('settings', [['Library', 'FooBarness']], eof=False)
self._create_table('Variables', [['${scalar}', 'value']], eof=False)
self._create_table('Test cases', [['My test name'],
['', 'Log', 'quux']], eof=False)
self._create_table('More cases', [['My other test name'],
['', 'Log', 'foox']], eof=False)
self._create_table('Keywords', [['My User Keyword'],
['', 'Foo', 'Bar']], eof=False)
self._populator.eof()
self._assert_import(0, 'FooBarness', [])
assert_equals(len(self._datafile.variable_table.variables), 1)
assert_equals(len(self._datafile.testcase_table.tests), 1)
assert_equals(len(self._nth_uk(1).steps), 1)
class SettingTablePopulatingTest(_PopulatorTest):
def test_testcasefile_settings(self):
self._try_testcasefile_settings_with_postfix('')
def test_testcasefile_settings_with_colon(self):
self._try_testcasefile_settings_with_postfix(':')
def test_testcasefile_settings_with_colon_and_spaces(self):
self._try_testcasefile_settings_with_postfix(' : ')
def _try_testcasefile_settings_with_postfix(self, postfix):
doc = 'This is doc'
template = 'Foo'
more_doc = 'smore'
force_tags = 'force'
more_tags = 'more tagness'
even_more_tags = 'even more'
default_tags = 'default'
setup_name, setup_args = 'Keyword Name', ['a1', 'a2']
table = [['Documentation', doc],
['S uite Tear Down'] + [setup_name],
['S uite SeTUp'] + [setup_name] + setup_args,
['S uite teardown'] + setup_args,
['Doc um entati on', more_doc],
['force tags', force_tags],
['Default tags', default_tags],
['FORCETAGS', more_tags],
['test timeout', '1s'],
['De Fault TAGS', more_tags, even_more_tags],
['test timeout', 'timeout message'],
['test timeout', more_doc],
['test template', template]
]
self._postfix_settings(table, postfix)
self._create_table('Settings', table)
self._assert_setting('doc', doc + ' ' + more_doc)
self._assert_fixture('suite_setup', setup_name, setup_args)
self._assert_fixture('suite_teardown', setup_name, setup_args)
self._assert_tags('default_tags', [default_tags, more_tags, even_more_tags])
self._assert_tags('force_tags', [force_tags, more_tags])
timeout = self._setting_with('test_timeout')
assert_equals(timeout.value, '1s')
assert_equals(timeout.message, 'timeout message '+more_doc)
self._assert_setting('test_template', template)
def _postfix_settings(self, table, postfix):
for setting in table:
setting[0] = setting[0]+postfix
def test_imports(self):
self._create_table('settings', [['Library', 'FooBarness'],
['Library', 'BarFooness', 'arg1', 'arg2'],
['Resource', 'QuuxNess.txt'],
['Variables', 'varzors.py']])
assert_equals(len(self._datafile.setting_table.imports), 4)
self._assert_import(0, 'FooBarness', [])
self._assert_import(1, 'BarFooness', ['arg1', 'arg2'])
self._assert_import(2, 'QuuxNess.txt', [])
self._assert_import(3, 'varzors.py', [])
def test_free_suite_metadata(self):
self._create_table('settings', [['Meta: Foon:ess', 'Barness'],
['Metadata', 'Quux', 'Value']])
self._assert_meta(0, 'Foon:ess', 'Barness')
self._assert_meta(1, 'Quux', 'Value')
def test_line_continuation(self):
self._create_table('Settings', [['Documentation', 'doc'],
['...'],
['...', 'in multiple lines'],
['Force Tags', 'one', 'two'],
['...'],
['', '...', 'three']
])
self._assert_setting('doc', 'doc\\n\\nin multiple lines')
self._assert_setting('force_tags', ['one', 'two', 'three'])
def test_invalid_settings(self):
self._create_table('Settings', [['In valid', 'val ue']])
assert_equals(self._logger.value(), "Error in file 'None': "
"Non-existing setting 'In valid'.")
def test_continuing_in_the_begining_of_the_table(self):
self._create_table('Settings', [['...']])
assert_equals(self._logger.value(), "Error in file 'None': "
"Non-existing setting '...'.")
class DocumentationCatenationTest(_PopulatorTest):
def test_multiple_cells_are_catenated_with_space(self):
self._assert_doc([['doc', 'in two cells']],
'doc in two cells')
def test_multiple_rows_are_catenated_with_newline(self):
self._assert_doc([['doc'], ['...', 'in two lines']],
'doc\\nin two lines')
def test_newline_is_not_added_if_it_already_exists(self):
self._assert_doc([['doc\\n'], ['in two lines']],
'doc\\nin two lines')
def test_newline_is_not_added_if_it_already_exists2(self):
self._assert_doc([['doc\\\\n'], ['in multiple\\\\\\n'], ['lines']],
'doc\\\\n\\nin multiple\\\\\\nlines')
def test_backslash_escapes_newline_adding(self):
self._assert_doc([['doc\\'], ['in two lines']],
'doc\\ in two lines')
def test_backslash_escapes_newline_adding2(self):
self._assert_doc([['doc\\\\'], ['in multiple\\\\\\', 'lines']],
'doc\\\\\\nin multiple\\\\\\ lines')
def test_documentation_defined_multiple_times(self):
self._create_table('Settings', [['Documentation', 'some doc'],
['Documentation', 'other doc'],
['...', 'third line']])
self._assert_setting('doc', 'some doc other doc\\nthird line')
def _assert_doc(self, doc_lines, expected):
doc_lines = [['...'] + line for line in doc_lines]
self._create_table('Settings', [['Documentation']] + doc_lines)
self._assert_setting('doc', expected)
class MetadataCatenationTest(_PopulatorTest):
def test_value_on_many_cells_is_catenated_with_spaces(self):
self._assert_metadata_value([['value', 'in', 'cells']],
'value in cells')
def test_value_on_many_lines_is_catenated_with_newlines(self):
self._assert_metadata_value([['value'], ['in'], ['lines']],
'value\\nin\\nlines')
def _assert_metadata_value(self, doc_lines, expected):
value_lines = [['...'] + line for line in doc_lines]
self._create_table('Settings', [['Metadata', 'metaname']] + value_lines)
self._assert_meta(0, 'metaname', expected)
class VariableTablePopulatingTest(_PopulatorTest):
def test_populating_variables(self):
self._create_table('Variables', [['${scalar}', 'value'],
['${slist}', '[s, o, m, e]'],
['@{list}', 'v1', 'v2', 'v3', 'v4']])
assert_equals(len(self._datafile.variable_table.variables), 3)
self._assert_variable(0, '${scalar}', ['value'])
self._assert_variable(1, '${slist}', ['[s, o, m, e]'])
self._assert_variable(2, '@{list}', ['v1', 'v2', 'v3', 'v4'])
def test_line_continuation(self):
self._create_table('Variables', [['@{list}'],
['...', 'v1'],
['', '...', 'v2'],
['', '', '...', 'v3', 'v4']])
self._assert_variable(0, '@{list}', ['v1', 'v2', 'v3', 'v4'])
def test_continuing_in_the_begining_of_the_table(self):
self._create_table('Variables', [['...', 'val']])
self._assert_variable(0, '...', ['val'])
class TestCaseTablePopulatingTest(_PopulatorTest):
def test_test_case_populating(self):
self._create_table('Test cases', [['My test name'],
['', 'No operation'],
['Another test'],
['', 'Log', 'quux']])
assert_equals(len(self._datafile.testcase_table.tests), 2)
test = self._first_test()
assert_equals(len(test.steps), 1)
assert_equals(test.steps[0].name, 'No operation')
assert_equals(len(self._first_test().steps), 1)
def test_case_name_and_first_step_on_same_row(self):
self._create_table('Test cases', [['My test name', 'No Operation']])
assert_equals(len(self._first_test().steps), 1)
def test_continuing_in_the_begining_of_the_table(self):
self._create_table('test cases', [['...', 'foo']])
assert_equals(self._first_test().name, '...')
def test_line_continuation(self):
self._create_table('Test cases', [['My test name', 'Log Many', 'foo'],
['', '...', 'bar', 'quux'],
['Another test'],
['', 'Log Many', 'quux'],
['', '', '...', 'fooness'],
['', '', '', '...', 'and more'],
['', 'Log', 'barness']])
self._number_of_steps_should_be((self._first_test()), 1)
self._number_of_steps_should_be(self._nth_test(2), 2)
assert_equals(self._nth_test(2).steps[0].name, 'Log Many')
assert_equals(self._nth_test(2).steps[0].args, ['quux', 'fooness', 'and more'])
def test_unnamed_testcase(self):
self._create_table('test cases', [['', 'foo', '#comment'],
['', '[documentation]', "What's up doc?"]])
test = self._first_test()
assert_equals(test.name, '')
assert_equals(test.doc.value, "What's up doc?")
assert_equals(test.steps[0].comment.as_list(), ['#comment'])
def test_unnamed_test_and_line_continuation(self):
self._create_table('test cases', [['', '...', 'foo', '# comment']])
assert_equals(self._first_test().name, '')
assert_equals(self._first_test().steps[0].name, 'foo')
assert_equals(self._first_test().steps[0].comment.as_list(), ['# comment'])
def test_test_settings(self):
self._try_test_settings([['My test name'],
['', '[Documentation]', 'This is domumentation for the test case'],
['', '[ Tags ]', 'ankka', 'kameli'],
['', '... ', '', 'aasi'],
['', 'Log', 'barness']])
def test_test_settings_with_colons(self):
self._try_test_settings([['My test name'],
['', '[Documentation:]', 'This is domumentation for the test case'],
['', '[ Tags : ]', 'ankka', 'kameli'],
['', '... ', '', 'aasi'],
['', 'Log', 'barness']])
def _try_test_settings(self, table):
self._create_table('Test cases', table)
test = self._first_test()
assert_equals(len(test.steps), 1)
assert_equals(test.doc.value, 'This is domumentation for the test case')
assert_equals(test.tags.value, ['ankka', 'kameli', '', 'aasi'])
def test_invalid_test_settings(self):
self._create_table('Test cases', [['My test name'],
['', '[Aasi]']])
assert_equals(self._logger.value(), "Error in file 'None': "
"Invalid syntax in test case "
"'My test name': Non-existing "
"setting 'Aasi'.")
def test_test_template_overrides_setting(self):
setting_test_template = 'Foo'
test_test_template = 'Bar'
self._create_table('Settings', [['Test Template', setting_test_template]],
eof=False)
self._create_table('Test Cases', [['','[Template]', test_test_template]])
test = self._first_test()
assert_equals(test.template.value, test_test_template)
class UserKeywordTablePopulatingTest(_PopulatorTest):
def test_user_keyword_populating(self):
self._create_table('Keywords', [['My User Keyword'],
['', '[Arguments]', '${foo}', '${bar}'],
['', 'Log Many', '${foo}'],
['', '...', 'bar'],
['', 'No Operation'],
['', '[Return]', 'ankka', 'kameli']])
uk = self._nth_uk(0)
assert_equals(len(uk.steps), 2)
assert_equals(uk.args.value, ['${foo}', '${bar}'])
assert_equals(uk.return_.value, ['ankka', 'kameli'])
def test_continuing_in_the_begining_of_the_table(self):
self._create_table('keywords', [['...', 'foo']])
assert_equals(self._nth_uk(1).name, '...')
def test_invalid_keyword_settings(self):
self._create_table('Keywords', [['My User Keyword'],
['', '[ank ka]']])
assert_equals(self._logger.value(), "Error in file 'None': "
"Invalid syntax in keyword "
"'My User Keyword': Non-existing "
"setting 'ank ka'.")
class ForLoopPopulatingTest(_PopulatorTest):
def test_single_loop(self):
self._create_table('Test cases', [['For loop test'],
['', ':FOR', '${i}', 'IN', '@{list}'],
['', '', 'Log', '${i}']])
assert_equals(len(self._first_test().steps), 1)
for_loop = self._first_test().steps[0]
assert_equals(len(for_loop.steps), 1)
assert_equals(for_loop.flavor, 'IN')
assert_equals(for_loop.vars, ['${i}'])
assert_equals(for_loop.items, ['@{list}'])
def test_in_range_for_loop(self):
self._create_table('Test cases', [['For loop test'],
['', 'Log', 'Before FOR'],
['', ': for', '${i}', '${j}', 'IN RANGE', '10'],
['', '', 'Log', '${i}'],
['', '', 'Fail', '${j}'],
['', 'Log', 'Outside FOR']])
assert_equals(len(self._first_test().steps), 3)
for_loop = self._first_test().steps[1]
assert_equals(len(for_loop.steps), 2)
assert_equals(for_loop.flavor, 'IN RANGE')
assert_equals(for_loop.vars, ['${i}', '${j}'])
def test_line_continuation(self):
self._create_table('Test cases', [['Malicious for loop test'],
['', 'Log', 'Before FOR'],
['', ':::: fOr', '${i}', 'IN', '10'],
['', '...', '20'],
['', '', '...', '30', '40'],
['', '', '', '...', '50', '60'],
['', '', 'Log Many', '${i}'],
['', '', '...', '${i}'],
['', '...', '${i}'],
['', 'Log', 'Outside FOR']])
assert_equals(len(self._first_test().steps), 3)
for_loop = self._first_test().steps[1]
assert_equals(len(for_loop.steps), 1)
assert_equals(for_loop.flavor, 'IN')
assert_equals(for_loop.vars, ['${i}'])
assert_equals(for_loop.items, ['10', '20', '30', '40', '50', '60'])
def test_with_empty_body(self):
self._create_table('Test cases', [['For loop test'],
['', ':FOR ', '${var}', 'IN', 'foo'],
['', 'Log', 'outside FOR']])
test = self._first_test()
assert_equals(len(test.steps), 2)
assert_equals(test.steps[0].steps, [])
class TestPopulatingComments(_PopulatorTest):
def test_setting_table(self):
self._create_table('settings', [['Force Tags', 'Foo', 'Bar', '#comment'],
['Library', 'Foo', '# Lib comment'],
[' #Resource', 'resource.txt'],
['Resource', 'resource2.txt'],
['# comment', 'between rows', 'in many cells'],
['Default Tags', 'Quux', '# also eol'],
['Variables', 'varz.py'],
['# between values'],
['...', 'arg'],
['Metadata', 'metaname', 'metavalue'],
['### last line is commented'],
])
self._assert_no_parsing_errors()
self._assert_setting('force_tags', ['Foo', 'Bar'], ['#comment'])
self._assert_import(0, 'Foo', [], ['# Lib comment'])
self._assert_import(1, 'resource2.txt', [], ['#Resource', 'resource.txt'])
self._assert_setting('default_tags', ['Quux'], ['# comment', 'between rows', 'in many cells', '# also eol'])
self._assert_import(2, 'varz.py', ['arg'], ['# between values'])
self._assert_meta(0, 'metaname', 'metavalue', ['### last line is commented'])
def test_variable_table(self):
self._create_table('variables', [['# before'],
['${varname}', 'varvalue', '# has comment'],
['${name}', '# no value'],
['# middle', 'A', 'B', 'C'],
['@{items}', '1', '2', '3'],
['# s1'],
['', '# s2', ''],
['', '', '# s3'],
['@{X}', '# c1'],
['', '', '# c2'],
['...', 'V1', '# c3'],
['# c4'],
['...', 'V2', '# c5'],
['###EOT###']])
self._assert_no_parsing_errors()
self._assert_variable(0, '', [], ['# before'])
self._assert_variable(1, '${varname}', ['varvalue'], ['# has comment'])
self._assert_variable(2, '${name}', [''], ['# no value'])
self._assert_variable(3, '', [], ['# middle', 'A', 'B', 'C'])
self._assert_variable(4, '@{items}', ['1', '2', '3'])
self._assert_variable(5, '', [], ['# s1'])
self._assert_variable(6, '', [], ['# s2'])
self._assert_variable(7, '', [], ['# s3'])
self._assert_variable(8, '@{X}', ['V1', 'V2'], ['# c1', '# c2', '# c3', '# c4', '# c5'])
self._assert_variable(9, '', [], ['###EOT###'])
def test_test_case_table(self):
self._create_table('test cases', [['# start of table comment'],
['Test case'],
['', 'No operation', '# step comment'],
['', '', '#This step has', 'only comment'],
['Another test', '#comment in name row'],
['', 'Log many', 'argh'],
['#', 'Comment between step def'],
['', '...', 'urgh'],
['Test with for loop'],
['',':FOR', '${i}', 'IN', '1', '# FOR comment'],
['','...', '2', '3', '##continues', 'here'],
['#commented out in for loop'],
['', '#commented out in for loop, again'],
['','', 'Fooness in the bar', '###end commtne'],
['','# ', ' Barness '],
['', 'Lodi']
])
self._assert_comment(self._first_test().steps[0], ['# start of table comment'])
self._assert_comment(self._first_test().steps[1], ['# step comment'])
self._assert_comment(self._first_test().steps[2], ['#This step has', 'only comment'])
self._assert_comment(self._nth_test(2).steps[0], ['#comment in name row'])
self._assert_comment(self._nth_test(2).steps[1], ['#', 'Comment between step def'])
assert_equals(self._nth_test(2).steps[1].args, ['argh', 'urgh'])
self._assert_comment(self._nth_test(3).steps[0], ['# FOR comment', '##continues', 'here'])
self._assert_comment(self._nth_test(3).steps[0].steps[0], ['#commented out in for loop'])
self._assert_comment(self._nth_test(3).steps[0].steps[1], ['#commented out in for loop, again'])
self._assert_comment(self._nth_test(3).steps[0].steps[2], ['###end commtne'])
self._assert_comment(self._nth_test(3).steps[1], ['#', 'Barness'])
self._number_of_steps_should_be(self._nth_test(3), 3)
def _assert_comment(self, step, expected_comment):
assert_equals(step.comment.as_list(), expected_comment)
class DataRowTest(unittest.TestCase):
def test_commented_row(self):
assert_true(DataRow(['#start of table comment']).is_commented())
def test_escaping_empty_cells(self):
assert_equals(DataRow(['foo', '\\', '']).all, ['foo', ''])
if __name__ == '__main__':
unittest.main()
|
the-stack_0_27693
|
from __future__ import absolute_import
# Set the right path to collections
try:
from collections.abc import Mapping, MutableMapping
except ImportError:
from collections import Mapping, MutableMapping
try:
from threading import RLock
except ImportError: # Platform-specific: No threads available
class RLock:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
from .packages.six import iterkeys, itervalues, PY3
__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
Every time an item is evicted from the container,
        ``dispose_func(value)`` is called with the evicted value.
"""
ContainerCls = OrderedDict
def __init__(self, maxsize=10, dispose_func=None):
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self.lock = RLock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self.lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
# If we didn't evict an existing value, we might have to evict the
# least recently used item from the beginning of the container.
if len(self._container) > self._maxsize:
_key, evicted_value = self._container.popitem(last=False)
if self.dispose_func and evicted_value is not _Null:
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self.lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
def clear(self):
with self.lock:
# Copy pointers to all values, then wipe the mapping
values = list(itervalues(self._container))
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self):
with self.lock:
return list(iterkeys(self._container))
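# Illustrative helper (not part of the original urllib3 module): demonstrates
# the LRU eviction and dispose_func behaviour documented above. It is only
# defined, never called, so importing this module stays side-effect free.
def _demo_recently_used_container():
    evicted = []
    lru = RecentlyUsedContainer(maxsize=2, dispose_func=evicted.append)
    lru['a'] = 1
    lru['b'] = 2
    _ = lru['a']    # accessing 'a' moves it to the end of the eviction line
    lru['c'] = 3    # exceeds maxsize, so the least recently used 'b' is evicted
    assert evicted == [2]
    assert sorted(lru.keys()) == ['a', 'c']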
class HTTPHeaderDict(MutableMapping):
"""
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
A ``dict`` like container for storing HTTP Headers.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
"""
def __init__(self, headers=None, **kwargs):
super(HTTPHeaderDict, self).__init__()
self._container = {}
if headers is not None:
if isinstance(headers, HTTPHeaderDict):
self._copy_from(headers)
else:
self.extend(headers)
if kwargs:
self.extend(kwargs)
def __setitem__(self, key, val):
self._container[key.lower()] = (key, val)
return self._container[key.lower()]
def __getitem__(self, key):
val = self._container[key.lower()]
return ', '.join(val[1:])
def __delitem__(self, key):
del self._container[key.lower()]
def __contains__(self, key):
return key.lower() in self._container
def __eq__(self, other):
if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
return False
if not isinstance(other, type(self)):
other = type(self)(other)
return (dict((k.lower(), v) for k, v in self.itermerged()) ==
dict((k.lower(), v) for k, v in other.itermerged()))
def __ne__(self, other):
return not self.__eq__(other)
if not PY3: # Python 2
iterkeys = MutableMapping.iterkeys
itervalues = MutableMapping.itervalues
__marker = object()
def __len__(self):
return len(self._container)
def __iter__(self):
# Only provide the originally cased names
for vals in self._container.values():
yield vals[0]
def pop(self, key, default=__marker):
'''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
# Using the MutableMapping function directly fails due to the private marker.
# Using ordinary dict.pop would expose the internal structures.
# So let's reinvent the wheel.
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def discard(self, key):
try:
del self[key]
except KeyError:
pass
def add(self, key, val):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
"""
key_lower = key.lower()
new_vals = key, val
# Keep the common case aka no item present as fast as possible
vals = self._container.setdefault(key_lower, new_vals)
if new_vals is not vals:
# new_vals was not inserted, as there was a previous one
if isinstance(vals, list):
# If already several items got inserted, we have a list
vals.append(val)
else:
# vals should be a tuple then, i.e. only one item so far
# Need to convert the tuple to list for further extension
self._container[key_lower] = [vals[0], vals[1], val]
def extend(self, *args, **kwargs):
"""Generic import function for any type of header-like object.
Adapted version of MutableMapping.update in order to insert items
with self.add instead of self.__setitem__
"""
if len(args) > 1:
raise TypeError("extend() takes at most 1 positional "
"arguments ({0} given)".format(len(args)))
other = args[0] if len(args) >= 1 else ()
if isinstance(other, HTTPHeaderDict):
for key, val in other.iteritems():
self.add(key, val)
elif isinstance(other, Mapping):
for key in other:
self.add(key, other[key])
elif hasattr(other, "keys"):
for key in other.keys():
self.add(key, other[key])
else:
for key, value in other:
self.add(key, value)
for key, value in kwargs.items():
self.add(key, value)
def getlist(self, key):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
try:
vals = self._container[key.lower()]
except KeyError:
return []
else:
if isinstance(vals, tuple):
return [vals[1]]
else:
return vals[1:]
# Backwards compatibility for httplib
getheaders = getlist
getallmatchingheaders = getlist
iget = getlist
def __repr__(self):
return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
def _copy_from(self, other):
for key in other:
val = other.getlist(key)
if isinstance(val, list):
# Don't need to convert tuples
val = list(val)
self._container[key.lower()] = [key] + val
def copy(self):
clone = type(self)()
clone._copy_from(self)
return clone
def iteritems(self):
"""Iterate over all header lines, including duplicate ones."""
for key in self:
vals = self._container[key.lower()]
for val in vals[1:]:
yield vals[0], val
def itermerged(self):
"""Iterate over all headers, merging duplicate ones together."""
for key in self:
val = self._container[key.lower()]
yield val[0], ', '.join(val[1:])
def items(self):
return list(self.iteritems())
@classmethod
def from_httplib(cls, message): # Python 2
"""Read headers from a Python 2 httplib message object."""
# python2.7 does not expose a proper API for exporting multiheaders
# efficiently. This function re-reads raw lines from the message
# object and extracts the multiheaders properly.
headers = []
for line in message.headers:
if line.startswith((' ', '\t')):
key, value = headers[-1]
headers[-1] = (key, value + '\r\n' + line.rstrip())
continue
key, value = line.split(':', 1)
headers.append((key, value.strip()))
return cls(headers)
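# Illustrative note (hypothetical message.headers content, not from urllib3):
# given raw lines such as
#   ['Set-Cookie: a=1\r\n', 'Set-Cookie: b=2\r\n', 'X-Folded: part1\r\n', ' part2\r\n']
# from_httplib() appends the folded continuation onto 'X-Folded' and keeps both
# 'Set-Cookie' entries, so getlist('Set-Cookie') returns ['a=1', 'b=2'] and
# headers['set-cookie'] merges them into 'a=1, b=2'.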
|
the-stack_0_27694
|
#!/usr/bin/python3
#‑∗‑ coding: utf‑8 ‑∗‑
import torch
import torch.nn as nn
class DQNConv2D(nn.Module):
def __init__(self, input_shape, action_space):
super(DQNConv2D, self).__init__()
self.conv2d = nn.Sequential(
nn.Conv2d(input_shape[0], 128, 5, padding='same'),
nn.ReLU(),
nn.Conv2d(128, 128, 5),
nn.ReLU(),
)
out_size = self._get_conv_out(input_shape)
self.fc_val = nn.Sequential(
nn.Linear(out_size, 512),
nn.ReLU(),
nn.Linear(512, 1),
)
self.fc_adv = nn.Sequential(
nn.Linear(out_size, 512),
nn.ReLU(),
nn.Linear(512, action_space),
)
def _get_conv_out(self, shape):
out = self.conv2d(torch.zeros(1, *shape))
return int(torch.prod(torch.tensor(out.size())))
def forward(self, x):
        # flatten the shared convolutional features before the dueling value/advantage heads
        conv_out = self.conv2d(x).view(x.size()[0], -1)
        val = self.fc_val(conv_out)
        adv = self.fc_adv(conv_out)
return val + (adv - adv.mean(dim=1, keepdim=True))
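# Quick shape sanity check (not from the original project): the input shape,
# batch size and action count are arbitrary assumptions chosen only to
# exercise the dueling heads, and it assumes a PyTorch version that accepts
# padding='same' in nn.Conv2d as used in __init__ above.
if __name__ == '__main__':
    net = DQNConv2D(input_shape=(2, 16, 16), action_space=3)
    batch = torch.zeros(4, 2, 16, 16)
    q_values = net(batch)
    print(q_values.shape)  # expected: torch.Size([4, 3])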
|
the-stack_0_27696
|
# #########################################################################
# Copyright (c) , UChicago Argonne, LLC. All rights reserved. #
# #
# See LICENSE file. #
# #########################################################################
"""
This user script manages reconstruction(s).
Depending on configuration it starts either single reconstruction, GA, or multiple reconstructions. In multiple reconstruction scenario or split scans the script runs concurrent reconstructions.
"""
__author__ = "Barbara Frosik"
__copyright__ = "Copyright (c), UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
__all__ = ['rec_process',
'get_gpu_use',
'manage_reconstruction',
'main']
import sys
import signal
import os
import argparse
from multiprocessing import Process, Queue
import cohere
import convertconfig as conv
MEM_FACTOR = 1500
ADJUST = 0.0
def rec_process(proc, conf_file, datafile, dir, gpus, r, q):
"""
    Calls the reconstruction function in a module identified by parameter. After the reconstruction is finished, it enqueues the process id with the associated list of gpus.
Parameters
----------
proc : str
        processing library, choices are cpu, cuda, opencl
conf_file : str
configuration file with reconstruction parameters
datafile : str
name of file containing data
dir : str
parent directory to the <prefix>/results, or results directory
gpus : list
a list of gpus that will be used for reconstruction
r : str
        a string identifying the module to use for reconstruction
q : Queue
        a queue that returns a tuple of process id and associated gpu list after the reconstruction process is done
Returns
-------
nothing
"""
if r == 'g':
cohere.reconstruction_GA.reconstruction(proc, conf_file, datafile, dir, gpus)
elif r == 'm':
cohere.reconstruction_multi.reconstruction(proc, conf_file, datafile, dir, gpus)
elif r == 's':
cohere.reconstruction_single.reconstruction(proc, conf_file, datafile, dir, gpus)
q.put((os.getpid(), gpus))
def get_gpu_use(devices, no_dir, no_rec, data_shape):
"""
Determines the use case, available GPUs that match configured devices, and selects the optimal distribution of reconstructions on available devices.
Parameters
----------
devices : list
list of configured GPU ids to use for reconstructions. If -1, operating system is assigning it
no_dir : int
number of directories to run independent reconstructions
no_rec : int
configured number of reconstructions to run in each directory
data_shape : tuple
shape of data array, needed to estimate how many reconstructions will fit into available memory
Returns
-------
gpu_use : list
        a list of device ids, one per planned run; when multiple directories are processed, a list of such lists with no_rec entries per directory
"""
from functools import reduce
if sys.platform == 'darwin':
# the gpu library is not working on OSX, so run one reconstruction on each GPU
gpu_load = len(devices) * [1, ]
else:
# find size of data array
data_size = reduce((lambda x, y: x * y), data_shape)
rec_mem_size = data_size / MEM_FACTOR
gpu_load = cohere.get_gpu_load(rec_mem_size, devices)
no_runs = no_dir * no_rec
gpu_distribution = cohere.get_gpu_distribution(no_runs, gpu_load)
gpu_use = []
available = reduce((lambda x, y: x + y), gpu_distribution)
dev_index = 0
i = 0
while i < available:
if gpu_distribution[dev_index] > 0:
gpu_use.append(devices[dev_index])
gpu_distribution[dev_index] = gpu_distribution[dev_index] - 1
i += 1
dev_index += 1
dev_index = dev_index % len(devices)
if no_dir > 1:
gpu_use = [gpu_use[x:x + no_rec] for x in range(0, len(gpu_use), no_rec)]
return gpu_use
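# Illustrative example of the round-robin packing above (all numbers are
# hypothetical, since the per-device load really comes from cohere.get_gpu_load):
# with devices=[0, 1], a distribution of two runs per device, no_dir=2 and
# no_rec=2, the flattened assignment is [0, 1, 0, 1] and the returned value is
# [[0, 1], [0, 1]] -- one sub-list of device ids per scan directory.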
def manage_reconstruction(experiment_dir, rec_id=None):
"""
    This function starts the interruption discovery process and continues the reconstruction processing.
It reads configuration file defined as <experiment_dir>/conf/config_rec.
If multiple generations are configured, or separate scans are discovered, it will start concurrent reconstructions.
It creates image.npy file for each successful reconstruction.
Parameters
----------
experiment_dir : str
        directory where the experiment files are located
rec_id : str
optional, if given, alternate configuration file will be used for reconstruction, (i.e. <rec_id>_config_rec)
Returns
-------
nothing
"""
print('starting reconstruction')
# the rec_id is a postfix added to config_rec configuration file. If defined, use this configuration.
conf_dir = os.path.join(experiment_dir, 'conf')
# convert configuration files if needed
main_conf = os.path.join(conf_dir, 'config')
if os.path.isfile(main_conf):
config_map = cohere.read_config(main_conf)
if config_map is None:
print ("info: can't read " + main_conf + " configuration file")
return None
else:
print("info: missing " + main_conf + " configuration file")
return None
if 'converter_ver' not in config_map or conv.get_version() is None or conv.get_version() < config_map['converter_ver']:
config_map = conv.convert(conf_dir, 'config')
# verify main config file
er_msg = cohere.verify('config', config_map)
if len(er_msg) > 0:
# the error message is printed in verifier
return None
if rec_id is None:
conf_file = os.path.join(conf_dir, 'config_rec')
else:
conf_file = os.path.join(conf_dir, 'config_rec_' + rec_id)
config_map = cohere.read_config(conf_file)
if config_map is None:
return
# verify configuration
er_msg = cohere.verify('config_rec', config_map)
if len(er_msg) > 0:
# the error message is printed in verifier
return None
# find which library to run it on, default is numpy ('np')
if 'processing' in config_map:
proc = config_map['processing']
else:
proc = 'auto'
lib = 'np'
if proc == 'auto':
try:
import cupy
lib = 'cp'
except:
# currently we could not install arrayfire on linux, so numpy is the second choice
pass
elif proc == 'cp':
try:
import cupy
lib = 'cp'
except:
print('cupy is not installed, select different library (proc)')
return
elif proc == 'np':
pass # lib set to 'np'
    elif proc == 'af' or proc == 'cpu' or proc == 'cuda' or proc == 'opencl':
try:
import arrayfire
lib = proc
except:
print('arrayfire is not installed, select different library (proc)')
return
else:
print('invalid "proc" value', proc, 'is not supported')
return
    # exp_dirs_data list holds pairs of data and directory, where the directory is the root of the data/data.tif file, and
# data is the data.tif file in this directory.
exp_dirs_data = []
# experiment may be multi-scan in which case reconstruction will run for each scan
for dir in os.listdir(experiment_dir):
if dir.startswith('scan'):
datafile = os.path.join(experiment_dir, dir, 'data', 'data.tif')
if os.path.isfile(datafile):
exp_dirs_data.append((datafile, os.path.join(experiment_dir, dir)))
# if there are no scan directories, assume it is combined scans experiment
if len(exp_dirs_data) == 0:
# in typical scenario data_dir is not configured, and it is defaulted to <experiment_dir>/data
# the data_dir is ignored in multi-scan scenario
if 'data_dir' in config_map:
data_dir = config_map['data_dir']
else:
data_dir = os.path.join(experiment_dir, 'phasing_data')
datafile = os.path.join(data_dir, 'data.tif')
if os.path.isfile(datafile):
exp_dirs_data.append((datafile, experiment_dir))
no_runs = len(exp_dirs_data)
if no_runs == 0:
print('did not find data.tif nor data.npy file(s). ')
return
if 'ga_generations' in config_map:
generations = config_map['ga_generations']
else:
generations = 0
if 'reconstructions' in config_map:
reconstructions = config_map['reconstructions']
else:
reconstructions = 1
device_use = []
if lib == 'cpu' or lib == 'np':
cpu_use = [-1] * reconstructions
if no_runs > 1:
for _ in range(no_runs):
device_use.append(cpu_use)
else:
device_use = cpu_use
else:
if 'device' in config_map:
devices = config_map['device']
else:
devices = [-1]
if no_runs * reconstructions > 1:
data_shape = cohere.read_tif(exp_dirs_data[0][0]).shape
device_use = get_gpu_use(devices, no_runs, reconstructions, data_shape)
else:
device_use = devices
if no_runs == 1:
if len(device_use) == 0:
device_use = [-1]
dir_data = exp_dirs_data[0]
datafile = dir_data[0]
dir = dir_data[1]
if generations > 1:
cohere.reconstruction_GA.reconstruction(lib, conf_file, datafile, dir, device_use)
elif reconstructions > 1:
cohere.reconstruction_multi.reconstruction(lib, conf_file, datafile, dir, device_use)
else:
cohere.reconstruction_single.reconstruction(lib, conf_file, datafile, dir, device_use)
else:
if len(device_use) == 0:
device_use = [[-1]]
else:
            # check if it is worth using the last chunk
if lib != 'cpu' and lib != 'np' and len(device_use[0]) > len(device_use[-1]) * 2:
device_use = device_use[0:-1]
if generations > 1:
r = 'g'
elif reconstructions > 1:
r = 'm'
else:
r = 's'
q = Queue()
for gpus in device_use:
q.put((None, gpus))
# index keeps track of the multiple directories
index = 0
processes = {}
pr = []
while index < no_runs:
pid, gpus = q.get()
if pid is not None:
os.kill(pid, signal.SIGKILL)
del processes[pid]
datafile = exp_dirs_data[index][0]
dir = exp_dirs_data[index][1]
p = Process(target=rec_process, args=(lib, conf_file, datafile, dir, gpus, r, q))
p.start()
pr.append(p)
processes[p.pid] = index
index += 1
for p in pr:
p.join()
# close the queue
q.close()
print('finished reconstruction')
def main(arg):
parser = argparse.ArgumentParser()
parser.add_argument("experiment_dir", help="experiment directory.")
parser.add_argument("--rec_id", help="reconstruction id, a postfix to 'results_phasing_' directory")
args = parser.parse_args()
experiment_dir = args.experiment_dir
if args.rec_id:
manage_reconstruction(experiment_dir, args.rec_id)
else:
manage_reconstruction(experiment_dir)
if __name__ == "__main__":
main(sys.argv[1:])
# python run_reconstruction.py experiment_dir [--rec_id <reconstruction id>]
|
the-stack_0_27697
|
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="ObjectExist.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
class ObjectExist(object):
"""
Object exists
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'exists': 'bool',
'is_folder': 'bool'
}
attribute_map = {
'exists': 'Exists',
'is_folder': 'IsFolder'
}
def __init__(self, exists=None, is_folder=None, **kwargs): # noqa: E501
"""Initializes new instance of ObjectExist""" # noqa: E501
self._exists = None
self._is_folder = None
if exists is not None:
self.exists = exists
if is_folder is not None:
self.is_folder = is_folder
@property
def exists(self):
"""
Gets the exists. # noqa: E501
Indicates that the file or folder exists. # noqa: E501
:return: The exists. # noqa: E501
:rtype: bool
"""
return self._exists
@exists.setter
def exists(self, exists):
"""
Sets the exists.
Indicates that the file or folder exists. # noqa: E501
:param exists: The exists. # noqa: E501
:type: bool
"""
if exists is None:
raise ValueError("Invalid value for `exists`, must not be `None`") # noqa: E501
self._exists = exists
@property
def is_folder(self):
"""
Gets the is_folder. # noqa: E501
True if it is a folder, false if it is a file. # noqa: E501
:return: The is_folder. # noqa: E501
:rtype: bool
"""
return self._is_folder
@is_folder.setter
def is_folder(self, is_folder):
"""
Sets the is_folder.
True if it is a folder, false if it is a file. # noqa: E501
:param is_folder: The is_folder. # noqa: E501
:type: bool
"""
if is_folder is None:
raise ValueError("Invalid value for `is_folder`, must not be `None`") # noqa: E501
self._is_folder = is_folder
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ObjectExist):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
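# Minimal usage sketch (not part of the generated SDK code): exercises the
# serialisation helpers defined above on a locally built instance.
if __name__ == '__main__':
    info = ObjectExist(exists=True, is_folder=False)
    print(info.to_dict())                                      # {'exists': True, 'is_folder': False}
    print(info == ObjectExist(exists=True, is_folder=False))   # True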
|
the-stack_0_27698
|
'''
wget https://doc-0g-4s-docs.googleusercontent.com/docs/securesc/ha0ro937gcuc7l7deffksulhg5h7mbp1/327iep5nmg492g7968am9g08prba2usg/1500897600000/13951467387256278872/*/0Bz7KyqmuGsilT0J5dmRCM0ROVHc?e=download -O vgg16_weights.h5
This script goes along the blog post
"Building powerful image classification models using very little data"
from blog.keras.io.
It uses data that can be downloaded at:
https://www.kaggle.com/c/dogs-vs-cats/data
In our setup, we:
- created a data/ folder
- created train/ and validation/ subfolders inside data/
- created cats/ and dogs/ subfolders inside train/ and validation/
- put the cat pictures index 0-999 in data/train/cats
- put the cat pictures index 1000-1400 in data/validation/cats
- put the dogs pictures index 12500-13499 in data/train/dogs
- put the dog pictures index 13500-13900 in data/validation/dogs
So that we have 1000 training examples for each class, and 400 validation examples for each class.
In summary, this is our directory structure:
data/
train/
dogs/
dog001.jpg
dog002.jpg
...
cats/
cat001.jpg
cat002.jpg
...
validation/
dogs/
dog001.jpg
dog002.jpg
...
cats/
cat001.jpg
cat002.jpg
...
'''
### IMPORTS
from __future__ import print_function
import os
import fnmatch
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.optimizers import RMSprop, Adagrad
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping
import logging
FORMAT = "[%(lineno)4s : %(funcName)-30s ] %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
### GLOBALS
# dimensions of our images.
# img_width = 150
# img_height = 150
img_width = 224
img_height = 224
# dataset_path = 'dataset_dogs_cats'
dataset_path = 'dataset'
dataset_train_path=os.path.join(dataset_path, 'train')
dataset_val_path=os.path.join(dataset_path, 'validation')
dataset_test_path=os.path.join(dataset_path, 'test')
# path to the model weights files.
weights_path = 'weights/vgg16_weights.h5'
top_model_weights_path = 'output/bottleneck_fc_model.h5'
top_model_weights_path = 'output/best-weights-015-0.5636-0.7923.hdf5'
finetune_model_weights_path = 'output/finetune_bottleneck_fc_model.h5'
#epochs = 50
epochs = 5
#batch_size = 16
batch_size = 1
# Count no. of images(.jpg) in a directory
def get_images_count_recursive(path):
matches = []
logging.debug('path {}'.format(path))
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, '*.jpg'):
matches.append(os.path.join(root, filename))
# logging.debug('matches {}'.format(matches))
images_count = len(matches)
return images_count
# nb_train_samples = 2000
# nb_validation_samples = 800
nb_train_samples = get_images_count_recursive(dataset_train_path)
logging.debug('nb_train_samples {}'.format(nb_train_samples))
nb_validation_samples = get_images_count_recursive(dataset_val_path)
logging.debug('nb_validation_samples {}'.format(nb_validation_samples))
if not os.path.exists('output'):
os.makedirs('output')
if not os.path.exists('logs'):
os.makedirs('logs')
# TODO: HARDCODING - Should be same as used during training VGG; Else error (None, None, 512)
input_shape = (img_width, img_height, 3)
# Sorted subdirectories list
def get_subdir_list(path):
names=[]
for name in sorted(os.listdir(path)):
if os.path.isdir(os.path.join(path, name)):
names.append(name)
logging.debug('names {}'.format(names))
return names
class_names = get_subdir_list(dataset_train_path)
logging.debug('class_names {}'.format(class_names))
# build the VGG16 network
base_model = applications.VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
print('Model loaded.')
print(base_model.output_shape) # (None, None, None, 512) if input_shape not given in applications.VGG16
print(base_model.output_shape[1:]) # (None, None, 512)
# build a classifier model to put on top of the convolutional model
top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))
#top_model.add(Dense(len(class_names), activation='softmax')) # Binary to Multi classification changes
# note that it is necessary to start with a fully-trained
# classifier, including the top classifier,
# in order to successfully do fine-tuning
top_model.load_weights(top_model_weights_path)
# add the model on top of the convolutional base
# base_model.add(top_model) # Not working; AttributeError: 'Model' object has no attribute 'add'
model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
print(model.summary())
# set the first 15 layers (everything up to the last conv block)
# to non-trainable (weights will not be updated)
for layer in model.layers[:15]:
layer.trainable = False
# compile the model with a SGD/momentum optimizer and a very slow learning rate.
# rmsprop = RMSprop(lr=1e-4, rho=0.9, epsilon=1e-8, decay=0.0)
# adagrad = Adagrad(lr=1e-4, epsilon=1e-08, decay=0.0)
model.compile(loss='binary_crossentropy',
optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
metrics=['accuracy'])
# # with pre-precessing version
# # Use keras 1.2.2 for preprocessing_function
# # x = 3D tensor version
# def preprocess_input(x):
# # 'RGB'->'BGR'
# x = x[:, :, ::-1]
# # Zero-center by mean pixel
# x[:, :, 0] -= 103.939
# x[:, :, 1] -= 116.779
# x[:, :, 2] -= 123.68
# return x
# train_datagen = ImageDataGenerator(
# preprocessing_function=preprocess_input,
# shear_range=0.2,
# zoom_range=0.2,
# horizontal_flip=True)
# test_datagen = ImageDataGenerator(
# preprocessing_function=preprocess_input)
# prepare data augmentation configuration
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
dataset_train_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary')
# class_mode='sparse') # Binary to Multi classification changes
# class_mode='categorical') # Binary to Multi classification changes
validation_generator = test_datagen.flow_from_directory(
dataset_val_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary')
# class_mode='sparse') # Binary to Multi classification changes
# class_mode='categorical') # Binary to Multi classification changes
# Callbacks
filename = 'output/model_train_finetune.csv'
csv_log = CSVLogger(filename, separator=' ', append=False)
early_stopping = EarlyStopping(
monitor='val_loss', patience=50, verbose=1, mode='min')
filepath = "output/best-weights-finetune-{epoch:03d}-{loss:.4f}-{acc:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
save_best_only=True, save_weights_only=False, mode='min', period=1) # min because we are monitoring val_loss that should decrease
tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)
callbacks_list = [csv_log, early_stopping, checkpoint, tensorboard]
logging.debug('callbacks_list {}'.format(callbacks_list))
# fine-tune the model
model.fit_generator(
train_generator,
samples_per_epoch=nb_train_samples,
epochs=epochs,
validation_data=validation_generator,
nb_val_samples=nb_validation_samples,
callbacks=callbacks_list)
# TODO: These are not the best weights
model.save_weights(finetune_model_weights_path)
|
the-stack_0_27699
|
# -*- encoding: utf-8 -*-
"""
Created by eniocc at 11/10/2020
"""
import ctypes
from py_dss_interface.models.Base import Base
class LineCodesS(Base):
"""
This interface can be used to read/modify the properties of the LineCode Class where the values are Strings.
The structure of the interface is as follows:
CStr LineCodesS(int32_t Parameter, CStr argument)
This interface returns a string, the variable “parameter” is used to specify the property of the class to be used
and the variable “argument” can be used to modify the value of the property when necessary. Reading and writing
properties are separated and require a different parameter number to be executed.
    The properties (parameter) are integer numbers and are described as follows:
"""
def linecodes_read_name(self) -> str:
"""Gets the name of the active LineCode element."""
result = ctypes.c_char_p(self.dss_obj.LineCodesS(ctypes.c_int32(0), ctypes.c_int32(0)))
return result.value.decode('ascii')
def linecodes_write_name(self, argument: str) -> str:
"""Sets the name of the active LineCode element. The new value must be specified in the argument as a string."""
argument = Base.check_string_param(argument)
result = ctypes.c_char_p(self.dss_obj.LineCodesS(ctypes.c_int32(1), argument.encode('ascii')))
return result.value.decode('ascii')
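# Minimal usage sketch (illustrative only, not part of the library): it assumes an
# already initialised DSS object `dss_obj` with a compiled circuit, which is normally
# prepared elsewhere in py_dss_interface; the construction below is hypothetical.
# linecodes = LineCodesS(dss_obj)
# print(linecodes.linecodes_read_name())          # name of the active LineCode
# linecodes.linecodes_write_name("my_linecode")   # "my_linecode" is a made-up name
# print(linecodes.linecodes_read_name())          # now "my_linecode"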
|
the-stack_0_27701
|
import pybullet as p
import time
import math
from datetime import datetime
from numpy import *
from pylab import *
import struct
import sys
import os, fnmatch
import argparse
from time import sleep
def readLogFile(filename, verbose=True):
f = open(filename, 'rb')
print('Opened', filename)
keys = f.readline().decode('utf8').rstrip('\n').split(',')
fmt = f.readline().decode('utf8').rstrip('\n')
# The size in bytes of one record
sz = struct.calcsize(fmt)
# The number of columns in one record
ncols = len(fmt)
if verbose:
print('Keys:', keys)
print('Format:', fmt)
print('Size:', sz)
print('Columns:', ncols)
# Read data
wholeFile = f.read()
# split by alignment word
chunks = wholeFile.split(b'\xaa\xbb')
log = list()
for chunk in chunks:
if len(chunk) == sz:
values = struct.unpack(fmt, chunk)
record = list()
for i in range(ncols):
record.append(values[i])
log.append(record)
return log
#clid = p.connect(p.SHARED_MEMORY)
p.connect(p.GUI)
p.loadURDF("plane.urdf", [0, 0, -0.3])
p.loadURDF("kuka_iiwa/model.urdf", [0, 0, 1])
p.loadURDF("cube.urdf", [2, 2, 5])
p.loadURDF("cube.urdf", [-2, -2, 5])
p.loadURDF("cube.urdf", [2, -2, 5])
log = readLogFile("LOG0001.txt")
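# Note (hedged): a log in this binary format is normally recorded in a separate session,
# for example via pybullet state logging such as
# p.startStateLogging(p.STATE_LOGGING_GENERIC_ROBOT, "LOG0001.txt");
# the exact recording options depend on that script and are not part of this file.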
recordNum = len(log)
itemNum = len(log[0])
print('record num:', recordNum)
print('item num:', itemNum)
for record in log:
Id = record[2]
pos = [record[3], record[4], record[5]]
orn = [record[6], record[7], record[8], record[9]]
p.resetBasePositionAndOrientation(Id, pos, orn)
numJoints = p.getNumJoints(Id)
for i in range(numJoints):
jointInfo = p.getJointInfo(Id, i)
qIndex = jointInfo[3]
if qIndex > -1:
p.resetJointState(Id, i, record[qIndex - 7 + 17])
sleep(0.0005)
|
the-stack_0_27702
|
import ast
import csv
import eccpy.settings as eccpysettings
import eccpy.tools as tools
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sys
# show wide pandas dataframes when using print function
pd.set_option('display.expand_frame_repr', False)
def run_gatherer(settings_excel_file, **kwargs):
""" Gathers and compares EC50 data from multiple experiments.
Collects and analyses the output files from previously executed "run_curvefit".
Processes the datafiles in settings excel file marked as "TRUE" for "run gatherer."
Parameters
----------
settings_excel_file : filepath
Settings file containing the list of datafiles for analysis, and also chosen parameters
savefig_with_longnames : boolean
If True, output figures will be created with both long and short sample names.
Usage
-----
import eccpy
settings = r"C:/path/to/your/settings/file.xlsx"
eccpy.run_gatherer(settings)
Saved Files
-----------
EC50_barchart : Barchart of EC50 values from all experiments.
Only data is included that is judged as good quality (i.e. not labelled as "data_needs_checking").
Four barcharts are created.
1) Original data, long sample names
2) Original data, short sample names
3) Adjusted data (e.g. fixed upper limit), long sample names
4) Adjusted data (e.g. fixed upper limit), short sample names
EC50_datapoints: Scattergram, sample names on x-axis, EC50 on y-axis.
Effectively the same data as the EC50_barchart, except that the datapoints from each
experiment are displayed individually, as in a scattergram. A legend with colours for each experiment
is included. Very useful to determine if the EC50 values from one particular day were uniformly higher or
lower than the EC50 values calculated on the other days.
As with the barchart, four variations are created.
1) Original data, long sample names
2) Original data, short sample names
3) Adjusted data (e.g. fixed upper limit), long sample names
4) Adjusted data (e.g. fixed upper limit), short sample names
Notes
-------
The gather scripts must be run after run_curvefit, and filepaths must not be changed.
The output of the analysis is saved in the following folder:
ORIGINAL_SUBFOLDER_WITH_SETTINGS_EXCEL_FILE/analysed/todays_date/
Running this script will overwrite any previous files with the same name (i.e., analysed on the same day).
The "run_curvefit" program needs to be run to create the relevent output files, before the analysis program
can be started. Shifting the location of the output files will result in an error.
The output files begin as follows:
todays_date_analysed_
todays_date is represented as YEAR|MONTH|DAY, e.g. 20151215.
Figures are usually better displayed if a dictionary of long to short sample names is created. This can be saved
in the settings_excel_file.
Currently, the analysis automatically runs for original and adjusted datasets (e.g. fixed upper limit dataset).
However summary graphs are created separately for each dataset.
"""
print("\nStarting run_gatherer program\n")
# create output folder for analysed data, define basename
outpath, basename = eccpysettings.setup_output_folder(settings_excel_file, "analysed")
analysed_data_basename = os.path.join(outpath, basename)
# add the relevant paths to the data files to the dataframe for files (dff)
settings, dff, df_samplenames = eccpysettings.read_settings_file(settings_excel_file)
# set the long sample name as the index
df_samplenames.set_index("long name", inplace = True)
# create t20 colour list
t20 = tools.setup_t20_colour_list()
# extract list of adjusted datasets for analysis
datasets = ast.literal_eval(settings["datasets"])
"""
COLLECT THE EC50 VALUES FROM ALL THE OUTPUT FILES
"""
# fix the suffixes denoting the datasets (_orig for original data, _ful for fixed upper limit)
# if any of the files are labelled True for "run gatherer"
if True in list(dff.loc[:, "run gatherer"]):
# create an empty dataframe to hold the average EC50 values
dfc = pd.DataFrame()
# create another dataframe to hold all the boolean data
dfcb = pd.DataFrame()
# create dataframe for all individual EC50 datapoints (df_allp), including samples repeated in a single experiment
df_allp = pd.DataFrame()
print("Analysing data from multiple experiments. List of experiments analysed :")
# iterate through only the files labelled "True" for "run gatherer", and join all output dataframes together
for fn in dff.loc[dff["run gatherer"] == True].index:
# define the response data file
data_file = dff.loc[fn, "response data file"]
print(data_file)
# if it is a real file, open
if os.path.isfile(dff.loc[fn,"ofd_EC50_eval_excel"]):
# open as a new pandas dataframe
df_eval_values = pd.read_excel(dff.loc[fn,"ofd_EC50_eval_excel"], sheet_name="v_" + data_file[:20])
# add the sample name to the dataframe, so it can be identified later
df_eval_values["file"] = dff.loc[fn,"ofd_EC50_eval_excel"]
# convert the sample_name column to a string datatype (forcing np.nan to be "nan")
df_eval_values["sample_name"] = df_eval_values["sample_name"].astype(str)
# drop any rows that contain "nan" as the sample name
df_eval_values = df_eval_values.loc[df_eval_values["sample_name"] != "nan"]
# join the dataframe with all previously joined dataframes
dfc = pd.concat([dfc,df_eval_values], axis=0)
# open the tab of the summary excel file that contains all the boolean values
df_eval_bool = pd.read_excel(dff.loc[fn,"ofd_EC50_eval_excel"], sheet_name="b_" + data_file[:20])
# join the dataframe with all previously joined dataframes
dfcb = pd.concat([dfcb,df_eval_bool], axis=0)
# set the sample_name as the index
df_eval_values = df_eval_values.set_index("sample_name")
# iterate through datasets (save in the same df, by adding suffix to the column name)
for d in datasets:
# change the dataset name (e.g. "_orig" to "") to an empty string if there is only one dataset for analysis
d_name = "" if len(datasets) == 1 else d
# define the column name in the dataframe
col_EC50 = "EC50" + d
# select data which "seems okay" according the the automatic data analysis
df_eval_values_OK = df_eval_values.loc[df_eval_values["data_seems_okay" + d] == True]
# create a list of unique sample names
unique_names = list(df_eval_values_OK.index.unique())
# create a new dataframe, called df_eval_uniq, which has a single row for each unique sample
df_eval_uniq = pd.DataFrame().astype(object)
for sn in unique_names:
# select only the data for that sample
df_sel = df_eval_values_OK.loc[sn,:]
# if there is only one sample, the selected data will form a series
if isinstance(df_sel, pd.Series):
# add the n, the EC50, and the std
df_eval_uniq.loc[sn,data_file + d] = df_sel["EC50{}".format(d)]
# if the name is not unique, the selected data will form a dataframe.
elif isinstance(df_sel, pd.DataFrame):
# transfer the EC50 values as a stringlist
df_eval_uniq.loc[sn,data_file + d] = str(["%0.2f"%l for l in df_sel[col_EC50]])
else:
raise TypeError("expected a series or dataframe.")
# add the dataframe containing _orig and other dataset columns for that exp to the final df with all data
df_allp = pd.concat([df_allp,df_eval_uniq], axis=1)
else:
print("File not found! {}".format(dff.loc[fn,"ofd_EC50_eval_excel"]))
print("\nPercentage data okay:")
if dfc.empty:
raise ValueError("No data collected for analysis! Double-check that run_curvefit program has been"
" carried out for all the relevant files")
for d in datasets:
vc = dfc["data_seems_okay{}".format(d)].value_counts()
if True in vc:
n_data_okay = vc[True]
else:
n_data_okay = 0
if False in vc:
n_data_not_okay = vc[False]
else:
n_data_not_okay = 0
perc_data_okay = n_data_okay / (n_data_okay + n_data_not_okay)*100
print("{b:0.0f}% ({a} dataset)".format(a=d[1:], b=perc_data_okay))
# select only the data labeled as "data_seems_okay"
# save the current index as the sample letter
dfc["sLet"] = dfc.index
# convert the index to the sample name
dfc.index = dfc.sample_name
# create a new dataframe, called dfm, which has a single row for each unique sample, and contains MEAN values
dfm = pd.DataFrame()
for d in datasets:
# select only the data labeled as "data_seems_okay"
series_data_seems_okay = dfc["data_seems_okay{}".format(d)] == True
dfc_ok = dfc.loc[series_data_seems_okay]
# create list of unique sample names, where data is available
list_unique_sample_names = list(dfc_ok.sample_name.dropna().unique())
for sn in list_unique_sample_names:
# select data for that one sample, resulting in either a series, or dataframe (indicating mult. exper.)
data_1_sample_name = dfc_ok.loc[sn,:]
# if there is only one sample, df_sel will be a series.
if isinstance(data_1_sample_name, pd.Series):
# add the n, the EC50, and the std
dfm.loc[sn,"n{}".format(d)] = 1
dfm.loc[sn,"mean{}".format(d)] = data_1_sample_name["EC50{}".format(d)]
dfm.loc[sn,"std{}".format(d)] = 0
dfm.loc[sn,"SEM{}".format(d)] = 0
# if there are multiple datapoints with the same name, df_sel will be a DataFrame
elif isinstance(data_1_sample_name, pd.DataFrame):
# add the n, the mean EC50 of the samples, and the std
dfm.loc[sn,"n{}".format(d)] = data_1_sample_name["EC50{}".format(d)].shape[0]
dfm.loc[sn,"mean{}".format(d)] = data_1_sample_name["EC50{}".format(d)].mean()
dfm.loc[sn,"std{}".format(d)] = data_1_sample_name["EC50{}".format(d)].std()
dfm.loc[sn,"SEM{}".format(d)] = data_1_sample_name["EC50{}".format(d)].sem()
# convert longnames in index to a new column
dfm["longname"] = dfm.index
df_allp["longname"] = df_allp.index
# create a dictionary from the long and short sample names
samplenames_dict = dict(zip(df_samplenames.index, df_samplenames["short name"]))
# create a new column with the relevant shortnames for each sample, based on the samplenames_dict
dfm["shortname"] = dfm["longname"].apply(lambda x : samplenames_dict[x] if x in list(samplenames_dict.keys()) else x)
df_allp["shortname"] = df_allp["longname"].apply(lambda x : samplenames_dict[x] if x in list(samplenames_dict.keys()) else x)
##########################################################################################
# reorder the samples according to desired order in the settings file #
##########################################################################################
# sort original index, in case
dfm.sort_index(inplace=True)
df_allp.sort_index(inplace=True)
# create a dictionary of the sample order
sampleorder_dict = dict(zip(df_samplenames.index, df_samplenames["order in figure"]))
# create a new column with the preferred sample order, if available in settings file.
dfm["sampleorder"] = dfm.longname.apply(lambda x : sampleorder_dict[x] if x in list(sampleorder_dict.keys()) else np.nan)
df_allp["sampleorder"] = df_allp.longname.apply(lambda x : sampleorder_dict[x] if x in list(sampleorder_dict.keys()) else np.nan)
# sort by sample order
dfm.sort_values(by="sampleorder", inplace=True)
df_allp.sort_values(by="sampleorder", inplace=True)
# save the dataframe with all mean data from all experiments to a csv
# dfm.to_csv(analysed_data_basename + "_EC50_mean" + ".csv", sep=",", quoting=csv.QUOTE_NONNUMERIC)
df_allp.to_csv(analysed_data_basename + "_EC50_indiv_exp" + ".csv", sep=",", quoting=csv.QUOTE_NONNUMERIC)
# save both dataframes (mean data and indiv datapoints) from all experiments to excel
writer = pd.ExcelWriter(analysed_data_basename + ".xlsx")#engine='xlsx'
# dfm.to_excel(writer, sheet_name = "EC50_mean")
df_allp.to_excel(writer, sheet_name="EC50_indiv_exp")
# sort the columns
df_allp.sort_index(axis=1, inplace=True)
# replace index of df_allp with a simple range of integers
df_allp.index = range(df_allp.shape[0])
# give the index a name, indicating sample numbers
df_allp.index.name = "sSnum"
# set the fontsize
if df_allp.shape[0] < 10:
fontsize = 10
else:
fontsize = 6
plt.rcParams['font.size'] = fontsize
# set matplotlib legend parameters
plt.rcParams['legend.numpoints'] = 3
# iterate through the datasets (e.g. _orig, _ful)
for d in datasets:
col_mean = "mean" + d
col_std = "std" + d
col_SEM = "SEM" + d
# create a subset to plot that contain data
df_for_barchart = dfm.dropna(subset = [col_mean]).copy()
# # use the dictionary of long to short names to obtain the short name for the datapoints
# df_for_barchart["longname"] = df_for_barchart.index
# df_for_barchart["shortname"] = df_for_barchart.longname.apply(lambda x : samplenames_dict[x] if x in list(samplenames_dict.keys()) else x)
# setup normalisation by checking if a standard is listed in the settings file
conduct_normalisation, list_norm_datasets = setup_normalisation(df_samplenames, df_allp)
# identify the df_allp columns associated with that dataset
col_contains_d = list(pd.Series(df_allp.columns).apply(lambda x: d in x))
# select only data for that dataset (e.g. only orig data)
sel_df_allp = df_allp.loc[:,col_contains_d]
# create separate figures for the long names, and the short names
for norm_dataset in list_norm_datasets:
sys.stdout.write(".")
sys.stdout.flush()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# #
# Analysis Scattergram, individual datapoints for multiple experiments #
# by dataset (ful/orig), by name (long,short), by normalisation (orig/norm) #
# #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# _________
# |XXXXXXXXX| Full Canvas
# |XXXXXXXXX|
# |XXXXXXXXX|
# |XXXXXXXXX|
# create a scattergram figure object
scat_fig, scat_ax = plt.subplots()
# create an lists to hold the individual sample numbers (x-values) from all experiments
xvalues_all_exp = []
# create an lists to hold the individual datapoints (y-values) from all experiments
yvalues_all_exp = []
# create a string for the dose units (e.g. "mg/L", or "% positive control")
if norm_dataset == "":
dose_units = settings["x-axis (dose) units"]
elif norm_dataset == "_normalised":
samplenames_selected = df_samplenames.loc[df_allp.longname, :]
# find the sample longname that is marked "True" as a standard
standard_name = samplenames_selected[samplenames_selected["standard for normalisation?"] == True].index[0]
# check if the standard for normalisation has a short name
if standard_name in samplenames_selected.index:
standard_name_short = samplenames_selected.loc[standard_name, "short name"]
if not isinstance(standard_name_short, str):
raise ValueError("Standard for normalisation appears more than once in 'samplenames' tab of excel settings file.\n Samplenames:\n{}".format(standard_name_short))
else:
# use the long name
standard_name_short = standard_name
# create a string to extend the y-axis label
dose_units = "% {}".format(standard_name_short)
else:
raise TypeError("dataset for standardisation{} is not recognised".format(norm_dataset))
ylabel_str = "{a}{b}, {c} ({d})".format(a=settings["calculation_type"],
b=str(settings["percentage_response"]),
c=settings["x-axis (dose) label"],
d=dose_units)
# drop the non-numerical columns
list_cols_to_drop = ["longname", "sampleorder", "shortname"]
for col in list_cols_to_drop:
if col in sel_df_allp.columns:
sel_df_allp.drop(col, axis=1, inplace=True)
# iterate through the experiment number (exp_nr) and columns (c) in the dataframe
# Each column represents a single experiment
for exp_nr, c in enumerate(sel_df_allp.columns):
data_colour = t20[exp_nr]
sys.stdout.write(".")
sys.stdout.flush()
# convert any stringlists to lists
df_allp.loc[:,c] = df_allp.loc[:,c].apply(lambda x: ast.literal_eval(x) if isinstance(x,str) else x)
# convert any lists of strings to lists of floats
df_allp.loc[:,c] = df_allp.loc[:,c].apply(lambda x: [float(s) for s in x] if isinstance(x,list) else x)
# convert any lists of floats to lists of numpy arrays
df_allp.loc[:,c] = df_allp.loc[:,c].apply(lambda x: np.array(x) if isinstance(x,list) else x)
####################################################################################################
# NORMALISATION TO A STANDARD FOR EACH EXPERIMENT #
# a) find the standard sample number #
# b) find the standard #
# c) divide all datapoints by the standard (or mean of standard, if duplicated on that day)#
# d) multiply by 100 to give a percentage #
####################################################################################################
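# e.g. (made-up numbers): if the standard's mean EC50 in that experiment is 4.0 and a
# sample's EC50 is 3.0, the normalised value stored for that sample is 3.0/4.0*100 = 75.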
if norm_dataset == "":
series_EC50_data = df_allp.loc[:,c].dropna()
elif norm_dataset == "_normalised":
# find the sample longname that is marked "True" as a standard
standard_name = samplenames_selected[samplenames_selected["standard for normalisation?"] == True].index[0]
# find the sSNum (index number) of the standard
standard_sSnum = df_allp[df_allp.longname == standard_name].index[0]
# calculated the average EC50 for the standard
standard_EC50 = np.mean(df_allp.loc[standard_sSnum, c])
if standard_EC50 != 0.0:
# calculate the normalised EC50 as a percentage (EC50/standard*100)
series_EC50_data = df_allp.loc[:,c]/standard_EC50*100
series_EC50_data.dropna(inplace=True)
else:
print("Normalisation for experiment {} cannot be carried out, as the EC50 of the standard is 0.0. ".format(c))
break
########################################################################################
# #
# Add values to a "scatterplot" which has the sample number on the x-axis, #
# and the EC50 on the y-axis. #
# For duplicate samples in that experiment, it's a bit tricky!! #
# a) find data with duplicates [S1_EC50_1,S1_EC50_2] #
# b) iterate through each sample with duplicates #
# c) create matching list of sample numbers [S1,S1],[S1_EC50_1,S1_EC50_2] #
# d) add to list of x-values and y-values from the samples with duplicates #
# x=[S1,S1,S2,S2,S5,S5], #
# y=[S1_EC50_1,S1_EC50_2,S2_EC50_1,S2_EC50_2,S5_EC50_1,S5_EC50_2] #
# e) append(extend) to the list of samples #
# x=[S2,S3,S4,S1,S1,S2,S2,S5,S5], #
# y=[S2_EC50_1,S3_EC50_1,S4_EC50_1,S1_EC50_1,S1_EC50_2, #
# S2_EC50_1,S2_EC50_2,S5_EC50_1,S5_EC50_2] #
# f) plot as a scattergram, so multiple datapoints are on each sample #
# g) append (extend) the sample names and datapoints to a list #
# containing data from all experiments combined #
# h) move and repeat for the next experiment #
# i) after iteration through all experiments, use collected list to #
# calculate the mean values (whether raw or normalised) for barchart #
# #
########################################################################################
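# Tiny concrete example of the scheme above (made-up numbers): if sample S1 was measured
# twice in one experiment with EC50s [2.1, 2.3] and S2 once with 1.7, the combined lists
# become x=[S2, S1, S1] and y=[1.7, 2.1, 2.3] before plotting the scattergram.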
# set the transparency
alpha = 0.5
# create a series of bools, describing if the data is a float
is_float_series = series_EC50_data.apply(lambda x: isinstance(x,float))
# use boolean series to select the float data
values_single_sample = series_EC50_data.loc[is_float_series]
# create list of x-values (dose) and y-values (response) from the data with single samples
xvalues_from_single = list(values_single_sample.index)
yvalues_from_single = list(values_single_sample)
# create a series of bools, describing if the data is a list
is_array_series = series_EC50_data.apply(lambda x: isinstance(x,np.ndarray))
# if some of the datapoints are lists (multiple sample replicates in a single experiment)
if True in list(is_array_series):
# use boolean series to select the data with lists
values_mult_sample = series_EC50_data.loc[is_array_series]
# the multiple EC50 values belong to a single sample number. Identify list of sample numbers.
index_ssNum_with_lists = values_mult_sample.index.tolist()
# create a list of xvalues for the scattergram, for multiple samples per day
list_xvalues_from_mult = []
# create a list of yvalues for the scattergram, for multiple samples per day
list_yvalues_from_mult = []
# iterate through each selected sample number, associated with multiple EC50 values
for sSnum in index_ssNum_with_lists:
list_EC50_values = list(values_mult_sample[sSnum])
# create corresponding x-axis sample numbers (array filled with sSnum, one entry per EC50 value in the list)
list_index_values = list(np.ones(len(list_EC50_values)).astype(np.int64)*sSnum)
# add x and y values to the lists for all samples
list_xvalues_from_mult.extend(list_index_values)
list_yvalues_from_mult.extend(list_EC50_values)
# add the x and y-values from the multiple samples per experiment to the single sample data
xvalues_all_single_exp = xvalues_from_single + list_xvalues_from_mult
yvalues_all_single_exp = yvalues_from_single + list_yvalues_from_mult
else:
# there is no data from multiple samples in an experiment, therefore all data comes from single
xvalues_all_single_exp = xvalues_from_single
yvalues_all_single_exp = yvalues_from_single
# plot the float data as a scattergram (x-axis is the range, resembling a bar or line chart)
scat_ax.scatter(xvalues_all_single_exp, yvalues_all_single_exp, color=data_colour, s=40,
alpha=alpha, label=c[:-5])
# add the values from that experiment to a list
xvalues_all_exp.extend(xvalues_all_single_exp)
yvalues_all_exp.extend(yvalues_all_single_exp)
#######################################################
# format_and_save_analysis_scatterplot #
#######################################################
settings_name = os.path.split(settings_excel_file)[1]
# set the xticks and labels to match the index of df_allp
scat_ax.set_xticks(np.arange(df_allp.shape[0]))
# set the grid to go in between the sample names, as minor xticks
scat_ax.set_xticks(np.arange(df_allp.shape[0])+0.5, minor=True)
scat_ax.grid(which='minor', alpha=0.9)
# set the x axis limits
scat_ax.set_xlim(-0.5, df_allp.shape[0]-0.5)
# set the y-axis title
scat_ax.set_ylabel(ylabel_str)
if "savefig_with_longnames" in kwargs.keys():
if kwargs["savefig_with_longnames"] == True:
list_nametypes = ["longname", "shortname"]
else:
list_nametypes = ["shortname"]
else:
list_nametypes = ["shortname"]
for nametype in list_nametypes:
# add legend
if nametype == "longname":
scat_lgd = scat_ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), ncol=1,
scatterpoints=1,numpoints=1, borderaxespad=1,
fontsize=fontsize)# mode="expand",
elif nametype == "shortname":
scat_lgd = scat_ax.legend(loc='upper center', bbox_to_anchor=(0.5,-0.15), ncol=2,
scatterpoints=1,numpoints=1, fontsize=fontsize)
# define the x-axis labels (long or short)
scat_xticklabels = list(df_allp[nametype])
scat_ax.set_xticklabels(scat_xticklabels, rotation=90)
scat_ax.set_title("analysed data ({e} experiments), {b}".format(b=d_name,e=sel_df_allp.shape[1]))
# save scatter figure
scat_fig.savefig(analysed_data_basename + "_datapoints" + d_name + norm_dataset + '.png',
format='png', dpi=300,bbox_extra_artists=(scat_lgd,), bbox_inches='tight')
if settings["save_as_pdf"] in (True, "TRUE"):
scat_fig.savefig(analysed_data_basename + "_datapoints" + d_name + norm_dataset + '.pdf',
format='pdf', bbox_extra_artists=(scat_lgd,), bbox_inches='tight')
# create figure object for the barchart with normalised data (collected from the scatter data)
#barnorm_fig, barnorm_ax = plt.subplots()
series_all_exp_redundant = pd.Series(yvalues_all_exp, index = xvalues_all_exp)
# convert index to integer format
series_all_exp_redundant.index = series_all_exp_redundant.index.astype(np.int64)
# from the redundant index, create a unique, sorted index of sample numbers
series_all_exp_redundant_index_uniq = pd.Series(series_all_exp_redundant.index.unique())
series_all_exp_redundant_index_uniq = series_all_exp_redundant_index_uniq.sort_values()
df_all_exp_nonredundant = pd.DataFrame(index=series_all_exp_redundant_index_uniq)
for sSnum_b in series_all_exp_redundant_index_uniq:
data_for_that_sSnum = series_all_exp_redundant.loc[sSnum_b]
if isinstance(data_for_that_sSnum, pd.Series):
n_samples = data_for_that_sSnum.shape[0]
else:
n_samples = 1
df_all_exp_nonredundant.loc[sSnum_b, "n{}{}".format(d, norm_dataset)] = n_samples
df_all_exp_nonredundant.loc[sSnum_b, "mean{}{}".format(d,norm_dataset)] = data_for_that_sSnum.mean()
df_all_exp_nonredundant.loc[sSnum_b, "std{}{}".format(d,norm_dataset)] = data_for_that_sSnum.std()
df_all_exp_nonredundant.loc[sSnum_b, "SEM{}{}".format(d,norm_dataset)] = pd.Series(data_for_that_sSnum).sem()
# extract the original longnames from dfm
longname_series = dfm.longname
longname_series.index = range(len(longname_series))
# add the longnames to df_all_exp_nonredundant (index should be sample number)
df_all_exp_nonredundant["longname"] = longname_series
# extract the shortnames from the dictionary in the settings file, if available
df_all_exp_nonredundant["shortname"] = df_all_exp_nonredundant["longname"].apply(lambda x : samplenames_dict[x] if x in list(samplenames_dict.keys()) else x)
#######################################################
# DEPRECATED BARCHART THAT HAD AN INDEXING ERROR #
#######################################################
#
# # count the number of samples (number of boxes in barchart)
# bar_x_n_boxes = df_all_exp_nonredundant.shape[0]
# # set the range of number of boxes as the indices
# box_indices = range(bar_x_n_boxes)
# # define error bar parameters
# bar_error_kw = dict(ecolor='k', lw=1, capsize=2, capthick=1)
#
# # plot the data as a barchart
# barcontainer_norm = barnorm_ax.bar(box_indices,
# df_all_exp_nonredundant["mean{}{}".format(d,norm_dataset)],
# yerr=df_all_exp_nonredundant["SEM{}{}".format(d,norm_dataset)],
# align="center",
# error_kw=bar_error_kw, color = '#1b9e77')
# # set the xticks
# barnorm_ax.set_xticks(box_indices)
#
# # set the limits of the x-axis
# barnorm_ax.set_xlim([-1, bar_x_n_boxes])
# # set the limit of the y-axis
# barnorm_ax.set_ylim(0)
# # set the y-axis title
# # bar_ax.set_ylabel("EC50 (ug/ml)")
# # set the ylabel extension string
# if norm_dataset == "":
# dose_units = settings["x-axis (dose) units"]
# elif norm_dataset == "_normalised":
# samplenames_selected = df_samplenames.loc[df_allp.longname, :]
# # find the sample longname that is marked "True" as a standard
# standard_name = samplenames_selected[samplenames_selected["standard for normalisation?"] == True].index[0]
# # check if the standard for normalisation has a short name
# if standard_name in samplenames_selected.index:
# standard_name_short = samplenames_selected.loc[standard_name, "short name"]
# else:
# # use the long name
# standard_name_short = standard_name
# # create a string to extend the y-axis label
# dose_units = "% {}".format(standard_name_short)
# else:
# raise TypeError("dataset for standardisation{} is not recognised".format(norm_dataset))
# ylabel_str = "{a}{b}, {c} ({d})".format(a=settings["calculation_type"],
# b=str(settings["percentage_response"]),
# c=settings["x-axis (dose) label"],
# d=dose_units)
# barnorm_ax.set_ylabel(ylabel_str)
#
# for nametype in list_nametypes:
# # define name on the x-axis
# barnorm_x_names = df_for_barchart[nametype]
# # set the labels of the x-bar_axis
# barnorm_ax.set_xticklabels(barnorm_x_names, rotation=90)
#
# # ax.annotate(text="%s%s" % (nametype,d), xy=(0.015,0.93), fontsize=af, xycoords=xyc)
# barnorm_ax.set_title("analysed data ({e} experiments), {b}".format(b=d_name,
# e=dff.loc[dff["run gatherer"] == True].shape[0]))
# # automatically tighten the layout and save figure
# barnorm_fig.tight_layout()
# # save the figure
# barnorm_fig.savefig(analysed_data_basename + "_bar" + d_name + norm_dataset + '.png', format='png', dpi=300)
# if settings["save_as_pdf"] in (True, "TRUE"):
# barnorm_fig.savefig(analysed_data_basename + "_bar" + d_name + norm_dataset + '.pdf', format='pdf')
# plt.close('all')
# move sample names to index
df_all_exp_nonredundant["sample_number"] = df_all_exp_nonredundant.index
df_all_exp_nonredundant.set_index("shortname", inplace=True)
df_all_exp_nonredundant.sort_values("sample_number", inplace=True)
#######################################################
# NEW BARCHART BASED ON UNIQUE SAMPLENAMES #
#######################################################
fig, ax = plt.subplots()
mean_ser = df_all_exp_nonredundant["mean{}{}".format(d_name,norm_dataset)]
SEM_ser = df_all_exp_nonredundant["SEM{}{}".format(d_name,norm_dataset)]
mean_ser.plot(kind="bar", ax=ax, yerr=SEM_ser)
ax.set_title("analysed data ({e} experiments), {b}".format(b=d_name, e=dff.loc[dff["run gatherer"] == True].shape[0]))
ylabel_str = "{a}{b} {c}\n({d})".format(a=settings["calculation_type"],
b=str(settings["percentage_response"]),
c=settings["x-axis (dose) label"],
d=dose_units)
ax.set_ylabel(ylabel_str)
ax.set_xlabel("")
fig.tight_layout()
fig.savefig(analysed_data_basename + "_bar" + d_name + norm_dataset + '.png', format='png', dpi=300)
if settings["save_as_pdf"] in (True, "TRUE"):
fig.savefig(analysed_data_basename + "_bar" + d_name + norm_dataset + '.pdf', format='pdf')
plt.close('all')
#######################################################
# SAVE TO EXCEL AND CSV #
#######################################################
# revert back to longname for csv and excel
df_all_exp_nonredundant.set_index("longname", inplace=True)
# save to excel
df_all_exp_nonredundant.to_excel(writer, sheet_name = "EC50_mean{}{}".format(d_name,norm_dataset))
df_all_exp_nonredundant.to_csv(analysed_data_basename + "_EC50_mean{}{}".format(d_name,norm_dataset) + ".csv",
sep=",", quoting=csv.QUOTE_NONNUMERIC)
writer.save()
writer.close()
print('\nCollection and analysis of data from multiple experiments is finished.\n'
'Output files are saved in the following directory:\n{}'.format(outpath))
else:
print("\nNo files are selected for the run_gatherer program. Double-check TRUE/FALSE columns in settings file.")
def setup_normalisation(df_samplenames, df_allp):
""" Sets up the normalisation of data for that experiment relative to a control.
Checks to see that a single sample is labelled TRUE for "standard for normalisation?" in the settings file.
Parameters
----------
df_samplenames : pandas DataFrame
Dataframe containing the samplenames and column identifying the sample for normalisation.
df_allp : pandas Dataframe
Dataframe for all individual EC50 datapoints from all experiments marked as "TRUE" in the run_gatherer
column of the settings file. Includes individual values for samples found twice in a single experiment.
index : unique sample numbers (sSnum)
columns: experiment names (e.g. "Mon_23.05", "Tue_24.05", "Wed_25.05")
values : EC50 values, or lists of EC50 values as strings
Returns
-------
conduct_normalisation : boolean
If True, all necessary requirements for normalisation OF THAT DATASET are found, and it should proceed.
list_norm_datasets : list
Suffix for the datasets, e.g. ["", "_normalised"] if conduct_normalisation is True
If conduct_normalisation is False, will contain a suffix for a single, non-normalised dataset, [""]
"""
# if any of the sample names with valid EC50 data extracted from output files are listed in the settings file
dataset_contains_samples_in_settings_file = bool(set(df_allp.longname).intersection(set(df_samplenames.index)))
if dataset_contains_samples_in_settings_file:
# select the relevant sample names in the settings file
samplenames_selected = df_samplenames.loc[df_allp.longname, :]
# if any of the samples are labelled as a standard
if True in list(samplenames_selected["standard for normalisation?"]):
# determine number of samples labelled True as the "standard for normalisation?"
n_standards_labelled_as_True = samplenames_selected["standard for normalisation?"].value_counts()[True]
# if a single sample is labelled as the standard
if n_standards_labelled_as_True == 1:
# this particular experiment can be normalised to a standard
conduct_normalisation = True
# prepare suffixes for the filenames that will include both non-normalised and normalised data
list_norm_datasets = ["", "_normalised"]
elif n_standards_labelled_as_True > 1:
raise ValueError("Multiple samples are labelled as standards for normalisation. "
"Only a single sample can be labelled as a standard."
"Please check the samplenames tab of your settings file.")
else:
conduct_normalisation = False
list_norm_datasets = [""]
else:
conduct_normalisation = False
list_norm_datasets = [""]
print("Note: Full sample names were used in the output graphs. \n If you want to shorten sample names, normalise to a control, "
" or reorder samples in the output graph, please check the samples tab of the settings file.")
return conduct_normalisation, list_norm_datasets
|
the-stack_0_27704
|
from typing import Optional
from discord.ext import commands
from bot import ModmailBot
from core import checks
from core.models import PermissionLevel
class Tagging(commands.Cog):
"""A plugin that enables mods to prefix the thread name with a tag."""
def __init__(self, bot: ModmailBot):
self.bot = bot
@checks.has_permissions(PermissionLevel.SUPPORTER)
@commands.command()
@checks.thread_only()
async def tag(self, ctx: commands.Context, tag: Optional[str]) -> None:
"""
Append a tag at the beginning of the channel name.
Using the command without any argument will reset it.
"""
clean_name = ctx.channel.name.split("|", maxsplit=1)[-1]
if tag:
name = f"{tag}|{clean_name}"
else:
name = clean_name
await ctx.channel.edit(name=name)
await ctx.message.add_reaction("\u2705")
def setup(bot: ModmailBot) -> None:
"""Add the Tagging plugin."""
bot.add_cog(Tagging(bot))
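# Example behaviour (illustrative names): in a thread channel called "user-1234",
# running the tag command with "urgent" renames it to "urgent|user-1234", and running
# it again with no argument restores "user-1234".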
|
the-stack_0_27705
|
"""
There are a few important sets of datastructures:
dimensions
* N - Size of the dstore.
* K - Number of retrieved neighbors.
* D - Size of the key vectors.
dstore - This is the "ground truth" source of keys, values, and other important
items created by the KNN-LM.
* dstore_keys.npy - The vectors. NxD
* dstore_vals.npy - The source token. Note: These are NOT the values used in the KNN-LM paper. Nx1
* dstore_tgts.npy - The target token. Note: These ARE the values used in the KNN-LM paper. Nx1
* dstore_prob.npy - The predicted probability of the target token. This can be used to compute perplexity of the non-retrieval model. Nx1
lookup - This is a cache of retrieved neighbors on a subset of the dstore.
* lookup_knns.npy - The retrieved neighbors. NxKx1
* lookup_dist.npy - The approximate distance determined by product quantization and faiss. NxKx1
* lookup_done.npy - We only compute `knns` and `dist` for a subset of the data, and we can use `done` to keep track
of which rows we did this for. If `done` is 1, then the row has been computed, and 0 otherwise. Nx1
"""
import argparse
import collections
import json
import os
import sys
from vocab import Dictionary
import numpy as np
import torch
from tqdm import tqdm
_my_globals = {}
class RunOriginal:
def run(self, dstore, vocab, mask=None, mask_b=None, tag=None):
if mask is None:
p = dstore.prob[:].copy()
else:
p = dstore.prob[:].copy()[mask]
p_ = torch.from_numpy(p).float()
ppl = EvalUtil.eval_ppl(p_)
out = {}
out['ppl'] = ppl
out['cfg'] = str(None)
out['desc'] = 'original[shape={}]'.format(p.shape[0])
return out
class RunKNNLM:
def __init__(self, cfg):
self.cfg = cfg
self.use_exact = False
self.flip_distance = True
self.sort = True
for k, v in cfg.items():
setattr(self, k, v)
def run(self, dstore, vocab, mask=None, mask_b=None, tag=None):
if mask is None:
p = dstore.prob[:].copy()
dist = dstore.dist[:].copy()
knn_tgts = dstore.knn_tgts[:].copy()
tgts = dstore.tgts[:].copy()
else:
p = dstore.prob[:].copy()[mask]
dist = dstore.dist[:].copy()[mask]
knn_tgts = dstore.knn_tgts[:].copy()[mask]
tgts = dstore.tgts[:].copy()[mask]
if self.use_exact:
dist = dstore.exact[:].copy()
if mask is not None:
dist = dist[mask]
if self.flip_distance:
dist = -dist
index = None
if self.sort:
assert len(dist.shape) == 3
index = np.argsort(dist, axis=1)[:, ::-1]
dist = np.take_along_axis(dist, index, axis=1)
knn_tgts = np.take_along_axis(knn_tgts, index, axis=1)
p_ = torch.from_numpy(p).float()
original_ppl = EvalUtil.eval_ppl(p_)
best_val = None
best_knn_p = None
best_cfg = None
limits_to_check = 8
limit_size = self.k // limits_to_check
limits_to_check_lst = [i * limit_size for i in range(1, limits_to_check)]
if not self.find_best_lim:
limits_to_check_lst = [self.k]
coeff_lst = np.arange(20) / 20
for lim in limits_to_check_lst:
dist_ = dist[:, :lim]
knn_tgts_ = knn_tgts[:, :lim]
knn_p = EvalUtil.get_knn_log_prob(tgts, dist_, knn_tgts_)
knn_p_ = torch.from_numpy(knn_p).float()
for coeff in coeff_lst[1:]:
new_p = EvalUtil.combine_knn_and_vocab_probs(
knn_p_,
p_,
coeff)
ppl = EvalUtil.eval_ppl(new_p)
#print('lim={} coeff={} ppl={}'.format(lim, coeff, ppl))
if best_val is None or ppl < best_val:
best_val = ppl
best_knn_p = knn_p
best_cfg = (lim, coeff)
out = {}
out['tgts'] = tgts
out['knn_tgts'] = knn_tgts
out['dist'] = dist
out['knn_p'] = best_knn_p
out['p'] = p
out['ppl'] = best_val
out['index'] = index
out['cfg'] = str(tuple(best_cfg))
desc = self.cfg.copy()
desc['n'] = p.shape[0]
out['desc'] = 'knn-lm[{}]'.format(desc)
if tag is not None:
out['desc'] += '[{}]'.format(tag)
return out
def main(args):
dstore = Dstore(args.dstore, args.dstore_size, 1024)
dstore.initialize()
dstore.add_neighbors(args.lookup, args.lookup_k)
dstore.add_exact(args.lookup, args.lookup_k)
#dstore.add_annotations(args.dstore)
tgts = dstore.tgts[:]
knn_tgts = dstore.knn_tgts[:, :args.k]
label = (knn_tgts == tgts.reshape(-1, 1, 1)).astype(np.int)
print('read vocab')
vocab = Dictionary()
vocab.add_from_file(args.vocab)
vocab.finalize()
print('found {} tokens'.format(len(vocab)))
print('')
def print_results(out, baseline):
if isinstance(out, (list, tuple)):
for x in out:
print_results(x)
return
diff = 0
if baseline is not None:
diff = out['ppl'] - baseline
print('{:.4f} {:.4f} {:<16} {}'.format(diff, out['ppl'], out['cfg'], out['desc']))
def find_occurs_gte_pivot(vocab, count):
self = vocab
for i, count_ in enumerate(self.count):
if i < vocab.nspecial:
continue
if count >= count_:
return i
return None
freq_list = [10**i for i in range(1, 8)]
for freq in freq_list:
piv = find_occurs_gte_pivot(vocab, freq)
mask = np.logical_and(tgts >= vocab.nspecial, tgts < piv)
lt = mask.sum().item()
gte = (mask == False).sum().item()
print('freq = {}, piv = {}, lt = {}, gte = {}'.format(freq, piv, lt, gte))
print('')
out_baseline = RunOriginal().run(dstore, vocab)
baseline = out_baseline['ppl']
print_results(out_baseline, None)
res_approx = RunKNNLM(dict(k=1024, find_best_lim=False, use_exact=False, flip_distance=False, sort=True)).run(dstore, vocab)
print_results(res_approx, baseline)
res_exact = RunKNNLM(dict(k=1024, find_best_lim=False, use_exact=True, flip_distance=True, sort=True)).run(dstore, vocab)
print_results(res_exact, baseline)
bin_0 = [None] + [10**i for i in range(1, 7)]
bin_1 = bin_0[1:] + [None]
sofar = 0
print('max-count={}'.format(max(vocab.count)))
print('len-vocab={}'.format(len(vocab)))
coeff = 0.25
for lo_freq, hi_freq in zip(bin_0, bin_1):
if hi_freq is not None and lo_freq is not None:
piv_start = find_occurs_gte_pivot(vocab, hi_freq + 1)
piv_end = find_occurs_gte_pivot(vocab, lo_freq)
elif hi_freq is not None:
piv_start = find_occurs_gte_pivot(vocab, hi_freq + 1)
piv_end = len(vocab)
else:
piv_start = vocab.nspecial
piv_end = find_occurs_gte_pivot(vocab, lo_freq)
assert piv_start < piv_end
mask = np.logical_and(tgts >= piv_start, tgts <= piv_end)
n = mask.sum().item()
sofar += n
# approx
knn_p_, p_ = res_approx['knn_p'], res_approx['p']
knn_p_, p_ = knn_p_[mask], p_[mask]
knn_p_ = torch.from_numpy(knn_p_).float()
p_ = torch.from_numpy(p_).float()
approx_p = EvalUtil.combine_knn_and_vocab_probs(
knn_p_,
p_,
coeff)
approx_ppl = EvalUtil.eval_ppl(approx_p).item()
# exact
knn_p_, p_ = res_exact['knn_p'], res_exact['p']
knn_p_, p_ = knn_p_[mask], p_[mask]
knn_p_ = torch.from_numpy(knn_p_).float()
p_ = torch.from_numpy(p_).float()
exact_p = EvalUtil.combine_knn_and_vocab_probs(
knn_p_,
p_,
coeff)
exact_ppl = EvalUtil.eval_ppl(exact_p).item()
# baseline
baseline_ppl = EvalUtil.eval_ppl(p_).item()
# main
n = mask.sum().item()
out = collections.OrderedDict()
out['lo_freq'] = lo_freq
out['hi_freq'] = hi_freq
out['n'] = n
out['approx-ppl'] = approx_ppl
out['exact-ppl'] = exact_ppl
out['baseline-ppl'] = baseline_ppl
print(json.dumps(out))
######
def pick(d, keys, mask=None):
if mask is not None:
return [d[k][mask] for k in keys]
return [d[k] for k in keys]
mask = mask.reshape(-1)
tgts_ = tgts[mask]
k = 128
#
index_a, dist_a, knn_tgts_a = pick(res_approx, ['index', 'dist', 'knn_tgts'], mask)
index_e, dist_e, knn_tgts_e = pick(res_exact, ['index', 'dist', 'knn_tgts'], mask)
res_overlap = collections.defaultdict(list)
res_ppl = {}
for k in [16, 64, 256]:
for i in range(index_a.shape[0]):
#a_, e_ = knn_tgts_a[i, :k].flatten().tolist(), knn_tgts_e[i, :k].flatten().tolist()
a_, e_ = index_a[i, :k].flatten().tolist(), index_e[i, :k].flatten().tolist()
overlap = len(set.intersection(set(a_), set(e_)))
res_overlap[k].append(overlap)
out = collections.OrderedDict()
for k, v in res_overlap.items():
out['overlap-{}'.format(k)] = np.mean(v)
print(json.dumps(out))
#print('piv=[{}:{}), freq=[{}:{}], n={}/{}, sofar={}'.format(
# piv_start, piv_end, lo_freq, hi_freq, n, mask.shape[0], sofar))
sys.exit()
def edit_distance(x0, x1):
m = len(x0)
n = len(x1)
d = [[i] for i in range(1, m + 1)] # d matrix rows
d.insert(0, list(range(0, n + 1))) # d matrix columns
for j in range(1, n + 1):
for i in range(1, m + 1):
if x0[i - 1] == x1[j - 1]:
substitutionCost = 0
else:
substitutionCost = 1
d[i].insert(j, min(d[i - 1][j] + 1,
d[i][j - 1] + 1,
d[i - 1][j - 1] + substitutionCost))
return d[-1][-1]
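# Sanity check (classic Levenshtein example): edit_distance("kitten", "sitting") == 3
# (substitute k->s, substitute e->i, insert g).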
class Dstore:
def __init__(self, path, dstore_size=None, vec_size=None):
self.path = path
self.dstore_size = dstore_size
self.vec_size = vec_size
self._initialized = False
def initialize(self):
path = self.path
# self.keys = np.memmap(os.path.join(path, 'dstore_keys.npy'), dtype=np.float32, mode='r', shape=(self.dstore_size, self.vec_size))
self.tgts = np.memmap(os.path.join(path, 'dstore_tgts.npy'), dtype=np.int, mode='r', shape=(self.dstore_size, 1))
self.vals = np.memmap(os.path.join(path, 'dstore_vals.npy'), dtype=np.int, mode='r', shape=(self.dstore_size, 1))
self.prob = np.memmap(os.path.join(path, 'dstore_prob.npy'), dtype=np.float32, mode='r', shape=(self.dstore_size, 1))
self._initialized = True
def add_neighbors(self, path, k):
self.knns = np.memmap(os.path.join(path, 'lookup_knns.npy'), dtype=np.int, mode='r', shape=(self.dstore_size, k, 1))
self.knn_tgts = np.memmap(os.path.join(path, 'lookup_knn_tgts.npy'), dtype=np.int, mode='r', shape=(self.dstore_size, k, 1))
self.dist = np.memmap(os.path.join(path, 'lookup_dist.npy'), dtype=np.float32, mode='r', shape=(self.dstore_size, k, 1))
# self.lookup_done = np.memmap(os.path.join(path, 'lookup_done.npy'), dtype=np.int, mode='r', shape=(self.dstore_size, 1))
def add_exact(self, path, k):
self.exact = np.memmap(os.path.join(path, 'lookup_exact.npy'), dtype=np.float32, mode='r', shape=(self.dstore_size, k, 1))
def add_annotations(self, path):
self.src_pos = np.memmap(os.path.join(path, 'annotation_src_pos.npy'), dtype=np.int, mode='r', shape=(self.dstore_size, 1))
# read pos dict
self.idx2tag = []
print('Reading POS Vocab...')
path_pos_dict = os.path.join(path, 'pos_dict.txt')
with open(path_pos_dict) as f:
for line in f:
print(line.strip())
idx, sym, _ = line.strip().split()
self.idx2tag.append(sym)
self.tag2idx = {v: k for k, v in enumerate(self.idx2tag)}
print('done\n')
class EvalUtil:
@staticmethod
def get_knn_log_prob(tgts, dists, knn_tgts):
tgts = torch.from_numpy(tgts).long().view(-1)
dists = torch.from_numpy(dists).float().squeeze(-1)
#dists = -dists
probs = torch.log_softmax(dists, dim=-1)
index_mask = torch.eq(torch.from_numpy(knn_tgts).long().squeeze(-1), tgts.unsqueeze(-1)).float()
index_mask[index_mask == 0] = -10000 # for stability
index_mask[index_mask == 1] = 0
# (T_reducedxB)
yhat_knn_prob = torch.logsumexp(probs + index_mask, dim=-1).clone().numpy()
# Bx1
return yhat_knn_prob.reshape(-1, 1)
@staticmethod
def combine_knn_and_vocab_probs(knn_p, vocab_p, coeff):
combine_probs = torch.stack([vocab_p, knn_p], dim=0)
coeffs = torch.ones_like(combine_probs)
coeffs[0] = np.log(1 - coeff)
coeffs[1] = np.log(coeff)
curr_prob = torch.logsumexp(combine_probs + coeffs, dim=0)
return curr_prob
@staticmethod
def eval_ppl(p):
avg_nll = -p.mean() / np.log(2)
ppl = 2**avg_nll
return ppl
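# Worked example for orientation (numbers chosen by hand, not taken from a dstore):
# if both the LM and the kNN component assign log(0.5) to every target and coeff=0.25,
# combine_knn_and_vocab_probs returns log(0.75*0.5 + 0.25*0.5) = log(0.5) per token,
# and eval_ppl of those values is 2**(-log2(0.5)) = 2.0.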
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# dstore
parser.add_argument('--dstore', default='from_dstore_valid/va', type=str)
parser.add_argument('--dstore-size', default=10000, type=int)
parser.add_argument('--vocab', default='data-bin/wikitext-103/dict.txt')
# dstore neighbors
parser.add_argument('--lookup', default='from_dstore_valid/lookup_va', type=str)
parser.add_argument('--lookup-k', default=1024, type=int)
# examine
parser.add_argument('--k', default=1024, type=int)
args = parser.parse_args()
print(args)
main(args)
|
the-stack_0_27707
|
from jingo import register, env
import jinja2
from dashboards.personal import personal_dashboards
@register.function
@jinja2.contextfunction
def personal_dashboard_tabs(context, active_tab):
"""Render the tabs for the user/group dashboard."""
c = {'dashboards': personal_dashboards(context['request']),
'user': context['request'].user,
'active_tab': active_tab,
'request': context['request']}
t = env.get_template('dashboards/includes/personal_tabs.html').render(c)
return jinja2.Markup(t)
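# Illustrative template usage (the tab name is a placeholder): since the helper is
# registered with jingo as a contextfunction, a template can call
# {{ personal_dashboard_tabs('review') }} and `active_tab` is passed through to the
# included personal_tabs.html template.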
|
the-stack_0_27709
|
import os.path
import tempfile
from dipy.core.gradients import GradientTable
import numpy as np
class MockDiffusionWeightedImage():
def __init__(self, data=None, gtab=None, mask=None):
if data is None:
data = np.random.uniform(100, 300, size=(50, 50, 3, 100))
if gtab is None:
b0s = 20 * np.ones([20, 1])
b1000s = 1020 * np.ones([80, 1])
bvals = np.append(b0s, b1000s, 0)
np.random.shuffle(bvals)
phis = np.random.uniform(0, 2 * np.pi, (100, 1))
thetas = np.arccos(np.random.uniform(-1, 1, (100, 1)))
x = bvals * np.sin(thetas) * np.cos(phis)
y = bvals * np.sin(thetas) * np.sin(phis)  # y uses sin(phi); cos(phi) here was a copy-paste slip
z = bvals * np.cos(thetas)
gradients = np.append(x, np.append(y, z, 1), 1)
gtab = GradientTable(gradients)
if mask is None:
mask = np.ones([50, 50, 3])
self.data = data
self.mask = mask
self.gtab = gtab
def get_image(self):
return self.data
def get_flat_data(self):
return self.data.flatten()
class MockDerivedImage():
def __init__(self, data=None, mask=None):
if data is None:
data = np.zeros([30, 30, 4])
mask = np.zeros([30, 30, 4])
centroid = np.array([15, 15])
z_slice = 2
for idx, val in np.ndenumerate(data[..., 2]):
disp = idx - centroid
r = np.linalg.norm(disp)
if r <= 10:
data[idx[0], idx[1], z_slice] = r
mask[idx[0], idx[1], z_slice] = 1
self.data = data
self.mask = mask
def get_image(self):
return self.data
def get_flat_data(self):
return self.data[self.mask == 1]
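# Minimal usage sketch (only the defaults above are assumed):
# dwi = MockDiffusionWeightedImage()
# dwi.get_image().shape     # (50, 50, 3, 100)
# roi = MockDerivedImage()
# roi.get_flat_data()       # radii of the voxels inside the circular mask on slice 2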
|
the-stack_0_27710
|
import sys
from datetime import datetime
now = datetime.now()
def setup_db(module):
module.app.config['SQLALCHEMY_DATABASE_URI'] = \
"sqlite:///./pushrodr.db"
module.db.drop_all()
module.db.create_all()
with module.app.test_request_context():
author_one = module.Author()
author_one.name = "Author One"
author_one.description = "Spam"
module.db.session.add(author_one)
author_two = module.Author()
author_two.name = "Author Two"
author_two.description = "Eggs"
module.db.session.add(author_two)
post_one = module.Post()
post_one.id = 1
post_one.timestamp = now
post_one.title = "Hello, World!"
post_one.content = "This is the first post"
post_one.author = author_one
module.db.session.add(post_one)
post_two = module.Post()
post_two.id = 2
post_two.timestamp = now
post_two.title = "Another Test!"
post_two.content = "This is the second post"
post_two.author = author_one
module.db.session.add(post_two)
post_three = module.Post()
post_three.id = 3
post_three.timestamp = now
post_three.title = "Goodbye, World!"
post_three.content = "This is the third post"
post_three.author = author_two
module.db.session.add(post_three)
post_two_comment_one = module.Comment()
post_two_comment_one.post = post_two
post_two_comment_one.timestamp = now
post_two_comment_one.author = "Anonymous Coward"
post_two_comment_one.content = "THIS POST IS TERRIBLE"
post_two_comment_two = module.Comment()
post_two_comment_two.post = post_two
post_two_comment_two.timestamp = now
post_two_comment_two.author = "AAA"
post_two_comment_two.content = "BBB"
post_three_comment_one = module.Comment()
post_three_comment_one.post = post_three
post_three_comment_one.timestamp = now
post_three_comment_one.author = "CCC"
post_three_comment_one.content = "ASD"
module.db.session.commit()
if __name__ == '__main__': # pragma: no cover
module = __import__(sys.argv[1])
setup_db(module)
module.app.run(debug=True)
|