the-stack_0_11036 | # -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from collections.abc import MutableMapping
from typing import Any, Dict, List, Optional
from frozendict import frozendict
from nemo.utils import logging
from nemo.utils.neural_graph.connection import StepModulePort
class GraphOutput(object):
""" A helper class represenging a single bound output. """
def __init__(self, ntype: "NeuralType", producer_step_module_port: StepModulePort):
"""
Initializes object.
Args:
ntype: a NeuralType object.
producer_step_module_port: a producer StepModulePort tuple (step number, module name, port name).
"""
self._ntype = ntype
self._producer_step_module_port = producer_step_module_port
@property
def ntype(self) -> "NeuralType":
"""
Returns:
NeuralType of a given output.
"""
return self._ntype
@property
def producer_step_module_port(self) -> StepModulePort:
""" Returns producer step port (step number (module), port name) tuple. """
return self._producer_step_module_port
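# Illustrative note (not part of the original sources): a GraphOutput simply pairs a
# NeuralType with the producer's StepModulePort. A hypothetical construction could look like:
#
#   go = GraphOutput(ntype=some_neural_type,
#                    producer_step_module_port=StepModulePort(0, "encoder", "outputs"))
#   go.producer_step_module_port.port_name   # -> "outputs"
#
# The step/module/port names above are made up and only document the tuple layout
# (step number, module name, port name).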
class GraphOutputs(MutableMapping):
'''
A specialized dictionary that contains bound outputs of a Neural Graph.
In fact it stores two dictionaries of "outputs":
- "default" outputs, with default keys taken from the outputs of modules, and
- "manual" outputs, used for specifying a subset of outputs.
When accessing the outputs, it returns one of those two dictionaries following the rule:
return the "manual" outputs if they were defined (at least one manual output defined by the user),
otherwise return the "default" outputs.
'''
def __init__(self, tensors_ref):
"""
Initializes two (empty) dictionaries.
Args:
tensors_ref: reference to the neural graph's tensors (dict of dicts).
"""
# Tensors[step][output_port_name] passed from the external neural graph object.
self._tensors_ref = tensors_ref
# This dictionary stores the output tensors collected during the "default" tensor recording.
# As they are using the default port names, the second/next tensor published on the same port
# will generate a new unique name following the (step_number.module.port_name) pattern.
self._default_outputs = {}
# This dictionary stores the output tensors of modules "manually" indicated by the user.
# In this case, trying to overwrite an existing port with a new tensor is forbidden (raises an Exception).
self._manual_outputs = {}
def __setitem__(self, key: str, value: "NmTensor"):
"""
This method is used to set the manual output - creates a GraphOutput item and adds it to the list.
Args:
key: The name of the output (port).
value: NmTensor that will be used to create a given GraphOutput.
"""
# Make sure that user passed a NmTensor.
if type(value).__name__ != "NmTensor":
raise TypeError("Port `{}` definition must be must be set using a NmTensor".format(key))
if key in self._manual_outputs.keys():
raise KeyError("Overwriting of a port `{}` that was previously manually bound is not allowed".format(key))
# Ok, set the "manual" output.
self._manual_outputs[key] = GraphOutput(value.ntype, value.producer_step_module_port)
def __getitem__(self, key: str) -> GraphOutput:
"""
Returns the bound output associated with the given key.
Uses default or manual dict depending whether there are some manual outputs or not.
Args:
key: Name of the bound output.
"""
if len(self._manual_outputs) > 0:
return self._manual_outputs[key]
else: # Use default dict.
return self._default_outputs[key]
def __delitem__(self, key: str):
"""
Raises:
TypeError as deletion of a bound output port is not allowed.
"""
raise TypeError("Deleting a bound output is not allowed")
def __iter__(self):
"""
Returns:
Iterator over the outputs - depending whether there are some manual outputs or not.
"""
if len(self._manual_outputs) > 0:
return iter(self._manual_outputs)
else: # Use default dict.
return iter(self._default_outputs)
def __len__(self) -> int:
"""
Returns:
The number of outputs - depending whether there are some manual outputs or not.
"""
if len(self._manual_outputs) > 0:
return len(self._manual_outputs)
else: # Use default dict.
return len(self._default_outputs)
def bind(self, tensors_ref: List["NmTensor"], port_names: Optional[List[str]] = None):
"""
Binds the "default" outputs.
Args:
tensors_ref: List of tensors to be added.
port_names: List of port names (visible outside). If None: using internal tensor "output port names".
"""
# Set names.
if port_names is None:
port_names = [tensor.name for tensor in tensors_ref]
for name, tensor in zip(port_names, tensors_ref):
# Check the presence of the port name in "default" dictionary.
if name in self._default_outputs.keys():
# Name already present - use a unique name combining the producer step number, producer name and port name.
name = (
str(tensor.producer_step_number) + "_" + tensor.producer_name + "_" + tensor.name
) # last = port name
logging.debug(
"Setting unigue name of the default output port `{}` produced in step {} by `{}` to `{}`".format(
tensor.name, tensor.producer_step_number, tensor.producer_name, name
)
)
# Store the output.
self._default_outputs[name] = GraphOutput(tensor.ntype, tensor.producer_step_module_port)
@property
def definitions(self) -> Dict[str, GraphOutput]:
"""
Property returns definitions of the output ports by extracting them on the fly from the bound outputs.
.. info::
This property actually returns a FrozenDict containing port definitions to indicate that
port definitions SHOULD not be used during the actual binding.
Returns:
Dictionary of neural types associated with bound outputs.
"""
# Get the right output dictionary.
d = self._manual_outputs if len(self._manual_outputs) > 0 else self._default_outputs
# Extract port definitions (Neural Types) and return an immutable dictionary,
# so the user won't be able to modify its content by accident!
return frozendict({k: v.ntype for k, v in d.items()})
@property
def tensors(self) -> Dict[str, "NmTensor"]:
"""
Property returns output tensors by extracting them on the fly from the bound outputs.
Returns:
Dictionary of tensors in the format (output-name: tensor).
"""
# Get the right output dictionary.
d = self._manual_outputs if len(self._manual_outputs) > 0 else self._default_outputs
output_tensors = {}
# Get tensors by accessing the producer-ports.
# At that point all keys (k) are unique - we made sure of that during binding/__setitem__.
for k, v in d.items():
producer_step = v.producer_step_module_port.step_number
producer_port_name = v.producer_step_module_port.port_name
# Find the right output tensor.
tensor = self._tensors_ref[producer_step][producer_port_name]
# Add it to the dictionary.
output_tensors[k] = tensor
# Return the result as an immutable dictionary,
# so the user won't be able to modify its content by an accident!
return frozendict(output_tensors)
@property
def tensor_list(self) -> List["NmTensor"]:
"""
Property returns output tensors by extracting them on the fly from the bound outputs.
Returns:
List of tensors.
"""
# Get the right output dictionary.
d = self._manual_outputs if len(self._manual_outputs) > 0 else self._default_outputs
output_tensor_list = []
# Get tensors by accessing the producer-ports.
for k, v in d.items():
producer_step = v.producer_step_module_port.step_number
producer_port_name = v.producer_step_module_port.port_name
# Find the right output tensor.
tensor = self._tensors_ref[producer_step][producer_port_name]
# Add it to the list.
output_tensor_list.append(tensor)
# Return the result.
return output_tensor_list
def serialize(self) -> Dict[str, Any]:
""" Method responsible for serialization of the graph outputs.
Returns:
Dictionary with the binding type and a list of mappings (step.module.output_port -> output | ntype).
"""
serialized_outputs = {"mappings": []}
# Get the right output dictionary.
if len(self._manual_outputs) > 0:
serialized_outputs["type"] = "manual"
d = self._manual_outputs
else:
serialized_outputs["type"] = "default"
d = self._default_outputs
# Iterate through "bindings" (GraphOutputs).
for key, binding in d.items():
# Serialize: step.module.port -> output | ntype.
smp = binding.producer_step_module_port
source = str(smp.step_number) + "." + smp.module_name + "." + smp.port_name
# Get type.
ntype_str = str(binding.ntype)
# Serialize!
serialized_outputs["mappings"].append(source + "->" + key + " | " + ntype_str)
# Return the result.
return serialized_outputs
def deserialize(self, serialized_outputs: Dict[str, Any], modules: Dict[str, 'NeuralModule']):
"""
Method responsible for deserialization of graph outputs.
Args:
serialized_outputs: Dictionary with the binding type and a list of serialized outputs in the form of ("step.module.output_port->key | ntype")
modules: Dictionary of modules required for neural type copying/checking.
"""
# Check type.
if serialized_outputs["type"] == "default":
# We still need to deserialize.
# Use-case: deserialization of a graph with nested graph with bound output.
d = self._default_outputs
else:
d = self._manual_outputs
# Iterate through the serialized outputs one by one.
for i in serialized_outputs["mappings"]:
# Deserialize!
[producer, key_ntype] = i.split("->")
[key, ntype_str] = key_ntype.split(" | ")
[step_number, producer_name, producer_port_name] = producer.split(".")
# Get neural type from module output port definition.
ntype = modules[producer_name].output_ports[producer_port_name]
# Make sure the graph bound port type matches the deserialized type.
assert ntype_str == str(ntype)
# Create a new bound output.
go = GraphOutput(ntype, StepModulePort(int(step_number), producer_name, producer_port_name))
d[key] = go
# Done.
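# Illustrative sketch (not part of the original module): serialize() returns a dict with a
# "type" field and "source->key | ntype" mapping strings. All names below are hypothetical
# and only document the convention implemented above:
#
#   outputs = GraphOutputs(tensors_ref=graph_tensors)
#   outputs.bind([logits_tensor])              # "default" binding, key taken from the tensor name
#   outputs["log_probs"] = log_probs_tensor    # "manual" binding, user-chosen key
#   outputs.serialize()
#   # -> {"type": "manual",
#   #     "mappings": ["0.classifier.log_probs->log_probs | <ntype as string>"]}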
|
the-stack_0_11037 | #!/usr/bin/env python
DESC = """
____ _ _ __ __ __ ____ _____
| __ ) ___ __ _ _ _| |_(_)/ _|_ _| \/ | _ \___ /
| _ \ / _ \/ _` | | | | __| | |_| | | | |\/| | |_) ||_ \
| |_) | __/ (_| | |_| | |_| | _| |_| | | | | __/___) |
|____/ \___|\__,_|\__,_|\__|_|_| \__, |_| |_|_| |____/
|___/
______________________________________________________________
| |
| Edit Metadata of MP3 files based on file name |
|____________________________________________________________|
"""
import sys
import shutil
import os
from os import chdir, listdir, rename, walk, path, environ
from os.path import basename, dirname, realpath
import spotipy
import argparse
import configparser
import spotipy.oauth2 as oauth2
import re
from titlecase import titlecase
import requests
from bs4 import BeautifulSoup
import eyed3
def setup_config():
'''
read api keys from config.ini file
'''
global CONFIG, GENIUS_KEY, SP_SECRET, SP_ID, config_path
CONFIG = configparser.ConfigParser()
config_path = realpath(__file__).replace(basename(__file__), '')
config_path = config_path + 'config.ini'
CONFIG.read(config_path)
GENIUS_KEY = CONFIG['keys']['genius_key']
SP_SECRET = CONFIG['keys']['spotify_client_secret']
SP_ID = CONFIG['keys']['spotify_client_id']
if GENIUS_KEY == '<insert genius key here>':
print('Warning, you are missing Genius key. Add it using --config\n\n')
if SP_SECRET == '<insert spotify client secret here>':
print('Warning, you are missing Spotify Client Secret. Add it using --config\n\n')
if SP_ID == '<insert spotify client id here>':
print('Warning, you are missing Spotify Client ID. Add it using --config\n\n')
def add_config_keys():
'''
Adds configuration keys in the config.ini file
'''
GENIUS_KEY = CONFIG['keys']['genius_key']
SP_SECRET = CONFIG['keys']['spotify_client_secret']
SP_ID = CONFIG['keys']['spotify_client_id']
if GENIUS_KEY == '<insert genius key here>':
genius_key = input('Enter Genius Client Access token : ')
CONFIG['keys']['genius_key'] = str(genius_key)
if SP_SECRET == '<insert spotify client secret here>':
sp_secret = input('Enter Spotify Secret token : ')
CONFIG['keys']['spotify_client_secret'] = str(sp_secret)
if SP_ID == '<insert spotify client id here>':
sp_id = input('Enter Spotify Client ID : ')
CONFIG['keys']['spotify_client_id'] = str(sp_id)
with open(config_path, 'w') as configfile:
CONFIG.write(configfile)
def improve_song_name(song):
'''
Removes all unwanted words and numbers from the file name so that the Spotify search results can be improved.
Strips leading numbers, replaces the characters in char_filters with spaces, removes the words in word_filters, and collapses extra whitespace.
'''
audiofile = eyed3.load(song)
tag = audiofile.tag
artist = tag.artist.split(";", 1)[0]
song = artist + ' - ' + tag.title
char_filters = "()[]{}-:_/=!+\"\'"
word_filters = ('lyrics', 'lyric', 'by', 'video', 'official', 'hd', 'dirty', 'with', 'original', 'mix',
'www', 'com', '.', 'mp3', 'audio', 'full', 'feat', 'version', 'music', 'hq', 'uploaded', 'explicit')
reg_exp = 's/^\d\d //'
song = song.strip()
song = song.lstrip("0123456789.- ")
# re.sub(reg_exp, '', song)
song = song[0:-4]
song = ''.join(
map(lambda c: " " if c in char_filters else c, song))
song = re.sub('|'.join(re.escape(key) for key in word_filters),
"", song, flags=re.IGNORECASE)
song = ' '.join(song.split()).strip()
print(song)
return song
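# Illustrative note (not part of the original script): the cleanup pipeline is roughly
#   "<artist> - <title>"  ->  strip leading "0123456789.- "  ->  drop the last 4 chars
#   (which assumes a trailing ".mp3")  ->  replace char_filters with spaces
#   ->  remove word_filters  ->  collapse whitespace
# For a hypothetical string "Queen - Bohemian Rhapsody.mp3" this yields "Queen Bohemian Rhapsody".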
def get_song_name(title, artist):
'''
return search query for spotify api call
'''
return title + ' - ' + artist
def get_metadata_spotify(spotify, song_name):
'''
call spotify.com api to get the metadata required, as much as possible
'''
print("trying to find data on Spotify...")
metadata = {}
try:
meta_tags = spotify.search(song_name, limit=1)['tracks']['items'][0]
except IndexError:
print("Could not find the song on Spotify")
return []
metadata['title'] = meta_tags['name']
metadata['artist'] = meta_tags['artists'][0]['name']
metadata['album'] = meta_tags['album']['name']
metadata['album_artist'] = meta_tags['album']['artists'][0]['name']
album_id = meta_tags['album']['id']
album_meta_tags = spotify.album(album_id)
metadata['release_date'] = album_meta_tags['release_date']
print(album_meta_tags['genres'])
try:
metadata['genre'] = titlecase(album_meta_tags['genres'][0])
#genre = "; ".join((album_meta_tags['genres']))
#metadata['genre'] = titlecase(genre)
except IndexError:
try:
artist_id = meta_tags['artists'][0]['id']
artist_meta_tags = spotify.artist(artist_id)
#metadata['genre'] = titlecase(artist_meta_tags['genres'][0])
genre = "; ".join((artist_meta_tags['genres']))
metadata['genre'] = titlecase(genre)
except IndexError:
print("song genre could not be found.")
pass
metadata['track_num'] = meta_tags['track_number']
metadata['disc_num'] = meta_tags['disc_number']
print()
return metadata
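# Illustrative sketch (not part of the original script) of the dict returned on success;
# the values are made up and only document the keys populated above:
#   {'title': 'Get Lucky', 'artist': 'Daft Punk', 'album': 'Random Access Memories',
#    'album_artist': 'Daft Punk', 'release_date': '2013-05-17', 'genre': 'French House',
#    'track_num': 8, 'disc_num': 1}
# An empty list is returned when the Spotify search yields no match.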
def list_files():
'''
list all files in current directory with extension .mp3
'''
files = []
return [f for f in listdir('.') if f.endswith('.mp3')]
def set_metadata(file_name, metadata):
'''
call eyed3 module to set mp3 song metadata as received from spotify
'''
print("setting metadata for " + file_name)
print()
audiofile = eyed3.load(file_name)
tag = audiofile.tag
if 'genre' in metadata:
#tag.genre = metadata['genre']
#tag.comments.set = metadata['genre']
tag.comments.set(metadata['genre'])
tag.save(version=(2, 3, 0))
#tag.save()
# if not norename:
# song_title = rename_format.format(
# title=metadata['title'] + ' -',
# artist=metadata['artist'] + ' -',
# album=metadata['album'] + ' -')
# song_title = song_title[:-1] if song_title.endswith('-') else song_title
# song_title = ' '.join(song_title.split()).strip()
# print("renaming " + file_name + "to " + song_title)
# new_path = path.dirname(file_name) + '{}.mp3'.format(song_title)
# rename(file_name, new_path)
print()
return
def fix_music_file(spotify, file_name, norename, rename_format):
print("------------------------------------------------------------------------")
print()
print()
print("Currently processing " + file_name)
metadata = get_metadata_spotify(spotify, improve_song_name(file_name))
if not metadata:
is_improvement_needed = True
return is_improvement_needed
else:
set_metadata(file_name, metadata)
is_improvement_needed = False
rename_file = rename_to_format(
file_name, norename, rename_format, metadata)
shutil.move(rename_file, 'Organized')
return is_improvement_needed
def rename_to_format(file_name, norename, rename_format, metadata):
# if not norename:
# song_title = rename_format.format(
# title=metadata['title'] + ' -',
# artist=metadata['artist'] + ' -',
# album=metadata['album'] + ' -')
# song_title = song_title[:-1] if song_title.endswith('-') else song_title
# song_title = ' '.join(song_title.split()).strip()
song_title = file_name
print("renaming " + file_name + "to " + song_title)
new_path = path.dirname(file_name) + '{}.mp3'.format(song_title)
rename(file_name, new_path)
return new_path
def fix_music_files(spotify, files, norename, rename_format):
need_to_improve = []
for file_name in files:
response = fix_music_file(spotify, file_name, norename, rename_format)
if response is True:
need_to_improve.append(file_name)
("------------------------------------------------------------------------")
print()
print()
return need_to_improve
def main():
'''
Deals with arguments and calls other functions
'''
setup_config()
parser = argparse.ArgumentParser(
description="{}".format(DESC), formatter_class=argparse.RawDescriptionHelpFormatter
)
# group = parser.add_mutually_exclusive_group(required=True)
parser.add_argument('-d', '--dir', action="store", dest='repair_directory',
help='give path of music files\' directory', default=os.getcwd())
parser.add_argument('-s', '--song', action='store', dest='song_name',
help='Only fix metadata of the file specified', default=None)
parser.add_argument('-c', '--config', action='store_true', dest='config',
help="Add API Keys to config\n\n")
parser.add_argument('-n', '--norename', action='store_true',
help='Does not rename files to song title\n\n')
parser.add_argument('-f', '--format', action='store', dest='rename_format', help='''Specify the Name format used in renaming,
Valid Keywords are:
{title}{artist}{album}\n\n)''')
args = parser.parse_args()
repair_directory = args.repair_directory or '.'
song_name = args.song_name or None
norename = args.norename or False
rename_format = args.rename_format or '{title}'
config = args.config
if config:
add_config_keys()
auth = oauth2.SpotifyClientCredentials(
client_id="622a0e16a4914e3eadc2a37b4a134f1e", client_secret="6fe008a8b7754954a58a9849fa3172df")
token = auth.get_access_token()
spotify = spotipy.Spotify(auth=token)
files = []
if song_name is not None:
need_to_improve = fix_music_file(
spotify, song_name, norename, rename_format)
if need_to_improve is True:
print(song_name)
elif repair_directory:
chdir(repair_directory or '.')
if not os.path.exists("Organized"):
os.makedirs("Organized")
files = list_files()
need_to_improve = fix_music_files(
spotify, files, norename, rename_format)
print(need_to_improve)
if __name__ == "__main__":
main()
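# Example invocations (not part of the original script), based on the argparse flags above;
# the script name "fix_metadata.py" is a placeholder:
#   python fix_metadata.py -d /path/to/music     # fix every .mp3 in the directory
#   python fix_metadata.py -s song.mp3 -n        # fix a single file without renaming it
#   python fix_metadata.py -c                    # interactively add API keys to config.ini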
|
the-stack_0_11040 | # Copyright 2010-2012 the SGC project developers.
# See the LICENSE file at the top-level directory of this distribution
# and at http://program.sambull.org/sgc/license.html.
import warnings
import pygame
from pygame.locals import *
from .._locals import *
class SelectableText:
_text = ""
_text_offset = _text_pos = 0
__blink = True
_blink_time = 0
_chars = ((0,0),)
_repeat_key = None
_repeat_time = 0
_select = None # Starting point of selection
__cursor_pos = 0
@property
def _blink(self):
"""Always return False when a selection is made."""
return self.__blink and not bool(self._select)
@_blink.setter
def _blink(self, value):
self.__blink = value
def _select_fix(self):
"""
Returns the selection area corrected, so if the selection is
right-to-left it returns the positions reversed.
"""
if self._select > self._cursor_pos:
return (self._cursor_pos, self._select)
else:
return (self._select, self._cursor_pos)
def _calc_chars(self):
"""
Calculates the position and size of each character.
Stores the results in self._chars as a tuple of (pos, width) tuples.
"""
p = self._settings["font"].size(self._text[0])[0]
chars = [(0,p)]
for c in range(len(self._text)):
w = self._settings["font"].size(self._text[:c+2])[0]
xmax, advance = self._settings["font"].metrics(
self._text[c])[0][1::3]
if xmax > advance: # Adjust for overhang
chars.append((p - (xmax - advance), w - p))
else:
chars.append((p, w - p))
p = w
self._chars = tuple(chars)
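# Illustrative note (not part of the original sources): for self._text == "ab" this produces
# three (position, width) pairs - one per character plus a final, roughly zero-width entry
# marking the position just past the last character (used for the end-of-text cursor). The
# actual pixel values depend on the font configured in self._settings["font"].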
def _mouse_cursor(self, mouse_pos):
"""Return the text cursor position of the mouse."""
pos = mouse_pos[0] - self.rect_abs.x - self._text_pos
for index, (p,w) in enumerate(self._chars):
if pos <= p + w/2:
break
return index
def _update_select_text(self, time):
"""
Update text stuff for selectable text.
Should be called from widget's update() method.
"""
# Repeat key if held down
if self._repeat_key:
self._repeat_time += time
while self._repeat_time > self._settings["repeat_begin"]:
self._repeat_time -= self._settings["repeat_interval"]
self._event(self._repeat_key)
def _event_select_text(self, event):
"""
Handles events for selectable text.
Call from widget's _event() method.
"""
if event.type == MOUSEBUTTONDOWN and event.button == 1:
# Begin drawing selection
if pygame.key.get_mods() & KMOD_SHIFT and self._select is None:
self._select = self._cursor_pos
self._cursor_pos = self._mouse_cursor(event.pos)
if not pygame.key.get_mods() & KMOD_SHIFT:
self._select = self._cursor_pos
elif event.type == MOUSEMOTION and event.buttons[0]:
# Continue drawing selection while mouse held down
self._cursor_pos = self._mouse_cursor(event.pos)
elif event.type == MOUSEBUTTONUP:
# Set cursor position with mouse click
self._cursor_pos = self._mouse_cursor(event.pos)
if self._select == self._cursor_pos:
self._select = None
elif event.type == KEYDOWN:
# Save last key press for repeat
if self._repeat_key != event:
self._repeat_key = event
self._repeat_time = 0
if event.key == K_ESCAPE:
self._select = None
elif event.key == K_LEFT:
if not event.mod & KMOD_SHIFT:
self._select = None # Break selection
elif self._select is None:
# Reset selection if not selecting
self._select = self._cursor_pos
self._cursor_pos -= 1
# Remove selection when cursor is at same position
if self._select == self._cursor_pos:
self._select = None
elif event.key == K_RIGHT:
if not event.mod & KMOD_SHIFT:
self._select = None # Break selection
elif self._select is None:
self._select = self._cursor_pos
self._cursor_pos += 1
if self._select == self._cursor_pos:
self._select = None
elif event.key == K_HOME:
if not event.mod & KMOD_SHIFT:
self._select = None
elif self._select is None:
self._select = self._cursor_pos
self._cursor_pos = 0
if self._select == self._cursor_pos:
self._select = None
elif event.key == K_END:
if not event.mod & KMOD_SHIFT:
self._select = None
elif self._select is None:
self._select = self._cursor_pos
self._cursor_pos = len(self._text)
if self._select == self._cursor_pos:
self._select = None
'''elif event.mod & KMOD_CTRL:
if event.key == K_a: # Select all
self._select = 0
self._cursor_pos = len(self._text)
elif event.key == K_c and self._select is not None: # Copy
select = self._select_fix()
string = "".join(self._text[select[0]:select[1]])
try:
pygame.scrap.put(SCRAP_TEXT, string)
except pygame.error:
warnings.warn("Please run 'pygame.scrap.init()'"
" to use the clipboard.", RuntimeWarning)'''
elif event.type == KEYUP:
if self._repeat_key and self._repeat_key.key == event.key:
self._repeat_key = None # Stop repeat
def _update_modify_text(self, time):
"""
Update text stuff for editable text (e.g. input box).
Should be called from widget's update() method.
"""
# If enough time has passed, blink cursor
self._blink_time += time
if self._blink_time > self._settings["blink_interval"]:
self._blink_time -= self._settings["blink_interval"]
self._blink = not self._blink
def _event_modify_text(self, event):
"""
Handles events for editable text (e.g. input box).
Should be called from widget's _event() method.
Will typically be used alongside `_event_select_text()`.
"""
if event.type == KEYDOWN:
# Reset cursor blink when typing
self._blink_time = 0
self._blink = True
if event.key in (9,K_RETURN,K_ESCAPE,K_KP_ENTER): # Keys to ignore
return
if event.key == K_BACKSPACE:
if self._select is not None:
self._delete_selection()
elif self._cursor_pos > 0:
self._cursor_pos -= 1
self._text.pop(self._cursor_pos)
self._calc_chars()
elif event.key == K_DELETE:
if self._select is not None:
self._delete_selection()
elif self._cursor_pos < len(self._text):
self._text.pop(self._cursor_pos)
self._calc_chars()
elif event.unicode:
if event.mod & KMOD_CTRL:
if event.key == K_v: # Paste
text = None#pygame.scrap.get(SCRAP_TEXT)
'''if text:
if self._select is not None:
sel = self._select_fix()
self._select = None
else:
sel = (self._cursor_pos, self._cursor_pos)
# Get list of text to insert into input_text
text = [unicode(char) for char in text]
self._text[sel[0]:sel[1]] = text
self._calc_chars()
self._cursor_pos = sel[0] + len(text)'''
elif event.key == K_x and self._select is not None: # Cut
select = None#self._select_fix()
'''string = "".join(self._text[select[0]:select[1]])
try:
pygame.scrap.put(SCRAP_TEXT, string)
except pygame.error:
warnings.warn("Please run 'pygame.scrap.init()'"
" to use the clipboard",
RuntimeWarning)
self._delete_selection()'''
else:
# Delete selection
if self._select is not None:
self._delete_selection()
# Insert new character
if len(self._text) < self._settings["max_chars"]:
self._text.insert(self._cursor_pos, event.unicode)
self._calc_chars()
self._cursor_pos += 1
def _delete_selection(self):
"""Delete the current selection of text."""
select = self._select_fix()
del self._text[select[0]:select[1]]
self._select = None
self._cursor_pos = select[0]
self._calc_chars()
def _draw_selection(self, image, y, height):
"""Draw selection onto image. Does nothing if no selection."""
if self._select is None:
return
select = self._select_fix()
# Semi-transparent selection rectangle
w = self._chars[select[1]][0] - self._chars[select[0]][0]
x = self._chars[select[0]][0] + self._text_pos - 1
r = Rect((x,y), (w+2,height))
selection = pygame.surface.Surface(r.size, flags=SRCALPHA)
selection.fill(self._settings["col_selection"] + (100,))
image.blit(selection, r)
# Border around selection rectangle
pygame.draw.rect(image, self._settings["col_selection"], r, 1)
@property
def _cursor_pos(self):
"""
The cursor position in characters. Will ensure cursor is always in
valid location when set.
"""
return self.__cursor_pos
@_cursor_pos.setter
def _cursor_pos(self, value):
# Keep cursor position within text
self.__cursor_pos = min(max(value, 0), len(self._text))
# Ensure text is visible when less than full width
if self._chars[-1][0] < self.rect.w - self._text_offset:
self._text_pos = self._text_offset
else:
# Scroll text in input box when it's too long
pos = self._chars[self._cursor_pos][0]
if pos > (self.rect.w - self._text_pos):
self._text_pos = -(pos - self.rect.w + self._text_offset)
elif pos < (self._text_offset - self._text_pos):
self._text_pos = self._text_offset - pos
# Ensure no unnecessary space is left at right-edge
right_edge = self._chars[-1][0] - self.rect.w + self._text_offset
if right_edge > 0:
self._text_pos = max(-right_edge, self._text_pos)
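# Minimal sketch (not part of the original module) of how a widget might use this mix-in; the
# base-class name is an assumption, only the method/attribute names come from the code above:
#
#   class InputBox(SelectableText, SomeWidgetBase):
#       def update(self, time):
#           self._update_select_text(time)
#           self._update_modify_text(time)
#       def _event(self, event):
#           self._event_select_text(event)
#           self._event_modify_text(event)
#
# The mix-in expects the host widget to provide self._settings (font, repeat_begin,
# repeat_interval, blink_interval, max_chars, col_selection), self.rect / self.rect_abs,
# and a mutable list of characters in self._text.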
|
the-stack_0_11041 | #!/usr/bin/env python3
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Renders a same tournament game
Argument: File path to .json in history folder
"""
import argparse
import os
import multiprocessing
import shutil
from diplomacy import Game
import ujson as json
from diplomacy_research.proto.diplomacy_proto.game_pb2 import SavedGame as SavedGameProto
from diplomacy_research.utils.proto import proto_to_dict, read_next_proto
def render_saved_game(saved_game, output_dir, prefix=''):
""" Renders a specific saved game
:param saved_game: The saved game to render
:param output_dir: The output directory where to save the rendering
:param prefix: An optional prefix to add before the game id
"""
if prefix:
output_dir = os.path.join(output_dir, prefix + '_' + saved_game['id'])
else:
output_dir = os.path.join(output_dir, saved_game['id'])
nb_phases = len(saved_game['phases'])
svg_count = 0
# Checking if already generated
# Otherwise, regenerating completely
if os.path.exists(output_dir):
nb_svg = len([os.path.join(output_dir, file) for file in os.listdir(output_dir) if file[-4:] == '.svg'])
if nb_svg == 2 * nb_phases:
print('Rendered {} (Skipped)'.format(saved_game['id']))
return
shutil.rmtree(output_dir, ignore_errors=True)
os.makedirs(output_dir, exist_ok=True)
# Creating a Game to replay all orders, and a new Game object per phase to validate
entire_game = Game()
if saved_game['phases']:
entire_game.set_state(saved_game['phases'][0]['state'])
# Rendering
for phase in saved_game['phases']:
phase_game = Game()
# Setting state
state = phase['state']
phase_game.set_state(state)
entire_game.note = phase_game.note
# Setting orders
phase_game.clear_orders()
orders = phase['orders']
for power_name in orders:
phase_game.set_orders(power_name, orders[power_name])
entire_game.set_orders(power_name, orders[power_name])
# Validating that we are at the same place
for power_name in orders:
assert sorted(phase_game.get_units(power_name)) == sorted(entire_game.get_units(power_name))
assert sorted(phase_game.get_centers(power_name)) == sorted(entire_game.get_centers(power_name))
# Rendering with and without orders
with open(os.path.join(output_dir, '%03d%s' % (svg_count, '.svg')), 'w') as file:
file.write(entire_game.render(incl_orders=False))
svg_count += 1
with open(os.path.join(output_dir, '%03d%s' % (svg_count, '.svg')), 'w') as file:
file.write(entire_game.render(incl_orders=True))
# Processing (for entire game)
svg_count += 1
entire_game.process()
print('Rendered {}'.format(saved_game['id']))
# =========================================
# ------- JSON RENDERING ----------
# =========================================
def render_json(file_path):
""" Renders a specific json file
:param file_path: The full path to the json file
:return: Nothing, but creates a directory (file_path without '.json') containing the rendered images
"""
dir_path = os.path.dirname(file_path)
# Aborting if file doesn't exist
if not os.path.exists(file_path):
print('File {} does not exist.'.format(file_path))
return
# Loading saved game
file_content = open(file_path, 'r').read()
saved_game = json.loads(file_content)
# Rendering
render_saved_game(saved_game, dir_path)
def render_multi_json_per_folder(history_dir, nb_json_per_folder):
""" Finds all subfolders under history and renders 'nb_jsons' games in each subfolder found
:param history_dir: The full path to the history folder
:param nb_json_per_folder: The number of jsons to render per subfolder
:return: Nothing
"""
jsons_to_render = []
# Finding files to render
subfolders = [os.path.join(history_dir, path)
for path in os.listdir(history_dir)
if os.path.isdir(os.path.join(history_dir, path))]
for folder in subfolders:
json_games = sorted([os.path.join(folder, json_filename)
for json_filename in os.listdir(folder)
if json_filename[-5:] == '.json'])
json_games = json_games[:nb_json_per_folder]
for json_path in json_games:
jsons_to_render += [json_path]
# Running over multiple processes
nb_cores = multiprocessing.cpu_count()
with multiprocessing.Pool(nb_cores) as pool:
pool.map(render_json, jsons_to_render)
# =========================================
# ------- PROTO RENDERING ----------
# =========================================
def render_saved_game_proto(saved_game_proto, output_dir, prefix='', json_only=False):
""" Renders a saved game proto
:param saved_game_proto: A `.proto.game.SavedGame` object
:param output_dir: The output directory where the save the renderings
:param prefix: An optional prefix to add before the game id
:param json_only: Indicates we only want to extract the underlying JSON
"""
saved_game = proto_to_dict(saved_game_proto)
if json_only:
os.makedirs(os.path.join(output_dir, 'json'), exist_ok=True)
output_path = os.path.join(output_dir, 'json', prefix + '_' + saved_game['id'] + '.json')
with open(output_path, 'w') as file:
file.write(json.dumps(saved_game))
print('Saved JSON for {}'.format(saved_game['id']))
else:
render_saved_game(saved_game, output_dir, prefix)
def render_proto_file(file_path, args, compressed=True):
""" Renders all saved game proto in a proto file
:param file_path: The path to the proto file
:param args: The parsed command line arguments
:param compressed: Boolean that indicates if compression was used.
"""
dir_path = os.path.dirname(file_path)
game_count = 0
# Aborting if file doesn't exist
if not os.path.exists(file_path):
print('File {} does not exist.'.format(file_path))
return
# Processing filter
games_to_render = []
if args.filter:
for part in args.filter.split(','):
if '-' in part:
start, stop = part.split('-')
games_to_render += list(range(int(start), int(stop) + 1))
elif ':' in part:
start, stop, step = part.split(':')
games_to_render += list(range(int(start), int(stop) + 1, int(step)))
else:
games_to_render += [int(part)]
# Rendering each game in the proto file
with open(file_path, 'rb') as file:
while True:
saved_game_proto = read_next_proto(SavedGameProto, file, compressed)
if saved_game_proto is None:
break
game_count += 1
if game_count in games_to_render or (not games_to_render and not args.count):
print('(Game #%d) ' % game_count, end='')
render_saved_game_proto(saved_game_proto, dir_path, prefix='%05d' % game_count, json_only=args.json)
if game_count % 100 == 0 and args.count:
print('... %d games found so far.' % game_count)
# Printing the number of games in the proto file
if args.count:
print('Found %d games in the proto file.' % game_count)
# =========================================
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Render some saved games.')
PARSER.add_argument('--count', action='store_true', help='Count the number of games in the file')
PARSER.add_argument('--json', action='store_true', help='Only extract jsons without rendering the games')
PARSER.add_argument('--filter', help='Only render some games e.g. 1-5,6,8,10:100:2')
PARSER.add_argument('--nb_per_folder', type=int, default=0, help='The number of games per folder to generate')
PARSER.add_argument('file_path', help='The file path containing the saved games.')
ARGS = PARSER.parse_args()
# Rendering a single JSON
# Syntax: render.py <json path>
if ARGS.file_path[-5:] == '.json':
render_json(ARGS.file_path)
exit(0)
# Render a series of game in a .pb file
# Syntax: render.py <pb path>
if ARGS.file_path[-3:] == '.pb':
render_proto_file(ARGS.file_path, ARGS, compressed=False)
exit(0)
if ARGS.file_path[-4:] == '.pbz':
render_proto_file(ARGS.file_path, ARGS, compressed=True)
exit(0)
# Rendering a certain number of JSON per folder
# Syntax: render.py <history/> --nb_per_folder <# of json per folder to generate>
if os.path.exists(ARGS.file_path) and ARGS.nb_per_folder:
render_multi_json_per_folder(ARGS.file_path, ARGS.nb_per_folder)
exit(0)
# Invalid syntax
PARSER.print_help()
exit(-1)
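# Example invocations (not part of the original script), matching the dispatch logic above;
# file and directory names are placeholders:
#   python render.py history/12345/game.json              # render a single saved game
#   python render.py games.pb --filter 1-5,10 --json      # extract JSONs for games 1-5 and 10
#   python render.py games.pbz --count                    # count games in a compressed proto file
#   python render.py history/ --nb_per_folder 2           # render 2 games per history subfolder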
|
the-stack_0_11042 | """
Copyright (c) IBM 2015-2017. All Rights Reserved.
Project name: c4-high-availability
This project is licensed under the MIT License, see LICENSE
"""
import sys
from setuptools import setup, find_packages
import versioneer
needs_pytest = {"pytest", "test", "ptr", "coverage"}.intersection(sys.argv)
pytest_runner = ["pytest-runner"] if needs_pytest else []
setup(
name = "c4-high-availability",
version = versioneer.get_version(),
cmdclass = versioneer.get_cmdclass(),
packages = find_packages(),
install_requires = ["c4-systemmanager"],
setup_requires = [] + pytest_runner,
tests_require = ["pytest", "pytest-cov"],
author = "IBM",
author_email = "",
description = "This is a high availability implementation based on device managers",
license = "MIT",
keywords = "python c4 ha",
url = "",
)
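# Usage note (not part of the original file): because of the needs_pytest check above,
# pytest-runner is only required when the setup script is invoked with a test-related
# command, e.g.:
#   python setup.py test      # (or "pytest" / "ptr" / "coverage") pulls in pytest-runner
#   pip install .             # regular install, no pytest-runner needed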
|
the-stack_0_11046 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import re
from bs4 import BeautifulSoup
import scrape_common as sc
url = 'https://www.jura.ch/fr/Autorites/Coronavirus/Chiffres-H-JU/Evolution-des-cas-COVID-19-dans-le-Jura.html'
d = sc.download(url, silent=True)
d = d.replace(' ', ' ')
soup = BeautifulSoup(d, 'html.parser')
is_first = True
data_table = soup.find('caption', string=re.compile(r'Evolution du nombre de cas.*Jura')).find_parent('table')
if data_table:
headers = [" ".join(cell.stripped_strings) for cell in data_table.find('tr').find_all(['td', 'th'])]
assert len(headers) == 6, f"Number of headers changed: {len(headers)} != 6"
rows = []
for row in data_table.find_all('tr')[1:-1]:
data = {}
for col_num, cell in enumerate(row.find_all(['th', 'td'])):
content = " ".join(cell.stripped_strings).strip()
if content:
data[headers[col_num]] = content
rows.append(data)
if rows:
for i, row in enumerate(rows[:-1]):
if not row.get('Date') or row.get('Date') == 'Date':
continue
if not is_first:
print('-' * 10)
is_first = False
dd = sc.DayData(canton='JU', url=url)
current_year = datetime.datetime.now().year
if row.get('Date') and not re.search(f'{current_year}', row.get('Date')):
dd.datetime = f"{row.get('Date', '')} {current_year}"
else:
dd.datetime = row.get('Date', '')
dd.datetime = dd.datetime.replace('1 er', '1')
dd.cases = row.get('Cumul des cas confirmés')
dd.hospitalized = row.get('Nombre de cas actuellement hospitalisés')
dd.icu = row.get('Nombre de cas actuellement en soins intensifs')
dd.deaths = sum(int(str(r.get('Nombre de nouveaux décès', 0)).replace('*', '')) for r in rows[i:] if r.get('Nombre de nouveaux décès'))
print(dd)
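# Note (not part of the original scraper): the source table only publishes *new* deaths per
# day, so for row i the script sums the "Nombre de nouveaux décès" column over rows[i:].
# Assuming the table lists the newest day first, a hypothetical column of [0, 1, 2] would
# yield cumulative deaths of 3, 3 and 2 for the three rows.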
|
the-stack_0_11048 | """
Source: https://pe.usps.com/text/pub28/welcome.htm
"""
STREET_NAME_POST_ABBREVIATIONS = {
"ALLEE": "ALY",
"ALLEY": "ALY",
"ALLY": "ALY",
"ALY": "ALY",
"ANEX": "ANX",
"ANNEX": "ANX",
"ANNX": "ANX",
"ANX": "ANX",
"ARC": "ARC",
"ARC ": "ARC",
"ARCADE": "ARC",
"ARCADE ": "ARC",
"AV": "AVE",
"AVE": "AVE",
"AVEN": "AVE",
"AVENU": "AVE",
"AVENUE": "AVE",
"AVN": "AVE",
"AVNUE": "AVE",
"BAYOO": "BYU",
"BAYOU": "BYU",
"BCH": "BCH",
"BEACH": "BCH",
"BEND": "BND",
"BLF": "BLF",
"BLUF": "BLF",
"BLUFF": "BLF",
"BLUFFS": "BLFS",
"BLUFFS ": "BLFS",
"BLVD": "BLVD",
"BND": "BND",
"BOT": "BTM",
"BOTTM": "BTM",
"BOTTOM": "BTM",
"BOUL": "BLVD",
"BOULEVARD": "BLVD",
"BOULEVARD ": "BLVD",
"BOULV": "BLVD",
"BR": "BR",
"BRANCH": "BR",
"BRDGE": "BRG",
"BRG": "BRG",
"BRIDGE": "BRG",
"BRK": "BRK",
"BRNCH": "BR",
"BROOK": "BRK",
"BROOKS": "BRKS",
"BROOKS ": "BRKS",
"BTM": "BTM",
"BURG": "BG",
"BURGS": "BGS",
"BYP": "BYP",
"BYPA": "BYP",
"BYPAS": "BYP",
"BYPASS": "BYP",
"BYPS": "BYP",
"CAMP": "CP",
"CANYN": "CYN",
"CANYON": "CYN",
"CAPE": "CPE",
"CAUSEWAY": "CSWY",
"CAUSWA": "CSWY",
"CAUSWAY": "CSWY",
"CEN": "CTR",
"CENT": "CTR",
"CENTER": "CTR",
"CENTERS": "CTRS",
"CENTERS ": "CTRS",
"CENTR": "CTR",
"CENTRE": "CTR",
"CIR": "CIR",
"CIRC": "CIR",
"CIRCL": "CIR",
"CIRCLE": "CIR",
"CIRCLES": "CIRS",
"CK": "CRK",
"CLB": "CLB",
"CLF": "CLF",
"CLFS": "CLFS",
"CLIFF": "CLF",
"CLIFFS": "CLFS",
"CLUB": "CLB",
"CMP": "CP",
"CNTER": "CTR",
"CNTR": "CTR",
"CNYN": "CYN",
"COMMON": "CMN",
"COMMONS": "CMNS",
"COR": "COR",
"CORNER": "COR",
"CORNERS": "CORS",
"CORS": "CORS",
"COURSE": "CRSE",
"COURT": "CT",
"COURTS": "CTS",
"COVE": "CV",
"COVES": "CVS",
"CP": "CP",
"CPE": "CPE",
"CR": "CRK",
"CRCL": "CIR",
"CRCLE": "CIR",
"CRECENT": "CRES",
"CREEK": "CRK",
"CRES": "CRES",
"CRESCENT": "CRES",
"CRESENT": "CRES",
"CREST": "CRST",
"CRK": "CRK",
"CROSSING": "XING",
"CROSSING ": "XING",
"CROSSROAD": "XRD",
"CROSSROADS": "XRDS",
"CRSCNT": "CRES",
"CRSE": "CRSE",
"CRSENT": "CRES",
"CRSNT": "CRES",
"CRSSING": "XING",
"CRSSNG": "XING",
"CRSSNG ": "XING",
"CRT": "CT",
"CSWY": "CSWY",
"CT": "CT",
"CTR": "CTR",
"CTS": "CTS",
"CURVE": "CURV",
"CURVE ": "CURV",
"CV": "CV",
"CYN": "CYN",
"DALE": "DL",
"DALE ": "DL",
"DAM": "DM",
"DAM ": "DM",
"DIV": "DV",
"DIVIDE": "DV",
"DL": "DL",
"DL ": "DL",
"DM": "DM",
"DM ": "DM",
"DR": "DR",
"DRIV": "DR",
"DRIVE": "DR",
"DRIVES": "DRS",
"DRV": "DR",
"DV": "DV",
"DVD": "DV",
"EST": "EST",
"ESTATE": "EST",
"ESTATES": "ESTS",
"ESTS": "ESTS",
"EXP": "EXPY",
"EXPR": "EXPY",
"EXPRESS": "EXPY",
"EXPRESSWAY": "EXPY",
"EXPW": "EXPY",
"EXPY": "EXPY",
"EXT": "EXT",
"EXTENSION": "EXT",
"EXTENSIONS": "EXTS",
"EXTN": "EXT",
"EXTNSN": "EXT",
"EXTS": "EXTS",
"FALL": "FALL",
"FALLS": "FLS",
"FERRY": "FRY",
"FIELD": "FLD",
"FIELDS": "FLDS",
"FLAT": "FLT",
"FLATS": "FLTS",
"FLD": "FLD",
"FLDS": "FLDS",
"FLS": "FLS",
"FLT": "FLT",
"FLTS": "FLTS",
"FORD": "FRD",
"FORDS": "FRDS",
"FOREST": "FRST",
"FORESTS": "FRST",
"FORG": "FRG",
"FORGE": "FRG",
"FORGES": "FRGS",
"FORK": "FRK",
"FORKS": "FRKS",
"FORT": "FT",
"FRD": "FRD",
"FREEWAY": "FWY",
"FREEWY": "FWY",
"FRG": "FRG",
"FRK": "FRK",
"FRKS": "FRKS",
"FRRY": "FRY",
"FRST": "FRST",
"FRT": "FT",
"FRWAY": "FWY",
"FRWY": "FWY",
"FRY": "FRY",
"FT": "FT",
"FWY": "FWY",
"GARDEN": "GDN",
"GARDENS": "GDNS",
"GARDN": "GDN",
"GATEWAY": "GTWY",
"GATEWY": "GTWY",
"GATWAY": "GTWY",
"GDN": "GDN",
"GDNS": "GDNS",
"GLEN": "GLN",
"GLENS": "GLNS",
"GLN": "GLN",
"GRDEN": "GDN",
"GRDN": "GDN",
"GRDNS": "GDNS",
"GREEN": "GRN",
"GREENS": "GRNS",
"GRN": "GRN",
"GROV": "GRV",
"GROVE": "GRV",
"GROVES": "GRVS",
"GRV": "GRV",
"GTWAY": "GTWY",
"GTWY": "GTWY",
"HARB": "HBR",
"HARBOR": "HBR",
"HARBORS": "HBRS",
"HARBR": "HBR",
"HAVEN": "HVN",
"HAVN": "HVN",
"HBR": "HBR",
"HEIGHT": "HTS",
"HEIGHTS": "HTS",
"HGTS": "HTS",
"HIGHWAY": "HWY",
"HIGHWY": "HWY",
"HILL": "HL",
"HILLS": "HLS",
"HIWAY": "HWY",
"HIWY": "HWY",
"HL": "HL",
"HLLW": "HOLW",
"HLS": "HLS",
"HOLLOW": "HOLW",
"HOLLOWS": "HOLW",
"HOLW": "HOLW",
"HOLWS": "HOLW",
"HRBOR": "HBR",
"HT": "HTS",
"HTS": "HTS",
"HVN": "HVN",
"HWAY": "HWY",
"HWY": "HWY",
"INLET": "INLT",
"INLT": "INLT",
"IS": "IS",
"ISLAND": "IS",
"ISLANDS": "ISS",
"ISLE": "ISLE",
"ISLES": "ISLE",
"ISLND": "IS",
"ISLNDS": "ISS",
"ISS": "ISS",
"JCT": "JCT",
"JCTION": "JCT",
"JCTN": "JCT",
"JCTNS": "JCTS",
"JCTS": "JCTS",
"JUNCTION": "JCT",
"JUNCTIONS": "JCTS",
"JUNCTN": "JCT",
"JUNCTON": "JCT",
"KEY": "KY",
"KEYS": "KYS",
"KNL": "KNL",
"KNLS": "KNLS",
"KNOL": "KNL",
"KNOLL": "KNL",
"KNOLLS": "KNLS",
"KY": "KY",
"KYS": "KYS",
"LA": "LN",
"LAKE": "LK",
"LAKES": "LKS",
"LAND": "LAND",
"LANDING": "LNDG",
"LANE": "LN",
"LANES": "LN",
"LCK": "LCK",
"LCKS": "LCKS",
"LDG": "LDG",
"LDGE": "LDG",
"LF": "LF",
"LGT": "LGT",
"LIGHT": "LGT",
"LIGHTS": "LGTS",
"LK": "LK",
"LKS": "LKS",
"LN": "LN",
"LNDG": "LNDG",
"LNDNG": "LNDG",
"LOAF": "LF",
"LOCK": "LCK",
"LOCKS": "LCKS",
"LODG": "LDG",
"LODGE": "LDG",
"LOOP": "LOOP",
"LOOPS": "LOOP",
"MALL": "MALL",
"MANOR": "MNR",
"MANORS": "MNRS",
"MDW": "MDW",
"MDWS": "MDWS",
"MEADOW": "MDW",
"MEADOWS": "MDWS",
"MEDOWS": "MDWS",
"MEWS": "MEWS",
"MILL": "ML",
"MILLS": "MLS",
"MISSION": "MSN",
"MISSN": "MSN",
"ML": "ML",
"MLS": "MLS",
"MNR": "MNR",
"MNRS": "MNRS",
"MNT": "MT",
"MNTAIN": "MTN",
"MNTN": "MTN",
"MNTNS": "MTNS",
"MOTORWAY": "MTWY",
"MOUNT": "MT",
"MOUNTAIN": "MTN",
"MOUNTAINS": "MTNS",
"MOUNTIN": "MTN",
"MSN": "MSN",
"MSSN": "MSN",
"MT": "MT",
"MTIN": "MTN",
"MTN": "MTN",
"NCK": "NCK",
"NECK": "NCK",
"ORCH": "ORCH",
"ORCHARD": "ORCH",
"ORCHRD": "ORCH",
"OVAL": "OVAL",
"OVERPASS": "OPAS",
"OVL": "OVAL",
"PARK": "PARK",
"PARKS": "PARK",
"PARKWAY": "PKWY",
"PARKWAYS": "PKWY",
"PARKWY": "PKWY",
"PASS": "PASS",
"PASSAGE": "PSGE",
"PATH": "PATH",
"PATHS": "PATH",
"PIKE": "PIKE",
"PIKES": "PIKE",
"PINE": "PNE",
"PINES": "PNES",
"PK": "PARK",
"PKWAY": "PKWY",
"PKWY": "PKWY",
"PKWYS": "PKWY",
"PKY": "PKWY",
"PL": "PL",
"PLACE": "PL",
"PLAIN": "PLN",
"PLAINES": "PLNS",
"PLAINS": "PLNS",
"PLAZA": "PLZ",
"PLN": "PLN",
"PLNS": "PLNS",
"PLZ": "PLZ",
"PLZA": "PLZ",
"PNES": "PNES",
"POINT": "PT",
"POINTS": "PTS",
"PORT": "PRT",
"PORTS": "PRTS",
"PR": "PR",
"PRAIRIE": "PR",
"PRARIE": "PR",
"PRK": "PARK",
"PRR": "PR",
"PRT": "PRT",
"PRTS": "PRTS",
"PT": "PT",
"PTS": "PTS",
"RAD": "RADL",
"RADIAL": "RADL",
"RADIEL": "RADL",
"RADL": "RADL",
"RAMP": "RAMP",
"RANCH": "RNCH",
"RANCHES": "RNCH",
"RAPID": "RPD",
"RAPIDS": "RPDS",
"RD": "RD",
"RDG": "RDG",
"RDGE": "RDG",
"RDGS": "RDGS",
"RDS": "RDS",
"REST": "RST",
"RIDGE": "RDG",
"RIDGES": "RDGS",
"RIV": "RIV",
"RIVER": "RIV",
"RIVR": "RIV",
"RNCH": "RNCH",
"RNCHS": "RNCH",
"ROAD": "RD",
"ROADS": "RDS",
"ROUTE": "RTE",
"ROW": "ROW",
"RPD": "RPD",
"RPDS": "RPDS",
"RST": "RST",
"RUE": "RUE",
"RUN": "RUN",
"RVR": "RIV",
"SHL": "SHL",
"SHLS": "SHLS",
"SHOAL": "SHL",
"SHOALS": "SHLS",
"SHOAR": "SHR",
"SHOARS": "SHRS",
"SHORE": "SHR",
"SHORES": "SHRS",
"SHR": "SHR",
"SHRS": "SHRS",
"SKYWAY": "SKWY",
"SMT": "SMT",
"SPG": "SPG",
"SPGS": "SPGS",
"SPNG": "SPG",
"SPNGS": "SPGS",
"SPRING": "SPG",
"SPRINGS": "SPGS",
"SPRNG": "SPG",
"SPRNGS": "SPGS",
"SPUR": "SPUR",
"SPURS": "SPUR",
"SQ": "SQ",
"SQR": "SQ",
"SQRE": "SQ",
"SQRS": "SQS",
"SQU": "SQ",
"SQUARE": "SQ",
"SQUARES": "SQS",
"ST": "ST",
"STA": "STA",
"STATION": "STA",
"STATN": "STA",
"STN": "STA",
"STR": "ST",
"STRA": "STRA",
"STRAV": "STRA",
"STRAVE": "STRA",
"STRAVEN": "STRA",
"STRAVENUE": "STRA",
"STRAVN": "STRA",
"STREAM": "STRM",
"STREET": "ST",
"STREETS": "STS",
"STREME": "STRM",
"STRM": "STRM",
"STRT": "ST",
"STRVN": "STRA",
"STRVNUE": "STRA",
"SUMIT": "SMT",
"SUMITT": "SMT",
"SUMMIT": "SMT",
"TER": "TER",
"TERR": "TER",
"TERRACE": "TER",
"THROUGHWAY": "TRWY",
"TPK": "TPKE",
"TPKE": "TPKE",
"TR": "TRL",
"TRACE": "TRCE",
"TRACES": "TRCE",
"TRACK": "TRAK",
"TRACKS": "TRAK",
"TRAFFICWAY": "TRFY",
"TRAIL": "TRL",
"TRAILER": "TRLR",
"TRAILS": "TRL",
"TRAK": "TRAK",
"TRCE": "TRCE",
"TRFY": "TRFY",
"TRK": "TRAK",
"TRKS": "TRAK",
"TRL": "TRL",
"TRLR": "TRLR",
"TRLRS": "TRLR",
"TRLS": "TRL",
"TRNPK": "TPKE",
"TRPK": "TPKE",
"TUNEL": "TUNL",
"TUNL": "TUNL",
"TUNLS": "TUNL",
"TUNNEL": "TUNL",
"TUNNELS": "TUNL",
"TUNNL": "TUNL",
"TURNPIKE": "TPKE",
"TURNPK": "TPKE",
"UN": "UN",
"UNDERPASS": "UPAS",
"UNION": "UN",
"UNIONS": "UNS",
"VALLEY": "VLY",
"VALLEYS": "VLYS",
"VALLY": "VLY",
"VDCT": "VIA",
"VIA": "VIA",
"VIADCT": "VIA",
"VIADUCT": "VIA",
"VIEW": "VW",
"VIEWS": "VWS",
"VILL": "VLG",
"VILLAG": "VLG",
"VILLAGE": "VLG",
"VILLAGES": "VLGS",
"VILLE": "VL",
"VILLG": "VLG",
"VILLIAGE": "VLG",
"VIS": "VIS",
"VIST": "VIS",
"VISTA": "VIS",
"VL": "VL",
"VLG": "VLG",
"VLGS": "VLGS",
"VLLY": "VLY",
"VLY": "VLY",
"VLYS": "VLYS",
"VST": "VIS",
"VSTA": "VIS",
"VW": "VW",
"VWS": "VWS",
"WALK": "WALK",
"WALKS": "WALK",
"WALL": "WALL",
"WAY": "WAY",
"WAYS": "WAYS",
"WELL": "WL",
"WELLS": "WLS",
"WLS": "WLS",
"WY": "WAY",
"XING": "XING",
"XING ": "XING"
}
# Even though we don't care about normalizing the state names themselves,
# state names may appear inside of street names (i.e. Kentucky Highway).
STATE_ABBREVIATIONS = {
'ALABAMA': 'AL',
'ALA': 'AL',
'ALASKA': 'AK',
'ALAS': 'AK',
'ARIZONA': 'AZ',
'ARIZ': 'AZ',
'ARKANSAS': 'AR',
'ARK': 'AR',
'CALIFORNIA': 'CA',
'CALIF': 'CA',
'CAL': 'CA',
'COLORADO': 'CO',
'COLO': 'CO',
'COL': 'CO',
'CONNECTICUT': 'CT',
'CONN': 'CT',
'DELAWARE': 'DE',
'DEL': 'DE',
'DISTRICT OF COLUMBIA': 'DC',
'FLORIDA': 'FL',
'FLA': 'FL',
'FLOR': 'FL',
'GEORGIA': 'GA',
'GA': 'GA',
'HAWAII': 'HI',
'IDAHO': 'ID',
'IDA': 'ID',
'ILLINOIS': 'IL',
'ILL': 'IL',
'INDIANA': 'IN',
'IND': 'IN',
'IOWA': 'IA',
'KANSAS': 'KS',
'KANS': 'KS',
'KAN': 'KS',
'KENTUCKY': 'KY',
'KEN': 'KY',
'KENT': 'KY',
'LOUISIANA': 'LA',
'MAINE': 'ME',
'MARYLAND': 'MD',
'MASSACHUSETTS': 'MA',
'MASS': 'MA',
'MICHIGAN': 'MI',
'MICH': 'MI',
'MINNESOTA': 'MN',
'MINN': 'MN',
'MISSISSIPPI': 'MS',
'MISS': 'MS',
'MISSOURI': 'MO',
'MONTANA': 'MT',
'MONT': 'MT',
'NEBRASKA': 'NE',
'NEBR': 'NE',
'NEB': 'NE',
'NEVADA': 'NV',
'NEV': 'NV',
'NEW HAMPSHIRE': 'NH',
'NEW JERSEY': 'NJ',
'NEW MEXICO': 'NM',
'N MEX': 'NM',
'NEW M': 'NM',
'NEW YORK': 'NY',
'NORTH CAROLINA': 'NC',
'NORTH DAKOTA': 'ND',
'N DAK': 'ND',
'OHIO': 'OH',
'OKLAHOMA': 'OK',
'OKLA': 'OK',
'OREGON': 'OR',
'OREG': 'OR',
'ORE': 'OR',
'PENNSYLVANIA': 'PA',
'PENN': 'PA',
'RHODE ISLAND': 'RI',
'SOUTH CAROLINA': 'SC',
'SOUTH DAKOTA': 'SD',
'S DAK': 'SD',
'TENNESSEE': 'TN',
'TENN': 'TN',
'TEXAS': 'TX',
'TEX': 'TX',
'UTAH': 'UT',
'VERMONT': 'VT',
'VIRGINIA': 'VA',
'WASHINGTON': 'WA',
'WASH': 'WA',
'WEST VIRGINIA': 'WV',
'W VA': 'WV',
'WISCONSIN': 'WI',
'WIS': 'WI',
'WISC': 'WI',
'WYOMING': 'WY',
'WYO': 'WY'
}
STREET_NAME_ABBREVIATIONS = {
'COUNTY HWY': 'COUNTY HIGHWAY',
'CNTY HWY': 'COUNTY HIGHWAY',
'COUNTY RD': 'COUNTY ROAD',
'CR': 'COUNTY ROAD',
'CNTY RD': 'COUNTY ROAD',
'CORD': 'COUNTY ROAD',
'CO. RD': 'COUNTY ROAD',
'CO RD': 'COUNTY ROAD',
'CR-': 'COUNTY ROAD',
'CR #': 'COUNTY ROAD',
'CNTY. RD': 'COUNTY ROAD',
'CR.': 'COUNTY ROAD',
'FARM TO MARKET': 'FM',
'HWY FM': 'FM',
'HIWAY': 'HIGHWAY',
'HWY': 'HIGHWAY',
'FRONTAGE ROAD': 'FRONTAGE RD',
'BYPASS': 'BYP',
'BYP RD': 'BYPASS RD',
'INTERSTATE HWY': 'INTERSTATE',
'IH': 'INTERSTATE',
'I': 'INTERSTATE', #to account for cases like I10 OR I 55
'RD': 'ROAD',
'RT': 'ROUTE',
'RTE': 'ROUTE',
'RANCH ROAD': 'RANCH ROAD',
'ST HWY': 'STATE HIGHWAY',
'STHWY': 'STATE HIGHWAY',
'ST-HWY': 'STATE HIGHWAY',
'ST.HWY.': 'STATE HIGHWAY',
'STATE HIGH WAY': 'STATE HIGHWAY',
'S HWY': 'STATE HIGHWAY',
'ST HIGHWAY': 'STATE HIGHWAY',
'STATE HWY': 'STATE HIGHWAY',
'SR': 'STATE ROAD',
'ST RT': 'STATE ROUTE',
'STATE RTE': 'STATE ROUTE',
'TSR': 'TOWNSHIP ROAD',
'TWP HWY': 'TOWNSHIP HIGHWAY',
'TWN HWY': 'TOWNSHIP HIGHWAY',
'TNHW': 'TOWNSHIP HIGHWAY',
'US': 'US HIGHWAY',
'US HWY' : 'US HIGHWAY',
'USHWY' : 'US HIGHWAY',
'US HWY': 'US HIGHWAY',
'US-HWY': 'US HIGHWAY',
'US.HWY.': 'US HIGHWAY',
'PR': 'PRIVATE ROAD',
}
# Can be used for pre and post directional info
DIRECTIONAL_ABBREVIATIONS = {
'EAST': 'E',
'WEST': 'W',
'NORTH': 'N',
'SOUTH': 'S',
'NORTHEAST': 'NE',
'NORTHWEST': 'NW',
'SOUTHEAST': 'SE',
'SOUTHWEST': 'SW',
"NORTE": "N",
"NO": "N",
"NORESTE": "NE",
"NOROESTE": "NW",
"SUR": "S",
"SO": "S",
"SURESTE": "SE",
"SUROESTE": "SW",
"ESTE": "E",
"EA": "E",
"OESTE": "W",
"WE": "W"
}
#From USPS "C2 Secondary Unit Designators"
#Subaddress Type/WSDESC1 (?)
OCCUPANCY_TYPE_ABBREVIATIONS = {
'APARTMENT': 'APT',
'BUILDING': 'BLDG',
'BASEMENT': 'BSMT',
'DEPARTMENT': 'DEPT',
'FLOOR': 'FL',
'FRONT': 'FRNT',
'HANGER': 'HNGR',
'KEY': 'KEY',
'LOBBY': 'LBBY',
'LOT': 'LOT',
'LOWER': 'LOWR',
'OFFICE': 'OFC',
'PENTHOUSE': 'PH',
'PIER': 'PIER',
'REAR': 'REAR',
'ROOM': 'RM',
'SIDE': 'SIDE',
'SLIP': 'SLIP',
'SPACE': 'SPC',
'STOP': 'STOP',
'SUITE': 'STE',
'TRAILER': 'TRLR',
'UNIT': 'UNIT',
'UPPER': 'UPPER'
}
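# Illustrative helper (not part of the original module), showing one way these lookup tables
# might be combined to normalize a simple "<name> <street type> <direction>" string. It is a
# minimal sketch, not the project's actual normalization logic.
def _normalize_street_suffix_and_direction(street):
    """Uppercase the street, abbreviate a trailing street type and directional if present."""
    tokens = street.upper().split()
    # Abbreviate a trailing directional (e.g. "NORTHWEST" -> "NW").
    if tokens and tokens[-1] in DIRECTIONAL_ABBREVIATIONS:
        tokens[-1] = DIRECTIONAL_ABBREVIATIONS[tokens[-1]]
    # Abbreviate the right-most street-type word (e.g. "STREET" -> "ST").
    for i in range(len(tokens) - 1, -1, -1):
        if tokens[i] in STREET_NAME_POST_ABBREVIATIONS:
            tokens[i] = STREET_NAME_POST_ABBREVIATIONS[tokens[i]]
            break
    return " ".join(tokens)

# Example: _normalize_street_suffix_and_direction("Main Street Northwest") -> "MAIN ST NW"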
DIRECTION_CODES = {
"N": 1,
"S": 2,
"E": 3,
"W": 4,
"NE": 5,
"NW": 6,
"SE": 7,
"SW": 8
}
EXTENSION_CODES = {
"EXTD": 1,
"EXTN": 2,
"LP": 3,
"BYP": 4,
"ALT": 5,
"BUS": 6,
"OLD": 7,
"SPUR": 8
}
STREET_TYPE_CODES = {
"ALY": 11,
"ALT": 12,
"ARC": 15,
"ARRY": 16,
"APTA": 17,
"AVA": 18,
"AVE": 19,
"BLVD": 26,
"BLV": 32,
"BSRT": 33,
"BYP": 34,
"CLLE": 36,
"CJA": 37,
"CJON": 38,
"CAM": 39,
"CARR": 47,
"CSWY": 48,
"CTR": 51,
"CIR": 57,
"CORD": 70,
"CT": 71,
"CV": 73,
"CRES": 76,
"XING": 77,
"CRU": 78,
"DR": 87,
"EXP": 93,
"EXPY": 94,
"FM": 99,
"4WD": 110,
"FWY": 112,
"HWY": 122,
"I-": 133,
"JPTR": 138,
"LN": 146,
"LOOP": 151,
"MARG": 154,
"MTWY": 164,
"MRO": 167,
"OVPS": 178,
"PARK": 179,
"PKY": 180,
"PAS": 182,
"PSO": 183,
"PASS": 185,
"PATH": 187,
"PIKE": 189,
"PSTA": 191,
"PL": 192,
"PLZ": 193,
"PTE": 202,
"RML": 208,
"RMP": 210,
"ROAD": 223,
"RT": 227,
"ROW": 228,
"RUE": 229,
"RUN": 230,
"RUTA": 232,
"SNDR": 239,
"SVRD": 240,
"SKWY": 248,
"SPWY": 253,
"SQ": 256,
"STHY": 259,
"ST": 263,
"TER": 268,
"THFR": 269,
"THWY": 270,
"TWHY": 273,
"TFWY": 274,
"TRL": 275,
"TUN": 278,
"TUNL": 279,
"TPKE": 280,
"UNPS": 281,
"USHY": 283,
"UNRD": 286,
"VRDA": 288,
"VIA": 289,
"WALK": 291,
"WKWY": 292,
"WALL": 293,
"WAY": 296,
"NFD": 302,
"OVAL": 303,
"EST": 304,
"VLLA": 305,
"DRWY": 306,
"RDWY": 307,
"STRA": 308,
"CLUB": 309,
"CTS": 310,
"JCT": 311,
"LNDG": 312,
"LDGE": 313,
"MALL": 314,
"MNR": 315,
"STA": 316,
"VLG": 317,
"CORS": 318,
"COMN": 319,
"PRRD": 320,
"EMS": 321
}
#Frankly, I don't know when we actually use this
Building_Codes = {
"AFB": 1,
"ARPT": 2,
"APTS": 3,
"ARC": 4,
"BAZR": 5,
"BLDG": 6,
"BSPK": 7,
"CTR": 8,
"CLUB": 9,
"CLTN": 10,
"CMMN": 11,
"CPLX": 12,
"COND": 13,
"CNCN": 14,
"CORS": 15,
"CTHS": 16,
"CTS": 17,
"CTYD": 18,
"XING": 19,
"XRDS": 20,
"EDIF": 21,
"ESP": 22,
"EXCH": 23,
"FEST": 24,
"GALR": 25,
"HALL": 26,
"HOME": 27,
"HOSP": 28,
"HOTEL": 29,
"HOUSE": 30,
"INPK": 31,
"INN": 32,
"JCT": 33,
"LNDG": 34,
"LDGE": 35,
"MALL": 36,
"MNR": 37,
"MKT": 38,
"MERC": 39,
"MTL": 40,
"NAS": 41,
"OFPK": 42,
"OTLT": 43,
"PARK": 44,
"PAVL": 45,
"PLNT": 46,
"PLZ": 47,
"PROM": 49,
"QTRS": 50,
"RES": 51,
"7 CO": 52,
"SC": 53,
"SQ": 54,
"STA": 55,
"STES": 56,
"TOWER": 57,
"TWNH": 58,
"TRPK": 59,
"VLLA": 60,
"VLG": 61,
"VIVI": 62,
"ESTS": 63,
"COLL": 64,
"COTT": 65,
"PROJ": 66,
"TORRE": 67
} |
the-stack_0_11049 | import nltk
class Analyzer():
"""Implements sentiment analysis."""
def __init__(self, positives, negatives):
"""Initialize Analyzer."""
# load positive and negative words
# Set, list or dict?:
# http://stackoverflow.com/questions/3489071/in-python-when-to-use-a-dictionary-list-or-set
# In this case, only having a particular value matters and not the order. Hence use set()
self.positives = set()
self.negatives = set()
# "with open" opens the file and automatically closes it once the block finishes
with open ("positive-words.txt", "r") as fpos:
for line in fpos:
# Python uses 'not' where C would use '!'
# http://stackoverflow.com/questions/6117733/negation-in-python
if not line.startswith((";", " ")):
# Standardization that every text files has "\n" for every end of line
self.positives.add(line.strip("\n"))
with open ("negative-words.txt", "r") as fneg:
for line in fneg:
if not line.startswith((";", " ")):
self.negatives.add(line.strip("\n"))
def analyze(self, text):
"""Analyze text for sentiment, returning its score."""
# http://www.nltk.org/api/nltk.tokenize.html
# This breaks the lines into list of words
# and stores them as tokens
self.tokenizer = nltk.tokenize.TweetTokenizer()
tokens = self.tokenizer.tokenize(text)
ind = 0
# Cross check text with positive and negative list and returns appropriate indicator
for token in tokens:
# indicator for sentiment score
if token.lower() in self.positives:
ind += 1
elif token.lower() in self.negatives:
ind -= 1
return ind
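# Minimal usage sketch (not part of the original file); note that the constructor currently
# reads "positive-words.txt" / "negative-words.txt" from the working directory regardless of
# the arguments passed in:
#
#   analyzer = Analyzer("positive-words.txt", "negative-words.txt")
#   analyzer.analyze("great fantastic show")   # -> 2, if both adjectives are in the positive list
#   analyzer.analyze("what an awful mess")     # -> negative score, depending on the word lists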
|
the-stack_0_11050 | import numpy as np
from .trading_env import TradingEnv, Actions, Positions
class ForexEnv(TradingEnv):
def __init__(self, df, window_size, frame_bound, min_index_start, unit_side='left'):
assert len(frame_bound) == 2
assert unit_side.lower() in ['left', 'right']
self.frame_bound = frame_bound
self.unit_side = unit_side.lower()
self.min_index_start = min_index_start
super().__init__(df, window_size, min_index_start)
self.trade_fee = 0.0003 # unit
def _process_data(self):
prices = self.df.loc[:, 'Close'].to_numpy()
prices[self.frame_bound[0] - self.window_size] # validate index (TODO: Improve validation)
prices = prices[self.frame_bound[0]-self.window_size:self.frame_bound[1]]
diff = np.insert(np.diff(prices), 0, 0)
signal_features = np.column_stack((prices, diff))
return prices, signal_features
def _calculate_reward(self, action):
step_reward = 0 # pip
trade = False
if ((action == Actions.Buy.value and self._position == Positions.Short) or
(action == Actions.Sell.value and self._position == Positions.Long)):
trade = True
if trade:
current_price = self.prices[self._current_tick]
last_trade_price = self.prices[self._last_trade_tick]
price_diff = current_price - last_trade_price
if self._position == Positions.Short:
step_reward += -price_diff * 10000
elif self._position == Positions.Long:
step_reward += price_diff * 10000
return step_reward
def _update_profit(self, action):
trade = False
if ((action == Actions.Buy.value and self._position == Positions.Short) or
(action == Actions.Sell.value and self._position == Positions.Long)):
trade = True
if trade or self._done:
current_price = self.prices[self._current_tick]
last_trade_price = self.prices[self._last_trade_tick]
if self.unit_side == 'left':
if self._position == Positions.Short:
quantity = self._total_profit * (last_trade_price - self.trade_fee)
self._total_profit = quantity / current_price
elif self.unit_side == 'right':
if self._position == Positions.Long:
quantity = self._total_profit / last_trade_price
self._total_profit = quantity * (current_price - self.trade_fee)
def max_possible_profit(self):
current_tick = self._start_tick
last_trade_tick = current_tick - 1
profit = 1.
while current_tick <= self._end_tick:
position = None
if self.prices[current_tick] < self.prices[current_tick - 1]:
while (current_tick <= self._end_tick and
self.prices[current_tick] < self.prices[current_tick - 1]):
current_tick += 1
position = Positions.Short
else:
while (current_tick <= self._end_tick and
self.prices[current_tick] >= self.prices[current_tick - 1]):
current_tick += 1
position = Positions.Long
current_price = self.prices[current_tick - 1]
last_trade_price = self.prices[last_trade_tick]
if self.unit_side == 'left':
if position == Positions.Short:
quantity = profit * (last_trade_price - self.trade_fee)
profit = quantity / current_price
elif self.unit_side == 'right':
if position == Positions.Long:
quantity = profit / last_trade_price
profit = quantity * (current_price - self.trade_fee)
last_trade_tick = current_tick - 1
return profit
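# A minimal usage sketch, kept as a comment because the gym-style reset/step API
# lives in trading_env.py (not shown here); the DataFrame and parameters below
# are illustrative only:
#
#   import pandas as pd
#   df = pd.DataFrame({'Close': [1.10, 1.11, 1.12, 1.11, 1.13, 1.14]})
#   env = ForexEnv(df, window_size=2, frame_bound=(2, len(df)),
#                  min_index_start=0, unit_side='left')
#   observation = env.reset()
#   observation, reward, done, info = env.step(Actions.Buy.value)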
|
the-stack_0_11051 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lib.actions import OrionBaseAction
from lib.utils import send_user_error
class GetNodeCustomProperty(OrionBaseAction):
def run(self, node, custom_property):
"""
Gets a specific Node Custom Property.
"""
self.connect()
orion_node = self.get_node(node)
if not orion_node.npm:
msg = "Node ({}) does not exist".format(node)
send_user_error(msg)
raise ValueError(msg)
swql = """SELECT {1}
FROM Orion.NodesCustomProperties
WHERE NodeID={0}""".format(orion_node.npm_id, custom_property)
data = self.query(swql)
if 'results' not in data:
msg = "No results from Orion: {}".format(data)
self.logger.info(msg)
raise Exception(msg)
if len(data['results']) == 1:
results = data['results'][0]
return results.get(custom_property)
elif len(data['results']) >= 2:
self.logger.debug(
"Muliple Properties match '{}'".format(node))
raise ValueError("Muliple Properties match '{}'".format(node))
|
the-stack_0_11052 | def selecao(a, b, c, d):
if (b > c) and (d > a) and ((c+d) > (a+b)) and (c > 0) and (d > 0) and (a % 2 == 0):
return print('Valores aceitos')
else:
return print('Valores nao aceitos')
def entrada():
valores = input().split(' ')
valor_a = int(valores[0])
valor_b = int(valores[1])
valor_c = int(valores[2])
valor_d = int(valores[3])
return valor_a, valor_b, valor_c, valor_d
n1, n2, n3, n4 = entrada()
selecao(n1, n2, n3, n4)
|
the-stack_0_11055 | import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import *
from tensorflow.keras import regularizers
import numpy as np
#tf.enable_eager_execution() #added. so as to be able to use numpy arrays easily
def limit_mem():
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
tf.compat.v1.InteractiveSession(config=config)
class PeriodicPadding2D(tf.keras.layers.Layer):
def __init__(self, pad_width, **kwargs):
super().__init__(**kwargs)
self.pad_width = pad_width
def call(self, inputs, **kwargs):
if self.pad_width == 0:
return inputs
inputs_padded = tf.concat(
[inputs[:, :, -self.pad_width:, :], inputs, inputs[:, :, :self.pad_width, :]], axis=2)
# Zero padding in the lat direction
inputs_padded = tf.pad(inputs_padded, [[0, 0], [self.pad_width, self.pad_width], [0, 0], [0, 0]])
return inputs_padded
def get_config(self):
config = super().get_config()
config.update({'pad_width': self.pad_width})
return config
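# A quick shape sketch for the layer above: with pad_width=1 the longitude axis
# (axis 2) is wrapped around and the latitude axis (axis 1) is zero-padded, so
#   x = tf.reshape(tf.range(12, dtype=tf.float32), (1, 3, 4, 1))
#   y = PeriodicPadding2D(1)(x)
# gives y.shape == (1, 5, 6, 1): 3 latitudes -> 5, 4 longitudes -> 6.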
class ChannelReLU2D(tf.keras.layers.Layer):
def __init__(self, relu_idxs, **kwargs):
super().__init__(**kwargs)
self.relu_idxs = relu_idxs if type(relu_idxs) is list else [relu_idxs]
def call(self, inputs, **kwargs):
if inputs.shape[-1] == len(self.relu_idxs):
return tf.nn.relu(inputs)
else:
channels = [inputs[..., i] for i in range(inputs.shape[-1])]
for i, t in enumerate(channels):
if i in self.relu_idxs:
channels[i] = tf.nn.relu(t)
return tf.stack(channels, -1)
def get_config(self):
config = super().get_config()
config.update({'relu_idxs': self.relu_idxs})
return config
class PeriodicConv2D(tf.keras.layers.Layer):
def __init__(self, filters,
kernel_size,
conv_kwargs={},
**kwargs, ):
super().__init__(**kwargs)
self.filters = filters
self.kernel_size = kernel_size
self.conv_kwargs = conv_kwargs
if type(kernel_size) is not int:
assert kernel_size[0] == kernel_size[1], 'PeriodicConv2D only works for square kernels'
kernel_size = kernel_size[0]
pad_width = (kernel_size - 1) // 2
self.padding = PeriodicPadding2D(pad_width)
self.conv = Conv2D(
filters, kernel_size, padding='valid', **conv_kwargs
)
def call(self, inputs):
return self.conv(self.padding(inputs))
def get_config(self):
config = super().get_config()
config.update({'filters': self.filters, 'kernel_size': self.kernel_size, 'conv_kwargs': self.conv_kwargs})
return config
class ChannelSlice(tf.keras.layers.Layer):
def __init__(self, n_out, **kwargs):
self.n_out = n_out
super().__init__(**kwargs)
def _slice(self, inputs):
# Input: [samples, lat, lon, filters]
return inputs[..., :self.n_out]
def __call__(self, inputs):
out = Lambda(self._slice)(inputs)
return out
def convblock(inputs, filters, kernel=3, stride=1, bn_position=None, l2=0,
use_bias=True, dropout=0, activation='relu'):
x = inputs
if bn_position == 'pre': x = BatchNormalization()(x)
x = PeriodicConv2D(
filters, kernel, conv_kwargs={
'kernel_regularizer': regularizers.l2(l2),
'use_bias': use_bias
}
)(x)
if bn_position == 'mid': x = BatchNormalization()(x)
x = LeakyReLU()(x) if activation == 'leakyrelu' else Activation(activation)(x)
if bn_position == 'post': x = BatchNormalization()(x)
if dropout > 0: x = Dropout(dropout)(x)
return x
def resblock(inputs, filters, kernel, bn_position=None, l2=0, use_bias=True,
dropout=0, skip=True, activation='relu', down=False, up=False):
x = inputs
if down:
x = MaxPooling2D()(x)
for i in range(2):
x = convblock(
x, filters, kernel, bn_position=bn_position, l2=l2, use_bias=use_bias,
dropout=dropout, activation=activation
)
if down or up:
inputs = PeriodicConv2D(
filters, kernel, conv_kwargs={
'kernel_regularizer': regularizers.l2(l2),
'use_bias': use_bias,
'strides': 2 if down else 1
}
)(inputs)
if skip: x = Add()([inputs, x])
return x
def build_uresnet(filters, kernels, unres, input_shape, bn_position=None, use_bias=True, l2=0,
skip=True, dropout=0, activation='relu', **kwargs):
"""
filters
0: init Conv2D
1: first and last resblock
[2:-1]: all down layers
-1: last conv2d
    unres has to have len(filters) - 2 entries
    """
    # If a single repetition count is given, repeat it so every level gets one.
    if len(unres) == 1: unres = unres * (len(filters) - 2)
x = input = Input(shape=input_shape)
# First conv block to get up to shape
x = convblock(
x, filters[0], kernels[0], bn_position=bn_position, l2=l2, use_bias=use_bias,
dropout=dropout, activation=activation
)
# Resblocks
for _ in range(unres[0]):
x = resblock(x, filters[1], kernels[1], bn_position=bn_position, l2=l2, use_bias=use_bias,
dropout=dropout, skip=skip, activation=activation)
connections = []
for f, k, nr in zip(filters[2:-1], kernels[2:-1], unres[1:]):
connections.append(x)
for i in range(nr):
x = resblock(x, f, k, bn_position=bn_position, l2=l2, use_bias=use_bias,
dropout=dropout, skip=skip, activation=activation, down=i == 0)
for c, f, k, nr in zip(connections[::-1], filters[1:-2][::-1], kernels[1:-2][::-1], unres[:-1][::-1]):
x = UpSampling2D()(x)
x = Concatenate()([c, x])
for i in range(nr):
x = resblock(x, f, k, bn_position=bn_position, l2=l2, use_bias=use_bias,
dropout=dropout, skip=skip, activation=activation, up=i == 0)
# Final convolution
output = PeriodicConv2D(
filters[-1], kernels[-1],
conv_kwargs={'kernel_regularizer': regularizers.l2(l2)},
)(x)
output = Activation('linear', dtype='float32')(output)
return keras.models.Model(input, output)
def build_resnet(filters, kernels, input_shape, bn_position=None, use_bias=True, l2=0,
skip=True, dropout=0, activation='relu', long_skip=False, relu_idxs=None,
categorical=False, nvars=None,
**kwargs):
x = input = Input(shape=input_shape)
# First conv block to get up to shape
x = ls = convblock(
x, filters[0], kernels[0], bn_position=bn_position, l2=l2, use_bias=use_bias,
dropout=dropout, activation=activation
)
# Resblocks
for f, k in zip(filters[1:-1], kernels[1:-1]):
x = resblock(x, f, k, bn_position=bn_position, l2=l2, use_bias=use_bias,
dropout=dropout, skip=skip, activation=activation)
if long_skip:
x = Add()([x, ls])
# Final convolution
output = PeriodicConv2D(
filters[-1], kernels[-1],
conv_kwargs={'kernel_regularizer': regularizers.l2(l2)},
)(x)
if not relu_idxs is None:
output = ChannelReLU2D(relu_idxs)(output)
if categorical:
bins = int(filters[-1] / nvars)
outputs = []
for i in range(nvars):
o = Softmax()(output[..., i*bins:(i+1)*bins])
outputs.append(o)
output = tf.stack(outputs, axis=3)
output = Activation('linear', dtype='float32')(output)
return keras.models.Model(input, output)
def build_unet(input_shape, n_layers, filters_start, channels_out, kernel=3, u_skip=True,
res_skip=True, l2=0, bn_position=None, dropout=0):
"https://github.com/Nishanksingla/UNet-with-ResBlock/blob/master/resnet34_unet_model.py"
x = input = Input(shape=input_shape)
filters = filters_start
# Down
down_layers = []
for i in range(n_layers):
# Resblock
x_res = PeriodicConv2D(
filters, 1, conv_kwargs={
'use_bias': False, 'kernel_regularizer': regularizers.l2(l2)})(x)
x = convblock(x, filters, kernel, bn_position=bn_position, l2=l2, dropout=dropout)
x = convblock(x, filters, kernel, bn_position=bn_position, l2=l2, activation='linear',
dropout=dropout)
if res_skip: x = Add()([x, x_res])
x = ReLU()(x)
if not i == n_layers - 1:
down_layers.append(x)
# Downsampling
x = MaxPooling2D()(x)
filters *= 2
# Up
for dl in reversed(down_layers):
filters //= 2
# Upsample
x = UpSampling2D()(x)
x = PeriodicConv2D(filters, 3, conv_kwargs={'kernel_regularizer': regularizers.l2(l2)})(x)
x = ReLU()(x)
# Concatenate
if u_skip:
x = Concatenate()([x, dl])
# Resblock
x_res = PeriodicConv2D(filters, 1, conv_kwargs={'use_bias': False})(x)
x = convblock(x, filters, kernel, bn_position=bn_position, l2=l2, dropout=dropout)
x = convblock(x, filters, kernel, bn_position=bn_position, l2=l2, activation='linear',
dropout=dropout)
if res_skip: x = Add()([x, x_res])
x = ReLU()(x)
x = PeriodicConv2D(channels_out, 1, conv_kwargs={'kernel_regularizer': regularizers.l2(l2)})(x)
return keras.models.Model(input, x)
def create_lat_mse(lat):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def lat_mse(y_true, y_pred):
error = y_true - y_pred
mse = error**2 * weights_lat[None, : , None, None]
return mse
return lat_mse
def create_lat_mae(lat):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def lat_mae(y_true, y_pred):
error = y_true - y_pred
mae = tf.abs(error) * weights_lat[None, : , None, None]
return mae
return lat_mae
def create_lat_rmse(lat):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def lat_rmse(y_true, y_pred):
error = y_true - y_pred
mse = error**2 * weights_lat[None, : , None, None]
return tf.math.sqrt(tf.math.reduce_mean(mse, axis=(1, 2, 3)))
return lat_rmse
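# A minimal sketch of wiring a model to the latitude-weighted losses above.
# Assumptions for illustration: `lat` is an xarray coordinate (the factories
# call `.values` on the cosine weights) and the grid/channel sizes are
# placeholders, e.g. a 32 x 64 grid with 10 input channels and 2 outputs:
#
#   model = build_resnet(filters=[64, 64, 2], kernels=[5, 3, 3],
#                        input_shape=(32, 64, 10))
#   model.compile(optimizer='adam', loss=create_lat_mse(lat),
#                 metrics=[create_lat_rmse(lat)])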
def create_lat_crps(lat, n_vars, relu=False):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def crps_loss(y_true, y_pred):
# Split input
mu = y_pred[:, :, :, :n_vars]
sigma = y_pred[:, :, :, n_vars:]
# To stop sigma from becoming negative we first have to
        # convert it to the variance and then take the square
# root again.
if relu:
sigma = tf.nn.relu(sigma)
else:
sigma = tf.math.sqrt(tf.math.square(sigma))
# The following three variables are just for convenience
loc = (y_true - mu) / tf.maximum(1e-7, sigma)
phi = 1.0 / np.sqrt(2.0 * np.pi) * tf.math.exp(-tf.math.square(loc) / 2.0)
Phi = 0.5 * (1.0 + tf.math.erf(loc / np.sqrt(2.0)))
# First we will compute the crps for each input/target pair
crps = sigma * (loc * (2. * Phi - 1.) + 2 * phi - 1. / np.sqrt(np.pi))
crps = crps * weights_lat[None, : , None, None]
# Then we take the mean. The cost is now a scalar
return tf.reduce_mean(crps)
return crps_loss
def gev_cdf_tf(y, mu, sigma, xi):
y = (y-mu)/sigma
x = 1 + xi * y
# x[x < 0] = 0
x = tf.where(x<0, 0., x)
x = x**(-1/xi)
return tf.where(tf.math.is_inf(tf.exp(-x)), 0., tf.exp(-x))
def crps_lcgev_tf(y, mu, sigma, xi, dtype='float32'):
SCdSH = sigma/xi
Gam1mSH = tf.exp(tf.math.lgamma(1-xi))
# print(Gam1mSH)
probY = gev_cdf_tf(y, mu, sigma, xi)
prob0 = gev_cdf_tf(tf.constant(0., dtype), mu, sigma, xi)
igammaY = tf.cast(tf.math.igamma(1-tf.cast(xi, 'float64'), -tf.math.log(tf.cast(probY, 'float64'))), dtype)
igamma0 = tf.cast(tf.math.igamma(1-tf.cast(xi, 'float64'), -2 * tf.math.log(tf.cast(prob0, 'float64'))), dtype)
T1 = (y-mu) * (2*probY-1) + mu * prob0**2
T2 = SCdSH * ( 1-prob0**2 - 2**xi*Gam1mSH*igamma0)
T3 = -2*SCdSH * ( 1-probY - Gam1mSH*igammaY)
# print(T1, T2, T3)
return T1 + T2 + T3
def create_lat_crps_lcgev(lat, n_vars):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def crps_lcgev_loss(y_true, y_pred):
mu, sigma, xi = y_pred[..., 0], y_pred[..., 1], y_pred[..., 2]
sigma = tf.nn.relu(sigma)
# Make sure xi isn't 0
# eps = 1e-7
# xi = tf.where(tf.abs(xi)<eps, eps, xi)
# # Keep xi in bounds
xi = tf.clip_by_value(xi, -0.278, 0.999)
# import pdb
# pdb.set_trace()
return crps_lcgev_tf(y_true[..., 0], mu, sigma, xi) * weights_lat[None, : , None]
return crps_lcgev_loss
def create_lat_crps_mae(lat, n_vars, beta=1.):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def crps_mae(y_true, y_pred):
### CRPS
# Split input
mu = y_pred[:, :, :, :n_vars]
sigma = y_pred[:, :, :, n_vars:]
# To stop sigma from becoming negative we first have to
        # convert it to the variance and then take the square
# root again.
sigma = tf.nn.relu(sigma)
# The following three variables are just for convenience
loc = (y_true - mu) / tf.maximum(1e-7, sigma)
phi = 1.0 / np.sqrt(2.0 * np.pi) * tf.math.exp(-tf.math.square(loc) / 2.0)
Phi = 0.5 * (1.0 + tf.math.erf(loc / np.sqrt(2.0)))
# First we will compute the crps for each input/target pair
crps = sigma * (loc * (2. * Phi - 1.) + 2 * phi - 1. / np.sqrt(np.pi))
crps = crps * weights_lat[None, : , None, None]
# Then we take the mean. The cost is now a scalar
crps = tf.reduce_mean(crps)
### MAE
error = y_true - mu
mae = tf.abs(error) * weights_lat[None, :, None, None]
mae = tf.reduce_mean(mae)
return crps + beta * mae
return crps_mae
def create_lat_log_loss(lat, n_vars):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def log_loss(y_true, y_pred):
# Split input
mu = y_pred[:, :, :, :n_vars]
sigma = y_pred[:, :, :, n_vars:]
sigma = tf.nn.relu(sigma)
# Compute PDF
eps = 1e-7
sigma = tf.maximum(eps, sigma)
prob = 1 / sigma / np.sqrt(2 * np.pi) * tf.math.exp(
-0.5 * ((y_true - mu) / sigma) ** 2
)
# Compute log loss
ll = - tf.math.log(tf.maximum(prob, eps))
ll = ll * weights_lat[None, :, None, None]
return tf.reduce_mean(ll)
return log_loss
def create_lat_categorical_loss(lat, n_vars):
weights_lat = np.cos(np.deg2rad(lat)).values
weights_lat /= weights_lat.mean()
def categorical_loss(y_true, y_pred):
cce = tf.keras.losses.categorical_crossentropy
loss = 0
for i in range(n_vars):
loss += cce(y_true[:,:,:,i,:], y_pred[:,:,:,i,:])*weights_lat[None, :, None]
return loss
return categorical_loss
# Agrawal et al version
def basic_block(x, filters, dropout):
shortcut = x
x = PeriodicConv2D(filters, kernel_size=3)(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = PeriodicConv2D(filters, kernel_size=3)(x)
if dropout > 0: x = Dropout(dropout)(x)
shortcut = PeriodicConv2D(filters, kernel_size=3)(shortcut)
return Add()([x, shortcut])
def downsample_block(x, filters, dropout):
shortcut = x
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = MaxPooling2D()(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = PeriodicConv2D(filters, kernel_size=3)(x)
if dropout > 0: x = Dropout(dropout)(x)
shortcut = PeriodicConv2D(filters, kernel_size=3, conv_kwargs={'strides': 2})(shortcut)
return Add()([x, shortcut])
def upsample_block(x, from_down, filters, dropout):
x = Concatenate()([x, from_down])
x = UpSampling2D()(x)
shortcut = x
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = PeriodicConv2D(filters, kernel_size=3)(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = PeriodicConv2D(filters, kernel_size=3)(x)
if dropout > 0: x = Dropout(dropout)(x)
shortcut = PeriodicConv2D(filters, kernel_size=3)(shortcut)
return Add()([x, shortcut])
def build_unet_google(filters, input_shape, output_channels, dropout=0):
inputs = x = Input(input_shape)
x = basic_block(x, filters[0], dropout=dropout)
# Encoder
from_down = []
for f in filters[:-1]:
x = downsample_block(x, f, dropout=dropout)
from_down.append(x)
# Bottleneck
x = basic_block(x, filters[-1], dropout=dropout)
# Decoder
for f, d in zip(filters[:-1][::-1], from_down[::-1]):
x = upsample_block(x, d, f, dropout=dropout)
# Final
outputs = PeriodicConv2D(output_channels, kernel_size=1)(x)
return keras.models.Model(inputs, outputs)
###
def create_multi_dt_model(model, multi_dt, dg_train):
const_inp = Input((len(dg_train.data.lat), len(dg_train.data.lon), len(dg_train.const_idxs)))
x = inp = Input((len(dg_train.data.lat), len(dg_train.data.lon), len(dg_train.not_const_idxs)))
outputs = []
for _ in range(multi_dt):
x = model(Concatenate()([x, const_inp]))
outputs.append(x)
model2 = keras.models.Model([inp, const_inp], outputs)
return model2
|
the-stack_0_11056 | import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
[9.779], [6.182], [7.59], [2.167], [7.042],
[10.791], [5.313], [7.997], [3.1]], dtype=np.float32)
y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
[3.366], [2.596], [2.53], [1.221], [2.827],
[3.465], [1.65], [2.904], [1.3]], dtype=np.float32)
# from sklearn.utils import check_random_state
# n = 100
# x = np.arange(n)
# rs = check_random_state(0)
# y = rs.randint(-50, 50, size=(n,)) + 50. * np.log1p(np.arange(n))
# x_train = np.array(x.reshape(-1, 1), dtype=np.float32)
# y_train = np.array(y.reshape(-1, 1), dtype=np.float32)
# numpy array to tensor
x_train = torch.from_numpy(x_train)
y_train = torch.from_numpy(y_train)
# define a linear regression model
class linear_regression(nn.Module):
def __init__(self):
super(linear_regression, self).__init__()
self.linear = nn.Linear(1, 1) # input and output is 1 dimension
def forward(self, x):
out = self.linear(x)
return out
model = linear_regression()
# define loss and optimization function
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
# start training
num_epochs = 1000
epoch = 0
loss = float('inf')
for epoch in range(1, 21):
inputs = x_train
target = y_train
# forward
out = model(inputs)
loss = criterion(out, target)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch % 20 == 0:
print(f'Epoch[{epoch}/{num_epochs}], loss: {loss.item(): .6f}')
# eval mode - prevent batchnorm and dropout operations
model.eval()
with torch.no_grad():
predictions = model(x_train)
predictions = predictions.data.numpy()
fig = plt.figure(figsize=(10, 5))
plt.plot(x_train.numpy(), y_train.numpy(), 'ro', label='Original data') # ro for red circles
plt.plot(x_train.numpy(), predictions, 'o-', color='#1f77b4', label='Fitting Line')
# show diagram
plt.legend()
plt.show()
# # save the model to file
PATH = './linear.pth'
# model parameters only
# torch.save(model.state_dict(), PATH)
# parameters needed for resuming training
torch.save({'epoch': epoch, 'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': loss}, PATH)
# load the model and continue training up to num_epochs (epochs 21 through 1000)
model = linear_regression()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
checkpoint = torch.load(PATH)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
print(f'last epoch is {epoch}, loss is {loss}')
criterion = nn.MSELoss()
# training mode
model.train()
# eval mode - prevent batchnorm and dropout operations
# model.eval()
for epoch in range(epoch+1, num_epochs+1):
inputs = x_train
target = y_train
# forward
out = model(inputs)
loss = criterion(out, target)
# backward
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch % 20 == 0:
print(f'Epoch[{epoch}/{num_epochs}], loss: {loss.item(): .6f}')
# eval mode - prevent batchnorm and dropout operations
model.eval()
with torch.no_grad():
predictions = model(x_train)
predictions = predictions.data.numpy()
fig = plt.figure(figsize=(10, 5))
plt.plot(x_train.numpy(), y_train.numpy(), 'ro', label='Original data') # ro for red circles
plt.plot(x_train.numpy(), predictions, 'o-', color='#1f77b4', label='Fitting Line')
plt.legend()
plt.show()
# -------------------------------------------------
# model save load another way (entire model) -- not recommand
# torch.save(model, './entire_model.pth')
# model = torch.load('./entire_model.pth')
|
the-stack_0_11057 | # SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
"""
Sends a post-release email
"""
from __future__ import print_function
from rez.release_hook import ReleaseHook
from rez.system import system
from email.mime.text import MIMEText
from rez.utils.logging_ import print_warning, print_error
from rez.utils.yaml import load_yaml
from rez.utils.scope import scoped_formatter
from rez.vendor.schema.schema import Or
from rez.vendor.six import six
import os.path
import smtplib
basestring = six.string_types[0]
class EmailReleaseHook(ReleaseHook):
schema_dict = {
"subject": basestring,
"body": basestring,
"smtp_host": basestring,
"smtp_port": int,
"sender": basestring,
"recipients": Or(basestring, [basestring])
}
@classmethod
def name(cls):
return "emailer"
def __init__(self, source_path):
super(EmailReleaseHook, self).__init__(source_path)
def post_release(self, user, install_path, variants, release_message=None,
changelog=None, previous_version=None, **kwargs):
if not variants:
return # nothing was released
# construct email body
release_dict = dict(path=install_path,
previous_version=previous_version or "None.",
message=release_message or "No release message.",
changelog=changelog or "No changelog.")
paths_str = '\n'.join(x.root for x in variants)
variants_dict = dict(count=len(variants),
paths=paths_str)
formatter = scoped_formatter(release=release_dict,
variants=variants_dict,
system=system,
package=self.package)
body = formatter.format(self.settings.body)
body = body.strip()
body = body.replace("\n\n\n", "\n\n")
# construct subject line, send email
subject = formatter.format(self.settings.subject)
self.send_email(subject, body)
def send_email(self, subject, body):
if not self.settings.recipients:
return # nothing to do, sending email to nobody
if not self.settings.smtp_host:
print_warning("did not send release email: "
"SMTP host is not specified")
return
recipients = self.get_recipients()
if not recipients:
return
print("Sending release email to:")
print('\n'.join("- %s" % x for x in recipients))
msg = MIMEText(body)
msg["Subject"] = subject
msg["From"] = self.settings.sender
msg["To"] = str(',').join(recipients)
try:
s = smtplib.SMTP(self.settings.smtp_host, self.settings.smtp_port)
s.sendmail(from_addr=self.settings.sender,
to_addrs=recipients,
msg=msg.as_string())
print('Email(s) sent.')
except Exception as e:
print_error("release email delivery failed: %s" % str(e))
def get_recipients(self):
value = self.settings.recipients
if isinstance(value, list):
return value
if os.path.exists(value):
filepath = value
try:
return self.load_recipients(filepath)
except Exception as e:
print_error("failed to load recipients config: %s. Emails "
"not sent" % str(e))
elif '@' in value:
return [value] # assume it's an email address
else:
print_error("email recipient file does not exist: %s. Emails not "
"sent" % value)
return []
def load_recipients(self, filepath):
def test(value, type_):
if not isinstance(value, type_):
                raise TypeError("Expected %s, not %s" % (type_, value))
return value
conf = load_yaml(filepath)
recipients = set()
for rule in test(conf.get("rules", []), list):
filters = rule.get("filters")
match = True
if filters:
for attr, test_value in test(filters, dict).items():
missing = object()
value = getattr(self.package, attr, missing)
if value is missing:
match = False
elif test_value is None:
match = True
elif isinstance(test_value, list):
match = (value in test_value)
else:
match = (value == test_value)
if not match:
break
if match:
rule_recipients = rule.get("recipients")
recipients.update(test(rule_recipients, list))
return sorted(recipients)
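# A sketch of the recipients config that load_recipients() parses above. The
# top-level "rules" list, the per-rule "filters" mapping and the "recipients"
# list are what the code reads; the attribute name and addresses are
# illustrative only:
#
#   rules:
#     - filters:
#         category: ["maya", "nuke"]
#       recipients:
#         - vfx-team@example.com
#     - recipients:
#         - releases@example.com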
def register_plugin():
return EmailReleaseHook
|
the-stack_0_11059 | #coding:utf-8
'''
filename:relationship_of_point_circle.py
chap:6
subject:8
conditions:Point(),Circle()
solution:relationship between circle and point
'''
from circle import Circle
from point import Point
import math
class Relationship:
def __init__(self,circle:Circle,point:Point):
self.c=circle
self.p=point
    def get_relation(self):
        distance = abs(self.c.center - self.p)
        # Check the boundary first so floating-point error near the radius still reads as 'on'
        if math.isclose(distance, self.c.r):
            rst = 'on'
        elif distance < self.c.r:
            rst = 'in'
        else:
            rst = 'outside'
        return 'The {!r} is {} the {!r}.'.format(self.p, rst, self.c)
def __str__(self):
return self.get_relation()
if __name__ == '__main__':
p1 = Point(0,0)
p2 = Point(1,0)
p3 = Point(1,1)
c = Circle(1,p2)
print(Relationship(c,p1))
print(Relationship(c,p2))
print(Relationship(c,p3))
|
the-stack_0_11060 | #!/usr/bin/python
"""
(C) Copyright 2018-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import re
import traceback
from daos_utils_base import DaosCommandBase
class DaosCommand(DaosCommandBase):
# pylint: disable=too-many-ancestors,too-many-public-methods
"""Defines a object representing a daos command."""
METHOD_REGEX = {
"run": r"(.*)",
"container_query":
r"Pool UUID:\s+([0-9a-f-]+)\n" +
r"Container UUID:\s+([0-9a-f-]+)\n" +
r"Number of snapshots:\s+(\d+)\n" +
r"Latest Persistent Snapshot:\s+(\d+)\n" +
r"Highest Aggregated Epoch:\s+(\d+)",
}
def pool_query(self, pool, sys_name=None, sys=None):
"""Query a pool.
Args:
pool (str): pool UUID
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
sys (str, optional): [description]. Defaults to None.
Returns:
dict: JSON output
Raises:
CommandFailure: if the daos pool query command fails.
"""
return self._get_json_result(
("pool", "query"), pool=pool, sys_name=sys_name, sys=sys)
def pool_autotest(self, pool):
"""Runs autotest for pool
Args:
pool (str): pool UUID
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos pool autotest command fails.
"""
return self._get_result(
("pool", "autotest"), pool=pool)
def container_create(self, pool, sys_name=None, cont=None,
path=None, cont_type=None, oclass=None,
chunk_size=None, properties=None, acl_file=None):
# pylint: disable=too-many-arguments
"""Create a container.
Args:
pool (str): UUID of the pool in which to create the container
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
cont (str, optional): container UUID. Defaults to None.
path (str, optional): container namespace path. Defaults to None.
cont_type (str, optional): the type of container to create. Defaults
to None.
oclass (str, optional): object class. Defaults to None.
chunk_size (str, optional): chunk size of files created. Supports
suffixes: K (KB), M (MB), G (GB), T (TB), P (PB), E (EB).
Defaults to None.
properties (str, optional): String of comma-separated <name>:<value>
pairs defining the container properties. Defaults to None
acl_file (str, optional): ACL file. Defaults to None.
Returns:
dict: the daos json command output converted to a python dictionary
Raises:
CommandFailure: if the daos container create command fails.
"""
return self._get_json_result(
("container", "create"), pool=pool, sys_name=sys_name,
cont=cont, path=path, type=cont_type, oclass=oclass,
chunk_size=chunk_size, properties=properties, acl_file=acl_file)
def container_clone(self, src, dst):
"""Clone a container to a new container.
Args:
src (str): the source, formatted as daos://<pool>/<cont>
dst (str): the destination, formatted as daos://<pool>/<cont>
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container clone command fails.
"""
return self._get_result(
("container", "clone"), src=src, dst=dst)
def container_destroy(self, pool, cont, force=None, sys_name=None):
"""Destroy a container.
Args:
pool (str): UUID of the pool in which to create the container
cont (str): container UUID.
force (bool, optional): Force the container destroy. Defaults to
None.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container destroy command fails.
"""
return self._get_result(
("container", "destroy"), pool=pool, sys_name=sys_name,
cont=cont, force=force)
def container_check(self, pool, cont, sys_name=None, path=None):
"""Check the integrity of container objects.
Args:
pool (str): UUID of the pool in which to create the container
cont (str): container UUID.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
path (str): Container namespace path. Defaults to None
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container check command fails.
"""
return self._get_result(
("container", "check"), pool=pool, cont=cont,
sys_name=sys_name, path=path)
def container_get_acl(self, pool, cont,
verbose=False, outfile=None):
"""Get the ACL for a given container.
Args:
pool (str): Pool UUID
cont (str): Container for which to get the ACL.
verbose (bool, optional): Verbose mode.
outfile (str, optional): Write ACL to file.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container get-acl command fails.
"""
return self._get_result(
("container", "get-acl"), pool=pool, cont=cont,
verbose=verbose, outfile=outfile)
def container_delete_acl(self, pool, cont, principal):
"""Delete an entry for a given principal in an existing container ACL.
Args:
pool (str): Pool UUID
cont (str): Container for which to get the ACL.
principal (str): principal portion of the ACL.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container delete-acl command fails.
"""
return self._get_result(
("container", "delete-acl"), pool=pool, cont=cont,
principal=principal)
def container_overwrite_acl(self, pool, cont, acl_file):
"""Overwrite the ACL for a given container.
Args:
pool (str): Pool UUID
cont (str): Container for which to get the ACL.
acl_file (str): input file containing ACL
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container overwrite-acl command fails.
"""
return self._get_result(
("container", "overwrite-acl"), pool=pool, cont=cont,
acl_file=acl_file)
def container_update_acl(self, pool, cont, entry=None, acl_file=None):
"""Add or update the ACL entries for a given container.
Args:
pool (str): Pool UUID
cont (str): Container for which to get the ACL.
entry (bool, optional): Add or modify a single ACL entry
acl_file (str, optional): Input file containing ACL
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container get-acl command fails.
"""
return self._get_result(
("container", "update-acl"), pool=pool, cont=cont,
entry=entry, acl_file=acl_file)
def container_list(self, pool, sys_name=None):
"""List containers in the given pool.
Args:
pool (str): Pool label or UUID
sys_name (str, optional): System name. Defaults to None.
Returns:
dict: JSON output
Raises:
CommandFailure: if the daos container list command fails.
"""
# Sample output.
# {
# "response": [
# {
# "UUID": "bad80a98-aabd-498c-b001-6547cd061c8c",
# "Label": "container_label_not_set"
# },
# {
# "UUID": "dd9fc365-5729-4736-9d34-e46504a4a92d",
# "Label": "mkc1"
# }
# ],
# "error": null,
# "status": 0
# }
return self._get_json_result(
("container", "list"), pool=pool, sys_name=sys_name)
def pool_set_attr(self, pool, attr, value, sys_name=None):
"""Set pool attribute.
Args:
pool (str): Pool UUID.
attr (str): Attribute name.
value (str): Attribute value.
sys_name (str): DAOS system name. Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos pool set-attr command fails.
"""
return self._get_result(
("pool", "set-attr"), pool=pool, attr=attr, value=value,
sys_name=sys_name)
def pool_get_attr(self, pool, attr, sys_name=None):
"""Set pool attribute.
Args:
pool (str): Pool UUID.
attr (str): Pool UUID.
sys_name (str): DAOS system name. Defaults to None.
Returns:
dict: JSON output
Raises:
CommandFailure: if the daos pool query command fails.
"""
return self._get_json_result(
("pool", "get-attr"), pool=pool, attr=attr, sys_name=sys_name)
def pool_list_attrs(self, pool, sys_name=None, verbose=False):
"""List pool attributes.
Args:
pool (str): Pool UUID.
sys_name (str): DAOS system name. Defaults to None.
verbose (bool): False - name only. True - name and value. Defaults
to False.
Returns:
dict: JSON output
Raises:
CommandFailure: if the daos pool list-attrs command fails.
"""
return self._get_json_result(
("pool", "list-attrs"), pool=pool, sys_name=sys_name,
verbose=verbose)
def container_query(self, pool, cont, sys_name=None):
"""Query a container.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
dict: JSON output
Raises:
CommandFailure: if the daos container query command fails.
"""
return self._get_json_result(
("container", "query"), pool=pool, cont=cont, sys_name=sys_name)
def container_set_prop(self, pool, cont, prop, value):
"""Call daos container set-prop.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
prop (str): Container property-name.
value (str): Container property-name value to set.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container set-prop command fails.
"""
prop_value = ":".join([prop, value])
return self._get_result(
("container", "set-prop"),
pool=pool, cont=cont, prop=prop_value)
def container_get_prop(self, pool, cont):
"""Call daos container get-prop.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container get-prop command fails.
"""
return self._get_result(
("container", "get-prop"), pool=pool, cont=cont)
def container_set_owner(self, pool, cont, user, group):
"""Call daos container set-owner.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
user (str): New-user who will own the container.
group (str): New-group who will own the container.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container set-owner command fails.
"""
return self._get_result(
("container", "set-owner"),
pool=pool, cont=cont, user=user, group=group)
def container_set_attr(
self, pool, cont, attr, val, sys_name=None):
"""Call daos container set-attr.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
attr (str): Attribute name.
val (str): Attribute value.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container set-attr command fails.
"""
return self._get_result(
("container", "set-attr"), pool=pool, cont=cont,
sys_name=sys_name, attr=attr, value=val)
def container_get_attr(self, pool, cont, attr, sys_name=None):
"""Call daos container get-attr.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
attr (str): Attribute name.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
dict: the daos json command output converted to a python dictionary
Raises:
CommandFailure: if the daos get-attr command fails.
"""
return self._get_json_result(
("container", "get-attr"), pool=pool, cont=cont, attr=attr, sys_name=sys_name)
def container_list_attrs(self, pool, cont, sys_name=None, verbose=False):
"""Call daos container list-attrs.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
verbose (bool, optional): True - fetch values of all attributes.
Returns:
dict: the daos json command output converted to a python dictionary
Raises:
CommandFailure: if the daos container list-attrs command fails.
"""
return self._get_json_result(
("container", "list-attrs"), pool=pool, cont=cont, sys_name=sys_name,
verbose=verbose)
def container_create_snap(self, pool, cont, snap_name=None, epoch=None,
sys_name=None):
"""Call daos container create-snap.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
snap_name (str, optional): Snapshot name. Defaults to None.
epoch (str, optional): Epoch number. Defaults to None.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
Returns:
dict: Dictionary that stores the created epoch in the key "epoch".
Raises:
CommandFailure: if the daos container create-snap command fails.
"""
self._get_result(
("container", "create-snap"), pool=pool, cont=cont,
sys_name=sys_name, snap=snap_name, epc=epoch)
# Sample create-snap output.
# snapshot/epoch 0x51e719907180000 has been created
data = {}
match = re.findall(r"[A-Za-z\/]+\s(0x[0-9a-fA-F]+)\s[a-z\s]+", self.result.stdout_text)
if match:
data["epoch"] = match[0]
return data
def container_destroy_snap(self, pool, cont, snap_name=None, epc=None,
sys_name=None, epcrange=None):
"""Call daos container destroy-snap.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
snap_name (str, optional): Snapshot name. Defaults to None.
epc (str, optional): Epoch value of the snapshot to be destroyed.
Defaults to None.
sys_name (str, optional): DAOS system name context for servers.
Defaults to None.
epcrange (str, optional): Epoch range in the format "<start>-<end>".
Defaults to None.
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos container destroy-snap command fails.
"""
kwargs = {
"pool": pool,
"cont": cont,
"sys_name": sys_name,
"snap": snap_name,
"epc": epc,
"epcrange": epcrange
}
return self._get_result(("container", "destroy-snap"), **kwargs)
def container_list_snaps(self, pool, cont):
"""List snapshot in a container.
Args:
pool (str): Pool UUID.
cont (str): Container UUID.
Returns:
dict: Dictionary that contains epoch values in key "epochs". Value
is a list of string.
"""
self._get_result(
("container", "list-snaps"), pool=pool, cont=cont)
# Sample container list-snaps output.
# Container's snapshots :
# 0x51ebe2f21500000
# 0x51ebe4f5b6c0000
# 0x51ebe5233780000
data = {}
match = re.findall(r"(0x[0-9a-fA-F]+)", self.result.stdout_text)
if match:
data["epochs"] = match
return data
def object_query(self, pool, cont, oid, sys_name=None):
"""Call daos object query and return its output with a dictionary.
Args:
pool (str): Pool UUID
cont (str): Container UUID
oid (str): oid hi lo value in the format <hi>.<lo>
sys_name (str, optional): System name. Defaults to None.
Returns:
dict: cmd output
oid: (oid.hi, oid.lo)
ver: num
grp_nr: num
layout: [{grp: num, replica: [(n0, n1), (n2, n3)...]}, ...]
Each row of replica nums is a tuple and stored top->bottom.
Raises:
CommandFailure: if the daos object query command fails.
"""
self._get_result(
("object", "query"), pool=pool, cont=cont,
oid=oid, sys_name=sys_name)
# Sample daos object query output.
# oid: 1152922453794619396.1 ver 0 grp_nr: 2
# grp: 0
# replica 0 1
# replica 1 0
# grp: 1
# replica 0 0
# replica 1 1
data = {}
vals = re.findall(
r"oid:\s+([\d.]+)\s+ver\s+(\d+)\s+grp_nr:\s+(\d+)|"\
r"grp:\s+(\d+)\s+|"\
r"replica\s+(\d+)\s+(\d+)\s*", self.result.stdout_text)
try:
oid_vals = vals[0][0]
oid_list = oid_vals.split(".")
oid_hi = oid_list[0]
oid_lo = oid_list[1]
data["oid"] = (oid_hi, oid_lo)
data["ver"] = vals[0][1]
data["grp_nr"] = vals[0][2]
data["layout"] = []
for i in range(1, len(vals)):
if vals[i][3] == "":
if "replica" in data["layout"][-1]:
data["layout"][-1]["replica"].append(
(vals[i][4], vals[i][5]))
else:
data["layout"][-1]["replica"] = [(
vals[i][4], vals[i][5])]
else:
data["layout"].append({"grp": vals[i][3]})
except IndexError:
traceback.print_exc()
self.log.error("--- re.findall output ---")
self.log.error(vals)
return data
def filesystem_copy(self, src, dst, preserve_props=None):
"""Copy a POSIX container or path to another POSIX container or path.
Args:
src (str): The source, formatted as
daos:<pool>/<cont>/<path> or posix:<path>
dst (str): The destination, formatted as
daos:<pool>/<cont>/<path> or posix:<path>
preserve_props (str): The filename to read or write container properties
Returns:
CmdResult: Object that contains exit status, stdout, and other
information.
Raises:
CommandFailure: if the daos filesystem copy command fails.
"""
return self._get_result(
("filesystem", "copy"), src=src, dst=dst, preserve_props=preserve_props)
def version(self):
"""Call daos version.
Returns:
            CmdResult: an avocado CmdResult object containing the daos command
information, e.g. exit status, stdout, stderr, etc.
Raises:
            CommandFailure: if the daos version command fails.
"""
return self._get_result(["version"])
|
the-stack_0_11061 | # -*- coding: utf-8 -*-
from app.constants import S_OK, S_ERR
import random
import math
import base64
import time
import ujson as json
from wand.image import Image
from StringIO import StringIO
from app.constants import *
from app import cfg
from app import util
_CONTENT_TYPE_POSTFIX_MAP = {
'image/jpeg': 'jpg',
'image/jpg': 'jpg',
'image/gif': 'gif',
'image/png': 'png',
'application/pdf': 'pdf',
'application/x-pdf': 'pdf',
}
_IMG_TYPES = ['png', 'jpg', 'gif']
_IMG_TYPE_MAP = {
'jpg': 'jpeg',
}
def p_img_handler(data, content_type, idx):
idx = util._int(idx)
postfix = _parse_postfix(content_type)
result = _save_img(data, postfix, content_type)
result['the_idx'] = idx
return result
def _save_img(data, postfix, content_type):
the_timestamp = util.get_timestamp()
the_datetime = util.timestamp_to_datetime(the_timestamp)
the_id = str(the_timestamp) + "_" + util.uuid()
filename = the_id + '.' + postfix
the_dir = '/data/img/bee/' + the_datetime.strftime('%Y-%m-%d')
util.makedirs(the_dir)
with open(the_dir + '/' + filename, 'w') as f:
f.write(data)
(the_thumbnail, thumbnail_postfix) = _make_thumbnail(data, postfix)
the_dir = '/data/thumbnail/bee/' + the_datetime.strftime('%Y-%m-%d')
util.makedirs(the_dir)
thumbnail_filename = the_id + '.' + thumbnail_postfix
with open(the_dir + '/' + thumbnail_filename, 'w') as f:
f.write(the_thumbnail)
db_data = {"filename": the_datetime.strftime('%Y-%m-%d/') + filename, "thumbnail_filename": the_datetime.strftime("%Y-%m-%d/") + thumbnail_filename, "the_id": the_id, 'content_type': content_type, 'save_time': the_timestamp}
util.db_insert('bee_img', [db_data])
if '_id' in db_data:
del db_data['_id']
return db_data
def _parse_postfix(content_type):
return _CONTENT_TYPE_POSTFIX_MAP.get(content_type.lower(), 'unknown')
def _make_thumbnail(data, postfix):
postfix = 'png'
converted_data = ''
try:
with Image(blob=data) as img:
(width, height) = img.size
(resized_width, resized_height) = _parse_resize(width, height)
img.resize(resized_width, resized_height)
converted_data = img.make_blob(_IMG_TYPE_MAP.get(postfix, postfix))
except Exception as e:
        cfg.logger.exception('unable to _make_thumbnail: postfix: %s e: %s', postfix, e)
converted_data = data
return (converted_data, postfix)
def _parse_resize(width, height):
max_size = max(width, height)
the_ratio = float(RESIZE_SIZE) / max_size
resize_width = int(width * the_ratio)
resize_height = int(height * the_ratio)
cfg.logger.debug('width: %s height: %s resize_width: %s resize_height: %s', width, height, resize_width, resize_height)
return (resize_width, resize_height)
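# Worked example of the ratio above. RESIZE_SIZE comes from app.constants and is
# not shown here; assuming RESIZE_SIZE == 256, a 4000x3000 source image gives
# the_ratio = 256 / 4000 = 0.064, so the thumbnail is int(4000 * 0.064) x
# int(3000 * 0.064) = 256 x 192, preserving the aspect ratio.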
|
the-stack_0_11063 | from collections import (
defaultdict,
)
from operator import (
attrgetter,
)
from typing import (
Any,
Iterable,
Optional,
Union,
get_args,
get_origin,
)
from uuid import (
UUID,
)
from minos.common import (
TypeHintBuilder,
is_model_type,
)
from .models import (
ModelRef,
)
class ModelRefExtractor:
"""Model Reference Extractor class."""
def __init__(self, value: Any, type_: Optional[type] = None, as_uuids: bool = True):
if type_ is None:
type_ = TypeHintBuilder(value).build()
self.value = value
self.type_ = type_
self.as_uuids = as_uuids
def build(self) -> dict[str, set[UUID]]:
"""Run the model reference extractor.
:return: A dictionary in which the keys are the class names and the values are the identifiers.
"""
ans = defaultdict(set)
self._build(self.value, self.type_, ans)
if self.as_uuids:
ans = {k: set(map(attrgetter("uuid"), v)) for k, v in ans.items()}
return ans
def _build(self, value: Any, type_: type, ans: dict[str, set[ModelRef]]) -> None:
if get_origin(type_) is Union:
type_ = next((t for t in get_args(type_) if get_origin(t) is ModelRef), type_)
if isinstance(value, (tuple, list, set)):
self._build_iterable(value, get_args(type_)[0], ans)
elif isinstance(value, dict):
self._build_iterable(value.keys(), get_args(type_)[0], ans)
self._build_iterable(value.values(), get_args(type_)[1], ans)
elif isinstance(value, ModelRef):
cls = value.data_cls or get_args(type_)[0]
name = cls.__name__
ans[name].add(value)
elif is_model_type(value):
# noinspection PyUnresolvedReferences
for field in value.fields.values():
self._build(field.value, field.type, ans)
def _build_iterable(self, value: Iterable, value_: type, ans: dict[str, set[ModelRef]]) -> None:
for sub_value in value:
self._build(sub_value, value_, ans)
|
the-stack_0_11064 | from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from django.utils import timezone
from .custom_functions import isOnlyOneTrue
from users.models import OpticUser, Account
import decimal
from termcolor import colored
# Create your models here.
class Patient(models.Model):
"""
    Table with the patient's fields
"""
class Gender(models.TextChoices):
MALE = 'MALE', 'Masculino'
FEMALE = 'FEMALE', 'Femenino'
OTHER = 'OTHER', 'Otro'
patient_optic_id = models.PositiveIntegerField(blank=True)
optic = models.ForeignKey(
OpticUser, verbose_name="Optica", on_delete=models.CASCADE, null=False)
full_name = models.CharField("Nombre completo", max_length=100)
dni = models.CharField(
"Dni o Pasaporte", max_length=20, blank=True, null=True)
age = models.PositiveSmallIntegerField("Edad", blank=True, null=True)
gender = models.CharField("Genero", max_length=20,
blank=True, choices=Gender.choices)
phone = models.CharField("Celular", max_length=30, blank=True)
job = models.CharField('Ocupacion', max_length=70, blank=True)
class Meta:
verbose_name = "Paciente"
verbose_name_plural = "Pacientes"
unique_together = (('optic', 'dni'), ('optic', 'patient_optic_id'))
def __str__(self):
return f"{self.full_name}"
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
if self._state.adding is True:
optic = OpticUser.objects.get(pk=self.optic.id)
last_patient = optic.patient_set.last()
if last_patient:
patient_optic_id = last_patient.patient_optic_id + 1
else:
patient_optic_id = 1
self.patient_optic_id = patient_optic_id
return super().save(force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields)
class DiagnosisChoices(models.TextChoices):
"""
    Diagnoses to suggest
"""
MYOPIA = 'MYOPIA', 'Miopía'
ASTIGMATISM = 'ASTIGMATISM', 'Astigmatismo'
FARSIGHTEDNESS = 'FARSIGHTEDNESS', 'Hipermetropía'
PRESBYOPIA = 'PRESBYOPIA', 'Presbicia'
SQUINT = 'SQUINT', 'Estrabismo'
AMBLYOPIA = 'AMBLYOPIA', 'Ambliopía'
DIOPIA = 'DIOPIA', 'Diopía'
GLAUCOMA = 'GLAUCOMA', 'Glaucoma'
DETACHED_RETINA = 'DETACHED_RETINA', 'Desprendimiento de la retina'
CATARACT = 'CATARACT', 'Catarata'
DALTONISM = 'DALTONISM', 'Daltonismo'
CONJUNCTIVITIS = 'CONJUNCTIVITIS', 'Conjuntivitis'
DIABETIC_RETINOPATHY = 'DIABETIC_RETINOPATHY', 'Retinopatía diabética'
DRY_EYE = 'DRY_EYE', 'Ojo seco'
MACULAR_DEGENERATION = 'MACULAR_DEGENERATION', 'Degeneración macular'
class Subsidiary(models.Model):
"""
    Branch offices table
"""
subsidiary_name = models.CharField(
"Nombre Sucursal", max_length=30, blank=True)
direction = models.CharField("Dirección", max_length=50, blank=True)
phone = models.CharField("Telefono", max_length=23, blank=True)
optic = models.ForeignKey(
OpticUser, verbose_name="Optica", on_delete=models.CASCADE, null=False)
class Meta:
verbose_name = "Sucursal"
verbose_name_plural = "Sucursales"
def __str__(self):
return self.subsidiary_name
class Laboratory(models.Model):
"""
    Laboratory table
"""
laboratory_name = models.CharField(
"Laboratorio", max_length=40, null=False)
direction = models.CharField("Dirección", max_length=50, blank=True)
phone = models.CharField("Telefono", max_length=23, blank=True)
optic = models.ForeignKey(
OpticUser, verbose_name="Optica", on_delete=models.CASCADE, null=False)
class Meta:
verbose_name = "Laboratorio"
verbose_name_plural = "Laboratorios"
def __str__(self):
return self.laboratory_name
class CrystalTreatments(models.Model):
"""
    Treatment table
"""
treatment_name = models.CharField("Nombre del tratamiento", max_length=50)
description = models.TextField("Descripcion", blank=True)
optic = models.ForeignKey(
OpticUser, verbose_name="Optica", on_delete=models.CASCADE, null=False)
class Meta:
verbose_name = "Tratamiento"
verbose_name_plural = "Tratamientos"
def __str__(self):
return self.treatment_name
class CrystalMaterial(models.Model):
"""
    Material table
"""
material_name = models.CharField("Nombre del Material", max_length=50)
refractive_index = models.DecimalField(
"Indice de refracción", max_digits=4, decimal_places=3, blank=True, null=True)
abbe = models.DecimalField(
"Valor abbe", max_digits=3, decimal_places=1, blank=True, null=True)
description = models.TextField("Descripcion", blank=True)
optic = models.ForeignKey(
OpticUser, verbose_name="Optica", on_delete=models.CASCADE, null=False)
class Meta:
verbose_name = "Material de la luna"
verbose_name_plural = "Materiales de las lunas"
def __str__(self):
return self.material_name
class Crystal(models.Model):
"""
    Lens table
"""
crystal_name = models.CharField("Nombre Luna", max_length=70)
material = models.ForeignKey(
CrystalMaterial, verbose_name="Material", on_delete=models.SET_NULL, null=True, blank=True)
treatments = models.ManyToManyField(
CrystalTreatments, verbose_name="Tratamientos", blank=True)
default_price = models.DecimalField('Precio de los lentes', max_digits=10, decimal_places=2, validators=[
MinValueValidator(0, 'No se permite el valor ingresado')], blank=True, null=True)
optic = models.ForeignKey(
OpticUser, verbose_name="Optica", on_delete=models.CASCADE, null=False)
class Meta:
verbose_name = "Luna"
verbose_name_plural = "Lunas"
def __str__(self):
return self.crystal_name
def get_treatments(self):
treatments = list(self.treatments.all())
treatments = [treatment.treatment_name for treatment in treatments]
if len(treatments) == 0:
return "--"
return ", ".join(treatments)
class Prescription(models.Model):
"""
    Prescription table
"""
class PrescriptionType(models.TextChoices):
MONOFOCAL = 'MONOFOCAL', 'Monofocal'
BIFOCAL = 'BIFOCAL', 'Bifocal'
OCCUPATIONAL = 'OCCUPATIONAL', 'Ocupacional'
PROGRESSIVE = 'PROGRESSIVE', 'Progressivo'
@staticmethod
def generateChoices(start, end):
choices = [(decimal.Decimal(f'{i*0.25}0') if i % 2 == 0 else decimal.Decimal(f'{i*0.25}'), (f'{i*0.25}0' if i <=
0 else f'+{i*0.25}0') if i % 2 == 0 else (f'{i*0.25}' if i <= 0 else f'+{i*0.25}')) for i in range(end-1, start-1, -1)]
for i, (value, name) in enumerate(choices):
if value == decimal.Decimal(0):
choices.insert(i, ('', '--'))
break
return choices
spherical_choices = generateChoices.__func__(-100, 101)
cylinder_choices = generateChoices.__func__(-40, 1)
axis_choices = [(i, f'{i}°') for i in range(180, -1, -1)]
axis_choices.append(('', '--'))
dip_choices = [(i, f'{i}mm') for i in range(81, 40, -1)]
dip_choices.append(('', '--'))
dnp_choices = [(decimal.Decimal(f'{i/2}') if i % 2 == 0 else decimal.Decimal(
f'{i/2}'), f'{i/2}mm' if i % 2 == 1 else f'{int(i/2)}mm') for i in range(81, 40, -1)]
dnp_choices.append(('', '--'))
add_choices = generateChoices.__func__(1, 25)
add_choices.append(('', '--'))
# print(colored(spherical_choices,'green'))
# print(colored(cylinder_choices,'red'))
# print(colored(axis_choices,'green'))
# print(colored(dip_choices,'red'))
# print(colored(add_choices,'green'))
optic = models.ForeignKey(
OpticUser, verbose_name="Optica", on_delete=models.CASCADE, null=False)
is_dip = models.BooleanField('Dip o Dnp')
patient = models.ForeignKey(
Patient, on_delete=models.PROTECT, verbose_name="Paciente")
subsidiary = models.ForeignKey(
Subsidiary, on_delete=models.SET_NULL, verbose_name="Sucursal", blank=True, null=True)
laboratory = models.ForeignKey(
Laboratory, verbose_name="Laboratorio", on_delete=models.SET_NULL, null=True, blank=True)
doctor = models.ForeignKey(
Account, verbose_name="Doctor", on_delete=models.SET_NULL, blank=True, null=True)
prescription_optic_id = models.PositiveIntegerField(blank=True)
prescription_type = models.CharField(
"Tipo", max_length=50, choices=PrescriptionType.choices, null=True, blank=True)
date = models.DateField(verbose_name='Fecha', default=timezone.now)
time = models.TimeField(verbose_name='Hora', default=timezone.now)
far_spherical_right = models.DecimalField(
"Esf. derecho Lejos", max_digits=4, decimal_places=2, blank=True, null=True, choices=spherical_choices)
far_cylinder_right = models.DecimalField(
"Cil. derecho Lejos", max_digits=4, decimal_places=2, blank=True, null=True, choices=cylinder_choices)
far_axis_right = models.PositiveSmallIntegerField("Eje derecho Lejos", validators=[MaxValueValidator(
180, 'El eje solo permite valores entre 0° y 180°')], blank=True, null=True, choices=axis_choices)
far_av_right = models.CharField(
"Av. derecho lejos", max_length=50, blank=True, null=True)
far_dnp_right = models.DecimalField(
"Dnp. derecho lejos", max_digits=3, decimal_places=1, blank=True, null=True, choices=dnp_choices)
far_spherical_left = models.DecimalField(
"Esf. izquierdo Lejos", max_digits=4, decimal_places=2, blank=True, null=True, choices=spherical_choices)
far_cylinder_left = models.DecimalField(
"Cil. izquierdo Lejos", max_digits=4, decimal_places=2, blank=True, null=True, choices=cylinder_choices)
far_axis_left = models.PositiveSmallIntegerField("Eje izquierdo Lejos", validators=[MaxValueValidator(
180, 'El eje solo permite valores entre 0° y 180°')], blank=True, null=True, choices=axis_choices)
far_av_left = models.CharField(
"Av. izquierdo lejos", max_length=50, blank=True, null=True)
far_dnp_left = models.DecimalField(
"Dnp. izquierdo lejos", max_digits=3, decimal_places=1, blank=True, null=True, choices=dnp_choices)
intermediate_spherical_right = models.DecimalField(
"Esf. derecho intermedio", max_digits=4, decimal_places=2, blank=True, null=True, choices=spherical_choices)
intermediate_cylinder_right = models.DecimalField(
"Cil. derecho intermedio", max_digits=4, decimal_places=2, blank=True, null=True, choices=cylinder_choices)
    intermediate_axis_right = models.PositiveSmallIntegerField("Eje derecho intermedio", validators=[MaxValueValidator(
180, 'El eje solo permite valores entre 0° y 180°')], blank=True, null=True, choices=axis_choices)
intermediate_av_right = models.CharField(
"Av. derecho intermedio", max_length=50, blank=True, null=True)
intermediate_dnp_right = models.DecimalField(
"Dnp. derecho intermedio", max_digits=3, decimal_places=1, blank=True, null=True, choices=dnp_choices)
intermediate_spherical_left = models.DecimalField(
"Esf. izquierdo intermedio", max_digits=4, decimal_places=2, blank=True, null=True, choices=spherical_choices)
intermediate_cylinder_left = models.DecimalField(
"Cil. izquierdo intermedio", max_digits=4, decimal_places=2, blank=True, null=True, choices=cylinder_choices)
intermediate_axis_left = models.PositiveSmallIntegerField("Eje izquierdo intermedio", validators=[MaxValueValidator(
180, 'El eje solo permite valores entre 0° y 180°')], blank=True, null=True, choices=axis_choices)
intermediate_av_left = models.CharField(
"Av. izquierdo intermedio", max_length=50, blank=True, null=True)
intermediate_dnp_left = models.DecimalField(
"Dnp. izquierdo intermedio", max_digits=3, decimal_places=1, blank=True, null=True, choices=dnp_choices)
near_spherical_right = models.DecimalField(
"Esf. derecho Cerca", max_digits=4, decimal_places=2, blank=True, null=True, choices=spherical_choices)
near_cylinder_right = models.DecimalField(
"Cil. derecho Cerca", max_digits=4, decimal_places=2, blank=True, null=True, choices=cylinder_choices)
near_axis_right = models.PositiveSmallIntegerField("Eje derecho Cerca", validators=[MaxValueValidator(
180, 'El eje solo permite valores entre 0° y 180°')], blank=True, null=True, choices=axis_choices)
near_av_right = models.CharField(
"Av. derecho Cerca", max_length=50, blank=True, null=True)
near_dnp_right = models.DecimalField(
"Dnp. derecho Cerca", max_digits=3, decimal_places=1, blank=True, null=True, choices=dnp_choices)
near_spherical_left = models.DecimalField(
"Esf. izquierdo Cerca", max_digits=4, decimal_places=2, blank=True, null=True, choices=spherical_choices)
near_cylinder_left = models.DecimalField(
"Cil. izquierdo Cerca", max_digits=4, decimal_places=2, blank=True, null=True, choices=cylinder_choices)
near_axis_left = models.PositiveSmallIntegerField("Eje izquierdo Cerca", validators=[MaxValueValidator(
        180, 'El eje solo permite valores entre 0° y 180°')], blank=True, null=True, choices=axis_choices)
near_av_left = models.CharField(
"Av. izquierdo Cerca", max_length=50, blank=True, null=True)
near_dnp_left = models.DecimalField(
"Dnp. izquierdo Cerca", max_digits=3, decimal_places=1, blank=True, null=True, choices=dnp_choices)
patient_notes = models.TextField("Notas para el paciente", blank=True)
laboratory_notes = models.TextField(
"Notas para el laboratorio", blank=True)
optic_notes = models.TextField("Notas para tu optica", blank=True)
intermediate_add = models.DecimalField(
"Add. intermedio", max_digits=4, decimal_places=2, blank=True, null=True, choices=add_choices)
near_add = models.DecimalField(
"Add. cerca", max_digits=4, decimal_places=2, blank=True, null=True, choices=add_choices)
diagnosis = models.CharField("Diagnostico", max_length=84, blank=True,
help_text="Diagnostico del paciente según las medidas")
measure_price = models.DecimalField('Precio de la medida', max_digits=10, decimal_places=2, default=0, validators=[
MinValueValidator(0, 'No se permite el valor ingresado')], blank=True, null=True)
crystals = models.ForeignKey(
Crystal, on_delete=models.SET_NULL, verbose_name="Lunas", blank=True, null=True)
crystals_cost = models.DecimalField('Costo de las lunas', max_digits=10, decimal_places=2, validators=[
MinValueValidator(0, 'No se permite el valor ingresado')], blank=True, null=True)
crystals_price = models.DecimalField('Precio de venta de las lunas', max_digits=10, decimal_places=2, validators=[
MinValueValidator(0, 'No se permite el valor ingresado')], blank=True, null=True)
frame = models.CharField("Descripcion de la montura",
max_length=120, null=True, blank=True)
frame_price = models.DecimalField('Precio de venta de la montura', max_digits=10, decimal_places=2, validators=[
MinValueValidator(0, 'No se permite el valor ingresado')], blank=True, null=True)
class Meta:
verbose_name = "Receta"
verbose_name_plural = "Recetas"
unique_together = ('optic', 'prescription_optic_id')
def __str__(self):
return f"""{self.patient}"""
# ODL:{self.far_spherical_right if self.far_spherical_right is not None else '?'}({self.far_cylinder_right if self.far_cylinder_right is not None else '?'}){self.far_axis_right if self.far_axis_right is not None else '?'}°
# OIL:{self.far_spherical_left if self.far_spherical_left is not None else '?'}({self.far_cylinder_left if self.far_cylinder_left is not None else '?'}){self.far_axis_left if self.far_axis_left is not None else '?'}°
# ODC:{self.near_spherical_right if self.near_spherical_right is not None else '?'}({self.near_cylinder_right if self.near_cylinder_right is not None else '?'}){self.near_axis_right if self.near_axis_right is not None else '?'}°
# OIC:{self.near_spherical_left if self.near_spherical_left is not None else '?'}({self.near_cylinder_left if self.near_cylinder_left is not None else '?'}){self.near_axis_left if self.near_axis_left is not None else '?'}°
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
if self._state.adding is True:
optic = OpticUser.objects.get(pk=self.optic.id)
last_prescription = optic.prescription_set.last()
if last_prescription:
prescription_optic_id = last_prescription.prescription_optic_id + 1
else:
prescription_optic_id = 1
self.prescription_optic_id = prescription_optic_id
near = self.has_near_table() or self.near_add is not None
intermediate = self.has_intermediate_table() or self.intermediate_add is not None
far = self.has_far_table()
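        # Derive the prescription type from which distance tables are filled in:
        # exactly one of near/intermediate/far -> monofocal; all three -> progressive;
        # near + intermediate -> occupational; near + far or intermediate + far -> bifocal.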
if isOnlyOneTrue(near, intermediate, far):
self.prescription_type = Prescription.PrescriptionType.MONOFOCAL
elif near and intermediate and far:
self.prescription_type = Prescription.PrescriptionType.PROGRESSIVE
elif near and intermediate:
self.prescription_type = Prescription.PrescriptionType.OCCUPATIONAL
elif (near and far) or (intermediate and far):
self.prescription_type = Prescription.PrescriptionType.BIFOCAL
else:
self.prescription_type = None
super().save(force_insert=force_insert, force_update=force_update,
using=using, update_fields=update_fields)
def get_total(self):
if self.frame_price is None and self.crystals_price is None and self.measure_price is None:
return None
if self.frame_price is None:
frame_price = 0
else:
frame_price = self.frame_price
if self.crystals_price is None:
crystals_price = 0
else:
crystals_price = self.crystals_price
if self.measure_price is None:
measure_price = 0
else:
measure_price = self.measure_price
total = frame_price + crystals_price + measure_price
return total
def has_far_table(self):
"""
        Check whether the prescription has a distance (far) table filled in.
"""
if (self.far_spherical_right is not None or self.far_cylinder_right is not None
or self.far_axis_right is not None or self.far_av_right is not None or
self.far_dnp_right is not None
or self.far_spherical_left is not None or self.far_cylinder_left is not None
or self.far_axis_left is not None or self.far_av_left is not None or
self.far_dnp_left is not None):
return True
return False
def has_intermediate_table(self):
"""
        Check whether the prescription has an intermediate table filled in.
"""
if (self.intermediate_spherical_right is not None or self.intermediate_cylinder_right is not None
or self.intermediate_axis_right is not None or self.intermediate_av_right is not None or
self.intermediate_dnp_right is not None
or self.intermediate_spherical_left is not None or self.intermediate_cylinder_left is not None
or self.intermediate_axis_left is not None or self.intermediate_av_left is not None or
self.intermediate_dnp_left is not None):
return True
return False
def has_near_table(self):
"""
        Check whether the prescription has a near table filled in.
"""
if (self.near_spherical_right is not None or self.near_cylinder_right is not None
or self.near_axis_right is not None or self.near_av_right is not None or
self.near_dnp_right is not None
or self.near_spherical_left is not None or self.near_cylinder_left is not None
or self.near_axis_left is not None or self.near_av_left is not None or
self.near_dnp_left is not None):
return True
return False
|
the-stack_0_11065 | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import sys
import ah_bootstrap
from setuptools import setup
# A dirty hack to get around some early import/configuration ambiguities
import builtins
builtins._ASTROPY_SETUP_ = True
from astropy_helpers.setup_helpers import (register_commands, get_debug_option,
get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
# Get some values from the setup.cfg
from configparser import ConfigParser
conf = ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))
PACKAGENAME = metadata.get('package_name', 'pyvo')
DESCRIPTION = metadata.get('description', 'Astropy affiliated package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', '')
# order of priority for long_description:
# (1) set in setup.cfg,
# (2) load LONG_DESCRIPTION.rst,
# (3) load README.rst,
# (4) package docstring
readme_glob = 'README*'
_cfg_long_description = metadata.get('long_description', '')
if _cfg_long_description:
LONG_DESCRIPTION = _cfg_long_description
elif os.path.exists('LONG_DESCRIPTION.rst'):
with open('LONG_DESCRIPTION.rst') as f:
LONG_DESCRIPTION = f.read()
elif len(glob.glob(readme_glob)) > 0:
with open(glob.glob(readme_glob)[0]) as f:
LONG_DESCRIPTION = f.read()
else:
# Get the long description from the package's docstring
__import__(PACKAGENAME)
package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = package.__doc__
# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
# VERSION should be PEP440 compatible (http://www.python.org/dev/peps/pep-0440)
VERSION = metadata.get('version', '0.0.dev')
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
if not RELEASE:
VERSION += get_git_devstr(False)
# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)
# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
get_debug_option(PACKAGENAME))
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')
# Define entry points for command-line scripts
entry_points = {'console_scripts': []}
entry_point_list = conf.items('entry_points')
for entry_point in entry_point_list:
entry_points['console_scripts'].append('{} = {}'.format(entry_point[0],
entry_point[1]))
# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
for filename in files:
if filename.endswith('.c'):
c_files.append(
os.path.join(
os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)
# Note that requires and provides should not be included in the call to
# ``setup``, since these are now deprecated. See this link for more details:
# https://groups.google.com/forum/#!topic/astropy-dev/urYO8ckB2uM
setup(name=PACKAGENAME,
version=VERSION,
description=DESCRIPTION,
python_requires='>=3.7',
install_requires=metadata.get('install_requires', 'astropy').strip().split(),
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
long_description=LONG_DESCRIPTION,
cmdclass=cmdclassd,
zip_safe=False,
use_2to3=False,
entry_points=entry_points,
**package_info
)
|
the-stack_0_11066 | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandsTestCase(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db is available"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""Test waiting for db"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
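# The management command exercised above is not part of this module. A minimal sketch of
# such a command (e.g. core/management/commands/wait_for_db.py) is shown below for context;
# the retry delay and messages are assumptions, not the code under test.
#
# import time
# from django.core.management.base import BaseCommand
# from django.db import connections
# from django.db.utils import OperationalError
#
# class Command(BaseCommand):
#     """Pause execution until the database is available."""
#
#     def handle(self, *args, **options):
#         self.stdout.write('Waiting for database...')
#         db_conn = None
#         while not db_conn:
#             try:
#                 db_conn = connections['default']
#             except OperationalError:
#                 self.stdout.write('Database unavailable, waiting 1 second...')
#                 time.sleep(1)
#         self.stdout.write(self.style.SUCCESS('Database available!'))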
|
the-stack_0_11068 | from PyTsetlinMachineCUDA.tm import MultiClassConvolutionalTsetlinMachine2D
import numpy as np
from time import time
from keras.datasets import mnist
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
X_train = np.where(X_train >= 75, 1, 0)
X_test = np.where(X_test >= 75, 1, 0)
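# Binarize the grayscale images: pixels with intensity >= 75 become 1, everything else 0,
# since the Tsetlin Machine operates on Boolean features.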
tm = MultiClassConvolutionalTsetlinMachine2D(2000, 50*15, 5.0, (10, 10), max_weight=16)
print("\nAccuracy over 50 epochs:\n")
for i in range(50):
start_training = time()
tm.fit(X_train, Y_train, epochs=1, incremental=True)
stop_training = time()
start_testing = time()
result = 100*(tm.predict(X_test) == Y_test).mean()
stop_testing = time()
print("#%d Accuracy: %.2f%% Training: %.2fs Testing: %.2fs" % (i+1, result, stop_training-start_training, stop_testing-start_testing))
|
the-stack_0_11070 | # Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase, mock
from delfin import context
from delfin.drivers.dell_emc.vplex.rest_handler import RestHandler
from delfin.drivers.dell_emc.vplex.vplex_stor import VplexStorageDriver
ACCESS_INFO = {
"storage_id": "12345",
"vendor": "dell_emc",
"model": "vplex",
"rest": {
"host": "8.44.162.250",
"port": 443,
"username": "service",
"password": "Abcdef@123"
}
}
TRAP_INFO = {
"1.3.6.1.2.1.1.3.0": "0",
'1.3.6.1.6.3.1.1.4.1.0': '1.3.6.1.4.1.1139.21.0',
'1.3.6.1.4.1.1139.21.1.5.0': 'this is test',
'1.3.6.1.4.1.1139.21.1.3.0': '123321'
}
trap_result = {
'alert_id': '123321',
'alert_name': 'this is test',
'severity': 'Informational',
'category': 'Fault',
'type': 'EquipmentAlarm',
'occur_time': 1614067724000,
'description': 'this is test',
'resource_type': 'Storage',
'location': '',
'match_key': '8c6d115258631625b625486f81b09532'
}
GET_ALL_CLUSTER = {
"context": [{
"children": [{
"name": "cluster-1",
"type": "cluster"
}
]
}
]
}
GET_ALL_LUNS = {
"context": [
{
"children": [
{
"name": "device_VPLEX_LUN0_1_vol",
"type": "virtual-volume"
}
]
}
]
}
GET_LUN = {
"context": [
{
"attributes": [
{
"name": "capacity",
"value": "644245094400B"
},
{
"name": "health-state",
"value": "ok"
},
{
"name": "operational-status",
"value": "ok"
},
{
"name": "supporting-device",
"value": "device__VPLEX_LUN0_1"
},
{
"name": "thin-enabled",
"value": "unavailable"
},
{
"name": "vpd-id",
"value": "VPD83T3:60000000000000000000000000000000"
}
]
}
]
}
volume_result = [{
'name': 'device_VPLEX_LUN0_1_vol',
'storage_id': '12345',
'description': 'EMC VPlex volume',
'status': 'normal',
'native_volume_id': 'VPD83T3:60000000000000000000000000000000',
'native_storage_pool_id': 'device__VPLEX_LUN0_1',
'type': 'thick',
'total_capacity': 644245094400,
'used_capacity': 644245094400,
'free_capacity': 0,
'wwn': '60000000000000000000000000000000'
}
]
GET_ALL_POOLS = {
"context": [
{
"children": [
{
"name": "Device_KLM_test01",
"type": "local-device"
}
]
}
]
}
GET_POOL = {
"context": [
{
"attributes": [
{
"name": "capacity",
"value": "732212254720B"
},
{
"name": "health-state",
"value": "ok"
},
{
"name": "operational-status",
"value": "ok"
},
{
"name": "system-id",
"value": "Device_KLM_test01"
},
{
"name": "virtual-volume",
"value": "Volume_CLARiiON0041_KLM_test01"
}
]
}
]
}
pool_result = [
{
'name': 'Device_KLM_test01',
'storage_id': '12345',
'native_storage_pool_id': 'Device_KLM_test01',
'description': 'EMC VPlex Pool',
'status': 'normal',
'storage_type': 'block',
'total_capacity': 732212254720,
'used_capacity': 732212254720,
'free_capacity': 0
}
]
GET_HEALH_CHECK = {
"context": None,
"message": "health-check -l",
"exception": None,
"custom-data": "Product Version: 6.1.0.01.00.13\n"
"Product Type: Local\n"
}
GET_CLUSTER = {
"context": [
{
"type": "cluster",
"parent": "/clusters",
"attributes": [
{
"name": "health-state",
"value": "major-failure"
},
{
"name": "operational-status",
"value": "degraded"
},
{
"name": "top-level-assembly",
"value": "FNM00000000000"
}
],
}
]
}
storage_result = {
'name': 'cluster-1',
'vendor': 'DELL EMC',
'description': 'EMC VPlex Storage',
'status': 'abnormal',
'serial_number': 'FNM00000000000',
'firmware_version': ' 6.1.0.01.00.13',
'model': 'EMC VPLEX Local',
'location': '',
'raw_capacity': 12754334882201,
'total_capacity': 11654823254425,
'used_capacity': 8983009998929,
'free_capacity': 2671813255496,
'subscribed_capacity': 0
}
GET_ALL_STORAGE_VOLUME_SUMMARY = {
"custom-data": "Capacity total 11.6T\n\n"
}
GET_ALL_POOLS_SUMMARY = {
"custom-data": "total capacity 1.88T total capacity "
"8.68T total capacity 10.6T\n\n"
}
GET_ALL_LUNS_SUMMARY = {
"custom-data": "Total virtual-volume capacity is 8.17T."
}
class TestVplexStorDriver(TestCase):
RestHandler.login = mock.Mock(return_value=None)
def test_parse_alert(self):
trap = VplexStorageDriver(**ACCESS_INFO).parse_alert(context,
TRAP_INFO)
trap_result['occur_time'] = trap['occur_time']
self.assertDictEqual(trap, trap_result)
@mock.patch.object(RestHandler, 'get_cluster_resp')
@mock.patch.object(RestHandler, 'get_virtual_volume_resp')
@mock.patch.object(RestHandler, 'get_virtual_volume_by_name_resp')
def test_list_volumes(self, mock_name, mock_volume, mock_cluster):
mock_cluster.return_value = GET_ALL_CLUSTER
mock_volume.return_value = GET_ALL_LUNS
mock_name.return_value = GET_LUN
volume = VplexStorageDriver(**ACCESS_INFO).list_volumes(context)
self.assertDictEqual(volume[0], volume_result[0])
@mock.patch.object(RestHandler, 'get_cluster_resp')
@mock.patch.object(RestHandler, 'get_devcie_resp')
@mock.patch.object(RestHandler, 'get_device_by_name_resp')
def test_list_storage_pools(self, mock_name, mock_device, mock_cluster):
mock_cluster.return_value = GET_ALL_CLUSTER
mock_device.return_value = GET_ALL_POOLS
mock_name.return_value = GET_POOL
pool = VplexStorageDriver(**ACCESS_INFO).list_storage_pools(context)
self.assertDictEqual(pool[0], pool_result[0])
def test_get_storage(self):
RestHandler.get_rest_info = mock.Mock(
side_effect=[GET_HEALH_CHECK, GET_ALL_CLUSTER, GET_CLUSTER,
GET_ALL_STORAGE_VOLUME_SUMMARY, GET_ALL_POOLS_SUMMARY,
GET_ALL_LUNS_SUMMARY])
storage = VplexStorageDriver(**ACCESS_INFO).get_storage(context)
self.assertDictEqual(storage, storage_result)
def test_list_alerts(self):
with self.assertRaises(Exception) as exc:
VplexStorageDriver(**ACCESS_INFO).list_alerts(context)
self.assertEqual('list_alerts is not supported in model VPLEX',
str(exc.exception))
|
the-stack_0_11072 | import ConfigParser
import logging
import os
import re
from galaxy import util
from galaxy import web
from galaxy.web.form_builder import build_select_field
from galaxy.webapps.tool_shed.model import directory_hash_id
from tool_shed.dependencies.repository import relation_builder
from tool_shed.util import common_util
from tool_shed.util import hg_util
from tool_shed.util import shed_util_common as suc
log = logging.getLogger( __name__ )
VALID_REPOSITORYNAME_RE = re.compile( "^[a-z0-9\_]+$" )
def build_allow_push_select_field( trans, current_push_list, selected_value='none' ):
options = []
for user in trans.sa_session.query( trans.model.User ):
if user.username not in current_push_list:
options.append( user )
return build_select_field( trans,
objs=options,
label_attr='username',
select_field_name='allow_push',
selected_value=selected_value,
refresh_on_change=False,
multiple=True )
def change_repository_name_in_hgrc_file( hgrc_file, new_name ):
config = ConfigParser.ConfigParser()
    config.read( hgrc_file )
config.set( 'web', 'name', new_name )
new_file = open( hgrc_file, 'wb' )
config.write( new_file )
new_file.close()
def check_or_update_tool_shed_status_for_installed_repository( app, repository ):
updated = False
tool_shed_status_dict = suc.get_tool_shed_status_for_installed_repository( app, repository )
if tool_shed_status_dict:
ok = True
if tool_shed_status_dict != repository.tool_shed_status:
repository.tool_shed_status = tool_shed_status_dict
app.install_model.context.add( repository )
app.install_model.context.flush()
updated = True
else:
ok = False
return ok, updated
def create_repo_info_dict( app, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_name=None,
repository=None, repository_metadata=None, tool_dependencies=None, repository_dependencies=None ):
"""
Return a dictionary that includes all of the information needed to install a repository into a local
Galaxy instance. The dictionary will also contain the recursive list of repository dependencies defined
for the repository, as well as the defined tool dependencies.
This method is called from Galaxy under four scenarios:
1. During the tool shed repository installation process via the tool shed's get_repository_information()
method. In this case both the received repository and repository_metadata will be objects, but
tool_dependencies and repository_dependencies will be None.
2. When getting updates for an installed repository where the updates include newly defined repository
dependency definitions. This scenario is similar to 1. above. The tool shed's get_repository_information()
method is the caller, and both the received repository and repository_metadata will be objects, but
tool_dependencies and repository_dependencies will be None.
3. When a tool shed repository that was uninstalled from a Galaxy instance is being reinstalled with no
updates available. In this case, both repository and repository_metadata will be None, but tool_dependencies
and repository_dependencies will be objects previously retrieved from the tool shed if the repository includes
definitions for them.
4. When a tool shed repository that was uninstalled from a Galaxy instance is being reinstalled with updates
available. In this case, this method is reached via the tool shed's get_updated_repository_information()
method, and both repository and repository_metadata will be objects but tool_dependencies and
repository_dependencies will be None.
"""
repo_info_dict = {}
repository = suc.get_repository_by_name_and_owner( app, repository_name, repository_owner )
if app.name == 'tool_shed':
# We're in the tool shed.
repository_metadata = suc.get_repository_metadata_by_changeset_revision( app,
app.security.encode_id( repository.id ),
changeset_revision )
if repository_metadata:
metadata = repository_metadata.metadata
if metadata:
tool_shed_url = str( web.url_for( '/', qualified=True ) ).rstrip( '/' )
rb = relation_builder.RelationBuilder( app, repository, repository_metadata, tool_shed_url )
# Get a dictionary of all repositories upon which the contents of the received repository depends.
repository_dependencies = rb.get_repository_dependencies_for_changeset_revision()
tool_dependencies = metadata.get( 'tool_dependencies', {} )
if tool_dependencies:
new_tool_dependencies = {}
for dependency_key, requirements_dict in tool_dependencies.items():
if dependency_key in [ 'set_environment' ]:
new_set_environment_dict_list = []
for set_environment_dict in requirements_dict:
set_environment_dict[ 'repository_name' ] = repository_name
set_environment_dict[ 'repository_owner' ] = repository_owner
set_environment_dict[ 'changeset_revision' ] = changeset_revision
new_set_environment_dict_list.append( set_environment_dict )
new_tool_dependencies[ dependency_key ] = new_set_environment_dict_list
else:
requirements_dict[ 'repository_name' ] = repository_name
requirements_dict[ 'repository_owner' ] = repository_owner
requirements_dict[ 'changeset_revision' ] = changeset_revision
new_tool_dependencies[ dependency_key ] = requirements_dict
tool_dependencies = new_tool_dependencies
# Cast unicode to string, with the exception of description, since it is free text and can contain special characters.
repo_info_dict[ str( repository.name ) ] = ( repository.description,
str( repository_clone_url ),
str( changeset_revision ),
str( ctx_rev ),
str( repository_owner ),
repository_dependencies,
tool_dependencies )
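    # The value stored under the repository name is a 7-tuple:
    # ( description, clone URL, changeset revision, ctx rev, owner,
    #   repository dependencies, tool dependencies ).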
return repo_info_dict
def create_repository( app, name, type, description, long_description, user_id, category_ids=[] ):
sa_session = app.model.context.current
# Add the repository record to the database.
repository = app.model.Repository( name=name,
type=type,
description=description,
long_description=long_description,
user_id=user_id )
# Flush to get the id.
sa_session.add( repository )
sa_session.flush()
# Create an admin role for the repository.
repository_admin_role = create_repository_admin_role( app, repository )
# Determine the repository's repo_path on disk.
dir = os.path.join( app.config.file_path, *directory_hash_id( repository.id ) )
# Create directory if it does not exist.
if not os.path.exists( dir ):
os.makedirs( dir )
# Define repo name inside hashed directory.
repository_path = os.path.join( dir, "repo_%d" % repository.id )
# Create local repository directory.
if not os.path.exists( repository_path ):
os.makedirs( repository_path )
# Create the local repository.
repo = hg_util.get_repo_for_repository( app, repository=None, repo_path=repository_path, create=True )
# Add an entry in the hgweb.config file for the local repository.
lhs = "repos/%s/%s" % ( repository.user.username, repository.name )
app.hgweb_config_manager.add_entry( lhs, repository_path )
# Create a .hg/hgrc file for the local repository.
hg_util.create_hgrc_file( app, repository )
flush_needed = False
if category_ids:
# Create category associations
for category_id in category_ids:
category = sa_session.query( app.model.Category ) \
.get( app.security.decode_id( category_id ) )
rca = app.model.RepositoryCategoryAssociation( repository, category )
sa_session.add( rca )
flush_needed = True
if flush_needed:
sa_session.flush()
# Update the repository registry.
app.repository_registry.add_entry( repository )
message = "Repository <b>%s</b> has been created." % str( repository.name )
return repository, message
def create_repository_admin_role( app, repository ):
"""
Create a new role with name-spaced name based on the repository name and its owner's public user
    name. This will ensure that the role name is unique.
"""
sa_session = app.model.context.current
name = get_repository_admin_role_name( str( repository.name ), str( repository.user.username ) )
description = 'A user or group member with this role can administer this repository.'
role = app.model.Role( name=name, description=description, type=app.model.Role.types.SYSTEM )
sa_session.add( role )
sa_session.flush()
# Associate the role with the repository owner.
ura = app.model.UserRoleAssociation( repository.user, role )
# Associate the role with the repository.
rra = app.model.RepositoryRoleAssociation( repository, role )
sa_session.add( rra )
sa_session.flush()
return role
def get_installed_tool_shed_repository( app, id ):
"""Get a tool shed repository record from the Galaxy database defined by the id."""
return app.install_model.context.query( app.install_model.ToolShedRepository ) \
.get( app.security.decode_id( id ) )
def get_repo_info_dict( app, user, repository_id, changeset_revision ):
repository = suc.get_repository_in_tool_shed( app, repository_id )
repo = hg_util.get_repo_for_repository( app, repository=repository, repo_path=None, create=False )
repository_clone_url = common_util.generate_clone_url_for_repository_in_tool_shed( user, repository )
repository_metadata = suc.get_repository_metadata_by_changeset_revision( app,
repository_id,
changeset_revision )
if not repository_metadata:
# The received changeset_revision is no longer installable, so get the next changeset_revision
# in the repository's changelog. This generally occurs only with repositories of type
# repository_suite_definition or tool_dependency_definition.
next_downloadable_changeset_revision = \
suc.get_next_downloadable_changeset_revision( repository, repo, changeset_revision )
if next_downloadable_changeset_revision:
repository_metadata = suc.get_repository_metadata_by_changeset_revision( app,
repository_id,
next_downloadable_changeset_revision )
if repository_metadata:
# For now, we'll always assume that we'll get repository_metadata, but if we discover our assumption
# is not valid we'll have to enhance the callers to handle repository_metadata values of None in the
# returned repo_info_dict.
metadata = repository_metadata.metadata
if 'tools' in metadata:
includes_tools = True
else:
includes_tools = False
includes_tools_for_display_in_tool_panel = repository_metadata.includes_tools_for_display_in_tool_panel
repository_dependencies_dict = metadata.get( 'repository_dependencies', {} )
repository_dependencies = repository_dependencies_dict.get( 'repository_dependencies', [] )
has_repository_dependencies, has_repository_dependencies_only_if_compiling_contained_td = \
suc.get_repository_dependency_types( repository_dependencies )
if 'tool_dependencies' in metadata:
includes_tool_dependencies = True
else:
includes_tool_dependencies = False
else:
# Here's where we may have to handle enhancements to the callers. See above comment.
includes_tools = False
has_repository_dependencies = False
has_repository_dependencies_only_if_compiling_contained_td = False
includes_tool_dependencies = False
includes_tools_for_display_in_tool_panel = False
ctx = hg_util.get_changectx_for_changeset( repo, changeset_revision )
repo_info_dict = create_repo_info_dict( app=app,
repository_clone_url=repository_clone_url,
changeset_revision=changeset_revision,
ctx_rev=str( ctx.rev() ),
repository_owner=repository.user.username,
repository_name=repository.name,
repository=repository,
repository_metadata=repository_metadata,
tool_dependencies=None,
repository_dependencies=None )
return repo_info_dict, includes_tools, includes_tool_dependencies, includes_tools_for_display_in_tool_panel, \
has_repository_dependencies, has_repository_dependencies_only_if_compiling_contained_td
def get_repository_admin_role_name( repository_name, repository_owner ):
return '%s_%s_admin' % ( str( repository_name ), str( repository_owner ) )
def get_role_by_id( app, role_id ):
"""Get a Role from the database by id."""
sa_session = app.model.context.current
return sa_session.query( app.model.Role ).get( app.security.decode_id( role_id ) )
def handle_role_associations( app, role, repository, **kwd ):
sa_session = app.model.context.current
message = kwd.get( 'message', '' )
status = kwd.get( 'status', 'done' )
repository_owner = repository.user
if kwd.get( 'manage_role_associations_button', False ):
in_users_list = util.listify( kwd.get( 'in_users', [] ) )
in_users = [ sa_session.query( app.model.User ).get( x ) for x in in_users_list ]
# Make sure the repository owner is always associated with the repostory's admin role.
owner_associated = False
for user in in_users:
if user.id == repository_owner.id:
owner_associated = True
break
if not owner_associated:
in_users.append( repository_owner )
message += "The repository owner must always be associated with the repository's administrator role. "
status = 'error'
in_groups_list = util.listify( kwd.get( 'in_groups', [] ) )
in_groups = [ sa_session.query( app.model.Group ).get( x ) for x in in_groups_list ]
in_repositories = [ repository ]
app.security_agent.set_entity_role_associations( roles=[ role ],
users=in_users,
groups=in_groups,
repositories=in_repositories )
sa_session.refresh( role )
message += "Role <b>%s</b> has been associated with %d users, %d groups and %d repositories. " % \
( str( role.name ), len( in_users ), len( in_groups ), len( in_repositories ) )
in_users = []
out_users = []
in_groups = []
out_groups = []
for user in sa_session.query( app.model.User ) \
.filter( app.model.User.table.c.deleted==False ) \
.order_by( app.model.User.table.c.email ):
if user in [ x.user for x in role.users ]:
in_users.append( ( user.id, user.email ) )
else:
out_users.append( ( user.id, user.email ) )
for group in sa_session.query( app.model.Group ) \
.filter( app.model.Group.table.c.deleted==False ) \
.order_by( app.model.Group.table.c.name ):
if group in [ x.group for x in role.groups ]:
in_groups.append( ( group.id, group.name ) )
else:
out_groups.append( ( group.id, group.name ) )
associations_dict = dict( in_users=in_users,
out_users=out_users,
in_groups=in_groups,
out_groups=out_groups,
message=message,
status=status )
return associations_dict
def validate_repository_name( app, name, user ):
# Repository names must be unique for each user, must be at least four characters
# in length and must contain only lower-case letters, numbers, and the '_' character.
if name in [ 'None', None, '' ]:
return 'Enter the required repository name.'
if name in [ 'repos' ]:
return "The term <b>%s</b> is a reserved word in the tool shed, so it cannot be used as a repository name." % name
check_existing = suc.get_repository_by_name_and_owner( app, name, user.username )
if check_existing is not None:
if check_existing.deleted:
return 'You have a deleted repository named <b>%s</b>, so choose a different name.' % name
else:
return "You already have a repository named <b>%s</b>, so choose a different name." % name
if len( name ) < 4:
return "Repository names must be at least 4 characters in length."
if len( name ) > 80:
return "Repository names cannot be more than 80 characters in length."
if not( VALID_REPOSITORYNAME_RE.match( name ) ):
return "Repository names must contain only lower-case letters, numbers and underscore <b>_</b>."
return ''
|
the-stack_0_11073 | import unittest
import os
import sys
import os.path as path
import numpy as np
import scipy
# Path to where the bindings live
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src")))
if os.name == 'nt': # if Windows
# handle default location where VS puts binary
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "build", "Debug")))
else:
# normal / unix case
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "build")))
import potpourri3d as pp3d
asset_path = os.path.abspath(os.path.dirname(__file__))
def generate_verts(n_pts=999):
np.random.seed(777)
return np.random.rand(n_pts, 3)
def generate_faces(n_pts=999):
# n_pts should be a multiple of 3 for indexing to work out
np.random.seed(777)
rand_faces = np.random.randint(0, n_pts, size=(2*n_pts,3))
coverage_faces = np.arange(n_pts).reshape(-1, 3)
faces = np.vstack((rand_faces, coverage_faces))
return faces
def is_symmetric(A, eps=1e-6):
resid = A - A.T
return np.all(np.abs(resid.data) < eps)
def is_nonnegative(A, eps=1e-6):
return np.all(A.data > -eps)
class TestCore(unittest.TestCase):
def test_write_read_mesh(self):
for ext in ['obj']:
V = generate_verts()
F = generate_faces()
fname = "test." + ext
# write
pp3d.write_mesh(V,F,fname)
Vnew, Fnew = pp3d.read_mesh(fname)
self.assertLess(np.amax(np.abs(V-Vnew)), 1e-6)
self.assertTrue((F==Fnew).all())
def test_write_read_point_cloud(self):
for ext in ['obj', 'ply']:
V = generate_verts()
fname = "test_cloud." + ext
# write
pp3d.write_point_cloud(V, fname)
Vnew = pp3d.read_point_cloud(fname)
self.assertLess(np.amax(np.abs(V-Vnew)), 1e-6)
def test_mesh_heat_distance(self):
V = generate_verts()
F = generate_faces()
# Test stateful version
solver = pp3d.MeshHeatMethodDistanceSolver(V,F)
dist = solver.compute_distance(7)
self.assertEqual(dist.shape[0], V.shape[0])
dist = solver.compute_distance_multisource([1,2,3])
self.assertEqual(dist.shape[0], V.shape[0])
# = Test one-off versions
dist = pp3d.compute_distance(V,F,7)
self.assertEqual(dist.shape[0], V.shape[0])
dist = pp3d.compute_distance_multisource(V,F,[1,3,4])
self.assertEqual(dist.shape[0], V.shape[0])
def test_mesh_vector_heat(self):
V, F = pp3d.read_mesh(os.path.join(asset_path, "bunny_small.ply"))
solver = pp3d.MeshVectorHeatSolver(V,F)
# Scalar extension
ext = solver.extend_scalar([1, 22], [0., 6.])
self.assertEqual(ext.shape[0], V.shape[0])
self.assertGreaterEqual(np.amin(ext), 0.)
# Get frames
basisX, basisY, basisN = solver.get_tangent_frames()
self.assertEqual(basisX.shape[0], V.shape[0])
self.assertEqual(basisY.shape[0], V.shape[0])
self.assertEqual(basisN.shape[0], V.shape[0])
# TODO could check orthogonal
# Vector heat (transport vector)
ext = solver.transport_tangent_vector(1, [6., 6.])
self.assertEqual(ext.shape[0], V.shape[0])
self.assertEqual(ext.shape[1], 2)
ext = solver.transport_tangent_vectors([1, 22], [[6., 6.], [3., 4.]])
self.assertEqual(ext.shape[0], V.shape[0])
self.assertEqual(ext.shape[1], 2)
# Vector heat (log map)
logmap = solver.compute_log_map(1)
self.assertEqual(logmap.shape[0], V.shape[0])
self.assertEqual(logmap.shape[1], 2)
def test_mesh_cotan_laplace(self):
V, F = pp3d.read_mesh(os.path.join(asset_path, "bunny_small.ply"))
L = pp3d.cotan_laplacian(V,F)
self.assertEqual(L.shape[0],V.shape[0])
self.assertEqual(L.shape[1],V.shape[0])
self.assertLess(np.abs(np.sum(L)), 1e-6)
def test_mesh_areas(self):
V, F = pp3d.read_mesh(os.path.join(asset_path, "bunny_small.ply"))
face_area = pp3d.face_areas(V,F)
self.assertEqual(face_area.shape[0],F.shape[0])
self.assertTrue(np.all(face_area >= 0))
vert_area = pp3d.vertex_areas(V,F)
self.assertLess(np.abs(np.sum(face_area) - np.sum(vert_area)), 1e-6)
def test_mesh_flip_geodesic(self):
V, F = pp3d.read_mesh(os.path.join(asset_path, "bunny_small.ply"))
# Test stateful version
path_solver = pp3d.EdgeFlipGeodesicSolver(V,F)
# Do a first path
path_pts = path_solver.find_geodesic_path(v_start=14, v_end=22)
self.assertEqual(len(path_pts.shape), 2)
self.assertEqual(path_pts.shape[1], 3)
# Do some more
for i in range(5):
path_pts = path_solver.find_geodesic_path(v_start=14, v_end=22+i)
self.assertEqual(len(path_pts.shape), 2)
self.assertEqual(path_pts.shape[1], 3)
# Initialize with a compound path
path_pts = path_solver.find_geodesic_path_poly([1173, 148, 870, 898])
self.assertEqual(len(path_pts.shape), 2)
self.assertEqual(path_pts.shape[1], 3)
# Do a loop
loop_pts = path_solver.find_geodesic_loop([1173, 148, 870, 898])
self.assertEqual(len(loop_pts.shape), 2)
self.assertEqual(loop_pts.shape[1], 3)
# Do another loop
# this one contracts to a point
loop_pts = path_solver.find_geodesic_loop([307, 757, 190])
self.assertEqual(len(loop_pts.shape), 2)
self.assertEqual(loop_pts.shape[1], 3)
def test_point_cloud_distance(self):
P = generate_verts()
solver = pp3d.PointCloudHeatSolver(P)
dist = solver.compute_distance(7)
self.assertEqual(dist.shape[0], P.shape[0])
dist = solver.compute_distance_multisource([1,2,3])
self.assertEqual(dist.shape[0], P.shape[0])
def test_point_cloud_vector_heat(self):
P = generate_verts()
solver = pp3d.PointCloudHeatSolver(P)
# Scalar extension
ext = solver.extend_scalar([1, 22], [0., 6.])
self.assertEqual(ext.shape[0], P.shape[0])
self.assertGreaterEqual(np.amin(ext), 0.)
# Get frames
basisX, basisY, basisN = solver.get_tangent_frames()
self.assertEqual(basisX.shape[0], P.shape[0])
self.assertEqual(basisY.shape[0], P.shape[0])
self.assertEqual(basisN.shape[0], P.shape[0])
# TODO could check orthogonal
# Vector heat (transport vector)
ext = solver.transport_tangent_vector(1, [6., 6.])
self.assertEqual(ext.shape[0], P.shape[0])
self.assertEqual(ext.shape[1], 2)
ext = solver.transport_tangent_vectors([1, 22], [[6., 6.], [3., 4.]])
self.assertEqual(ext.shape[0], P.shape[0])
self.assertEqual(ext.shape[1], 2)
# Vector heat (log map)
logmap = solver.compute_log_map(1)
self.assertEqual(logmap.shape[0], P.shape[0])
self.assertEqual(logmap.shape[1], 2)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_11074 | #!/usr/bin/env python3
import io
import os
import requests
# Imports the Google Cloud client library
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
# Gazebo
# prefix = "http://10.16.103.133:8080/"
prefix = "http://10.16.104.100:8080/"
# prefix = "http://turtle1.athenian.org:8080/"
def move_robot(direction):
try:
return requests.get(prefix + direction)
except BaseException as e:
print(e)
def main():
# Instantiates a client
client = speech.SpeechClient()
while (True):
input("Hit return to give command")
# os.system("say 'speak'")
os.system("rec --rate 16k --channels=1 test.flac trim 0 1.5")
# The name of the audio file to transcribe
file_name = os.path.join(os.path.dirname(__file__) + '/test.flac')
# Loads the audio into memory
with io.open(file_name, 'rb') as audio_file:
content = audio_file.read()
audio = types.RecognitionAudio(content=content)
config = types.RecognitionConfig(
encoding=enums.RecognitionConfig.AudioEncoding.FLAC,
sample_rate_hertz=16000,
language_code='en-US')
# Detects speech in the audio file
response = client.recognize(config, audio)
for result in response.results:
translation = result.alternatives[0].transcript
print('Transcript: {}'.format(translation))
print('Confidence: {}'.format(result.alternatives[0].confidence))
if ("left" in translation):
print("Send left command")
resp = move_robot('left')
elif ("right" in translation):
print("Send right command")
resp = move_robot('right')
elif ("forward" in translation):
print("Send forward command")
resp = move_robot('forward')
elif ("back" in translation):
print("Send backward command")
resp = move_robot('backward')
elif ("stop" in translation):
print("Send stop command")
resp = move_robot('stop')
if __name__ == "__main__":
main()
|
the-stack_0_11075 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 21 21:38:29 2020
@author: oxenb
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from mne_features.feature_extraction import FeatureExtractor
from sklearn.pipeline import Pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit, cross_val_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import (GridSearchCV, cross_val_score,
StratifiedKFold)
from mne import Epochs, pick_types, events_from_annotations
import mne
from mne.decoding import (SlidingEstimator, GeneralizingEstimator, Scaler,
cross_val_multiscore, LinearModel, get_coef,
Vectorizer, CSP)
DATA_PATH = "data/"
EXP_NAME = DATA_PATH+"Or_3_raw.fif" ## file name to run the anaylsis on
features = ['app_entropy', 'decorr_time', 'higuchi_fd',
'hjorth_complexity', 'hjorth_complexity_spect', 'hjorth_mobility',
'hjorth_mobility_spect', 'hurst_exp', 'katz_fd', 'kurtosis',
'line_length', 'mean', 'ptp_amp', 'samp_entropy',
'skewness', 'spect_edge_freq', 'spect_entropy', 'spect_slope',
'std', 'svd_entropy', 'svd_fisher_info', 'teager_kaiser_energy',
'variance', 'wavelet_coef_energy', 'zero_crossings', 'max_cross_corr',
'nonlin_interdep', 'phase_lock_val', 'spect_corr', 'time_corr']
selected_features = ["std","mean","kurtosis","skewness"] # can be cgahnged to any feature
def preprocess():
    tmin, tmax = -1., 0.8  #: epoch window; these bounds may still need tuning
raw = mne.io.read_raw_fif(EXP_NAME, preload=True)
raw.filter(5., 40., fir_design='firwin', skip_by_annotation='edge')
events = mne.find_events(raw, 'STI')
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
event_id = {'Left': 1, 'right': 2,'none': 3}
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=None, preload=True)
epochs.pick_types(eeg=True, exclude='bads') # remove stim and EOG
return epochs,raw
def train_mne_feature(data,labels,raw):
pipe = Pipeline([('fe', FeatureExtractor(sfreq = raw.info['sfreq'],
selected_funcs = selected_features)),
('scaler', StandardScaler()),
('clf', GradientBoostingClassifier())])
y = labels
    # params_grid = {'fe__app_entropy__emb': np.arange(2, 5)}  #: can add gradient boosting hyperparameters
    params_grid = {}  #: can add gradient boosting hyperparameters
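    # For example (illustrative values, not tuned), GradientBoostingClassifier parameters
    # can be addressed through the 'clf' pipeline step:
    # params_grid = {'clf__n_estimators': [50, 100], 'clf__learning_rate': [0.05, 0.1]}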
gs = GridSearchCV(estimator=pipe, param_grid=params_grid,
cv=StratifiedKFold(n_splits=5, random_state=42), n_jobs=1,
return_train_score=True)
gs.fit(data, y)
scores = pd.DataFrame(gs.cv_results_)
print(scores[['params', 'mean_test_score', 'mean_train_score']])
# Best parameters obtained with GridSearchCV:
print(gs.best_params_)
    #: to run the best model, a separate held-out test dataset may still be needed
# gs_best = gs.best_estimator_
# new_scores = cross_val_score(gs_best, data, y, cv=skf)
# print('Cross-validation accuracy score (with optimized parameters) = %1.3f '
# '(+/- %1.5f)' % (np.mean(new_scores), np.std(new_scores)))
return pipe
def main():
epochs,raw = preprocess()
labels = epochs.events[:, -1]
# get MEG and EEG data
epochs_data_train = epochs.get_data()
pipe = train_mne_feature(epochs_data_train,labels,raw)
transformed_data = pipe["fe"].fit_transform(epochs_data_train) #: transformed_data is matrix dim by the featuhers X events
return pipe,epochs_data_train
if __name__ == '__main__':
pipe,epochs_data_train = main()
'''
['app_entropy', 'decorr_time', 'energy_freq_bands', 'higuchi_fd',
'hjorth_complexity', 'hjorth_complexity_spect', 'hjorth_mobility'
'hjorth_mobility_spect', 'hurst_exp', 'katz_fd', 'kurtosis', 'line_length',
'mean', 'pow_freq_bands', 'ptp_amp', 'samp_entropy', 'skewness',
'spect_edge_freq', 'spect_entropy', 'spect_slope', 'std', 'svd_entropy',
'svd_fisher_info', 'teager_kaiser_energy', 'variance', 'wavelet_coef_energy',
'zero_crossings', 'max_cross_corr', 'nonlin_interdep', 'phase_lock_val',
'spect_corr', 'time_corr']
'''
|
the-stack_0_11078 | '''
Uses [[https://github.com/fabianonline/telegram_backup#readme][telegram_backup]] database for messages data
'''
from pathlib import Path
from typing import Optional, Union, TypeVar
from urllib.parse import unquote  # TODO mm, make it easier to remember to use...
from ..common import PathIsh, Visit, get_logger, Loc, extract_urls, from_epoch, Results, echain
# TODO potentially, belongs to my. package
# TODO kython?
T = TypeVar('T')
def unwrap(res: Union[T, Exception]) -> T:
if isinstance(res, Exception):
raise res
else:
return res
# TODO move to common?
def dataset_readonly(db: Path):
import dataset # type: ignore
# see https://github.com/pudo/dataset/issues/136#issuecomment-128693122
import sqlite3
creator = lambda: sqlite3.connect(f'file:{db}?immutable=1', uri=True)
return dataset.connect('sqlite:///' , engine_kwargs={'creator': creator})
def index(database: PathIsh) -> Results:
logger = get_logger()
path = Path(database)
assert path.is_file(), path
# TODO context manager?
db = dataset_readonly(path) # TODO could check is_file inside
def make_query(text_query: str):
return f"""
WITH entities AS (
SELECT 'dialog' as type, id, coalesce(username, id) as handle, coalesce(first_name || " " || last_name, username, id) as display_name FROM users
UNION
SELECT 'group' as type, id, id as handle , coalesce(name, id) as display_name FROM chats
)
SELECT src.display_name AS chatname
, src.handle AS chat
, snd.display_name AS sender
, M.time AS time
, {text_query} AS text
, M.id AS mid
FROM messages AS M
/* chat types are 'dialog' (1-1), 'group' and 'supergroup' */
    /* this is a bit of a hacky way to handle all groups in one go */
LEFT JOIN entities AS src ON M.source_id = src.id AND src.type = (CASE M.source_type WHEN 'supergroup' THEN 'group' ELSE M.source_type END)
LEFT JOIN entities AS snd ON M.sender_id = snd.id AND snd.type = 'dialog'
WHERE
M.message_type NOT IN ('service_message', 'empty_message')
/* used to do this, but doesn't really give much of a speedup */
/* AND (M.has_media == 1 OR (text LIKE '%http%')) */
ORDER BY time;
""".strip()
# TODO yield error if chatname or chat or smth else is null?
for row in db.query(make_query('M.text')):
try:
yield from _handle_row(row)
except Exception as ex:
yield echain(RuntimeError(f'While handling {row}'), ex)
# , None, sys.exc_info()[2]
# TODO hmm. traceback isn't preserved; wonder if that's because it's too heavy to attach to every single exception object..
# old (also 'stable') version doesn't have 'json' column yet...
if 'json' in db['messages'].columns:
for row in db.query(make_query("json_extract(json, '$.media.webpage.description')")):
try:
yield from _handle_row(row)
except Exception as ex:
yield echain(RuntimeError(f'While handling {row}'), ex)
def _handle_row(row) -> Results:
text = row['text']
if text is None:
return
urls = extract_urls(text)
if len(urls) == 0:
return
dt = from_epoch(row['time'])
mid: str = unwrap(row['mid'])
# TODO perhaps we could be defensive with null sender/chat etc and still emit the Visit
sender: str = unwrap(row['sender'])
chatname: str = unwrap(row['chatname'])
chat: str = unwrap(row['chat'])
in_context = f'https://t.me/{chat}/{mid}'
for u in urls:
# https://www.reddit.com/r/Telegram/comments/6ufwi3/link_to_a_specific_message_in_a_channel_possible/
# hmm, only seems to work on mobile app, but better than nothing...
yield Visit(
url=unquote(u),
dt=dt,
context=f"{sender}: {text}",
locator=Loc.make(
title=f"chat with {chatname}",
href=in_context,
),
)
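# Hedged usage sketch (not part of the original module; the database path is an
# assumption). index() yields Visit objects, or Exceptions for rows it failed on:
#
#     for res in index('~/backups/telegram/database.sqlite'):
#         if isinstance(res, Exception):
#             print('error:', res)
#         else:
#             print(res.dt, res.url, res.context)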
|
the-stack_0_11079 | """
Policy rules class
"""
from typing import Union, List, Dict
from marshmallow import Schema, fields, post_load
from .conditions.attribute.base import validate_path
from .conditions.schema import ConditionSchema
from ..context import EvaluationContext
class Rules(object):
"""
Policy rules
"""
def __init__(
self,
subject: Union[List, Dict],
resource: Union[List, Dict],
action: Union[List, Dict],
context: Union[List, Dict]
):
self.subject = subject
self.resource = resource
self.action = action
self.context = context
def is_satisfied(self, ctx: EvaluationContext):
"""
Check if request satisfies all conditions
:param ctx: policy evaluation context
:return: True if satisfied else False
"""
return self._is_satisfied("subject", self.subject, ctx) and \
self._is_satisfied("resource", self.resource, ctx) and \
self._is_satisfied("action", self.action, ctx) and \
self._is_satisfied("context", self.context, ctx)
def _is_satisfied(self, ace_name: str, ace_conditions, ctx: EvaluationContext):
"""
Check if the access control element satisfies request
:param ace_name: access control element name
:param ace_conditions: access control element conditions
:param ctx: policy evaluation context
:return: True if satisfied else False
"""
if isinstance(ace_conditions, list):
return self._implicit_or(ace_name, ace_conditions, ctx)
if isinstance(ace_conditions, dict):
return self._implicit_and(ace_name, ace_conditions, ctx)
# If ace is not in correct format, return False. This condition is just for best
# practice and will never happen
return False # pragma: no cover
def _implicit_or(self, ace_name: str, ace_conditions: list, ctx: EvaluationContext):
for _ace_conditions in ace_conditions:
# If even one of the conditions is satisfied, return True
if self._implicit_and(ace_name, _ace_conditions, ctx):
return True
# If no conditions are satisfied, return False
return False
@staticmethod
def _implicit_and(ace_name: str, ace_conditions: dict, ctx: EvaluationContext):
for attribute_path, condition in ace_conditions.items():
ctx.ace = ace_name
ctx.attribute_path = attribute_path
# If even one of the conditions is not satisfied, return False
if not condition.is_satisfied(ctx):
return False
# If all conditions are satisfied, return True
return True
class RuleField(fields.Field):
"""
Marshmallow field class for rules
"""
_implicit_and_field = fields.Dict(
keys=fields.String(validate=validate_path),
values=fields.Nested(ConditionSchema)
)
_implicit_or_field = fields.List(
fields.Dict(
keys=fields.String(validate=validate_path),
values=fields.Nested(ConditionSchema)
)
)
def _serialize(self, value, attr, obj, **kwargs):
if isinstance(value, list):
return self._implicit_or_field._serialize(value, attr, obj, **kwargs) # pylint: disable=protected-access
return self._implicit_and_field._serialize(value, attr, obj, **kwargs) # pylint: disable=protected-access
def _deserialize(self, value, attr, data, **kwargs):
if isinstance(value, list):
return self._implicit_or_field.deserialize(value, attr, data, **kwargs) # pylint: disable=protected-access
return self._implicit_and_field.deserialize(value, attr, data, **kwargs) # pylint: disable=protected-access
class RulesSchema(Schema):
"""
JSON schema for rules
"""
subject = RuleField(default={}, missing={})
resource = RuleField(default={}, missing={})
action = RuleField(default={}, missing={})
context = RuleField(default={}, missing={})
@post_load
def post_load(self, data, **_): # pylint: disable=missing-docstring,no-self-use
return Rules(**data)
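# A minimal sketch of how these schemas fit together (illustrative only; real rules carry
# condition objects that follow ConditionSchema, which is defined elsewhere):
#
#     rules = RulesSchema().load({})   # all four elements default to {} -> always satisfied
#     isinstance(rules, Rules)         # True, thanks to the post_load hook
#     rules.is_satisfied(ctx)          # ctx is an EvaluationContext built by the caller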
|
the-stack_0_11080 | """coBib parser test class."""
import pytest
from cobib.config import config
from .. import get_resource
class ParserTest:
"""The base class for coBib's parser test classes."""
EXAMPLE_BIBTEX_FILE = get_resource("example_entry.bib")
"""Path to the example BibTeX file."""
EXAMPLE_YAML_FILE = get_resource("example_entry.yaml")
"""Path to the example YAML file (matching the BibTeX file)."""
EXAMPLE_ENTRY_DICT = {
"ENTRYTYPE": "article",
"author": "Yudong Cao and Jonathan Romero and Jonathan P. Olson and Matthias Degroote and "
+ "Peter D. Johnson and M{\\'a}ria Kieferov{\\'a} and Ian D. Kivlichan and Tim Menke and "
+ "Borja Peropadre and Nicolas P. D. Sawaya and Sukin Sim and Libor Veis and Al{\\'a}n "
+ "Aspuru-Guzik",
"doi": "10.1021/acs.chemrev.8b00803",
"journal": "Chemical Reviews",
"month": "aug",
"number": 19,
"pages": "10856--10915",
"publisher": "American Chemical Society ({ACS})",
"title": "Quantum Chemistry in the Age of Quantum Computing",
"url": ["https://doi.org/10.1021%2Facs.chemrev.8b00803"],
"volume": 119,
"year": 2019,
}
"""The matching dictionary to the example files also included here."""
@pytest.fixture(autouse=True)
def setup(self) -> None:
# pylint: disable=no-self-use
"""Setup."""
config.defaults()
|
the-stack_0_11081 | """
Module contains tools for collecting data from various remote sources
"""
import warnings
import tempfile
import datetime as dt
import time
from collections import defaultdict
import numpy as np
from pandas.compat import(
StringIO, bytes_to_str, range, lmap, zip
)
import pandas.compat as compat
from pandas import Panel, DataFrame, Series, read_csv, concat, to_datetime, DatetimeIndex, DateOffset
from pandas.core.common import is_list_like, PandasError
from pandas.io.common import urlopen, ZipFile, urlencode
from pandas.tseries.offsets import MonthEnd
from pandas.util.testing import _network_error_classes
from pandas.io.html import read_html
warnings.warn("\n"
"The pandas.io.data module is moved to a separate package "
"(pandas-datareader) and will be removed from pandas in a "
"future version.\nAfter installing the pandas-datareader package "
"(https://github.com/pydata/pandas-datareader), you can change "
"the import ``from pandas.io import data, wb`` to "
"``from pandas_datareader import data, wb``.",
FutureWarning)
class SymbolWarning(UserWarning):
pass
class RemoteDataError(PandasError, IOError):
pass
def DataReader(name, data_source=None, start=None, end=None,
retry_count=3, pause=0.001):
"""
Imports data from a number of online sources.
Currently supports Yahoo! Finance, Google Finance, St. Louis FED (FRED)
and Kenneth French's data library.
Parameters
----------
name : str or list of strs
the name of the dataset. Some data sources (yahoo, google, fred) will
accept a list of names.
data_source: str
the data source ("yahoo", "google", "fred", or "ff")
start : {datetime, None}
left boundary for range (defaults to 1/1/2010)
end : {datetime, None}
right boundary for range (defaults to today)
Examples
----------
# Data from Yahoo! Finance
gs = DataReader("GS", "yahoo")
# Data from Google Finance
aapl = DataReader("AAPL", "google")
# Data from FRED
vix = DataReader("VIXCLS", "fred")
# Data from Fama/French
ff = DataReader("F-F_Research_Data_Factors", "famafrench")
ff = DataReader("F-F_Research_Data_Factors_weekly", "famafrench")
ff = DataReader("6_Portfolios_2x3", "famafrench")
ff = DataReader("F-F_ST_Reversal_Factor", "famafrench")
"""
start, end = _sanitize_dates(start, end)
if data_source == "yahoo":
return get_data_yahoo(symbols=name, start=start, end=end,
adjust_price=False, chunksize=25,
retry_count=retry_count, pause=pause)
elif data_source == "google":
return get_data_google(symbols=name, start=start, end=end,
adjust_price=False, chunksize=25,
retry_count=retry_count, pause=pause)
elif data_source == "fred":
return get_data_fred(name, start, end)
elif data_source == "famafrench":
return get_data_famafrench(name)
def _sanitize_dates(start, end):
from pandas.core.datetools import to_datetime
start = to_datetime(start)
end = to_datetime(end)
if start is None:
start = dt.datetime(2010, 1, 1)
if end is None:
end = dt.datetime.today()
return start, end
def _in_chunks(seq, size):
"""
Return sequence in 'chunks' of size defined by size
"""
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
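# Illustrative sketch (not part of the original module): _in_chunks lazily
# yields consecutive fixed-size slices, e.g.
# >>> list(_in_chunks(['AAPL', 'GOOG', 'MSFT'], 2))
# [['AAPL', 'GOOG'], ['MSFT']]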
_yahoo_codes = {'symbol': 's', 'last': 'l1', 'change_pct': 'p2', 'PE': 'r',
'time': 't1', 'short_ratio': 's7'}
_YAHOO_QUOTE_URL = 'http://finance.yahoo.com/d/quotes.csv?'
def get_quote_yahoo(symbols):
"""
Get current yahoo quote
Returns a DataFrame
"""
if isinstance(symbols, compat.string_types):
sym_list = symbols
else:
sym_list = '+'.join(symbols)
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
request = ''.join(compat.itervalues(_yahoo_codes)) # code request string
header = list(_yahoo_codes.keys())
data = defaultdict(list)
url_str = _YAHOO_QUOTE_URL + 's=%s&f=%s' % (sym_list, request)
with urlopen(url_str) as url:
lines = url.readlines()
for line in lines:
fields = line.decode('utf-8').strip().split(',')
for i, field in enumerate(fields):
if field[-2:] == '%"':
v = float(field.strip('"%'))
elif field[0] == '"':
v = field.strip('"')
else:
try:
v = float(field)
except ValueError:
v = field
data[header[i]].append(v)
idx = data.pop('symbol')
return DataFrame(data, index=idx)
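# Hypothetical usage sketch (the tickers below are examples, not taken from
# this module):
# >>> quotes = get_quote_yahoo(['AAPL', 'GOOG'])
# >>> quotes[['last', 'change_pct', 'short_ratio']]
# The result has one row per requested symbol, indexed by ticker.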
def get_quote_google(symbols):
raise NotImplementedError("Google Finance doesn't have this functionality")
def _retry_read_url(url, retry_count, pause, name):
for _ in range(retry_count):
time.sleep(pause)
# kludge to close the socket ASAP
try:
with urlopen(url) as resp:
lines = resp.read()
except _network_error_classes:
pass
else:
rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0,
parse_dates=True, na_values='-')[::-1]
# Yahoo! Finance sometimes does this awesome thing where they
# return 2 rows for the most recent business day
if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover
rs = rs[:-1]
#Get rid of unicode characters in index name.
try:
rs.index.name = rs.index.name.decode('unicode_escape').encode('ascii', 'ignore')
except AttributeError:
#Python 3 string has no decode method.
rs.index.name = rs.index.name.encode('ascii', 'ignore').decode()
return rs
raise IOError("after %d tries, %s did not "
"return a 200 for url %r" % (retry_count, name, url))
_HISTORICAL_YAHOO_URL = 'http://ichart.finance.yahoo.com/table.csv?'
def _get_hist_yahoo(sym, start, end, interval, retry_count, pause):
"""
Get historical data for the given name from yahoo.
Date format is datetime
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
url = (_HISTORICAL_YAHOO_URL + 's=%s' % sym +
'&a=%s' % (start.month - 1) +
'&b=%s' % start.day +
'&c=%s' % start.year +
'&d=%s' % (end.month - 1) +
'&e=%s' % end.day +
'&f=%s' % end.year +
'&g=%s' % interval +
'&ignore=.csv')
return _retry_read_url(url, retry_count, pause, 'Yahoo!')
_HISTORICAL_GOOGLE_URL = 'http://www.google.com/finance/historical?'
def _get_hist_google(sym, start, end, interval, retry_count, pause):
"""
Get historical data for the given name from google.
Date format is datetime
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
# www.google.com/finance/historical?q=GOOG&startdate=Jun+9%2C+2011&enddate=Jun+8%2C+2013&output=csv
url = "%s%s" % (_HISTORICAL_GOOGLE_URL,
urlencode({"q": sym,
"startdate": start.strftime('%b %d, ' '%Y'),
"enddate": end.strftime('%b %d, %Y'),
"output": "csv"}))
return _retry_read_url(url, retry_count, pause, 'Google')
def _adjust_prices(hist_data, price_list=None):
"""
    Return modified DataFrame or Panel with adjusted prices based on
'Adj Close' price. Adds 'Adj_Ratio' column.
"""
if price_list is None:
price_list = 'Open', 'High', 'Low', 'Close'
adj_ratio = hist_data['Adj Close'] / hist_data['Close']
data = hist_data.copy()
for item in price_list:
data[item] = hist_data[item] * adj_ratio
data['Adj_Ratio'] = adj_ratio
del data['Adj Close']
return data
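# Worked example with assumed numbers: if 'Close' is 100.0 and 'Adj Close' is
# 50.0 (e.g. after a 2:1 split), Adj_Ratio is 0.5 and each column in
# price_list is halved:
# >>> adjusted = _adjust_prices(hist_data)
# >>> adjusted[['Open', 'Close', 'Adj_Ratio']].head()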
def _calc_return_index(price_df):
"""
    Return a returns index from an input price df or series. Initial value
(typically NaN) is set to 1.
"""
df = price_df.pct_change().add(1).cumprod()
mask = df.ix[1].notnull() & df.ix[0].isnull()
df.ix[0][mask] = 1
# Check for first stock listings after starting date of index in ret_index
# If True, find first_valid_index and set previous entry to 1.
if (~mask).any():
for sym in mask.index[~mask]:
tstamp = df[sym].first_valid_index()
t_idx = df.index.get_loc(tstamp) - 1
df[sym].ix[t_idx] = 1
return df
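# Sketch of the intended behaviour (prices are assumed values): a price series
# of [10.0, 11.0, 12.1] becomes the return index [1.0, 1.1, 1.21], i.e.
# cumulative growth with the initial NaN from pct_change() replaced by 1:
# >>> ret_index = _calc_return_index(hist_data['Adj Close'])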
_YAHOO_COMPONENTS_URL = 'http://download.finance.yahoo.com/d/quotes.csv?'
def get_components_yahoo(idx_sym):
"""
Returns DataFrame containing list of component information for
index represented in idx_sym from yahoo. Includes component symbol
(ticker), exchange, and name.
Parameters
----------
idx_sym : str
Stock index symbol
Examples:
'^DJI' (Dow Jones Industrial Average)
'^NYA' (NYSE Composite)
'^IXIC' (NASDAQ Composite)
See: http://finance.yahoo.com/indices for other index symbols
Returns
-------
idx_df : DataFrame
"""
stats = 'snx'
# URL of form:
# http://download.finance.yahoo.com/d/quotes.csv?s=@%5EIXIC&f=snxl1d1t1c1ohgv
url = _YAHOO_COMPONENTS_URL + 's={0}&f={1}&e=.csv&h={2}'
idx_mod = idx_sym.replace('^', '@%5E')
url_str = url.format(idx_mod, stats, 1)
idx_df = DataFrame()
mask = [True]
comp_idx = 1
# LOOP across component index structure,
# break when no new components are found
while True in mask:
url_str = url.format(idx_mod, stats, comp_idx)
with urlopen(url_str) as resp:
raw = resp.read()
lines = raw.decode('utf-8').strip().strip('"').split('"\r\n"')
lines = [line.strip().split('","') for line in lines]
temp_df = DataFrame(lines, columns=['ticker', 'name', 'exchange'])
temp_df = temp_df.drop_duplicates()
temp_df = temp_df.set_index('ticker')
mask = ~temp_df.index.isin(idx_df.index)
comp_idx = comp_idx + 50
idx_df = idx_df.append(temp_df[mask])
return idx_df
def _dl_mult_symbols(symbols, start, end, interval, chunksize, retry_count, pause,
method):
stocks = {}
failed = []
passed = []
for sym_group in _in_chunks(symbols, chunksize):
for sym in sym_group:
try:
stocks[sym] = method(sym, start, end, interval, retry_count, pause)
passed.append(sym)
except IOError:
warnings.warn('Failed to read symbol: {0!r}, replacing with '
'NaN.'.format(sym), SymbolWarning)
failed.append(sym)
if len(passed) == 0:
raise RemoteDataError("No data fetched using "
"{0!r}".format(method.__name__))
try:
if len(stocks) > 0 and len(failed) > 0 and len(passed) > 0:
df_na = stocks[passed[0]].copy()
df_na[:] = np.nan
for sym in failed:
stocks[sym] = df_na
return Panel(stocks).swapaxes('items', 'minor')
except AttributeError:
# cannot construct a panel with just 1D nans indicating no data
raise RemoteDataError("No data fetched using "
"{0!r}".format(method.__name__))
_source_functions = {'google': _get_hist_google, 'yahoo': _get_hist_yahoo}
def _get_data_from(symbols, start, end, interval, retry_count, pause, adjust_price,
ret_index, chunksize, source):
src_fn = _source_functions[source]
# If a single symbol, (e.g., 'GOOG')
if isinstance(symbols, (compat.string_types, int)):
hist_data = src_fn(symbols, start, end, interval, retry_count, pause)
# Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT'])
elif isinstance(symbols, DataFrame):
hist_data = _dl_mult_symbols(symbols.index, start, end, interval, chunksize,
retry_count, pause, src_fn)
else:
hist_data = _dl_mult_symbols(symbols, start, end, interval, chunksize,
retry_count, pause, src_fn)
if source.lower() == 'yahoo':
if ret_index:
hist_data['Ret_Index'] = _calc_return_index(hist_data['Adj Close'])
if adjust_price:
hist_data = _adjust_prices(hist_data)
return hist_data
def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3,
pause=0.001, adjust_price=False, ret_index=False,
chunksize=25, interval='d'):
"""
Returns DataFrame/Panel of historical stock prices from symbols, over date
range, start to end. To avoid being penalized by Yahoo! Finance servers,
pauses between downloading 'chunks' of symbols can be specified.
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame
Single stock symbol (ticker), array-like object of symbols or
DataFrame with index containing stock symbols.
start : string, (defaults to '1/1/2010')
        Starting date, timestamp. Parses many different kinds of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980')
end : string, (defaults to today)
Ending date, timestamp. Same format as starting date.
retry_count : int, default 3
Number of times to retry query request.
    pause : float, default 0.001
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
adjust_price : bool, default False
If True, adjusts all prices in hist_data ('Open', 'High', 'Low',
'Close') based on 'Adj Close' price. Adds 'Adj_Ratio' column and drops
'Adj Close'.
ret_index : bool, default False
If True, includes a simple return index 'Ret_Index' in hist_data.
chunksize : int, default 25
        Number of symbols to download consecutively before initiating pause.
interval : string, default 'd'
Time interval code, valid values are 'd' for daily, 'w' for weekly,
'm' for monthly and 'v' for dividend.
Returns
-------
hist_data : DataFrame (str) or Panel (array-like object, DataFrame)
"""
if interval not in ['d', 'w', 'm', 'v']:
raise ValueError("Invalid interval: valid values are 'd', 'w', 'm' and 'v'")
return _get_data_from(symbols, start, end, interval, retry_count, pause,
adjust_price, ret_index, chunksize, 'yahoo')
def get_data_google(symbols=None, start=None, end=None, retry_count=3,
pause=0.001, adjust_price=False, ret_index=False,
chunksize=25):
"""
Returns DataFrame/Panel of historical stock prices from symbols, over date
range, start to end. To avoid being penalized by Google Finance servers,
pauses between downloading 'chunks' of symbols can be specified.
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame
Single stock symbol (ticker), array-like object of symbols or
DataFrame with index containing stock symbols.
start : string, (defaults to '1/1/2010')
        Starting date, timestamp. Parses many different kinds of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980')
end : string, (defaults to today)
Ending date, timestamp. Same format as starting date.
retry_count : int, default 3
Number of times to retry query request.
    pause : float, default 0.001
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
chunksize : int, default 25
        Number of symbols to download consecutively before initiating pause.
Returns
-------
hist_data : DataFrame (str) or Panel (array-like object, DataFrame)
"""
return _get_data_from(symbols, start, end, None, retry_count, pause,
adjust_price, ret_index, chunksize, 'google')
_FRED_URL = "http://research.stlouisfed.org/fred2/series/"
def get_data_fred(name, start=dt.datetime(2010, 1, 1),
end=dt.datetime.today()):
"""
Get data for the given name from the St. Louis FED (FRED).
Date format is datetime
Returns a DataFrame.
If multiple names are passed for "series" then the index of the
    DataFrame is the outer join of the indices of each series.
"""
start, end = _sanitize_dates(start, end)
if not is_list_like(name):
names = [name]
else:
names = name
urls = [_FRED_URL + '%s' % n + '/downloaddata/%s' % n + '.csv' for
n in names]
def fetch_data(url, name):
with urlopen(url) as resp:
data = read_csv(resp, index_col=0, parse_dates=True,
header=None, skiprows=1, names=["DATE", name],
na_values='.')
try:
return data.truncate(start, end)
except KeyError:
if data.ix[3].name[7:12] == 'Error':
raise IOError("Failed to get the data. Check that {0!r} is "
"a valid FRED series.".format(name))
raise
df = concat([fetch_data(url, n) for url, n in zip(urls, names)],
axis=1, join='outer')
return df
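# Hypothetical usage sketch (the FRED series names are examples):
# >>> gdp = get_data_fred('GDP')
# >>> rates = get_data_fred(['DGS10', 'DGS2'])  # columns outer-joined on the date index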
_FAMAFRENCH_URL = 'http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp'
def get_data_famafrench(name):
# path of zip files
zip_file_path = '{0}/{1}_TXT.zip'.format(_FAMAFRENCH_URL, name)
with urlopen(zip_file_path) as url:
raw = url.read()
with tempfile.TemporaryFile() as tmpf:
tmpf.write(raw)
with ZipFile(tmpf, 'r') as zf:
data = zf.open(zf.namelist()[0]).readlines()
line_lengths = np.array(lmap(len, data))
file_edges = np.where(line_lengths == 2)[0]
datasets = {}
edges = zip(file_edges + 1, file_edges[1:])
for i, (left_edge, right_edge) in enumerate(edges):
dataset = [d.split() for d in data[left_edge:right_edge]]
if len(dataset) > 10:
ncol_raw = np.array(lmap(len, dataset))
ncol = np.median(ncol_raw)
header_index = np.where(ncol_raw == ncol - 1)[0][-1]
header = dataset[header_index]
ds_header = dataset[header_index + 1:]
# to ensure the header is unique
header = ['{0} {1}'.format(j, hj) for j, hj in enumerate(header,
start=1)]
index = np.array([d[0] for d in ds_header], dtype=int)
dataset = np.array([d[1:] for d in ds_header], dtype=float)
datasets[i] = DataFrame(dataset, index, columns=header)
return datasets
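# Hypothetical usage sketch: the result is a dict of DataFrames keyed by the
# position of each sub-table found in the downloaded text file.
# >>> ff = get_data_famafrench('F-F_Research_Data_Factors')
# >>> ff[0].head()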
# Items needed for options class
CUR_MONTH = dt.datetime.now().month
CUR_YEAR = dt.datetime.now().year
CUR_DAY = dt.datetime.now().day
def _two_char(s):
return '{0:0>2}'.format(s)
class Options(object):
"""
***Experimental***
This class fetches call/put data for a given stock/expiry month.
It is instantiated with a string representing the ticker symbol.
The class has the following methods:
get_options_data:(month, year, expiry)
get_call_data:(month, year, expiry)
get_put_data: (month, year, expiry)
get_near_stock_price(opt_frame, above_below)
get_all_data(call, put)
get_forward_data(months, call, put) (deprecated)
Examples
--------
# Instantiate object with ticker
>>> aapl = Options('aapl', 'yahoo')
# Fetch next expiry call data
>>> calls = aapl.get_call_data()
# Can now access aapl.calls instance variable
>>> aapl.calls
# Fetch next expiry put data
>>> puts = aapl.get_put_data()
# Can now access aapl.puts instance variable
>>> aapl.puts
# cut down the call data to be 3 below and 3 above the stock price.
>>> cut_calls = aapl.get_near_stock_price(call=True, above_below=3)
# Fetch call and put data with expiry from now to 8 months out
>>> forward_data = aapl.get_forward_data(8, call=True, put=True)
# Fetch all call and put data
>>> all_data = aapl.get_all_data()
"""
_TABLE_LOC = {'calls': 1, 'puts': 2}
_OPTIONS_BASE_URL = 'http://finance.yahoo.com/q/op?s={sym}'
_FINANCE_BASE_URL = 'http://finance.yahoo.com'
def __init__(self, symbol, data_source=None):
""" Instantiates options_data with a ticker saved as symbol """
self.symbol = symbol.upper()
if data_source is None:
warnings.warn("Options(symbol) is deprecated, use Options(symbol,"
" data_source) instead", FutureWarning, stacklevel=2)
data_source = "yahoo"
if data_source != "yahoo":
raise NotImplementedError("currently only yahoo supported")
def get_options_data(self, month=None, year=None, expiry=None):
"""
***Experimental***
        Gets call/put data for the stock with the expiration date in the
given month and year
Parameters
----------
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
                IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Notes
-----
Note: Format of returned data frame is dependent on Yahoo and may change.
When called, this function will add instance variables named
calls and puts. See the following example:
>>> aapl = Options('aapl', 'yahoo') # Create object
>>> aapl.calls # will give an AttributeError
        >>> aapl.get_options_data() # Get data and set ivars
>>> aapl.calls # Doesn't throw AttributeError
        Also note that aapl.calls and aapl.puts will always be the calls
and puts for the next expiry. If the user calls this method with
a different expiry, the ivar will be named callsYYMMDD or putsYYMMDD,
where YY, MM and DD are, respectively, two digit representations of
the year, month and day for the expiry of the options.
"""
return concat([f(month, year, expiry)
for f in (self.get_put_data,
self.get_call_data)]).sortlevel()
def _get_option_frames_from_yahoo(self, expiry):
url = self._yahoo_url_from_expiry(expiry)
option_frames = self._option_frames_from_url(url)
frame_name = '_frames' + self._expiry_to_string(expiry)
setattr(self, frame_name, option_frames)
return option_frames
@staticmethod
def _expiry_to_string(expiry):
m1 = _two_char(expiry.month)
d1 = _two_char(expiry.day)
return str(expiry.year)[-2:] + m1 + d1
def _yahoo_url_from_expiry(self, expiry):
try:
expiry_links = self._expiry_links
except AttributeError:
_, expiry_links = self._get_expiry_dates_and_links()
return self._FINANCE_BASE_URL + expiry_links[expiry]
def _option_frames_from_url(self, url):
frames = read_html(url)
nframes = len(frames)
frames_req = max(self._TABLE_LOC.values())
if nframes < frames_req:
raise RemoteDataError("%s options tables found (%s expected)" % (nframes, frames_req))
if not hasattr(self, 'underlying_price'):
try:
self.underlying_price, self.quote_time = self._underlying_price_and_time_from_url(url)
except IndexError:
self.underlying_price, self.quote_time = np.nan, np.nan
calls = frames[self._TABLE_LOC['calls']]
puts = frames[self._TABLE_LOC['puts']]
calls = self._process_data(calls, 'call')
puts = self._process_data(puts, 'put')
return {'calls': calls, 'puts': puts}
def _underlying_price_and_time_from_url(self, url):
root = self._parse_url(url)
underlying_price = self._underlying_price_from_root(root)
quote_time = self._quote_time_from_root(root)
return underlying_price, quote_time
@staticmethod
def _underlying_price_from_root(root):
underlying_price = root.xpath('.//*[@class="time_rtq_ticker Fz-30 Fw-b"]')[0]\
.getchildren()[0].text
underlying_price = underlying_price.replace(',', '') #GH11
try:
underlying_price = float(underlying_price)
except ValueError:
underlying_price = np.nan
return underlying_price
@staticmethod
def _quote_time_from_root(root):
#Gets the time of the quote, note this is actually the time of the underlying price.
try:
quote_time_text = root.xpath('.//*[@class="time_rtq Fz-m"]')[0].getchildren()[1].getchildren()[0].text
##TODO: Enable timezone matching when strptime can match EST with %Z
quote_time_text = quote_time_text.split(' ')[0]
quote_time = dt.datetime.strptime(quote_time_text, "%I:%M%p")
quote_time = quote_time.replace(year=CUR_YEAR, month=CUR_MONTH, day=CUR_DAY)
except ValueError:
quote_time = np.nan
return quote_time
def _get_option_data(self, expiry, name):
frame_name = '_frames' + self._expiry_to_string(expiry)
try:
frames = getattr(self, frame_name)
except AttributeError:
frames = self._get_option_frames_from_yahoo(expiry)
option_data = frames[name]
if expiry != self.expiry_dates[0]:
name += self._expiry_to_string(expiry)
setattr(self, name, option_data)
return option_data
def get_call_data(self, month=None, year=None, expiry=None):
"""
***Experimental***
        Gets call/put data for the stock with the expiration date in the
given month and year
Parameters
----------
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
call_data: pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
                IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Notes
-----
Note: Format of returned data frame is dependent on Yahoo and may change.
When called, this function will add instance variables named
calls and puts. See the following example:
>>> aapl = Options('aapl', 'yahoo') # Create object
>>> aapl.calls # will give an AttributeError
>>> aapl.get_call_data() # Get data and set ivars
>>> aapl.calls # Doesn't throw AttributeError
Also note that aapl.calls will always be the calls for the next
expiry. If the user calls this method with a different month
or year, the ivar will be named callsYYMMDD where YY, MM and DD are,
respectively, two digit representations of the year, month and day
for the expiry of the options.
"""
expiry = self._try_parse_dates(year, month, expiry)
return self._get_data_in_date_range(expiry, call=True, put=False)
def get_put_data(self, month=None, year=None, expiry=None):
"""
***Experimental***
        Gets put data for the stock with the expiration date in the
given month and year
Parameters
----------
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
put_data: pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
                IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Notes
-----
Note: Format of returned data frame is dependent on Yahoo and may change.
When called, this function will add instance variables named
puts. See the following example:
>>> aapl = Options('aapl') # Create object
>>> aapl.puts # will give an AttributeError
>>> aapl.get_put_data() # Get data and set ivars
>>> aapl.puts # Doesn't throw AttributeError
Also note that aapl.puts will always be the puts for the next
expiry. If the user calls this method with a different month
or year, the ivar will be named putsYYMMDD where YY, MM and DD are,
respectively, two digit representations of the year, month and day
for the expiry of the options.
"""
expiry = self._try_parse_dates(year, month, expiry)
return self._get_data_in_date_range(expiry, put=True, call=False)
def get_near_stock_price(self, above_below=2, call=True, put=False,
month=None, year=None, expiry=None):
"""
***Experimental***
Returns a data frame of options that are near the current stock price.
Parameters
----------
above_below : number, int, optional (default=2)
The number of strike prices above and below the stock price that
should be taken
call : bool
Tells the function whether or not it should be using calls
put : bool
            Tells the function whether or not it should be using puts
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
chopped: DataFrame
            The resultant DataFrame chopped down to the 2 * above_below + 1 rows
            desired. If there isn't data as far out as the user has asked for,
            then as much of the available data as possible is returned.
Note: Format of returned data frame is dependent on Yahoo and may change.
"""
expiry = self._try_parse_dates(year, month, expiry)
data = self._get_data_in_date_range(expiry, call=call, put=put)
return self.chop_data(data, above_below, self.underlying_price)
def chop_data(self, df, above_below=2, underlying_price=None):
"""Returns a data frame only options that are near the current stock price."""
if not underlying_price:
try:
underlying_price = self.underlying_price
except AttributeError:
underlying_price = np.nan
max_strike = max(df.index.get_level_values('Strike'))
min_strike = min(df.index.get_level_values('Strike'))
if not np.isnan(underlying_price) and min_strike < underlying_price < max_strike:
start_index = np.where(df.index.get_level_values('Strike')
> underlying_price)[0][0]
get_range = slice(start_index - above_below,
start_index + above_below + 1)
df = df[get_range].dropna(how='all')
return df
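    # Illustrative sketch with assumed values: given above_below=2, an
    # underlying price of 100 and strikes [90, 95, 100, 105, 110, 115], the
    # frame is trimmed to the rows around the first strike above 100, i.e.
    # strikes 90 through 110.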
def _try_parse_dates(self, year, month, expiry):
"""
Validates dates provided by user. Ensures the user either provided both a month and a year or an expiry.
Parameters
----------
year : int
Calendar year
month : int
Calendar month
expiry : date-like or convertible, (preferred)
Expiry date
Returns
-------
list of expiry dates (datetime.date)
"""
        #Checks if the user gave one of the month or the year but not both and did not provide an expiry.
        #Parentheses group the two "one but not both" cases before requiring that expiry is None.
        if ((month is not None and year is None) or (month is None and year is not None)) and expiry is None:
msg = "You must specify either (`year` and `month`) or `expiry` " \
"or none of these options for the next expiry."
raise ValueError(msg)
if expiry is not None:
if hasattr(expiry, '__iter__'):
expiry = [self._validate_expiry(exp) for exp in expiry]
else:
expiry = [self._validate_expiry(expiry)]
if len(expiry) == 0:
raise ValueError('No expiries available for given input.')
elif year is None and month is None:
#No arguments passed, provide next expiry
year = CUR_YEAR
month = CUR_MONTH
expiry = dt.date(year, month, 1)
expiry = [self._validate_expiry(expiry)]
else:
#Year and month passed, provide all expiries in that month
expiry = [expiry for expiry in self.expiry_dates if expiry.year == year and expiry.month == month]
if len(expiry) == 0:
raise ValueError('No expiries available in %s-%s' % (year, month))
return expiry
def _validate_expiry(self, expiry):
"""Ensures that an expiry date has data available on Yahoo
If the expiry date does not have options that expire on that day, return next expiry"""
expiry_dates = self.expiry_dates
expiry = to_datetime(expiry)
if hasattr(expiry, 'date'):
expiry = expiry.date()
if expiry in expiry_dates:
return expiry
else:
index = DatetimeIndex(expiry_dates).order()
return index[index.date >= expiry][0].date()
def get_forward_data(self, months, call=True, put=False, near=False,
above_below=2):
"""
***Experimental***
Gets either call, put, or both data for months starting in the current
month and going out in the future a specified amount of time.
Parameters
----------
months : number, int
How many months to go out in the collection of the data. This is
inclusive.
call : bool, optional (default=True)
Whether or not to collect data for call options
put : bool, optional (default=False)
Whether or not to collect data for put options.
near : bool, optional (default=False)
Whether this function should get only the data near the
current stock price. Uses Options.get_near_stock_price
above_below : number, int, optional (default=2)
The number of strike prices above and below the stock price that
should be taken if the near option is set to True
Returns
-------
pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
                IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Note: Format of returned data frame is dependent on Yahoo and may change.
"""
warnings.warn("get_forward_data() is deprecated", FutureWarning,
stacklevel=2)
end_date = dt.date.today() + MonthEnd(months)
dates = (date for date in self.expiry_dates if date <= end_date.date())
data = self._get_data_in_date_range(dates, call=call, put=put)
if near:
data = self.chop_data(data, above_below=above_below)
return data
def get_all_data(self, call=True, put=True):
"""
***Experimental***
Gets either call, put, or both data for all available months starting
in the current month.
Parameters
----------
call : bool, optional (default=True)
Whether or not to collect data for call options
put : bool, optional (default=True)
Whether or not to collect data for put options.
Returns
-------
pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
                IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Note: Format of returned data frame is dependent on Yahoo and may change.
"""
try:
expiry_dates = self.expiry_dates
except AttributeError:
expiry_dates, _ = self._get_expiry_dates_and_links()
return self._get_data_in_date_range(dates=expiry_dates, call=call, put=put)
def _get_data_in_date_range(self, dates, call=True, put=True):
to_ret = Series({'calls': call, 'puts': put})
to_ret = to_ret[to_ret].index
data = []
for name in to_ret:
for expiry_date in dates:
nam = name + self._expiry_to_string(expiry_date)
try: # Try to access on the instance
frame = getattr(self, nam)
except AttributeError:
frame = self._get_option_data(expiry=expiry_date, name=name)
data.append(frame)
return concat(data).sortlevel()
@property
def expiry_dates(self):
"""
Returns a list of available expiry dates
"""
try:
expiry_dates = self._expiry_dates
except AttributeError:
expiry_dates, _ = self._get_expiry_dates_and_links()
return expiry_dates
def _get_expiry_dates_and_links(self):
"""
Gets available expiry dates.
Returns
-------
Tuple of:
List of datetime.date objects
Dict of datetime.date objects as keys and corresponding links
"""
url = self._OPTIONS_BASE_URL.format(sym=self.symbol)
root = self._parse_url(url)
try:
links = root.xpath('//*[@id="options_menu"]/form/select/option')
except IndexError:
raise RemoteDataError('Expiry dates not available')
expiry_dates = [dt.datetime.strptime(element.text, "%B %d, %Y").date() for element in links]
links = [element.attrib['data-selectbox-link'] for element in links]
if len(expiry_dates) == 0:
raise RemoteDataError('Data not available')
expiry_links = dict(zip(expiry_dates, links))
self._expiry_links = expiry_links
self._expiry_dates = expiry_dates
return expiry_dates, expiry_links
def _parse_url(self, url):
"""
Downloads and parses a URL, returns xml root.
"""
try:
from lxml.html import parse
except ImportError:
raise ImportError("Please install lxml if you want to use the "
"{0!r} class".format(self.__class__.__name__))
try:
doc = parse(url)
except _network_error_classes:
raise RemoteDataError("Unable to parse URL "
"{0!r}".format(url))
else:
root = doc.getroot()
if root is None:
raise RemoteDataError("Parsed URL {0!r} has no root"
"element".format(url))
return root
def _process_data(self, frame, type):
"""
Adds columns for Expiry, IsNonstandard (ie: deliverable is not 100 shares)
and Tag (the tag indicating what is actually deliverable, None if standard).
"""
frame.columns = ['Strike', 'Symbol', 'Last', 'Bid', 'Ask', 'Chg', 'PctChg', 'Vol', 'Open_Int', 'IV']
frame["Rootexp"] = frame.Symbol.str[0:-9]
frame["Root"] = frame.Rootexp.str[0:-6]
frame["Expiry"] = to_datetime(frame.Rootexp.str[-6:])
#Removes dashes in equity ticker to map to option ticker.
#Ex: BRK-B to BRKB140517C00100000
frame["IsNonstandard"] = frame['Root'] != self.symbol.replace('-', '')
del frame["Rootexp"]
frame["Underlying"] = self.symbol
try:
frame['Underlying_Price'] = self.underlying_price
frame["Quote_Time"] = self.quote_time
except AttributeError:
frame['Underlying_Price'] = np.nan
frame["Quote_Time"] = np.nan
frame.rename(columns={'Open Int': 'Open_Int'}, inplace=True)
frame['Type'] = type
frame.set_index(['Strike', 'Expiry', 'Type', 'Symbol'], inplace=True)
return frame
|
the-stack_0_11084 | # -*- coding: utf-8 -*-
# @createTime : 2019/10/22 20:59
# @author : Huanglg
# @fileName: BOM.py
# @email: [email protected]
import time
from mesService.lib.OracleLib.OracleDBUtil import Oracle
def print_run_time(func):
def wrapper(*args, **kw):
local_time = time.time()
        result = func(*args, **kw)  # run the wrapped function and keep its return value
        print('current Function [%s] run time is %.2f' % (func.__name__, time.time() - local_time))
        return result
return wrapper
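# Usage sketch (illustrative): any function decorated with @print_run_time
# reports its wall-clock duration once it returns, e.g.
# >>> @print_run_time
# ... def slow():
# ...     time.sleep(1)
# >>> slow()  # prints: current Function [slow] run time is 1.00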
@print_run_time
def test_oracle():
oracle = Oracle()
product_sql = """select id productid, tt.medium productdesc from product p
left join TEXT_TRANSLATION tt on p.textid = tt.textid and tt.LANGUAGEID = 2052"""
products = oracle.query(product_sql)
print(products)
product_component_sql = """select PC.COMPONENTID,TT.MEDIUM,PC.PRODUCTID PRODUCTID,C.PRODUCTID CPRODUCTID from PRODUCT_COMPONENT PC
left join COMPONENT C on C.ID = PC.COMPONENTID
left join PRODUCT P on P.ID = C.PRODUCTID
left join TEXT_TRANSLATION TT on TT.TEXTID = P.TEXTID and TT.LANGUAGEID = 2052
where PC.PRODUCTID={productid}"""
for product in products:
productid = product['productid']
sql = product_component_sql.format(productid=productid)
product_component = oracle.query(sql)
print(product_component)
if __name__ == '__main__':
test_oracle()
|
the-stack_0_11085 | # coding=utf-8
from distutils.util import convert_path
import os
from fnmatch import fnmatchcase
from setuptools import setup, find_packages
from pip.req import parse_requirements
import uuid
import sys
AUTHOR = 'Nekmo'
EMAIL = '[email protected]'
PLUGIN_NAME = 'userscommands'
DESCRIPTION = ''
WEBSITE = 'http://nekmo.com'
DOWNLOAD_URL = ''
STATUS_LEVEL = 1 # 1:Planning 2:Pre-Alpha 3:Alpha 4:Beta 5:Production/Stable 6:Mature 7:Inactive
CLASSIFIERS = [
'License :: OSI Approved :: MIT License',
# 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# 'License :: OSI Approved :: BSD License',
]
ROOT_INCLUDE = ['requirements.txt', 'VERSION', 'LICENSE.txt']
SETUP_REQUIRES = ['pip']
##############################################################################
# find_package_data is an Ian Bicking creation.
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info')
def find_package_data(where='.', package='',
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
This function is by Ian Bicking.
"""
out = {}
stack = [(convert_path(where), '', package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package, False))
else:
stack.append(
(fn, prefix + name + '/', package, only_in_packages)
)
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
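# Hypothetical usage sketch: the mapping returned here is meant for the
# ``package_data`` argument of setup(), e.g.
# >>> package_data = find_package_data(where='.', package='nekbot.plugins.' + PLUGIN_NAME)
# >>> setup(..., package_data=package_data)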
##############################################################################
__dir__ = os.path.abspath(os.path.dirname(__file__))
def get_url(dep):
if hasattr(dep, 'url'):
return dep.url
if dep.link is None:
return
return dep.link.url
VERSION = open('VERSION').read().replace('\n', '') # Please, change VERSION file
requirements = parse_requirements('requirements.txt', session=uuid.uuid1()) # Please, change requirements.txt file
INSTALL_REQUIRES = [str(ir.req) for ir in requirements if not get_url(ir)]
try:
LONG_DESCRIPTION = open('README', 'rt').read() # Please, change README file
except IOError:
LONG_DESCRIPTION = ''
if not DESCRIPTION:
DESCRIPTION = '%s, a plugin for NekBot, a modular and multiprotocol bot written in Python.' % PLUGIN_NAME
STATUS_NAME = ['Planning', 'Pre-Alpha', 'Alpha', 'Beta',
'Production/Stable', 'Mature', 'Inactive'][STATUS_LEVEL - 1]
packages = find_packages(__dir__)
# Prevent include symbolic links
for package_name in tuple(packages):
path = os.path.join(__dir__, package_name.replace('.', '/'))
if not os.path.exists(path):
continue
if not os.path.islink(path):
continue
packages.remove(package_name)
setup(
name='nekbot.plugins.%s' % PLUGIN_NAME,
namespace_packages=['nekbot.plugins'],
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
url=WEBSITE,
download_url=DOWNLOAD_URL,
    classifiers=CLASSIFIERS + [  # list.extend() returns None, which would drop all classifiers
'Development Status :: %i - %s' % (STATUS_LEVEL, STATUS_NAME),
'Intended Audience :: Developers',
'Environment :: Console',
'Topic :: Communications :: Chat',
'Topic :: Communications :: Chat :: Internet Relay Chat',
'Topic :: Communications :: Conferencing',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
    ],
platforms=['linux'],
scripts=[
# 'scripts/myscript.sh'
],
provides=['nekbot.plugins.%s' % PLUGIN_NAME],
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES,
packages=['nekbot', 'nekbot.plugins', 'nekbot.plugins.%s' % PLUGIN_NAME],
include_package_data=True,
keywords=['nekbot', 'bot', PLUGIN_NAME, 'plugins', 'chat'],
entry_points={
},
zip_safe=False,
) |
the-stack_0_11090 | #Author: Thy H. Nguyen
import turtle
wn = turtle.Screen()
wn.bgcolor("#E0FFFF")
mom = turtle.Turtle()
mom.color("#0000CD")
mom.shape("circle")
thy = int(input())
i=1
while i < thy:
mom.right(10)
mom.forward(100)
mom.stamp()
mom.backward(thy)
mom.dot()
i +=1
wn.exitonclick()
|
the-stack_0_11091 | from __future__ import unicode_literals
import re
CHUNK_RANGE_RE = re.compile(
r'^@@ -(?P<orig_start>\d+)(,(?P<orig_len>\d+))? '
r'\+(?P<new_start>\d+)(,(?P<new_len>\d+))? @@',
re.M)
def filter_interdiff_opcodes(opcodes, filediff_data, interfilediff_data):
"""Filters the opcodes for an interdiff to remove unnecessary lines.
An interdiff may contain lines of code that have changed as the result of
updates to the tree between the time that the first and second diff were
created. This leads to some annoyances when reviewing.
This function will filter the opcodes to remove as much of this as
possible. It will only output non-"equal" opcodes if it falls into the
ranges of lines dictated in the uploaded diff files.
"""
def _find_range_info(diff):
lines = diff.splitlines()
process_changes = False
ranges = []
chunk_start = None
chunk_len = 0
lines_of_context = 0
# Look through the chunks of the diff, trying to find the amount
# of context shown at the beginning of each chunk. Though this
# will usually be 3 lines, it may be fewer or more, depending
# on file length and diff generation settings.
for line in lines:
if process_changes:
if line.startswith((b'-', b'+')):
# We've found the first change in the chunk. We now
# know how many lines of context we have.
#
# We reduce the indexes by 1 because the chunk ranges
# in diffs start at 1, and we want a 0-based index.
start = chunk_start - 1 + lines_of_context
ranges.append((start, start + chunk_len))
process_changes = False
continue
else:
lines_of_context += 1
# This was not a change within a chunk, or we weren't processing,
# so check to see if this is a chunk header instead.
m = CHUNK_RANGE_RE.match(line)
if m:
# It is a chunk header. Reset the state for the next range,
# and pull the line number and length from the header.
chunk_start = int(m.group('new_start'))
chunk_len = int(m.group('new_len') or '1')
process_changes = True
lines_of_context = 0
return ranges
def _is_range_valid(line_range, tag, i1, i2):
return (line_range is not None and
i1 >= line_range[0] and
(tag == 'delete' or i1 != i2))
orig_ranges = _find_range_info(filediff_data)
new_ranges = _find_range_info(interfilediff_data)
orig_range_i = 0
new_range_i = 0
if orig_ranges:
orig_range = orig_ranges[orig_range_i]
else:
orig_range = None
if new_ranges:
new_range = new_ranges[new_range_i]
else:
new_range = None
if not orig_range and not new_range:
# There's nothing in here, or it's not a unified diff. Just yield
# what we get.
for tag, i1, i2, j1, j2 in opcodes:
yield tag, i1, i2, j1, j2
return
for tag, i1, i2, j1, j2 in opcodes:
while orig_range and i1 > orig_range[1]:
# We've left the range of the current chunk to consider in the
# original diff. Move on to the next one.
orig_range_i += 1
if orig_range_i < len(orig_ranges):
orig_range = orig_ranges[orig_range_i]
else:
orig_range = None
while new_range and j1 > new_range[1]:
# We've left the range of the current chunk to consider in the
# new diff. Move on to the next one.
new_range_i += 1
if new_range_i < len(new_ranges):
new_range = new_ranges[new_range_i]
else:
new_range = None
# See if the chunk we're looking at is in the range of the chunk in
# one of the uploaded diffs. If so, allow it through.
orig_starts_valid = _is_range_valid(orig_range, tag, i1, i2)
new_starts_valid = _is_range_valid(new_range, tag, j1, j2)
valid_chunk = orig_starts_valid or new_starts_valid
if valid_chunk:
# This chunk is valid. It may only be a portion of the real
# chunk, though. We'll need to split it up into a known valid
# segment first, and yield that.
if orig_range:
cap_i2 = orig_range[1] + 1
else:
cap_i2 = i2
if new_range:
cap_j2 = new_range[1] + 1
else:
cap_j2 = j2
if orig_starts_valid:
valid_i2 = min(i2, cap_i2)
else:
valid_i2 = i2
if new_starts_valid:
valid_j2 = min(j2, cap_j2)
else:
valid_j2 = j2
if tag in ('equal', 'replace'):
# We need to take care to not let the replace lines have
# differing ranges for the orig and modified files. We want the
# replace to take up the full bounds of the two sides, but
# capped to the valid chunk range.
#
# For this, we need to pick a consistent value for the length
# of the range. We know at least one side will be within
# bounds, since we have a valid chunk and at least one is
# capped to be <= the end of the range.
#
# If one side is out of bounds of the range, the other range
# will win. If both are in bounds, the largest wins.
i_diff = valid_i2 - i1
j_diff = valid_j2 - j1
if valid_i2 > cap_i2:
# Sanity-check that valid_j2 is in bounds. We don't need
# to check this in the following conditionals, though,
# since that's covered by the conditionals themselves.
assert valid_j2 <= cap_j2
max_cap = j_diff
elif valid_j2 > cap_j2:
max_cap = i_diff
else:
max_cap = max(i_diff, j_diff)
# Set each valid range to be the same length.
valid_i2 = i1 + max_cap
valid_j2 = j1 + max_cap
# Update the caps, so that we'll process whatever we've
# chopped off.
cap_i2 = valid_i2
cap_j2 = valid_j2
yield tag, i1, valid_i2, j1, valid_j2
if valid_i2 == i2 and valid_j2 == j2:
continue
# There were more parts of this range remaining. We know they're
# all invalid, so let's update i1 and j1 to point to the start
# of those invalid ranges, and mark them.
if orig_range is not None and i2 + 1 > cap_i2:
i1 = cap_i2
if new_range is not None and j2 + 1 > cap_j2:
j1 = cap_j2
valid_chunk = False
if not valid_chunk:
# Turn this into an "filtered-equal" chunk. The left-hand and
# right-hand side of the diffs will look different, which may be
# noticeable, but it will still help the user pay attention to
# what's actually changed that they care about.
#
# These will get turned back into "equal" chunks in the
# post-processing step.
yield 'filtered-equal', i1, i2, j1, j2
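# Minimal usage sketch (the opcode source and the filediff/interfilediff
# attribute names are assumptions; opcodes would normally come from a
# SequenceMatcher-style differ):
# >>> filtered = filter_interdiff_opcodes(differ.get_opcodes(),
# ...                                     filediff.diff, interfilediff.diff)
# >>> for tag, i1, i2, j1, j2 in filtered:
# ...     pass  # chunks outside the uploaded diffs' ranges arrive as 'filtered-equal'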
def post_process_filtered_equals(opcodes):
"""Post-processes filtered-equal and equal chunks from interdiffs.
Any filtered-out "filtered-equal" chunks will get turned back into "equal"
chunks and merged into any prior equal chunks. Likewise, simple "equal"
chunks will also get merged.
"equal" chunks that have any indentation information will remain
their own chunks, with nothing merged in.
"""
cur_chunk = None
for tag, i1, i2, j1, j2, meta in opcodes:
if ((tag == 'equal' and not meta.get('indentation_changes')) or
tag == 'filtered-equal'):
# We either have a plain equal chunk without any indentation
# changes, or a filtered-equal chunk. In these cases, we can
# safely merge the chunks together and transform them into
# an "equal" chunk.
if cur_chunk:
i1 = cur_chunk[1]
j1 = cur_chunk[3]
meta = cur_chunk[5]
cur_chunk = ('equal', i1, i2, j1, j2, meta)
else:
# This is some sort of changed chunk (insert, delete, replace,
# or equal with indentation changes). Yield the previous chunk
# we were working with, if any, and then yield the current chunk.
if cur_chunk:
yield cur_chunk
cur_chunk = None
yield tag, i1, i2, j1, j2, meta
if cur_chunk:
yield cur_chunk
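# Sketch of the intended pipeline (illustrative): opcodes first pass through
# filter_interdiff_opcodes, are then annotated with per-chunk metadata, and
# finally flow through post_process_filtered_equals, which folds consecutive
# 'filtered-equal' and plain 'equal' chunks back into single 'equal' chunks.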
|
the-stack_0_11092 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
Pyro4 client: invokes the remote object exposed by the server.
"""
import Pyro4
def main():
uri = input("What is the Pyro uri of the greeting object?(help: 输入server启动时对应的uri) ").strip()
name = input("What is your name? ").strip()
print(f'uri:{uri}, name:{name}')
server = Pyro4.Proxy(uri) # 获取server
print(server.welcomeMessage(name))
if __name__ == '__main__':
main()
|
the-stack_0_11093 | import torch
import torch.nn as nn
import neat.activations as a
from torch import autograd
class FeedForwardNet(nn.Module):
def __init__(self, genome, config):
super(FeedForwardNet, self).__init__()
self.genome = genome
self.units = self.build_units()
self.lin_modules = nn.ModuleList()
self.config = config
self.activation = a.Activations().get(config.ACTIVATION)
for unit in self.units:
self.lin_modules.append(unit.linear)
def forward(self, x):
outputs = dict()
input_units = [u for u in self.units if u.ref_node.type == 'input']
output_units = [u for u in self.units if u.ref_node.type == 'output']
bias_units = [u for u in self.units if u.ref_node.type == 'bias']
stacked_units = self.genome.order_units(self.units)
# Set input values
for u in input_units:
outputs[u.ref_node.id] = x[0][u.ref_node.id]
# Set bias value
for u in bias_units:
outputs[u.ref_node.id] = torch.ones((1, 1)).to(device)[0][0]
# Compute through directed topology
while len(stacked_units) > 0:
current_unit = stacked_units.pop()
if current_unit.ref_node.type != 'input' and current_unit.ref_node.type != 'bias':
# Build input vector to current node
inputs_ids = self.genome.get_inputs_ids(current_unit.ref_node.id)
in_vec = autograd.Variable(torch.zeros((1, len(inputs_ids)), device=device, requires_grad=True))
for i, input_id in enumerate(inputs_ids):
in_vec[0][i] = outputs[input_id]
# Compute output of current node
linear_module = self.lin_modules[self.units.index(current_unit)]
if linear_module is not None: # TODO: Can this be avoided?
scaled = self.config.SCALE_ACTIVATION * linear_module(in_vec)
out = self.activation(scaled)
else:
out = torch.zeros((1, 1))
# Add to outputs dictionary
outputs[current_unit.ref_node.id] = out
# Build output vector
output = autograd.Variable(torch.zeros((1, len(output_units)), device=device, requires_grad=True))
for i, u in enumerate(output_units):
output[0][i] = outputs[u.ref_node.id]
return output
def build_units(self):
units = []
for n in self.genome.node_genes:
in_genes = self.genome.get_connections_in(n.id)
num_in = len(in_genes)
weights = [g.weight for g in in_genes]
new_unit = Unit(n, num_in)
new_unit.set_weights(weights)
units.append(new_unit)
return units
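# Hypothetical usage sketch (the genome and config objects come from the
# surrounding NEAT framework and are assumptions here):
# >>> net = FeedForwardNet(genome, config)
# >>> x = torch.zeros((1, num_inputs), device=device)
# >>> y = net(x)  # one forward pass through the evolved topology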
class Unit:
def __init__(self, ref_node, num_in_features):
self.ref_node = ref_node
self.linear = self.build_linear(num_in_features)
def set_weights(self, weights):
if self.ref_node.type != 'input' and self.ref_node.type != 'bias':
weights = torch.cat(weights).unsqueeze(0)
for p in self.linear.parameters():
p.data = weights
def build_linear(self, num_in_features):
if self.ref_node.type == 'input' or self.ref_node.type == 'bias':
return None
return nn.Linear(num_in_features, 1, False)
def __str__(self):
return 'Reference Node: ' + str(self.ref_node) + '\n'
# TODO: Multiple GPU support get from config
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
the-stack_0_11094 | '''
This script helps create and manage experiments.
Possible commands:
- launch: launch an experiment loading its specification from a CSV file
- view: list the experiments which are still running
- stop: stop all the runners of the experiment
'''
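# Example invocations (illustrative; the script file name is an assumption):
#   python experiment_manager.py --command launch --name exp1 --dir ./experiments --sacred
#   python experiment_manager.py --command view --name exp1 --dir ./sacred_runs
#   python experiment_manager.py --command stop --name exp1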
import pandas as pd
import argparse, os, sys, re
from multiprocessing import Pool
from screenutils import Screen, list_screens
from datetime import datetime
class Screener(object):
def command_sender(self, zipped_pair):
screen, command = zipped_pair
screen.send_commands(command)
def run(self, commands, name='s'):
n_screens = len(commands)
screens = [Screen(name+'_%d' % (i+1), True) for i in range(n_screens)]
p = Pool(n_screens)
p.map(self.command_sender, zip(screens, commands))
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--command', help='Command to execute.', type=str, default='launch', choices=['launch', 'view', 'stop'])
# Experiment selection
parser.add_argument('--name', help='Name of the experiment', type=str, default=None)
parser.add_argument('--dir', help='Directory from which to load the experiment (to launch).', type=str, default=None)
# Env
parser.add_argument('--condaenv', help='Conda environment to activate.', type=str, default=None)
parser.add_argument('--pythonv', help='Python version to use', type=str, default='python3')
parser.add_argument('--pythonpath', help='Pythonpath to use for script.', type=str, default=None)
parser.add_argument('--cuda_devices', help='CUDA visible devices.', type=str, default='')
# Sacred
parser.add_argument('--sacred', action='store_true', default=False, help='Enable sacred.')
parser.add_argument('--sacred_dir', help='Dir used by sacred to log.', type=str, default=None)
parser.add_argument('--sacred_slack', help='Config file for slack.', type=str, default=None)
parser.add_argument('--dirty', action='store_true', default=False, help='Enable sacred dirty running.')
args = parser.parse_args()
if args.command == 'launch':
assert args.name is not None, "Provide an experiment name."
assert args.dir is not None, "Provide a directory to load the experiment."
# Load experiment
experiment_path = args.dir + '/' + args.name + '.csv'
experiment = pd.read_csv(experiment_path)
# Start build base command
cmd_base = ''
# Set env variables
cmd_base += 'export CUDA_VISIBLE_DEVICES=' + args.cuda_devices + ' && '
cmd_base += 'export EXPERIMENT_NAME=' + args.name + ' && '
if args.sacred_dir and args.sacred:
cmd_base += 'export SACRED_RUNS_DIRECTORY=' + args.sacred_dir + ' && '
if args.sacred_slack and args.sacred:
cmd_base += 'export SACRED_SLACK_CONFIG=' + args.sacred_slack + ' && '
if args.pythonpath:
cmd_base += "export PYTHONPATH='PYTHONPATH:" + args.pythonpath + "' && "
if args.condaenv:
cmd_base += 'source activate ' + args.condaenv + ' && '
# Parse the CSV
param_cols = list(experiment)
param_cols.remove('script')
# Build the commands
cmd_base += args.pythonv + ' '
cmds = []
for index, row in experiment.iterrows():
# Get the script, check if we need to use sacred (just append _sacred to script name)
script = row['script']
if args.sacred:
script += '_sacred'
script = 'baselines/' + script + '.py '
_c = cmd_base + script
# Check if dirty and if to use with
if args.sacred and not args.dirty:
_c += '-e '
if args.sacred and len(param_cols) > 0:
_c += 'with '
# Add experiment_name to params
if args.sacred:
_c += 'experiment_name=' + args.name + ' '
else:
_c += '--experiment_name=' + args.name + ' '
# Params
for p in param_cols:
if args.sacred:
_c += str(p).strip() + '=' + str(row[p]).strip() + ' '
else:
_c += '--' + str(p).strip() + '=' + str(row[p]).strip() + ' '
# Add the exit command to terminate the experiment
_c += '&& exit'
cmds.append(_c)
scr = Screener()
scr.run(cmds, name=args.name)
elif args.command == 'view':
from baselines.common.sacred_utils import load_runs, filter_runs
from baselines.common import colorize
assert args.name is not None, "Provide an experiment name."
assert args.dir is not None, "Provide a directory for experiment."
rule = re.compile(args.name + '_*')
# Get all screens
all_active_screens = 0
for s in list_screens():
if rule.match(s.name):
all_active_screens += 1
# Load runs to get active ones
runs = load_runs(args.dir)
running_runs = filter_runs({'run.status': 'RUNNING'}, runs)
print(colorize("==========================================", color='red'))
max_eta, max_duration = None, None
for key in running_runs.keys():
run = running_runs[key]
print(colorize('Run:', color='blue'), "{0} ({1})".format(key, run['config']['env']))
print("\t" + colorize("Steps:", color='blue') +
"{0}/{1}".format(len(run['metrics']['EpRewMean']['steps'])+1, run['config']['max_iters']) +
"\t\t" + colorize("Reward:", color='blue') + "{0}".format(run['metrics']['EpRewMean']['values'][-1]) +
"\t\t" + colorize("Seed:", color='blue') + "{0}".format(run['config']['seed']) +
"\t\t" + colorize("Delta:", color='blue') + "{0}".format(run['config']['delta']))
completion = (len(run['metrics']['EpRewMean']['steps'])+1) / run['config']['max_iters']
start_time = datetime.strptime(run['run']['start_time'], '%Y-%m-%dT%H:%M:%S.%f')
duration = datetime.utcnow() - start_time
eta = duration * (1 - completion) / completion
max_eta = max(eta, max_eta) if max_eta is not None else eta
max_duration = max(duration, max_duration) if max_duration is not None else duration
if len(running_runs.keys()) == 0:
print(colorize("Done.", color='red'))
else:
t = max_eta.total_seconds()
d = max_duration.total_seconds()
print(colorize("==========================================", color='red'))
print(colorize("Active screens: {0}".format(all_active_screens), color='red'))
print(colorize("Active runs: {0}".format(len(running_runs.keys())), color='red'))
print(colorize("Elapsed time: {0} hours, {1} minutes, {2} seconds".format(int(d // 3600), int((d%3600)//60), int(d%3600)%60), color='red'))
print(colorize("ETA: {0} hours, {1} minutes, {2} seconds".format(int(t // 3600), int((t%3600)//60), int(t%3600)%60), color='red'))
print(colorize("==========================================", color='red'))
elif args.command == 'stop':
assert args.name is not None, "Provide an experiment name."
rule = re.compile(args.name + '_*')
# Get all screens
for s in list_screens():
if rule.match(s.name):
print("Stopping", s.name)
s.kill()
else:
raise Exception('Unrecognized command.')
|
the-stack_0_11095 | #!/usr/bin/env python3
# FreeRTOS Common IO V0.1.2
# Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# http://aws.amazon.com/freertos
# http://www.FreeRTOS.org
import serial
from time import sleep
import csv
import os, sys
import argparse
import threading
import socket
import re
scriptdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(scriptdir)
if parentdir not in sys.path:
print("Script Dir: %s" % scriptdir)
print("Parent Dir: %s" % parentdir)
sys.path.append(parentdir)
from test_iot_test_template import test_template
class TestI2cMasterAssisted(test_template):
"""
Test class for i2c master tests.
"""
def __init__(self, serial, ip, login, pwd, csv_handler):
self._func_list = [self.test_IotI2CWriteSyncAssisted,
self.test_IotI2CWriteAsyncAssisted,
self.test_IotI2CReadSyncAssisted,
self.test_IotI2CReadAsyncAssisted
]
self._serial = serial
self._ip = ip
self._login = login
self._pwd = pwd
self._cr = csv_handler
shell_script = "%s/test_iot_runonPI_i2c_master.sh" % scriptdir
port = 50007
def i2c_write_test(self, cmd):
"""
Test body of write test.
:param cmd: iot test cmd
:return:
"""
t_shell = threading.Thread(target=self.run_shell_script,
args=(" ".join([self.shell_script, self._ip, self._login, self._pwd, '-s']),))
t_shell.start()
socket.setdefaulttimeout(10)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
time_out = 10
# Wait until connection with the process on rpi is established.
while s.connect_ex((self._ip, self.port)) != 0 and time_out > 0:
time_out -= 1
sleep(1)
if time_out == 0:
print("Socket connection cannot be established")
s.close()
return "Fail"
self._serial.reset_input_buffer()
self._serial.write('\r\n'.encode('utf-8'))
self._serial.write(cmd.encode('utf-8'))
self._serial.write('\r\n'.encode('utf-8'))
res = self._serial.read_until(terminator=serial.to_bytes([ord(c) for c in 'Ignored '])).decode('utf-8')
w_bytes = []
for x in re.sub(r'\r', '', res).split('\n'):
if x.find('IGNORE') != -1:
w_bytes = [s for s in x.split(',') if len(s) == 2]
break
# Retrieve bytes read by rpi.
s.sendall(b's')
try:
r_bytes = s.recv(1024)
except:
print("No data received from rpi.\n", repr(res))
s.close()
return 'Fail'
r_bytes = ["{:02X}".format(b) for b in r_bytes]
# End process on the rpi.
s.sendall(b'E')
t_shell.join()
s.close()
# Compare read and write bytes.
if self.compare_host_dut_result(r_bytes, w_bytes) == -1:
print(repr(res))
return "Fail"
return 'Pass'
def test_IotI2CWriteSyncAssisted(self):
return self.i2c_write_test("iot_tests test 11 1")
def test_IotI2CWriteAsyncAssisted(self):
return self.i2c_write_test("iot_tests test 11 2")
def i2c_read_test(self, cmd):
"""
Test body for the read test. The I2C slave callback in the RPi library is only invoked after an I2C stop
condition. The register address cannot be read by the RPi before the repeated start, so the data to send can
only be loaded into the RPi FIFO after the stop. As a result, the first read on the host side always returns
the data loaded for the previous request, or a random value if the FIFO was never loaded.
The workaround with the current RPi library is to read from the RPi twice and compare the second DUT read
with the first batch of data sent by the RPi.
:param cmd: iot test cmd
:return:
"""
w_bytes, r_bytes = ([] for i in range(2))
t_shell = threading.Thread(target=self.run_shell_script,
args=(" ".join([self.shell_script, self._ip, self._login, self._pwd, '-s']),))
t_shell.start()
socket.setdefaulttimeout(10)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
time_out = 10
# Wait until connection with the process on rpi is established.
while s.connect_ex((self._ip, self.port)) != 0 and time_out > 0:
time_out -= 1
sleep(1)
if time_out == 0:
print("Socket connection cannot be established")
s.close()
return "Fail"
for i in range(2):
self._serial.reset_input_buffer()
self._serial.write('\r\n'.encode('utf-8'))
self._serial.write(cmd.encode('utf-8'))
self._serial.write('\r\n'.encode('utf-8'))
res = self._serial.read_until(terminator=serial.to_bytes([ord(c) for c in 'Ignored '])).decode('utf-8')
for x in re.sub(r'\r', '', res).split('\n'):
if x.find('IGNORE') != -1:
r_bytes.append([s for s in x.split(',') if len(s) == 2])
break
# Retrieve bytes sent by rpi
s.sendall(b's')
try:
data = s.recv(1024)
except:
print("No data from pi")
s.close()
return 'Fail'
w_bytes.append(["{:02X}".format(b) for b in data])
# Exit if failed to read bytes from DUT.
if len(r_bytes) != i + 1:
print("No data read by DUT.\n", repr(res))
break
# End process on the rpi.
s.sendall(b'E')
t_shell.join()
s.close()
if len(r_bytes) != 2 or len(w_bytes) != 2:
print("Write and read different number of bytes.\npi:", w_bytes, "\ndut:", r_bytes)
return 'Fail'
# Compare read and write bytes.
if self.compare_host_dut_result(w_bytes[0], r_bytes[1]) == -1:
print(repr(res))
return "Fail"
return 'Pass'
def test_IotI2CReadSyncAssisted(self):
return self.i2c_read_test("iot_tests test 11 3")
def test_IotI2CReadAsyncAssisted(self):
return self.i2c_read_test("iot_tests test 11 4")
# unit test
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--ip', nargs=1, default=[''], help='ip address of rpi')
parser.add_argument('-l', '--login_name', nargs=1, default=[''], help='login name of rpi')
parser.add_argument('-s', '--password', nargs=1, default=[''], help='password of rpi')
parser.add_argument('-p', '--port', nargs=1, default=[''], help='serial port of connected platform')
args = parser.parse_args()
try:
serial_port = serial.Serial(port=args.port[0], timeout=5)
except Exception as e:
print(e)
exit()
rpi_ip = args.ip[0]
rpi_login = args.login_name[0]
rpi_pwd = args.password[0]
with open(os.path.join(scriptdir, 'test_result.csv'), 'w', newline='') as csvfile:
field_name = ['test name', 'test result']
writer = csv.DictWriter(csvfile, fieldnames=field_name)
writer.writeheader()
t_handler = TestI2cMasterAssisted(serial_port, rpi_ip, rpi_login, rpi_pwd, writer)
t_handler.auto_run()
serial_port.close()
|
the-stack_0_11100 | # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
AutoAnchor utils
"""
import random
import numpy as np
import torch
import yaml
from tqdm import tqdm
from utils.general import LOGGER, colorstr, emojis
PREFIX = colorstr('AutoAnchor: ')
def check_anchor_order(m):
# Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer
da = a[-1] - a[0] # delta a
ds = m.stride[-1] - m.stride[0] # delta s
if da and (da.sign() != ds.sign()): # anchor order and stride order disagree
LOGGER.info(f'{PREFIX}Reversing anchor order')
m.anchors[:] = m.anchors.flip(0)
def check_anchors(dataset, model, thr=4.0, imgsz=640):
# Check anchor fit to data, recompute if necessary
m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh
def metric(k): # compute metric
r = wh[:, None] / k[None]
x = torch.min(r, 1 / r).min(2)[0] # ratio metric
best = x.max(1)[0] # best_x
aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold
bpr = (best > 1 / thr).float().mean() # best possible recall
return bpr, aat
stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides
anchors = m.anchors.clone() * stride # current anchors
bpr, aat = metric(anchors.cpu().view(-1, 2))
s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). '
if bpr > 0.98: # threshold to recompute
LOGGER.info(emojis(f'{s}Current anchors are a good fit to dataset ✅'))
else:
LOGGER.info(emojis(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...'))
na = m.anchors.numel() // 2 # number of anchors
try:
anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
except Exception as e:
LOGGER.info(f'{PREFIX}ERROR: {e}')
new_bpr = metric(anchors)[0]
if new_bpr > bpr: # replace anchors
anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)
m.anchors[:] = anchors.clone().view_as(m.anchors)
check_anchor_order(m) # must be in pixel-space (not grid-space)
m.anchors /= stride
s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)'
else:
s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)'
LOGGER.info(emojis(s))
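# Worked example of the ratio metric above (illustrative, not part of the original module):
# for a single label with wh = (10, 20) and anchors k = [(12, 18), (40, 80)],
# r = wh / k is (0.83, 1.11) for the first anchor and (0.25, 0.25) for the second;
# min(r, 1/r) per dimension gives per-anchor scores 0.83 and 0.25, so best = 0.83.
# With thr = 4.0 this label counts toward BPR because 0.83 > 1 / thr = 0.25.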
def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
""" Creates kmeans-evolved anchors from training dataset
Arguments:
dataset: path to data.yaml, or a loaded dataset
n: number of anchors
img_size: image size used for training
thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
gen: generations to evolve anchors using genetic algorithm
verbose: print all results
Return:
k: kmeans evolved anchors
Usage:
from utils.autoanchor import *; _ = kmean_anchors()
"""
from scipy.cluster.vq import kmeans
npr = np.random
thr = 1 / thr
def metric(k, wh): # compute metrics
r = wh[:, None] / k[None]
x = torch.min(r, 1 / r).min(2)[0] # ratio metric
# x = wh_iou(wh, torch.tensor(k)) # iou metric
return x, x.max(1)[0] # x, best_x
def anchor_fitness(k): # mutation fitness
_, best = metric(torch.tensor(k, dtype=torch.float32), wh)
return (best * (best > thr).float()).mean() # fitness
def print_results(k, verbose=True):
k = k[np.argsort(k.prod(1))] # sort small to large
x, best = metric(k, wh0)
bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \
f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \
f'past_thr={x[x > thr].mean():.3f}-mean: '
for x in k:
s += '%i,%i, ' % (round(x[0]), round(x[1]))
if verbose:
LOGGER.info(s[:-2])
return k
if isinstance(dataset, str): # *.yaml file
with open(dataset, errors='ignore') as f:
data_dict = yaml.safe_load(f) # model dict
from utils.dataloaders import LoadImagesAndLabels
dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
# Get label wh
shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
# Filter
i = (wh0 < 3.0).any(1).sum()
if i:
LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size')
wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels
# wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1
# Kmeans init
try:
LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...')
assert n <= len(wh) # apply overdetermined constraint
s = wh.std(0) # sigmas for whitening
k = kmeans(wh / s, n, iter=30)[0] * s # points
assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar
except Exception:
LOGGER.warning(f'{PREFIX}WARNING: switching strategies from kmeans to random init')
k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init
wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0))
k = print_results(k, verbose=False)
# Plot
# k, d = [None] * 20, [None] * 20
# for i in tqdm(range(1, 21)):
# k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
# fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True)
# ax = ax.ravel()
# ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
# fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
# ax[0].hist(wh[wh[:, 0]<100, 0],400)
# ax[1].hist(wh[wh[:, 1]<100, 1],400)
# fig.savefig('wh.png', dpi=200)
# Evolve
f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, anchor shape, mutation probability, sigma
pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar
for _ in pbar:
v = np.ones(sh)
while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
kg = (k.copy() * v).clip(min=2.0)
fg = anchor_fitness(kg)
if fg > f:
f, k = fg, kg.copy()
pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}'
if verbose:
print_results(k, verbose)
return print_results(k)
|
the-stack_0_11101 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dnf
from artifact_registry._vendor.google.auth import compute_engine, default
from artifact_registry._vendor.google.auth.exceptions import DefaultCredentialsError, RefreshError
from artifact_registry._vendor.google.auth.transport import requests
from artifact_registry._vendor.google.oauth2 import service_account
class ArtifactRegistry(dnf.Plugin):
"""DNF Plugin for authenticated access to Google Artifact Registry."""
name = 'artifact-registry'
cloud_platform_scope = 'https://www.googleapis.com/auth/cloud-platform'
def __init__(self, base, cli):
super(ArtifactRegistry, self).__init__(base, cli)
self.base = base
self.credentials = self._get_creds()
def config(self):
for repo in self.base.repos.iter_enabled():
opts = dict(repo.cfg.items(repo.id))
if 'pkg.dev' in opts.get('baseurl', ''):
self._add_headers(repo)
def _get_creds(self):
config = self.read_config(self.base.conf)
if config.has_section('main'):
if config.has_option('main', 'service_account_json'):
service_account_json = config.get('main', 'service_account_json')
return service_account.Credentials.from_service_account_file(
service_account_json, scopes=[self.cloud_platform_scope])
if config.has_option('main', 'service_account_email'):
service_account_email = config.get('main', 'service_account_email')
return compute_engine.Credentials(service_account_email)
try:
creds, _ = default()
except DefaultCredentialsError:
return None
return creds
def _add_headers(self, repo):
token = self._get_token()
if token:
headers = repo.get_http_headers()
new_headers = ('Authorization: Bearer %s' % token,) + headers
repo.set_http_headers(new_headers)
def _get_token(self):
if not self.credentials:
return None
if not self.credentials.valid:
try:
self.credentials.refresh(requests.Request())
except RefreshError:
return None
return self.credentials.token
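# Illustrative plugin configuration (a sketch; both options are optional and the path/value shown
# are assumptions -- when neither is set the plugin falls back to Application Default Credentials):
#
# [main]
# service_account_json = /etc/google/service-account-key.json
# # or, on GCE:
# # service_account_email = my-sa@my-project.iam.gserviceaccount.com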
|
the-stack_0_11102 | import hashlib
import json
import pickle
import uuid
from imp import find_module
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import OperationalError
from django.db.models import Manager, Model
from larvik.logging import get_module_logger
CONSUMERS = {}
ISDISCOVER = False
def setDiscover(mode):
global ISDISCOVER
ISDISCOVER = mode
NODES = {}
logger = get_module_logger(__file__)
def createUniqeNodeName(channel=None):
"""This function generate 10 character long hash"""
hash = hashlib.sha1()
salt = channel if channel is not None else str(uuid.uuid4())
hash.update(salt.encode('utf-8'))
return hash.hexdigest()
class NodeType(object):
inputs = []
outputs = []
name = None
path = None
settings = {}
type = None
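# Illustrative consumer declaration (hypothetical names; the register_consumer decorator below
# reads these class attributes when it registers the node):
#
# @register_consumer("maximize", model=SomeConsumerModel)
# class MaximizingConsumer(NodeType):
#     name = "Maximizing"
#     path = "maximizing"
#     type = "consumer"
#     inputs = ["Representation"]
#     outputs = ["Representation"]
#     settings = {"reload": True}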
def saveConsumers(CONSUMERLIST):
    # pickle.dump expects an open binary file object, not a filename
    with open("consumers", "wb") as f:
        pickle.dump(CONSUMERLIST, f)
class register_consumer(object):
def __init__(self, channel, model: Model= None):
"""
If there are decorator arguments, the function
to be decorated is not passed to the constructor!
"""
self.channel = channel
self.model = model
def getModelForPuts(self, puts):
return json.dumps([input.lower() if isinstance(input,str) else input.__name__.lower() for input in puts]) if puts is not None else json.dumps([])
def __call__(self, cls: NodeType):
self.name = cls.name if cls.name is not None else cls.channel
self.path = cls.path if cls.path is not None else cls.name
self.type = cls.type if cls.type is not None else "consumer"
self.inputmodel = self.getModelForPuts(cls.inputs)
self.outputmodel = self.getModelForPuts(cls.outputs)
self.settings = json.dumps(cls.settings) if cls.settings is not None else json.dumps({})
"""
If there are decorator arguments, __call__() is only called
once, as part of the decoration process! You can only give
it a single argument, which is the function object.
"""
if self.channel in NODES: raise Exception(f"The node {self.channel} already exists. Check for duplicates")
if self.channel in CONSUMERS: raise Exception(f"The node {self.channel} already exists. Check for duplicates")
if self.model is not None and ISDISCOVER:
from flow.models import Node
logger.info(f"{self.name} checking {self.model.__name__} - Checking")
manager: Manager = self.model.objects
try:
try:
object = manager.get(channel=self.channel)
object.name = self.name
object.channel = self.channel
object.save()
except ObjectDoesNotExist as e:
logger.info(f"{self.name} did not yet exist on {self.model.__name__} - Creating")
object = manager.create(name=self.name, channel=self.channel, settings=self.settings)
try:
node = Node.objects.get(hash=createUniqeNodeName(self.channel))
node.name = self.name
node.path = self.path
node.variety = self.type
node.inputmodel = self.inputmodel
node.outputmodel = self.outputmodel
node.defaultsettings = self.settings
node.channel = self.channel
node.entityid = object.id
node.save()
except ObjectDoesNotExist as e:
node = Node.objects.create(hash=createUniqeNodeName(self.channel),
entityid=object.id,
name=self.name,
path=self.path,
variety=self.type,
channel=self.channel,
inputmodel=self.inputmodel,
outputmodel=self.outputmodel,
defaultsettings=self.settings)
logger.info(f"{self.name} did not yet exist on {self.channel} - Creating")
# TODO: When everything was mirated consumers should be called here CONSUMERS[self.name] = cls
except OperationalError as e:
logger.error(f'Consumer cannot be installed, migrate first: {e}')
CONSUMERS[self.channel] = cls
NODES[self.channel] = cls
return cls
class register_node(object):
def __init__(self, node):
"""
If there are decorator arguments, the function
to be decorated is not passed to the constructor!
"""
self.node = node
def getModelForPuts(self, puts):
return json.dumps([input.lower() if isinstance(input,str) else input.__name__.lower() for input in puts]) if puts is not None else json.dumps([])
def __call__(self, cls: NodeType):
from flow.models import Node
"""
If there are decorator arguments, __call__() is only called
once, as part of the decoration process! You can only give
it a single argument, which is the function object.
"""
if self.node in NODES: raise Exception(f"The node {self.node} already exists. Check for duplicates")
if ISDISCOVER is False:
NODES[self.node] = cls
return cls
try:
try:
node = Node.objects.get(hash=createUniqeNodeName(self.node))
node.name = cls.name
node.path = cls.path
node.variety = cls.type
node.inputmodel = self.getModelForPuts(cls.inputs)
node.outputmodel = self.getModelForPuts(cls.outputs)
node.defaultsettings = json.dumps(cls.settings)
node.channel = "None"
node.entityid = None
node.save()
logger.info(f"Updating {cls.__name__} as {self.node} on {self.node}")
except ObjectDoesNotExist as e:
node = Node.objects.create(hash=createUniqeNodeName(self.node),
entityid=None,
name=cls.name,
path=cls.path,
variety=cls.type,
channel="None",
inputmodel=self.getModelForPuts(cls.inputs),
outputmodel=self.getModelForPuts(cls.outputs),
defaultsettings=json.dumps(cls.settings))
logger.info(f"Installing {cls.__name__} as {self.node} on {self.node}")
# TODO: When everything was mirated consumers should be called here CONSUMERS[self.name] = cls
except OperationalError as e:
logger.error(f'Consumer cannot be installed, migrate first: {e}')
NODES[self.node] = cls
return cls
def autodiscover():
for app in settings.INSTALLED_APPS:
# For each app, we need to look for a consumers.py inside that app's
# package. We can't use os.path here -- recall that modules may be
# imported different ways (think zip files) -- so we need to get
# the app's __path__ and look for consumers.py on that path.
# Step 1: find out the app's __path__ Import errors here will (and
# should) bubble up, but a missing __path__ (which is legal, but weird)
# fails silently -- apps that do weird things with __path__ might
# need to roll their own admin registration.
try:
app_path = import_module(app).__path__
except AttributeError:
continue
# Step 2: use imp.find_module to find the app's consumers.py. For some
# reason imp.find_module raises ImportError if the app can't be found
# but doesn't actually try to import the module. So skip this app if
# its consumers.py doesn't exist
try:
find_module('consumers', app_path)
except ImportError:
continue
# Step 3: import the app's consumers module. If this has errors we want them
# to bubble up.
import_module("%s.consumers" % app)
# autodiscover was successful, reset loading flag.
for app in settings.INSTALLED_APPS:
# For each app, we need to look for a nodes.py inside that app's
# package. We can't use os.path here -- recall that modules may be
# imported different ways (think zip files) -- so we need to get
# the app's __path__ and look for nodes.py on that path.
# Step 1: find out the app's __path__ Import errors here will (and
# should) bubble up, but a missing __path__ (which is legal, but weird)
# fails silently -- apps that do weird things with __path__ might
# need to roll their own admin registration.
try:
app_path = import_module(app).__path__
except AttributeError:
continue
# Step 2: use imp.find_module to find the app's nodes.py. For some
# reason imp.find_module raises ImportError if the app can't be found
# but doesn't actually try to import the module. So skip this app if
# its nodes.py doesn't exist
try:
find_module('nodes', app_path)
except ImportError:
continue
# Step 3: import the app's nodes module. If this has errors we want them
# to bubble up.
import_module("%s.nodes" % app)
# autodiscover was successful, reset loading flag.
return CONSUMERS |
the-stack_0_11103 | #!/usr/bin/env python3.6
"""MISP feed worker pulling down feeds in misp_feeds.txt
and adding data to the platform"""
import argparse
import hashlib
import json
import os
import sys
import traceback
from logging import error, info
from typing import Dict, Generator, Optional, Text
import caep
import requests
import act
import act.api.helpers
from act.workers.libs import misp, worker
try:
import urlparse
except ModuleNotFoundError: # Python3
import urllib.parse as urlparse # type: ignore
def parseargs() -> argparse.ArgumentParser:
""" Parse arguments """
parser = worker.parseargs('Get MISP feeds from MISP sharing directories')
parser.add_argument('--manifest-dir', default=caep.get_cache_dir('misp_manifest'),
help='The directory to store latest manifests')
return parser
def verify_manifest_dir(manifest_dir: Text) -> None:
"""Verify that the directory structure exists and that there is
always a feed file (Even empty)"""
# Manifest is at default location - create directory if it does not exists
if manifest_dir == caep.get_cache_dir('misp_manifest'):
caep.get_cache_dir('misp_manifest', create=True)
# If there is specified a manifest directory in the .ini file we
# verify that it exists (or fail hard). If no such directory
# is defined, we default to using $XDG_CACHE_DIR and create a new
# 'misp_maifest' directory there.
if not os.path.isdir(manifest_dir):
print("Could not open manifest directory:", manifest_dir)
sys.exit(1)
# Check that the misp_feeds.txt file actually exists. If not 'touch'
# the file to make sure there is at least some default config present.
feed_file = os.path.join(manifest_dir, 'misp_feeds.txt')
if not os.path.isfile(feed_file):
with open(feed_file, 'w') as feed_h:
feed_h.write("https://www.circl.lu/doc/misp/feed-osint/")
def handle_event_file(feed_url: Text, uuid: Text, proxy_string: Optional[Text] = None, cert_file: Optional[Text] = None) -> misp.Event:
"""Download, parse and store single event file"""
info("Handling {0} from {1}".format(uuid, feed_url))
proxies: Optional[Dict[Text, Text]] = None
if proxy_string:
proxies = {
'http': proxy_string,
'https': proxy_string
}
url = urlparse.urljoin(feed_url, "{0}.json".format(uuid))
req = requests.get(url, proxies=proxies, verify=cert_file)
return misp.Event(loads=req.text)
def handle_feed(manifest_dir: Text,
feed_url: Text,
proxy_string: Optional[Text] = None,
cert_file: Optional[Text] = None) -> Generator[misp.Event, None, None]:
"""Get the manifest file, check if an event file is downloaded
before (cache) and dispatch event handling of separate files"""
proxies: Optional[Dict[Text, Text]] = None
if proxy_string:
proxies = {
'http': proxy_string,
'https': proxy_string
}
manifest_url = urlparse.urljoin(feed_url, "manifest.json")
req = requests.get(manifest_url, proxies=proxies, verify=cert_file)
manifest = json.loads(req.text)
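# The manifest is a JSON object keyed by event UUID; a minimal sketch (field values illustrative):
#   {"5c6f8a06-...": {"info": "...", "timestamp": "1577836800"}, ...}
# Only the keys are used below to decide which <uuid>.json files still need to be fetched.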
feed_sha1 = hashlib.sha1(feed_url.encode("utf-8")).hexdigest()
old_manifest = {}
if manifest_dir != "NODIR":
try:
with open(os.path.join(manifest_dir, feed_sha1)) as infile:
old_manifest = json.load(infile)
except IOError:
pass
for uuid in manifest:
if uuid not in old_manifest:
yield handle_event_file(feed_url, uuid, proxy_string, cert_file)
if manifest_dir != "NODIR":
with open(os.path.join(manifest_dir, feed_sha1), "wb") as outfile:
outfile.write(json.dumps(manifest).encode("utf-8"))
def main() -> None:
"""program entry point"""
# Look for default ini file in "/etc/actworkers.ini" and ~/.config/actworkers/actworkers.ini
# (or replace .config with $XDG_CONFIG_DIR if set)
args = worker.handle_args(parseargs())
manifest_dir = args.manifest_dir
actapi = worker.init_act(args)
verify_manifest_dir(manifest_dir)
misp_feeds_file = os.path.join(manifest_dir, "misp_feeds.txt")
with open(misp_feeds_file) as f:
for line in f:
feed_data = handle_feed(manifest_dir, line.strip(), args.proxy_string, args.cert_file)
for event in feed_data:
n = 0
e = 0
act.api.helpers.handle_fact(
actapi.fact("name", event.info)
.source("report", str(event.uuid)),
output_format=args.output_format)
n += 1
try:
act.api.helpers.handle_fact(
actapi.fact("externalLink")
.source("uri", "{0}/{1}.json".format(line.strip(), event.uuid))
.destination("report", str(event.uuid)),
output_format=args.output_format)
n += 1
except act.api.base.ResponseError as err:
e += 1
error("misp_feeds, main unable to add fact to platform, error calling actapi: %s" % err, exc_info=True)
for attribute in event.attributes:
if not attribute.act_type:
continue
try:
act.api.helpers.handle_fact(
actapi.fact("mentions")
.source("report", str(event.uuid))
.destination(attribute.act_type, attribute.value),
output_format=args.output_format)
n += 1
except act.api.base.ResponseError as err:
e += 1
error("misp_feeds: main unable to add attribute fact to platform, error calling actapi: %s" % err, exc_info=True)
info("{0} facts. {1} errors.".format(n, e))
def main_log_error() -> None:
"Call main() and log all exceptions as errors"
try:
main()
except Exception:
error("Unhandled exception: {}".format(traceback.format_exc()))
raise
if __name__ == '__main__':
main_log_error()
|
the-stack_0_11104 | import argparse
from collections import Counter
import numpy as np
from mpd import load
def main():
parser = argparse.ArgumentParser()
parser.add_argument('jsonfile')
args = parser.parse_args()
playlists = load(args.jsonfile)
print("N =", len(playlists))
lens = [len(p['tracks']) for p in playlists]
print("Playlist track count:", Counter(lens))
has_playlist = ['name' in p for p in playlists]
print("Has playlist name:", Counter(has_playlist))
nameless_lens = [len(p['tracks']) for p in playlists if 'name' not in p]
print("Playlist track count among nameless:", Counter(nameless_lens))
named_lens = [len(p['tracks']) for p in playlists if 'name' in p]
print("Playlist track count among nameless:", Counter(named_lens))
try:
holdouts = np.array([p['num_holdouts'] for p in playlists])
print("Holdouts: {:.2f} {:.2f}".format(holdouts.mean(), holdouts.std()))
except KeyError:
print("[warn] Num holdouts property missing")
if __name__ == '__main__':
main()
|
the-stack_0_11105 | from typing import Tuple
import numpy as np
import torch
from .bandits import DataBasedBandit
class WheelBandit(DataBasedBandit):
"""The wheel contextual bandit from the Riquelme et al 2018 paper.
Source:
https://github.com/tensorflow/models/tree/archive/research/deep_contextual_bandits
Citation:
Riquelme, Tucker, Snoek. Deep Bayesian bandits showdown: An empirical comparison of Bayesian deep networks for Thompson sampling. In Proceedings of the 6th International Conference on Learning Representations, 2018.
Args:
device (str): Device to use for tensor operations.
"cpu" for cpu or "cuda" for cuda. Defaults to "cpu".
Attributes:
n_actions (int): Number of actions available.
context_dim (int): The length of context vector.
len (int): The number of examples (context, reward pairs) in the dataset.
device (torch.device): Device to use for tensor operations.
"""
def __init__(self, delta=0.5, n_samples=2000, **kwargs):
super(WheelBandit, self).__init__(kwargs.get("device", "cpu"))
self.delta = delta
self.n_actions = 5
self.context_dim = 2
self.len = n_samples
self.mu = [1.2, 1.0, 50.0]
self.sigma = 0.01
self._sign_opt_action = {
(1.0, 1.0): 1,
(1.0, -1.0): 2,
(-1.0, 1.0): 3,
(-1.0, -1.0): 4,
}
self._generate_contexts()
self._generate_rewards()
def _generate_rewards(self):
r_all = np.random.normal(self.mu[1], self.sigma, size=(self.len, self.n_actions))
r_all[:,0] += self.mu[0] - self.mu[1]
for t in range(self.len):
if np.linalg.norm(self._context[t]) > self.delta:
signs = np.sign(self._context[t])
opt_act = self._sign_opt_action[(signs[0], signs[1])]
r_all[t, opt_act] += self.mu[2] - self.mu[1]
self.rewards = r_all
self.max_rewards = np.max(self.rewards, axis=1)
def reset(self) -> torch.Tensor:
"""Reset bandit by shuffling indices and get new context.
Returns:
torch.Tensor: Current context selected by bandit.
"""
self._reset()
self._generate_contexts()
self._generate_rewards()
return self._get_context()
def _compute_reward(self, action: int) -> Tuple[int, int]:
"""Compute the reward for a given action.
Args:
action (int): The action to compute reward for.
Returns:
Tuple[int, int]: Computed reward.
"""
r = self.rewards[self.idx, action]
max_r = self.max_rewards[self.idx]
return r, max_r
def _generate_contexts(self) -> None:
"""Returns 2-dim samples falling in the unit circle.
"""
theta = np.random.uniform(0.0, 2.0 * np.pi, (self.len))
r = np.sqrt(np.random.uniform(size=self.len)) # sqrt is in the original code of Riquelme et al
self._context = np.stack([r * np.cos(theta), r * np.sin(theta)], axis=1)
def _get_context(self) -> torch.Tensor:
"""Get the vector for current selected context.
Returns:
torch.Tensor: Current context vector.
"""
return torch.tensor(
self._context[self.idx],
device=self.device,
dtype=torch.float,
)
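# Illustrative usage sketch (assumes the genrl-style DataBasedBandit base class above, which
# provides _reset() and tracks the current sample index as self.idx):
#
# bandit = WheelBandit(delta=0.5, n_samples=1000)
# context = bandit.reset()                  # tensor of shape (2,) inside the unit circle
# reward, best = bandit._compute_reward(0)  # reward for arm 0 and the best reward at this index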
|
the-stack_0_11107 | import math
import operator as op
from functools import reduce
def memoize(f):
"""memoization decorator for a function taking one or more arguments"""
class memodict(dict):
def __getitem__(self, *key):
return dict.__getitem__(self, key)
def __missing__(self, key):
ret = self[key] = f(*key)
return ret
return memodict().__getitem__
@memoize
def catalan_recursive(n):
if n == 0:
return 1
return (2 * (2 * n - 1) * catalan_recursive(n - 1)) // (n + 1)
@memoize
def euler_recursive(n, k):
if (k == 0) or (n - 1 == k):
return 1
return (n - k) * euler_recursive(n - 1, k - 1) + (k + 1) * euler_recursive(n - 1, k)
@memoize
def stirling_1_recursive(n, k):
if (n == k == 0):
return 1
if (n == 0) or (k == 0):
return 0
return stirling_1_recursive(n - 1, k - 1) + (n - 1) * stirling_1_recursive(n - 1, k)
@memoize
def stirling_2_recursive(n, k):
if (k == 1) or (n == k):
return 1
return stirling_2_recursive(n - 1, k - 1) + k * stirling_2_recursive(n - 1, k)
nCr = lambda n, r: reduce(op.mul, range(n - r + 1, n + 1), 1) // math.factorial(r)
multinomial = lambda k: math.factorial(sum(k)) // reduce(op.mul, (math.factorial(i) for i in k))
derangements = lambda n: int(math.factorial(n) / math.e + 0.5)
bell = lambda n: 1 if n == 0 else sum(stirling_2_recursive(n, k) for k in range(1, n + 1))  # B_n = sum_k S(n, k); the k = 0 term is skipped because S(n, 0) = 0 for n >= 1
catalan = lambda n: nCr(2 * n, n) // (n + 1)
euler = lambda n, k: sum((1 - 2 * (j & 1)) * nCr(n + 1, j) * ((k + 1 - j)**n) for j in range(k + 1))
stirling_2 = lambda n, k: sum(((-1)**(k - j)) * nCr(k, j) * (j**n) for j in range(k + 1)) // math.factorial(k)
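# Illustrative sanity checks (not part of the original module); the expected values are the
# standard ones: C(5,2)=10, Catalan_4=14, D_4=9, S(5,2)=15, Eulerian A(4,1)=11, Bell_4=15.
if __name__ == "__main__":
    assert nCr(5, 2) == 10
    assert multinomial([2, 1, 1]) == 12
    assert derangements(4) == 9
    assert catalan(4) == 14 == catalan_recursive(4)
    assert stirling_2(5, 2) == 15 == stirling_2_recursive(5, 2)
    assert euler(4, 1) == 11 == euler_recursive(4, 1)
    assert bell(4) == 15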
|
the-stack_0_11108 | """Simple version of MBIE-EB
Paper: An analysis of model-based Interval Estimation for Markov
Decision Processes (Strehl and Littman, 2008)
Link: https://doi.org/10.1016/j.jcss.2007.08.009
"""
import numpy as np
from rlpy.representations import Enumerable
from .agent import Agent
from ._vi_impl import compute_q_values
__author__ = "Yuji Kanagawa"
class MBIE_EB(Agent):
"""
Simplified version of MBIE-EB algorithm,
which executes VI only when the episode ends.
"""
def __init__(
self,
*args,
beta=0.1,
seed=1,
spread_prior=False,
show_reward=False,
vi_threshold=1e-6,
):
"""
:param beta: β coefficient of the exploration bonus in MBIE-EB
:param seed: Random seed passed to the base Agent
:param spread_prior: Accepted for API compatibility; unused by this implementation
:param show_reward: If True, visualize the bonus-augmented mean rewards on the domain
:param vi_threshold: Convergence threshold for value iteration
"""
super().__init__(*args, seed=seed)
if not isinstance(self.representation, Enumerable):
raise ValueError("PSRL works only with a tabular representation.")
n_states = self.representation.features_num
n_actions = self.representation.domain.num_actions
self.beta = beta
self.sa_count = np.zeros((n_states, n_actions))
self.r_sum = np.zeros((n_states, n_actions))
self.sas_count = np.zeros((n_states, n_actions, n_states))
self.n_states = n_states
self.n_actions = n_actions
self.ep_cap = self.representation.domain.episode_cap
self.update_steps = 0
self.show_reward = show_reward
self.vi_threshold = vi_threshold
def _update_prior(self, s, a, reward, terminal, ns):
s_id = self.representation.state_id(s)
self.sa_count[s_id, a] += 1
self.r_sum[s_id, a] += reward
if not terminal:
ns_id = self.representation.state_id(ns)
self.sas_count[s_id, a, ns_id] += 1
def _sample_mdp(self, show_reward=False):
r_sample = np.zeros_like(self.sa_count)
p_sample = np.zeros_like(self.sas_count)
for s in range(self.n_states):
for a in range(self.n_actions):
n = self.sa_count[s, a]
if n == 0:
continue
r = self.r_sum[s, a] / n
r_sample[s, a] = r + self.beta / np.sqrt(n)
p_sample[s, a] = self.sas_count[s, a] / n
if show_reward and hasattr(self.representation.domain, "show_reward"):
self.representation.domain.show_reward(r_sample.mean(axis=-1))
return r_sample, p_sample
def _solve_sampled_mdp(self):
r, p = self._sample_mdp(show_reward=self.show_reward)
q_value, _ = compute_q_values(
r, p, self.ep_cap, self.discount_factor, self.vi_threshold
)
self.representation.weight_vec = q_value.T.flatten()
self.update_steps += 1
def learn(self, s, p_actions, a, r, ns, np_actions, na, terminal):
self._update_prior(s, a, r, terminal, ns)
if terminal is False:
return
self._solve_sampled_mdp()
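# For reference, a sketch of the update this agent approximates (Strehl & Littman, 2008):
# the solver runs value iteration on the empirical model with an exploration bonus,
#     Q(s, a) = R_hat(s, a) + beta / sqrt(n(s, a)) + gamma * sum_s' T_hat(s'|s, a) * max_a' Q(s', a')
# where n(s, a) is the visit count accumulated in self.sa_count.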
|
the-stack_0_11110 | """Module for building the autocompletion indices."""
from __future__ import print_function
import os
import json
from six import BytesIO
from docutils.core import publish_string
from botocore.docs.bcdoc import textwriter
import awscli.clidriver
from awscli.argprocess import ParamShorthandDocGen
from awsshell import determine_doc_index_filename
from awsshell.utils import remove_html
from awsshell import docs
SHORTHAND_DOC = ParamShorthandDocGen()
def new_index():
return {'arguments': [], 'argument_metadata': {},
'commands': [], 'children': {}}
def index_command(index_dict, help_command):
arg_table = help_command.arg_table
for arg in arg_table:
arg_obj = arg_table[arg]
metadata = {
'required': arg_obj.required,
'type_name': arg_obj.cli_type_name,
'minidoc': '',
'example': '',
# The name used in the API call/botocore,
# typically CamelCased.
'api_name': getattr(arg_obj, '_serialized_name', '')
}
if arg_obj.documentation:
metadata['minidoc'] = remove_html(
arg_obj.documentation.split('\n')[0])
if SHORTHAND_DOC.supports_shorthand(arg_obj.argument_model):
example = SHORTHAND_DOC.generate_shorthand_example(
arg, arg_obj.argument_model)
metadata['example'] = example
index_dict['arguments'].append('--%s' % arg)
index_dict['argument_metadata']['--%s' % arg] = metadata
for cmd in help_command.command_table:
index_dict['commands'].append(cmd)
# Each sub command will trigger a recurse.
child = new_index()
index_dict['children'][cmd] = child
sub_command = help_command.command_table[cmd]
sub_help_command = sub_command.create_help_command()
index_command(child, sub_help_command)
def write_index(output_filename=None):
driver = awscli.clidriver.create_clidriver()
help_command = driver.create_help_command()
index = {'aws': new_index()}
current = index['aws']
index_command(current, help_command)
result = json.dumps(index)
if not os.path.isdir(os.path.dirname(output_filename)):
os.makedirs(os.path.dirname(output_filename))
with open(output_filename, 'w') as f:
f.write(result)
def write_doc_index(output_filename=None, db=None, help_command=None):
if output_filename is None:
output_filename = determine_doc_index_filename()
user_provided_db = True
if db is None:
user_provided_db = False
db = docs.load_doc_db(output_filename)
if help_command is None:
driver = awscli.clidriver.create_clidriver()
help_command = driver.create_help_command()
should_close = not user_provided_db
do_write_doc_index(db, help_command, close_db_on_finish=should_close)
def do_write_doc_index(db, help_command, close_db_on_finish):
try:
_index_docs(db, help_command)
db['__complete__'] = 'true'
finally:
if close_db_on_finish:
# If the user provided their own db object,
# they are responsible for closing it.
# If we created our own db object, we own
# closing the db.
db.close()
def _index_docs(db, help_command):
for command_name in help_command.command_table:
command = help_command.command_table[command_name]
sub_help_command = command.create_help_command()
text_docs = render_docs_for_cmd(sub_help_command)
dotted_name = '.'.join(['aws'] + command.lineage_names)
db[dotted_name] = text_docs
_index_docs(db, sub_help_command)
def render_docs_for_cmd(help_command):
renderer = FileRenderer()
help_command.renderer = renderer
help_command(None, None)
# The report_level override is so that we don't print anything
# to stdout/stderr on rendering issues.
original_cli_help = renderer.contents.decode('utf-8')
text_content = convert_rst_to_basic_text(original_cli_help)
index = text_content.find('DESCRIPTION')
if index > 0:
text_content = text_content[index + len('DESCRIPTION'):]
return text_content
def convert_rst_to_basic_text(contents):
"""Converts restructured text to basic text output.
This function removes most of the decorations added
in restructured text.
This function is used to generate documentation we
can show to users in a cross platform manner.
Basic indentation and list formatting are kept,
but many RST features are removed (such as
section underlines).
"""
# The report_level override is so that we don't print anything
# to stdout/stderr on rendering issues.
converted = publish_string(
contents, writer=BasicTextWriter(),
settings_overrides={'report_level': 5})
return converted.decode('utf-8')
class FileRenderer(object):
def __init__(self):
self._io = BytesIO()
def render(self, contents):
self._io.write(contents)
@property
def contents(self):
return self._io.getvalue()
class BasicTextWriter(textwriter.TextWriter):
def translate(self):
visitor = BasicTextTranslator(self.document)
self.document.walkabout(visitor)
self.output = visitor.body
class BasicTextTranslator(textwriter.TextTranslator):
def depart_title(self, node):
# Make the section titles upper cased, similar to
# the man page output.
text = ''.join(x[1] for x in self.states.pop() if x[0] == -1)
self.stateindent.pop()
self.states[-1].append((0, ['', text.upper(), '']))
# The botocore TextWriter has additional formatting
# for literals, for the aws-shell docs we don't want any
# special processing so these nodes are noops.
def visit_literal(self, node):
pass
def depart_literal(self, node):
pass
|
the-stack_0_11111 | import json
import redis
from collections import defaultdict
class RedisDB:
"""Backend using Redis.
Parameters to open the database can be passed with the url format::
redis://[:password]@localhost:6379/0
"""
def __init__(self, name):
self.name = name
self._dbm = redis.from_url(name)
self._db = defaultdict(dict)
self.dirty = set()
def dump(self):
"""save/close DBM file"""
for task_id in self.dirty:
self._dbm[task_id] = json.dumps(self._db[task_id])
self.dirty = set()
sync = dump
def set(self, task_id, dependency, value):
"""Store value in the DB."""
self._db[task_id][dependency] = value
self.dirty.add(task_id)
def get(self, task_id, dependency):
"""Get value stored in the DB."""
# optimization, just try to get it without checking it exists
if task_id in self._db:
return self._db[task_id].get(dependency, None)
else:
try:
task_data = self._dbm[task_id]
except KeyError:
return
self._db[task_id] = json.loads(task_data.decode('utf-8'))
return self._db[task_id].get(dependency, None)
def in_(self, task_id):
"""@return bool if task_id is in DB"""
return task_id in self._dbm or task_id in self.dirty
def remove(self, task_id):
"""remove saved dependecies from DB for taskId"""
if task_id in self._db:
del self._db[task_id]
if task_id in self._dbm:
del self._dbm[task_id]
if task_id in self.dirty:
self.dirty.remove(task_id)
def remove_all(self):
"""remove saved dependecies from DB for all tasks"""
self._db = defaultdict(dict)
self._dbm.flushdb()
self.dirty = set()
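# Illustrative usage sketch (assumes a reachable Redis instance; hostname and values are
# placeholders):
#
# db = RedisDB("redis://localhost:6379/0")
# db.set("task1", "checksum", "abc123")
# print(db.get("task1", "checksum"))   # -> "abc123", served from the in-memory cache
# db.dump()                            # persist dirty entries back to Redis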
|
the-stack_0_11113 | """Play Routes rules
Bazel rules for running the
[Play routes file compiler](https://github.com/playframework/playframework/tree/master/framework/src/routes-compiler/src/main/scala/play/routes/compiler)
on Play routes files
"""
gendir_base_path = "play/routes"
play_imports = [
"controllers.Assets.Asset",
]
# TODO: update this
canonical_external_repo_name = "XXX_name_goes_here"
def _sanitize_string_for_usage(s):
res_array = []
for i in range(len(s)):
c = s[i]
if c.isalnum() or c == ".":
res_array.append(c)
else:
res_array.append("_")
return "".join(res_array)
def _format_import_args(imports):
return ["--routesImport={}".format(i) for i in imports]
def _impl(ctx):
gendir = ctx.actions.declare_directory(
gendir_base_path + "/" + _sanitize_string_for_usage(ctx.attr.name)
)
paths = [f.path for f in ctx.files.srcs]
args = [gendir.path] + [",".join(paths)]
if ctx.attr.include_play_imports:
args = args + _format_import_args(play_imports)
args = args + _format_import_args(ctx.attr.routes_imports)
if ctx.attr.generate_reverse_router:
args = args + ["--generateReverseRouter"]
if ctx.attr.namespace_reverse_router:
args = args + ["--namespaceReverserRouter"]
if ctx.attr.routes_generator:
args = args + ["--routesGenerator={}".format(ctx.attr.routes_generator)]
ctx.actions.run(
inputs = ctx.files.srcs,
outputs = [gendir],
arguments = args,
progress_message = "Compiling play routes",
executable = ctx.executable._play_routes_compiler,
)
# TODO: something more portable
ctx.actions.run_shell(
inputs = [gendir],
outputs = [ctx.outputs.srcjar],
arguments = [ctx.executable._zipper.path, gendir.path, gendir.short_path, ctx.outputs.srcjar.path],
command = """$1 c $4 META-INF/= $(find -L $2 -type f | while read v; do echo ${v#"${2%$3}"}=$v; done)""",
progress_message = "Bundling compiled play routes into srcjar",
tools = [ctx.executable._zipper],
)
play_routes = rule(
implementation = _impl,
doc = "Compiles Play routes files templates to Scala sources files.",
attrs = {
"srcs": attr.label_list(
doc = "Play routes files",
allow_files = True,
mandatory = True
),
"routes_imports": attr.string_list(
doc = "Additional imports to import to the Play routes",
),
"routes_generator": attr.string(
doc = "The full class of the routes generator, e.g., `play.routes.compiler.InjectedRoutesGenerator`",
default = ""
),
"generate_reverse_router": attr.bool(
doc = "Whether the reverse router should be generated. Setting to false may reduce compile times if it's not needed.",
default = False
),
"namespace_reverse_router": attr.bool(
doc = "Whether the reverse router should be namespaced. Useful if you have many routers that use the same actions.",
default = False
),
"include_play_imports": attr.bool(
doc = "If true, include the imports the Play project includes by default.",
default = False
),
"_play_routes_compiler": attr.label(
executable = True,
cfg = "host",
allow_files = True,
default = Label("@"+canonical_external_repo_name+"//:compiler"),
),
"_zipper": attr.label(cfg = "host", default = "@bazel_tools//tools/zip:zipper", executable = True),
},
outputs = {
"srcjar": "play_routes_%{name}.srcjar",
}
)
# This is the implementation of the repository rule. It downloads the
# play-routes compiler as a deploy JAR from the releases page.
#
# See https://docs.bazel.build/versions/master/skylark/lib/globals.html#repository_rule
def _play_app_repository_rule_implementation(repository_ctx):
"""Implementation for play_app_repository_rule"""
base_url = "https://github.com/lucidsoftware/rules_play_routes/releases/download"
compiler_url = "{}/{}/play-routes-compiler_deploy.jar".format(
base_url,
repository_ctx.attr.version,
)
repository_ctx.report_progress("Downloading compiler from {}".format(compiler_url))
download_info = repository_ctx.download(
compiler_url,
output = "play-routes-compiler_deploy.jar",
sha256 = repository_ctx.attr.sha256,
)
repository_ctx.report_progress("Successfully downloaded compiler from {}, sha256={}".format(
compiler_url,
download_info.sha256,
))
# Write a build file that turns the deployment JAR into a Java binary that
# we can run.
build_file_content = """java_import(
name = "deployjar",
jars = [":play-routes-compiler_deploy.jar"],
)
java_binary(
name = "compiler",
main_class = "rulesplayroutes.routes.CommandLinePlayRoutesCompiler",
visibility = ["//visibility:public"],
runtime_deps = [":deployjar"],
)
"""
repository_ctx.file("BUILD", content = build_file_content, executable = False)
# Declares the repository rule.
_play_app_repository_rule = repository_rule(
implementation = _play_app_repository_rule_implementation,
local = True,
attrs = {
"version": attr.string(mandatory = True),
"sha256": attr.string(mandatory = True),
},
doc = "play_repositories loads the Play Framework rules into a WORKSPACE"
)
# Default release versions specified to play_repositories.
_default_compiler_version = "GITHUB RELEASE NAME HERE"
_default_compiler_jar_sha = "JAR SHA HERE"
# play_repositories is a repository rule that introduces a new external
# repository into the WORKSPACE that invokes this rule. This activates the
# Play rules and is the main entrypoint for consumers of these rules. This is
# required in the WORKSPACE that will depend on the rules.
#
# The rules depend on a small number of compiled binaries which are available
# on the Github releases page for this repository. The argument to this
# function, tools_version_and_shas, is a tuple specifying the
#
# 1. Name of a release (e.g. "v0.0.2")
# 2. SHA256 of the play-routes-compiler_deploy.jar from that release.
#
# A default is provided.
def play_repositories(tools_version_and_shas = (_default_compiler_version, _default_compiler_jar_sha)):
(compiler_vers, jar_shas) = tools_version_and_shas
_play_app_repository_rule(
name = canonical_external_repo_name,
version = compiler_vers,
sha256 = jar_shas,
)
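# Illustrative usage (a sketch; the load path and repository label are assumptions that depend
# on how this .bzl file is exposed):
#
# In WORKSPACE:
#   load("@rules_play_routes//:workspace.bzl", "play_repositories")
#   play_repositories()  # or play_repositories(("v0.0.2", "<sha256 of play-routes-compiler_deploy.jar>"))
#
# In a BUILD file:
#   play_routes(
#       name = "play_routes",
#       srcs = ["conf/routes"],
#       include_play_imports = True,
#   )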
|
the-stack_0_11116 | import typing as t
from typing import TYPE_CHECKING
from starlette.requests import Request
from multipart.multipart import parse_options_header
from starlette.responses import Response
from .base import IOType
from .base import IODescriptor
from ...exceptions import InvalidArgument
from ...exceptions import BentoMLException
from ..utils.formparser import populate_multipart_requests
from ..utils.formparser import concat_to_multipart_responses
if TYPE_CHECKING:
from .file import File
from .json import JSON
from .text import Text
from .image import Image
from .numpy import NumpyNdarray
from .pandas import PandasSeries
from .pandas import PandasDataFrame
MultipartIO = t.Dict[str, IOType]
class Multipart(IODescriptor[MultipartIO]):
"""
:code:`Multipart` defines API specification for the inputs/outputs of a Service, where inputs/outputs
of a Service can receive/send a *multipart* request/responses as specified in your API function signature.
Sample implementation of a sklearn service:
.. code-block:: python
# sklearn_svc.py
import bentoml
from bentoml.io import NumpyNdarray, Multipart, JSON
import bentoml.sklearn
runner = bentoml.sklearn.load_runner("sklearn_model_clf")
svc = bentoml.Service("iris-classifier", runners=[runner])
input_spec = Multipart(arr=NumpyNdarray(), annotations=JSON())
output_spec = Multipart(output=NumpyNdarray(), result=JSON())
@svc.api(input=input_spec, output=output_spec)
def predict(arr, annotations):
res = runner.run(arr)
return {"output":res, "result":annotations}
Users can then serve this service with :code:`bentoml serve`:
.. code-block:: bash
% bentoml serve ./sklearn_svc.py:svc --reload
(Press CTRL+C to quit)
[INFO] Starting BentoML API server in development mode with auto-reload enabled
[INFO] Serving BentoML Service "iris-classifier" defined in "sklearn_svc.py"
[INFO] API Server running on http://0.0.0.0:3000
Users can then send requests to the newly started services with any client:
.. tabs::
.. code-tab:: python
import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder
m = MultipartEncoder(
fields={'field0': 'value', 'field1': 'value',
'field2': ('filename', open('test.json', 'rb'), 'application/json')}
)
requests.post('http://0.0.0.0:3000/predict', data=m, headers={'Content-Type': m.content_type})
.. code-tab:: bash
% curl -X POST -H "Content-Type: multipart/form-data" -F annotations=@test.json -F arr='[5,4,3,2]' http://0.0.0.0:3000/predict
--b1d72c201a064ecd92a17a412eb9208e
Content-Disposition: form-data; name="output"
content-length: 1
content-type: application/json
1
--b1d72c201a064ecd92a17a412eb9208e
Content-Disposition: form-data; name="result"
content-length: 13
content-type: application/json
{"foo":"bar"}
--b1d72c201a064ecd92a17a412eb9208e--
Args:
inputs (:code:`Dict[str, IODescriptor]`):
Dictionary consisting keys as inputs definition for a Multipart
request/response, values as IODescriptor supported by BentoML. Currently,
Multipart supports Image, NumpyNdarray, PandasDataFrame, PandasSeries, Text,
and File.
Make sure to match the input params in an API function to the keys defined
under :code:`Multipart`:
.. code-block:: bash
+----------------------------------------------------------------+
| |
| +--------------------------------------------------------+ |
| | | |
| | Multipart(arr=NumpyNdarray(), annotations=JSON() | |
| | | |
| +----------------+-----------------------+---------------+ |
| | | |
| | | |
| | | |
| +----+ +---------+ |
| | | |
| +---------------v--------v---------+ |
| | def predict(arr, annotations): | |
| +----------------------------------+ |
| |
+----------------------------------------------------------------+
Returns:
:obj:`~bentoml._internal.io_descriptors.IODescriptor`: IO Descriptor that Multipart request/response.
"""
def __init__(
self,
**inputs: t.Union[
"Image",
"JSON",
"Text",
"NumpyNdarray",
"PandasDataFrame",
"PandasSeries",
"File",
],
):
for descriptor in inputs.values():
if isinstance(descriptor, Multipart): # pragma: no cover
raise InvalidArgument(
"Multipart IO can not contain nested Multipart item"
)
self._inputs: t.Dict[
str,
t.Union[
"Image",
"JSON",
"Text",
"NumpyNdarray",
"PandasDataFrame",
"PandasSeries",
"File",
],
] = inputs
def openapi_schema_type(self) -> t.Dict[str, t.Any]:
return {
"type": "object",
"properties": {
k: io.openapi_schema_type() for k, io in self._inputs.items()
},
}
def openapi_request_schema(self) -> t.Dict[str, t.Any]:
"""Returns OpenAPI schema for incoming requests"""
return {"multipart/form-data": {"schema": self.openapi_schema_type()}}
def openapi_responses_schema(self) -> t.Dict[str, t.Any]:
"""Returns OpenAPI schema for outcoming responses"""
return {"multipart/form-data": {"schema": self.openapi_schema_type()}}
async def from_http_request(self, request: Request) -> MultipartIO:
ctype, _ = parse_options_header(request.headers["content-type"])
if ctype != b"multipart/form-data":
raise BentoMLException(
f"{self.__class__.__name__} only accepts `multipart/form-data` as Content-Type header, got {ctype} instead."
)
res: MultipartIO = dict()
reqs = await populate_multipart_requests(request)
for k, i in self._inputs.items():
req = reqs[k]
v = await i.from_http_request(req)
res[k] = v
return res
async def to_http_response(self, obj: MultipartIO) -> Response:
res_mapping: t.Dict[str, Response] = {}
for k, io_ in self._inputs.items():
data = obj[k]
# TODO(aarnphm): fix with stubs
res_mapping[k] = await io_.to_http_response(data) # type: ignore[reportGeneralTypeIssue]
return await concat_to_multipart_responses(res_mapping)
|
the-stack_0_11117 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.protocol}.
"""
from __future__ import division, absolute_import
from zope.interface.verify import verifyObject
from zope.interface import implementer
from twisted.python.failure import Failure
from twisted.internet.interfaces import (
IProtocol, ILoggingContext, IProtocolFactory, IConsumer)
from twisted.internet.defer import CancelledError
from twisted.internet.protocol import (
Protocol, ClientCreator, Factory, ProtocolToConsumerAdapter,
ConsumerToProtocolAdapter)
from twisted.trial.unittest import TestCase
from twisted.test.proto_helpers import MemoryReactorClock, StringTransport
from twisted.logger import LogLevel, globalLogPublisher
class ClientCreatorTests(TestCase):
"""
Tests for L{twisted.internet.protocol.ClientCreator}.
"""
def _basicConnectTest(self, check):
"""
Helper for implementing a test to verify that one of the I{connect}
methods of L{ClientCreator} passes the right arguments to the right
reactor method.
@param check: A function which will be invoked with a reactor and a
L{ClientCreator} instance and which should call one of the
L{ClientCreator}'s I{connect} methods and assert that all of its
arguments except for the factory are passed on as expected to the
reactor. The factory should be returned.
"""
class SomeProtocol(Protocol):
pass
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, SomeProtocol)
factory = check(reactor, cc)
protocol = factory.buildProtocol(None)
self.assertIsInstance(protocol, SomeProtocol)
def test_connectTCP(self):
"""
L{ClientCreator.connectTCP} calls C{reactor.connectTCP} with the host
and port information passed to it, and with a factory which will
construct the protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
cc.connectTCP('example.com', 1234, 4321, ('1.2.3.4', 9876))
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
self.assertEqual(host, 'example.com')
self.assertEqual(port, 1234)
self.assertEqual(timeout, 4321)
self.assertEqual(bindAddress, ('1.2.3.4', 9876))
return factory
self._basicConnectTest(check)
def test_connectUNIX(self):
"""
L{ClientCreator.connectUNIX} calls C{reactor.connectUNIX} with the
filename passed to it, and with a factory which will construct the
protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
cc.connectUNIX('/foo/bar', 123, True)
address, factory, timeout, checkPID = reactor.unixClients.pop()
self.assertEqual(address, '/foo/bar')
self.assertEqual(timeout, 123)
self.assertEqual(checkPID, True)
return factory
self._basicConnectTest(check)
def test_connectSSL(self):
"""
L{ClientCreator.connectSSL} calls C{reactor.connectSSL} with the host,
port, and context factory passed to it, and with a factory which will
construct the protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
expectedContextFactory = object()
cc.connectSSL('example.com', 1234, expectedContextFactory, 4321, ('4.3.2.1', 5678))
host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
self.assertEqual(host, 'example.com')
self.assertEqual(port, 1234)
self.assertIs(contextFactory, expectedContextFactory)
self.assertEqual(timeout, 4321)
self.assertEqual(bindAddress, ('4.3.2.1', 5678))
return factory
self._basicConnectTest(check)
def _cancelConnectTest(self, connect):
"""
Helper for implementing a test to verify that cancellation of the
L{Deferred} returned by one of L{ClientCreator}'s I{connect} methods is
implemented to cancel the underlying connector.
@param connect: A function which will be invoked with a L{ClientCreator}
            instance as an argument and which should call one of its I{connect}
methods and return the result.
@return: A L{Deferred} which fires when the test is complete or fails if
there is a problem.
"""
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, Protocol)
d = connect(cc)
connector = reactor.connectors.pop()
self.assertFalse(connector._disconnected)
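        # MemoryReactorClock records the connection attempt without doing any
        # real I/O, so cancelling the Deferred should mark the fake connector
        # as disconnected, which the assertions below verify.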
d.cancel()
self.assertTrue(connector._disconnected)
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCP(self):
"""
The L{Deferred} returned by L{ClientCreator.connectTCP} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectTCP('example.com', 1234)
return self._cancelConnectTest(connect)
def test_cancelConnectUNIX(self):
"""
        The L{Deferred} returned by L{ClientCreator.connectUNIX} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectUNIX('/foo/bar')
return self._cancelConnectTest(connect)
def test_cancelConnectSSL(self):
"""
        The L{Deferred} returned by L{ClientCreator.connectSSL} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectSSL('example.com', 1234, object())
return self._cancelConnectTest(connect)
def _cancelConnectTimeoutTest(self, connect):
"""
Like L{_cancelConnectTest}, but for the case where the L{Deferred} is
cancelled after the connection is set up but before it is fired with the
resulting protocol instance.
"""
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, Protocol)
d = connect(reactor, cc)
connector = reactor.connectors.pop()
# Sanity check - there is an outstanding delayed call to fire the
# Deferred.
self.assertEqual(len(reactor.getDelayedCalls()), 1)
# Cancel the Deferred, disconnecting the transport just set up and
# cancelling the delayed call.
d.cancel()
self.assertEqual(reactor.getDelayedCalls(), [])
# A real connector implementation is responsible for disconnecting the
# transport as well. For our purposes, just check that someone told the
# connector to disconnect.
self.assertTrue(connector._disconnected)
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCPTimeout(self):
"""
L{ClientCreator.connectTCP} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectTCP('example.com', 1234)
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def test_cancelConnectUNIXTimeout(self):
"""
L{ClientCreator.connectUNIX} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectUNIX('/foo/bar')
address, factory, timeout, bindAddress = reactor.unixClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def test_cancelConnectSSLTimeout(self):
"""
L{ClientCreator.connectSSL} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectSSL('example.com', 1234, object())
            host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def _cancelConnectFailedTimeoutTest(self, connect):
"""
Like L{_cancelConnectTest}, but for the case where the L{Deferred} is
cancelled after the connection attempt has failed but before it is fired
with the resulting failure.
"""
reactor = MemoryReactorClock()
cc = ClientCreator(reactor, Protocol)
d, factory = connect(reactor, cc)
connector = reactor.connectors.pop()
factory.clientConnectionFailed(
connector, Failure(Exception("Simulated failure")))
# Sanity check - there is an outstanding delayed call to fire the
# Deferred.
self.assertEqual(len(reactor.getDelayedCalls()), 1)
# Cancel the Deferred, cancelling the delayed call.
d.cancel()
self.assertEqual(reactor.getDelayedCalls(), [])
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCPFailedTimeout(self):
"""
Similar to L{test_cancelConnectTCPTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectTCP('example.com', 1234)
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
def test_cancelConnectUNIXFailedTimeout(self):
"""
Similar to L{test_cancelConnectUNIXTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectUNIX('/foo/bar')
address, factory, timeout, bindAddress = reactor.unixClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
def test_cancelConnectSSLFailedTimeout(self):
"""
Similar to L{test_cancelConnectSSLTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectSSL('example.com', 1234, object())
            host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
class ProtocolTests(TestCase):
"""
Tests for L{twisted.internet.protocol.Protocol}.
"""
def test_interfaces(self):
"""
L{Protocol} instances provide L{IProtocol} and L{ILoggingContext}.
"""
proto = Protocol()
self.assertTrue(verifyObject(IProtocol, proto))
self.assertTrue(verifyObject(ILoggingContext, proto))
def test_logPrefix(self):
"""
L{Protocol.logPrefix} returns the protocol class's name.
"""
class SomeThing(Protocol):
pass
self.assertEqual("SomeThing", SomeThing().logPrefix())
def test_makeConnection(self):
"""
L{Protocol.makeConnection} sets the given transport on itself, and
then calls C{connectionMade}.
"""
result = []
class SomeProtocol(Protocol):
def connectionMade(self):
result.append(self.transport)
transport = object()
protocol = SomeProtocol()
protocol.makeConnection(transport)
self.assertEqual(result, [transport])
class FactoryTests(TestCase):
"""
Tests for L{protocol.Factory}.
"""
def test_interfaces(self):
"""
L{Factory} instances provide both L{IProtocolFactory} and
L{ILoggingContext}.
"""
factory = Factory()
self.assertTrue(verifyObject(IProtocolFactory, factory))
self.assertTrue(verifyObject(ILoggingContext, factory))
def test_logPrefix(self):
"""
L{Factory.logPrefix} returns the name of the factory class.
"""
class SomeKindOfFactory(Factory):
pass
self.assertEqual("SomeKindOfFactory", SomeKindOfFactory().logPrefix())
def test_defaultBuildProtocol(self):
"""
L{Factory.buildProtocol} by default constructs a protocol by calling
its C{protocol} attribute, and attaches the factory to the result.
"""
class SomeProtocol(Protocol):
pass
f = Factory()
f.protocol = SomeProtocol
protocol = f.buildProtocol(None)
self.assertIsInstance(protocol, SomeProtocol)
self.assertIs(protocol.factory, f)
def test_forProtocol(self):
"""
L{Factory.forProtocol} constructs a Factory, passing along any
additional arguments, and sets its C{protocol} attribute to the given
Protocol subclass.
"""
class ArgTakingFactory(Factory):
def __init__(self, *args, **kwargs):
self.args, self.kwargs = args, kwargs
factory = ArgTakingFactory.forProtocol(Protocol, 1, 2, foo=12)
self.assertEqual(factory.protocol, Protocol)
self.assertEqual(factory.args, (1, 2))
self.assertEqual(factory.kwargs, {"foo": 12})
def test_doStartLoggingStatement(self):
"""
L{Factory.doStart} logs that it is starting a factory, followed by
the L{repr} of the L{Factory} instance that is being started.
"""
events = []
globalLogPublisher.addObserver(events.append)
self.addCleanup(
lambda: globalLogPublisher.removeObserver(events.append))
f = Factory()
f.doStart()
self.assertIs(events[0]['factory'], f)
self.assertEqual(events[0]['log_level'], LogLevel.info)
self.assertEqual(events[0]['log_format'],
'Starting factory {factory!r}')
def test_doStopLoggingStatement(self):
"""
L{Factory.doStop} logs that it is stopping a factory, followed by
the L{repr} of the L{Factory} instance that is being stopped.
"""
events = []
globalLogPublisher.addObserver(events.append)
self.addCleanup(
lambda: globalLogPublisher.removeObserver(events.append))
class MyFactory(Factory):
numPorts = 1
f = MyFactory()
f.doStop()
self.assertIs(events[0]['factory'], f)
self.assertEqual(events[0]['log_level'], LogLevel.info)
self.assertEqual(events[0]['log_format'],
'Stopping factory {factory!r}')
class AdapterTests(TestCase):
"""
Tests for L{ProtocolToConsumerAdapter} and L{ConsumerToProtocolAdapter}.
"""
def test_protocolToConsumer(self):
"""
L{IProtocol} providers can be adapted to L{IConsumer} providers using
L{ProtocolToConsumerAdapter}.
"""
result = []
p = Protocol()
p.dataReceived = result.append
consumer = IConsumer(p)
consumer.write(b"hello")
self.assertEqual(result, [b"hello"])
self.assertIsInstance(consumer, ProtocolToConsumerAdapter)
def test_consumerToProtocol(self):
"""
L{IConsumer} providers can be adapted to L{IProtocol} providers using
L{ProtocolToConsumerAdapter}.
"""
result = []
@implementer(IConsumer)
class Consumer(object):
def write(self, d):
result.append(d)
c = Consumer()
protocol = IProtocol(c)
protocol.dataReceived(b"hello")
self.assertEqual(result, [b"hello"])
self.assertIsInstance(protocol, ConsumerToProtocolAdapter)
|
the-stack_0_11118 | import asyncio
import re
import requests
import spotipy
from aiohttp import ClientSession
from nextcord import User
from emoji import demojize
from googleapiclient.discovery import build
from spotipy.oauth2 import SpotifyClientCredentials
from src.bot.__tokens__ import __tokens__
from src.music.song import Song
youtube = build('youtube', 'v3', developerKey=__tokens__['google'])
sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials(
__tokens__['spotify_client'], __tokens__['spotify']))
async def get_songs(requester: User, query: str) -> list[Song]:
# Youtube
if query.find('youtube') != -1:
# Playlist
if query.find('list=') != -1:
playlist_id = query[query.find('list=')+5:]
if playlist_id.find('&') != -1:
playlist_id = playlist_id[:playlist_id.find('&')]
return await get_youtube_playlist(requester, playlist_id)
        # Video
if query.find('watch?v=') != -1:
video_id = query[query.find('watch?v=')+8:]
if video_id.find('&') != -1:
video_id = video_id[:video_id.find('&')]
return await get_youtube_video(requester, [video_id])
# Spotify
if query.find('spotify') != -1:
# Playlist
if query.find('playlist/') != -1:
return await get_spotify_playlist(requester, query[query.find('playlist/') + 9:])
# Video
return await getSpotifyTrack(requester, query[query.find('track/')+6:])
# Youtube Search
return await search_youtube_video(requester, query)
async def get_youtube_playlist(requester: User, playlist_id: str) -> list[Song]:
playlist = []
response = {'nextPageToken': None}
# Go through each playlist page and extract all videos in it
while True:
video_ids = []
if 'nextPageToken' not in response.keys():
break
request = youtube.playlistItems().list(
part='contentDetails, snippet',
maxResults=50,
pageToken=response['nextPageToken'],
playlistId=playlist_id
)
response = request.execute()
if response['items']:
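            # Entries without thumbnail data are typically deleted or private
            # videos, so they are skipped here.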
for video in response['items'][:-1]:
if video['snippet']['thumbnails']:
video_ids.append(video['contentDetails']['videoId'])
playlist += await get_youtube_video(requester, video_ids)
return playlist
async def get_youtube_video(requester: User, video_ids: list) -> list[Song]:
videos = []
if video_ids:
id_string = ''.join(video_id + ',' for video_id in video_ids[:-1])
id_string += video_ids[-1]
request = youtube.videos().list(
part='snippet,contentDetails',
id=id_string
)
response = request.execute()
for video in response['items']:
videos.append(Song(requester, video))
return videos
async def search_youtube_video(requester: User, query: str, max_results: int = 1) -> list[Song]:
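    # The `sp=EgIQAQ%253D%253D` parameter appears to be YouTube's double
    # URL-encoded "Videos only" search filter; video ids are then scraped from
    # the result page with an 11-character `watch?v=` regex.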
url = demojize(f'https://www.youtube.com/results?search_query={query.replace(" ", "+")}&sp=EgIQAQ%253D%253D')
response = requests.get(url)
return await get_youtube_video(requester, re.findall(r'watch\?v=(\S{11})', response.text)[:max_results])
async def fetch(url: str, session) -> str:
async with session.get(demojize(url)) as response:
html_body = await response.read()
ids = re.findall(r'watch\?v=(\S{11})', html_body.decode())
if ids and len(ids):
return ids[0]
else:
return ""
async def youtube_multi_search(queries: list[str]) -> list[str]:
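    # Run one fetch task per query concurrently over a shared aiohttp session;
    # queries that yield no video id are dropped by the final filter.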
async with ClientSession() as session:
tasks = []
for query in queries:
url = f'https://www.youtube.com/results?search_query={query.replace(" ", "+")}&sp=EgIQAQ%253D%253D'
tasks.append(
asyncio.create_task(
fetch(url, session)
)
)
pages = await asyncio.gather(*tasks)
return list(filter(None, pages))
async def get_spotify_playlist(requester: User, playlist_id: str) -> list[Song]:
songs = []
results = sp.playlist(playlist_id)
tracks = results['tracks']
items = [await get_track_query(track_meta) for track_meta in tracks['items']]
while tracks['next']:
tracks = sp.next(tracks)
items.extend([await get_track_query(track_meta) for track_meta in tracks['items']])
ids = await youtube_multi_search(items)
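    # The YouTube Data API's videos.list call used by get_youtube_video accepts
    # at most 50 ids per request, so resolve the collected ids in chunks of 50.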
for x in range(0, len(ids), 50):
songs.extend(await get_youtube_video(requester, ids[x:x+50]))
return songs
async def get_track_query(track_meta):
return f'{track_meta["track"]["name"]} {track_meta["track"]["album"]["artists"][0]["name"]}'
async def getSpotifyTrack(requester: User, track_id: str) -> list[Song]:
meta = sp.track(track_id)
track_name = f'{meta["name"]} {meta["album"]["artists"][0]["name"]}'
return await search_youtube_video(requester, track_name)
|
the-stack_0_11120 | # Write an algorithm that receives the value of the minimum wage and the value of an employee's salary,
# then calculates and displays how many minimum wages that employee earns.
salarioMin=float(input("Informe o valor do salário mín:"))
salarioFun=float(input("Informe o valor do salário do funcionário:"))
qtdSalMin= salarioFun/salarioMin
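# Worked example: with a minimum wage of 1320.00 and a salary of 3960.00,
# qtdSalMin = 3960.00 / 1320.00 = 3.00 minimum wages.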
if(qtdSalMin < 1):
print("O funcionário ganha menos que um salário mínimo!")
else:
print("O funcionário recebe {0:.2f}".format(round(qtdSalMin,2))," salários mínimos." ) |
the-stack_0_11124 | # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import time
import urllib
from tempest_lib import exceptions as lib_exc
from tempest.common import service_client
from tempest import exceptions
class OrchestrationClient(service_client.ServiceClient):
def list_stacks(self, params=None):
"""Lists all stacks for a user."""
uri = 'stacks'
if params:
uri += '?%s' % urllib.urlencode(params)
resp, body = self.get(uri)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['stacks'])
def create_stack(self, name, disable_rollback=True, parameters=None,
timeout_mins=60, template=None, template_url=None,
environment=None, files=None):
if parameters is None:
parameters = {}
headers, body = self._prepare_update_create(
name,
disable_rollback,
parameters,
timeout_mins,
template,
template_url,
environment,
files)
uri = 'stacks'
resp, body = self.post(uri, headers=headers, body=body)
self.expected_success(201, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def update_stack(self, stack_identifier, name, disable_rollback=True,
parameters=None, timeout_mins=60, template=None,
template_url=None, environment=None, files=None):
if parameters is None:
parameters = {}
headers, body = self._prepare_update_create(
name,
disable_rollback,
parameters,
timeout_mins,
template,
template_url,
environment)
uri = "stacks/%s" % stack_identifier
resp, body = self.put(uri, headers=headers, body=body)
self.expected_success(202, resp.status)
return service_client.ResponseBody(resp, body)
def _prepare_update_create(self, name, disable_rollback=True,
parameters=None, timeout_mins=60,
template=None, template_url=None,
environment=None, files=None):
if parameters is None:
parameters = {}
post_body = {
"stack_name": name,
"disable_rollback": disable_rollback,
"parameters": parameters,
"timeout_mins": timeout_mins,
"template": "HeatTemplateFormatVersion: '2012-12-12'\n",
"environment": environment,
"files": files
}
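        # The HeatTemplateFormatVersion header above is only a minimal
        # placeholder; it is overridden below when a template or template_url
        # argument is supplied.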
if template:
post_body['template'] = template
if template_url:
post_body['template_url'] = template_url
body = json.dumps(post_body)
# Password must be provided on stack create so that heat
# can perform future operations on behalf of the user
headers = self.get_headers()
headers['X-Auth-Key'] = self.password
headers['X-Auth-User'] = self.user
return headers, body
def get_stack(self, stack_identifier):
"""Returns the details of a single stack."""
url = "stacks/%s" % stack_identifier
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['stack'])
def suspend_stack(self, stack_identifier):
"""Suspend a stack."""
url = 'stacks/%s/actions' % stack_identifier
body = {'suspend': None}
resp, body = self.post(url, json.dumps(body))
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp)
def resume_stack(self, stack_identifier):
"""Resume a stack."""
url = 'stacks/%s/actions' % stack_identifier
body = {'resume': None}
resp, body = self.post(url, json.dumps(body))
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp)
def list_resources(self, stack_identifier):
"""Returns the details of a single resource."""
url = "stacks/%s/resources" % stack_identifier
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['resources'])
def get_resource(self, stack_identifier, resource_name):
"""Returns the details of a single resource."""
url = "stacks/%s/resources/%s" % (stack_identifier, resource_name)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['resource'])
def delete_stack(self, stack_identifier):
"""Deletes the specified Stack."""
resp, _ = self.delete("stacks/%s" % str(stack_identifier))
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def wait_for_resource_status(self, stack_identifier, resource_name,
status, failure_pattern='^.*_FAILED$'):
"""Waits for a Resource to reach a given status."""
start = int(time.time())
fail_regexp = re.compile(failure_pattern)
while True:
try:
body = self.get_resource(
stack_identifier, resource_name)
except lib_exc.NotFound:
# ignore this, as the resource may not have
# been created yet
pass
else:
resource_name = body['resource_name']
resource_status = body['resource_status']
if resource_status == status:
return
if fail_regexp.search(resource_status):
raise exceptions.StackResourceBuildErrorException(
resource_name=resource_name,
stack_identifier=stack_identifier,
resource_status=resource_status,
resource_status_reason=body['resource_status_reason'])
if int(time.time()) - start >= self.build_timeout:
message = ('Resource %s failed to reach %s status '
'(current %s) within the required time (%s s).' %
(resource_name,
status,
resource_status,
self.build_timeout))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
def wait_for_stack_status(self, stack_identifier, status,
failure_pattern='^.*_FAILED$'):
"""Waits for a Stack to reach a given status."""
start = int(time.time())
fail_regexp = re.compile(failure_pattern)
while True:
try:
body = self.get_stack(stack_identifier)
except lib_exc.NotFound:
if status == 'DELETE_COMPLETE':
return
stack_name = body['stack_name']
stack_status = body['stack_status']
if stack_status == status:
return body
if fail_regexp.search(stack_status):
raise exceptions.StackBuildErrorException(
stack_identifier=stack_identifier,
stack_status=stack_status,
stack_status_reason=body['stack_status_reason'])
if int(time.time()) - start >= self.build_timeout:
message = ('Stack %s failed to reach %s status (current: %s) '
'within the required time (%s s).' %
(stack_name, status, stack_status,
self.build_timeout))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
def show_resource_metadata(self, stack_identifier, resource_name):
"""Returns the resource's metadata."""
url = ('stacks/{stack_identifier}/resources/{resource_name}'
'/metadata'.format(**locals()))
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['metadata'])
def list_events(self, stack_identifier):
"""Returns list of all events for a stack."""
url = 'stacks/{stack_identifier}/events'.format(**locals())
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['events'])
def list_resource_events(self, stack_identifier, resource_name):
"""Returns list of all events for a resource from stack."""
url = ('stacks/{stack_identifier}/resources/{resource_name}'
'/events'.format(**locals()))
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['events'])
def show_event(self, stack_identifier, resource_name, event_id):
"""Returns the details of a single stack's event."""
url = ('stacks/{stack_identifier}/resources/{resource_name}/events'
'/{event_id}'.format(**locals()))
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body['event'])
def show_template(self, stack_identifier):
"""Returns the template for the stack."""
url = ('stacks/{stack_identifier}/template'.format(**locals()))
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def _validate_template(self, post_body):
"""Returns the validation request result."""
post_body = json.dumps(post_body)
resp, body = self.post('validate', post_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def validate_template(self, template, parameters=None):
"""Returns the validation result for a template with parameters."""
if parameters is None:
parameters = {}
post_body = {
'template': template,
'parameters': parameters,
}
return self._validate_template(post_body)
def validate_template_url(self, template_url, parameters=None):
"""Returns the validation result for a template with parameters."""
if parameters is None:
parameters = {}
post_body = {
'template_url': template_url,
'parameters': parameters,
}
return self._validate_template(post_body)
def list_resource_types(self):
"""List resource types."""
resp, body = self.get('resource_types')
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBodyList(resp, body['resource_types'])
def get_resource_type(self, resource_type_name):
"""Return the schema of a resource type."""
url = 'resource_types/%s' % resource_type_name
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, json.loads(body))
def get_resource_type_template(self, resource_type_name):
"""Return the template of a resource type."""
url = 'resource_types/%s/template' % resource_type_name
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, json.loads(body))
def create_software_config(self, name=None, config=None, group=None,
inputs=None, outputs=None, options=None):
headers, body = self._prep_software_config_create(
name, config, group, inputs, outputs, options)
url = 'software_configs'
resp, body = self.post(url, headers=headers, body=body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def get_software_config(self, conf_id):
"""Returns a software configuration resource."""
url = 'software_configs/%s' % str(conf_id)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def delete_software_config(self, conf_id):
"""Deletes a specific software configuration."""
url = 'software_configs/%s' % str(conf_id)
resp, _ = self.delete(url)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def create_software_deploy(self, server_id=None, config_id=None,
action=None, status=None,
input_values=None, output_values=None,
status_reason=None, signal_transport=None):
"""Creates or updates a software deployment."""
headers, body = self._prep_software_deploy_update(
None, server_id, config_id, action, status, input_values,
output_values, status_reason, signal_transport)
url = 'software_deployments'
resp, body = self.post(url, headers=headers, body=body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def update_software_deploy(self, deploy_id=None, server_id=None,
config_id=None, action=None, status=None,
input_values=None, output_values=None,
status_reason=None, signal_transport=None):
"""Creates or updates a software deployment."""
headers, body = self._prep_software_deploy_update(
deploy_id, server_id, config_id, action, status, input_values,
output_values, status_reason, signal_transport)
url = 'software_deployments/%s' % str(deploy_id)
resp, body = self.put(url, headers=headers, body=body)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def get_software_deploy_list(self):
"""Returns a list of all deployments."""
url = 'software_deployments'
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def get_software_deploy(self, deploy_id):
"""Returns a specific software deployment."""
url = 'software_deployments/%s' % str(deploy_id)
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def get_software_deploy_meta(self, server_id):
"""Return a config metadata for a specific server."""
url = 'software_deployments/metadata/%s' % server_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def delete_software_deploy(self, deploy_id):
"""Deletes a specific software deployment."""
url = 'software_deployments/%s' % str(deploy_id)
resp, _ = self.delete(url)
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp)
def _prep_software_config_create(self, name=None, conf=None, group=None,
inputs=None, outputs=None, options=None):
"""Prepares a software configuration body."""
post_body = {}
if name is not None:
post_body["name"] = name
if conf is not None:
post_body["config"] = conf
if group is not None:
post_body["group"] = group
if inputs is not None:
post_body["inputs"] = inputs
if outputs is not None:
post_body["outputs"] = outputs
if options is not None:
post_body["options"] = options
body = json.dumps(post_body)
headers = self.get_headers()
return headers, body
def _prep_software_deploy_update(self, deploy_id=None, server_id=None,
config_id=None, action=None, status=None,
input_values=None, output_values=None,
status_reason=None,
signal_transport=None):
"""Prepares a deployment create or update (if an id was given)."""
post_body = {}
if deploy_id is not None:
post_body["id"] = deploy_id
if server_id is not None:
post_body["server_id"] = server_id
if config_id is not None:
post_body["config_id"] = config_id
if action is not None:
post_body["action"] = action
if status is not None:
post_body["status"] = status
if input_values is not None:
post_body["input_values"] = input_values
if output_values is not None:
post_body["output_values"] = output_values
if status_reason is not None:
post_body["status_reason"] = status_reason
if signal_transport is not None:
post_body["signal_transport"] = signal_transport
body = json.dumps(post_body)
headers = self.get_headers()
return headers, body
|
the-stack_0_11125 | import plistlib
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, is_platform_windows
def get_wifi(files_found, report_folder, seeker):
data_list = []
file_found = str(files_found[0])
with open(file_found, "rb") as fp:
pl = plistlib.load(fp)
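    # The plist (typically com.apple.wifi.plist) stores each remembered network
    # as a dictionary whose keys are mostly optional, hence the defensive
    # membership checks below.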
if 'List of known networks' in pl.keys():
for dic in pl['List of known networks']:
ssid = dic['SSID_STR']
bssid = ""
if 'BSSID' in dic.keys():
bssid = dic['BSSID']
netusage = ""
if 'networkUsage' in dic.keys():
netusage = str(dic['networkUsage'])
countrycode = ""
if '80211D_IE' in dic.keys():
for key2, val2 in dic['80211D_IE'].items():
if key2 == 'IE_KEY_80211D_COUNTRY_CODE':
countrycode = val2
devname = ""
mfr = ""
serialnum = ""
modelname = ""
if 'WPS_PROB_RESP_IE' in dic.keys():
for key3, val3 in dic['WPS_PROB_RESP_IE'].items():
if key3 == 'IE_KEY_WPS_DEV_NAME':
devname = val3
if key3 == 'IE_KEY_WPS_MANUFACTURER':
mfr = val3
if key3 == 'IE_KEY_WPS_SERIAL_NUM':
serialnum = val3
if key3 == 'IE_KEY_WPS_MODEL_NAME':
modelname = val3
lastjoined = ""
if 'lastJoined' in dic.keys():
lastjoined = str(dic['lastJoined'])
lastautojoined = ""
if 'lastAutoJoined' in dic.keys():
lastautojoined = str(dic['lastAutoJoined'])
enabled = ""
if 'enabled' in dic.keys():
enabled = str(dic['enabled'])
data_list.append((ssid, bssid, netusage, countrycode, devname, mfr, serialnum, modelname, lastjoined, lastautojoined, enabled))
if len(data_list) > 0:
report = ArtifactHtmlReport('Wifi')
report.start_artifact_report(report_folder, 'Wifi')
report.add_script()
data_headers = ('SSID','BSSID', 'Network usage', 'Country code', 'Device name', 'Manufacturer', 'Serial number', 'Model name', 'Last joined', 'Last autojoined', 'Enabled')
report.write_artifact_data_table(data_headers, data_list, file_found)
report.end_artifact_report()
tsvname = 'Wifi'
tsv(report_folder, data_headers, data_list, tsvname)
else:
logfunc('No Networks data')
|
the-stack_0_11126 | #!/usr/bin/env python
# -*- coding: utf8 -*-
"""
The MetadataWizard(pymdwizard) software was developed by the
U.S. Geological Survey Fort Collins Science Center.
See: https://github.com/usgs/fort-pymdwizard for current project source code
See: https://usgs.github.io/fort-pymdwizard/ for current user documentation
See: https://github.com/usgs/fort-pymdwizard/tree/master/examples
for examples of use in other scripts
License: Creative Commons Attribution 4.0 International (CC BY 4.0)
http://creativecommons.org/licenses/by/4.0/
PURPOSE
------------------------------------------------------------------------------
Module contains a variety of miscellaneous functions
SCRIPT DEPENDENCIES
------------------------------------------------------------------------------
This script is part of the pymdwizard package and is not intented to be
used independently. All pymdwizard package requirements are needed.
See imports section for external packages used in this script as well as
inter-package dependencies
U.S. GEOLOGICAL SURVEY DISCLAIMER
------------------------------------------------------------------------------
This software has been approved for release by the U.S. Geological Survey (USGS).
Although the software has been subjected to rigorous review,
the USGS reserves the right to update the software as needed pursuant to
further analysis and review. No warranty, expressed or implied, is made by
the USGS or the U.S. Government as to the functionality of the software and
related material nor shall the fact of release constitute any such warranty.
Furthermore, the software is released on condition that neither the USGS nor
the U.S. Government shall be held liable for any damages resulting from
its authorized or unauthorized use.
Any use of trade, product or firm names is for descriptive purposes only and
does not imply endorsement by the U.S. Geological Survey.
Although this information product, for the most part, is in the public domain,
it also contains copyrighted material as noted in the text. Permission to
reproduce copyrighted items for other than personal use must be secured from
the copyright owner.
------------------------------------------------------------------------------
"""
import sys
import os
import traceback
import pkg_resources
import re
try:
from urllib.parse import urlparse
except:
from urlparse import urlparse
from pathlib import Path
from datetime import datetime, timedelta
import pandas as pd
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QTextBrowser
from PyQt5.QtWidgets import QPlainTextEdit
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QComboBox
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QSettings
def set_text(widget, text):
"""
set the text of a widget regardless of it's base type
Parameters
----------
widget : QtGui:QWidget
This widget is a QlineEdit or QPlainText edit
text : str
The text that will be inserted
Returns
-------
None
"""
if isinstance(widget, QLineEdit):
widget.setText(text)
widget.setCursorPosition(0)
if isinstance(widget, QPlainTextEdit):
widget.setPlainText(text)
if isinstance(widget, QTextBrowser):
widget.setText(text)
if isinstance(widget, QComboBox):
index = widget.findText(text, Qt.MatchFixedString)
if index >= 0:
widget.setCurrentIndex(index)
else:
widget.setEditText(text)
def launch_widget(Widget, title="", **kwargs):
"""
    run a widget within its own application
Parameters
----------
widget : QWidget
title : str
The title to use for the application
Returns
-------
None
"""
try:
app = QApplication([])
app.title = title
widget = Widget(**kwargs)
widget.setWindowTitle(title)
widget.show()
sys.exit(app.exec_())
# return widget
except:
e = sys.exc_info()[0]
print('problem encountered', e)
print(traceback.format_exc())
def get_resource_path(fname):
    """
    Parameters
    ----------
    fname : str
        filename that you would like to find
    Returns
    -------
    the full file path to the resource specified
    """
    if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
        return pkg_resources.resource_filename('guanoeditor',
                                               'DATA/{}'.format(fname))
    else:
        return pkg_resources.resource_filename('guanoeditor',
                                               'resources/{}'.format(fname))
def set_window_icon(widget, remove_help=True):
"""
Add our default ducky icon to a widget
Parameters
----------
widget : PyQt widget
remove_help : Bool
        Whether to remove the help question mark icon from the title bar.
Returns
-------
None
"""
icon = QIcon(get_resource_path('icons/Ducky.ico'))
widget.setWindowIcon(icon)
if remove_help:
widget.setWindowFlags(Qt.Window |
Qt.CustomizeWindowHint |
Qt.WindowTitleHint |
Qt.WindowCloseButtonHint |
Qt.WindowStaysOnTopHint)
def get_setting(which, default=None):
"""
return a pymdwizard application setting
Parameters
----------
which: str
name of setting to return
Returns
-------
setting in native format, string, integer, etc
"""
settings = QSettings('USGS', 'guanoeditor')
if default is None:
return settings.value(which)
else:
return settings.value(which, default)
def resource_path(relative_path):
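    # When frozen with PyInstaller, bundled data files are unpacked under
    # sys._MEIPASS/DATA; otherwise the path is resolved relative to the
    # current working directory.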
if hasattr(sys, '_MEIPASS'):
relative_path = relative_path.split('/')[-1]
return os.path.join(sys._MEIPASS, f"DATA/{relative_path}")
else:
return os.path.join(os.path.abspath('.'), relative_path)
def read_namespace(fname):
namespace_df = pd.read_csv(fname)
namespace_df = namespace_df[['tag', 'description', 'required', 'data_type', 'picklist']]
# namespace_df = namespace_df[namespace_df.tag.str.startswith('NABat|')]
namespace_df.picklist = namespace_df.picklist.fillna('')
namespace_dict = namespace_df.to_dict('records')
for thing in namespace_dict:
if thing['picklist']:
thing['picklist'] = thing['picklist'].split('|')
return namespace_dict
def clean_name(fname):
if isinstance(fname, Path):
fname = str(fname)
f = Path(fname)
name = f.stem
extension = f.suffix
    # Step 1: remove anything in square brackets
    name = re.sub(r"\[.*\]", '', name)
    # Step 2: replace any non-word characters with underscores
    name = re.sub(r"\W", '_', name)
    # Step 3: replace multiple underscores with a single one
    name = re.sub(r"_+", '_', name)
    # Step 4: remove underscore-separated single digits
    name = re.sub(r"_[0-9]_", '_', name)
    # Step 5: remove non-digit characters at the beginning of the name
    name = re.sub(r"^\D+", '', name)
    # Step 6: remove trailing _000, _001, _005, _0001 etc.
    name = re.sub(r"_[0-9]{3,4}$", '', name)
return name + extension
|
the-stack_0_11128 | """Requirements specific to SQLAlchemy's own unit tests.
"""
import sys
from sqlalchemy import exc
from sqlalchemy.sql import text
from sqlalchemy.testing import exclusions
from sqlalchemy.testing.exclusions import against
from sqlalchemy.testing.exclusions import fails_if
from sqlalchemy.testing.exclusions import fails_on
from sqlalchemy.testing.exclusions import fails_on_everything_except
from sqlalchemy.testing.exclusions import LambdaPredicate
from sqlalchemy.testing.exclusions import NotPredicate
from sqlalchemy.testing.exclusions import only_if
from sqlalchemy.testing.exclusions import only_on
from sqlalchemy.testing.exclusions import skip_if
from sqlalchemy.testing.exclusions import SpecPredicate
from sqlalchemy.testing.exclusions import succeeds_if
from sqlalchemy.testing.requirements import SuiteRequirements
def no_support(db, reason):
return SpecPredicate(db, description=reason)
def exclude(db, op, spec, description=None):
return SpecPredicate(db, op, spec, description=description)
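# These thin wrappers around SpecPredicate let the requirement properties below
# read as declarative rules, e.g. skip_if(no_support("sqlite", "not supported")).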
class DefaultRequirements(SuiteRequirements):
@property
def deferrable_or_no_constraints(self):
"""Target database must support deferrable constraints."""
return skip_if(
[
no_support("firebird", "not supported by database"),
no_support("mysql", "not supported by database"),
no_support("mariadb", "not supported by database"),
no_support("mssql", "not supported by database"),
]
)
@property
def check_constraints(self):
"""Target database must support check constraints."""
return exclusions.open()
@property
def enforces_check_constraints(self):
"""Target database must also enforce check constraints."""
return self.check_constraints + fails_on(
self._mysql_check_constraints_dont_exist,
"check constraints don't enforce on MySQL, MariaDB<10.2",
)
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def implicitly_named_constraints(self):
"""target database must apply names to unnamed constraints."""
return skip_if([no_support("sqlite", "not supported by database")])
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return skip_if(no_support("sqlite", "not supported by database"))
@property
def table_ddl_if_exists(self):
"""target platform supports IF NOT EXISTS / IF EXISTS for tables."""
return only_on(["postgresql", "mysql", "mariadb", "sqlite"])
@property
def index_ddl_if_exists(self):
"""target platform supports IF NOT EXISTS / IF EXISTS for indexes."""
# mariadb but not mysql, tested up to mysql 8
return only_on(["postgresql", "mariadb", "sqlite"])
@property
def on_update_cascade(self):
"""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return skip_if(
["sqlite", "oracle"],
"target backend %(doesnt_support)s ON UPDATE CASCADE",
)
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return fails_on_everything_except("sqlite", "oracle") + skip_if(
"mssql"
)
@property
def recursive_fk_cascade(self):
"""target database must support ON DELETE CASCADE on a self-referential
foreign key"""
return skip_if(["mssql"])
@property
def deferrable_fks(self):
"""target database must support deferrable fks"""
return only_on(["oracle", "postgresql"])
@property
def foreign_key_constraint_option_reflection_ondelete(self):
return only_on(["postgresql", "mysql", "mariadb", "sqlite", "oracle"])
@property
def fk_constraint_option_reflection_ondelete_restrict(self):
return only_on(["postgresql", "sqlite", self._mysql_80])
@property
def fk_constraint_option_reflection_ondelete_noaction(self):
return only_on(["postgresql", "mysql", "mariadb", "sqlite"])
@property
def foreign_key_constraint_option_reflection_onupdate(self):
return only_on(["postgresql", "mysql", "mariadb", "sqlite"])
@property
def fk_constraint_option_reflection_onupdate_restrict(self):
return only_on(["postgresql", "sqlite", self._mysql_80])
@property
def comment_reflection(self):
return only_on(["postgresql", "mysql", "mariadb", "oracle"])
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return skip_if(
["firebird", "oracle", "mysql", "mariadb"],
"not supported by database",
)
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return skip_if(
[
no_support("firebird", "not supported by database"),
no_support("oracle", "not supported by database"),
no_support("mssql", "not supported by database"),
no_support("sybase", "not supported by database"),
]
)
@property
def non_native_boolean_unconstrained(self):
"""target database is not native boolean and allows arbitrary integers
in it's "bool" column"""
return skip_if(
[
LambdaPredicate(
lambda config: against(config, "mssql"),
"SQL Server drivers / odbc seem to change "
"their mind on this",
),
LambdaPredicate(
lambda config: config.db.dialect.supports_native_boolean,
"native boolean dialect",
),
]
)
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return skip_if(["firebird", "mssql+mxodbc"], "not supported by driver")
@property
def qmark_paramstyle(self):
return only_on(
[
"firebird",
"sqlite",
"+pyodbc",
"+mxodbc",
"mysql+oursql",
"mariadb+oursql",
]
)
@property
def named_paramstyle(self):
return only_on(["sqlite", "oracle+cx_oracle"])
@property
def format_paramstyle(self):
return only_on(
[
"mysql+mysqldb",
"mysql+pymysql",
"mysql+cymysql",
"mysql+mysqlconnector",
"mariadb+mysqldb",
"mariadb+pymysql",
"mariadb+cymysql",
"mariadb+mysqlconnector",
"postgresql+pg8000",
]
)
@property
def pyformat_paramstyle(self):
return only_on(
[
"postgresql+psycopg2",
"postgresql+psycopg2cffi",
"postgresql+pypostgresql",
"postgresql+pygresql",
"mysql+mysqlconnector",
"mysql+pymysql",
"mysql+cymysql",
"mariadb+mysqlconnector",
"mariadb+pymysql",
"mariadb+cymysql",
"mssql+pymssql",
]
)
@property
def no_quoting_special_bind_names(self):
"""Target database will quote bound parameter names, doesn't support
EXPANDING"""
return skip_if(["oracle"])
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return skip_if(["firebird", self._sqlite_file_db], "not supported (?)")
@property
def temp_table_reflection(self):
return self.temporary_tables
@property
def temp_table_reflect_indexes(self):
return skip_if(
["mssql", "firebird", self._sqlite_file_db], "not supported (?)"
)
@property
def reflectable_autoincrement(self):
"""Target database must support tables that can automatically generate
PKs assuming they were reflected.
this is essentially all the DBs in "identity" plus PostgreSQL, which
has SERIAL support. FB and Oracle (and sybase?) require the Sequence
to be explicitly added, including if the table was reflected.
"""
return skip_if(
["firebird", "oracle", "sybase"], "not supported by database"
)
@property
def insert_from_select(self):
return skip_if(["firebird"], "crashes for unknown reason")
@property
def fetch_rows_post_commit(self):
return skip_if(["firebird"], "not supported")
@property
def non_broken_binary(self):
"""target DBAPI must work fully with binary values"""
# see https://github.com/pymssql/pymssql/issues/504
return skip_if(["mssql+pymssql"])
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return skip_if(["oracle", "mssql"], "not supported by database/driver")
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
# adding mssql here since it doesn't support comparisons either,
# have observed generally bad behavior with binary / mssql.
return skip_if(["oracle", "mssql"], "not supported by database/driver")
@property
def tuple_in(self):
def _sqlite_tuple_in(config):
return against(
config, "sqlite"
) and config.db.dialect.dbapi.sqlite_version_info >= (3, 15, 0)
return only_on(
["mysql", "mariadb", "postgresql", _sqlite_tuple_in, "oracle"]
)
@property
def tuple_in_w_empty(self):
return self.tuple_in + skip_if(["oracle"])
@property
def independent_cursors(self):
"""Target must support simultaneous, independent database cursors
on a single connection."""
return skip_if(["mssql", "mysql", "mariadb"], "no driver support")
@property
def independent_connections(self):
"""
Target must support simultaneous, independent database connections.
"""
# This is also true of some configurations of UnixODBC and probably
# win32 ODBC as well.
return skip_if(
[
no_support(
"sqlite",
"independent connections disabled "
"when :memory: connections are used",
),
exclude(
"mssql",
"<",
(9, 0, 0),
"SQL Server 2005+ is required for "
"independent connections",
),
]
)
@property
def memory_process_intensive(self):
"""Driver is able to handle the memory tests which run in a subprocess
and iterate through hundreds of connections
"""
return skip_if(
[
no_support("oracle", "Oracle XE usually can't handle these"),
no_support("mssql+pyodbc", "MS ODBC drivers struggle"),
self._running_on_windows(),
]
)
@property
def updateable_autoincrement_pks(self):
"""Target must support UPDATE on autoincrement/integer primary key."""
return skip_if(
["mssql", "sybase"], "IDENTITY columns can't be updated"
)
@property
def isolation_level(self):
return only_on(
("postgresql", "sqlite", "mysql", "mariadb", "mssql", "oracle"),
"DBAPI has no isolation level support",
) + fails_on(
"postgresql+pypostgresql",
"pypostgresql bombs on multiple isolation level calls",
)
def get_isolation_levels(self, config):
levels = set(config.db.dialect._isolation_lookup)
if against(config, "sqlite"):
default = "SERIALIZABLE"
levels.add("AUTOCOMMIT")
elif against(config, "postgresql"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
elif against(config, "mysql"):
default = "REPEATABLE READ"
levels.add("AUTOCOMMIT")
elif against(config, "mariadb"):
default = "REPEATABLE READ"
levels.add("AUTOCOMMIT")
elif against(config, "mssql"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
elif against(config, "oracle"):
default = "READ COMMITTED"
levels.add("AUTOCOMMIT")
else:
raise NotImplementedError()
return {"default": default, "supported": levels}
@property
def autocommit(self):
"""target dialect supports 'AUTOCOMMIT' as an isolation_level"""
return self.isolation_level + only_if(
lambda config: "AUTOCOMMIT"
in self.get_isolation_levels(config)["supported"]
)
@property
def row_triggers(self):
"""Target must support standard statement-running EACH ROW triggers."""
return skip_if(
[
# no access to same table
no_support("mysql", "requires SUPER priv"),
no_support("mariadb", "requires SUPER priv"),
exclude("mysql", "<", (5, 0, 10), "not supported by database"),
]
)
@property
def sequences_as_server_defaults(self):
"""Target database must support SEQUENCE as a server side default."""
return only_on(
"postgresql", "doesn't support sequences as a server side default."
)
@property
def sql_expressions_inserted_as_primary_key(self):
return only_if([self.returning, self.sqlite])
@property
def computed_columns_on_update_returning(self):
return self.computed_columns + skip_if("oracle")
@property
def correlated_outer_joins(self):
"""Target must support an outer join to a subquery which
correlates to the parent."""
return skip_if(
"oracle",
'Raises "ORA-01799: a column may not be '
'outer-joined to a subquery"',
)
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return only_on(
["postgresql", "mssql", "mysql", "mariadb"],
"Backend does not support UPDATE..FROM",
)
@property
def delete_from(self):
"""Target must support DELETE FROM..FROM or DELETE..USING syntax"""
return only_on(
["postgresql", "mssql", "mysql", "mariadb", "sybase"],
"Backend does not support DELETE..FROM",
)
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE (or DELETE) where the same table is
present in a subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as::
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return fails_if(
self._mysql_not_mariadb_103,
'MySQL error 1093 "Cant specify target table '
'for update in FROM clause", resolved by MariaDB 10.3',
)
@property
def savepoints(self):
"""Target database must support savepoints."""
return skip_if(
["sqlite", "sybase", ("mysql", "<", (5, 0, 3))],
"savepoints not supported",
)
@property
def savepoints_w_release(self):
return self.savepoints + skip_if(
["oracle", "mssql"],
"database doesn't support release of savepoint",
)
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return skip_if(["firebird"], "no schema support")
@property
def cross_schema_fk_reflection(self):
"""target system must support reflection of inter-schema foreign
keys"""
return only_on(["postgresql", "mysql", "mariadb", "mssql"])
@property
def implicit_default_schema(self):
"""target system has a strong concept of 'default' schema that can
be referred to implicitly.
basically, PostgreSQL.
"""
return only_on(["postgresql"])
@property
def default_schema_name_switch(self):
return only_on(["postgresql", "oracle"])
@property
def unique_constraint_reflection(self):
return fails_on_everything_except(
"postgresql", "mysql", "mariadb", "sqlite", "oracle"
)
@property
def unique_constraint_reflection_no_index_overlap(self):
return (
self.unique_constraint_reflection
+ skip_if("mysql")
+ skip_if("mariadb")
+ skip_if("oracle")
)
@property
def check_constraint_reflection(self):
return fails_on_everything_except(
"postgresql",
"sqlite",
"oracle",
self._mysql_and_check_constraints_exist,
)
@property
def indexes_with_expressions(self):
return only_on(["postgresql", "sqlite>=3.9.0"])
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return only_on(["sqlite", "oracle"]) + skip_if(self._sqlite_file_db)
@property
def temporary_views(self):
"""target database supports temporary views"""
return only_on(["sqlite", "postgresql"]) + skip_if(
self._sqlite_file_db
)
@property
def update_nowait(self):
"""Target database must support SELECT...FOR UPDATE NOWAIT"""
return skip_if(
["firebird", "mssql", "mysql", "mariadb", "sqlite", "sybase"],
"no FOR UPDATE NOWAIT support",
)
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def ctes(self):
"""Target database supports CTEs"""
return only_on(
[
lambda config: against(config, "mysql")
and (
(
config.db.dialect._is_mariadb
and config.db.dialect._mariadb_normalized_version_info
>= (10, 2)
)
or (
not config.db.dialect._is_mariadb
and config.db.dialect.server_version_info >= (8,)
)
),
"mariadb>10.2",
"postgresql",
"mssql",
"oracle",
"sqlite>=3.8.3",
]
)
@property
def ctes_with_update_delete(self):
"""target database supports CTES that ride on top of a normal UPDATE
or DELETE statement which refers to the CTE in a correlated subquery.
"""
return only_on(
[
"postgresql",
"mssql",
# "oracle" - oracle can do this but SQLAlchemy doesn't support
# their syntax yet
]
)
@property
def ctes_on_dml(self):
"""target database supports CTES which consist of INSERT, UPDATE
or DELETE *within* the CTE, e.g. WITH x AS (UPDATE....)"""
return only_if(["postgresql"])
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return only_if(
["mysql", "mariadb", "sqlite", "postgresql+psycopg2", "mssql"]
)
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return fails_if(
["firebird", self._mysql_not_mariadb_103, "sybase"],
"no support for INTERSECT",
)
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return fails_if(
["firebird", self._mysql_not_mariadb_103, "sybase"],
"no support for EXCEPT",
)
@property
def order_by_col_from_union(self):
"""target database supports ordering by a column from a SELECT
inside of a UNION
E.g. (SELECT id, ...) UNION (SELECT id, ...) ORDER BY id
Fails on SQL Server
"""
return fails_if("mssql")
@property
def parens_in_union_contained_select_w_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when LIMIT/OFFSET is specifically present.
E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
This is known to fail on SQLite.
"""
return fails_if("sqlite")
@property
def parens_in_union_contained_select_wo_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when OFFSET/LIMIT is specifically not present.
E.g. (SELECT ...) UNION (SELECT ..)
This is known to fail on SQLite. It also fails on Oracle
because without LIMIT/OFFSET, there is currently no step that
creates an additional subquery.
"""
return fails_if(["sqlite", "oracle"])
@property
def offset(self):
"""Target database must support some method of adding OFFSET or
equivalent to a result set."""
return fails_if(["sybase"], "no support for OFFSET or equivalent")
@property
def sql_expression_limit_offset(self):
return (
fails_if(
["mysql", "mariadb"],
"Target backend can't accommodate full expressions in "
"OFFSET or LIMIT",
)
+ self.offset
)
@property
def window_functions(self):
return only_if(
["postgresql>=8.4", "mssql", "oracle", "sqlite>=3.25.0"],
"Backend does not support window functions",
)
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
def pg_prepared_transaction(config):
if not against(config, "postgresql"):
return True
with config.db.connect() as conn:
try:
num = conn.scalar(
text(
"select cast(setting AS integer) from pg_settings "
"where name = 'max_prepared_transactions'"
)
)
except exc.OperationalError:
return False
else:
return num > 0
return skip_if(
[
no_support("firebird", "no SA implementation"),
no_support("mssql", "two-phase xact not supported by drivers"),
no_support(
"oracle", "two-phase xact not implemented in SQLA/oracle"
),
no_support(
"sqlite", "two-phase xact not supported by database"
),
no_support(
"sybase", "two-phase xact not supported by drivers/SQLA"
),
# in Ia3cbbf56d4882fcc7980f90519412f1711fae74d
# we are evaluating which modern MySQL / MariaDB versions
# can handle two-phase testing without too many problems
# no_support(
# "mysql",
#     "recent MySQL community editions have too many issues "
# "(late 2016), disabling for now",
# ),
NotPredicate(
LambdaPredicate(
pg_prepared_transaction,
"max_prepared_transactions not available or zero",
)
),
]
)
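# Hedged note (added, not in the original file): the LambdaPredicate above probes
# PostgreSQL's max_prepared_transactions setting, which defaults to 0 and thereby
# disables PREPARE TRANSACTION. A server meant to run these tests would typically
# be configured along these lines:
#
#     -- check the current value
#     SHOW max_prepared_transactions;
#     -- postgresql.conf (change requires a server restart)
#     max_prepared_transactions = 10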
@property
def two_phase_recovery(self):
return self.two_phase_transactions + (
skip_if(
["mysql", "mariadb"],
"still can't get recover to work w/ MariaDB / MySQL",
)
)
@property
def views(self):
"""Target database must support VIEWs."""
return skip_if("drizzle", "no VIEW support")
@property
def empty_strings_varchar(self):
"""
target database can persist/return an empty string with a varchar.
"""
return fails_if(
["oracle"], "oracle converts empty strings to a blank space"
)
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return fails_if(
["oracle"], "oracle converts empty strings to a blank space"
)
@property
def expressions_against_unbounded_text(self):
"""target database supports use of an unbounded textual field in a
WHERE clause."""
return fails_if(
["oracle"],
"ORA-00932: inconsistent datatypes: expected - got CLOB",
)
@property
def unicode_data(self):
"""target drive must support unicode data stored in columns."""
return skip_if([no_support("sybase", "no unicode driver support")])
@property
def unicode_connections(self):
"""
Target driver must support some encoding of Unicode across the wire.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol names."""
return skip_if(
[
no_support("oracle", "FIXME: no support in database?"),
no_support("sybase", "FIXME: guessing, needs confirmation"),
no_support("mssql+pymssql", "no FreeTDS support"),
]
)
@property
def symbol_names_w_double_quote(self):
"""Target driver can create tables with a name like 'some " table'"""
return skip_if(
[no_support("oracle", "ORA-03001: unimplemented feature")]
)
@property
def emulated_lastrowid(self):
""" "target dialect retrieves cursor.lastrowid or an equivalent
after an insert() construct executes.
"""
return fails_on_everything_except(
"mysql",
"mariadb",
"sqlite+pysqlite",
"sqlite+pysqlcipher",
"sybase",
"mssql",
)
@property
def emulated_lastrowid_even_with_sequences(self):
""" "target dialect retrieves cursor.lastrowid or an equivalent
after an insert() construct executes, even if the table has a
Sequence on it.
"""
return fails_on_everything_except(
"mysql",
"mariadb",
"sqlite+pysqlite",
"sqlite+pysqlcipher",
"sybase",
)
@property
def implements_get_lastrowid(self):
return skip_if([no_support("sybase", "not supported by database")])
@property
def dbapi_lastrowid(self):
""" "target backend includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return skip_if(
"mssql+pymssql", "crashes on pymssql"
) + fails_on_everything_except(
"mysql",
"mariadb",
"sqlite+pysqlite",
"sqlite+pysqlcipher",
"mssql",
)
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return fails_on_everything_except(
"postgresql", "oracle", "firebird", "sqlite >= 3.30.0"
)
@property
def reflects_pk_names(self):
"""Target driver reflects the name of primary key constraints."""
return fails_on_everything_except(
"postgresql", "oracle", "mssql", "sybase", "sqlite"
)
@property
def nested_aggregates(self):
"""target database can select an aggregate from a subquery that's
also using an aggregate"""
return skip_if(["mssql", "sqlite"])
@property
def array_type(self):
return only_on(
[
lambda config: against(config, "postgresql")
and not against(config, "+pg8000")
]
)
@property
def json_type(self):
return only_on(
[
lambda config: against(config, "mysql")
and (
(
not config.db.dialect._is_mariadb
and against(config, "mysql >= 5.7")
)
or (
config.db.dialect._mariadb_normalized_version_info
>= (10, 2, 7)
)
),
"mariadb>=10.2.7",
"postgresql >= 9.3",
self._sqlite_json,
"mssql",
]
)
@property
def json_index_supplementary_unicode_element(self):
# for sqlite see https://bugs.python.org/issue38749
return skip_if(
[
lambda config: against(config, "mysql")
and config.db.dialect._is_mariadb,
"mariadb",
"sqlite",
]
)
@property
def legacy_unconditional_json_extract(self):
"""Backend has a JSON_EXTRACT or similar function that returns a
valid JSON string in all cases.
Used to test a legacy feature and is not needed.
"""
return self.json_type + only_on(
["postgresql", "mysql", "mariadb", "sqlite"]
)
def _sqlite_file_db(self, config):
return against(config, "sqlite") and config.db.dialect._is_url_file_db(
config.db.url
)
def _sqlite_memory_db(self, config):
return against(
config, "sqlite"
) and not config.db.dialect._is_url_file_db(config.db.url)
def _sqlite_json(self, config):
if not against(config, "sqlite >= 3.9"):
return False
else:
with config.db.connect() as conn:
try:
return (
conn.exec_driver_sql(
"""select json_extract('{"foo": "bar"}', """
"""'$."foo"')"""
).scalar()
== "bar"
)
except exc.DBAPIError:
return False
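# Hedged illustration (added): the probe above amounts to running the following
# against a SQLite build that ships the JSON1 extension; any DBAPIError is taken
# to mean "no JSON support".
#
#     sqlite> select json_extract('{"foo": "bar"}', '$."foo"');
#     bar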
@property
def reflects_json_type(self):
return only_on(
[
lambda config: against(config, "mysql >= 5.7")
and not config.db.dialect._is_mariadb,
"postgresql >= 9.3",
"sqlite >= 3.9",
]
)
@property
def json_array_indexes(self):
return self.json_type
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return fails_on_everything_except("sqlite")
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return skip_if(
["mssql", "mysql", "mariadb", "firebird", "oracle", "sybase"]
)
@property
def timestamp_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects but only
if TIMESTAMP is used."""
return only_on(["oracle"])
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return succeeds_if(["sqlite", "postgresql", "firebird"])
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
# does not work as of pyodbc 4.0.22
return fails_on("mysql+mysqlconnector") + skip_if("mssql+pyodbc")
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return succeeds_if(["sqlite", "postgresql", "firebird"])
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return skip_if(["oracle"])
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return skip_if(
["mssql", "mysql", "mariadb", "firebird", "oracle", "sybase"]
)
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
# NOTE: this exclusion isn't used in current tests.
return exclusions.open()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return fails_if(
[
(
"sybase+pyodbc",
None,
None,
"Don't know how do get these values through "
"FreeTDS + Sybase",
),
("firebird", None, None, "Precision must be from 1 to 18"),
]
)
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
def broken_cx_oracle(config):
return (
against(config, "oracle+cx_oracle")
and config.db.dialect.cx_oracle_ver <= (6, 0, 2)
and config.db.dialect.cx_oracle_ver > (6,)
)
return fails_if(
[
("sqlite", None, None, "TODO"),
("firebird", None, None, "Precision must be from 1 to 18"),
("sybase+pysybase", None, None, "TODO"),
]
)
@property
def cast_precision_numerics_many_significant_digits(self):
"""same as precision_numerics_many_significant_digits but within the
context of a CAST statement (hello MySQL)
"""
return self.precision_numerics_many_significant_digits + fails_if(
"mysql"
)
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return fails_if(
[
("oracle", None, None, "driver doesn't do this automatically"),
(
"firebird",
None,
None,
"database and/or driver truncates decimal places.",
),
]
)
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type."""
return fails_if(
[
(
"mysql",
None,
None,
"mysql FLOAT type only returns 4 decimals",
),
(
"mariadb",
None,
None,
"mysql FLOAT type only returns 4 decimals",
),
(
"firebird",
None,
None,
"firebird FLOAT type isn't high precision",
),
]
)
@property
def floats_to_four_decimals(self):
return fails_if(
[
("mysql+oursql", None, None, "Floating point error"),
("mariadb+oursql", None, None, "Floating point error"),
(
"firebird",
None,
None,
"Firebird still has FP inaccuracy even "
"with only four decimal places",
),
]
)
@property
def implicit_decimal_binds(self):
"""target backend will return a selected Decimal as a Decimal, not
a string.
e.g.::
expr = decimal.Decimal("15.7563")
value = e.scalar(
select(literal(expr))
)
assert value == expr
See :ticket:`4036`
"""
return exclusions.open()
@property
def fetch_null_from_numeric(self):
return skip_if(("mssql+pyodbc", None, None, "crashes due to bug #351"))
@property
def duplicate_key_raises_integrity_error(self):
return exclusions.open()
def _has_pg_extension(self, name):
def check(config):
if not against(config, "postgresql"):
return False
count = (
config.db.connect(close_with_result=True)
.exec_driver_sql(
"SELECT count(*) FROM pg_extension "
"WHERE extname='%s'" % name
)
.scalar()
)
return bool(count)
return only_if(check, "needs %s extension" % name)
@property
def hstore(self):
return self._has_pg_extension("hstore")
@property
def btree_gist(self):
return self._has_pg_extension("btree_gist")
@property
def range_types(self):
def check_range_types(config):
if not against(
config, ["postgresql+psycopg2", "postgresql+psycopg2cffi"]
):
return False
try:
config.db.connect(close_with_result=True).exec_driver_sql(
"select '[1,2)'::int4range;"
).scalar()
return True
except Exception:
return False
return only_if(check_range_types)
@property
def async_dialect(self):
"""dialect makes use of await_() to invoke operations on the DBAPI."""
return only_on(
LambdaPredicate(
lambda config: config.db.dialect.is_async,
"Async dialect required",
)
)
@property
def oracle_test_dblink(self):
return skip_if(
lambda config: not config.file_config.has_option(
"sqla_testing", "oracle_db_link"
),
"oracle_db_link option not specified in config",
)
@property
def postgresql_test_dblink(self):
return skip_if(
lambda config: not config.file_config.has_option(
"sqla_testing", "postgres_test_db_link"
),
"postgres_test_db_link option not specified in config",
)
@property
def postgresql_jsonb(self):
return only_on("postgresql >= 9.4") + skip_if(
lambda config: config.db.dialect.driver == "pg8000"
and config.db.dialect._dbapi_version <= (1, 10, 1)
)
@property
def psycopg2_native_hstore(self):
return self.psycopg2_compatibility
@property
def psycopg2_compatibility(self):
return only_on(["postgresql+psycopg2", "postgresql+psycopg2cffi"])
@property
def psycopg2_or_pg8000_compatibility(self):
return only_on(
[
"postgresql+psycopg2",
"postgresql+psycopg2cffi",
"postgresql+pg8000",
]
)
@property
def percent_schema_names(self):
return skip_if(
["mysql+aiomysql", "mariadb+aiomysql"],
"see pr https://github.com/aio-libs/aiomysql/pull/545",
)
@property
def order_by_label_with_expression(self):
return fails_if(
[
(
"firebird",
None,
None,
"kinterbasdb doesn't send full type information",
),
("postgresql", None, None, "only simple labels allowed"),
("sybase", None, None, "only simple labels allowed"),
("mssql", None, None, "only simple labels allowed"),
]
)
def get_order_by_collation(self, config):
lookup = {
# will raise without quoting
"postgresql": "POSIX",
# note MySQL databases need to be created w/ utf8mb4 charset
# for the test suite
"mysql": "utf8mb4_bin",
"mariadb": "utf8mb4_bin",
"sqlite": "NOCASE",
# will raise *with* quoting
"mssql": "Latin1_General_CI_AS",
}
try:
return lookup[config.db.name]
except KeyError:
raise NotImplementedError()
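# Hedged usage sketch (added): callers are expected to feed the returned collation
# name into an ORDER BY; the table and column names below are illustrative only.
#
#     collation = requirements.get_order_by_collation(config)
#     stmt = select(some_table.c.name).order_by(
#         some_table.c.name.collate(collation)
#     )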
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return skip_if(
self._has_mysql_on_windows, "Not supported on MySQL + Windows"
)
@property
def mssql_freetds(self):
return only_on(["mssql+pymssql"])
@property
def legacy_engine(self):
return exclusions.skip_if(lambda config: config.db._is_future)
@property
def ad_hoc_engines(self):
return (
exclusions.skip_if(
["oracle"],
"works, but Oracle just gets tired with "
"this much connection activity",
)
+ skip_if(self._sqlite_file_db)
)
@property
def no_mssql_freetds(self):
return self.mssql_freetds.not_()
@property
def pyodbc_fast_executemany(self):
def has_fastexecutemany(config):
if not against(config, "mssql+pyodbc"):
return False
if config.db.dialect._dbapi_version() < (4, 0, 19):
return False
with config.db.connect() as conn:
drivername = conn.connection.connection.getinfo(
config.db.dialect.dbapi.SQL_DRIVER_NAME
)
# on linux this is something like 'libmsodbcsql-13.1.so.9.2'.
# on Windows this is something like 'msodbcsql17.dll'.
return "msodbc" in drivername
return only_if(
has_fastexecutemany, "only on pyodbc > 4.0.19 w/ msodbc driver"
)
@property
def python_fixed_issue_8743(self):
return exclusions.skip_if(
lambda: sys.version_info < (2, 7, 8),
"Python issue 8743 fixed in Python 2.7.8",
)
@property
def granular_timezone(self):
"""the datetime.timezone class, or SQLAlchemy's port, supports
seconds and microseconds.
SQLAlchemy ported the Python 3.7 version for Python 2, so
it passes on that. For Python 3.6 and earlier, it is not supported.
"""
return exclusions.skip_if(
lambda: sys.version_info >= (3,) and sys.version_info < (3, 7)
)
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return skip_if(
["oracle", "firebird"], "non-standard SELECT scalar syntax"
)
@property
def mysql_for_update(self):
return skip_if(
"mysql+mysqlconnector",
"lock-sensitive operations crash on mysqlconnector",
)
@property
def mysql_fsp(self):
return only_if(["mysql >= 5.6.4", "mariadb"])
@property
def mysql_fully_case_sensitive(self):
return only_if(self._has_mysql_fully_case_sensitive)
@property
def mysql_zero_date(self):
def check(config):
if not against(config, "mysql"):
return False
row = (
config.db.connect(close_with_result=True)
.exec_driver_sql("show variables like 'sql_mode'")
.first()
)
return not row or "NO_ZERO_DATE" not in row[1]
return only_if(check)
@property
def mysql_non_strict(self):
def check(config):
if not against(config, "mysql"):
return False
row = (
config.db.connect(close_with_result=True)
.exec_driver_sql("show variables like 'sql_mode'")
.first()
)
return not row or "STRICT_TRANS_TABLES" not in row[1]
return only_if(check)
@property
def mysql_ngram_fulltext(self):
def check(config):
return (
against(config, "mysql")
and not config.db.dialect._is_mariadb
and config.db.dialect.server_version_info >= (5, 7)
)
return only_if(check)
def _mysql_80(self, config):
return (
against(config, "mysql")
and config.db.dialect._is_mysql
and config.db.dialect.server_version_info >= (8,)
)
def _mariadb_102(self, config):
return (
against(config, "mysql")
and config.db.dialect._is_mariadb
and config.db.dialect._mariadb_normalized_version_info > (10, 2)
)
def _mysql_and_check_constraints_exist(self, config):
# 1. we have mysql / mariadb and
# 2. it enforces check constraints
if exclusions.against(config, ["mysql", "mariadb"]):
if config.db.dialect._is_mariadb:
norm_version_info = (
config.db.dialect._mariadb_normalized_version_info
)
return norm_version_info >= (10, 2)
else:
norm_version_info = config.db.dialect.server_version_info
return norm_version_info >= (8, 0, 16)
else:
return False
def _mysql_check_constraints_exist(self, config):
# 1. we dont have mysql / mariadb or
# 2. we have mysql / mariadb that enforces check constraints
return not exclusions.against(
config, ["mysql", "mariadb"]
) or self._mysql_and_check_constraints_exist(config)
def _mysql_check_constraints_dont_exist(self, config):
# 1. we have mysql / mariadb and
# 2. they dont enforce check constraints
return not self._mysql_check_constraints_exist(config)
def _mysql_not_mariadb_102(self, config):
return (against(config, ["mysql", "mariadb"])) and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 2)
)
def _mysql_not_mariadb_103(self, config):
return (against(config, ["mysql", "mariadb"])) and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 3)
)
def _mysql_not_mariadb_104(self, config):
return (against(config, ["mysql", "mariadb"])) and (
not config.db.dialect._is_mariadb
or config.db.dialect._mariadb_normalized_version_info < (10, 4)
)
def _has_mysql_on_windows(self, config):
return (
against(config, ["mysql", "mariadb"])
) and config.db.dialect._detect_casing(config.db) == 1
def _has_mysql_fully_case_sensitive(self, config):
return (
against(config, "mysql")
and config.db.dialect._detect_casing(config.db) == 0
)
@property
def postgresql_utf8_server_encoding(self):
def go(config):
if not against(config, "postgresql"):
return False
with config.db.connect() as conn:
enc = conn.exec_driver_sql("show server_encoding").scalar()
return enc.lower() == "utf8"
return only_if(go)
@property
def cxoracle6_or_greater(self):
return only_if(
lambda config: against(config, "oracle+cx_oracle")
and config.db.dialect.cx_oracle_ver >= (6,)
)
@property
def oracle5x(self):
return only_if(
lambda config: against(config, "oracle+cx_oracle")
and config.db.dialect.cx_oracle_ver < (6,)
)
@property
def computed_columns(self):
return skip_if(["postgresql < 12", "sqlite < 3.31", "mysql < 5.7"])
@property
def python_profiling_backend(self):
return only_on([self._sqlite_memory_db])
@property
def computed_columns_stored(self):
return self.computed_columns + skip_if(["oracle", "firebird"])
@property
def computed_columns_virtual(self):
return self.computed_columns + skip_if(["postgresql", "firebird"])
@property
def computed_columns_default_persisted(self):
return self.computed_columns + only_if("postgresql")
@property
def computed_columns_reflect_persisted(self):
return self.computed_columns + skip_if("oracle")
@property
def regexp_match(self):
return only_on(["postgresql", "mysql", "mariadb", "oracle", "sqlite"])
@property
def regexp_replace(self):
return only_on(["postgresql", "mysql>=8", "mariadb", "oracle"])
@property
def supports_distinct_on(self):
"""If a backend supports the DISTINCT ON in a select"""
return only_if(["postgresql"])
@property
def supports_for_update_of(self):
return only_if(lambda config: config.db.dialect.supports_for_update_of)
@property
def sequences_in_other_clauses(self):
"""sequences allowed in WHERE, GROUP BY, HAVING, etc."""
return skip_if(["mssql", "oracle"])
@property
def supports_lastrowid_for_expressions(self):
"""cursor.lastrowid works if an explicit SQL expression was used."""
return only_on(["sqlite", "mysql", "mariadb"])
@property
def supports_sequence_for_autoincrement_column(self):
"""for mssql, autoincrement means IDENTITY, not sequence"""
return skip_if("mssql")
@property
def identity_columns(self):
return only_if(["postgresql >= 10", "oracle >= 12", "mssql"])
@property
def identity_columns_standard(self):
return self.identity_columns + skip_if("mssql")
@property
def index_reflects_included_columns(self):
return only_on(["postgresql >= 11", "mssql"])
# mssql>= 11 -> >= MS_2012_VERSION
@property
def fetch_first(self):
return only_on(["postgresql", "mssql >= 11", "oracle >= 12"])
@property
def fetch_percent(self):
return only_on(["mssql >= 11", "oracle >= 12"])
@property
def fetch_ties(self):
return only_on(["postgresql >= 13", "mssql >= 11", "oracle >= 12"])
@property
def fetch_no_order_by(self):
return only_on(["postgresql", "oracle >= 12"])
@property
def fetch_offset_with_options(self):
return skip_if("mssql")
|
the-stack_0_11129 | #!/usr/bin/env python3
from PIL import Image
from struct import pack
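# Added descriptive note (hedged, not in the original script): this converts PNG
# images into raw, alpha-premultiplied BGRA pixel dumps, presumably so a consumer
# (e.g. a framebuffer blitter) can copy them without further conversion. pre()
# multiplies R/G/B by A/255; write() emits 4 bytes per pixel in B, G, R, A order,
# row by row from the top-left corner.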
def pre(p):
p = list(p)
p[0] = p[0]*p[3]//255
p[1] = p[1]*p[3]//255
p[2] = p[2]*p[3]//255
return p
def write(i, o, X, Y):
for y in range(Y):
for x in range(X):
p = pre(i.getpixel((x, y)))
o.write(pack('4B', p[2], p[1], p[0], p[3]))
i = Image.open('images/fish.png')
with open('fish.bin', 'wb') as o:
write(i, o, 100, 59)
i = Image.open('images/window.png')
with open('window.bin', 'wb') as o:
write(i, o, 320, 200)
|
the-stack_0_11131 | #!/usr/bin/python3 -OO
# Copyright 2007-2019 The SABnzbd-Team <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.newsunpack
"""
import os
import sys
import re
import subprocess
import logging
import time
import binascii
import shutil
import functools
from subprocess import Popen
import sabnzbd
from sabnzbd.encoding import platform_btou
import sabnzbd.utils.rarfile as rarfile
from sabnzbd.misc import format_time_string, find_on_path, int_conv, \
get_all_passwords, calc_age, cmp, caller_name
from sabnzbd.filesystem import make_script_path, real_path, globber, globber_full, \
renamer, clip_path, long_path, remove_file, recursive_listdir, setname_from_path
from sabnzbd.sorting import SeriesSorter
import sabnzbd.cfg as cfg
from sabnzbd.constants import Status
if sabnzbd.WIN32:
try:
import win32api
import win32con
import win32process
# Define scheduling priorities
WIN_SCHED_PRIOS = {1: win32process.IDLE_PRIORITY_CLASS, 2: win32process.BELOW_NORMAL_PRIORITY_CLASS,
3: win32process.NORMAL_PRIORITY_CLASS, 4: win32process.ABOVE_NORMAL_PRIORITY_CLASS,}
except ImportError:
pass
else:
# Define dummy WindowsError for non-Windows
class WindowsError(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
# Regex globals
RAR_RE = re.compile(r'\.(?P<ext>part\d*\.rar|rar|r\d\d|s\d\d|t\d\d|u\d\d|v\d\d|\d\d\d?\d)$', re.I)
RAR_RE_V3 = re.compile(r'\.(?P<ext>part\d*)$', re.I)
LOADING_RE = re.compile(r'^Loading "(.+)"')
TARGET_RE = re.compile(r'^(?:File|Target): "(.+)" -')
EXTRACTFROM_RE = re.compile(r'^Extracting\sfrom\s(.+)')
EXTRACTED_RE = re.compile(r'^(Extracting|Creating|...)\s+(.*?)\s+OK\s*$')
SPLITFILE_RE = re.compile(r'\.(\d\d\d?\d$)', re.I)
ZIP_RE = re.compile(r'\.(zip$)', re.I)
SEVENZIP_RE = re.compile(r'\.7z$', re.I)
SEVENMULTI_RE = re.compile(r'\.7z\.\d+$', re.I)
TS_RE = re.compile(r'\.(\d+)\.(ts$)', re.I)
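# Hedged examples (added, not in the original source): filenames the patterns
# above are intended to recognize.
#   RAR_RE        "movie.part01.rar", "movie.rar", "movie.r00", "movie.001"
#   RAR_RE_V3     "movie.part01" (set name with the old ".partNN" suffix)
#   SPLITFILE_RE  "archive.001", "archive.0001"
#   TS_RE         "show.0001.ts" (group 1 captures the sequence number)
#   SEVENZIP_RE / SEVENMULTI_RE   "files.7z" / "files.7z.001"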
PAR2_COMMAND = None
MULTIPAR_COMMAND = None
RAR_COMMAND = None
NICE_COMMAND = None
ZIP_COMMAND = None
SEVEN_COMMAND = None
IONICE_COMMAND = None
RAR_PROBLEM = False
PAR2_MT = True
RAR_VERSION = 0
def find_programs(curdir):
""" Find external programs """
def check(path, program):
p = os.path.abspath(os.path.join(path, program))
if os.access(p, os.X_OK):
return p
else:
return None
if sabnzbd.DARWIN:
sabnzbd.newsunpack.PAR2_COMMAND = check(curdir, 'osx/par2/par2-sl64')
sabnzbd.newsunpack.RAR_COMMAND = check(curdir, 'osx/unrar/unrar')
sabnzbd.newsunpack.SEVEN_COMMAND = check(curdir, 'osx/7zip/7za')
if sabnzbd.WIN32:
if sabnzbd.WIN64:
# 64 bit versions
sabnzbd.newsunpack.MULTIPAR_COMMAND = check(curdir, 'win/par2/multipar/par2j64.exe')
sabnzbd.newsunpack.RAR_COMMAND = check(curdir, 'win/unrar/x64/UnRAR.exe')
else:
# 32 bit versions
sabnzbd.newsunpack.MULTIPAR_COMMAND = check(curdir, 'win/par2/multipar/par2j.exe')
sabnzbd.newsunpack.RAR_COMMAND = check(curdir, 'win/unrar/UnRAR.exe')
sabnzbd.newsunpack.PAR2_COMMAND = check(curdir, 'win/par2/par2.exe')
sabnzbd.newsunpack.SEVEN_COMMAND = check(curdir, 'win/7zip/7za.exe')
else:
if not sabnzbd.newsunpack.PAR2_COMMAND:
sabnzbd.newsunpack.PAR2_COMMAND = find_on_path('par2')
if not sabnzbd.newsunpack.RAR_COMMAND:
sabnzbd.newsunpack.RAR_COMMAND = find_on_path(('unrar', 'rar', 'unrar3', 'rar3',))
sabnzbd.newsunpack.NICE_COMMAND = find_on_path('nice')
sabnzbd.newsunpack.IONICE_COMMAND = find_on_path('ionice')
if not sabnzbd.newsunpack.ZIP_COMMAND:
sabnzbd.newsunpack.ZIP_COMMAND = find_on_path('unzip')
if not sabnzbd.newsunpack.SEVEN_COMMAND:
sabnzbd.newsunpack.SEVEN_COMMAND = find_on_path('7za')
if not sabnzbd.newsunpack.SEVEN_COMMAND:
sabnzbd.newsunpack.SEVEN_COMMAND = find_on_path('7z')
if not (sabnzbd.WIN32 or sabnzbd.DARWIN):
# Run check on rar version
version, original = unrar_check(sabnzbd.newsunpack.RAR_COMMAND)
sabnzbd.newsunpack.RAR_PROBLEM = not original or version < sabnzbd.constants.REC_RAR_VERSION
sabnzbd.newsunpack.RAR_VERSION = version
# Run check on par2-multicore
sabnzbd.newsunpack.PAR2_MT = par2_mt_check(sabnzbd.newsunpack.PAR2_COMMAND)
ENV_NZO_FIELDS = ['bytes', 'bytes_downloaded', 'bytes_tried', 'cat', 'duplicate', 'encrypted',
'fail_msg', 'filename', 'final_name', 'group', 'nzo_id', 'oversized', 'password', 'pp',
'priority', 'repair', 'script', 'status', 'unpack', 'unwanted_ext', 'url']
def external_processing(extern_proc, nzo, complete_dir, nicename, status):
""" Run a user postproc script, return console output and exit value """
failure_url = nzo.nzo_info.get('failure', '')
# Items can be bool or null, causing Popen to fail
command = [str(extern_proc), str(complete_dir), str(nzo.filename), str(nicename), '',
str(nzo.cat), str(nzo.group), str(status), str(failure_url)]
# Add path to original NZB
nzb_paths = globber_full(nzo.workpath, '*.gz')
# Fields not in the NZO directly
extra_env_fields = {'failure_url': failure_url,
'complete_dir': complete_dir,
'pp_status': status,
'download_time': nzo.nzo_info.get('download_time', ''),
'avg_bps': int(nzo.avg_bps_total / nzo.avg_bps_freq) if nzo.avg_bps_freq else 0,
'age': calc_age(nzo.avg_date),
'orig_nzb_gz': clip_path(nzb_paths[0]) if nzb_paths else ''}
try:
stup, need_shell, command, creationflags = build_command(command)
env = create_env(nzo, extra_env_fields)
logging.info('Running external script %s(%s, %s, %s, %s, %s, %s, %s, %s)',
extern_proc, complete_dir, nzo.filename, nicename, '', nzo.cat, nzo.group, status, failure_url)
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, env=env, creationflags=creationflags)
# Follow the output, so we can abort it
proc = p.stdout
if p.stdin:
p.stdin.close()
lines = []
while 1:
line = platform_btou(proc.readline())
if not line:
break
line = line.strip()
lines.append(line)
# Show current line in history
nzo.set_action_line(T('Running script'), line)
# Check if we should still continue
if not nzo.pp_active:
p.kill()
lines.append(T('PostProcessing was aborted (%s)') % T('Script'))
# Print at least what we got
output = '\n'.join(lines)
return output, 1
except:
logging.debug("Failed script %s, Traceback: ", extern_proc, exc_info=True)
return "Cannot run script %s\r\n" % extern_proc, -1
output = '\n'.join(lines)
ret = p.wait()
return output, ret
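# Hedged reference (added): positional parameters handed to the user script by
# external_processing() above, in the order they appear in `command`. A shell
# script would see:
#   $1 = final (complete) directory       $5 = category
#   $2 = original NZB file name           $6 = newsgroup
#   $3 = clean job name                   $7 = post-processing status
#   $4 = "" (reserved, unused slot)       $8 = failure URL (may be empty)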
def external_script(script, p1, p2, p3=None, p4=None):
""" Run a user script with two parameters, return console output and exit value """
command = [script, p1, p2, p3, p4]
try:
stup, need_shell, command, creationflags = build_command(command)
env = create_env()
logging.info('Running user script %s(%s, %s)', script, p1, p2)
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, env=env, creationflags=creationflags)
except:
logging.debug("Failed script %s, Traceback: ", script, exc_info=True)
return "Cannot run script %s\r\n" % script, -1
output = platform_btou(p.stdout.read())
ret = p.wait()
return output, ret
def unpack_magic(nzo, workdir, workdir_complete, dele, one_folder, joinables, zips, rars, sevens, ts, depth=0):
""" Do a recursive unpack from all archives in 'workdir' to 'workdir_complete' """
if depth > 5:
logging.warning(T('Unpack nesting too deep [%s]'), nzo.final_name)
return False, []
depth += 1
if depth == 1:
# First time, ignore anything in workdir_complete
xjoinables, xzips, xrars, xsevens, xts = build_filelists(workdir)
else:
xjoinables, xzips, xrars, xsevens, xts = build_filelists(workdir, workdir_complete, check_both=dele)
force_rerun = False
newfiles = []
error = None
new_joins = new_ts = None
if cfg.enable_filejoin():
new_joins = [jn for jn in xjoinables if jn not in joinables]
if new_joins:
logging.info('Filejoin starting on %s', workdir)
error, newf = file_join(nzo, workdir, workdir_complete, dele, new_joins)
if newf:
newfiles.extend(newf)
logging.info('Filejoin finished on %s', workdir)
if cfg.enable_unrar():
new_rars = [rar for rar in xrars if rar not in rars]
if new_rars:
logging.info('Unrar starting on %s', workdir)
error, newf = rar_unpack(nzo, workdir, workdir_complete, dele, one_folder, new_rars)
if newf:
newfiles.extend(newf)
logging.info('Unrar finished on %s', workdir)
if cfg.enable_7zip():
new_sevens = [seven for seven in xsevens if seven not in sevens]
if new_sevens:
logging.info('7za starting on %s', workdir)
error, newf = unseven(nzo, workdir, workdir_complete, dele, one_folder, new_sevens)
if newf:
newfiles.extend(newf)
logging.info('7za finished on %s', workdir)
if cfg.enable_unzip():
new_zips = [zip for zip in xzips if zip not in zips]
if new_zips:
logging.info('Unzip starting on %s', workdir)
if SEVEN_COMMAND:
error, newf = unseven(nzo, workdir, workdir_complete, dele, one_folder, new_zips)
else:
error, newf = unzip(nzo, workdir, workdir_complete, dele, one_folder, new_zips)
if newf:
newfiles.extend(newf)
logging.info('Unzip finished on %s', workdir)
if cfg.enable_tsjoin():
new_ts = [_ts for _ts in xts if _ts not in ts]
if new_ts:
logging.info('TS Joining starting on %s', workdir)
error, newf = file_join(nzo, workdir, workdir_complete, dele, new_ts)
if newf:
newfiles.extend(newf)
logging.info('TS Joining finished on %s', workdir)
# Refresh history and set output
nzo.set_action_line()
# Only re-run if something was unpacked and it was success
rerun = error in (False, 0)
# During a Retry we might miss files that failed during recursive unpack
if nzo.reuse and depth == 1 and any(build_filelists(workdir, workdir_complete)):
rerun = True
# We can't recursive unpack on long paths on Windows
# See: https://github.com/sabnzbd/sabnzbd/pull/771
if sabnzbd.WIN32 and len(workdir_complete) > 256:
rerun = False
# Double-check that we didn't miss any files in workdir
# But only if dele=True, otherwise of course there will be files left
if rerun and dele and depth == 1 and any(build_filelists(workdir)):
force_rerun = True
# Clear lists to force re-scan of files
xjoinables, xzips, xrars, xsevens, xts = ([], [], [], [], [])
if rerun and (cfg.enable_recursive() or new_ts or new_joins or force_rerun):
z, y = unpack_magic(nzo, workdir, workdir_complete, dele, one_folder,
xjoinables, xzips, xrars, xsevens, xts, depth)
if z:
error = z
if y:
newfiles.extend(y)
return error, newfiles
##############################################################################
# Filejoin Functions
##############################################################################
def match_ts(file):
""" Return True if file is a joinable TS file """
match = TS_RE.search(file)
if not match:
return False, '', 0
num = int(match.group(1))
try:
set = file[:match.start()]
set += '.ts'
except:
set = ''
return match, set, num
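# Hedged example (added): for a joinable transport-stream segment,
#
#     >>> match, ts_set, num = match_ts('show.0001.ts')
#     >>> ts_set, num
#     ('show.ts', 1)
#
# while a non-matching name returns (False, '', 0).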
def clean_up_joinables(names):
""" Remove joinable files and their .1 backups """
for name in names:
if os.path.exists(name):
try:
remove_file(name)
except:
pass
name1 = name + ".1"
if os.path.exists(name1):
try:
remove_file(name1)
except:
pass
def get_seq_number(name):
""" Return sequence number if name as an int """
head, tail = os.path.splitext(name)
if tail == '.ts':
match, set, num = match_ts(name)
else:
num = tail[1:]
if num.isdigit():
return int(num)
else:
return 0
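# Hedged examples (added):
#
#     >>> get_seq_number('archive.003')
#     3
#     >>> get_seq_number('show.0002.ts')
#     2
#     >>> get_seq_number('archive.rar')
#     0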
def file_join(nzo, workdir, workdir_complete, delete, joinables):
""" Join and joinable files in 'workdir' to 'workdir_complete' and
when successful, delete originals
"""
newfiles = []
bufsize = 24 * 1024 * 1024
# Create matching sets from the list of files
joinable_sets = {}
joinable_set = None
for joinable in joinables:
head, tail = os.path.splitext(joinable)
if tail == '.ts':
head = match_ts(joinable)[1]
if head not in joinable_sets:
joinable_sets[head] = []
joinable_sets[head].append(joinable)
logging.debug("joinable_sets: %s", joinable_sets)
try:
# Handle each set
for joinable_set in joinable_sets:
current = joinable_sets[joinable_set]
joinable_sets[joinable_set].sort()
# If par2 already did the work, just remove the files
if os.path.exists(joinable_set):
logging.debug("file_join(): Skipping %s, (probably) joined by par2", joinable_set)
if delete:
clean_up_joinables(current)
# done, go to next set
continue
# Only join when there is more than one file
size = len(current)
if size < 2:
continue
# Prepare joined file
filename = joinable_set
if workdir_complete:
filename = filename.replace(workdir, workdir_complete)
logging.debug("file_join(): Assembling %s", filename)
joined_file = open(filename, 'ab')
# Join the segments
n = get_seq_number(current[0])
seq_error = n > 1
for joinable in current:
if get_seq_number(joinable) != n:
seq_error = True
perc = (100.0 / size) * n
logging.debug("Processing %s", joinable)
nzo.set_action_line(T('Joining'), '%.0f%%' % perc)
f = open(joinable, 'rb')
shutil.copyfileobj(f, joined_file, bufsize)
f.close()
if delete:
remove_file(joinable)
n += 1
# Remove any remaining .1 files
clean_up_joinables(current)
# Finish up
joined_file.flush()
joined_file.close()
newfiles.append(filename)
setname = setname_from_path(joinable_set)
if seq_error:
msg = T('Incomplete sequence of joinable files')
nzo.fail_msg = T('File join of %s failed') % setname
nzo.set_unpack_info('Filejoin', T('[%s] Error "%s" while joining files') % (setname, msg))
logging.error(T('Error "%s" while running file_join on %s'), msg, nzo.final_name)
return True, []
else:
msg = T('[%s] Joined %s files') % (joinable_set, size)
nzo.set_unpack_info('Filejoin', msg, setname)
except:
msg = sys.exc_info()[1]
nzo.fail_msg = T('File join of %s failed') % msg
nzo.set_unpack_info('Filejoin', T('[%s] Error "%s" while joining files') % (setname_from_path(joinable_set), msg))
logging.error(T('Error "%s" while running file_join on %s'), msg, nzo.final_name)
return True, []
return False, newfiles
##############################################################################
# (Un)Rar Functions
##############################################################################
def rar_unpack(nzo, workdir, workdir_complete, delete, one_folder, rars):
""" Unpack multiple sets 'rars' of RAR files from 'workdir' to 'workdir_complete.
When 'delete' is set, originals will be deleted.
When 'one_folder' is set, all files will be in a single folder
"""
newfiles = extracted_files = []
rar_sets = {}
for rar in rars:
rar_set = setname_from_path(rar)
if RAR_RE_V3.search(rar_set):
# Remove the ".partXX" part
rar_set = os.path.splitext(rar_set)[0]
if rar_set not in rar_sets:
rar_sets[rar_set] = []
rar_sets[rar_set].append(rar)
logging.debug('Rar_sets: %s', rar_sets)
for rar_set in rar_sets:
# Run the RAR extractor
rar_sets[rar_set].sort(key=functools.cmp_to_key(rar_sort))
rarpath = rar_sets[rar_set][0]
if workdir_complete and rarpath.startswith(workdir):
extraction_path = workdir_complete
else:
extraction_path = os.path.split(rarpath)[0]
# Is the direct-unpacker still running? We wait for it
if nzo.direct_unpacker:
wait_count = 0
last_stats = nzo.direct_unpacker.get_formatted_stats()
while nzo.direct_unpacker.is_alive():
logging.debug('DirectUnpacker still alive for %s: %s', nzo.final_name, last_stats)
# Bump the file-lock in case it's stuck
with nzo.direct_unpacker.next_file_lock:
nzo.direct_unpacker.next_file_lock.notify()
time.sleep(2)
# Did something change? Might be stuck
if last_stats == nzo.direct_unpacker.get_formatted_stats():
wait_count += 1
if wait_count > 60:
# We abort after 2 minutes of no changes
nzo.direct_unpacker.abort()
else:
wait_count = 0
last_stats = nzo.direct_unpacker.get_formatted_stats()
# Did we already direct-unpack it? Not when recursive-unpacking
if nzo.direct_unpacker and rar_set in nzo.direct_unpacker.success_sets:
logging.info("Set %s completed by DirectUnpack", rar_set)
fail = False
success = True
rars, newfiles = nzo.direct_unpacker.success_sets.pop(rar_set)
else:
logging.info("Extracting rarfile %s (belonging to %s) to %s",
rarpath, rar_set, extraction_path)
try:
fail, newfiles, rars = rar_extract(rarpath, len(rar_sets[rar_set]),
one_folder, nzo, rar_set, extraction_path)
# Was it aborted?
if not nzo.pp_active:
fail = True
break
success = not fail
except:
success = False
fail = True
msg = sys.exc_info()[1]
nzo.fail_msg = T('Unpacking failed, %s') % msg
setname = nzo.final_name
nzo.set_unpack_info('Unpack', T('[%s] Error "%s" while unpacking RAR files') % (setname, msg))
logging.error(T('Error "%s" while running rar_unpack on %s'), msg, setname)
logging.debug("Traceback: ", exc_info=True)
if success:
logging.debug('rar_unpack(): Rars: %s', rars)
logging.debug('rar_unpack(): Newfiles: %s', newfiles)
extracted_files.extend(newfiles)
# Do not fail if this was a recursive unpack
if fail and rarpath.startswith(workdir_complete):
# Do not delete the files, leave it to user!
logging.info('Ignoring failure to do recursive unpack of %s', rarpath)
fail = 0
success = True
newfiles = []
# Do not fail if this was maybe just some duplicate fileset
# Multipar and par2tbb will detect and log them, par2cmdline will not
if fail and rar_set.endswith(('.1', '.2')):
# Just in case, we leave the raw files
logging.info('Ignoring failure of unpack for possible duplicate file %s', rarpath)
fail = 0
success = True
newfiles = []
# Delete the old files if we have to
if success and delete and newfiles:
for rar in rars:
try:
remove_file(rar)
except OSError:
if os.path.exists(rar):
logging.warning(T('Deleting %s failed!'), rar)
brokenrar = '%s.1' % rar
if os.path.exists(brokenrar):
logging.info("Deleting %s", brokenrar)
try:
remove_file(brokenrar)
except OSError:
if os.path.exists(brokenrar):
logging.warning(T('Deleting %s failed!'), brokenrar)
return fail, extracted_files
def rar_extract(rarfile_path, numrars, one_folder, nzo, setname, extraction_path):
""" Unpack single rar set 'rarfile' to 'extraction_path',
with password tries
Return fail==0(ok)/fail==1(error)/fail==2(wrong password), new_files, rars
"""
fail = 0
new_files = None
rars = []
passwords = get_all_passwords(nzo)
for password in passwords:
if password:
logging.debug('Trying unrar with password "%s"', password)
msg = T('Trying unrar with password "%s"') % password
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
fail, new_files, rars = rar_extract_core(rarfile_path, numrars, one_folder, nzo, setname, extraction_path, password)
if fail != 2:
break
if fail == 2:
logging.error('%s (%s)', T('Unpacking failed, archive requires a password'), os.path.split(rarfile_path)[1])
return fail, new_files, rars
def rar_extract_core(rarfile_path, numrars, one_folder, nzo, setname, extraction_path, password):
""" Unpack single rar set 'rarfile_path' to 'extraction_path'
Return fail==0(ok)/fail==1(error)/fail==2(wrong password)/fail==3(crc-error), new_files, rars
"""
start = time.time()
logging.debug("rar_extract(): Extractionpath: %s", extraction_path)
if password:
password_command = '-p%s' % password
else:
password_command = '-p-'
############################################################################
if one_folder or cfg.flat_unpack():
action = 'e'
else:
action = 'x'
if cfg.overwrite_files():
overwrite = '-o+' # Enable overwrite
rename = '-o+' # Dummy
else:
overwrite = '-o-' # Disable overwrite
rename = '-or' # Auto renaming
if sabnzbd.WIN32:
# For Unrar to support long-path, we need to circumvent Python's list2cmdline
# See: https://github.com/sabnzbd/sabnzbd/issues/1043
command = ['%s' % RAR_COMMAND, action, '-idp', overwrite, rename, '-ai', password_command,
'%s' % clip_path(rarfile_path), '%s\\' % long_path(extraction_path)]
elif RAR_PROBLEM:
# Use only oldest options (specifically no "-or")
command = ['%s' % RAR_COMMAND, action, '-idp', overwrite, password_command,
'%s' % rarfile_path, '%s/' % extraction_path]
else:
# Don't use "-ai" (not needed for non-Windows)
command = ['%s' % RAR_COMMAND, action, '-idp', overwrite, rename, password_command,
'%s' % rarfile_path, '%s/' % extraction_path]
if cfg.ignore_unrar_dates():
command.insert(3, '-tsm-')
stup, need_shell, command, creationflags = build_command(command, flatten_command=True)
# Get list of all the volumes part of this set
logging.debug("Analyzing rar file ... %s found", rarfile.is_rarfile(rarfile_path))
logging.debug("Running unrar %s", command)
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, creationflags=creationflags)
proc = p.stdout
if p.stdin:
p.stdin.close()
nzo.set_action_line(T('Unpacking'), '00/%02d' % numrars)
# Loop over the output from rar!
curr = 0
extracted = []
rarfiles = []
fail = 0
inrecovery = False
lines = []
while 1:
line = platform_btou(proc.readline())
if not line:
break
# Check if we should still continue
if not nzo.pp_active:
p.kill()
msg = T('PostProcessing was aborted (%s)') % T('Unpack')
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
nzo.status = Status.FAILED
return fail, (), ()
line = line.strip()
lines.append(line)
if line.startswith('Extracting from'):
filename = (re.search(EXTRACTFROM_RE, line).group(1))
if filename not in rarfiles:
rarfiles.append(filename)
curr += 1
nzo.set_action_line(T('Unpacking'), '%02d/%02d' % (curr, numrars))
elif line.find('recovery volumes found') > -1:
inrecovery = True # and thus start ignoring "Cannot find volume" for a while
logging.debug("unrar recovery start: %s" % line)
elif line.startswith('Reconstruct'):
# end of reconstruction: 'Reconstructing... 100%' or 'Reconstructing... ' (both success), or 'Reconstruction impossible'
inrecovery = False
logging.debug("unrar recovery result: %s" % line)
elif line.startswith('Cannot find volume') and not inrecovery:
filename = os.path.basename(line[19:])
msg = T('Unpacking failed, unable to find %s') % filename
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
logging.warning(T('ERROR: unable to find "%s"'), filename)
fail = 1
elif line.endswith('- CRC failed'):
msg = T('Unpacking failed, CRC error')
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
logging.warning(T('ERROR: CRC failed in "%s"'), setname)
fail = 2 # Older unrar versions report a wrong password as a CRC error
elif line.startswith('File too large'):
msg = T('Unpacking failed, file too large for filesystem (FAT?)')
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
# ERROR: File too large for file system (bigfile-5000MB)
logging.error(T('ERROR: File too large for filesystem (%s)'), setname)
fail = 1
elif line.startswith('Write error'):
msg = T('Unpacking failed, write error or disk is full?')
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
logging.error(T('ERROR: write error (%s)'), line[11:])
fail = 1
elif line.startswith('Cannot create'):
line2 = platform_btou(proc.readline())
if 'must not exceed 260' in line2:
msg = '%s: %s' % (T('Unpacking failed, path is too long'), line[13:])
nzo.fail_msg = msg
logging.error(T('ERROR: path too long (%s)'), line[13:])
else:
msg = '%s: %s' % (T('Unpacking failed, write error or disk is full?'), line[13:])
nzo.fail_msg = msg
logging.error(T('ERROR: write error (%s)'), line[13:])
nzo.set_unpack_info('Unpack', msg, setname)
fail = 1
# Kill the process (can stay in endless loop on Windows Server)
p.kill()
elif line.startswith('ERROR: '):
msg = T('ERROR: %s') % line[7:]
nzo.fail_msg = msg
logging.warning(msg)
nzo.set_unpack_info('Unpack', msg, setname)
fail = 1
elif 'The specified password is incorrect' in line or 'Incorrect password' in line or \
('ncrypted file' in line and (('CRC failed' in line) or ('Checksum error' in line))):
# unrar 3.x: "Encrypted file: CRC failed in oLKQfrcNVivzdzSG22a2xo7t001.part1.rar (password incorrect ?)"
# unrar 4.x: "CRC failed in the encrypted file oLKQfrcNVivzdzSG22a2xo7t001.part1.rar. Corrupt file or wrong password."
# unrar 5.x: "Checksum error in the encrypted file oLKQfrcNVivzdzSG22a2xo7t001.part1.rar. Corrupt file or wrong password."
# unrar 5.01: "The specified password is incorrect."
# unrar 5.80: "Incorrect password for oLKQfrcNVivzdzSG22a2xo7t001.part1.rar"
msg = T('Unpacking failed, archive requires a password')
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
fail = 2
elif 'is not RAR archive' in line:
# Unrecognizable RAR file
msg = T('Unusable RAR file')
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
fail = 3
elif 'checksum error' in line or 'Unexpected end of archive' in line:
# Corrupt archive or passworded, we can't know
# packed data checksum error in volume FILE
msg = T('Corrupt RAR file')
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname)
fail = 3
else:
m = re.search(EXTRACTED_RE, line)
if m:
# In case of flat-unpack, UnRar still prints the whole path (?!)
unpacked_file = m.group(2)
if cfg.flat_unpack():
unpacked_file = os.path.basename(unpacked_file)
extracted.append(real_path(extraction_path, unpacked_file))
if fail:
if proc:
proc.close()
p.wait()
logging.debug('UNRAR output %s', '\n'.join(lines))
return fail, (), ()
if proc:
proc.close()
p.wait()
# Which files did we use to extract this?
rarfiles = rar_volumelist(rarfile_path, password, rarfiles)
logging.debug('UNRAR output %s', '\n'.join(lines))
nzo.fail_msg = ''
msg = T('Unpacked %s files/folders in %s') % (str(len(extracted)), format_time_string(time.time() - start))
nzo.set_unpack_info('Unpack', msg, setname)
logging.info('%s', msg)
return 0, extracted, rarfiles
##############################################################################
# (Un)Zip Functions
##############################################################################
def unzip(nzo, workdir, workdir_complete, delete, one_folder, zips):
""" Unpack multiple sets 'zips' of ZIP files from 'workdir' to 'workdir_complete.
When 'delete' is ste, originals will be deleted.
"""
try:
i = 0
unzip_failed = False
tms = time.time()
# For file-bookkeeping
orig_dir_content = recursive_listdir(workdir_complete)
for _zip in zips:
logging.info("Starting extract on zipfile: %s ", _zip)
nzo.set_action_line(T('Unpacking'), '%s' % setname_from_path(_zip))
if workdir_complete and _zip.startswith(workdir):
extraction_path = workdir_complete
else:
extraction_path = os.path.split(_zip)[0]
if ZIP_Extract(_zip, extraction_path, one_folder):
unzip_failed = True
else:
i += 1
msg = T('%s files in %s') % (str(i), format_time_string(time.time() - tms))
nzo.set_unpack_info('Unpack', msg)
# What's new?
new_files = list(set(orig_dir_content + recursive_listdir(workdir_complete)))
# Delete the old files if we have to
if delete and not unzip_failed:
i = 0
for _zip in zips:
try:
remove_file(_zip)
i += 1
except OSError:
logging.warning(T('Deleting %s failed!'), _zip)
brokenzip = '%s.1' % _zip
if os.path.exists(brokenzip):
try:
remove_file(brokenzip)
i += 1
except OSError:
logging.warning(T('Deleting %s failed!'), brokenzip)
return unzip_failed, new_files
except:
msg = sys.exc_info()[1]
nzo.fail_msg = T('Unpacking failed, %s') % msg
logging.error(T('Error "%s" while running unzip() on %s'), msg, nzo.final_name)
return True, []
def ZIP_Extract(zipfile, extraction_path, one_folder):
""" Unzip single zip set 'zipfile' to 'extraction_path' """
command = ['%s' % ZIP_COMMAND, '-o', '-Pnone', '%s' % clip_path(zipfile),
'-d%s' % extraction_path]
if one_folder or cfg.flat_unpack():
command.insert(3, '-j') # Unpack without folders
stup, need_shell, command, creationflags = build_command(command)
logging.debug('Starting unzip: %s', command)
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, creationflags=creationflags)
output = platform_btou(p.stdout.read())
logging.debug('unzip output: \n%s', output)
ret = p.wait()
return ret
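# Hedged illustration (added): for a flat unpack the command assembled above
# boils down to something like
#
#     unzip -o -Pnone -j /path/to/file.zip -d/path/to/extract
#
# i.e. overwrite existing files, supply a dummy password, junk stored paths,
# and extract into the target directory.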
##############################################################################
# 7Zip Functions
##############################################################################
def unseven(nzo, workdir, workdir_complete, delete, one_folder, sevens):
""" Unpack multiple sets '7z' of 7Zip files from 'workdir' to 'workdir_complete.
When 'delete' is set, originals will be deleted.
"""
i = 0
unseven_failed = False
new_files = []
tms = time.time()
# Find multi-volume sets, because 7zip will not provide actual set members
sets = {}
for seven in sevens:
name, ext = os.path.splitext(seven)
ext = ext.strip('.')
if not ext.isdigit():
name = seven
ext = None
if name not in sets:
sets[name] = []
if ext:
sets[name].append(ext)
# Unpack each set
for seven in sets:
extensions = sets[seven]
logging.info("Starting extract on 7zip set/file: %s ", seven)
nzo.set_action_line(T('Unpacking'), '%s' % setname_from_path(seven))
if workdir_complete and seven.startswith(workdir):
extraction_path = workdir_complete
else:
extraction_path = os.path.split(seven)[0]
res, new_files_set, msg = seven_extract(nzo, seven, extensions, extraction_path, one_folder, delete)
if res:
unseven_failed = True
nzo.set_unpack_info('Unpack', msg, setname_from_path(seven))
else:
i += 1
new_files.extend(new_files_set)
if not unseven_failed:
msg = T('%s files in %s') % (str(i), format_time_string(time.time() - tms))
nzo.set_unpack_info('Unpack', msg)
return unseven_failed, new_files
def seven_extract(nzo, sevenset, extensions, extraction_path, one_folder, delete):
""" Unpack single set 'sevenset' to 'extraction_path', with password tries
Return fail==0(ok)/fail==1(error)/fail==2(wrong password), new_files, message
"""
# Before we start, make sure the 7z binary SEVEN_COMMAND is defined
if not SEVEN_COMMAND:
msg = T('No 7za binary found, cannot unpack "%s"') % os.path.basename(sevenset)
logging.error(msg)
return 1, [], msg
fail = 0
passwords = get_all_passwords(nzo)
for password in passwords:
if password:
msg = T('Trying 7zip with password "%s"') % password
logging.debug(msg)
nzo.fail_msg = msg
nzo.set_unpack_info('Unpack', msg, setname_from_path(sevenset))
fail, new_files, msg = seven_extract_core(sevenset, extensions, extraction_path, one_folder, delete, password)
if fail != 2:
break
nzo.fail_msg = ''
if fail == 2:
msg = '%s (%s)' % (T('Unpacking failed, archive requires a password'), os.path.basename(sevenset))
if fail > 0:
nzo.fail_msg = msg
nzo.status = Status.FAILED
logging.error(msg)
return fail, new_files, msg
def seven_extract_core(sevenset, extensions, extraction_path, one_folder, delete, password):
""" Unpack single 7Z set 'sevenset' to 'extraction_path'
Return fail==0(ok)/fail==1(error)/fail==2(wrong password), new_files, message
"""
if one_folder:
method = 'e' # Unpack without folders
else:
method = 'x' # Unpack with folders
if sabnzbd.WIN32 or sabnzbd.DARWIN:
case = '-ssc-' # Case insensitive
else:
case = '-ssc' # Case sensitive
if cfg.overwrite_files():
overwrite = '-aoa'
else:
overwrite = '-aou'
if password:
password = '-p%s' % password
else:
password = '-p'
if len(extensions) > 0:
name = '%s.001' % sevenset
parm = '-tsplit'
else:
name = sevenset
parm = '-tzip' if sevenset.lower().endswith('.zip') else '-t7z'
if not os.path.exists(name):
return 1, [], T('7ZIP set "%s" is incomplete, cannot unpack') % setname_from_path(sevenset)
# For file-bookkeeping
orig_dir_content = recursive_listdir(extraction_path)
command = [SEVEN_COMMAND, method, '-y', overwrite, parm, case, password,
'-o%s' % extraction_path, name]
stup, need_shell, command, creationflags = build_command(command)
logging.debug('Starting 7za: %s', command)
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, creationflags=creationflags)
output = platform_btou(p.stdout.read())
logging.debug('7za output: %s', output)
ret = p.wait()
# Return-code for CRC and Password is the same
if ret == 2 and 'ERROR: CRC Failed' in output:
# We can output a more general error
ret = 1
msg = T('ERROR: CRC failed in "%s"') % setname_from_path(sevenset)
else:
# Default message
msg = T('Could not unpack %s') % setname_from_path(sevenset)
# What's new?
new_files = list(set(orig_dir_content + recursive_listdir(extraction_path)))
if ret == 0 and delete:
if extensions:
for ext in extensions:
path = '%s.%s' % (sevenset, ext)
try:
remove_file(path)
except:
logging.warning(T('Deleting %s failed!'), path)
else:
try:
remove_file(sevenset)
except:
logging.warning(T('Deleting %s failed!'), sevenset)
# Always return an error message, even when return code is 0
return ret, new_files, msg
##############################################################################
# PAR2 Functions
##############################################################################
def par2_repair(parfile_nzf, nzo, workdir, setname, single):
""" Try to repair a set, return readd or correctness """
# Check if file exists, otherwise see if another is done
parfile_path = os.path.join(workdir, parfile_nzf.filename)
if not os.path.exists(parfile_path) and nzo.extrapars[setname]:
for new_par in nzo.extrapars[setname]:
test_parfile = os.path.join(workdir, new_par.filename)
if os.path.exists(test_parfile):
parfile_nzf = new_par
break
else:
# No file was found, we assume this set already finished
return False, True
parfile = os.path.join(workdir, parfile_nzf.filename)
old_dir_content = os.listdir(workdir)
used_joinables = ()
joinables = ()
used_for_repair = ()
result = readd = False
# Need to copy now, gets pop-ed during repair
setpars = nzo.extrapars[setname][:]
# Start QuickCheck
nzo.status = Status.QUICK_CHECK
nzo.set_action_line(T('Repair'), T('Quick Checking'))
qc_result = QuickCheck(setname, nzo)
if qc_result:
logging.info("Quick-check for %s is OK, skipping repair", setname)
nzo.set_unpack_info('Repair', T('[%s] Quick Check OK') % setname)
result = True
if not result and cfg.enable_all_par():
# Download all par2 files that haven't been downloaded yet
readd = False
for extrapar in nzo.extrapars[setname][:]:
# Make sure we only get new par2 files
if extrapar not in nzo.finished_files and extrapar not in nzo.files:
nzo.add_parfile(extrapar)
readd = True
if readd:
return readd, result
if not result:
nzo.status = Status.REPAIRING
result = False
readd = False
try:
nzo.set_action_line(T('Repair'), T('Starting Repair'))
logging.info('Scanning "%s"', parfile)
joinables, zips, rars, sevens, ts = build_filelists(workdir, check_rar=False)
# Multipar or not?
if sabnzbd.WIN32 and cfg.multipar():
finished, readd, datafiles, used_joinables, used_for_repair = MultiPar_Verify(parfile, nzo, setname, joinables, single=single)
else:
finished, readd, datafiles, used_joinables, used_for_repair = PAR_Verify(parfile, nzo, setname, joinables, single=single)
if finished:
result = True
logging.info('Par verify finished ok on %s!', parfile)
# Remove this set so we don't try to check it again
nzo.remove_parset(parfile_nzf.setname)
else:
logging.info('Par verify failed on %s!', parfile)
if not readd:
# Failed to repair -> remove this set
nzo.remove_parset(parfile_nzf.setname)
return readd, False
except:
msg = sys.exc_info()[1]
nzo.fail_msg = T('Repairing failed, %s') % msg
logging.error(T('Error %s while running par2_repair on set %s'), msg, setname)
logging.info("Traceback: ", exc_info=True)
return readd, result
try:
if cfg.enable_par_cleanup():
deletables = []
new_dir_content = os.listdir(workdir)
# Remove extra files created during repair and par2 base files
for path in new_dir_content:
if os.path.splitext(path)[1] == '.1' and path not in old_dir_content:
deletables.append(os.path.join(workdir, path))
deletables.append(os.path.join(workdir, setname + '.par2'))
deletables.append(os.path.join(workdir, setname + '.PAR2'))
deletables.append(parfile)
# Add output of par2-repair to remove
deletables.extend(used_joinables)
deletables.extend([os.path.join(workdir, f) for f in used_for_repair])
# Delete pars of the set
deletables.extend([os.path.join(workdir, nzf.filename) for nzf in setpars])
for filepath in deletables:
if filepath in joinables:
joinables.remove(filepath)
if os.path.exists(filepath):
try:
remove_file(filepath)
except OSError:
logging.warning(T('Deleting %s failed!'), filepath)
except:
msg = sys.exc_info()[1]
nzo.fail_msg = T('Repairing failed, %s') % msg
logging.error(T('Error "%s" while running par2_repair on set %s'), msg, setname, exc_info=True)
return readd, result
_RE_BLOCK_FOUND = re.compile(r'File: "([^"]+)" - found \d+ of \d+ data blocks from "([^"]+)"')
_RE_IS_MATCH_FOR = re.compile(r'File: "([^"]+)" - is a match for "([^"]+)"')
_RE_LOADING_PAR2 = re.compile(r'Loading "([^"]+)"\.')
_RE_LOADED_PAR2 = re.compile(r'Loaded (\d+) new packets')
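# Illustrative examples of the par2 output lines these patterns are meant to match
# (hand-written samples, not captured par2 output; file names are placeholders):
#   File: "data.part01.rar.1" - found 12 of 57 data blocks from "data.part01.rar"
#   File: "abc123" - is a match for "data.part02.rar".
#   Loading "data.vol000+01.par2".
#   Loaded 42 new packets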
def PAR_Verify(parfile, nzo, setname, joinables, single=False):
""" Run par2 on par-set """
used_joinables = []
used_for_repair = []
# set the current nzo status to "Verifying...". Used in History
nzo.status = Status.VERIFYING
start = time.time()
options = cfg.par_option().strip()
command = [str(PAR2_COMMAND), 'r', options, parfile]
# Append the wildcard for this set
parfolder = os.path.split(parfile)[0]
if single or len(globber(parfolder, setname + '*')) < 2:
# Support bizarre naming conventions
wildcard = '*'
else:
# Normal case, everything is named after set
wildcard = setname + '*'
if sabnzbd.WIN32 or sabnzbd.DARWIN:
command.append(os.path.join(parfolder, wildcard))
else:
# For Unix systems, remove folders, due to bug in some par2cmdline versions
flist = [item for item in globber_full(parfolder, wildcard) if os.path.isfile(item)]
command.extend(flist)
# We need to check for the bad par2cmdline that skips blocks
# Or the one that complains about basepath
# Only if we're not doing multicore
if not sabnzbd.WIN32 and not sabnzbd.DARWIN:
par2text = run_simple([command[0], '-h'])
if 'No data skipping' in par2text:
logging.info('Detected par2cmdline version that skips blocks, adding -N parameter')
command.insert(2, '-N')
if 'Set the basepath' in par2text:
logging.info('Detected par2cmdline version that needs basepath, adding -B<path> parameter')
command.insert(2, '-B')
command.insert(3, parfolder)
stup, need_shell, command, creationflags = build_command(command)
# par2multicore wants to see \\.\ paths on Windows
# See: https://github.com/sabnzbd/sabnzbd/pull/771
if sabnzbd.WIN32:
command = [clip_path(x) if x.startswith('\\\\?\\') else x for x in command]
# Run the external command
logging.info('Starting par2: %s', command)
lines = []
try:
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, creationflags=creationflags)
proc = p.stdout
if p.stdin:
p.stdin.close()
# Set up our variables
datafiles = []
renames = {}
reconstructed = []
linebuf = ''
finished = 0
readd = False
verifynum = 1
verifytotal = 0
verified = 0
in_verify_repaired = False
# Loop over the output, whee
while 1:
char = platform_btou(proc.read(1))
if not char:
break
# Line not complete yet
if char not in ('\n', '\r'):
linebuf += char
continue
line = linebuf.strip()
linebuf = ''
# Check if we should still continue
if not nzo.pp_active:
p.kill()
msg = T('PostProcessing was aborted (%s)') % T('Repair')
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
readd = False
break
# Skip empty lines
if line == '':
continue
if 'Repairing:' not in line:
lines.append(line)
if line.startswith(('Invalid option specified', 'Invalid thread option', 'Cannot specify recovery file count')):
msg = T('[%s] PAR2 received incorrect options, check your Config->Switches settings') % setname
nzo.set_unpack_info('Repair', msg)
nzo.status = Status.FAILED
logging.error(msg)
elif line.startswith('All files are correct'):
msg = T('[%s] Verified in %s, all files correct') % (setname, format_time_string(time.time() - start))
nzo.set_unpack_info('Repair', msg)
logging.info('Verified in %s, all files correct',
format_time_string(time.time() - start))
finished = 1
elif line.startswith('Repair is required'):
msg = T('[%s] Verified in %s, repair is required') % (setname, format_time_string(time.time() - start))
nzo.set_unpack_info('Repair', msg)
logging.info('Verified in %s, repair is required',
format_time_string(time.time() - start))
start = time.time()
verified = 1
# Reset to use them again for verification of repair
verifytotal = 0
verifynum = 0
elif line.startswith('Main packet not found') or 'The recovery file does not exist' in line:
                # Initial par2 file probably didn't decode properly or bad user parameters
                # We will try to get another par2 file, but 99% of the time it's user parameters
msg = T('Invalid par2 files or invalid PAR2 parameters, cannot verify or repair')
logging.info(msg)
logging.info("Extra pars = %s", nzo.extrapars[setname])
# Look for the smallest par2file
block_table = {}
for nzf in nzo.extrapars[setname]:
if not nzf.completed:
block_table[nzf.blocks] = nzf
if block_table:
nzf = block_table[min(block_table.keys())]
logging.info("Found new par2file %s", nzf.filename)
# Move from extrapar list to files to be downloaded
# and remove it from the extrapars list
nzo.add_parfile(nzf)
readd = True
else:
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
elif line.startswith('You need'):
# We need more blocks, but are they available?
chunks = line.split()
needed_blocks = int(chunks[2])
# Check if we have enough blocks
added_blocks = nzo.get_extra_blocks(setname, needed_blocks)
if added_blocks:
msg = T('Fetching %s blocks...') % str(added_blocks)
nzo.set_action_line(T('Fetching'), msg)
readd = True
else:
# Failed
msg = T('Repair failed, not enough repair blocks (%s short)') % str(needed_blocks)
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
elif line.startswith('Repair is possible'):
start = time.time()
nzo.set_action_line(T('Repairing'), '%2d%%' % 0)
elif line.startswith('Repairing:'):
chunks = line.split()
per = float(chunks[-1][:-1])
nzo.set_action_line(T('Repairing'), '%2d%%' % per)
nzo.status = Status.REPAIRING
elif line.startswith('Repair complete'):
msg = T('[%s] Repaired in %s') % (setname, format_time_string(time.time() - start))
nzo.set_unpack_info('Repair', msg)
logging.info('Repaired in %s', format_time_string(time.time() - start))
finished = 1
elif verified and line.endswith(('are missing.', 'exist but are damaged.')):
# Files that will later be verified after repair
chunks = line.split()
verifytotal += int(chunks[0])
elif line.startswith('Verifying repaired files'):
in_verify_repaired = True
nzo.set_action_line(T('Verifying repair'), '%02d/%02d' % (verifynum, verifytotal))
elif in_verify_repaired and line.startswith('Target'):
verifynum += 1
if verifynum <= verifytotal:
nzo.set_action_line(T('Verifying repair'), '%02d/%02d' % (verifynum, verifytotal))
elif line.startswith('File:') and line.find('data blocks from') > 0:
m = _RE_BLOCK_FOUND.search(line)
if m:
workdir = os.path.split(parfile)[0]
old_name = m.group(1)
new_name = m.group(2)
if joinables:
# Find out if a joinable file has been used for joining
for jn in joinables:
if line.find(os.path.split(jn)[1]) > 0:
used_joinables.append(jn)
break
                        # Special case of joined RAR files: the "of" and "from" names must both be RAR files
                        # This prevents the joined rar files from being seen as an extra rar-set
if '.rar' in old_name.lower() and '.rar' in new_name.lower():
used_joinables.append(os.path.join(workdir, old_name))
else:
logging.debug('PAR2 will reconstruct "%s" from "%s"', new_name, old_name)
reconstructed.append(os.path.join(workdir, old_name))
elif 'Could not write' in line and 'at offset 0:' in line:
                # If there are joinables, this error will only happen in case of 100% complete files
                # We can just skip the retry, because par2cmdline will fail in those cases
                # because it refuses to scan the ".001" file
if joinables:
finished = 1
used_joinables = []
elif ' cannot be renamed to ' in line:
msg = line.strip()
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
elif 'There is not enough space on the disk' in line:
# Oops, disk is full!
msg = T('Repairing failed, %s') % T('Disk full')
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
# File: "oldname.rar" - is a match for "newname.rar".
elif 'is a match for' in line:
m = _RE_IS_MATCH_FOR.search(line)
if m:
old_name = m.group(1)
new_name = m.group(2)
logging.debug('PAR2 will rename "%s" to "%s"', old_name, new_name)
renames[new_name] = old_name
# Show progress
if verifytotal == 0 or verifynum < verifytotal:
verifynum += 1
nzo.set_action_line(T('Verifying'), '%02d/%02d' % (verifynum, verifytotal))
elif 'Scanning extra files' in line:
# Obfuscated post most likely, so reset counter to show progress
verifynum = 1
elif 'No details available for recoverable file' in line:
msg = line.strip()
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
elif line.startswith('Repair Failed.'):
# Unknown repair problem
msg = T('Repairing failed, %s') % line
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
finished = 0
elif not verified:
if line.startswith('Verifying source files'):
nzo.set_action_line(T('Verifying'), '01/%02d' % verifytotal)
nzo.status = Status.VERIFYING
elif line.startswith('Scanning:'):
pass
# Target files
m = TARGET_RE.match(line)
if m:
nzo.status = Status.VERIFYING
verifynum += 1
if verifytotal == 0 or verifynum < verifytotal:
nzo.set_action_line(T('Verifying'), '%02d/%02d' % (verifynum, verifytotal))
else:
nzo.set_action_line(T('Checking extra files'), '%02d' % verifynum)
# Remove redundant extra files that are just duplicates of original ones
if 'duplicate data blocks' in line:
used_for_repair.append(m.group(1))
else:
datafiles.append(m.group(1))
continue
# Verify done
m = re.match(r'There are (\d+) recoverable files', line)
if m:
verifytotal = int(m.group(1))
p.wait()
except WindowsError as err:
raise WindowsError(err)
# Also log what is shown to user in history
if nzo.fail_msg:
logging.info(nzo.fail_msg)
logging.debug('PAR2 output was\n%s', '\n'.join(lines))
# If successful, add renamed files to the collection
if finished and renames:
nzo.renamed_file(renames)
# If successful and files were reconstructed, remove incomplete original files
if finished and reconstructed:
# Use 'used_joinables' as a vehicle to get rid of the files
used_joinables.extend(reconstructed)
return finished, readd, datafiles, used_joinables, used_for_repair
_RE_FILENAME = re.compile(r'"([^"]+)"')
def MultiPar_Verify(parfile, nzo, setname, joinables, single=False):
""" Run par2 on par-set """
parfolder = os.path.split(parfile)[0]
used_joinables = []
used_for_repair = []
# set the current nzo status to "Verifying...". Used in History
nzo.status = Status.VERIFYING
start = time.time()
# Caching of verification implemented by adding:
# But not really required due to prospective-par2
command = [str(MULTIPAR_COMMAND), 'r', '-vs2', '-vd%s' % parfolder, parfile]
# Check if there are maybe par2cmdline/par2tbb commands supplied
if '-t' in cfg.par_option() or '-p' in cfg.par_option():
logging.info('Removing old par2cmdline/par2tbb options for MultiPar')
cfg.par_option.set('')
# Only add user-options if supplied
options = cfg.par_option().strip()
if options:
# We wrongly instructed users to use /x parameter style instead of -x
options = options.replace('/', '-', 1)
command.insert(2, options)
# Append the wildcard for this set
if single or len(globber(parfolder, setname + '*')) < 2:
# Support bizarre naming conventions
wildcard = '*'
else:
# Normal case, everything is named after set
wildcard = setname + '*'
command.append(os.path.join(parfolder, wildcard))
stup, need_shell, command, creationflags = build_command(command)
logging.info('Starting MultiPar: %s', command)
lines = []
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, creationflags=creationflags)
proc = p.stdout
if p.stdin:
p.stdin.close()
# Set up our variables
datafiles = []
renames = {}
reconstructed = []
linebuf = ''
finished = 0
readd = False
verifynum = 0
verifytotal = 0
in_check = False
in_verify = False
in_repair = False
in_verify_repaired = False
misnamed_files = False
old_name = None
# Loop over the output, whee
while 1:
char = platform_btou(proc.read(1))
if not char:
break
# Line not complete yet
if char not in ('\n', '\r'):
linebuf += char
continue
line = linebuf.strip()
linebuf = ''
# Check if we should still continue
if not nzo.pp_active:
p.kill()
msg = T('PostProcessing was aborted (%s)') % T('Repair')
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
readd = False
break
# Skip empty lines
if line == '':
continue
# Save it all
lines.append(line)
# ----------------- Startup
if line.startswith('invalid option'):
# Option error
msg = T('[%s] PAR2 received incorrect options, check your Config->Switches settings') % setname
nzo.set_unpack_info('Repair', msg)
nzo.status = Status.FAILED
logging.error(msg)
elif line.startswith('valid file is not found'):
            # Initial par2 file probably didn't decode properly, or bad user parameters
            # We will try to get another par2 file, but 99% of the time it's user parameters
msg = T('Invalid par2 files or invalid PAR2 parameters, cannot verify or repair')
logging.info(msg)
logging.info("Extra pars = %s", nzo.extrapars[setname])
# Look for the smallest par2file
block_table = {}
for nzf in nzo.extrapars[setname]:
if not nzf.completed:
block_table[nzf.blocks] = nzf
if block_table:
nzf = block_table[min(block_table.keys())]
logging.info("Found new par2file %s", nzf.filename)
# Move from extrapar list to files to be downloaded
# and remove it from the extrapars list
nzo.add_parfile(nzf)
readd = True
else:
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
elif line.startswith('There is not enough space on the disk'):
msg = T('Repairing failed, %s') % T('Disk full')
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
# ----------------- Start check/verify stage
elif line.startswith('Recovery Set ID'):
            # Remove files where MultiPar stores verification results when repair is successful
recovery_id = line.split()[-1]
used_for_repair.append('2_%s.bin' % recovery_id)
used_for_repair.append('2_%s.ini' % recovery_id)
elif line.startswith('Input File total count'):
# How many files will it try to find?
verifytotal = int(line.split()[-1])
# ----------------- Misnamed-detection stage
# Misnamed files
elif line.startswith('Searching misnamed file'):
# We are in the misnamed files block
misnamed_files = True
verifynum = 0
elif misnamed_files and 'Found' in line:
# First it reports the current filename
m = _RE_FILENAME.search(line)
if m:
verifynum += 1
nzo.set_action_line(T('Checking'), '%02d/%02d' % (verifynum, verifytotal))
old_name = m.group(1)
elif misnamed_files and 'Misnamed' in line:
# Then it finds the actual
m = _RE_FILENAME.search(line)
if m and old_name:
new_name = m.group(1)
logging.debug('MultiPar will rename "%s" to "%s"', old_name, new_name)
renames[new_name] = old_name
# New name is also part of data!
datafiles.append(new_name)
reconstructed.append(old_name)
# ----------------- Checking stage
# Checking input files
elif line.startswith('Complete file count'):
in_check = False
verifynum = 0
old_name = None
elif line.startswith('Verifying Input File'):
in_check = True
nzo.status = Status.VERIFYING
elif in_check:
m = _RE_FILENAME.search(line)
if m:
# Only increase counter if it was really the detection line
if line.startswith('= ') or '%' not in line:
verifynum += 1
nzo.set_action_line(T('Checking'), '%02d/%02d' % (verifynum, verifytotal))
old_name = m.group(1)
# ----------------- Verify stage
# Which files need extra verification?
elif line.startswith('Damaged file count'):
verifytotal = int(line.split()[-1])
elif line.startswith('Missing file count'):
verifytotal += int(line.split()[-1])
# Actual verification
elif line.startswith('Input File Slice found'):
# End of verification AND end of misnamed file search
in_verify = False
misnamed_files = False
old_name = None
elif line.startswith('Finding available slice'):
# The actual scanning of the files
in_verify = True
nzo.set_action_line(T('Verifying'), T('Checking'))
elif in_verify:
m = _RE_FILENAME.search(line)
if m:
                # It prints the filename a couple of times, so we save it to check
                # 'datafiles' will not contain all data-files in the par-set, only the
                # ones that got scanned, but its output is never used!
nzo.status = Status.VERIFYING
if line.split()[1] in ('Damaged', 'Found'):
verifynum += 1
datafiles.append(m.group(1))
# Set old_name in case it was misnamed and found (not when we are joining)
old_name = None
if line.split()[1] == 'Found' and not joinables:
old_name = m.group(1)
# Sometimes we don't know the total (filejoin)
if verifytotal <= 1:
nzo.set_action_line(T('Verifying'), '%02d' % verifynum)
else:
nzo.set_action_line(T('Verifying'), '%02d/%02d' % (verifynum, verifytotal))
elif old_name and old_name != m.group(1):
# Hey we found another misnamed one!
new_name = m.group(1)
logging.debug('MultiPar will rename "%s" to "%s"', old_name, new_name)
renames[new_name] = old_name
                    # Put it back with its new name!
datafiles.pop()
datafiles.append(new_name)
# Need to remove the old file after repair (Multipar keeps it)
used_for_repair.append(old_name)
# Need to reset it to avoid collision
old_name = None
else:
                    # It's scanning extra files that don't belong to the set
                    # For damaged files it reports the filename twice, so only show the
                    # extra-files counter once half the count exceeds the known total
verifynum += 1
if verifynum / 2 > verifytotal:
nzo.set_action_line(T('Checking extra files'), '%02d' % verifynum)
if joinables:
# Find out if a joinable file has been used for joining
for jn in joinables:
if line.find(os.path.split(jn)[1]) > 0:
used_joinables.append(jn)
datafiles.append(m.group(1))
break
elif line.startswith('Need'):
# We need more blocks, but are they available?
chunks = line.split()
needed_blocks = int(chunks[1])
# Check if we have enough blocks
added_blocks = nzo.get_extra_blocks(setname, needed_blocks)
if added_blocks:
msg = T('Fetching %s blocks...') % str(added_blocks)
nzo.set_action_line(T('Fetching'), msg)
readd = True
else:
# Failed
msg = T('Repair failed, not enough repair blocks (%s short)') % str(needed_blocks)
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
# MultiPar can say 'PAR File(s) Incomplete' also when it needs more blocks
# But the Need-more-blocks message is always last, so force failure
finished = 0
# Result of verification
elif line.startswith('All Files Complete') or line.endswith('PAR File(s) Incomplete'):
# Completed without damage!
            # 'PAR File(s) Incomplete' can also be reported on success
            # when there are very similar filenames in the folder
msg = T('[%s] Verified in %s, all files correct') % (setname, format_time_string(time.time() - start))
nzo.set_unpack_info('Repair', msg)
logging.info('Verified in %s, all files correct',
format_time_string(time.time() - start))
finished = 1
elif line.startswith(('Ready to repair', 'Ready to rejoin')):
# Ready to repair!
# Or we are re-joining a split file when there's no damage but takes time
msg = T('[%s] Verified in %s, repair is required') % (setname, format_time_string(time.time() - start))
nzo.set_unpack_info('Repair', msg)
logging.info('Verified in %s, repair is required',
format_time_string(time.time() - start))
start = time.time()
# Set message for user in case of joining
if line.startswith('Ready to rejoin'):
nzo.set_action_line(T('Joining'), '%2d' % len(used_joinables))
else:
# If we are repairing a joinable set, it won't actually
# do the joining. So we can't remove those files!
used_joinables = []
# ----------------- Repair stage
elif 'Recovering slice' in line:
# Before this it will calculate matrix, here is where it starts
start = time.time()
in_repair = True
nzo.set_action_line(T('Repairing'), '%2d%%' % 0)
elif in_repair and line.startswith('Verifying repair'):
in_repair = False
in_verify_repaired = True
# How many will be checked?
verifytotal = int(line.split()[-1])
verifynum = 0
elif in_repair:
try:
# Line with percentage of repair (nothing else)
per = float(line[:-1])
nzo.set_action_line(T('Repairing'), '%2d%%' % per)
nzo.status = Status.REPAIRING
except:
# Checksum error
if 'checksum' in line:
# Failed due to checksum error of multipar
msg = T('Repairing failed, %s') % line
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
else:
# Not sure, log error
logging.info("Traceback: ", exc_info=True)
elif line.startswith('Repaired successfully'):
msg = T('[%s] Repaired in %s') % (setname, format_time_string(time.time() - start))
nzo.set_unpack_info('Repair', msg)
logging.info('Repaired in %s', format_time_string(time.time() - start))
finished = 1
elif in_verify_repaired and line.startswith('Repaired :'):
# Track verification of repaired files (can sometimes take a while)
verifynum += 1
nzo.set_action_line(T('Verifying repair'), '%02d/%02d' % (verifynum, verifytotal))
elif line.startswith('Failed to repair'):
# Unknown repair problem
msg = T('Repairing failed, %s') % line
nzo.fail_msg = msg
nzo.set_unpack_info('Repair', msg, setname)
nzo.status = Status.FAILED
finished = 0
p.wait()
# Also log what is shown to user in history
if nzo.fail_msg:
logging.info(nzo.fail_msg)
logging.debug('MultiPar output was\n%s', '\n'.join(lines))
# Add renamed files to the collection
    # MultiPar always(!!) renames automatically whatever it can in the 'Searching misnamed file:'-section
    # Even if the repair did not complete fully it will rename those!
    # But the ones in the 'Finding available slices'-section will only be renamed after successful repair
if renames:
        # If successful, we also remove the possibly previously renamed ones
if finished:
reconstructed.extend(list(renames.values()))
# Adding to the collection
nzo.renamed_file(renames)
# Remove renamed original files
workdir = os.path.split(parfile)[0]
used_joinables.extend([os.path.join(workdir, name) for name in reconstructed])
return finished, readd, datafiles, used_joinables, used_for_repair
def create_env(nzo=None, extra_env_fields={}):
""" Modify the environment for pp-scripts with extra information
OSX: Return copy of environment without PYTHONPATH and PYTHONHOME
other: return None
"""
env = os.environ.copy()
# Are we adding things?
if nzo:
# Add basic info
for field in ENV_NZO_FIELDS:
try:
field_value = getattr(nzo, field)
# Special filters for Python types
if field_value is None:
env['SAB_' + field.upper()] = ''
elif isinstance(field_value, bool):
env['SAB_' + field.upper()] = str(field_value*1)
else:
env['SAB_' + field.upper()] = str(field_value)
except:
# Catch key errors
pass
# Always supply basic info
extra_env_fields.update({'program_dir': sabnzbd.DIR_PROG,
'par2_command': sabnzbd.newsunpack.PAR2_COMMAND,
'multipar_command': sabnzbd.newsunpack.MULTIPAR_COMMAND,
'rar_command': sabnzbd.newsunpack.RAR_COMMAND,
'zip_command': sabnzbd.newsunpack.ZIP_COMMAND,
'7zip_command': sabnzbd.newsunpack.SEVEN_COMMAND,
'version': sabnzbd.__version__})
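    # Illustration: the loop below exposes each of these as a SAB_* environment
    # variable, e.g. SAB_PROGRAM_DIR, SAB_RAR_COMMAND, SAB_7ZIP_COMMAND, SAB_VERSION.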
# Add extra fields
for field in extra_env_fields:
try:
if extra_env_fields[field] is not None:
env['SAB_' + field.upper()] = str(extra_env_fields[field])
else:
env['SAB_' + field.upper()] = ''
except:
# Catch key errors
pass
if sabnzbd.DARWIN:
if 'PYTHONPATH' in env:
del env['PYTHONPATH']
if 'PYTHONHOME' in env:
del env['PYTHONHOME']
elif not nzo:
# No modification
return None
return env
def userxbit(filename):
# Returns boolean if the x-bit for user is set on the given file
# This is a workaround: os.access(filename, os.X_OK) does not work on certain mounted file systems
# Does not work on Windows, but it is not called on Windows
# rwx rwx rwx
# 876 543 210 # we want bit 6 from the right, counting from 0
userxbit = 1<<6 # bit 6
rwxbits = os.stat(filename)[0] # the first element of os.stat() is "mode"
# do logical AND, check if it is not 0:
xbitset = (rwxbits & userxbit) > 0
return xbitset
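# Worked example (illustrative): a file with permission bits 0o744 (rwxr--r--) gives
# 0o744 & (1 << 6) == 64, so userxbit() returns True; bits 0o644 (rw-r--r--) give 0,
# so it returns False. Note os.stat()[0] also contains file-type bits, which do not
# affect this test.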
def build_command(command, flatten_command=False):
""" Prepare list from running an external program
On Windows we need to run our own list2cmdline for Unrar
"""
# command[0] should be set, and thus not None
if not command[0]:
logging.error(T('[%s] The command in build_command is undefined.'), caller_name())
raise IOError
if not sabnzbd.WIN32:
if command[0].endswith('.py'):
with open(command[0], 'r') as script_file:
if not userxbit(command[0]):
# Inform user that Python scripts need x-bit and then stop
logging.error(T('Python script "%s" does not have execute (+x) permission set'), command[0])
raise IOError
elif script_file.read(2) != '#!':
# No shebang (#!) defined, add default python
command.insert(0, 'python')
if IONICE_COMMAND and cfg.ionice().strip():
lst = cfg.ionice().split()
lst.reverse()
for arg in lst:
command.insert(0, arg)
command.insert(0, IONICE_COMMAND)
if NICE_COMMAND and cfg.nice().strip():
lst = cfg.nice().split()
lst.reverse()
for arg in lst:
command.insert(0, arg)
command.insert(0, NICE_COMMAND)
need_shell = False
stup = None
creationflags = 0
else:
# For Windows we always need to add python interpreter
if command[0].endswith('.py'):
command.insert(0, 'python')
need_shell = os.path.splitext(command[0])[1].lower() not in ('.exe', '.com')
stup = subprocess.STARTUPINFO()
stup.dwFlags = win32process.STARTF_USESHOWWINDOW
stup.wShowWindow = win32con.SW_HIDE
creationflags = WIN_SCHED_PRIOS[cfg.win_process_prio()]
if need_shell or flatten_command:
command = list2cmdline(command)
return stup, need_shell, command, creationflags
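# Hedged usage sketch, mirroring how callers in this module use build_command
# ('archive_path' is a placeholder):
#   command = [SEVEN_COMMAND, 'l', '-slt', archive_path]
#   stup, need_shell, command, creationflags = build_command(command)
#   p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
#             stderr=subprocess.STDOUT, startupinfo=stup, creationflags=creationflags)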
def rar_volumelist(rarfile_path, password, known_volumes):
""" Extract volumes that are part of this rarset
and merge them with existing list, removing duplicates
"""
# UnRar is required to read some RAR files
# RarFile can fail in special cases
try:
rarfile.UNRAR_TOOL = RAR_COMMAND
zf = rarfile.RarFile(rarfile_path)
# setpassword can fail due to bugs in RarFile
if password:
try:
zf.setpassword(password)
except:
pass
zf_volumes = zf.volumelist()
except:
zf_volumes = []
# Remove duplicates
known_volumes_base = [os.path.basename(vol) for vol in known_volumes]
for zf_volume in zf_volumes:
if os.path.basename(zf_volume) not in known_volumes_base:
# Long-path notation just to be sure
known_volumes.append(long_path(zf_volume))
return known_volumes
# Sort the various RAR filename formats properly :\
def rar_sort(a, b):
""" Define sort method for rar file names """
aext = a.split('.')[-1]
bext = b.split('.')[-1]
if aext == 'rar' and bext == 'rar':
return cmp(a, b)
elif aext == 'rar':
return -1
elif bext == 'rar':
return 1
else:
return cmp(a, b)
def build_filelists(workdir, workdir_complete=None, check_both=False, check_rar=True):
""" Build filelists, if workdir_complete has files, ignore workdir.
Optionally scan both directories.
Optionally test content to establish RAR-ness
"""
sevens, joinables, zips, rars, ts, filelist = ([], [], [], [], [], [])
if workdir_complete:
filelist.extend(recursive_listdir(workdir_complete))
if workdir and (not filelist or check_both):
filelist.extend(recursive_listdir(workdir))
for file in filelist:
# Extra check for rar (takes CPU/disk)
file_is_rar = False
if check_rar:
file_is_rar = rarfile.is_rarfile(file)
# Run through all the checks
if SEVENZIP_RE.search(file) or SEVENMULTI_RE.search(file):
# 7zip
sevens.append(file)
elif SPLITFILE_RE.search(file) and not file_is_rar:
# Joinables, optional with RAR check
joinables.append(file)
elif ZIP_RE.search(file):
# ZIP files
zips.append(file)
elif RAR_RE.search(file):
# RAR files
rars.append(file)
elif TS_RE.search(file):
# TS split files
ts.append(file)
logging.debug("build_filelists(): joinables: %s", joinables)
logging.debug("build_filelists(): zips: %s", zips)
logging.debug("build_filelists(): rars: %s", rars)
logging.debug("build_filelists(): 7zips: %s", sevens)
logging.debug("build_filelists(): ts: %s", ts)
return joinables, zips, rars, sevens, ts
def QuickCheck(set, nzo):
""" Check all on-the-fly md5sums of a set """
md5pack = nzo.md5packs.get(set)
if md5pack is None:
return False
    # We use bitwise assignment (&=) so False always wins in case of failure
# This way the renames always get saved!
result = True
nzf_list = nzo.finished_files
renames = {}
# Files to ignore
ignore_ext = cfg.quick_check_ext_ignore()
for file in md5pack:
found = False
file_to_ignore = os.path.splitext(file)[1].lower().replace('.', '') in ignore_ext
for nzf in nzf_list:
# Do a simple filename based check
if file == nzf.filename:
found = True
if (nzf.md5sum is not None) and nzf.md5sum == md5pack[file]:
logging.debug('Quick-check of file %s OK', file)
result &= True
elif file_to_ignore:
# We don't care about these files
logging.debug('Quick-check ignoring file %s', file)
result &= True
else:
logging.info('Quick-check of file %s failed!', file)
result = False
break
# Now lets do obfuscation check
if nzf.md5sum == md5pack[file]:
try:
logging.debug('Quick-check will rename %s to %s', nzf.filename, file)
renamer(os.path.join(nzo.downpath, nzf.filename), os.path.join(nzo.downpath, file))
renames[file] = nzf.filename
nzf.filename = file
result &= True
found = True
break
except IOError:
                    # Rename failed for some reason, probably already done
break
if not found:
if file_to_ignore:
# We don't care about these files
logging.debug('Quick-check ignoring missing file %s', file)
continue
logging.info('Cannot Quick-check missing file %s!', file)
result = False
# Save renames
if renames:
nzo.renamed_file(renames)
return result
def unrar_check(rar):
""" Return version number of unrar, where "5.01" returns 501
Also return whether an original version is found
(version, original)
"""
version = 0
original = ''
if rar:
try:
version = run_simple(rar)
except:
return version, original
original = "Alexander Roshal" in version
m = re.search(r"RAR\s(\d+)\.(\d+)", version)
if m:
version = int(m.group(1)) * 100 + int(m.group(2))
else:
version = 0
return version, original
def par2_mt_check(par2_path):
""" Detect if we have multicore par2 variants """
try:
par2_version = run_simple([par2_path, '-h'])
# Look for a threads option
if '-t<' in par2_version:
return True
except:
pass
return False
def sfv_check(sfv_path):
""" Verify files using SFV file,
        input: full path of sfv, files are assumed to be relative to sfv
returns: List of failing files or [] when all is OK
"""
failed = []
try:
fp = open(sfv_path, 'r')
except:
logging.info('Cannot open SFV file %s', sfv_path)
failed.append(sfv_path)
return failed
root = os.path.split(sfv_path)[0]
for line in fp:
line = line.strip('\n\r ')
if line and line[0] != ';':
x = line.rfind(' ')
if x > 0:
filename = line[:x].strip()
checksum = line[x:].strip()
path = os.path.join(root, filename)
if os.path.exists(path):
if crc_check(path, checksum):
logging.debug('File %s passed SFV check', path)
else:
logging.info('File %s did not pass SFV check', path)
failed.append(filename)
else:
logging.info('File %s missing in SFV check', path)
failed.append(filename)
fp.close()
return failed
def crc_check(path, target_crc):
""" Return True if file matches CRC """
try:
fp = open(path, 'rb')
except:
return False
crc = 0
while 1:
data = fp.read(4096)
if not data:
break
crc = binascii.crc32(data, crc)
fp.close()
crc = '%08x' % (crc & 0xffffffff,)
return crc.lower() == target_crc.lower()
def analyse_show(name):
""" Do a quick SeasonSort check and return basic facts """
job = SeriesSorter(None, name, None, None)
job.match(force=True)
if job.is_match():
job.get_values()
info = job.show_info
show_name = info.get('show_name', '').replace('.', ' ').replace('_', ' ')
        show_name = show_name.replace('  ', ' ')
return show_name, \
info.get('season_num', ''), \
info.get('episode_num', ''), \
info.get('ep_name', '')
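# Hedged example (illustrative only): for a job name such as "Some.Show.S01E02.Title.1080p"
# this would typically return values along the lines of ("Some Show", "1", "2", "Title"),
# though the exact formatting of the season/episode fields depends on SeriesSorter.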
def pre_queue(nzo, pp, cat):
""" Run pre-queue script (if any) and process results.
        pp and cat are supplied separately since they can change.
"""
def fix(p):
# If added via API, some items can still be "None" (as a string)
if not p or str(p).lower() == 'none':
return ''
return str(p)
values = [1, nzo.final_name_pw_clean, pp, cat, nzo.script, nzo.priority, None]
script_path = make_script_path(cfg.pre_script())
if script_path:
# Basic command-line parameters
command = [script_path, nzo.final_name_pw_clean, pp, cat, nzo.script, nzo.priority, str(nzo.bytes), ' '.join(nzo.groups)]
command.extend(analyse_show(nzo.final_name_pw_clean))
command = [fix(arg) for arg in command]
# Fields not in the NZO directly
extra_env_fields = {'groups': ' '.join(nzo.groups),
'show_name': command[8],
'show_season': command[9],
'show_episode': command[10],
'show_episode_name': command[11]}
try:
stup, need_shell, command, creationflags = build_command(command)
env = create_env(nzo, extra_env_fields)
logging.info('Running pre-queue script %s', command)
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, startupinfo=stup, env=env,
creationflags=creationflags)
except:
logging.debug("Failed script %s, Traceback: ", script_path, exc_info=True)
return values
output = platform_btou(p.stdout.read())
ret = p.wait()
logging.info('Pre-queue script returns %s and output=\n%s', ret, output)
if ret == 0:
n = 0
for line in output.split('\n'):
line = line.strip('\r\n \'"')
if n < len(values) and line:
values[n] = line
n += 1
accept = int_conv(values[0])
if accept < 1:
logging.info('Pre-Q refuses %s', nzo.final_name_pw_clean)
elif accept == 2:
logging.info('Pre-Q accepts&fails %s', nzo.final_name_pw_clean)
else:
logging.info('Pre-Q accepts %s', nzo.final_name_pw_clean)
return values
def list2cmdline(lst):
""" convert list to a cmd.exe-compatible command string """
nlst = []
for arg in lst:
if not arg:
nlst.append('""')
else:
nlst.append('"%s"' % arg)
return ' '.join(nlst)
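# Worked example: list2cmdline(['unrar', 'x', '', 'C:\\Downloads\\set.rar']) returns
# '"unrar" "x" "" "C:\\Downloads\\set.rar"' (empty arguments become "").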
def is_sevenfile(path):
""" Return True if path has proper extension and 7Zip is installed """
return SEVEN_COMMAND and os.path.splitext(path)[1].lower() == '.7z'
class SevenZip:
""" Minimal emulation of ZipFile class for 7Zip """
def __init__(self, path):
self.path = path
def namelist(self):
""" Return list of names in 7Zip """
names = []
# Future extension: use '-sccUTF-8' to get names in UTF8 encoding
command = [SEVEN_COMMAND, 'l', '-p', '-y', '-slt', self.path]
stup, need_shell, command, creationflags = build_command(command)
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=stup, creationflags=creationflags)
output = platform_btou(p.stdout.read())
_ = p.wait()
re_path = re.compile('^Path = (.+)')
for line in output.split('\n'):
m = re_path.search(line)
if m:
names.append(m.group(1).strip('\r'))
if names:
# Remove name of archive itself
del names[0]
return names
def read(self, name):
""" Read named file from 7Zip and return data """
command = [SEVEN_COMMAND, 'e', '-p', '-y', '-so', self.path, name]
stup, need_shell, command, creationflags = build_command(command)
# Ignore diagnostic output, otherwise it will be appended to content
if sabnzbd.WIN32:
stderr = open('nul', 'w')
else:
stderr = open('/dev/null', 'w')
p = Popen(command, shell=need_shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=stderr,
startupinfo=stup, creationflags=creationflags)
output = platform_btou(p.stdout.read())
_ = p.wait()
stderr.close()
return output
def close(self):
""" Close file """
pass
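# Hedged usage sketch (assumes 7za is available and "archive.7z" is a real file):
#   if is_sevenfile("archive.7z"):
#       zf = SevenZip("archive.7z")
#       for name in zf.namelist():
#           data = zf.read(name)
#       zf.close()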
def run_simple(cmd):
""" Run simple external command and return output """
p = Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
txt = platform_btou(p.stdout.read())
p.wait()
return txt
|
the-stack_0_11132 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 02:51:53 2016
@author: utkarsh
"""
# FREQEST - Estimate fingerprint ridge frequency within image block
#
# Function to estimate the fingerprint ridge frequency within a small block
# of a fingerprint image. This function is used by RIDGEFREQ
#
# Usage:
# freqim = freqest(im, orientim, windsze, minWaveLength, maxWaveLength)
#
# Arguments:
# im - Image block to be processed.
# orientim - Ridge orientation image of image block.
# windsze - Window length used to identify peaks. This should be
# an odd integer, say 3 or 5.
# minWaveLength, maxWaveLength - Minimum and maximum ridge
# wavelengths, in pixels, considered acceptable.
#
# Returns:
# freqim - An image block the same size as im with all values
# set to the estimated ridge spatial frequency. If a
# ridge frequency cannot be found, or cannot be found
# within the limits set by min and max Wavelength
# freqim is set to zeros.
#
# Suggested parameters for a 500dpi fingerprint image
# freqim = freqest(im,orientim, 5, 5, 15);
#
# See also: RIDGEFREQ, RIDGEORIENT, RIDGESEGMENT
# REFERENCES
# Peter Kovesi
# School of Computer Science & Software Engineering
# The University of Western Australia
# pk at csse uwa edu au
# http://www.csse.uwa.edu.au/~pk
import numpy as np
import math
import scipy.ndimage
# import cv2
def frequest(im, orientim, windsze, minWaveLength, maxWaveLength):
rows, cols = np.shape(im)
# Find mean orientation within the block. This is done by averaging the
# sines and cosines of the doubled angles before reconstructing the
# angle again. This avoids wraparound problems at the origin.
cosorient = np.mean(np.cos(2 * orientim))
sinorient = np.mean(np.sin(2 * orientim))
orient = math.atan2(sinorient, cosorient) / 2
# Rotate the image block so that the ridges are vertical
# ROT_mat = cv2.getRotationMatrix2D((cols/2,rows/2),orient/np.pi*180 + 90,1)
# rotim = cv2.warpAffine(im,ROT_mat,(cols,rows))
rotim = scipy.ndimage.rotate(im, orient / np.pi * 180 + 90, axes=(1, 0), reshape=False, order=3, mode='nearest')
# Now crop the image so that the rotated image does not contain any
# invalid regions. This prevents the projection down the columns
# from being mucked up.
cropsze = int(np.fix(rows / np.sqrt(2)))
offset = int(np.fix((rows - cropsze) / 2))
rotim = rotim[offset:offset + cropsze][:, offset:offset + cropsze]
# Sum down the columns to get a projection of the grey values down
# the ridges.
proj = np.sum(rotim, axis=0)
dilation = scipy.ndimage.grey_dilation(proj, windsze, structure=np.ones(windsze))
temp = np.abs(dilation - proj)
peak_thresh = 2
maxpts = (temp < peak_thresh) & (proj > np.mean(proj))
maxind = np.where(maxpts)
rows_maxind, cols_maxind = np.shape(maxind)
    # Determine the spatial frequency of the ridges by dividing the
# distance between the 1st and last peaks by the (No of peaks-1). If no
# peaks are detected, or the wavelength is outside the allowed bounds,
# the frequency image is set to 0
if cols_maxind < 2:
freqim = np.zeros(im.shape)
else:
NoOfPeaks = cols_maxind
waveLength = (maxind[0][cols_maxind - 1] - maxind[0][0]) / (NoOfPeaks - 1)
if minWaveLength <= waveLength <= maxWaveLength:
freqim = 1 / np.double(waveLength) * np.ones(im.shape)
else:
freqim = np.zeros(im.shape)
return freqim
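# Hedged usage sketch (not part of the original module). Parameter values follow the
# 500dpi suggestion in the header; im_block and orient_block are placeholder arrays
# standing in for an image block and its ridge orientation block of equal shape:
#
#   im_block = np.random.rand(32, 32)
#   orient_block = np.zeros((32, 32))
#   freq_block = frequest(im_block, orient_block, windsze=5,
#                         minWaveLength=5, maxWaveLength=15)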
|
the-stack_0_11133 | from automata_tools.Automata import Automata
from typing import Dict, List, Union, Callable
import numpy as np
class WFA:
dfa: Automata
def __init__(self, dfa: Automata, word2index: Dict[str, int],
dfa_to_tensor: Callable) -> None:
self.dfa = dfa
self.dfaDict = self.dfa.to_dict()
wfaTensor, wfaState2idx, wildcardMatrix, language = dfa_to_tensor(
self.dfaDict, word2index)
self.word2index = word2index
        self.wfaTensor = wfaTensor + wildcardMatrix  # word-specific sparse transition matrix plus the all-ones wildcard matrix
self.wfaState2idx = wfaState2idx
self.language = language
self.tokenizer = lambda inputText: self.dfa.tokenizer(inputText)
def setTokenizer(self, tokenizerFunction: Callable[[str], List[str]]):
self.tokenizer = tokenizerFunction
def getStateLength(self) -> int:
return len(self.dfaDict['states'])
def getFinalStateIndex(self) -> List[int]:
return [self.wfaState2idx[i] for i in self.dfaDict['finalStates']]
def getStartStateIndex(self) -> int:
return self.wfaState2idx[self.dfaDict['startstate']]
def execute(self, inputWords: Union[str, np.array]) -> bool:
if isinstance(inputWords, str):
inputWordTensor = np.array(
list(
map(lambda word: self.word2index[word],
self.tokenizer(inputWords))))
else:
inputWordTensor = inputWords
stateTensor = np.zeros((self.getStateLength(), 1))
stateTensor[self.getStartStateIndex(
)] = 1 # set initial state's probability to 1
        # every word has an SxS transition matrix, where S = self.getStateLength()
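        # Illustration (not from the original source): with 3 states and start state 0,
        # stateTensor begins as [[1], [0], [0]]; each word's transposed SxS matrix is
        # applied as a matrix-vector product, so after the loop stateTensor[i] holds the
        # number of paths from the start state that end in state i for this input.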
for inputIndex in range(len(inputWordTensor)):
inputWordIndex = inputWordTensor[inputIndex]
transitionMatrixOfCurrentInputWord = self.wfaTensor[int(
inputWordIndex)].transpose()
stateTensor = np.dot(transitionMatrixOfCurrentInputWord,
stateTensor)
for index in self.getFinalStateIndex():
if int(stateTensor[index]) >= 1:
return True
return False |
the-stack_0_11137 | from setuptools import setup
with open('README.md', 'r') as fp:
long_desc = fp.read()
setup(
name='HTTPserver-mock',
version='2',
author='Tom YU Choe',
author_email='[email protected]',
description='a simple http-server mockup to test web crawler.',
long_description=long_desc,
url='https://github.com/YUChoe/HTTPserver-mock',
long_description_content_type="text/markdown",
py_modules=['HTTPserver_mock'],
package_dir={'': 'src'},
license='MIT',
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: System Administrators",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
],
install_requires=[],
)
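# Hedged note (not part of the original file): with py_modules=['HTTPserver_mock'] and
# package_dir={'': 'src'}, the module is expected at src/HTTPserver_mock.py and the
# package would typically be built and installed with "pip install ." from the project root.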
|
the-stack_0_11139 | from __future__ import annotations
import ipaddress
import json
import logging
import struct
import sys
import time
import tkinter
import zlib
from dataclasses import astuple
from pathlib import Path
from tkinter import messagebox, ttk
from typing import Optional, Tuple
import dns
import dns.resolver
from idlelib.tooltip import Hovertip
from twisted.internet import reactor, task, tksupport
from modules.Client import ClientInstance
from modules.Common import (CarInfo, Credidentials, DataQueue, NetData,
NetworkQueue, PitStop)
from modules.DriverInputs import DriverInputs
from modules.Server import ServerInstance
from modules.Strategy import StrategyUI
from modules.Telemetry import Telemetry, TelemetryRT, TelemetryUI
from modules.TyreGraph import PrevLapsGraph, TyreGraph
from modules.TyreSets import TyreSets, TyresSetData
from modules.Users import UserUI
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format="%(asctime)s.%(msecs)03d | %(name)s | %(message)s",
datefmt="%H:%M:%S")
_VERSION_ = "1.5.9"
class ConnectionPage(ttk.Frame):
def __init__(self, app: App, root):
ttk.Frame.__init__(self, master=root)
self.main_app = app
self.connection_path = "./Config/connection.json"
self.is_connected = None
self.connection_msg = ""
self.credis = None
self.is_connected_loop = task.LoopingCall(self.check_connection)
self.credidentials = None
key_check = ("saved_ip", "tcp_port", "udp_port", "username",
"driverID")
logging.info(f"Loading {self.connection_path}")
if Path(self.connection_path).is_file():
fp = open(self.connection_path, "r")
try:
self.credidentials = json.load(fp)
if (type(self.credidentials) is not dict or
tuple(self.credidentials.keys()) != key_check):
logging.info(f"Invalid connection.json file")
self.credidentials = None
except json.JSONDecodeError as msg:
self.credidentials = None
logging.info(f"JSON Error: {msg}")
fp.close()
else:
logging.info(f"{self.connection_path} not found")
self.credidentials = None
self.as_server = False
self.f_connection_info = tkinter.Frame(
self, bd=2, relief=tkinter.RIDGE)
self.f_connection_info.grid()
self.l_ip = tkinter.Label(self.f_connection_info, text="Address",
anchor=tkinter.E, width=10)
self.l_ip.grid(row=0, column=0, padx=5, pady=2)
Hovertip(self.l_ip, "Address of the server host ip or domain", 10)
self.l_tcp_port = tkinter.Label(self.f_connection_info,
text="TCP port", anchor=tkinter.E,
width=10)
self.l_tcp_port.grid(row=1, column=0, padx=5, pady=2)
Hovertip(self.l_ip, "TCP port of the host server (1024 - 10 000),"
" can be the same UDP", 10)
self.l_udp_port = tkinter.Label(self.f_connection_info,
text="UDP port", anchor=tkinter.E,
width=10)
self.l_udp_port.grid(row=2, column=0, padx=5, pady=2)
Hovertip(self.l_ip, "UDP port of the host server (1024 - 10 000),"
" can be the same as TCP", 10)
self.l_username = tkinter.Label(self.f_connection_info,
text="Username",
anchor=tkinter.E, width=10)
self.l_username.grid(row=3, column=0, padx=5, pady=2)
Hovertip(self.l_username, "Your name in ACC", 10)
self.l_driverID = tkinter.Label(self.f_connection_info,
text="Driver ID",
anchor=tkinter.E, width=10)
self.l_driverID.grid(row=4, column=0, padx=5, pady=2)
Hovertip(self.l_driverID, "Driver ID for driver swap "
"(Driver 1, 2, 3, 4, etc), not your SteamID", 10)
if self.credidentials is None:
self.cb_ip = ttk.Combobox(self.f_connection_info, width=30,
values=[])
else:
self.cb_ip = ttk.Combobox(self.f_connection_info, width=30,
values=self.credidentials["saved_ip"])
self.cb_ip.grid(row=0, column=1, padx=5, pady=2)
self.e_tcp_port = tkinter.Entry(self.f_connection_info, width=30)
self.e_tcp_port.grid(row=1, column=1, padx=5, pady=2)
self.e_udp_port = tkinter.Entry(self.f_connection_info, width=30)
self.e_udp_port.grid(row=2, column=1, padx=5, pady=2)
self.e_username = tkinter.Entry(self.f_connection_info, width=30)
self.e_username.grid(row=3, column=1, padx=5, pady=2)
Hovertip(self.e_username, "Your name in ACC", 10)
self.e_driverID = tkinter.Entry(self.f_connection_info, width=30)
self.e_driverID.grid(row=4, column=1, padx=5, pady=2)
Hovertip(self.e_driverID, "Driver ID for driver swap "
"(Driver 1, 2, 3, 4, etc), not your SteamID", 10)
self.b_connect = tkinter.Button(self, text="Connect",
command=self.connect)
self.b_connect.grid(row=1, padx=10, pady=5)
if self.credidentials is not None:
self.e_tcp_port.insert(tkinter.END, self.credidentials["tcp_port"])
self.e_udp_port.insert(tkinter.END, self.credidentials["udp_port"])
self.e_username.insert(tkinter.END, self.credidentials["username"])
self.e_driverID.insert(tkinter.END, self.credidentials["driverID"])
else:
self.e_tcp_port.insert(tkinter.END, "4269")
self.e_udp_port.insert(tkinter.END, "4270")
logging.info("Displaying connection window")
def set_as_server(self) -> None:
self.cb_ip.set("127.0.0.1")
self.cb_ip["state"] = "disabled"
self.as_server = True
def set_as_client(self) -> None:
self.cb_ip.set("")
self.cb_ip["state"] = "normal"
self.as_server = False
def connect(self) -> None:
logging.info("Connect button pressed")
self.b_connect.config(state="disabled")
error_message = ""
ip = None
try:
ip = ipaddress.ip_address(self.cb_ip.get()).compressed
except ValueError:
logging.info("Querrying dns server...")
try:
results = dns.resolver.resolve(self.cb_ip.get())
for result in results:
logging.info(f"Found ip: {result.address}")
logging.info(f"Picking first dns answer: {results[0].address}")
ip = results[0].address
except dns.resolver.NXDOMAIN:
error_message += "Invalide IP address or Domain name\n"
except dns.resolver.NoAnswer:
error_message += ("DNS didn't replied to the request"
f" for {self.cb_ip.get()}")
except dns.resolver.NoNameservers:
error_message += "No DNS server available"
except dns.resolver.YXDOMAIN:
error_message += ("The query name is too long after "
"DNAME substitution")
if self.e_tcp_port.get().isnumeric():
self.e_tcp_port.config(background="White")
else:
self.e_tcp_port.config(background="Red")
error_message += "Invalide TCP port\n"
if self.e_udp_port.get().isnumeric():
self.e_udp_port.config(background="White")
else:
self.e_udp_port.config(background="Red")
error_message += "Invalide UDP port\n"
if self.e_username.get() != "":
self.e_username.config(background="White")
else:
self.e_username.config(background="Red")
error_message += "Invalide username\n"
driverID = self.e_driverID.get()
if driverID != "" and driverID.isnumeric() and 0 < int(driverID) <= 5:
self.e_driverID.config(background="White")
else:
self.e_driverID.config(background="Red")
            if driverID.isnumeric() and not (1 <= int(driverID) <= 5):
error_message += ("Are you sure you are the driver N° "
f"{driverID} in your team ?")
else:
error_message += "Invalide driver ID\n"
if error_message == "":
logging.info("No error in the credidentials")
self.credits = Credidentials(
ip=ip,
tcp_port=int(self.e_tcp_port.get()),
udp_port=int(self.e_udp_port.get()),
username=self.e_username.get(),
driverID=int(self.e_driverID.get())
)
if self.as_server:
self.main_app.as_server(self.credits)
else:
self.main_app.connect_to_server(self.credits)
self.is_connected_loop.start(0.1)
logging.info("Waiting for connection confirmation")
else:
logging.info(f"Error: {error_message}")
messagebox.showerror("Error", error_message)
self.b_connect.config(state="normal")
def check_connection(self) -> None:
if self.is_connected is None:
return
if self.is_connected:
logging.info("Connected")
self.save_credidentials(self.credits)
else:
logging.info("Connection failed")
messagebox.showerror("Error", self.connection_msg)
self.b_connect.config(state="normal")
self.is_connected = None
self.is_connected_loop.stop()
def connected(self, succes: bool, error: str) -> None:
self.is_connected = succes
self.connection_msg = error
def save_credidentials(self, credits: Credidentials) -> None:
logging.info("Saving credidentials")
if self.credidentials is None:
saved_ip = [self.cb_ip.get()]
elif credits.ip not in self.credidentials["saved_ip"]:
saved_ip = [self.cb_ip.get(), *self.credidentials["saved_ip"]]
if len(saved_ip) > 5:
self.credidentials["saved_ip"].pop()
else:
saved_ip = self.credidentials["saved_ip"]
with open(self.connection_path, "w") as fp:
connection = {
"saved_ip": saved_ip,
"tcp_port": credits.tcp_port,
"udp_port": credits.udp_port,
"username": credits.username,
"driverID": credits.driverID,
}
json.dump(connection, fp, indent=4)
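        # Illustrative shape of the resulting Config/connection.json (example values):
        # {
        #     "saved_ip": ["127.0.0.1"],
        #     "tcp_port": 4269,
        #     "udp_port": 4270,
        #     "username": "Driver Name",
        #     "driverID": 1
        # }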
class App(tkinter.Tk):
def __init__(self) -> None:
tkinter.Tk.__init__(self)
tksupport.install(self)
self.geometry("830x580+0+0")
try:
with open("./Config/gui.json", "r") as fp:
self.gui_config = json.load(fp)
except FileNotFoundError:
print("APP: './Config/gui.json' not found.")
return
self.font = (self.gui_config["font"], self.gui_config["font_size"])
app_style = ttk.Style(self)
app_style.configure('.',
font=self.font,
background=self.gui_config["background_colour"],
foreground=self.gui_config["foreground_colour"])
app_style.configure('TNotebook.Tab', foreground="#000000")
app_style.configure('TButton', foreground="#000000")
app_style.configure('TCombobox', foreground="#000000")
app_style.configure("ActiveDriver.TLabel",
background=self.gui_config["active_driver_colour"])
app_style.configure("Users.TFrame", background="#000000")
app_style.configure("TelemetryGrid.TFrame", background="#000000")
app_style.configure("PressureInfo.TFrame", background="#000000")
app_style.configure("TEntry", foreground="#000000")
self.title(f"PyAccEngineer {_VERSION_}")
self.config(bg="Grey")
self.protocol("WM_DELETE_WINDOW", self.on_close)
# Networking
self.is_connected = False
self.client: Optional[ClientInstance] = None
self.server: Optional[ServerInstance] = None
self.net_queue = DataQueue([], [])
self.menu_bar = tkinter.Menu(self)
self.menu_bar.add_command(label="Connect",
command=self.show_connection_page,
font=self.font)
self.menu_bar.add_command(label="As Server",
command=lambda: self.show_connection_page(
True), font=self.font)
self.menu_bar.add_command(label="Disconnect",
command=self.disconnect, state="disabled",
font=self.font)
self.config(menu=self.menu_bar)
self.main_canvas = tkinter.Canvas(self)
self.main_frame = ttk.Frame(self)
self.hsb = ttk.Scrollbar(self)
self.vsb = ttk.Scrollbar(self)
self.main_canvas.config(xscrollcommand=self.hsb.set,
yscrollcommand=self.vsb.set,
highlightthickness=0)
self.hsb.config(orient=tkinter.HORIZONTAL,
command=self.main_canvas.xview)
self.vsb.config(orient=tkinter.VERTICAL,
command=self.main_canvas.yview)
self.hsb.pack(fill=tkinter.X, side=tkinter.BOTTOM,
expand=tkinter.FALSE)
self.vsb.pack(fill=tkinter.Y, side=tkinter.RIGHT,
expand=tkinter.FALSE)
self.main_canvas.pack(fill=tkinter.BOTH, side=tkinter.LEFT,
expand=tkinter.TRUE)
self.main_canvas.create_window(0, 0, window=self.main_frame,
anchor=tkinter.NW)
self.user_ui = UserUI(self.main_frame)
self.user_ui.grid(row=1, column=0)
self.tab_control = ttk.Notebook(self.main_frame)
self.tab_control.grid(row=0, column=0, pady=3)
self.f_connection_ui = ttk.Frame(self.tab_control)
self.f_connection_ui.pack(fill=tkinter.BOTH, expand=1)
self.connection_page = ConnectionPage(self, self.f_connection_ui)
self.connection_page.place(anchor=tkinter.CENTER,
in_=self.f_connection_ui,
relx=.5, rely=.5)
# Center StrategyUI in the notebook frame
f_strategy_ui = ttk.Frame(self.tab_control)
f_strategy_ui.pack(fill=tkinter.BOTH, expand=1)
self.strategy_ui = StrategyUI(f_strategy_ui, self.gui_config)
self.strategy_ui.place(anchor=tkinter.CENTER, in_=f_strategy_ui,
relx=.5, rely=.5)
self.telemetry_ui = TelemetryUI(self.tab_control)
self.telemetry_ui.pack(fill=tkinter.BOTH, side=tkinter.LEFT,
expand=tkinter.TRUE)
self.driver_inputs = DriverInputs(self.tab_control)
self.driver_inputs.pack(fill=tkinter.BOTH, side=tkinter.LEFT,
expand=tkinter.TRUE)
self.tyre_graph = TyreGraph(self.tab_control, self.gui_config)
self.tyre_graph.pack(fill=tkinter.BOTH, expand=1)
self.prev_lap_graph = PrevLapsGraph(self.tab_control, self.gui_config)
self.prev_lap_graph.pack(fill=tkinter.BOTH, expand=1)
self.tyre_sets = TyreSets(self.tab_control, self.gui_config)
self.tyre_sets.pack(fill=tkinter.BOTH, expand=1)
self.tab_control.add(self.f_connection_ui, text="Connection")
self.tab_control.add(f_strategy_ui, text="Strategy")
self.tab_control.add(self.telemetry_ui, text="Telemetry")
self.tab_control.add(self.driver_inputs, text="Driver Inputs")
self.tab_control.add(self.tyre_graph, text="Pressures")
self.tab_control.add(self.prev_lap_graph, text="Previous Laps")
self.tab_control.add(self.tyre_sets, text="Tyre sets")
self.tab_control.hide(0)
self.last_time = time.time()
self.rt_last_time = time.time()
self.rt_min_delta = self.gui_config["driver_input_speed"]
self.min_delta = 0.5
self.last_telemetry = time.time()
self.telemetry_timeout = 2
logging.info("Main UI created.")
self.client_loopCall = task.LoopingCall(self.client_loop)
self.client_loopCall.start(0.01)
self.eval('tk::PlaceWindow . center')
self.updateScrollRegion()
def updateScrollRegion(self):
self.main_canvas.update_idletasks()
self.main_canvas.config(scrollregion=self.main_frame.bbox())
def client_loop(self) -> None:
selected_tab_name = self.tab_control.tab(self.tab_control.select(),
"text")
if selected_tab_name == "Driver Inputs":
if not self.driver_inputs.is_animating:
self.driver_inputs.start_animation()
else:
if self.driver_inputs.is_animating:
self.driver_inputs.stop_animation()
if selected_tab_name == "Pressures":
if not self.tyre_graph.is_animating:
self.tyre_graph.start_animation()
else:
if self.tyre_graph.is_animating:
self.tyre_graph.stop_animation()
for element in self.net_queue.q_out:
if element.data_type == NetworkQueue.ConnectionReply:
logging.info("Received Connection reply for server")
                success = bool(element.data[0])
                msg_length = element.data[1]
                msg = element.data[2:2 + msg_length]
                self.connection_page.connected(success, msg)
                self.mb_connected(success)
                self.is_connected = success
                if not success:
                    self.client.close()
elif element.data_type == NetworkQueue.ServerData:
server_data = CarInfo.from_bytes(element.data)
is_first_update = self.strategy_ui.server_data is None
self.strategy_ui.server_data = server_data
if is_first_update:
self.strategy_ui.update_values()
elif element.data_type == NetworkQueue.Strategy:
logging.info("Received: Strategy")
self.strategy_ui.b_set_strat.config(state="disabled")
asm_data = self.strategy_ui.asm.read_shared_memory()
pit_stop = PitStop.from_bytes(element.data)
self.strategy_ui.save_strategy(pit_stop)
if asm_data is not None:
self.strategy_ui.apply_strategy(pit_stop)
elif element.data_type == NetworkQueue.StategyHistory:
self.strategy_ui.clear_strategy_history()
strategy_count = element.data[0]
byte_index = 1
for _ in range(strategy_count):
strat = PitStop.from_bytes(element.data[byte_index:])
self.strategy_ui.save_strategy(strat)
byte_index += PitStop.byte_size
elif element.data_type == NetworkQueue.StrategyDone:
logging.info("Received: Strategy Done")
self.strategy_ui.b_set_strat.config(state="normal")
self.strategy_ui.update_values()
elif element.data_type == NetworkQueue.Telemetry:
telemetry, err = Telemetry.from_bytes(element.data)
if (telemetry is None):
messagebox.showerror("Unexpected error", err)
self.on_close()
return
self.telemetry_ui.update_values(telemetry)
self.tyre_graph.update_data(telemetry)
self.strategy_ui.updade_telemetry_data(telemetry)
self.driver_inputs.update_lap(telemetry.lap)
if not self.strategy_ui.is_driver_active:
self.strategy_ui.is_driver_active = True
self.user_ui.set_active(telemetry.driver)
self.last_telemetry = time.time()
elif element.data_type == NetworkQueue.TelemetryRT:
telemetry = TelemetryRT.from_bytes(element.data)
self.driver_inputs.update_values(telemetry)
elif element.data_type == NetworkQueue.UpdateUsers:
logging.info("Received user update")
user_update = element.data
nb_users = user_update[0]
self.user_ui.reset()
self.strategy_ui.reset_drivers()
index = 1
for _ in range(nb_users):
                    length = user_update[index]
                    index += 1
                    name = user_update[index:index+length].decode("utf-8")
                    index += length
driverID = user_update[index]
index += 1
self.user_ui.add_user(name, driverID)
self.strategy_ui.add_driver(name, driverID)
elif element.data_type == NetworkQueue.TyreSets:
data = zlib.decompress(element.data)
tyres_data = []
nb_of_set = data[0]
byte_index = 1
for _ in range(nb_of_set):
tyre_info = TyresSetData.from_bytes(
data[byte_index:byte_index+TyresSetData.byte_size])
tyres_data.append(tyre_info)
byte_index += TyresSetData.byte_size
self.tyre_sets.update_tyre_set_data(tyres_data)
self.net_queue.q_out.clear()
if not self.is_connected:
return
if not self.strategy_ui.is_connected:
self.strategy_ui.is_connected = True
if self.telemetry_ui.driver_swap or self.user_ui.active_user is None:
if self.telemetry_ui.current_driver is not None:
self.user_ui.set_active(self.telemetry_ui.current_driver)
self.telemetry_ui.driver_swap = False
self.strategy_ui.set_driver(self.telemetry_ui.current_driver)
rt_delta_time = time.time() - self.rt_last_time
delta_time = time.time() - self.last_time
if (self.strategy_ui.is_driver_active and
time.time() > self.last_telemetry + self.telemetry_timeout):
logging.info("Telemetry timeout, not received "
f"telemetry for {self.telemetry_timeout}s")
self.strategy_ui.is_driver_active = False
self.user_ui.remove_active()
self.telemetry_ui.current_driver = None
asm_data = self.strategy_ui.asm.read_shared_memory()
if asm_data is not None:
if self.rt_min_delta < rt_delta_time:
self.rt_last_time = time.time()
telemetry_rt = TelemetryRT(
asm_data.Physics.gas,
asm_data.Physics.brake,
asm_data.Physics.steer_angle,
asm_data.Physics.gear,
asm_data.Physics.speed_kmh
)
self.net_queue.q_in.append(NetData(NetworkQueue.TelemetryRT,
telemetry_rt.to_bytes()))
if self.min_delta < delta_time:
self.last_time = time.time()
infos = CarInfo(
*astuple(asm_data.Graphics.mfd_tyre_pressure),
asm_data.Graphics.mfd_fuel_to_add,
asm_data.Static.max_fuel,
asm_data.Graphics.mfd_tyre_set)
self.net_queue.q_in.append(NetData(NetworkQueue.CarInfoData,
infos.to_bytes()))
# Telemetry
name = asm_data.Static.player_name.split("\x00")[0]
surname = asm_data.Static.player_surname.split("\x00")[0]
driver = f"{name} {surname}"
telemetry_data = Telemetry(
driver,
asm_data.Graphics.completed_lap,
asm_data.Physics.fuel,
asm_data.Graphics.fuel_per_lap,
asm_data.Graphics.fuel_estimated_laps,
asm_data.Physics.pad_life,
asm_data.Physics.disc_life,
asm_data.Graphics.current_time,
asm_data.Graphics.best_time,
asm_data.Graphics.last_time,
asm_data.Graphics.is_in_pit,
asm_data.Graphics.is_in_pit_lane,
asm_data.Graphics.session_type,
asm_data.Graphics.driver_stint_time_left,
asm_data.Physics.wheel_pressure,
asm_data.Physics.tyre_core_temp,
asm_data.Physics.brake_temp,
asm_data.Graphics.rain_tyres,
asm_data.Graphics.session_time_left,
asm_data.Graphics.track_grip_status,
asm_data.Physics.front_brake_compound,
asm_data.Physics.rear_brake_compound,
asm_data.Physics.car_damage,
asm_data.Graphics.rain_intensity,
asm_data.Physics.suspension_damage,
asm_data.Graphics.current_sector_index,
asm_data.Graphics.last_sector_time,
asm_data.Graphics.is_valid_lap,
asm_data.Physics.air_temp,
asm_data.Physics.road_temp,
asm_data.Graphics.wind_speed,
asm_data.Graphics.driver_stint_total_time_left,
asm_data.Graphics.current_tyre_set,
)
self.net_queue.q_in.append(NetData(NetworkQueue.Telemetry,
telemetry_data.to_bytes()))
if self.strategy_ui.strategy is not None:
logging.info("Sending strategy")
strategy = self.strategy_ui.strategy
self.strategy_ui.strategy = None
self.net_queue.q_in.append(NetData(NetworkQueue.StrategySet,
strategy.to_bytes()))
if self.strategy_ui.strategy_ok:
logging.info("Send strategy Done")
self.net_queue.q_in.append(NetData(NetworkQueue.StrategyDone))
self.strategy_ui.strategy_ok = False
if self.tyre_sets.updated:
data = b""
data += struct.pack("!B", len(self.tyre_sets.tyres_data))
for tyre_set in self.tyre_sets.tyres_data:
data += tyre_set.to_bytes()
data_compressed = zlib.compress(data)
print(f"{len(data)} vs {len(data_compressed)}")
self.net_queue.q_in.append(NetData(NetworkQueue.TyreSets,
data_compressed))
self.tyre_sets.updated = False
logging.info("Sending tyre set data")
def show_connection_page(self, as_server: bool = False) -> None:
logging.info("Show connection page")
self.tab_control.add(self.f_connection_ui, text="Connection")
self.tab_control.select(0)
if as_server:
self.connection_page.set_as_server()
else:
self.connection_page.set_as_client()
    def connect_to_server(self, credentials: Credidentials) -> None:
        logging.info("Creating a ClientInstance connecting"
                     f" to {credentials.ip}:{credentials.tcp_port}")
        self.client = ClientInstance(credentials, self.net_queue)
    def as_server(self, credentials: Credidentials) -> Tuple[bool, str]:
        logging.info("Creating a ServerInstance")
        self.server = ServerInstance(credentials.tcp_port, credentials.udp_port)
        self.connect_to_server(credentials)
def mb_connected(self, state: bool) -> None:
if state:
self.menu_bar.entryconfig("Disconnect", state="active")
self.menu_bar.entryconfig("Connect", state="disabled")
self.menu_bar.entryconfig("As Server", state="disabled")
self.tab_control.hide(0)
else:
self.menu_bar.entryconfig("Disconnect", state="disabled")
self.menu_bar.entryconfig("Connect", state="active")
self.menu_bar.entryconfig("As Server", state="active")
def disconnect(self) -> None:
logging.info("Disconnecting")
self.stop_networking()
self.mb_connected(False)
self.strategy_ui.reset()
self.user_ui.reset()
self.tyre_graph.reset()
def stop_networking(self) -> None:
if self.is_connected:
self.client.close()
self.is_connected = False
logging.info("Client stopped.")
if self.server is not None:
self.server.close()
self.server = None
logging.info("Server stopped.")
def on_close(self) -> None:
logging.info("Closing the app")
self.strategy_ui.close()
self.tyre_graph.close()
self.prev_lap_graph.close()
self.tyre_sets.close()
self.disconnect()
self.client_loopCall.stop()
tksupport.uninstall()
reactor.stop()
self.destroy()
logging.info("App closed")
def create_gui() -> None:
App()
def main():
reactor.callLater(0, create_gui)
reactor.run()
if __name__ == "__main__":
main()
|
the-stack_0_11140 | from gremlin_python.driver import client, serializer
import sys, traceback
_gremlin_cleanup_graph = "g.V().drop()"
_gremlin_insert_vertices = [
"g.addV('person').property('id', 'thomas').property('firstName', 'Thomas').property('age', 44)",
"g.addV('person').property('id', 'mary').property('firstName', 'Mary').property('lastName', 'Andersen').property('age', 39)",
"g.addV('person').property('id', 'ben').property('firstName', 'Ben').property('lastName', 'Miller')",
"g.addV('person').property('id', 'robin').property('firstName', 'Robin').property('lastName', 'Wakefield')"
]
_gremlin_insert_edges = [
"g.V('thomas').addE('knows').to(g.V('mary'))",
"g.V('thomas').addE('knows').to(g.V('ben'))",
"g.V('ben').addE('knows').to(g.V('robin'))"
]
_gremlin_update_vertices = [
"g.V('thomas').property('age', 44)"
]
_gremlin_count_vertices = "g.V().count()"
_gremlin_traversals = {
"Get all persons older than 40" : "g.V().hasLabel('person').has('age', gt(40)).values('firstName', 'age')",
"Get all persons and their first name" : "g.V().hasLabel('person').values('firstName')",
"Get all persons sorted by first name" : "g.V().hasLabel('person').order().by('firstName', incr).values('firstName')",
"Get all persons that Thomas knows" : "g.V('thomas').out('knows').hasLabel('person').values('firstName')",
"People known by those who Thomas knows" : "g.V('thomas').out('knows').hasLabel('person').out('knows').hasLabel('person').values('firstName')",
"Get the path from Thomas to Robin" : "g.V('thomas').repeat(out()).until(has('id', 'robin')).path().by('firstName')"
}
_gremlin_drop_operations = {
"Drop Edge - Thomas no longer knows Mary" : "g.V('thomas').outE('knows').where(inV().has('id', 'mary')).drop()",
"Drop Vertex - Drop Thomas" : "g.V('thomas').drop()"
}
def cleanup_graph(client):
print("\tRunning this Gremlin query:\n\t{0}".format(_gremlin_cleanup_graph))
callback = client.submitAsync(_gremlin_cleanup_graph)
if callback.result() is not None:
print("\tCleaned up the graph!")
print("\n")
def insert_vertices(client):
for query in _gremlin_insert_vertices:
print("\tRunning this Gremlin query:\n\t{0}\n".format(query))
callback = client.submitAsync(query)
if callback.result() is not None:
print("\tInserted this vertex:\n\t{0}\n".format(callback.result().one()))
else:
print("Something went wrong with this query: {0}".format(query))
print("\n")
def insert_edges(client):
for query in _gremlin_insert_edges:
print("\tRunning this Gremlin query:\n\t{0}\n".format(query))
callback = client.submitAsync(query)
if callback.result() is not None:
print("\tInserted this edge:\n\t{0}\n".format(callback.result().one()))
else:
print("Something went wrong with this query:\n\t{0}".format(query))
print("\n")
def update_vertices(client):
for query in _gremlin_update_vertices:
print("\tRunning this Gremlin query:\n\t{0}\n".format(query))
callback = client.submitAsync(query)
if callback.result() is not None:
print("\tUpdated this vertex:\n\t{0}\n".format(callback.result().one()))
else:
print("Something went wrong with this query:\n\t{0}".format(query))
print("\n")
def count_vertices(client):
print("\tRunning this Gremlin query:\n\t{0}".format(_gremlin_count_vertices))
callback = client.submitAsync(_gremlin_count_vertices)
if callback.result() is not None:
print("\tCount of vertices: {0}".format(callback.result().one()))
else:
print("Something went wrong with this query: {0}".format(_gremlin_count_vertices))
print("\n")
def execute_traversals(client):
for key in _gremlin_traversals:
print("\t{0}:".format(key))
print("\tRunning this Gremlin query:\n\t{0}\n".format(_gremlin_traversals[key]))
callback = client.submitAsync(_gremlin_traversals[key])
for result in callback.result():
print("\t{0}".format(str(result)))
print("\n")
def execute_drop_operations(client):
for key in _gremlin_drop_operations:
print("\t{0}:".format(key))
print("\tRunning this Gremlin query:\n\t{0}".format(_gremlin_drop_operations[key]))
callback = client.submitAsync(_gremlin_drop_operations[key])
for result in callback.result():
print(result)
print("\n")
try:
client = client.Client('https://localhost:8901','g',
username="/dbs/Employee/colls/Employee",
password="C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==",
message_serializer=serializer.GraphSONSerializersV2d0()
)
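    # The endpoint and key above appear to be the published defaults of the
    # local Azure Cosmos DB emulator (Gremlin endpoint on port 8901), so no
    # private secret is exposed here; point them at your own account and key
    # before running against a real deployment.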
print("Welcome to Azure Cosmos DB + Gremlin on Python!")
# Drop the entire Graph
input("We're about to drop whatever graph is on the server. Press any key to continue...")
cleanup_graph(client)
# Insert all vertices
input("Let's insert some vertices into the graph. Press any key to continue...")
insert_vertices(client)
# Create edges between vertices
input("Now, let's add some edges between the vertices. Press any key to continue...")
insert_edges(client)
# Update a couple of vertices
input("Ah, sorry. I made a mistake. Let's change the ages of these two vertices. Press any key to continue...")
update_vertices(client)
# Count all vertices
input("Okay. Let's count how many vertices we have. Press any key to continue...")
count_vertices(client)
# Execute traversals and get results
input("Cool! Let's run some traversals on our graph. Press any key to continue...")
execute_traversals(client)
# Drop a few vertices and edges
input("So, life happens and now we will make some changes to the graph. Press any key to continue...")
execute_drop_operations(client)
# Count all vertices again
input("How many vertices do we have left? Press any key to continue...")
count_vertices(client)
except Exception as e:
print('There was an exception: {0}'.format(e))
traceback.print_exc(file=sys.stdout)
sys.exit(1)
print("\nAnd that's all! Sample complete")
input("Press Enter to continue...")
|
the-stack_0_11141 | import pandas as pd
from finvizfinance.util import webScrap, numberCovert, NUMBER_COL, util_dict
BASE_URL = 'https://finviz.com/screener.ashx?v={screener}{filter}&ft=4&o={order}&r={row}'
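# For instance, the overview screener (code '111') with no filters, ordered by
# ticker and starting at the first row, expands to roughly:
#   https://finviz.com/screener.ashx?v=111&ft=4&o=ticker&r=1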
FILTER_DICT = util_dict['filter']
def set_filters(filters_dict):
"""Set filters.
Args:
filters_dict(dict): dictionary of filters
Returns:
url_filter(str): filter string for url
"""
filters = []
for key, value in filters_dict.items():
if key not in FILTER_DICT:
filter_keys = list(FILTER_DICT.keys())
raise ValueError("Invalid filter '{}'. Possible filter: {}".format(key, filter_keys))
if value not in FILTER_DICT[key]['option']:
filter_options = list(FILTER_DICT[key]['option'].keys())
raise ValueError("Invalid filter option '{}'. Possible filter options: {}".format(value,
filter_options))
prefix = FILTER_DICT[key]['prefix']
urlcode = FILTER_DICT[key]['option'][value]
if urlcode != '':
filters.append('{}_{}'.format(prefix, urlcode))
url_filter = ''
if len(filters) != 0:
url_filter = '&f=' + ','.join(filters)
return url_filter
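# Usage sketch (filter names/values taken from the demo at the bottom of this
# module; the exact URL fragment depends on util_dict['filter'] and is only
# illustrative here):
#
#   url_filter = set_filters({'Exchange': 'AMEX', 'Sector': 'Basic Materials'})
#   # -> something like '&f=exch_amex,sec_basicmaterials'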
def screener_helper(rows, num_col_index, table_header):
"""Get screener table helper function.
Returns:
df(pandas.DataFrame): screener information table
"""
    rows = rows[1:]
    records = []
    for row in rows:
        cols = row.findAll('td')[1:]
        info_dict = {}
        for i, col in enumerate(cols):
            # convert the cell to a number when the column is numeric
            if i not in num_col_index:
                info_dict[table_header[i]] = col.text
            else:
                info_dict[table_header[i]] = numberCovert(col.text)
        records.append(info_dict)
    # build the DataFrame once instead of appending row by row
    return pd.DataFrame(records, columns=table_header)
def get_screener(screener, filters=None, order='ticker', page=1, ascend=True):
    '''get_screener
    Get a screener table from the finviz website.
    Args:
        screener(str): screener type ('overview', 'financial', 'ownership',
            'performance', 'technical' or 'valuation')
        filters(dict): dictionary of filters
        order(str): column used to order the dataframe
        page(int): page number
        ascend(bool): sort ascending if True, descending otherwise
    Returns:
        df(pandas.DataFrame): screener information table
        page(int): total number of pages available
    '''
    screener_codes = {
        'overview': '111',
        'financial': '161',
        'ownership': '131',
        'performance': '141',
        'technical': '171',
        'valuation': '121',
    }
    screener = screener.lower()
    if screener not in screener_codes:
        raise ValueError("Invalid screener '{}'. Possible screeners: {}".format(
            screener, list(screener_codes.keys())))
    screener_code = screener_codes[screener]
# get url
url_filter = ''
if filters:
url_filter = set_filters(filters)
url_order = order
if not ascend:
url_order = '-' + order
url_row = (page - 1) * 20 + 1
url = BASE_URL.format(screener=screener_code, filter=url_filter, order=url_order, row=url_row)
    # scrape the website
soup = webScrap(url)
page = len(soup.findAll('table')[17].findAll('option'))
if page == 0:
print('No information found.')
return None, 0
table = soup.findAll('table')[18]
rows = table.findAll('tr')
table_header = [i.text for i in rows[0].findAll('td')][1:]
num_col_index = [table_header.index(i) for i in table_header if i in NUMBER_COL]
df = screener_helper(rows, num_col_index, table_header)
return df, page
if __name__ == '__main__':
filters_dict = {'Exchange':'AMEX','Sector':'Basic Materials'}
df, page = get_screener('Overview', filters=filters_dict, order='company', page=3, ascend=False)
print(df)
print(page)
|
the-stack_0_11145 | # pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
import os
from collections import Counter
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from glue.config import colormaps
from glue.core.message import SubsetUpdateMessage
from glue.core import HubListener, Data
from glue.core.roi import XRangeROI, RectangularROI, CircularROI
from glue.core.subset import RoiSubsetState, AndState
from glue import core
from glue.core.component_id import ComponentID
from glue.utils.qt import combo_as_string, process_events
from glue.viewers.matplotlib.qt.tests.test_data_viewer import BaseTestMatplotlibDataViewer
from glue.core.state import GlueUnSerializer
from glue.app.qt.layer_tree_widget import LayerTreeWidget
from glue.app.qt import GlueApplication
from ..data_viewer import ScatterViewer
DATA = os.path.join(os.path.dirname(__file__), 'data')
class TestScatterCommon(BaseTestMatplotlibDataViewer):
def init_data(self):
return Data(label='d1', x=[3.4, 2.3, -1.1, 0.3], y=['a', 'b', 'c', 'a'])
viewer_cls = ScatterViewer
class TestScatterViewer(object):
def setup_method(self, method):
self.data = Data(label='d1', x=[3.4, 2.3, -1.1, 0.3],
y=[3.2, 3.3, 3.4, 3.5], z=['a', 'b', 'c', 'a'])
self.data_2d = Data(label='d2', a=[[1, 2], [3, 4]], b=[[5, 6], [7, 8]],
x=[[3, 5], [5.4, 1]], y=[[1.2, 4], [7, 8]])
self.app = GlueApplication()
self.session = self.app.session
self.hub = self.session.hub
self.data_collection = self.session.data_collection
self.data_collection.append(self.data)
self.data_collection.append(self.data_2d)
self.viewer = self.app.new_data_viewer(ScatterViewer)
def teardown_method(self, method):
self.viewer.close()
self.viewer = None
self.app.close()
self.app = None
def test_basic(self):
viewer_state = self.viewer.state
# Check defaults when we add data
self.viewer.add_data(self.data)
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == 'Main components:x:y:z:Coordinate components:Pixel Axis 0 [x]'
assert combo_as_string(self.viewer.options_widget().ui.combosel_y_att) == 'Main components:x:y:z:Coordinate components:Pixel Axis 0 [x]'
assert viewer_state.x_att is self.data.id['x']
assert_allclose(viewer_state.x_min, -1.1 - 0.18)
assert_allclose(viewer_state.x_max, 3.4 + 0.18)
assert viewer_state.y_att is self.data.id['y']
assert_allclose(viewer_state.y_min, 3.2 - 0.012)
assert_allclose(viewer_state.y_max, 3.5 + 0.012)
assert not viewer_state.x_log
assert not viewer_state.y_log
assert len(viewer_state.layers) == 1
# Change to categorical component and check new values
viewer_state.y_att = self.data.id['z']
assert viewer_state.x_att is self.data.id['x']
assert_allclose(viewer_state.x_min, -1.1 - 0.18)
assert_allclose(viewer_state.x_max, 3.4 + 0.18)
assert viewer_state.y_att is self.data.id['z']
assert_allclose(viewer_state.y_min, -0.5 - 0.12)
assert_allclose(viewer_state.y_max, 2.5 + 0.12)
assert not viewer_state.x_log
assert not viewer_state.y_log
def test_flip(self):
viewer_state = self.viewer.state
self.viewer.add_data(self.data)
assert_allclose(viewer_state.x_min, -1.1 - 0.18)
assert_allclose(viewer_state.x_max, 3.4 + 0.18)
self.viewer.options_widget().button_flip_x.click()
assert_allclose(viewer_state.x_max, -1.1 - 0.18)
assert_allclose(viewer_state.x_min, 3.4 + 0.18)
assert_allclose(viewer_state.y_min, 3.2 - 0.012)
assert_allclose(viewer_state.y_max, 3.5 + 0.012)
self.viewer.options_widget().button_flip_y.click()
assert_allclose(viewer_state.y_max, 3.2 - 0.012)
assert_allclose(viewer_state.y_min, 3.5 + 0.012)
def test_remove_data(self):
self.viewer.add_data(self.data)
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == 'Main components:x:y:z:Coordinate components:Pixel Axis 0 [x]'
assert combo_as_string(self.viewer.options_widget().ui.combosel_y_att) == 'Main components:x:y:z:Coordinate components:Pixel Axis 0 [x]'
self.data_collection.remove(self.data)
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == ''
assert combo_as_string(self.viewer.options_widget().ui.combosel_y_att) == ''
def test_update_component_updates_title(self):
self.viewer.add_data(self.data)
assert self.viewer.windowTitle() == '2D Scatter'
self.viewer.state.x_att = self.data.id['y']
assert self.viewer.windowTitle() == '2D Scatter'
def test_combo_updates_with_component_add(self):
self.viewer.add_data(self.data)
self.data.add_component([3, 4, 1, 2], 'a')
assert self.viewer.state.x_att is self.data.id['x']
assert self.viewer.state.y_att is self.data.id['y']
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == 'Main components:x:y:z:a:Coordinate components:Pixel Axis 0 [x]'
assert combo_as_string(self.viewer.options_widget().ui.combosel_y_att) == 'Main components:x:y:z:a:Coordinate components:Pixel Axis 0 [x]'
def test_nonnumeric_first_component(self):
# regression test for #208. Shouldn't complain if
# first component is non-numerical
data = core.Data()
data.add_component(['a', 'b', 'c'], label='c1')
data.add_component([1, 2, 3], label='c2')
self.data_collection.append(data)
self.viewer.add_data(data)
def test_apply_roi(self):
self.viewer.add_data(self.data)
roi = RectangularROI(0, 3, 3.25, 3.45)
assert len(self.viewer.layers) == 1
self.viewer.apply_roi(roi)
assert len(self.viewer.layers) == 2
assert len(self.data.subsets) == 1
assert_allclose(self.data.subsets[0].to_mask(), [0, 1, 0, 0])
state = self.data.subsets[0].subset_state
assert isinstance(state, RoiSubsetState)
def test_apply_roi_categorical(self):
viewer_state = self.viewer.state
self.viewer.add_data(self.data)
viewer_state.y_att = self.data.id['z']
roi = RectangularROI(0, 3, -0.4, 0.3)
assert len(self.viewer.layers) == 1
self.viewer.apply_roi(roi)
assert len(self.viewer.layers) == 2
assert len(self.data.subsets) == 1
assert_allclose(self.data.subsets[0].to_mask(), [0, 0, 0, 1])
state = self.data.subsets[0].subset_state
assert isinstance(state, AndState)
def test_apply_roi_empty(self):
# Make sure that doing an ROI selection on an empty viewer doesn't
        # produce error messages
roi = XRangeROI(-0.2, 0.1)
self.viewer.apply_roi(roi)
def test_axes_labels(self):
viewer_state = self.viewer.state
self.viewer.add_data(self.data)
assert self.viewer.axes.get_xlabel() == 'x'
assert self.viewer.axes.get_ylabel() == 'y'
viewer_state.x_log = True
assert self.viewer.axes.get_xlabel() == 'Log x'
assert self.viewer.axes.get_ylabel() == 'y'
viewer_state.x_att = self.data.id['y']
assert self.viewer.axes.get_xlabel() == 'y'
assert self.viewer.axes.get_ylabel() == 'y'
viewer_state.y_log = True
assert self.viewer.axes.get_xlabel() == 'y'
assert self.viewer.axes.get_ylabel() == 'Log y'
def test_component_replaced(self):
# regression test for 508 - if a component ID is replaced, we should
# make sure that the component ID is selected if the old component ID
# was selected
self.viewer.add_data(self.data)
self.viewer.state.x_att = self.data.id['x']
test = ComponentID('test')
self.data.update_id(self.viewer.state.x_att, test)
assert self.viewer.state.x_att is test
assert combo_as_string(self.viewer.options_widget().ui.combosel_x_att) == 'Main components:test:y:z:Coordinate components:Pixel Axis 0 [x]'
def test_nan_component(self):
# regression test for case when all values are NaN in a component
data = core.Data()
data.add_component([np.nan, np.nan, np.nan], label='c1')
self.data_collection.append(data)
self.viewer.add_data(data)
def test_density_map(self):
kwargs = dict(range=[(-5, 5), (-5, 5)], bins=(2, 2))
self.viewer.add_data(self.data)
self.viewer.state.layers[0].points_mode = 'auto'
assert self.viewer.layers[0].state.compute_density_map(**kwargs).sum() == 0
self.viewer.state.layers[0].points_mode = 'density'
assert self.viewer.layers[0].state.compute_density_map(**kwargs).sum() == 4
self.viewer.state.layers[0].points_mode = 'markers'
assert self.viewer.layers[0].state.compute_density_map(**kwargs).sum() == 0
def test_density_map_color(self):
# Regression test to make sure things don't crash when changing
# back to markers if the color mode is cmap
self.viewer.add_data(self.data)
self.viewer.state.layers[0].points_mode = 'density'
self.viewer.state.layers[0].cmap_mode = 'Linear'
self.viewer.state.layers[0].size_mode = 'Linear'
self.viewer.state.layers[0].points_mode = 'markers'
self.viewer.state.layers[0].points_mode = 'density'
@pytest.mark.parametrize('protocol', [0, 1])
def test_session_back_compat(self, protocol):
filename = os.path.join(DATA, 'scatter_v{0}.glu'.format(protocol))
with open(filename, 'r') as f:
session = f.read()
state = GlueUnSerializer.loads(session)
ga = state.object('__main__')
dc = ga.session.data_collection
assert len(dc) == 1
assert dc[0].label == 'basic'
viewer1 = ga.viewers[0][0]
assert len(viewer1.state.layers) == 3
assert viewer1.state.x_att is dc[0].id['a']
assert viewer1.state.y_att is dc[0].id['b']
assert_allclose(viewer1.state.x_min, -1.04)
assert_allclose(viewer1.state.x_max, 1.04)
assert_allclose(viewer1.state.y_min, 1.98)
assert_allclose(viewer1.state.y_max, 3.02)
assert not viewer1.state.x_log
assert not viewer1.state.y_log
assert viewer1.state.layers[0].visible
assert viewer1.state.layers[1].visible
assert viewer1.state.layers[2].visible
viewer2 = ga.viewers[0][1]
assert len(viewer2.state.layers) == 3
assert viewer2.state.x_att is dc[0].id['a']
assert viewer2.state.y_att is dc[0].id['c']
assert_allclose(viewer2.state.x_min, 9.5e-6)
assert_allclose(viewer2.state.x_max, 1.05)
assert_allclose(viewer2.state.y_min, 0.38)
assert_allclose(viewer2.state.y_max, 5.25)
assert viewer2.state.x_log
assert viewer2.state.y_log
assert viewer2.state.layers[0].visible
assert not viewer2.state.layers[1].visible
assert viewer2.state.layers[2].visible
viewer3 = ga.viewers[0][2]
assert len(viewer3.state.layers) == 3
assert viewer3.state.x_att is dc[0].id['b']
assert viewer3.state.y_att is dc[0].id['a']
assert_allclose(viewer3.state.x_min, 0)
assert_allclose(viewer3.state.x_max, 5)
assert_allclose(viewer3.state.y_min, -5)
assert_allclose(viewer3.state.y_max, 5)
assert not viewer3.state.x_log
assert not viewer3.state.y_log
assert viewer3.state.layers[0].visible
assert viewer3.state.layers[1].visible
assert not viewer3.state.layers[2].visible
ga.close()
def test_session_line_back_compat(self):
# Backward-compatibility for v0.11 files in which the line and scatter
# plots were defined as separate styles.
filename = os.path.join(DATA, 'scatter_and_line_v1.glu')
with open(filename, 'r') as f:
session = f.read()
state = GlueUnSerializer.loads(session)
ga = state.object('__main__')
dc = ga.session.data_collection
assert len(dc) == 1
assert dc[0].label == 'table'
viewer1 = ga.viewers[0][0]
assert len(viewer1.state.layers) == 1
assert viewer1.state.x_att is dc[0].id['a']
assert viewer1.state.y_att is dc[0].id['b']
assert viewer1.state.layers[0].markers_visible
assert not viewer1.state.layers[0].line_visible
viewer1 = ga.viewers[0][1]
assert len(viewer1.state.layers) == 1
assert viewer1.state.x_att is dc[0].id['a']
assert viewer1.state.y_att is dc[0].id['b']
assert not viewer1.state.layers[0].markers_visible
assert viewer1.state.layers[0].line_visible
ga.close()
def test_save_svg(self, tmpdir):
# Regression test for a bug in AxesCache that caused SVG saving to
# fail (because renderer.buffer_rgba did not exist)
self.viewer.add_data(self.data)
filename = tmpdir.join('test.svg').strpath
self.viewer.axes.figure.savefig(filename)
def test_2d(self):
viewer_state = self.viewer.state
self.viewer.add_data(self.data_2d)
assert viewer_state.x_att is self.data_2d.id['a']
assert_allclose(viewer_state.x_min, 1 - 0.12)
assert_allclose(viewer_state.x_max, 4 + 0.12)
assert viewer_state.y_att is self.data_2d.id['b']
assert_allclose(viewer_state.y_min, 5 - 0.12)
assert_allclose(viewer_state.y_max, 8 + 0.12)
assert self.viewer.layers[0].plot_artist.get_xdata().shape == (4,)
def test_apply_roi_single(self):
# Regression test for a bug that caused mode.update to be called
# multiple times and resulted in all other viewers receiving many
        # messages regarding subset updates (this occurred when multiple
        # datasets were present).
layer_tree = LayerTreeWidget(session=self.session)
layer_tree.set_checkable(False)
layer_tree.setup(self.data_collection)
layer_tree.bind_selection_to_edit_subset()
class Client(HubListener):
def __init__(self, *args, **kwargs):
super(Client, self).__init__(*args, **kwargs)
self.count = Counter()
def ping(self, message):
self.count[message.sender] += 1
def register_to_hub(self, hub):
hub.subscribe(self, SubsetUpdateMessage, handler=self.ping)
d1 = Data(a=[1, 2, 3], label='d3')
d2 = Data(b=[1, 2, 3], label='d4')
d3 = Data(c=[1, 2, 3], label='d5')
d4 = Data(d=[1, 2, 3], label='d6')
self.data_collection.append(d1)
self.data_collection.append(d2)
self.data_collection.append(d3)
self.data_collection.append(d4)
client = Client()
client.register_to_hub(self.hub)
self.viewer.add_data(d1)
self.viewer.add_data(d3)
roi = XRangeROI(2.5, 3.5)
self.viewer.apply_roi(roi)
for subset in client.count:
assert client.count[subset] == 1
@pytest.mark.parametrize('ndim', [1, 2])
def test_all_options(self, ndim):
# This test makes sure that all the code for the different scatter modes
# gets run, though does not check the result.
viewer_state = self.viewer.state
if ndim == 1:
data = self.data
elif ndim == 2:
data = self.data_2d
self.viewer.add_data(data)
layer_state = viewer_state.layers[0]
layer_state.style = 'Scatter'
layer_state.size_mode = 'Linear'
layer_state.size_att = data.id['y']
layer_state.size_vmin = 1.2
layer_state.size_vmax = 4.
layer_state.size_scaling = 2
layer_state.cmap_mode = 'Linear'
layer_state.cmap_att = data.id['x']
layer_state.cmap_vmin = -1
layer_state.cmap_vmax = 2.
layer_state.cmap = colormaps.members[3][1]
# Check inverting works
layer_state.cmap_vmin = 3.
layer_state.size_mode = 'Fixed'
layer_state.xerr_visible = True
layer_state.xerr_att = data.id['x']
layer_state.yerr_visible = True
layer_state.yerr_att = data.id['y']
layer_state.style = 'Line'
layer_state.linewidth = 3
layer_state.linestyle = 'dashed'
def test_session_categorical(self, tmpdir):
def visible_xaxis_labels(ax):
# Due to a bug in Matplotlib the labels returned outside the field
# of view may be incorrect: https://github.com/matplotlib/matplotlib/issues/9397
pos = ax.xaxis.get_ticklocs()
labels = [tick.get_text() for tick in ax.xaxis.get_ticklabels()]
xmin, xmax = ax.get_xlim()
return [labels[i] for i in range(len(pos)) if pos[i] >= xmin and pos[i] <= xmax]
# Regression test for a bug that caused a restored scatter viewer
# with a categorical component to not show the categorical labels
# as tick labels.
filename = tmpdir.join('test_session_categorical.glu').strpath
self.viewer.add_data(self.data)
self.viewer.state.x_att = self.data.id['z']
process_events()
assert visible_xaxis_labels(self.viewer.axes) == ['a', 'b', 'c']
self.session.application.save_session(filename)
with open(filename, 'r') as f:
session = f.read()
state = GlueUnSerializer.loads(session)
ga = state.object('__main__')
dc = ga.session.data_collection
viewer = ga.viewers[0][0]
assert viewer.state.x_att is dc[0].id['z']
assert visible_xaxis_labels(self.viewer.axes) == ['a', 'b', 'c']
ga.close()
def test_enable_disable_components_combo(self):
# Regression test for a bug that caused an error when turning off pixel
# components from combo boxes.
self.viewer.add_data(self.data)
self.data['a'] = self.data.id['x'] + 5
self.viewer.state.x_att_helper.pixel_coord = True
self.viewer.state.x_att = self.data.pixel_component_ids[0]
self.viewer.state.x_att_helper.pixel_coord = False
def test_datetime64_support(self, tmpdir):
self.data.add_component(np.array([100, 200, 300, 400], dtype='M8[D]'), 't1')
self.data.add_component(np.array([200, 300, 400, 500], dtype='M8[D]'), 't2')
self.viewer.add_data(self.data)
self.viewer.state.x_att = self.data.id['t1']
self.viewer.state.y_att = self.data.id['y']
# Matplotlib deals with dates by converting them to the number of days
# since 01-01-0001, so we can check that the limits are correctly
# converted (and not 100 to 400)
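        # (Sketch of the arithmetic, assuming Matplotlib's pre-3.3 date epoch:
        # date(1970, 1, 1).toordinal() == 719163, so t1's 100-400 day values
        # map to ordinals 719263-719563, and the 4% axis margin of 12 days
        # yields the limits asserted below.)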
assert self.viewer.axes.get_xlim() == (719251.0, 719575.0)
assert self.viewer.axes.get_ylim() == (3.2 - 0.012, 3.5 + 0.012)
# Apply an ROI selection in plotting coordinates
roi = RectangularROI(xmin=719313, xmax=719513, ymin=3, ymax=4)
self.viewer.apply_roi(roi)
# Check that the two middle elements are selected
assert_equal(self.data.subsets[0].to_mask(), [0, 1, 1, 0])
# Now do the same with the y axis
self.viewer.state.y_att = self.data.id['t2']
assert self.viewer.axes.get_xlim() == (719251.0, 719575.0)
assert self.viewer.axes.get_ylim() == (719351.0, 719675.0)
# Apply an ROI selection in plotting coordinates
edit = self.session.edit_subset_mode
edit.edit_subset = []
roi = CircularROI(xc=719463, yc=719563, radius=200)
self.viewer.apply_roi(roi)
assert_equal(self.data.subsets[1].to_mask(), [0, 1, 1, 1])
# Make sure that the Qt labels look ok
self.viewer.state.y_att = self.data.id['y']
options = self.viewer.options_widget().ui
assert options.valuetext_x_min.text() == '1970-03-30'
assert options.valuetext_x_max.text() == '1971-02-17'
assert options.valuetext_y_min.text() == '3.188'
assert options.valuetext_y_max.text() == '3.512'
# Make sure that we can set the xmin/xmax to a string date
assert_equal(self.viewer.state.x_min, np.datetime64('1970-03-30', 'D'))
options.valuetext_x_min.setText('1970-04-14')
options.valuetext_x_min.editingFinished.emit()
assert self.viewer.axes.get_xlim() == (719266.0, 719575.0)
assert_equal(self.viewer.state.x_min, np.datetime64('1970-04-14', 'D'))
# Make sure that everything works fine after saving/reloading
filename = tmpdir.join('test_datetime64.glu').strpath
self.session.application.save_session(filename)
with open(filename, 'r') as f:
session = f.read()
state = GlueUnSerializer.loads(session)
ga = state.object('__main__')
viewer = ga.viewers[0][0]
options = viewer.options_widget().ui
assert_equal(self.viewer.state.x_min, np.datetime64('1970-04-14', 'D'))
assert options.valuetext_x_min.text() == '1970-04-14'
assert options.valuetext_x_max.text() == '1971-02-17'
assert options.valuetext_y_min.text() == '3.188'
assert options.valuetext_y_max.text() == '3.512'
ga.close()
def test_datetime64_disabled(self, capsys):
# Make sure that datetime components aren't options for the vector and
# error markers.
data = Data(label='test')
data.add_component(np.array([100, 200, 300, 400], dtype='M8[D]'), 't1')
data.add_component(np.array([200, 300, 400, 500], dtype='M8[D]'), 't2')
data.add_component(np.array([200., 300., 400., 500.]), 'x')
data.add_component(np.array([200., 300., 400., 500.]), 'y')
self.data_collection.append(data)
self.viewer.add_data(data)
self.viewer.state.x_att = data.id['x']
self.viewer.state.y_att = data.id['y']
self.viewer.state.layers[0].cmap_mode = 'Linear'
self.viewer.state.layers[0].cmap_att = data.id['x']
self.viewer.state.layers[0].size_mode = 'Linear'
self.viewer.state.layers[0].size_att = data.id['y']
self.viewer.state.layers[0].vector_visible = True
self.viewer.state.layers[0].xerr_visible = True
self.viewer.state.layers[0].yerr_visible = True
process_events()
self.viewer.state.x_att = data.id['t1']
self.viewer.state.y_att = data.id['t2']
process_events()
        # We use capsys here because the error is otherwise only apparent in stderr.
out, err = capsys.readouterr()
assert out.strip() == ""
assert err.strip() == ""
def test_density_map_incompatible_subset(self, capsys):
# Regression test for a bug that caused the scatter viewer to crash
# if subset for density map was incompatible.
data2 = Data(label='d1', x=[3.4, 2.3, -1.1, 0.3], y=[3.2, 3.3, 3.4, 3.5], z=['a', 'b', 'c', 'a'])
self.data_collection.append(data2)
self.viewer.add_data(self.data)
self.viewer.add_data(data2)
self.data_collection.new_subset_group('test', self.data.id['x'] > 1)
for layer in self.viewer.state.layers:
layer.density_map = True
self.viewer.figure.canvas.draw()
process_events()
assert self.viewer.layers[0].enabled
assert not self.viewer.layers[1].enabled
assert self.viewer.layers[2].enabled
assert not self.viewer.layers[3].enabled
def test_density_map_line_error_vector(self, capsys):
# Make sure that we don't allow/show lines/errors/vectors
# if in density map mode.
self.viewer.add_data(self.data)
self.viewer.state.layers[0].line_visible = True
self.viewer.state.layers[0].xerr_visible = True
self.viewer.state.layers[0].yerr_visible = True
self.viewer.state.layers[0].vector_visible = True
# Setting density_map to True resets the visibility of
# lines/errors/vectors.
self.viewer.state.layers[0].density_map = True
assert not self.viewer.state.layers[0].line_visible
assert not self.viewer.state.layers[0].xerr_visible
assert not self.viewer.state.layers[0].yerr_visible
assert not self.viewer.state.layers[0].vector_visible
|
the-stack_0_11147 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Autologging documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 27 21:07:11 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if (on_rtd):
html_theme = 'default'
else:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Autologging'
copyright = '2013, 2015, 2016, 2018, 2019 Matthew Zipay'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.3"
# The full version, including alpha/beta/rc tags.
release = "1.3.2"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = 'http://ninthtest.info/python-autologging/'
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Autologgingdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Autologging.tex', 'Autologging Documentation',
'Matthew Zipay', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'autologging', 'Autologging Documentation',
['Matthew Zipay'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Autologging', 'Autologging Documentation',
'Matthew Zipay', 'Autologging', 'Eliminate boilerplate logging and tracing code.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/3': None}
autodoc_member_order = 'bysource'
autodoc_default_flags = ['show-inheritance', 'members']
|
the-stack_0_11148 | from .TypingData import TypingData
from .TypingNet import TypingNet, TypingTrain
import torch as th
import dgl
class TypingUtility:
def __init__(self):
self.fn = None
self.data = None
self.net = None
self.prob = th.nn.Softmax(dim=0)
def Predict(self, fn):
residue = self.DLText2Residue(fn)
return residue, self.PredictResidue(residue)
def PredictResidue(self, residue):
with th.no_grad():
y = self.net(residue)
types = [self.data.atomic_types[residue[0].ndata[self.data.features_str][i][0].item()][th.argmax(y0).item()] for i, y0 in enumerate(y)]
probs = [self.prob(y0) for y0 in y]
return types, probs
def DLText2Residue(self, fn):
with open(fn, "r") as fd:
lines = fd.readlines()
atom_numbers = [int(word) for word in lines[0].split()]
edge_lines = [int(word) for word in lines[1].split()]
edges1 = []
edges2 = []
for i in range(0, len(edge_lines), 2):
edges1.append(edge_lines[i])
edges2.append(edge_lines[i+1])
edges1.append(edge_lines[i+1])
edges2.append(edge_lines[i])
graph = dgl.graph((th.tensor(edges1), th.tensor(edges2)))
graph.ndata[self.data.features_str] = th.tensor(atom_numbers).unsqueeze(1)
return (graph,
{element:[idx for idx,value in enumerate(graph.ndata[self.data.features_str][:, 0].tolist()) if value == element]
for element in set(graph.ndata[self.data.features_str][:, 0].tolist())},
fn.upper())
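    # Input layout assumed by DLText2Residue above (inferred from the parsing,
    # not from a formal spec): the first line of the file lists one atomic
    # number per atom, and the second line lists flattened pairs of bonded
    # atom indices. A hypothetical water-like entry could therefore read:
    #
    #   8 1 1
    #   0 1 0 2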
def Calibrate(self):
type_stats = {key:[[0,0] for _ in range(len(value))] for key,value in self.data.atomic_types.items()}
for residue in self.data.residues:
y = self.net(residue)
for element, type, prob in zip(residue[0].ndata[self.data.features_str][:, 0].tolist(), residue[0].ndata[self.data.atomic_type_str].squeeze(1).tolist(), y):
type_stats[element][type][0] += 1
predict_type = th.argmax(prob).item()
if predict_type != type:
type_stats[element][type][1] += 1
if self.data.atomic_types[element][type] == "CG321":
print("In %8s: should be %8s, but is %s" % (residue[2], self.data.atomic_types[element][type], self.data.atomic_types[element][predict_type]))
return type_stats
def ExportParams(self, fn):
with open(fn, "w") as fd:
maxK = -1
num_layers = len(self.net.tclayers[1].gcnlayers)
layer_str = ""
for i in range(num_layers-1): layer_str += "layer%d, " % (i)
layer_str += "layer%d" % (num_layers-1)
fd.write("Layer %s;\n" % (layer_str))
for element, net in self.net.tclayers.items():
params = net.export()
param_str = ""
for i, param in enumerate(params):
b = param[0]
K = param[1]
maxK = max(K, maxK)
Ws = param[2]
dim0 = Ws[0].shape[0]
dim1 = Ws[0].shape[1]
param_str += "layer%d = {{" % (i)
param_str += ("MatrixXd::Zero(%d, %d), " % (dim0, dim1))*(K+1)
param_str += "}, RowVectorXd::Zero(%d)};\n" % (dim1)
# Write down parameters.
for k in range(K+1):
param_str += "layer%d.Ws[%d] << " % (i, k)
for x in range(dim0-1):
for y in range(dim1):
param_str += "%15.8f, " % (Ws[k][x][y])
param_str += "\n"
for y in range(dim1-1): param_str += "%15.8f, " % (Ws[k][dim0-1][y])
param_str += "%15.8f;\n" % (Ws[k][dim0-1][dim1-1])
param_str += "layer%d.b << " % (i)
for y in range(dim1-1): param_str += "%15.8f, " % (b[y])
param_str += "%15.8f;\n" % (b[dim1-1])
param_str += "Nets[%d] = {%s};" % (element, layer_str)
fd.write("%s\n" % (param_str))
fd.write("MaxK = %d;" % (maxK))
def Build(self, fn, params_fn = None, type = "CHARMM", training_ratio = 1., learning_rate = 1E-3, max_epochs = 1000000, output_freq = 100, device = th.device("cpu")):
print(" -- Build Atom Typing Predictor --")
# Build typing model.
self.fn = fn
self.data = TypingData()
typing_type_fun = {"CHARMM":self.data.ParseFromCHARMM}
typing_type_fun[type.upper()](self.fn, device)
print("Definitions from: %s" % (self.fn))
num_atomic_types = len(self.data.atomic_types)
print(" Atomic types: %d" % (sum(len(value) for value in self.data.atomic_types.values())))
for key, value in self.data.atomic_types.items():
print(" %-2s: %-3d" % ([k for k, v in self.data.periodic_table.items() if v == key][0], len(value)), value)
print(" Residue names: %d" % (len(self.data.residues)))
# Build net.
print("Net parameters: %s" % ("To be trained" if params_fn is None else params_fn))
num_features = self.data.num_features
features_str = self.data.features_str
atomic_type_str = self.data.atomic_type_str
save_fn_prefix = self.fn[:self.fn.rfind(".")]
self.net = TypingNet(num_features, self.data.atomic_types, features_str)
if params_fn is not None:
# Only load parameters, not architecture.
self.net.load(params_fn)
if max_epochs > 0:
TypingTrain(self.net, self.data.residues, training_ratio, learning_rate, max_epochs, output_freq, save_fn_prefix, atomic_type_str, device)
self.net.eval()
print(" -- Building Atom Typing Predictor Accomplished --")
|
the-stack_0_11149 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v6.resources.types import search_term_view
from google.ads.googleads.v6.services.types import search_term_view_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class SearchTermViewServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for SearchTermViewService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
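            # google.auth.default() resolves credentials from the environment
            # (e.g. GOOGLE_APPLICATION_CREDENTIALS or gcloud application-default
            # credentials), requesting the AdWords scope above.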
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_search_term_view: gapic_v1.method.wrap_method(
self.get_search_term_view,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_search_term_view(
self,
) -> typing.Callable[
[search_term_view_service.GetSearchTermViewRequest],
search_term_view.SearchTermView,
]:
raise NotImplementedError
__all__ = ("SearchTermViewServiceTransport",)
|
the-stack_0_11150 | import argparse
import codecs
import csv
import datetime
import errno
import importlib
import json
import logging
import os
import shutil
import subprocess
import sys
import traceback
from functools import singledispatch
from pathlib import Path
from typing import (
Any,
Iterable,
List,
Tuple,
Union,
cast,
)
from types import ModuleType
from urllib.error import URLError
import publicsuffix
import requests
import strict_rfc3339
MANDATORY_SCANNER_PROPERTIES = (
"headers",
"to_rows"
)
# global in-memory cache
suffix_list = None
# Time Conveniences #
# Current time as a Unix timestamp: seconds since the epoch (UTC), with fractional microseconds.
def local_now() -> float:
return datetime.datetime.now().timestamp()
def format_datetime(obj) -> Union[str, None]:
if isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, str):
return obj
else:
return None
# Cut off floating point errors, always output duration down to
# microseconds.
def just_microseconds(duration: float) -> Union[str, None]:
if duration is None:
return None
return "%.6f" % duration
# RFC 3339 timestamp for a given UTC time.
# seconds can be a float, down to microseconds.
# A given time needs to be passed in *as* UTC already.
def utc_timestamp(seconds: Union[float, int]) -> Union[str, None]:
if not seconds:
return None
return strict_rfc3339.timestamp_to_rfc3339_utcoffset(seconds)
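# Example (illustrative; the exact string is produced by strict_rfc3339):
#   utc_timestamp(1600000000) -> '2020-09-13T12:26:40Z'
#   utc_timestamp(None)       -> None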
# /Time Conveniences #
# Filesystem Conveniences #
# mkdir -p in python, from:
# https://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
def mkdir_p(path: str) -> None:
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
pass
else:
raise
def read(source):
with open(source) as f:
contents = f.read()
return contents
def write(content: Union[bytes, str], destination: str,
binary: bool=False) -> None:
mkdir_p(os.path.dirname(destination))
if binary:
binary_content = cast(bytes, content) # mypy wrangling
with open(destination, "bw") as fb:
fb.write(binary_content)
else:
string_content = cast(str, content) # mypy wrangling
with open(destination, "w", encoding="utf-8") as fs:
fs.write(string_content)
# /Filesystem Conveniences #
# Error Conveniences #
def format_last_exception():
exc_type, exc_value, exc_traceback = sys.exc_info()
return "\n".join(traceback.format_exception(exc_type, exc_value,
exc_traceback))
# Error Conveniences #
# Command Line Conveniences #
def scan(command: List[str], env: dict=None,
allowed_return_codes: list=[]) -> Union[str, None]:
try:
response = subprocess.check_output(
command,
stderr=subprocess.STDOUT,
shell=False, env=env
)
return str(response, encoding='UTF-8')
except subprocess.CalledProcessError as exc:
if exc.returncode in allowed_return_codes:
return str(exc.stdout, encoding='UTF-8')
else:
logging.warning("Error running %s." % (str(command)))
logging.warning("Error running %s." % (str(exc.output)))
logging.warning(format_last_exception())
return None
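# Minimal usage sketch for scan(); the command below is illustrative and
# not one of the real scanner invocations:
#
#   output = scan(["echo", "hello"])
#   if output is not None:
#       logging.debug("Command output: %s", output.strip())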
# test if a command exists, don't print output
def try_command(command):
try:
subprocess.check_call(["which", command], shell=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
return True
except subprocess.CalledProcessError:
logging.warning(format_last_exception())
logging.warning("No command found: %s" % (str(command)))
return False
# /Command Line Conveniences #
# JSON Conveniences #
# Format datetimes, sort keys, pretty-print.
def json_for(object: object) -> str:
return json.dumps(object, sort_keys=True, indent=2, default=format_datetime)
# Mirror image of json_for.
def from_json(string):
return json.loads(string)
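# The two helpers above are designed to round-trip, e.g.:
#
#   >>> from_json(json_for({"b": 2, "a": 1})) == {"a": 1, "b": 2}
#   True
#
# Datetimes survive only as ISO 8601 strings, because format_datetime is
# used as the JSON default handler.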
# /JSON Conveniences #
# Logging Conveniences #
def configure_logging(options: Union[dict, None]=None) -> None:
options = {} if not options else options
if options.get('debug', False):
log_level = "debug"
else:
log_level = options.get("log", "warn")
if log_level not in ["debug", "info", "warn", "error"]:
print("Invalid log level (specify: debug, info, warn, error).")
sys.exit(1)
logging.basicConfig(format='%(message)s', level=log_level.upper())
# /Logging Conveniences #
# CSV Handling #
# Sort a CSV by domain name, "in-place" (by making a temporary copy).
# This loads the whole thing into memory: it's not a great solution for
# super-large lists of domains.
def sort_csv(input_filename):
logging.warning("Sorting %s..." % input_filename)
input_file = open(input_filename, encoding='utf-8', newline='')
tmp_filename = "%s.tmp" % input_filename
tmp_file = open(tmp_filename, 'w', newline='')
tmp_writer = csv.writer(tmp_file)
# store list of domains, to sort at the end
domains = []
# index rows by domain
rows = {}
header = None
for row in csv.reader(input_file):
# keep the header around
if (row[0].lower() == "domain"):
header = row
continue
# index domain for later reference
domain = row[0]
domains.append(domain)
rows[domain] = row
# straight alphabet sort
domains.sort()
# write out to a new file
tmp_writer.writerow(header)
for domain in domains:
tmp_writer.writerow(rows[domain])
# close the file handles
input_file.close()
tmp_file.close()
# replace the original
shutil.move(tmp_filename, input_filename)
def write_rows(rows, domain, base_domain, scanner, csv_writer, meta={}):
# If we didn't get any info, we'll still output information about why the scan failed.
if rows is None:
empty_row = [None] * len(scanner.headers)
rows = [empty_row]
# Always output Domain and Base Domain.
standard_prefix = [
domain,
base_domain,
]
# If requested, add local and Lambda scan data.
meta_fields = []
if bool(meta):
meta_fields.append(" ".join(meta.get('errors', [])))
meta_fields.append(utc_timestamp(meta.get("start_time")))
meta_fields.append(utc_timestamp(meta.get("end_time")))
meta_fields.append(just_microseconds(meta.get("duration")))
if meta.get("lambda") is not None:
meta_fields.append(meta['lambda'].get('request_id'))
meta_fields.append(meta['lambda'].get('log_group_name'))
meta_fields.append(meta['lambda'].get('log_stream_name'))
meta_fields.append(utc_timestamp(meta['lambda'].get('start_time')))
meta_fields.append(utc_timestamp(meta['lambda'].get('end_time')))
meta_fields.append(meta['lambda'].get('memory_limit'))
meta_fields.append(just_microseconds(meta['lambda'].get('measured_duration')))
# Write out prefix, scan data, and meta scan data.
for row in rows:
csv_writer.writerow(standard_prefix + row + meta_fields)
# /CSV Handling #
# Cache Handling #
def cache_single(filename, cache_dir="./cache"):
return os.path.join(cache_dir, filename)
# Predictable cache path for a domain and operation.
def cache_path(domain, operation, ext="json", cache_dir="./cache"):
return os.path.join(cache_dir, operation, ("%s.%s" % (domain, ext)))
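# With the defaults, these helpers produce POSIX-style paths such as
# "./cache/pshtt/example.gov.json" (the scanner name is illustrative):
#
#   >>> cache_path("example.gov", "pshtt")
#   './cache/pshtt/example.gov.json'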
# Used to quickly get cached data for a domain.
def data_for(domain, operation, cache_dir="./cache"):
path = cache_path(domain, operation, cache_dir=cache_dir)
if os.path.exists(path):
raw = read(path)
data = json.loads(raw)
if isinstance(data, dict) and (data.get('invalid', False)):
return None
else:
return data
else:
return {}
# marker for a cached invalid response
def invalid(data=None):
if data is None:
data = {}
data['invalid'] = True
return json_for(data)
# Return base domain for a subdomain, factoring in the Public Suffix List.
def base_domain_for(subdomain, cache_dir="./cache"):
global suffix_list
"""
For "x.y.domain.gov", return "domain.gov".
If suffix_list is None, the caches have not been initialized, so do that.
"""
if suffix_list is None:
suffix_list, discard = load_suffix_list(cache_dir=cache_dir)
if suffix_list is None:
logging.warning("Error downloading the PSL.")
exit(1)
return suffix_list.get_public_suffix(subdomain)
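# Illustrative behavior, assuming the Public Suffix List is available
# (it is downloaded and cached on first use):
#
#   base_domain_for("x.y.example.gov")  # -> "example.gov"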
# Returns an instantiated PublicSuffixList object, and the
# list of lines read from the file.
def load_suffix_list(cache_dir="./cache"):
cached_psl = cache_single("public-suffix-list.txt", cache_dir=cache_dir)
if os.path.exists(cached_psl):
logging.debug("Using cached Public Suffix List...")
with codecs.open(cached_psl, encoding='utf-8') as psl_file:
suffixes = publicsuffix.PublicSuffixList(psl_file)
content = psl_file.readlines()
else:
# File does not exist, download current list and cache it at given location.
logging.debug("Downloading the Public Suffix List...")
try:
cache_file = publicsuffix.fetch()
except URLError as err:
logging.warning("Unable to download the Public Suffix List...")
logging.debug("{}".format(err))
return None, None
content = cache_file.readlines()
suffixes = publicsuffix.PublicSuffixList(content)
# Cache for later.
write(''.join(content), cached_psl)
return suffixes, content
# /Cache Handling #
# Argument Parsing #
class ArgumentParser(argparse.ArgumentParser):
"""
This lets us test for errors from argparse by overriding the error method.
See https://stackoverflow.com/questions/5943249
"""
def _get_action_from_name(self, name):
"""Given a name, get the Action instance registered with this parser.
If only it were made available in the ArgumentError object. It is
passed as its first arg...
"""
container = self._actions
if name is None:
return None
for action in container:
if '/'.join(action.option_strings) == name:
return action
elif action.metavar == name:
return action
elif action.dest == name:
return action
def error(self, message):
exc = sys.exc_info()[1]
if exc:
exc.argument = self._get_action_from_name(exc.argument_name)
raise exc
super(ArgumentParser, self).error(message)
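# Sketch of how tests can lean on the override above: instead of argparse
# printing usage and calling sys.exit(), the underlying ArgumentError
# propagates and can be asserted on. The option name is illustrative.
#
#   parser = ArgumentParser()
#   parser.add_argument("--count", type=int)
#   try:
#       parser.parse_args(["--count", "not-a-number"])
#   except Exception as err:
#       print("argparse error surfaced to the caller:", err)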
def build_scan_options_parser() -> ArgumentParser:
""" Builds the argparse parser object.
Remember that it changes '-' to '_' in the options name.
"""
parser = ArgumentParser(prefix_chars="--")
parser.add_argument("domains", help="".join([
"Either a comma-separated list of domains or the url of a CSV ",
"file/path to a local CSV file containing the domains to be ",
"domains to be scanned. The CSV's header row will be ignored ",
"if the first cell starts with \"Domain\" (case-insensitive).",
]))
parser.add_argument("--cache", action="store_true", help="".join([
"Use previously cached scan data to avoid scans hitting the network ",
"where possible.",
]))
parser.add_argument("--debug", action="store_true",
help="Print out more stuff. Useful with '--serial'")
parser.add_argument("--lambda", action="store_true", help="".join([
"Run certain scanners inside Amazon Lambda instead of locally.",
]))
parser.add_argument("--lambda-profile", nargs=1, help="".join([
"When running Lambda-related commands, use a specified AWS named ",
"profile. Credentials/config for this named profile should already ",
"be configured separately in the execution environment.",
]))
parser.add_argument("--lambda-retries", type=int, help="".join([
"The maximum number of times to retry a Lambda job that fails. ",
"If not specified then the value 0 is used."
]))
parser.add_argument("--meta", action="store_true", help="".join([
"Append some additional columns to each row with information about ",
"the scan itself. This includes start/end times and durations, as ",
"well as any encountered errors. When also using '--lambda', ",
"additional, Lambda-specific information will be appended.",
]))
parser.add_argument("--scan", nargs=1, required=True,
help="Comma-separated list of scanners (required).")
parser.add_argument("--sort", action="store_true", help="".join([
"Sort result CSVs by domain name, alphabetically. (Note: this causes ",
"the entire dataset to be read into memory.)",
]))
parser.add_argument("--serial", action="store_true", help="".join([
"Disable parallelization, force each task to be done simultaneously. ",
"Helpful for testing and debugging.",
]))
parser.add_argument("--suffix", nargs=1, help="".join([
"Add a suffix to all input domains. For example, a --suffix of ",
"'virginia.gov' will add '.virginia.gov' to the end of all ",
"input domains."
]))
parser.add_argument("--output", nargs=1, default=["./"], help="".join([
"Where to output the 'cache/' and 'results/' directories. ",
"Defaults to './'.",
]))
parser.add_argument("--workers", nargs=1,
help="Limit parallel threads per-scanner to a number.")
# TODO: Should workers have a default value?
parser.add_argument("--no-fast-cache", action="store_true", help="".join([
"Do not use fast caching even if a scanner supports it. This option ",
"will cause domain-scan to use less memory, but some (possibly ",
"expensive) network activity or other operations may be repeated."
]))
# TODO: Move the scanner-specific argument parsing to each scanner's code.
# a11y:
parser.add_argument("--a11y-config",
help="a11y: Location of pa11y config file (used with a11y scanner.")
parser.add_argument("--a11y-redirects",
help="a11y: Location of YAML file with redirects to inform the a11y scanner.")
# pshtt:
parser.add_argument("--ca_file",
help="ca_file: Location of PEM file of trust store to verify certs with.")
parser.add_argument("--pt_int_ca_file",
help="pt_int_ca_file: Location of PEM file of public trust store with any needed intermediate certificates to verify certs with.")
parser.add_argument("--cache-third-parties",
help="cache-third-parties: Location ot store third party cache files.")
parser.add_argument("--user_agent",
help="user_agent: User agent string to use in scan request header.")
parser.add_argument("--adfs-hsts", action="store_true",
help="adfs-hsts: Specifically scan /adfs/ls/ for an HSTS header even without a redirect to it.")
# sslyze:
parser.add_argument("--sslyze-serial",
help="sslyze: If set, will use a synchronous (single-threaded in-process) scanner. Defaults to true.")
parser.add_argument("--sslyze-certs",
help="sslyze: If set, will use the CertificateInfoScanner and return certificate info. Defaults to true.")
parser.add_argument("--sslyze-reneg",
help="sslyze: If set, will use the SessionRenegotiationScanner and return session renegotiation info. Defaults to true.")
# trustymail:
parser.add_argument("--starttls", action='store_true', help="".join([
"trustymail: Only check mx records and STARTTLS support. ",
"(Implies --mx.)"
]))
parser.add_argument("--timeout", help="".join([
"trustymail: The DNS lookup timeout in seconds. (Default is 5.)"
]))
parser.add_argument("--smtp-timeout", help="".join([
"trustymail: The SMTP connection timeout in seconds. (Default is 5.)"
]))
parser.add_argument("--smtp-localhost", help="".join([
"trustymail: The hostname to use when connecting to SMTP ",
"servers. (Default is the FQDN of the host from ",
"which trustymail is being run.)"
]))
parser.add_argument("--smtp-ports", help="".join([
"trustymail: A comma-delimited list of ports at which to look ",
"for SMTP servers. (Default is '25,465,587'.)"
]))
parser.add_argument("--dns", help="".join([
"trustymail: A comma-delimited list of DNS servers to query ",
"against. For example, if you want to use ",
"Google's DNS then you would use the ",
"value --dns-hostnames='8.8.8.8,8.8.4.4'. By ",
"default the DNS configuration of the host OS ",
"(/etc/resolv.conf) is used. Note that ",
"the host's DNS configuration is not used at all ",
"if this option is used."
]))
parser.add_argument("--no-smtp-cache", help="".join([
"trustymail: Do not cache SMTP results during the run. This",
"may results in slower scans due to testing the ",
"same mail servers multiple times."
]))
parser.add_argument("--mx", action='store_true', help="".join([
"trustymail: Only check MX records"
]))
parser.add_argument("--spf", action='store_true', help="".join([
"trustymail: Only check SPF records"
]))
parser.add_argument("--dmarc", action='store_true', help="".join([
"trustymail: Only check DMARC records"
]))
return parser
def options() -> Tuple[dict, list]:
"""
Parse options for the ``scan`` command.
Impure
Reads from sys.argv.
"""
parser = build_scan_options_parser()
parsed, unknown = parser.parse_known_args()
opts = {k: v for k, v in vars(parsed).items() if v is not None}
if opts.get("lambda_profile") and not opts.get("lambda"):
raise argparse.ArgumentTypeError(
"Can't set lambda profile unless lambda flag is set.")
# We know we want one value, but the ``nargs`` flag means we get a list.
should_be_singles = (
"lambda_profile",
"output",
"scan",
"suffix",
"workers",
)
opts = make_values_single(opts, should_be_singles)
# Derive some options not set directly at CLI:
opts["_"] = {
"cache_dir": os.path.join(opts.get("output", "./"), "cache"),
"report_dir": opts.get("output", "./"),
"results_dir": os.path.join(opts.get("output", "./"), "results"),
}
return (opts, unknown)
def make_values_single(dct: dict, should_be_singles: Iterable[str]) -> dict:
for key in (k for k in should_be_singles if k in dct):
dct[key] = dct[key][0]
return dct
def handle_scanner_arguments(scans: List[ModuleType], opts: dict, unknown: List[str]):
for scan in scans:
if hasattr(scan, "handle_scanner_args"):
scan_opts, unknown = scan.handle_scanner_args(unknown, opts) # type: ignore
opts.update(scan_opts)
return (opts, unknown)
# /Argument Parsing #
def build_scanner_list(names: List[str],
mod: str="scanners") -> List[ModuleType]:
"""
Given a list of names, load modules corresponding to those names from the
scanners directory. Also verify that they have the required properties.
"""
scans = []
for name in names:
try:
scan = importlib.import_module(
"%s.%s" % (mod, name))
verify_scanner_properties(scan)
except ImportError:
exc_type, exc_value, exc_traceback = sys.exc_info()
errmsg = "\n".join([
"[%s] Scanner not found, or had an error during loading." % name,
"\tERROR: %s" % exc_type,
"\t%s" % exc_value,
])
logging.error(errmsg)
raise ImportError(errmsg)
scans.append(scan)
return scans
def verify_scanner_properties(scanner: ModuleType) -> None:
name = scanner.__name__
for prop in MANDATORY_SCANNER_PROPERTIES:
if not hasattr(scanner, prop):
raise ImportError("%s lacks required %s property" % (name, prop))
# If the scan has a canonical command, make sure it exists.
# mypy doesn't handle optional properties well, it seems.
if hasattr(scan, "command") and scan.command and (not try_command(scan.command)): # type: ignore
errmsg = "[%s] Command not found: %s" % (name, scan.command) # type: ignore
logging.error(errmsg)
raise ImportError(errmsg)
def begin_csv_writing(scanner: ModuleType, options: dict,
base_hdrs: Tuple[List[str], List[str], List[str]]) -> dict:
"""
Determine the CSV output file path for the scanner, open the file at that
path, instantiate a CSV writer for it, determine whether or not to use
lambda, determine what the headers are, write the headers to the CSV.
Return a dict containing the above.
"""
PREFIX_HEADERS, LOCAL_HEADERS, LAMBDA_HEADERS = base_hdrs
name = scanner.__name__.split(".")[-1] # e.g. 'pshtt'
results_dir = options["_"]["results_dir"]
meta = options.get("meta")
lambda_mode = options.get("lambda")
use_lambda = lambda_mode and \
hasattr(scanner, "lambda_support") and \
scanner.lambda_support # type: ignore # it's an optional variable.
# Write the header row, factoring in Lambda detail if needed.
headers = PREFIX_HEADERS + scanner.headers # type: ignore # optional again
# Local scan timing/errors.
if meta:
headers += LOCAL_HEADERS
# Lambda scan timing/errors. (At this step, only partial fields.)
if meta and use_lambda:
headers += LAMBDA_HEADERS
scanner_csv_path = Path(results_dir, "%s.csv" % name).resolve()
scanner_file = scanner_csv_path.open('w', newline='')
scanner_writer = csv.writer(scanner_file)
print("Opening csv file for scanner {}: {}".format(name, scanner_csv_path))
scanner_writer.writerow(headers)
return {
'name': name,
'file': scanner_file,
'filename': str(scanner_csv_path),
'writer': scanner_writer,
'headers': headers,
'use_lambda': use_lambda,
}
def determine_scan_workers(scanner: ModuleType, options: dict, w_default: int,
w_max: int) -> int:
"""
Given a number of inputs, determines the right number of workers to set
when running scans.
"""
if options.get("serial"):
workers = 1
elif hasattr(scanner, "workers"):
workers = scanner.workers # type: ignore # The subclass objects set this sometimes.
else:
# mypy has trouble with this, presumably because we're using a dict
workers = int(options.get("workers", w_default)) # type: ignore
# Enforce a local worker maximum as a safety valve.
return min(workers, w_max)
# Yield domain names from a single string, or a CSV of them.
@singledispatch
def domains_from(arg: Any, domain_suffix=None) -> Iterable[str]:
raise TypeError("'%s' is not a recognized source for domains." % arg)
@domains_from.register(str)
def _df_str(arg: str, domain_suffix: Union[str, None]=None) -> Iterable[str]:
# TODO: how do we handle domain_suffix here?
if domain_suffix is not None:
errmsg = "Passing in domains at CLI not compatible with --suffix."
raise argparse.ArgumentTypeError(errmsg)
for x in arg.split(","):
yield x
@domains_from.register(Path)
def _df_path(arg: Path, domain_suffix: Union[str, None]=None) -> Iterable[str]:
if arg.suffix == ".csv":
with arg.open(encoding='utf-8', newline='') as csvfile:
for row in csv.reader(csvfile):
if (not row) or (not row[0]) or (row[0].lower() == "domain") or (row[0].lower() == "domain name"):
continue
domain = row[0].lower()
if domain_suffix:
sep = "."
if domain_suffix.startswith("."):
sep = ""
yield "%s%s%s" % (domain, sep, domain_suffix)
else:
yield domain
else:
# Note: the path referred to below will be the path to the local cached
# download and not to the original URL. It shouldn't be possible to get
# here with that being a problem, but noting it anyway.
msg = "\n".join([
"Domains should be specified as a comma-separated list ",
"or as the URL or path to a .csv file. ",
"%s does not appear to be any of those." % arg
])
raise TypeError(msg)
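# Usage sketch for the dispatch above (the file name is hypothetical):
#
#   list(domains_from("example.gov,example.com"))
#   # -> ["example.gov", "example.com"]
#
#   list(domains_from(Path("domains.csv"), domain_suffix="virginia.gov"))
#   # -> each domain in the CSV with ".virginia.gov" appended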
def handle_domains_argument(domains: str, cache_dir: Path) -> Union[Path, str]:
# `domains` can be either a path or a domain name.
# It can also be a URL, and if it is we want to download it now,
# and then adjust the value to be the path of the cached download.
# Note that the cache_dir is basically guaranteed to exist by the time
# we reach this point in the execution path.
if domains.startswith("http:") or domains.startswith("https:"):
domains_path = Path(cache_dir, "domains.csv")
try:
response = requests.get(domains)
write(response.text, str(domains_path))
except requests.exceptions.RequestException as err:
msg = "\n".join([
"Domains URL not downloaded successfully; RequestException",
str(err),
])
logging.error(msg)
raise IOError(msg)
return domains_path
elif domains.endswith(".csv"):
# Assume file is either absolute or relative from current dir.
try:
domains_path = Path(os.path.curdir, domains).resolve()
if not domains_path.exists():
raise FileNotFoundError
return domains_path
except FileNotFoundError as err:
msg = "\n".join([
"Domains CSV file not found.",
"(Curdir: %s CSV file: %s)" % (os.path.curdir, domains),
str(err),
])
logging.error(msg)
raise FileNotFoundError(msg)
return domains
|
the-stack_0_11152 | """
Tests for the bootstrap.py (formerly bootstrap_controller.py) file.
"""
import unittest
from collections import OrderedDict
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.sparse import csr_matrix, eye
import pylogit.bootstrap as bc
import pylogit.asym_logit as asym
import pylogit.mixed_logit_calcs as mlc
import pylogit.mixed_logit as mixed_logit
import pylogit.nested_logit as nested_logit
from pylogit.conditional_logit import MNL
try:
# Python 3.x does not natively support xrange
from past.builtins import xrange
except ImportError:
pass
class BootstrapTests(unittest.TestCase):
def setUp(self):
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two
# alternatives. There is one generic variable. Two alternative
# specific constants and all three shape parameters are used.
# Create the betas to be used during the tests
self.fake_betas = np.array([-0.6])
# Create the fake outside intercepts to be used during the tests
self.fake_intercepts = np.array([1, 0.5])
# Create names for the intercept parameters
self.fake_intercept_names = ["ASC 1", "ASC 2"]
# Record the position of the intercept that is not being estimated
self.fake_intercept_ref_pos = 2
# Create the shape parameters to be used during the tests. Note that
# these are the reparameterized shape parameters, thus they will be
# exponentiated in the fit_mle process and various calculations.
self.fake_shapes = np.array([-1, 1])
# Create names for the intercept parameters
self.fake_shape_names = ["Shape 1", "Shape 2"]
# Record the position of the shape parameter that is being constrained
self.fake_shape_ref_pos = 2
# Calculate the 'natural' shape parameters
self.natural_shapes = asym._convert_eta_to_c(self.fake_shapes,
self.fake_shape_ref_pos)
# Create an array of all model parameters
self.fake_all_params = np.concatenate((self.fake_shapes,
self.fake_intercepts,
self.fake_betas))
# The mapping between rows and alternatives is given below.
self.fake_rows_to_alts = csr_matrix(np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]))
# Get the mapping between rows and observations
self.fake_rows_to_obs = csr_matrix(np.array([[1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]]))
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
# index in the scobit model.
self.fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5],
[0.78],
[0.23],
[1.04],
[2.52],
[1.49],
[0.85],
[1.37],
[1.17],
[2.03],
[1.62],
[1.94]])
# Create the index array for this set of choice situations
self.fake_index = self.fake_design.dot(self.fake_betas)
# Create the needed dataframe for the Asymmetric Logit constructor
self.fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2, 3, 3, 3,
4, 4, 5, 5, 5, 6, 6, 6],
"alt_id": [1, 2, 3, 1, 3, 1, 2, 3,
2, 3, 1, 2, 3, 1, 2, 3],
"choice": [0, 1, 0, 0, 1, 1, 0, 0,
1, 0, 1, 0, 0, 0, 0, 1],
"x": self.fake_design[:, 0],
"intercept":
np.ones(self.fake_design.shape[0])})
# Record the various column names
self.alt_id_col = "alt_id"
self.obs_id_col = "obs_id"
self.choice_col = "choice"
# Create the index specification and name dictionary for the model
self.fake_specification = OrderedDict()
self.fake_names = OrderedDict()
self.fake_specification["x"] = [[1, 2, 3]]
self.fake_names["x"] = ["x (generic coefficient)"]
# Bundle args and kwargs used to construct the Asymmetric Logit model.
self.constructor_args = [self.fake_df,
self.alt_id_col,
self.obs_id_col,
self.choice_col,
self.fake_specification]
# Create a variable for the kwargs being passed to the constructor
self.constructor_kwargs = {"intercept_ref_pos":
self.fake_intercept_ref_pos,
"shape_ref_pos": self.fake_shape_ref_pos,
"names": self.fake_names,
"intercept_names":
self.fake_intercept_names,
"shape_names": self.fake_shape_names}
# Initialize a basic Asymmetric Logit model whose coefficients will be
# estimated.
self.asym_model_obj = asym.MNAL(*self.constructor_args,
**self.constructor_kwargs)
self.asym_model_obj.coefs = pd.Series(self.fake_betas)
self.asym_model_obj.intercepts =\
pd.Series(self.fake_intercepts, index=self.fake_intercept_names)
self.asym_model_obj.shapes =\
pd.Series(self.fake_shapes, index=self.fake_shape_names)
self.asym_model_obj.params =\
pd.Series(np.concatenate([self.fake_shapes,
self.fake_intercepts,
self.fake_betas]),
index=self.fake_shape_names +
self.fake_intercept_names +
self.fake_names["x"])
self.asym_model_obj.nests = None
#####
# Initialize a basic MNL model
#####
# Create the MNL specification and name dictionaries.
self.mnl_spec, self.mnl_names = OrderedDict(), OrderedDict()
self.mnl_spec["intercept"] = [1, 2]
self.mnl_names["intercept"] = self.fake_intercept_names
self.mnl_spec.update(self.fake_specification)
self.mnl_names.update(self.fake_names)
mnl_construct_args = self.constructor_args[:-1] + [self.mnl_spec]
mnl_kwargs = {"names": self.mnl_names}
self.mnl_model_obj = MNL(*mnl_construct_args, **mnl_kwargs)
return None
def test_get_param_names(self):
# Alias the function being tested.
func = bc.get_param_names
# Get the function results
func_results = func(self.asym_model_obj)
# Get the expected results
expected_results = self.asym_model_obj.params.index.tolist()
# Test the function results
self.assertIsInstance(func_results, list)
self.assertEqual(func_results, expected_results)
# Set the nest names and re-test the function.
self.asym_model_obj.nest_names = ["No Nest"]
expected_results_2 = self.asym_model_obj.nest_names + expected_results
func_results_2 = func(self.asym_model_obj)
self.assertIsInstance(func_results_2, list)
self.assertEqual(func_results_2, expected_results_2)
return None
def test_get_param_list_for_prediction(self):
# Determine the number of replicates
num_replicates = 10
# Create a fake model object with the needed attributes
class FakeModel(object):
def __init__(self):
self.nest_names = ['one', 'oneA']
self.shape_names = ['two', 'twoA', 'twoB']
self.intercept_names = ['three']
self.ind_var_names =\
['four', 'fourA', 'fourB', 'fourC', 'fourD']
fake_model_obj = FakeModel()
# Create a fake set of bootstrap replicates
fake_replicates =\
(np.array([1, 1, 2, 2, 2, 3, 4, 4, 4, 4, 4])[None, :] *
np.ones(num_replicates)[:, None])
# Create the expected result
expected_param_list = [4 * np.ones((5, num_replicates)),
3 * np.ones((1, num_replicates)),
2 * np.ones((3, num_replicates)),
np.ones((2, num_replicates))]
# Alias the function being tested
func = bc.get_param_list_for_prediction
# Calculate the function result
func_result = func(fake_model_obj, fake_replicates)
# Perform the desired tests with a full set of parameters
self.assertIsInstance(func_result, list)
self.assertEqual(len(func_result), 4)
for pos, func_array in enumerate(func_result):
expected_array = expected_param_list[pos]
self.assertIsInstance(func_array, np.ndarray)
self.assertEqual(func_array.shape, expected_array.shape)
npt.assert_allclose(func_array, expected_array)
# Perform the desired tests with just index coefficients
for attr in ['intercept_names', 'shape_names', 'nest_names']:
setattr(fake_model_obj, attr, None)
func_result_2 = func(fake_model_obj, fake_replicates[:, -5:])
expected_result_2 =\
[4 * np.ones((5, num_replicates)), None, None, None]
self.assertIsInstance(func_result_2, list)
for pos in xrange(1, 4):
self.assertIsNone(func_result_2[pos])
self.assertIsInstance(func_result_2[0], np.ndarray)
self.assertEqual(func_result_2[0].shape, expected_result_2[0].shape)
npt.assert_allclose(func_result_2[0], expected_result_2[0])
return None
def test_ensure_replicates_kwarg_validity(self):
# Create the 'good' and 'bad' arguments for testing
good_args = ['bootstrap', 'jackknife']
bad_args = ['bad', 2, None]
# Alias the function being tested
func = bc.ensure_replicates_kwarg_validity
# Note the expected error message
expected_error_msg =\
"`replicates` MUST be either 'bootstrap' or 'jackknife'."
# Perform the desired tests
for good_arg in good_args:
self.assertIsNone(func(good_arg))
for bad_arg in bad_args:
self.assertRaisesRegexp(ValueError,
expected_error_msg,
func,
bad_arg)
return None
def test_boot_initialization(self):
# Create the bootstrap object
boot_obj =\
bc.Boot(self.asym_model_obj, self.asym_model_obj.params.values)
# Test the bootstrap object.
self.assertIsInstance(boot_obj, bc.Boot)
self.assertEqual(id(boot_obj.model_obj), id(self.asym_model_obj))
self.assertEqual(self.asym_model_obj.params.index.tolist(),
boot_obj.mle_params.index.tolist())
expected_attrs =\
["bootstrap_replicates", "jackknife_replicates",
"percentile_interval", "bca_interval",
"abc_interval", "all_intervals",
"jackknife_log_likehoods",
"bootstrap_log_likelihoods"]
for current_attr in expected_attrs:
self.assertTrue(hasattr(boot_obj, current_attr))
self.assertIsNone(getattr(boot_obj, current_attr))
return None
def test_generate_bootstrap_replicates(self):
# Create the bootstrap object.
boot_obj =\
bc.Boot(self.asym_model_obj, self.asym_model_obj.params.values)
# Determine the number of bootstrap samples that we wish to take
num_samples = 3
# Create the necessary keyword arguments.
mnl_init_vals =\
np.zeros(len(self.fake_intercept_names) +
sum([len(x) for x in self.fake_names.values()]))
mnl_kwargs = {"ridge": 0.01,
"maxiter": 1200,
"method": "bfgs"}
bootstrap_kwargs = {"mnl_obj": self.mnl_model_obj,
"mnl_init_vals": mnl_init_vals,
"mnl_fit_kwargs": mnl_kwargs,
"constrained_pos": [0],
"boot_seed": 1988}
# Alias the needed function
func = boot_obj.generate_bootstrap_replicates
# Get the function results
func_results =\
func(num_samples,
mnl_obj=self.mnl_model_obj,
mnl_init_vals=mnl_init_vals,
mnl_fit_kwargs=mnl_kwargs,
constrained_pos=[0],
boot_seed=1988)
# Perform the requisite tests
self.assertIsNone(func_results)
self.assertIsInstance(boot_obj.bootstrap_replicates, pd.DataFrame)
self.assertEqual(boot_obj.bootstrap_replicates.ndim, 2)
expected_shape = (num_samples, self.asym_model_obj.params.size)
self.assertEqual(boot_obj.bootstrap_replicates.shape, expected_shape)
self.assertEqual(boot_obj.bootstrap_replicates
.iloc[:, 0].unique().size, 1)
self.assertEqual(boot_obj.bootstrap_replicates
.iloc[:, 0].unique()[0], 0)
self.assertTrue(boot_obj.bootstrap_replicates
.iloc[:, 1].unique().size > 1)
return None
def test_generate_jackknife_replicates(self):
# Create the bootstrap object.
boot_obj =\
bc.Boot(self.asym_model_obj, self.asym_model_obj.params.values)
# Create the necessary keyword arguments.
mnl_init_vals =\
np.zeros(len(self.fake_intercept_names) +
sum([len(x) for x in self.fake_names.values()]))
mnl_kwargs = {"ridge": 0.01,
"maxiter": 1200,
"method": "bfgs"}
bootstrap_kwargs = {"mnl_obj": self.mnl_model_obj,
"mnl_init_vals": mnl_init_vals,
"mnl_fit_kwargs": mnl_kwargs,
"constrained_pos": [0],
"boot_seed": 1988}
# Alias the needed function
func = boot_obj.generate_jackknife_replicates
# Get the function results
func_results =\
func(mnl_obj=self.mnl_model_obj,
mnl_init_vals=mnl_init_vals,
mnl_fit_kwargs=mnl_kwargs,
constrained_pos=[0])
# Perform the requisite tests
self.assertIsNone(func_results)
self.assertIsInstance(boot_obj.jackknife_replicates, pd.DataFrame)
self.assertEqual(boot_obj.jackknife_replicates.ndim, 2)
expected_shape =\
(self.fake_rows_to_obs.shape[1], self.asym_model_obj.params.size)
self.assertEqual(boot_obj.jackknife_replicates.shape, expected_shape)
self.assertEqual(boot_obj.jackknife_replicates
.iloc[:, 0].unique().size, 1)
self.assertEqual(boot_obj.jackknife_replicates
.iloc[:, 0].unique()[0], 0)
self.assertTrue(boot_obj.jackknife_replicates
.iloc[:, 1].unique().size > 1)
return None
class IntervalTests(unittest.TestCase):
"""
References
----------
Efron, Bradley, and Robert J. Tibshirani. An Introduction to the
Bootstrap. CRC press, 1994. Chapter 14.
Notes
-----
The data and tests used in the `IntervalTests` test suite come from the
Efron & Tibshirani reference cited above.
"""
def setUp(self):
# Store the spatial test data from Efron and Tibshirani (1994)
self.test_data =\
np.array([48, 36, 20, 29, 42, 42, 20, 42, 22, 41, 45, 14, 6,
0, 33, 28, 34, 4, 32, 24, 47, 41, 24, 26, 30, 41])
# Note how many test data observations there are.
self.num_test_obs = self.test_data.size
# Store the MLE estimate
self.test_theta_hat = self.calc_theta(self.test_data)
# Create a pandas series of the data. Allows for easy case deletion.
self.raw_series = pd.Series(self.test_data)
# Create the array of jackknife replicates
self.jackknife_replicates =\
np.empty((self.num_test_obs, 1), dtype=float)
for obs in xrange(self.num_test_obs):
current_data = self.raw_series[self.raw_series.index != obs].values
self.jackknife_replicates[obs] = self.calc_theta(current_data)[0]
# Create the bootstrap replicates
num_test_reps = 5000
test_indices = np.arange(self.num_test_obs)
boot_indx_shape = (num_test_reps, self.num_test_obs)
np.random.seed(8292017)
boot_indices =\
np.random.choice(test_indices,
replace=True,
size=self.num_test_obs*num_test_reps)
self.bootstrap_replicates =\
np.fromiter((self.calc_theta(self.test_data[x])[0] for x in
boot_indices.reshape(boot_indx_shape)),
dtype=float)[:, None]
self.rows_to_obs = eye(self.test_data.size, format='csr', dtype=int)
# Create a fake model object and a fake model class that will implement the
# T(P) function through it's fit_mle method.
test_data = self.test_data
fake_rows_to_obs = self.rows_to_obs
calc_theta = self.calc_theta
class FakeModel(object):
def __init__(self):
# Create needed attributes to successfully mock an MNDC_Model
# instance in this test
self.data = pd.Series([pos for pos, x in enumerate(test_data)])
self.obs_id_col = np.arange(self.data.size, dtype=int)
needed_names = ['ind_var_names', 'intercept_names',
'shape_names', 'nest_names']
for name in needed_names:
setattr(self, name, None)
self.ind_var_names = ['variance']
# Create a get_mappings_for_fit function that will allow for
# successful mocking in this test
def get_mappings_for_fit(self):
return {"rows_to_obs": fake_rows_to_obs}
# Use the T(P) function from the spatial test data example.
def fit_mle(self,
init_vals,
weights=None,
**kwargs):
return {'x': calc_theta(test_data, weights=weights)}
self.fake_model_obj = FakeModel()
# Create the bootstrap object
self.boot =\
bc.Boot(self.fake_model_obj,
pd.Series(self.test_theta_hat, index=["variance"]))
self.boot.bootstrap_replicates =\
pd.DataFrame(self.bootstrap_replicates, columns=['variance'])
self.boot.jackknife_replicates =\
pd.DataFrame(self.jackknife_replicates, columns=['variance'])
# Store the confidence percentage that will be used for the test
self.conf_percentage = 90
return None
# Create the function to calculate the objective function.
def calc_theta(self, array, weights=None):
if weights is None:
result = ((array - array.mean())**2).sum() / float(array.size)
else:
a_mean = weights.dot(array)
differences = (array - a_mean)
squared_diffs = differences**2
result = weights.dot(squared_diffs)
return np.array([result])
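# Sanity check for calc_theta: for the array [1, 2, 3] the mean is 2, so the
# plug-in (biased) variance is ((1-2)**2 + (2-2)**2 + (3-2)**2) / 3 = 2/3.
# With uniform weights of 1/3 each, the weighted branch gives the same value.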
def test_calc_percentile_interval(self):
# Alias the function being tested
func = self.boot.calc_percentile_interval
# Perform the first test
self.assertIsNone(self.boot.percentile_interval)
# Calculate the function result
func(self.conf_percentage)
# Note the expected result is from Table 14.2 on page 183 of
# Efron & Tibshirani (1994)
expected_result = np.array([100.8, 233.9])
expected_columns = ['5%', '95%']
# Perform the remaining tests
self.assertIsInstance(self.boot.percentile_interval, pd.DataFrame)
self.assertEqual(expected_columns,
self.boot.percentile_interval.columns.tolist())
self.assertIn("variance", self.boot.percentile_interval.index)
self.assertEqual(self.boot.percentile_interval.shape, (1, 2))
npt.assert_allclose(self.boot.percentile_interval.iloc[0, :],
expected_result, rtol=0.02)
# Set the percentile interval back to none.
self.boot.percentile_interval = None
self.assertIsNone(self.boot.percentile_interval)
return None
def test_calc_bca_interval(self):
# Alias the function being tested
func = self.boot.calc_bca_interval
# Perform the first test
self.assertIsNone(self.boot.bca_interval)
# Calculate the function result
func(self.conf_percentage)
# Note the expected result is from Table 14.2 on page 183 of
# Efron & Tibshirani (1994)
expected_result = np.array([115.8, 259.6])
expected_columns = ['5%', '95%']
# Perform the remaining tests
self.assertIsInstance(self.boot.bca_interval, pd.DataFrame)
self.assertEqual(expected_columns,
self.boot.bca_interval.columns.tolist())
self.assertIn("variance", self.boot.bca_interval.index)
self.assertEqual(self.boot.bca_interval.shape, (1, 2))
npt.assert_allclose(self.boot.bca_interval.iloc[0, :],
expected_result, rtol=0.01)
# Set the percentile interval back to none.
self.boot.bca_interval = None
self.assertIsNone(self.boot.bca_interval)
return None
def test_calc_abc_interval(self):
# Alias the function being tested
func = self.boot.calc_abc_interval
# Perform the first test
self.assertIsNone(self.boot.abc_interval)
# Calculate the function result
func(self.conf_percentage, self.test_theta_hat, epsilon=0.001)
# Note the expected result, from Table 14.2 on page 183 of
# Efron & Tibshirani (1994)
expected_result = np.array([116.7, 260.9])
expected_columns = ['5%', '95%']
# Perform the remaining tests
self.assertIsInstance(self.boot.abc_interval, pd.DataFrame)
self.assertEqual(expected_columns,
self.boot.abc_interval.columns.tolist())
self.assertIn("variance", self.boot.abc_interval.index)
self.assertEqual(self.boot.abc_interval.shape, (1, 2))
npt.assert_allclose(self.boot.abc_interval.iloc[0, :],
expected_result, rtol=0.01)
# Set the percentile interval back to none.
self.boot.abc_interval = None
self.assertIsNone(self.boot.abc_interval)
return None
def test_calc_conf_intervals_except_all(self):
kwargs = {"init_vals": self.test_theta_hat,
"epsilon": 0.001}
# Alias the function being tested
func = self.boot.calc_conf_intervals
# Create the list of attributes to be tested
tested_attrs = ['percentile_interval', 'bca_interval', 'abc_interval']
interval_types = ['pi', 'bca', 'abc']
# Note the expected result, from Table 14.2 on page 183 of
# Efron & Tibshirani (1994)
expected_result =\
np.array([[100.8, 233.9], [115.8, 259.6], [116.7, 260.9]])
expected_columns = ['5%', '95%']
# Perform the desired tests
for pos, i_type in enumerate(interval_types):
desired_attr = getattr(self.boot, tested_attrs[pos])
self.assertIsNone(desired_attr)
# Calculate the function result
kwargs['interval_type'] = i_type
func(self.conf_percentage, **kwargs)
# Perform the remaining tests
desired_attr = getattr(self.boot, tested_attrs[pos])
self.assertIsInstance(desired_attr, pd.DataFrame)
self.assertEqual(expected_columns,
desired_attr.columns.tolist())
self.assertIn("variance", desired_attr.index)
self.assertEqual(desired_attr.shape, (1, 2))
npt.assert_allclose(desired_attr.iloc[0, :],
expected_result[pos], rtol=0.02)
# Perform clean-up activities after the test
setattr(self.boot, tested_attrs[pos], None)
return None
def test_calc_conf_intervals_all(self):
kwargs = {"interval_type": 'all',
"init_vals": self.test_theta_hat,
"epsilon": 0.001}
# Alias the function being tested
func = self.boot.calc_conf_intervals
# Create the list of attributes to be tested
tested_attrs = ['percentile_interval', 'bca_interval', 'abc_interval']
# Note the expected result, from Table 14.2 on page 183 of
# Efron & Tibshirani (1994)
expected_result =\
np.array([[100.8, 233.9], [115.8, 259.6], [116.7, 260.9]])
# Note the expected MultiIndex columns
expected_columns_all = [("percentile_interval", "5%"),
("percentile_interval", "95%"),
("BCa_interval", "5%"),
("BCa_interval", "95%"),
("ABC_interval", "5%"),
("ABC_interval", "95%")]
expected_columns_single = ["5%", "95%"]
# Perform the expected tests before running the function
for attr in tested_attrs:
self.assertIsNone(getattr(self.boot, attr))
# Calculate the function results
func(self.conf_percentage, **kwargs)
# Perform the remaining tests
for pos, attr in enumerate(tested_attrs):
desired_attr = getattr(self.boot, attr)
self.assertEqual(expected_columns_single,
desired_attr.columns.tolist())
self.assertIn("variance", desired_attr.index)
self.assertEqual(desired_attr.shape, (1, 2))
npt.assert_allclose(desired_attr.iloc[0, :],
expected_result[pos], rtol=0.02)
# Test the 'all_intervals' attribute.
self.assertIsInstance(self.boot.all_intervals, pd.DataFrame)
self.assertEqual(expected_columns_all,
self.boot.all_intervals.columns.tolist())
self.assertIn("variance", self.boot.all_intervals.index)
self.assertEqual(self.boot.all_intervals.shape, (1, 6))
npt.assert_allclose(self.boot.all_intervals.values,
expected_result.reshape((1, 6)), rtol=0.02)
# Set the various intervals back to None.
for attr in tested_attrs + ['all_intervals']:
setattr(self.boot, attr, None)
self.assertIsNone(getattr(self.boot, attr))
return None
def test_interval_type_error_in_calc_conf_intervals(self):
# Alias the function being tested
func = self.boot.calc_conf_intervals
# Create kwargs for the function to be tested
kwargs = {"interval_type": 'bad_type',
"init_vals": self.test_theta_hat,
"epsilon": 0.001}
# Note the expected error message.
expected_error_msg =\
"interval_type MUST be in `\['pi', 'bca', 'abc', 'all'\]`"
# Ensure that the appropriate errors are raised.
self.assertRaisesRegexp(ValueError,
expected_error_msg,
func,
self.conf_percentage,
**kwargs)
return None
class AnalysisTests(unittest.TestCase):
def make_mnl_model(self):
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two
# alternatives. There is one generic variable; no alternative specific
# constants or shape parameters are used in this MNL specification.
# Create the betas to be used during the tests
fake_betas = np.array([-0.6])
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
# index in the scobit model.
fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5]])
# Create the index array for this set of choice situations
fake_index = fake_design.dot(fake_betas)
# Create the needed dataframe for the model constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": fake_design[:, 0]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
# Create the index specification and name dictionary for the model
mnl_spec = OrderedDict()
mnl_names = OrderedDict()
mnl_spec["x"] = [[1, 2, 3]]
mnl_names["x"] = ["x (generic coefficient)"]
# Bundle args and kwargs used to construct the Asymmetric Logit model.
mnl_args = [fake_df, alt_id_col, obs_id_col, choice_col, mnl_spec]
# Create a variable for the kwargs being passed to the constructor
mnl_kwargs = {"names": mnl_names}
# Initialize a basic choice model.
mnl_obj = MNL(*mnl_args, **mnl_kwargs)
# Create the desired model attributes for the clog log model
mnl_obj.coefs = pd.Series(fake_betas, index=mnl_names["x"])
mnl_obj.intercepts = None
mnl_obj.shapes = None
mnl_obj.nests = None
mnl_obj.params = mnl_obj.coefs.copy()
return mnl_obj
def make_asym_model(self):
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two
# alternatives. There is one generic variable. Two alternative
# specific constants and all three shape parameters are used.
# Create the betas to be used during the tests
fake_betas = np.array([-0.6])
# Create the fake outside intercepts to be used during the tests
fake_intercepts = np.array([1, 0.5])
# Create names for the intercept parameters
fake_intercept_names = ["ASC 1", "ASC 2"]
# Record the position of the intercept that is not being estimated
fake_intercept_ref_pos = 2
# Create the shape parameters to be used during the tests. Note that
# these are the reparameterized shape parameters, thus they will be
# exponentiated in the fit_mle process and various calculations.
fake_shapes = np.array([-1, 1])
# Create names for the intercept parameters
fake_shape_names = ["Shape 1", "Shape 2"]
# Record the position of the shape parameter that is being constrained
fake_shape_ref_pos = 2
# Calculate the 'natural' shape parameters
natural_shapes = asym._convert_eta_to_c(fake_shapes,
fake_shape_ref_pos)
# Create an array of all model parameters
fake_all_params = np.concatenate((fake_shapes,
fake_intercepts,
fake_betas))
# The mapping between rows and alternatives is given below.
fake_rows_to_alts = csr_matrix(np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1]]))
# Get the mapping between rows and observations
fake_rows_to_obs = csr_matrix(np.array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]))
# Create the fake design matrix with columns denoting X
# The intercepts are not included because they are kept outside the
# index in the scobit model.
fake_design = np.array([[1],
[2],
[3],
[1.5],
[3.5]])
# Create the index array for this set of choice situations
fake_index = fake_design.dot(fake_betas)
# Create the needed dataframe for the Asymmetric Logit constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": fake_design[:, 0],
"intercept": [1 for i in range(5)]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
# Create the index specification and name dictionary for the model
fake_specification = OrderedDict()
fake_names = OrderedDict()
fake_specification["x"] = [[1, 2, 3]]
fake_names["x"] = ["x (generic coefficient)"]
# Bundle args and kwargs used to construct the Asymmetric Logit model.
constructor_args = [fake_df,
alt_id_col,
obs_id_col,
choice_col,
fake_specification]
# Create a variable for the kwargs being passed to the constructor
constructor_kwargs = {"intercept_ref_pos": fake_intercept_ref_pos,
"shape_ref_pos": fake_shape_ref_pos,
"names": fake_names,
"intercept_names": fake_intercept_names,
"shape_names": fake_shape_names}
# Initialize a basic Asymmetric Logit model whose coefficients will be
# estimated.
model_obj = asym.MNAL(*constructor_args, **constructor_kwargs)
# Get the fitted probabilities for this model and dataset
# Note this relies on the calc_probabilities function being functional.
# args = [fake_betas,
# fake_design,
# fake_df[alt_id_col].values,
# fake_rows_to_obs,
# fake_rows_to_alts,
# model_obj.utility_transform]
# kwargs = {"intercept_params": fake_intercepts,
# "shape_params": fake_shapes,
# "return_long_probs": True}
# model_obj.prob_array =\
# choice_calcs.calc_probabilities(*args, **kwargs)
model_obj.coefs = pd.Series(fake_betas, index=fake_names["x"])
model_obj.intercepts =\
pd.Series(fake_intercepts, index=fake_intercept_names)
model_obj.shapes = pd.Series(fake_shapes, index=fake_shape_names)
model_obj.nests = None
model_obj.params =\
pd.concat([model_obj.shapes,
model_obj.intercepts,
model_obj.coefs],
axis=0, ignore_index=False)
return model_obj
def make_mixed_model(self):
# Fake random draws where Row 1 is for observation 1 and row 2 is
# for observation 2. Column 1 is for draw 1 and column 2 is for draw 2
fake_draws = mlc.get_normal_draws(2, 2, 1, seed=1)[0]
# Create the betas to be used during the tests
fake_betas = np.array([0.3, -0.6, 0.2])
fake_std = 1
fake_betas_ext = np.concatenate((fake_betas,
np.array([fake_std])),
axis=0)
# Create the fake design matrix with columns denoting ASC_1, ASC_2, X
fake_design = np.array([[1, 0, 1],
[0, 1, 2],
[0, 0, 3],
[1, 0, 1.5],
[0, 1, 2.5],
[0, 0, 3.5],
[1, 0, 0.5],
[0, 1, 1.0],
[0, 0, 1.5]])
# Record what positions in the design matrix are being mixed over
mixing_pos = [2]
# Create the arrays that specify the choice situation, individual id
# and alternative ids
situation_ids = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
individual_ids = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2])
alternative_ids = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
# Create a fake array of choices
choice_array = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0])
# Create the 'rows_to_mixers' sparse array for this dataset
# Denote the rows that correspond to observation 1 and observation 2
obs_1_rows = np.ones(fake_design.shape[0])
# Make sure the rows for observation 2 are given a zero in obs_1_rows
obs_1_rows[-3:] = 0
obs_2_rows = 1 - obs_1_rows
# Create the row_to_mixers scipy.sparse matrix
fake_rows_to_mixers = csr_matrix(obs_1_rows[:, None] ==
np.array([1, 0])[None, :])
# Create the rows_to_obs scipy.sparse matrix
fake_rows_to_obs = csr_matrix(situation_ids[:, None] ==
np.arange(1, 4)[None, :])
# Create the rows_to_alts scipy.sparse matrix
fake_rows_to_alts = csr_matrix(alternative_ids[:, None] ==
np.arange(1, 4)[None, :])
# Create the design matrix that we should see for draw 1 and draw 2
arrays_to_join = (fake_design.copy(),
fake_design.copy()[:, -1][:, None])
fake_design_draw_1 = np.concatenate(arrays_to_join, axis=1)
fake_design_draw_2 = fake_design_draw_1.copy()
# Multiply the 'random' coefficient draws by the corresponding variable
fake_design_draw_1[:, -1] *= (obs_1_rows *
fake_draws[0, 0] +
obs_2_rows *
fake_draws[1, 0])
fake_design_draw_2[:, -1] *= (obs_1_rows *
fake_draws[0, 1] +
obs_2_rows *
fake_draws[1, 1])
extended_design_draw_1 = fake_design_draw_1[:, None, :]
extended_design_draw_2 = fake_design_draw_2[:, None, :]
fake_design_3d = np.concatenate((extended_design_draw_1,
extended_design_draw_2),
axis=1)
# Create the fake systematic utility values
sys_utilities_draw_1 = fake_design_draw_1.dot(fake_betas_ext)
sys_utilities_draw_2 = fake_design_draw_2.dot(fake_betas_ext)
#####
# Calculate the probabilities of each alternatve in each choice
# situation
#####
long_exp_draw_1 = np.exp(sys_utilities_draw_1)
long_exp_draw_2 = np.exp(sys_utilities_draw_2)
ind_exp_sums_draw_1 = fake_rows_to_obs.T.dot(long_exp_draw_1)
ind_exp_sums_draw_2 = fake_rows_to_obs.T.dot(long_exp_draw_2)
long_exp_sum_draw_1 = fake_rows_to_obs.dot(ind_exp_sums_draw_1)
long_exp_sum_draw_2 = fake_rows_to_obs.dot(ind_exp_sums_draw_2)
long_probs_draw_1 = long_exp_draw_1 / long_exp_sum_draw_1
long_probs_draw_2 = long_exp_draw_2 / long_exp_sum_draw_2
prob_array = np.concatenate((long_probs_draw_1[:, None],
long_probs_draw_2[:, None]),
axis=1)
###########
# Create a mixed logit object for later use.
##########
# Create a fake old long format dataframe for mixed logit model object
alt_id_column = "alt_id"
situation_id_column = "situation_id"
obs_id_column = "observation_id"
choice_column = "choice"
data = {"x": fake_design[:, 2],
alt_id_column: alternative_ids,
situation_id_column: situation_ids,
obs_id_column: individual_ids,
choice_column: choice_array}
fake_old_df = pd.DataFrame(data)
fake_old_df["intercept"] = 1
# Create a fake specification
fake_spec = OrderedDict()
fake_names = OrderedDict()
fake_spec["intercept"] = [1, 2]
fake_names["intercept"] = ["ASC 1", "ASC 2"]
fake_spec["x"] = [[1, 2, 3]]
fake_names["x"] = ["beta_x"]
# Specify the mixing variable
fake_mixing_vars = ["beta_x"]
# Create a fake version of a mixed logit model object
args = [fake_old_df,
alt_id_column,
situation_id_column,
choice_column,
fake_spec]
kwargs = {"names": fake_names,
"mixing_id_col": obs_id_column,
"mixing_vars": fake_mixing_vars}
mixl_obj = mixed_logit.MixedLogit(*args, **kwargs)
# Set all the necessary attributes for prediction:
# design_3d, coefs, intercepts, shapes, nests, mixing_pos
mixl_obj.design_3d = fake_design_3d
mixl_obj.ind_var_names += ["Sigma X"]
mixl_obj.coefs =\
pd.Series(fake_betas_ext, index=mixl_obj.ind_var_names)
mixl_obj.intercepts = None
mixl_obj.shapes = None
mixl_obj.nests = None
mixl_obj.params = mixl_obj.coefs.copy()
return mixl_obj
def make_nested_model(self):
# Create the betas to be used during the tests
fake_betas = np.array([0.3, -0.6, 0.2])
# Create the fake nest coefficients to be used during the tests
# Note that these are the 'natural' nest coefficients, i.e. the
# inverse of the scale parameters for each nest. They should be bigger
# than or equal to 1.
natural_nest_coefs = np.array([1 - 1e-16, 0.5])
# Create an array of all model parameters
fake_all_params = np.concatenate((natural_nest_coefs,
fake_betas))
# The set up being used is one where there are two choice situations,
# The first having three alternatives, and the second having only two.
# The nest memberships of these alternatives are given below.
fake_rows_to_nests = csr_matrix(np.array([[1, 0],
[1, 0],
[0, 1],
[1, 0],
[0, 1]]))
# Create a sparse matrix that maps the rows of the design matrix to the
# observations
fake_rows_to_obs = csr_matrix(np.array([[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]))
# Create the fake design matrix with columns denoting ASC_1, ASC_2, X
fake_design = np.array([[1, 0, 1],
[0, 1, 2],
[0, 0, 3],
[1, 0, 1.5],
[0, 0, 3.5]])
# Create fake versions of the needed arguments for the MNL constructor
fake_df = pd.DataFrame({"obs_id": [1, 1, 1, 2, 2],
"alt_id": [1, 2, 3, 1, 3],
"choice": [0, 1, 0, 0, 1],
"x": range(5),
"intercept": [1 for i in range(5)]})
# Record the various column names
alt_id_col = "alt_id"
obs_id_col = "obs_id"
choice_col = "choice"
# Store the choice array
choice_array = fake_df[choice_col].values
# Create a sparse matrix that maps the chosen rows of the design
# matrix to the observations
fake_chosen_rows_to_obs = csr_matrix(np.array([[0, 0],
[1, 0],
[0, 0],
[0, 0],
[0, 1]]))
# Create the index specification and name dictionary for the model
fake_specification = OrderedDict()
fake_specification["intercept"] = [1, 2]
fake_specification["x"] = [[1, 2, 3]]
fake_names = OrderedDict()
fake_names["intercept"] = ["ASC 1", "ASC 2"]
fake_names["x"] = ["x (generic coefficient)"]
# Create the nesting specification
fake_nest_spec = OrderedDict()
fake_nest_spec["Nest 1"] = [1, 2]
fake_nest_spec["Nest 2"] = [3]
# Create a nested logit object
args = [fake_df,
alt_id_col,
obs_id_col,
choice_col,
fake_specification]
kwargs = {"names": fake_names,
"nest_spec": fake_nest_spec}
model_obj = nested_logit.NestedLogit(*args, **kwargs)
model_obj.coefs = pd.Series(fake_betas, index=model_obj.ind_var_names)
model_obj.intercepts = None
model_obj.shapes = None
def logit(x):
return np.log(x / (1 - x))
model_obj.nests =\
pd.Series(logit(natural_nest_coefs), index=fake_nest_spec.keys())
model_obj.params =\
pd.concat([model_obj.nests, model_obj.coefs],
axis=0, ignore_index=False)
# Store a ridge parameter
# ridge = 0.5
# Gather the arguments needed for the calc_nested_probs function
# args = [natural_nest_coefs,
# fake_betas,
# model_obj.design,
# fake_rows_to_obs,
# fake_rows_to_nests]
# kwargs = {"return_type": "long_probs"}
# model_obj.prob_array = nlc.calc_nested_probs(*args, **kwargs)
return model_obj
def setUp(self):
"""
Create the real model objects.
"""
self.mnl_model = self.make_mnl_model()
self.asym_model = self.make_asym_model()
self.mixed_model = self.make_mixed_model()
self.nested_model = self.make_nested_model()
return None
def test_calc_log_likes_for_replicates(self):
# Create the keyword arguments needed for the test.
kwargs = {'num_draws': 10, 'seed': 932017}
# Note the objects that are to be tested
model_objects = [self.mnl_model,
self.asym_model,
self.mixed_model,
self.nested_model]
# Iterate over the Asym, MNL, Mixed, and Nested models.
for model_obj in model_objects:
# Create the bootstrap object based on the model object.
boot = bc.Boot(model_obj, model_obj.params.values)
# Create the bootstrap and jackknife replicate attributes.
replicates =\
pd.DataFrame(np.concatenate([model_obj.params.values[None, :],
model_obj.params.values[None, :]],
axis=0))
boot.bootstrap_replicates = replicates
boot.jackknife_replicates = replicates
# Alias the function being tested.
func = boot.calc_log_likes_for_replicates
for replicate_type in ['bootstrap', 'jackknife']:
# Calculate function results using each bootstrap object
kwargs["replicates"] = replicate_type
func_result = func(**kwargs)
# Ensure function results have the expected properties
self.assertIsInstance(func_result, np.ndarray)
self.assertEqual(func_result.ndim, 1)
self.assertEqual(func_result.shape, (replicates.shape[0],))
return None
def test_calc_gradient_norm_for_replicates(self):
# Create the bootstrap object based on the MNL model.
base_array = self.mnl_model.params.values
base_array_2d = base_array[None, :]
boot = bc.Boot(self.mnl_model, base_array)
# Create the bootstrap and jackknife replicate attributes.
replicates = pd.DataFrame(np.concatenate((base_array_2d,
base_array_2d + 1,
base_array_2d - 1),
axis=0))
boot.bootstrap_replicates = replicates
boot.jackknife_replicates = replicates
# Alias the function being tested.
func = boot.calc_gradient_norm_for_replicates
# Perform the desired tests.
for replicate_type in ['bootstrap', 'jackknife']:
func_result = func(replicates=replicate_type)
self.assertIsInstance(func_result, np.ndarray)
self.assertEqual(func_result.shape, (replicates.shape[0],))
self.assertTrue(np.unique(func_result).size == func_result.size)
return None
|
the-stack_0_11153 | __author__ = "Nitin Kumar, Rick Sherman"
__credits__ = "Jeremy Schulman"
import unittest
from nose.plugins.attrib import attr
from jnpr.junos.jxml import NAME, INSERT, remove_namespaces
@attr('unit')
class Test_JXML(unittest.TestCase):
def test_name(self):
op = NAME('test')
self.assertEqual(op['name'], 'test')
def test_insert(self):
op = INSERT('test')
self.assertEqual(op['insert'], 'test')
def test_remove_namespaces(self):
xmldata = \
"""<xsl:stylesheet xmlns:xsl="http://xml.juniper.net/junos">
<xsl:template>
<xsl:attribute name="{myname}">
</xsl:attribute>
</xsl:template>
</xsl:stylesheet>"""
import xml.etree.ElementTree as ET
root = ET.fromstring(xmldata)
test = remove_namespaces(root)
for elem in test.getiterator():
i = elem.tag.find('}')
if i > 0:
i = i + 1
self.assertTrue(i <= 0)
|
the-stack_0_11154 | # -*- coding: utf-8 -*-
import asyncio
import collections
import functools
import json
import time
from typing import List, Optional
from threading import Thread
from vk_api import VkApi
from vk_api.bot_longpoll import VkBotEventType, VkBotLongPoll
from vk_api.execute import VkFunction
from vk_api.upload import VkUpload
from vk_api.utils import get_random_id
API_VERSION = '5.130'
vk_execute = VkFunction(
args=('methods',),
clean_args=('methods',),
code='''
%(methods)s;
return 1;
''')
def threaded(fn):
def wrapper(*args, **kwargs):
Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
return wrapper
class VKMessage:
__slots__ = ('id', 'peer_id', 'user_id', 'text', 'payload', 'reply')
def __init__(self, raw: dict, vk: 'VK') -> None:
self.id = raw['id']
self.peer_id = raw['peer_id']
self.user_id = raw['from_id']
self.text = raw['text']
self.payload = json.loads(raw['payload']) if 'payload' in raw else None
self.reply = functools.partial(vk.send, self.peer_id)
class VK:
__slots__ = ('vk', 'logger', 'event_queue', 'msg_queue', 'user_cache', 'group_id')
def __init__(self, token: str, logger) -> None:
self.vk = VkApi(token=token, api_version=API_VERSION)
self.logger = logger
self.event_queue = collections.deque()
self.msg_queue = []
self.user_cache = {}
self.group_id = self.method('groups.getById')[0]['id']
self.init_group_settings()
def method(self, method: str, args: dict = None) -> dict:
return self.vk.method(method, args)
def send(self, peer_id: int, message: str, keyboard=None, attach=None, sticker=None, disable_mentions=True) -> None:
if 4000 < len(message) < 100000 and (not attach) and (not sticker):
for message_part in [message[j:j + 4000] for j in range(0, len(message), 4000)]:
self.msg_queue.append({'peer_id': peer_id, 'message': message_part, 'random_id': get_random_id(), 'disable_mentions': disable_mentions,
'keyboard': keyboard})
else:
self.msg_queue.append({'peer_id': peer_id, 'message': message, 'random_id': get_random_id(), 'disable_mentions': disable_mentions,
'keyboard': keyboard, 'attachment': attach, 'sticker_id': sticker})
def send_multiple(self, peer_ids: List[int], message: str, keyboard=None, disable_mentions=True) -> None:
self.msg_queue.append({'peer_ids': peer_ids, 'message': message, 'random_id': get_random_id(), 'disable_mentions': disable_mentions,
'keyboard': keyboard})
def get_user_link(self, target_id: int, name_case: str = 'nom') -> str:
if target_id not in self.user_cache and target_id != 0:
if target_id < 0:
self.user_cache[target_id] = self.method('groups.getById', {'group_id': -target_id})[0]
else:
self.user_cache[target_id] = self.method('users.get', {'user_ids': target_id, 'name_case': name_case})[0]
if target_id < 0:
return ''.join(['[id', str(target_id), '|', self.user_cache[target_id]['first_name'], ']'])
elif target_id == 0:
return '@id0'
else:
self.user_cache[target_id] = self.method('users.get', {'user_ids': target_id, 'name_case': name_case})[0]
return f"[id{target_id}|{self.user_cache[target_id]['first_name']}]"
def get_user_links(self, target_ids: List[int]) -> dict:
cached = True
for i in target_ids:
if i not in self.user_cache:
cached = False
break
if not cached:
for i in self.method('users.get', {'user_ids': ','.join(list(map(str, target_ids)))}):
self.user_cache[i['id']] = i
return {i: f"[id{i}|{self.user_cache[i]['first_name']}]" for i in target_ids}
def get_target_id(self, s: str) -> Optional[int]:
r = s.replace('https://', '').replace('vk.com/', '').replace('@id', '').replace('@', '').replace('[', '').replace(']', '')
if '|' in r:
r = r.split('|')[0]
if not r.isdecimal():
r = self.method('utils.resolveScreenName', {'screen_name': r.replace('-', 'club')})
if not r:
return
if r['type'] == 'user':
r = r['object_id']
elif r['type'] == 'group':
r = -r['object_id']
return int(r)
def is_chat_member(self, peer_id: int, user_id: int) -> bool:
members = self.method('messages.getConversationMembers', {'peer_id': peer_id})['items']
for i in members:
if i['member_id'] == user_id:
return True
def is_chat_admin(self, peer_id: int, user_id: int, check_if_owner: bool = False) -> bool:
members = self.method('messages.getConversationMembers', {'peer_id': peer_id})['items']
for i in members:
if i['member_id'] == user_id and 'is_admin' in i and i['is_admin'] and ((not check_if_owner) or ('is_owner' in i and i['is_owner'])):
return True
def get_chat_owner(self, peer_id: int) -> Optional[int]:
members = self.method('messages.getConversationMembers', {'peer_id': peer_id})['items']
for i in members:
if 'is_owner' in i and i['is_owner']:
return i['member_id']
def get_upload(self) -> VkUpload:
return VkUpload(self.vk)
def init_group_settings(self) -> None:
self.method('groups.setSettings', {
'group_id': self.group_id,
'messages': 1,
'bots_capabilities': 1,
'bots_start_button': 1,
'bots_add_to_chat': 1,
})
self.method('groups.setLongPollSettings', {
'group_id': self.group_id,
'enabled': 1,
'api_version': API_VERSION,
'message_new': 1,
})
async def messages_sender(self) -> None:
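        # Drain up to 25 queued messages per pass and send them through a
        # single execute() call, which batches the individual messages.send
        # requests into one API request (25 is the execute batch limit).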
while True:
queue = self.msg_queue[:25]
if queue:
self.msg_queue = self.msg_queue[25:]
try:
vk_execute(self.vk, ''.join(('API.messages.send(' + json.dumps(i, ensure_ascii=False, separators=(',', ':')) + ');') for i in queue))
except Exception as ex:
                    self.logger.warning('An error occurred while sending messages', exc_info=ex)
await asyncio.sleep(0.05)
@threaded
def event_handler(self) -> None:
convs = self.method('messages.getConversations', {'count': 200, 'filter': 'unanswered'})['items']
for i in convs:
self.event_queue.append(VKMessage(i['last_message'], self))
lp = VkBotLongPoll(self.vk, self.group_id)
while True:
try:
for event in lp.check():
if event.type == VkBotEventType.MESSAGE_NEW:
self.event_queue.append(VKMessage(event.raw['object']['message'], self))
else:
self.event_queue.append(event)
except Exception as ex:
                self.logger.warning('An error occurred in LongPoll', exc_info=ex)
time.sleep(3)
|
the-stack_0_11156 | from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
import tensorflow as tf
from tensorflow.keras import layers, regularizers
def rpn(feature_map, anchors_per_location=9):
shared = layers.Conv2D(512, (3, 3), padding='same', activation='relu', name='rpn_conv_shared')(feature_map)
# Anchor class (foreground, background)
# [batch, height, width, anchors_per_location * 2]
x = layers.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_class_raw')(shared)
# Reshape to [batch, anchors, 2]
x = layers.Reshape((-1, 2))(x)
rpn_class = layers.Activation('softmax', name='rpn_class_probs')(x)
# Bounding box refinement
# [batch, height, width, anchors_per_location * (x, y, log(w), log(h))]
x = layers.Conv2D(4 * anchors_per_location, (1, 1), padding='valid',
activation='linear', name='rpn_bbox_pred')(shared)
# Reshape to [batch, anchors, 4]
rpn_bbox = layers.Reshape((-1, 4))(x)
return rpn_class, rpn_bbox
def get_anchors():
anchors=[]
scales=(8,16,32)
ratios=(0.5,1,2)
    # Assumed completion of the unfinished loop: enumerate every
    # (scale, ratio) pair into a base anchor (height, width) shape.
    for scale in scales:
        for ratio in ratios:
            anchors.append((scale * ratio ** 0.5, scale / ratio ** 0.5))
    return anchors
def build_model():
inputs = layers.Input(shape=(None, None, 3)) # default shape is 224*224*3
x = preprocess_input(inputs)
backbone = VGG16(weights='imagenet', include_top=False)
feature_map = backbone(x)
rpn_class, rpn_bbox = rpn(feature_map)
    anchors = get_anchors()
    # Assumed completion: the original function ended without a return, so
    # wrap the inputs and the RPN heads into a Keras model here.
    return tf.keras.Model(inputs=inputs, outputs=[rpn_class, rpn_bbox])
|
the-stack_0_11160 | import unittest
from bbscript.stdlib import cmd_var, cmd_doc
from bbscript.errors import InvalidOperation
class TestVariables(unittest.TestCase):
def test_var_get(self):
doc = {"docname": "testdoc"}
ctx = {"$test_var": "test value", "$doc": doc}
self.assertEqual(cmd_var(ctx, "test_var"), "test value")
self.assertEqual(cmd_var(ctx, "doc"), doc)
self.assertEqual(cmd_var(ctx, "doc", "docname"), doc.get("docname"))
def test_doc(self):
doc = {"field1": "value1", "field2": True, "field3": 123}
meta = {
"fields": {
"field1": {
"type": "string"
},
"field2": {
"type": "boolean"
},
"field3": {
"type": "int"
}
}
}
ctx = {
"$doc": doc
}
self.assertEqual(cmd_doc(ctx, "doc", meta), doc)
|
the-stack_0_11161 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for different algorithms of reduction and broadcasting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum
import six
from tensorflow.python.client import device_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.docs import doc_controls
def check_destinations(destinations):
"""Checks whether `destinations` is not empty.
Args:
destinations: a `DistributedValues`, variable, or string object.
Returns:
Boolean which is True if `destinations` is not empty.
"""
# Calling bool() on a ResourceVariable is not allowed.
if isinstance(destinations, resource_variable_ops.ResourceVariable):
return bool(destinations.device)
return bool(destinations)
def validate_destinations(destinations):
if not isinstance(destinations,
(value_lib.DistributedValues,
resource_variable_ops.ResourceVariable,
value_lib.AggregatingVariable,
six.string_types,
value_lib.TPUMirroredVariable,
# LogicalDeviceSpec is only used internally, e.g. as a
# broadcast destination, never supplied by a user.
value_lib.LogicalDeviceSpec)):
raise ValueError("destinations must be one of a `DistributedValues` object,"
" a tf.Variable object, or a device string.")
if not check_destinations(destinations):
raise ValueError("destinations can not be empty")
def reduce_non_distributed_value(reduce_op, device_map, value, destinations):
"""Reduce a non-DistributedValue `value` to `destinations`."""
if isinstance(value, value_lib.DistributedValues):
raise ValueError("You are passing a `DistributedValue` to "
"`reduce_non_distributed_value`, which is not allowed.")
# If the same value is present on all replicas then the PerReplica value will
# be a single value. We also handle the case when `value` is a single value
# and equal to 0.
if value == 0:
return 0
# If there is only a single value and the reduce op is MEAN,
# that value should be on all destinations.
if reduce_op == reduce_util.ReduceOp.MEAN:
return value
validate_destinations(destinations)
# We do not support a reduce op of SUM if the value is the same across
# all replicas. We call this as part of assign functions for MirroredVariables
# and summing up identical values across replicas is not clearly defined.
if device_map.num_replicas_in_graph != 1:
raise ValueError("A non-DistributedValues value %s cannot be reduced with "
"the given reduce op %s." % (value, reduce_op))
return simple_broadcast(value, destinations)
def _make_tensor_into_per_replica(input_tensor):
"""Converts a single tensor into a PerReplica object."""
if isinstance(input_tensor, (tuple, list)):
raise ValueError("Cannot convert `input_tensor` to a `PerReplica` object, "
"got %r but expected a object that is not a tuple or list."
% (input_tensor,))
if isinstance(input_tensor, value_lib.PerReplica):
return input_tensor
try:
device = input_tensor.device
except AttributeError:
raise ValueError("Cannot convert `input_tensor` to a `PerReplica` object "
"because it doesn't have device set.")
device_map = value_lib.SingleDeviceMap(device)
return value_lib.PerReplica(device_map, (input_tensor,))
def _normalize_value_destination_pairs(value_destination_pairs):
"""Converts each tensor into a PerReplica object in the input list."""
result = []
value_destination_pairs = list(value_destination_pairs)
if not isinstance(value_destination_pairs, (list, tuple)):
raise ValueError("`value_destination_pairs` should be a list or tuple")
for pair in value_destination_pairs:
if not isinstance(pair, tuple):
raise ValueError(
"Each element of `value_destination_pairs` should be a tuple.")
if len(pair) != 2:
raise ValueError("Each element of `value_destination_pairs` should be a "
"tuple of size 2.")
per_replica = _make_tensor_into_per_replica(pair[0])
result.append((per_replica, pair[1]))
return result
def _validate_value_destination_pairs(value_destination_pairs):
# TODO(yuefengz): raise exceptions instead of returning False.
# pylint: disable=g-missing-docstring
if not value_destination_pairs: return False
if not isinstance(value_destination_pairs, (list, tuple)): return False
if not all(isinstance(pair, tuple) for pair in value_destination_pairs):
return False
if not all(isinstance(v[0], value_lib.PerReplica)
for v in value_destination_pairs):
return False
return True
# TODO(yuefengz): consider calling this function in the caller of
# CrossDeviceOps.
def get_devices_from(destinations):
if isinstance(destinations, value_lib.DistributedValues):
return destinations.devices
elif isinstance(destinations, value_lib.LogicalDeviceSpec):
return destinations.device_map.logical_to_actual_devices(
destinations.logical_device)
elif isinstance(destinations, six.string_types):
return (device_util.resolve(destinations),)
return (destinations.device,)
def get_device_map_from(destinations):
if isinstance(destinations, (value_lib.DistributedValues,
value_lib.LogicalDeviceSpec)):
return destinations.device_map, destinations.logical_device
if isinstance(destinations, six.string_types):
device = device_util.resolve(destinations)
else:
device = destinations.device
return value_lib.SingleDeviceMap(device), 0
def _devices_match(left, right):
return set(get_devices_from(left)) == set(get_devices_from(right))
def _all_devices_match(value_destination_pairs):
if not all(_devices_match(v, d) for v, d in value_destination_pairs):
return False
if not all(_devices_match(v, value_destination_pairs[0][0])
for v, _ in value_destination_pairs[1:]):
return False
return True
def simple_broadcast(value, destinations, always_mirrored=False):
"""Broadcast `value` to `destinations` using simple copies."""
device_map, logical_device = get_device_map_from(destinations)
devices = device_map.logical_to_actual_devices(logical_device)
if len(devices) == 1 and not always_mirrored:
return cross_device_utils.copy_tensor_or_indexed_slices_to_device(
value, devices[0])
else:
value_updates = []
for d in devices:
value_updates.append(
cross_device_utils.copy_tensor_or_indexed_slices_to_device(
value, d))
return value_lib.Mirrored(device_map, value_updates, logical_device)
def _simple_reduce(per_replica_value, reduce_to_device, accumulation_fn,
reduce_op):
# pylint: disable=g-missing-docstring
all_values = per_replica_value.values
if not all_values:
raise ValueError("`per_replica_value` must be non-empty")
count = len(all_values)
with ops.device(reduce_to_device):
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
reduced = cross_device_utils.aggregate_tensors_or_indexed_slices(
all_values, accumulation_fn)
if reduce_op == reduce_util.ReduceOp.MEAN:
reduced = cross_device_utils.divide_by_n_tensors_or_indexed_slices(
reduced, count)
elif reduce_op != reduce_util.ReduceOp.SUM:
raise ValueError("`reduce_op` must be Reduce.SUM or Reduce.MEAN.")
return reduced
@tf_export("distribute.CrossDeviceOps")
class CrossDeviceOps(object):
"""Base class for cross-device reduction and broadcasting algorithms."""
def __init__(self):
pass
def reduce(self, reduce_op, per_replica_value, destinations):
"""Reduce `per_replica_value` to `destinations`.
It runs the reduction operation defined by `reduce_op` and put the
result on `destinations`.
Args:
reduce_op: Indicates how per_replica_value will be reduced. Accepted
values are `tf.distribute.ReduceOp.SUM`, `tf.distribute.ReduceOp.MEAN`.
per_replica_value: a PerReplica object or a tensor with device set.
destinations: the reduction destinations.
Returns:
a Mirrored object.
Raises:
ValueError: if per_replica_value can't be converted to a PerReplica
object.
"""
if not isinstance(per_replica_value, value_lib.PerReplica):
per_replica_value = _make_tensor_into_per_replica(per_replica_value)
validate_destinations(destinations)
return self.reduce_implementation(reduce_op, per_replica_value,
destinations)
def batch_reduce(self, reduce_op, value_destination_pairs):
"""Reduce PerReplica objects in a batch.
Reduce each first element in `value_destination_pairs` to each second
element which indicates the destinations.
Args:
reduce_op: Indicates how per_replica_value will be reduced. Accepted
values are `tf.distribute.ReduceOp.SUM`, `tf.distribute.ReduceOp.MEAN`.
value_destination_pairs: a list or a tuple of tuples of PerReplica objects
(or tensors with device set if there is one device) and destinations.
Returns:
a list of Mirrored objects.
Raises:
ValueError: if `value_destination_pairs` is not a list or a tuple of
tuples of PerReplica objects and destinations
"""
# TODO(yuefengz): if destinations are different, split into several
# `_batch_reduce` invocations.
if not _validate_value_destination_pairs(value_destination_pairs):
# If the first element of each pair is a tensor, we try to turn it into a
# PerReplica object.
value_destination_pairs = _normalize_value_destination_pairs(
value_destination_pairs)
for _, d in value_destination_pairs:
validate_destinations(d)
return self.batch_reduce_implementation(reduce_op, value_destination_pairs)
def broadcast(self, tensor, destinations):
"""Broadcast the `tensor` to destinations.
Args:
tensor: the tensor to broadcast.
destinations: the broadcast destinations.
Returns:
a Mirrored object.
"""
validate_destinations(destinations)
return self.broadcast_implementation(tensor, destinations)
@doc_controls.for_subclass_implementers
def reduce_implementation(self, reduce_op, per_replica_value, destinations):
"""The implementation of reduce of `per_replica_value` to `destinations`.
It runs the reduction operation defined by `reduce_op` and put the
result on `destinations`.
Args:
reduce_op: Indicates how per_replica_value will be reduced. Accepted
values are `tf.distribute.ReduceOp.SUM`, `tf.distribute.ReduceOp.MEAN`.
per_replica_value: a PerReplica object or a tensor with device set.
destinations: the reduction destinations.
Returns:
a Mirrored object.
Raises:
ValueError: if per_replica_value can't be converted to a PerReplica
object.
"""
raise NotImplementedError(
"_reduce method must be implemented in descendants.")
@doc_controls.for_subclass_implementers
def batch_reduce_implementation(self, reduce_op, value_destination_pairs):
"""Implementation of reduce PerReplica objects in a batch.
Reduce each first element in `value_destination_pairs` to each second
element which indicates the destinations.
Args:
reduce_op: Indicates how per_replica_value will be reduced. Accepted
values are `tf.distribute.ReduceOp.SUM`, `tf.distribute.ReduceOp.MEAN`.
value_destination_pairs: a list or a tuple of tuples of PerReplica objects
(or tensors with device set if there is one device) and destinations.
Returns:
a list of Mirrored objects.
Raises:
ValueError: if `value_destination_pairs` is not a list or a tuple of
tuples of PerReplica objects and destinations
"""
raise NotImplementedError(
"_batch_reduce method must be implemented in descendants.")
@doc_controls.for_subclass_implementers
def broadcast_implementation(self, tensor, destinations):
"""Implementation of broadcast the `tensor` to destinations.
Args:
tensor: the tensor to broadcast.
destinations: the broadcast destinations.
Returns:
a Mirrored object.
"""
return simple_broadcast(tensor, destinations, always_mirrored=True)
@tf_export("distribute.ReductionToOneDevice")
class ReductionToOneDevice(CrossDeviceOps):
"""Always do reduction to one device first and then do broadcasting.
Batch reduction is done by reduction on each element one by one.
"""
def __init__(self, reduce_to_device=None, accumulation_fn=None):
"""Constructor.
Args:
reduce_to_device: the intermediate device to reduce to. If None, reduce
to the first device in `destinations` of the reduce() method.
accumulation_fn: a function that does accumulation. If None, then
`tf.math.add_n` is used.
"""
self.reduce_to_device = reduce_to_device
self.accumulation_fn = accumulation_fn or math_ops.add_n
super(ReductionToOneDevice, self).__init__()
def reduce_implementation(self, reduce_op, per_replica_value, destinations):
if check_destinations(destinations):
devices = get_devices_from(destinations)
else:
devices = get_devices_from(per_replica_value)
reduce_to_device = self.reduce_to_device or devices[0]
logging.log_first_n(
logging.INFO,
"Reduce to %s then broadcast to %r." % (reduce_to_device, devices), 10)
reduced = _simple_reduce(per_replica_value, reduce_to_device,
self.accumulation_fn, reduce_op)
return self.broadcast(reduced, destinations)
def batch_reduce_implementation(self, reduce_op, value_destination_pairs):
return [
self.reduce_implementation(reduce_op, t, destinations=v)
for t, v in value_destination_pairs
]
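# Illustrative usage sketch (an assumption, not part of the original module):
# a cross-device ops instance such as ReductionToOneDevice is usually handed
# to a distribution strategy rather than called directly; the helper below
# only shows one plausible way to construct it.
def _example_reduction_to_one_device():
  """Returns a ReductionToOneDevice that reduces onto the host CPU."""
  return ReductionToOneDevice(reduce_to_device="/device:CPU:0")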
def _group_value_by_device(per_replica_values):
"""Group values into sublists by their devices.
This grouping is needed to call the all-reduce library because it expects a
list of the following form:
[[(grad0_gpu0, v0_gpu0), (grad1_gpu0, v1_gpu0), (grad2_gpu0, v2_gpu0) ...],
[(grad0_gpu1, v0_gpu1), (grad1_gpu1, v1_gpu1), (grad2_gpu1, v2_gpu1) ...],
[(grad0_gpu2, v0_gpu2), (grad1_gpu0, v1_gpu2), (grad2_gpu0, v2_gpu2) ...],
...
]
Args:
    per_replica_values: a list of PerReplica objects.
Returns:
a list of lists, each sublist has components for its corresponding device of
PerReplica objects, paired with a None.
"""
destinations = per_replica_values[0].devices
grouped = [[] for _ in range(len(destinations))]
for per_replica_value in per_replica_values:
# pylint: disable=protected-access
for i, v in enumerate(per_replica_value.values):
assert per_replica_value.devices == destinations
grouped[i].append((v, None))
return grouped
def _ungroup_and_make_mirrored(grouped_reduced,
destinations,
reduce_op,
num_between_graph_workers=1):
"""Ungroup results from all-reduce and make Mirrored objects.
Each all-reduce result will be divided by the number of destinations before
Mirrored objects are created if reduce_op is "mean".
Args:
grouped_reduced: a list of lists, each sublist has components for each
device, paired with a None. It is the result from
cross_device_utils.aggregate_gradients_using*.
destinations: a value to colocate the result with.
reduce_op: Indicates how values will be aggregated. Accepted values
are `tf.distribute.ReduceOp.SUM`, `tf.distribute.ReduceOp.MEAN`.
num_between_graph_workers: number of workers in the between-graph
replication.
Returns:
a list of Mirrored objects.
"""
device_map, logical_device = get_device_map_from(destinations)
num_replicas = device_map.num_replicas_in_graph * num_between_graph_workers
index = [[] for _ in range(len(grouped_reduced[0]))]
for per_replica_reduced in grouped_reduced:
for i, (v, _) in enumerate(per_replica_reduced):
if reduce_op == reduce_util.ReduceOp.MEAN:
index[i].append(v / num_replicas)
else:
index[i].append(v)
return [value_lib.Mirrored(device_map, v, logical_device) for v in index]
class _ConcatAndSplitPacker(object):
"""Concatenate and split tensors for reduction."""
def __init__(self, num_packs=1):
"""Initialize the _ConcatAndSplitPacker object.
Args:
num_packs: specifies the number of split packs that will be
formed.
Raises:
ValueError: if num_packs is not greater than 0.
"""
if num_packs <= 0:
raise ValueError("num_packs must be greater than zero.")
self.num_packs = num_packs
def pack(self, grouped_grads_and_vars):
"""Pack tensors."""
self.grouped_grads_and_vars = grouped_grads_and_vars
self.all_device_shapes = []
self.all_device_sizes = []
device_grad_packs = []
for device_grads_and_vars in grouped_grads_and_vars:
with ops.colocate_with(device_grads_and_vars[0][0]):
# Flatten all the grads.
flat_grads = [
array_ops.reshape(g, [-1]) for g, _ in device_grads_and_vars
]
# Remember the original shape of all the grads.
device_shapes = [array_ops.shape(g) for g, _ in device_grads_and_vars]
# Remember the original sizes of all the grads.
device_sizes = [array_ops.size(g) for g, _ in device_grads_and_vars]
# Concat all the flat grads into a big flat tensor.
concat_grads = array_ops.concat(flat_grads, 0)
# Split the big tensor into num_splits packs. In cases where the
        # total size is not divisible by num_splits, the last pack gets
# more elements.
# TODO(zhengxq): it is also possible to optimize away all the concat
# as well.
num_splits = self.num_packs
# The array_ops.size function will sometimes remove static shapes. So if
# all gradient shapes are defined, we use another method to get the
# total size.
# TODO(yuefengz): move this logic to array_ops.size.
if all(g.shape.is_fully_defined() for g, _ in device_grads_and_vars):
total_grad_size = sum(
[g.shape.num_elements() for g, _ in device_grads_and_vars])
else:
total_grad_size = array_ops.size(concat_grads)
split_size = total_grad_size // num_splits
split_size_last = total_grad_size - split_size * (num_splits - 1)
split_sizes = [split_size] * (num_splits - 1) + [split_size_last]
grad_packs = array_ops.split(concat_grads, split_sizes)
# Ready to aggregate the repacked gradients, with fake variables.
# TODO(zhengxq): It is hacky to have to use fake variables.
# We should remove the need for variables in
# aggregate_gradients_using*.
device_grad_packs.append(zip(grad_packs, [None] * num_splits))
self.all_device_shapes.append(device_shapes)
self.all_device_sizes.append(device_sizes)
return device_grad_packs
def unpack(self, summed_device_grad_packs):
"""Reverse the pack."""
aggregated_device_grads = []
for (summed_device_grad_packs,
device_grads_and_vars, device_shapes, device_sizes) in zip(
summed_device_grad_packs, self.grouped_grads_and_vars,
self.all_device_shapes, self.all_device_sizes):
# pylint: enable=line-too-long
# Reverse the packing operations in the previous steps. Form the
# summed gradients back into their original shapes.
with ops.colocate_with(summed_device_grad_packs[0][0]):
# Form a list of the summed grad packs.
device_grad_packs = [g for g, _ in summed_device_grad_packs]
# Concat them back into a big flat tensor.
device_grads_concat = array_ops.concat(device_grad_packs, 0)
# Split the tensors back into their original sizes.
grads_with_sizes = array_ops.split(device_grads_concat, device_sizes)
# Reshape the tensors back into their original shapes.
grads_with_shapes = [
array_ops.reshape(grad, shape)
for shape, grad in zip(device_shapes, grads_with_sizes)
]
# Form the list with the original list of variables.
summed_device_grads = [
(g, v) for g, (_, v) in zip(grads_with_shapes,
device_grads_and_vars)
]
aggregated_device_grads.append(summed_device_grads)
return aggregated_device_grads
class _AggregateSmallTensorPacker(object):
"""Concatenate small gradient tensors together for reduction."""
def __init__(self,
agg_small_grads_max_bytes=1048576,
agg_small_grads_max_group=16):
"""Initialize the _AggregateSmallTensorPacker object.
Args:
agg_small_grads_max_bytes: largest tensor eligible for aggregation,
in number of bytes.
agg_small_grads_max_group: largest permitted aggregation of small
tensors.
Raises:
ValueError: if `agg_small_grads_max_bytes` or `agg_small_grads_max_group`
is not greater than 0.
"""
if agg_small_grads_max_bytes <= 0 or agg_small_grads_max_group <= 0:
raise ValueError("agg_small_grads_max_bytes and agg_small_grads_max_group"
" should both be greater than zero.")
self.agg_small_grads_max_bytes = agg_small_grads_max_bytes
self.agg_small_grads_max_group = agg_small_grads_max_group
def pack(self, grouped_grads_and_vars):
"""Aggregate small tensors."""
if (self.agg_small_grads_max_bytes > 0 and
self.agg_small_grads_max_group > 0):
device_grads, self.packing = cross_device_utils.pack_small_tensors(
grouped_grads_and_vars,
max_bytes=self.agg_small_grads_max_bytes,
max_group=self.agg_small_grads_max_group)
return device_grads
def unpack(self, summed_device_grad_packs):
"""Reverse the aggregation process."""
return cross_device_utils.unpack_small_tensors(summed_device_grad_packs,
self.packing)
def _pack_tensors(device_grads,
num_packs=0,
agg_small_grads_max_bytes=0,
agg_small_grads_max_group=0):
"""Pack tensors if specified."""
if num_packs > 0:
tensor_packer = _ConcatAndSplitPacker(num_packs)
device_grad_packs = tensor_packer.pack(device_grads)
elif agg_small_grads_max_bytes > 0 and agg_small_grads_max_group > 0:
tensor_packer = _AggregateSmallTensorPacker(agg_small_grads_max_bytes,
agg_small_grads_max_group)
device_grad_packs = tensor_packer.pack(device_grads)
else:
tensor_packer = None
device_grad_packs = device_grads
return device_grad_packs, tensor_packer
def _unpack_tensors(reduced, tensor_packer=None):
"""Unpack tensors if they are packed before all-reduce."""
if tensor_packer:
return tensor_packer.unpack(reduced)
return reduced
class AllReduceCrossDeviceOps(CrossDeviceOps):
"""Reduction using all-reduce."""
def __init__(self,
all_reduce_alg="nccl",
num_packs=1,
agg_small_grads_max_bytes=0,
agg_small_grads_max_group=10):
"""All-reduce implementation of CrossDeviceOps.
Before performing all-reduce, tensors will be repacked or aggregated for
more efficient cross-device transportation:
1) If `num_packs` is non-zero, pack values into
`num_packs` splits.
2) Otherwise, if `agg_small_grads_max_bytes` > 0 and
`agg_small_grads_max_group` > 0, aggregate values smaller than
`agg_small_grads_max_bytes` into groups with at most
`agg_small_grads_max_group` values.
3) Otherwise, no repacking or grouping will happen.
Args:
all_reduce_alg: the all-reduce algorithm to use, currently only "nccl" or
"hierarchical_copy" are supported.
num_packs: see above.
agg_small_grads_max_bytes: see above.
agg_small_grads_max_group: see above.
"""
self._all_reduce_alg = all_reduce_alg
self._num_packs = num_packs
self._agg_small_grads_max_bytes = agg_small_grads_max_bytes
self._agg_small_grads_max_group = agg_small_grads_max_group
self._simple_cross_replica_ops = ReductionToOneDevice()
super(AllReduceCrossDeviceOps, self).__init__()
def reduce_implementation(self, reduce_op, per_replica_value, destinations):
if _devices_match(per_replica_value, destinations):
return self._batch_all_reduce(reduce_op, [per_replica_value])[0]
else:
return self._simple_cross_replica_ops.reduce(reduce_op, per_replica_value,
destinations)
def batch_reduce_implementation(self, reduce_op, value_destination_pairs):
all_devices_match = _all_devices_match(value_destination_pairs)
contains_indexed_slices = cross_device_utils.contains_indexed_slices(
value_destination_pairs)
if (all_devices_match and not context.executing_eagerly()
and not contains_indexed_slices):
return self._batch_all_reduce(reduce_op,
[v[0] for v in value_destination_pairs])
else:
if not all_devices_match:
logging.log_first_n(logging.WARN,
"Efficient batch_reduce is not supported if "
"destinations are different.",
10)
return [
self.reduce_implementation(reduce_op, t, destinations=v)
for t, v in value_destination_pairs
]
def _batch_all_reduce(self, reduce_op, per_replica_values):
"""All-reduce algorithm in a batch."""
dense_values, dense_indices, sparse_values, sparse_indices = (
cross_device_utils.split_by_sparsity(per_replica_values))
if dense_values:
dense_results = self._do_batch_all_reduce(reduce_op, dense_values)
else:
dense_results = []
if sparse_values:
sparse_results = self._do_batch_all_reduce_sparse(reduce_op,
sparse_values)
else:
sparse_results = []
return cross_device_utils.stitch_values(((dense_results, dense_indices),
(sparse_results, sparse_indices)))
def _do_batch_all_reduce(self, reduce_op, dense_values):
"""Run batch all-reduces."""
logging.log_first_n(
logging.INFO, "batch_all_reduce: %d all-reduces with algorithm = %s,"
"num_packs = %d, agg_small_grads_max_bytes = %d and "
"agg_small_grads_max_group = %d" %
(len(dense_values), self._all_reduce_alg, self._num_packs,
self._agg_small_grads_max_bytes, self._agg_small_grads_max_group), 10)
destinations = dense_values[0].devices
grouped = _group_value_by_device(dense_values)
device_grad_packs, tensor_packer = _pack_tensors(
grouped, self._num_packs, self._agg_small_grads_max_bytes,
self._agg_small_grads_max_group)
# The actual aggregation of the repacked gradients. Note that they are
# sharded among different aggregation trees. So it is important to strike
# the balance on num_splits.
if self._all_reduce_alg == "nccl":
# TODO(yuefengz): merge this into the all-reduce library.
reduced = cross_device_utils.aggregate_gradients_using_nccl(
device_grad_packs)
else:
# TODO(yuefengz): check that gpu ids in `destinations` are in ascending
# order.
reduced = (
cross_device_utils.aggregate_gradients_using_hierarchical_copy(
destinations, device_grad_packs))
reduced = _unpack_tensors(reduced, tensor_packer)
return _ungroup_and_make_mirrored(reduced, dense_values[0], reduce_op)
def _do_batch_all_reduce_sparse(self, reduce_op, sparse_values):
"""Run batch all-reduce for sparse values."""
logging.log_first_n(
logging.WARN,
"Efficient allreduce is not supported for %d IndexedSlices" %
len(sparse_values), 10)
# Use `sparse_values` as destinations to do all-reduces. It is effectively
# an allgather under the hood but not an efficient one.
return self._simple_cross_replica_ops.batch_reduce(
reduce_op, zip(sparse_values, sparse_values))
# For compatibility with code using the old name of `AllReduceCrossDeviceOps`.
AllReduceCrossTowerOps = AllReduceCrossDeviceOps
AllReduceSpecTuple = collections.namedtuple("AllReduceSpecTuple",
"alg shards limit")
@tf_export("distribute.NcclAllReduce")
class NcclAllReduce(AllReduceCrossDeviceOps):
"""Reduction using NCCL all-reduce."""
def __init__(self, num_packs=1):
"""NCCL all-reduce implementation of CrossDeviceOps.
Before performing all-reduce, tensors will be repacked or aggregated for
more efficient cross-device transportation.
Args:
num_packs: values will be packed in this many splits. `num_packs` should
be greater than 0.
"""
assert num_packs > 0, (
"NCLL all-reduce requires num_packs > 0, but {} is specified".format(
num_packs))
super(NcclAllReduce, self).__init__(
all_reduce_alg="nccl", num_packs=num_packs)
@tf_export("distribute.HierarchicalCopyAllReduce")
class HierarchicalCopyAllReduce(AllReduceCrossDeviceOps):
"""Reduction using hierarchical copy all-reduce.
This is a good reduction for configurations like Nvidia DGX-1.
"""
def __init__(self, num_packs=1):
"""Hierarchical copy all-reduce implementation of CrossDeviceOps.
Before performing all-reduce, tensors will be repacked or aggregated for
more efficient cross-device transportation.
Args:
num_packs: values will be packed in this many splits. `num_packs` should
be greater than 0.
"""
super(HierarchicalCopyAllReduce, self).__init__(
all_reduce_alg="hierarchical_copy",
num_packs=num_packs)
class MultiWorkerAllReduce(AllReduceCrossDeviceOps):
"""All-reduce algorithms for distributed TensorFlow."""
def __init__(self,
worker_devices,
num_gpus_per_worker,
all_reduce_spec=("pscpu/pscpu", 2, -1),
num_packs=0,
agg_small_grads_max_bytes=0,
agg_small_grads_max_group=10):
"""Initialize the all-reduce algorithm.
Args:
worker_devices: a list of device strings for workers participating in
all-reduce.
num_gpus_per_worker: number of GPU devices per worker.
all_reduce_spec: a tuple or a named tuple or a list of tuples specifying
the all-reduce algorithm.
1. The first element of a tuple is the name of the all-reduce algorithm.
Valid algorithm names are: "nccl", "nccl/xring", "nccl/rechd",
"nccl/pscpu", "xring", "pscpu", "psgpu", "pscpu/pscpu". Algorithms with
a "/" are hierarchical, so two all-reduces are executed, the first one
aggregates tensors within a worker and the second aggregates across
workers.
2. The second element of a tuple is the number of shards when doing
      all-reduce. Let's say its value is M; each tensor after packing will be
split into M shards and then M parallel all-reduces would be performed
before finally they are concatenated backed into a complete tensor.
3. The third element is the maximum size of tensors that will be
applicable for the algorithm specified by the first element. For
example, if all_reduce_spec=[("nccl", 2, 1024), ("pscpu/pscpu", 2, -1)],
tensors with size not larger than 1024 bytes will be applied a 2-shard
"nccl" all-reduce and other tensors will be applied a 2-shard
"pscpu/pscpu" algorithm. The third elements should be in increasing
order across tuples and end with -1 which indicates infinity.
num_packs: see AllReduceCrossDeviceOps.
agg_small_grads_max_bytes: see AllReduceCrossDeviceOps.
agg_small_grads_max_group: see AllReduceCrossDeviceOps.
"""
self._worker_devices = worker_devices
self._num_gpus_per_worker = num_gpus_per_worker
super(MultiWorkerAllReduce, self).__init__(
num_packs=num_packs,
agg_small_grads_max_bytes=agg_small_grads_max_bytes,
agg_small_grads_max_group=agg_small_grads_max_group)
def validate_and_complete_spec(spec):
"""Validate and complete the all-reduce spec."""
# TODO(yuefengz): support namedtuple.
if not isinstance(spec, tuple):
raise ValueError(
"A tuple is expected for all-reduce spec: %r" % all_reduce_spec)
if not spec or len(spec) > 3:
raise ValueError(
"Too many elements in the all-reduce spec tuple: %r" % spec)
if len(spec) == 1:
return AllReduceSpecTuple(spec[0], 1, -1)
elif len(spec) == 2:
return AllReduceSpecTuple(spec[0], spec[1], -1)
else:
return AllReduceSpecTuple(*spec)
self._all_reduce_spec = []
if isinstance(all_reduce_spec, six.string_types):
self._all_reduce_spec.append(AllReduceSpecTuple(all_reduce_spec, 1, -1))
elif isinstance(all_reduce_spec, tuple):
self._all_reduce_spec.append(validate_and_complete_spec(all_reduce_spec))
elif isinstance(all_reduce_spec, list):
self._all_reduce_spec = [
validate_and_complete_spec(spec) for spec in all_reduce_spec
]
def _batch_all_reduce(self, reduce_op, per_replica_values):
"""All-reduce algorithm in a batch."""
logging.log_first_n(
logging.INFO,
"Distributed batch_all_reduce: %d all-reduces with "
"allreduce_spec = %r, num_packs = %d, agg_small_grads_max_bytes = %d, "
"and agg_small_grads_max_group = %d" %
(len(per_replica_values), self._all_reduce_spec, self._num_packs,
self._agg_small_grads_max_bytes, self._agg_small_grads_max_group), 10)
device_grads = _group_value_by_device(per_replica_values)
# The all-reduce library requires fully defined shapes.
# TODO(yuefengz): when tensor sharding is not needed, static shapes are not
# required as well.
for device_grad in device_grads:
for grad, _ in device_grad:
if not grad.shape.is_fully_defined():
raise ValueError("Shape is unknown for node %r" % grad)
remaining_grads = device_grads
aggregated_grads = []
for spec_tuple in self._all_reduce_spec:
if spec_tuple.limit < 0:
this_grads = remaining_grads
remaining_grads = []
else:
(this_grads, remaining_grads) = cross_device_utils.split_grads_by_size(
spec_tuple.limit, remaining_grads)
if this_grads:
device_grad_packs, tensor_packer = _pack_tensors(
this_grads, self._num_packs, self._agg_small_grads_max_bytes,
self._agg_small_grads_max_group)
range_agg_grads = cross_device_utils.sum_gradients_all_reduce(
self._worker_devices, device_grad_packs, len(self._worker_devices),
spec_tuple.alg, spec_tuple.shards, range(self._num_gpus_per_worker))
range_agg_grads = _unpack_tensors(range_agg_grads, tensor_packer)
if not aggregated_grads:
aggregated_grads = range_agg_grads
else:
assert len(aggregated_grads) == len(range_agg_grads)
for i in range(len(aggregated_grads)):
aggregated_grads[i] += range_agg_grads[i]
assert not remaining_grads
return _ungroup_and_make_mirrored(aggregated_grads, per_replica_values[0],
reduce_op)
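# Illustrative sketch (an assumption, not part of the original module): an
# `all_reduce_spec` of the form documented in MultiWorkerAllReduce.__init__.
# Tensors up to 1024 bytes get a 2-shard "nccl" all-reduce; everything larger
# falls through to a 2-shard "pscpu/pscpu" hierarchical all-reduce.
def _example_multi_worker_all_reduce_spec():
  return [("nccl", 2, 1024), ("pscpu/pscpu", 2, -1)]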
@tf_export("distribute.experimental.CollectiveCommunication")
class CollectiveCommunication(enum.Enum):
"""Communication choices for CollectiveOps.
* `AUTO`: Default to runtime's automatic choices.
* `RING`: TensorFlow's ring algorithms for all-reduce and
all-gather.
* `NCCL`: Use ncclAllReduce for all-reduce, and ring algorithms for
all-gather. TODO(ayushd): add ncclAllGather implementation.
"""
AUTO = "AUTO"
RING = "RING"
NCCL = "NCCL"
# TODO(yuefengz): support in-graph collective all-reduce.
class CollectiveAllReduce(CrossDeviceOps):
"""All-reduce cross device ops using collective ops.
In the between-graph replicated training, it will still do all-reduces across
all workers and then put results on the right destinations.
"""
def __init__(self,
num_workers=1,
num_gpus_per_worker=0,
all_reduce_merge_scope=32,
collective_keys=None):
"""Initializes the object.
Args:
num_workers: number of workers in the between-graph replicated training.
num_gpus_per_worker: number of GPUs per worker.
all_reduce_merge_scope: size of groups into which to partition consecutive
gradients grouped under a common 'allreduce' name scope. This is useful
for some optimization of collective ops.
collective_keys: an optional CollectiveKey object.
"""
self._num_workers = num_workers
self._num_gpus_per_worker = num_gpus_per_worker
self._all_reduce_merge_scope = all_reduce_merge_scope
self._collective_keys = (collective_keys or
cross_device_utils.CollectiveKeys())
super(CollectiveAllReduce, self).__init__()
def reduce_implementation(self, reduce_op, per_replica_value, destinations):
all_reduced = self._batch_all_reduce(reduce_op, [per_replica_value])[0]
device_map, logical_device = get_device_map_from(destinations)
if (all_reduced.device_map is device_map and
all_reduced.logical_device == logical_device):
return all_reduced
devices = device_map.logical_to_actual_devices(logical_device)
index = []
for d in devices:
if d in all_reduced.devices:
index.append(all_reduced.get(d))
else:
# TODO(josh11b): Once we add support for model parallelism, get the
# copy from the corresponding replica instead of the primary.
with ops.control_dependencies(all_reduced.values), ops.device(d):
index.append(array_ops.identity(all_reduced.primary))
return value_lib.Mirrored(device_map, index, logical_device)
def batch_reduce_implementation(self, reduce_op, value_destination_pairs):
all_devices_match = _all_devices_match(value_destination_pairs)
if all_devices_match:
return self._batch_all_reduce(reduce_op,
[v[0] for v in value_destination_pairs])
else:
if not all_devices_match:
logging.log_first_n(
logging.WARN, "Efficient batch_reduce is not supported if "
"destinations are different.", 10)
return [
self.reduce_implementation(reduce_op, t, destinations=v)
for t, v in value_destination_pairs
]
def _make_gradient_chunks(self, per_replica_values, all_reduce_merge_scope):
"""Make `per_replica_values` into chunks."""
grouped_by_device = _group_value_by_device(per_replica_values)
grouped_by_var = list(zip(*grouped_by_device))
# grouped_by_var is grouped by variables and takes the following format:
# [((grad0_gpu0, v0_gpu0), (grad0_gpu1, v0_gpu1), (grad0_gpu2, v0_gpu2) ..),
# ((grad1_gpu0, v1_gpu0), (grad1_gpu1, v1_gpu1), (grad1_gpu0, v1_gpu2) ..),
# ((grad2_gpu0, v2_gpu0), (grad2_gpu1, v2_gpu1), (grad2_gpu0, v2_gpu2) ..),
# ...
# ]
chunked_gv = [
grouped_by_var[x:x + all_reduce_merge_scope]
for x in range(0, len(grouped_by_var), all_reduce_merge_scope)
]
return chunked_gv
def _batch_all_reduce(self, reduce_op, per_replica_values):
"""All reduce algorithm in a batch."""
logging.log_first_n(
logging.INFO, "Collective batch_all_reduce: %d all-reduces, "
"num_workers = %d" % (len(per_replica_values), self._num_workers), 10)
dense_values, dense_indices, sparse_values, sparse_indices = (
cross_device_utils.split_by_sparsity(per_replica_values))
if dense_values:
dense_results = self._do_batch_all_reduce_dense(reduce_op, dense_values)
else:
dense_results = []
if sparse_values:
sparse_results = self._do_batch_all_reduce_sparse(reduce_op,
sparse_values)
else:
sparse_results = []
return cross_device_utils.stitch_values(((dense_results, dense_indices),
(sparse_results, sparse_indices)))
def _do_batch_all_reduce_dense(self, reduce_op, per_replica_values):
"""All-reduce across all workers in a batch."""
logging.log_first_n(
logging.INFO, "Collective batch_all_reduce: %d all-reduces, "
"num_workers = %d" % (len(per_replica_values), self._num_workers), 10)
chunked_gv = self._make_gradient_chunks(per_replica_values,
self._all_reduce_merge_scope)
reduced_gv_list = []
for chunk in chunked_gv:
with ops.name_scope("allreduce"):
for grad_and_vars in chunk:
# Gradients for the same variable but from different devices.
scaled_grads = [g for g, _ in grad_and_vars]
collective_reduced = cross_device_utils.build_collective_reduce(
scaled_grads, self._num_workers, self._collective_keys, "Add",
"Id")
result = []
for (_, v), g in zip(grad_and_vars, collective_reduced):
result.append([g, v])
reduced_gv_list.append(result)
new_device_grads = [list(x) for x in zip(*reduced_gv_list)]
return _ungroup_and_make_mirrored(
new_device_grads,
per_replica_values[0],
reduce_op,
num_between_graph_workers=self._num_workers)
def _do_batch_all_reduce_sparse(self, reduce_op, per_replica_values):
"""All-reduce IndexedSlices across all workers in a batch."""
logging.log_first_n(
logging.INFO, "Collective batch_all_reduce for IndexedSlices: "
"%d all-reduces, num_workers = %d" %
(len(per_replica_values), self._num_workers), 10)
chunked_gv = self._make_gradient_chunks(per_replica_values,
self._all_reduce_merge_scope)
reduced_gv_list = []
for chunk in chunked_gv:
with ops.name_scope("allreduce"):
for grad_and_vars in chunk:
# Gradients for the same variable but from different devices.
scaled_grads = [g for g, _ in grad_and_vars]
values = [g.values for g in scaled_grads]
indices = [g.indices for g in scaled_grads]
assert len(values) == len(indices)
# Build two separate allgathers, one for values, the other one for
# indices.
gathered_values = cross_device_utils.build_collective_gather(
values, self._num_workers, self._collective_keys)
gathered_indices = cross_device_utils.build_collective_gather(
indices, self._num_workers, self._collective_keys)
assert len(gathered_values) == len(gathered_indices)
collective_reduced = []
for i in range(len(values)):
reduced = ops.IndexedSlices(
gathered_values[i],
gathered_indices[i],
dense_shape=scaled_grads[i].dense_shape)
collective_reduced.append(reduced)
result = []
for (_, v), g in zip(grad_and_vars, collective_reduced):
result.append([g, v])
reduced_gv_list.append(result)
new_device_grads = [list(x) for x in zip(*reduced_gv_list)]
return _ungroup_and_make_mirrored(
new_device_grads,
per_replica_values[0],
reduce_op,
num_between_graph_workers=self._num_workers)
_dgx1_links = [[1, 2, 3, 4], [0, 2, 3, 5], [0, 1, 3, 6], [0, 1, 2, 7],
[0, 5, 6, 7], [1, 4, 6, 7], [2, 4, 5, 7], [3, 4, 5, 6]]
def _has_dgx1_like_links(gpu_links):
if not gpu_links:
return False
# TODO(yuefengz): figure out the right topology for hierarchical copy if
# number of gpus are less than 8.
if len(gpu_links) < 8:
return False
for i, (gpu_link, dgx1_link) in enumerate(zip(gpu_links, _dgx1_links)):
if (set(gpu_link) != set(dgx1_link) and
set(gpu_link) != set(dgx1_link + [i])):
return False
return True
def _choose_all_reduce_algorithm(device_links):
if _has_dgx1_like_links(device_links):
return HierarchicalCopyAllReduce(num_packs=len(device_links))
else:
return NcclAllReduce(num_packs=1)
def choose_the_best(devices, session_config=None):
"""Find the best subclass of CrossDeviceOps given a session config.
Args:
devices: a list of devices passed to `tf.distribute.Strategy`.
session_config: a `tf.ConfigProto` or `None`. If `None`, it will make
decision based on all local devices.
Returns:
A subclass of `CrossDeviceOps`.
"""
requested_devices = set([device_util.canonicalize(d) for d in devices])
machine_devices = device_lib.list_local_devices(session_config=session_config)
using_devices = []
for d in machine_devices:
if device_util.canonicalize(d.name) in requested_devices:
using_devices.append(d)
else:
logging.info(
"Device is available but not used by distribute strategy: %s", d.name)
if len(using_devices) != len(requested_devices):
logging.warning("Not all devices in `tf.distribute.Strategy` are visible "
"to TensorFlow.")
return ReductionToOneDevice()
if any(d.device_type.lower() != "gpu" for d in using_devices):
logging.warning("Not all devices in `tf.distribute.Strategy` are visible "
"to TensorFlow.")
return ReductionToOneDevice()
device_links = [[] for _ in range(len(using_devices))]
for i, device in enumerate(using_devices):
for link in device.locality.links.link:
device_links[i].append(link.device_id)
return _choose_all_reduce_algorithm(device_links)
|
the-stack_0_11162 | import numpy as np
from .base_actuator import Actuator
from spike_swarm_sim.register import actuator_registry
@actuator_registry(name='wheel_actuator')
class WheelActuator(Actuator):
""" Robot wheel actuator using a differential drive system.
"""
def __init__(self, *args, robot_radius=0.11, dt=1., min_thresh=0.0, **kwargs):
super(WheelActuator, self).__init__(*args, **kwargs)
self.robot_radius = robot_radius
self.dt = dt
self.delta_pos = np.zeros(2)
self.delta_theta = 0
self.min_thresh = min_thresh
    def step(self, v_motors):
if isinstance(v_motors, list):
v_motors = np.array(v_motors)
current_pos = self.actuator_owner.position
current_theta = self.actuator_owner.orientation
v_motors[np.abs(v_motors) < self.min_thresh] = 0.0
delta_t = self.dt
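        # Differential-drive kinematics: R is the signed turning radius, w the
        # angular velocity, and icc the instantaneous center of curvature; the
        # pose update below rotates the robot about icc by w * delta_t.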
R = .5 * self.robot_radius * v_motors.sum() / (v_motors[0] - v_motors[1] + 1e-3)
w = (v_motors[0] - v_motors[1] + 1e-3) / (self.robot_radius * .5)
icc = current_pos + R * np.array([-np.sin(current_theta), np.cos(current_theta)])
transf_mat = lambda x: np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
self.delta_pos = transf_mat(w * delta_t).dot(current_pos - icc) + icc - current_pos
self.delta_theta = w * delta_t
new_pos = self.actuator_owner.position + self.delta_pos.astype(float)
print(self.actuator_owner.position, new_pos)
self.actuator_owner.position = new_pos
self.actuator_owner.orientation = self.actuator_owner.orientation + self.delta_theta
self.actuator_owner.orientation = self.actuator_owner.orientation % (2 * np.pi)
self.delta_pos = np.zeros(2)
self.delta_theta = 0.0 |
the-stack_0_11164 | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.001
args_model = 'resnet50'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_pwr/' + job_name + '*'
total_epochs = 4
starting_epoch = 0
# first step is to update the PID
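# Writing to pid_lock.json and then renaming it to pid.json publishes the new
# PID in one atomic step, presumably so the external scheduler node never reads
# a half-written file.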
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_max_pwr/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
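# Note (illustrative): the scheduler can trigger this checkpoint path by sending
# SIGTERM to the pid recorded in pid.json above, e.g. `kill -15 <pid>`; the handler
# then saves the model and records the job in checkpoint.json before exiting.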
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
the-stack_0_11165 | import socket
server = socket.socket()
server.bind(('127.0.0.1', 5000))
server.listen(5)
while True:
client, (client_host, client_port) = server.accept()
client.recv(4096)
response_type = 'HTTP/1.1 200 OK\n'
headers = 'Content-Type: text/html\n\n'
with open('task_3/index.html', 'r') as f:
body = f.read()
response = response_type + headers + body
client.send(response.encode('utf-8'))
client.close()
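# Quick manual check (illustrative; assumes task_3/index.html exists relative to the
# directory the server is started from):
#     curl http://127.0.0.1:5000/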
|
the-stack_0_11167 | from operator import itemgetter
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
from nltk.stem.lancaster import LancasterStemmer
from players.codemaster import Codemaster
class AICodemaster(Codemaster):
def __init__(self, brown_ic=None, glove_vecs=None, word_vectors=None):
super().__init__()
self.brown_ic = brown_ic
self.glove_vecs = glove_vecs
self.word_vectors = word_vectors
self.wordnet_lemmatizer = WordNetLemmatizer()
self.lancaster_stemmer = LancasterStemmer()
self.cm_wordlist = []
with open('players/cm_wordlist.txt') as infile:
for line in infile:
self.cm_wordlist.append(line.rstrip())
self.syns = []
for word in self.cm_wordlist:
for synset_in_cmwordlist in wordnet.synsets(word):
self.syns.append(synset_in_cmwordlist)
def set_game_state(self, words, maps):
self.words = words
self.maps = maps
def get_clue(self):
lin_results = []
count = 0
red_words = []
bad_words = []
for i in range(25):
if self.words[i][0] == '*':
continue
elif self.maps[i] == "Assassin" or self.maps[i] == "Blue" or self.maps[i] == "Civilian":
bad_words.append(self.words[i].lower())
else:
red_words.append(self.words[i].lower())
print("RED:\t", red_words)
for red_word in red_words:
for synset_in_cmwordlist in self.syns:
lin_clue = 0
for red_synset in wordnet.synsets(red_word):
try:
# only if the two compared words have the same part of speech
lin_score = synset_in_cmwordlist.lin_similarity(red_synset, self.brown_ic)
                    except Exception:
continue
if lin_score:
if not self.arr_not_in_word(synset_in_cmwordlist.lemma_names()[0], red_words + bad_words):
continue
lin_results.append((lin_score, synset_in_cmwordlist))
if lin_score > lin_clue:
lin_clue = lin_score
lin_results = list(reversed(sorted(lin_results, key=itemgetter(0))))
return [lin_results[0][1].lemma_names()[0], 1]
def arr_not_in_word(self, word, arr):
if word in arr:
return False
lemm = self.wordnet_lemmatizer.lemmatize(word)
lancas = self.lancaster_stemmer.stem(word)
for i in arr:
if i == lemm or i == lancas:
return False
if i.find(word) != -1:
return False
if word.find(i) != -1:
return False
return True
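# Illustrative standalone usage (a sketch; in the game framework the engine constructs
# the codemaster and drives these calls; the brown_ic corpus name is the standard NLTK one):
#     from nltk.corpus import wordnet_ic
#     cm = AICodemaster(brown_ic=wordnet_ic.ic('ic-brown.dat'))
#     cm.set_game_state(words, maps)   # the 25 board words and their key-card labels
#     clue_word, clue_count = cm.get_clue()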
|
the-stack_0_11168 | # Imports for flask and sql
from flask import Flask, render_template, url_for, request, redirect, flash
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
# Imports for plots
import plotly.express as px
import pandas as pd
import numpy as np
import json
import plotly
# Other imports
from datetime import datetime, date
import re
# Create flask app object and hooking up database
app = Flask(__name__)
app.config.from_pyfile('config.cfg')
db = SQLAlchemy(app)
# This class is responsible for the 'ledger' table in the database
# This table stores information about the user's operations
class Ledger(db.Model):
id = db.Column(db.Integer, primary_key=True)
amount = db.Column(db.Float(precision=2)) # The amount of the operation
date = db.Column(db.Date) # The date of the operation
description = db.Column(db.String(200)) # The description of the operation
category = db.Column(db.String(50)) # The category of the operation - 'Deposit' for each deposit, and custom for others
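# Example row (illustrative values only):
#     Ledger(amount=-12.50, date=date(2021, 1, 1), description='Lunch', category='Food')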
def get_current_date():
"""
Function return current date as datetime.
:return: datetime in format YYYY-MM-DD
"""
today = date.today()
today = today.strftime("%Y-%m-%d")
return today
def get_balance():
"""
Function return current user balance.
:return: string looks like float with two decimal places
"""
if len(Ledger.query.all()) == 0:
# This part is responsible for return 0 with two decimal places
return '0.00'
else:
# Statement below:
# - get column amount from ledger
# - sums this column
# - return only number(without parentheses)
# - return number rounded to two decimal places
return "{:.2f}".format(db.session.query(func.sum(Ledger.amount)).all()[0][0])
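# (get_balance's query is the ORM equivalent of "SELECT SUM(amount) FROM ledger",
# formatted to two decimal places.)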
@app.route('/')
def home():
# Home view displayed when user enter to the website
db.create_all() # Create table in database based on Ledger class
return render_template('home.html', # Render 'home.html' template
nav_active='home', # This param is responsible for activation right overlap in menu
balance=get_balance()) # Menu keeps showing users balance
@app.route('/add_deposit', methods=['GET', 'POST'])
def add_deposit():
    # This page allows adding a deposit to the ledger
# When method is GET, render template with form
if request.method == 'GET':
return render_template('add_deposit.html', # Render template 'add_deposit.html'
nav_active='add', # Highlighted tab in menu
balance=get_balance(), # Menu keeps showing users balance
today=get_current_date()) # Current date is set in form in input date
# If method is POST, get values from form and save it in table
else:
# Default value of dollars - when user doesn't type value
dollars = 0
# If value is typed in form, get it and save to 'dollars' var
if 'ad-amount-dollars' in request.form:
dollars = request.form['ad-amount-dollars']
        # Default value of cents - when the user doesn't type a value
cents = 0
# If value is typed in form, get it and save to 'cents' var
if 'ad-amount-cents' in request.form:
cents = request.form['ad-amount-cents']
# Check if value from form is not empty
if cents == '':
# If value is empty, set value = 0
cents = 0
        # If the value is < 10 we have to save e.g. 03 instead of 3
        # So we save cents as a string with a leading 0
if int(cents) < 10:
cents = '0' + str(cents)
# Get date from form and save it as datetime to 'date' var
if 'ad-date' in request.form:
date = request.form['ad-date']
date = datetime.strptime(date, '%Y-%m-%d')
# Default description - get from form and save to 'desc' var
desc = ''
if 'ad-description' in request.form:
desc = request.form['ad-description']
# Concat dollars and cents into amount
# And change amount to float dtype
amount = str(dollars) + "." + str(cents)
amount = float(amount)
# Create object Ledger with data from form
added_row = Ledger(amount=amount,
description=desc,
date=date,
category='Deposit') # This category is default for each deposit
# Add row above to database
db.session.add(added_row)
db.session.commit()
# Display flash about adding a row
flash("Row has been added")
# Redirect to display ledger
return redirect(url_for('ledger'))
@app.route('/add_withdrawal', methods=['GET', 'POST'])
def add_withdrawal():
    # This page allows adding a withdrawal to the ledger
# When method is GET, render template with form
if request.method == 'GET':
return render_template('add_withdrawal.html', # Render template 'add_withdrawal.html'
nav_active='add', # Highlighted tab in menu
balance=get_balance(), # Menu keeps showing users balance
today=get_current_date()) # Current date is set in form in input date
# If method is POST, get values from form and save it in table
else:
# Default value of dollars - when user doesn't type value
dollars = 0
# If value is typed in form, get it and save to 'dollars' var
if 'aw-amount-dollars' in request.form:
dollars = request.form['aw-amount-dollars']
        # Default value of cents - when the user doesn't type a value
cents = 0
# If value is typed in form, get it and save to 'cents' var
if 'aw-amount-cents' in request.form:
cents = request.form['aw-amount-cents']
# Check if value from form is not empty
if cents == '':
# If value is empty, set value = 0
cents = 0
        # If the value is < 10 we have to save e.g. 03 instead of 3
        # So we save cents as a string with a leading 0
if int(cents) < 10:
cents = '0' + str(cents)
# Get date from form and save it as datetime to 'date' var
if 'aw-date' in request.form:
date = request.form['aw-date']
date = datetime.strptime(date, '%Y-%m-%d')
# Default description
desc = ''
# If description is in form, get from form and save to 'desc' var
if 'aw-description' in request.form:
desc = request.form['aw-description']
# Default category
category = 'Unsigned'
# If category is in form, get from form and save to 'category' var
if 'aw-category' in request.form:
category = request.form['aw-category']
# Concat dollars and cents into amount
# And change amount to float dtype
amount = '-' + str(dollars) + "." + str(cents)
amount = float(amount)
# Create object Ledger with data from form
added_row = Ledger(amount=amount, description=desc, date=date, category=category)
# Add row above to database
db.session.add(added_row)
db.session.commit()
# Display flash about adding a row
flash("Row has been added")
# Redirect to display ledger
return redirect(url_for('ledger'))
@app.route('/ledger', methods=['GET', 'POST'])
def ledger():
    # When the method is GET, this page displays the ledger
if request.method == 'GET':
# Get all rows from table ordered by date and save it to 'full_ledger'
# In 'ledger.html' is counted post-transactional balance after each of operations ordered by date
full_ledger = Ledger.query.order_by(Ledger.date).all()
        # Get the ledger length - if it is empty, 'ledger.html' will display an appropriate message
ledger_len = len(full_ledger)
return render_template('ledger.html', # Render template 'ledger.html'
nav_active='ledger', # Highlighted tab in menu
full_ledger=full_ledger, # Full ledger to display
ledger_len=ledger_len, # Ledger len to check if ledger not empty
balance=get_balance()) # Menu keeps showing users balance
    # When the ledger is called with method POST, it will delete a row
    else:
        # The 'delete' button in a ledger row opens a modal
        # Inside the modal there is a form with the id of the row to delete
        # Get the value (row id) from this form
if 'id_row_to_del' in request.form:
id_row_to_del = request.form['id_row_to_del']
# Choose row from database, where id == id from form
row_to_del = Ledger.query.filter(Ledger.id == id_row_to_del).first()
# Delete row and save changes in database
db.session.delete(row_to_del)
db.session.commit()
# Display flash with information about delete row
flash('Row has been deleted')
        # Redirect to itself, but with method GET
return redirect(url_for('ledger'))
@app.route('/edit_row/<int:id_row_to_edit>', methods=['GET', 'POST'])
def edit_row(id_row_to_edit):
    # This page allows editing a deposit or a withdrawal
if request.method == 'GET':
# With method GET, get row from table in database, where id == id sent in URL
# Next - load this row into 'row' variable
# This row will be used to display values from this row in inputs of form in rendered page
row = Ledger.query.filter(Ledger.id == id_row_to_edit).first()
# If amount in row is < 0, render edit withdrawal, else - edit deposit
if row.amount > 0:
return render_template('edit_deposit_row.html', # Render template 'edit_deposit_row.html'
nav_active='ledger', # Highlighted tab in menu
row=row, # Inputs have values from this row
balance=get_balance()) # Menu keeps showing users balance
else:
return render_template('edit_withdrawal_row.html', # Render template 'edit_withdrawal_row.html'
nav_active='ledger', # Highlighted tab in menu
row=row, # Inputs have values from this row
balance=get_balance()) # Menu keeps showing users balance
# With method POST, values are getting from form and save in table in database
else:
        # Default value of dollars - when the user deletes the previous value and sends the form with an empty field
dollars = 0
# If value is typed in form, get it and save to 'dollars' var
if 'edit-dollars' in request.form:
dollars = request.form['edit-dollars']
# Default value of cents
cents = 0
# If value is typed in form, get it and save to 'cents' var
if 'edit-cents' in request.form:
cents = request.form['edit-cents']
# Check if value from form is not empty
if cents == '':
# If value is empty, set value = 0
cents = 0
        # If the value is < 10 we have to save e.g. 03 instead of 3
        # So we save cents as a string with a leading 0
if int(cents) < 10:
cents = '0' + str(cents)
# Get value from date input
if 'edit-date' in request.form:
date = request.form['edit-date']
date = datetime.strptime(date, '%Y-%m-%d')
# Default description
desc = ''
# If description is in form, get from form and save to 'desc' var
if 'edit-description' in request.form:
desc = request.form['edit-description']
# If 'edit-category' is in form, that means the form is for withdrawal
# Else - form is about deposit
if 'edit-category' in request.form:
# If withdrawal - save category and add '-' before value of amount
category = request.form['edit-category']
amount = '-' + str(dollars) + "." + str(cents)
else:
# If deposit - save 'Deposit' category and amount without '-'
category = 'Deposit'
amount = str(dollars) + "." + str(cents)
# Change amount into float
amount = float(amount)
# When we have information from form, we can change them in database
# Get value from table in database, where id row == id to edit(from URL)
row = Ledger.query.filter(Ledger.id == id_row_to_edit).first()
# Save information from form into variables from row
row.amount = amount
row.description = desc
row.date = date
row.category = category
# Save changes in database
db.session.commit()
# Print information about edition
flash("Row has been edited")
# Redirect to ledger
return redirect(url_for('ledger'))
@app.route('/analysis')
def analysis():
# This page display analysis and visualisations based on rows from ledger
# Get full ledger ordered by date
full_ledger = Ledger.query.order_by(Ledger.date).all()
# Save ledger into dataframe(to analysis)
ledger_df = pd.DataFrame({
# Simply values from these rows
'date': [x.date for x in full_ledger],
'amount': [x.amount for x in full_ledger],
'category': [x.category for x in full_ledger]
})
    # Add a new column to the data frame with the post-transaction balance
    # This column will be visualised
    ledger_df['balance'] = np.cumsum(ledger_df['amount'])
    # First, we get the number of rows in the dataframe
    # If there are not enough rows, the plot will not display or will display poorly
    # So we will display a message instead of the plot
len_line = ledger_df.shape[0]
# Create line plot for balance
fig_line = px.line(ledger_df, # Data from main ledger dataframe
x='date', # Date as X axis
y='balance') # Balance as Y axis
# We introduce some cosmetic changes to plot
fig_line.update_layout(xaxis_title='Date', # X axis title
yaxis_title='Balance', # Y axis title
title='Balance of your ledger') # Plot title
# We have to encode plot into json to send it to view
plot_json_line = json.dumps(fig_line, cls=plotly.utils.PlotlyJSONEncoder)
    # To create a pie plot of withdrawals by category we have to rebuild the dataframe
    # First - keep only rows with a category other than 'Deposit'
    # (copy() avoids a pandas SettingWithCopyWarning when the slice is modified below)
    ledger_df_pie = ledger_df[ledger_df['category'] != 'Deposit'].copy()
    # The next step is to store the withdrawal amounts (negative by default) as absolute values
    ledger_df_pie['amount'] = ledger_df_pie['amount'].abs()
# As with line plot - we need some rows to display plot
len_pie = ledger_df_pie.shape[0]
# Now we can create pie plot
fig_pie = px.pie(ledger_df_pie, values='amount', names='category', title='Expenses by category')
# Now we can encode this plot into json
plot_json_pie = json.dumps(fig_pie, cls=plotly.utils.PlotlyJSONEncoder)
# When we have everything, we can render template
return render_template(
'analysis.html',
nav_active='analysis', # Highlighted tab in menu
balance=get_balance(), # Menu keeps showing users balance
len_line=len_line, # Length of full ledger(to line plot)
plot_json_line=plot_json_line, # Line plot
len_pie=len_pie, # Length of dataframe with withdrawals
plot_json_pie=plot_json_pie, # Pie plot
)
@app.route('/about')
def about():
# This page show information about project
return render_template('about.html',
nav_active='about', # Highlighted tab in menu
balance=get_balance()) # Menu keeps showing users balance
if __name__ == '__main__':
app.run()
|
the-stack_0_11170 | from collections.abc import Mapping, Iterable
from ctypes import c_int, c_int32, c_double, c_char_p, POINTER
from weakref import WeakValueDictionary
import numpy as np
from numpy.ctypeslib import as_array
from openmc.exceptions import AllocationError, InvalidIDError
from . import _dll
from .core import _FortranObjectWithID
from .error import _error_handler
from .material import Material
__all__ = ['Cell', 'cells']
# Cell functions
_dll.openmc_extend_cells.argtypes = [c_int32, POINTER(c_int32), POINTER(c_int32)]
_dll.openmc_extend_cells.restype = c_int
_dll.openmc_extend_cells.errcheck = _error_handler
_dll.openmc_cell_get_id.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_cell_get_id.restype = c_int
_dll.openmc_cell_get_id.errcheck = _error_handler
_dll.openmc_cell_get_fill.argtypes = [
c_int32, POINTER(c_int), POINTER(POINTER(c_int32)), POINTER(c_int32)]
_dll.openmc_cell_get_fill.restype = c_int
_dll.openmc_cell_get_fill.errcheck = _error_handler
_dll.openmc_cell_set_fill.argtypes = [
c_int32, c_int, c_int32, POINTER(c_int32)]
_dll.openmc_cell_set_fill.restype = c_int
_dll.openmc_cell_set_fill.errcheck = _error_handler
_dll.openmc_cell_set_id.argtypes = [c_int32, c_int32]
_dll.openmc_cell_set_id.restype = c_int
_dll.openmc_cell_set_id.errcheck = _error_handler
_dll.openmc_cell_set_temperature.argtypes = [
c_int32, c_double, POINTER(c_int32)]
_dll.openmc_cell_set_temperature.restype = c_int
_dll.openmc_cell_set_temperature.errcheck = _error_handler
_dll.openmc_get_cell_index.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_get_cell_index.restype = c_int
_dll.openmc_get_cell_index.errcheck = _error_handler
class Cell(_FortranObjectWithID):
"""Cell stored internally.
This class exposes a cell that is stored internally in the OpenMC
library. To obtain a view of a cell with a given ID, use the
:data:`openmc.capi.cells` mapping.
Parameters
----------
index : int
Index in the `cells` array.
Attributes
----------
id : int
ID of the cell
"""
__instances = WeakValueDictionary()
def __new__(cls, uid=None, new=True, index=None):
mapping = cells
if index is None:
if new:
# Determine ID to assign
if uid is None:
uid = max(mapping, default=0) + 1
else:
if uid in mapping:
raise AllocationError('A cell with ID={} has already '
'been allocated.'.format(uid))
index = c_int32()
_dll.openmc_extend_cells(1, index, None)
index = index.value
else:
index = mapping[uid]._index
if index not in cls.__instances:
instance = super().__new__(cls)
instance._index = index
if uid is not None:
instance.id = uid
cls.__instances[index] = instance
return cls.__instances[index]
@property
def id(self):
cell_id = c_int32()
_dll.openmc_cell_get_id(self._index, cell_id)
return cell_id.value
@id.setter
def id(self, cell_id):
_dll.openmc_cell_set_id(self._index, cell_id)
@property
def fill(self):
fill_type = c_int()
indices = POINTER(c_int32)()
n = c_int32()
_dll.openmc_cell_get_fill(self._index, fill_type, indices, n)
if fill_type.value == 1:
if n.value > 1:
#TODO: off-by-one
return [Material(index=i+1 if i >= 0 else i)
for i in indices[:n.value]]
else:
#TODO: off-by-one
index = indices[0] + 1 if indices[0] >= 0 else indices[0]
return Material(index=index)
else:
raise NotImplementedError
@fill.setter
def fill(self, fill):
if isinstance(fill, Iterable):
n = len(fill)
indices = (c_int32*n)(*(m._index if m is not None else -1
for m in fill))
_dll.openmc_cell_set_fill(self._index, 1, n, indices)
elif isinstance(fill, Material):
indices = (c_int32*1)(fill._index)
_dll.openmc_cell_set_fill(self._index, 1, 1, indices)
elif fill is None:
indices = (c_int32*1)(-1)
_dll.openmc_cell_set_fill(self._index, 1, 1, indices)
def set_temperature(self, T, instance=None):
"""Set the temperature of a cell
Parameters
----------
T : float
Temperature in K
instance : int or None
Which instance of the cell
"""
_dll.openmc_cell_set_temperature(self._index, T, c_int32(instance))
class _CellMapping(Mapping):
def __getitem__(self, key):
index = c_int32()
try:
_dll.openmc_get_cell_index(key, index)
except (AllocationError, InvalidIDError) as e:
# __contains__ expects a KeyError to work correctly
raise KeyError(str(e))
return Cell(index=index.value)
def __iter__(self):
for i in range(len(self)):
yield Cell(index=i + 1).id
def __len__(self):
return c_int32.in_dll(_dll, 'n_cells').value
def __repr__(self):
return repr(dict(self))
cells = _CellMapping()
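# Illustrative usage (a sketch; assumes the OpenMC library has been initialized,
# e.g. via openmc.capi.init(), so the geometry is loaded in memory):
#     for cell_id in cells:
#         cells[cell_id].set_temperature(300.0, instance=0)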
|
the-stack_0_11171 | from django.contrib import admin
from recipes.models import Ingredient, IngredientUnitMeasure, \
IngredientFamily, IngredientPhoto, \
RecipeType, RecipeDifficulty
@admin.register(IngredientUnitMeasure)
class IngredientUnitMeasureAdmin(admin.ModelAdmin):
list_display = ['name', 'label']
actions_on_bottom = True
@admin.register(IngredientFamily)
class IngredientFamilyAdmin(admin.ModelAdmin):
list_display = ['name']
actions_on_bottom = True
@admin.register(RecipeType)
class RecipeTypeAdmin(admin.ModelAdmin):
list_display = ['label']
actions_on_bottom = True
@admin.register(RecipeDifficulty)
class RecipeDifficultyAdmin(admin.ModelAdmin):
list_display = ['label', 'level']
ordering = ['level']
actions_on_bottom = True
@admin.register(Ingredient)
class IngredientAdmin(admin.ModelAdmin):
list_display = ['name', 'family']
# custom tabular inline for many to many relationship
class IngredientUnitMeasureInline(admin.TabularInline):
model = Ingredient.unit_measure.through
extra = 1
class IngredientPhotoInline(admin.StackedInline):
model = IngredientPhoto
inlines = [
IngredientUnitMeasureInline,
IngredientPhotoInline
]
# exclude unit_measure because it will be used into inlines
exclude = ('unit_measure',)
|
the-stack_0_11174 | # -*- coding: utf-8 -*-
from model.group import Group
import random
import string
import os.path
import jsonpickle
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of groups", "file"])
except getopt.GetoptError as err:
    print(err)  # the getopt module has no usage() helper; report the parse error instead
sys.exit(2)
n = 5
f = "data/groups.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters+string.digits + string.punctuation + " "*10
return prefix + "".join((random.choice(symbols) for i in range(random.randrange(maxlen))))
testdata = [Group(name="", header="", footer="")] + [
Group(name=random_string("name", 10), header=random_string("header", 20), footer=random_string("footer", 20))
for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent = 2)
out.write(jsonpickle.encode(testdata)) |
the-stack_0_11175 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
The SUCCD Ansatz.
"""
from typing import List, Optional, Tuple
import itertools
import logging
from qiskit.circuit import QuantumCircuit
from qiskit_nature import QiskitNatureError
from qiskit_nature.converters.second_quantization import QubitConverter
from .ucc import UCC
from .utils.fermionic_excitation_generator import generate_fermionic_excitations
logger = logging.getLogger(__name__)
# TODO: figure out how to implement `succ_full`: a variant of this class, which does include also
# the symmetrically mirrored double excitations, but assigns the same circuit parameter to them.
class SUCCD(UCC):
"""The SUCCD Ansatz.
The SUCCD Ansatz (by default) only contains double excitations. Furthermore, it only considers
the set of excitations which is symmetrically invariant with respect to spin-flips of both
particles. For more information see also [1].
Note, that this Ansatz can only work for singlet-spin systems. Therefore, the number of alpha
and beta electrons must be equal.
This is a convenience subclass of the UCC Ansatz. For more information refer to :class:`UCC`.
References:
[1] https://arxiv.org/abs/1911.10864
"""
def __init__(
self,
qubit_converter: Optional[QubitConverter] = None,
num_particles: Optional[Tuple[int, int]] = None,
num_spin_orbitals: Optional[int] = None,
reps: int = 1,
initial_state: Optional[QuantumCircuit] = None,
include_singles: Tuple[bool, bool] = (False, False),
):
"""
Args:
qubit_converter: the QubitConverter instance which takes care of mapping a
:class:`~.SecondQuantizedOp` to a :class:`PauliSumOp` as well as performing all
configured symmetry reductions on it.
num_particles: the tuple of the number of alpha- and beta-spin particles.
num_spin_orbitals: the number of spin orbitals.
reps: The number of times to repeat the evolved operators.
initial_state: A `QuantumCircuit` object to prepend to the circuit.
include_singles: enables the inclusion of single excitations per spin species.
Raises:
QiskitNatureError: if the number of alpha and beta electrons is not equal.
"""
self._validate_num_particles(num_particles)
self._include_singles = include_singles
super().__init__(
qubit_converter=qubit_converter,
num_particles=num_particles,
num_spin_orbitals=num_spin_orbitals,
excitations=self.generate_excitations,
alpha_spin=True,
beta_spin=True,
max_spin_excitation=None,
reps=reps,
initial_state=initial_state,
)
@property
def include_singles(self) -> Tuple[bool, bool]:
"""Whether to include single excitations."""
return self._include_singles
@include_singles.setter
def include_singles(self, include_singles: Tuple[bool, bool]) -> None:
"""Sets whether to include single excitations."""
self._include_singles = include_singles
def generate_excitations(
self, num_spin_orbitals: int, num_particles: Tuple[int, int]
) -> List[Tuple[Tuple[int, ...], Tuple[int, ...]]]:
"""Generates the excitations for the SUCCD Ansatz.
Args:
num_spin_orbitals: the number of spin orbitals.
num_particles: the number of alpha and beta electrons. Note, these must be identical for
this class.
Raises:
QiskitNatureError: if the number of alpha and beta electrons is not equal.
Returns:
The list of excitations encoded as tuples of tuples. Each tuple in the list is a pair of
tuples. The first tuple contains the occupied spin orbital indices whereas the second
one contains the indices of the unoccupied spin orbitals.
"""
self._validate_num_particles(num_particles)
excitations: List[Tuple[Tuple[int, ...], Tuple[int, ...]]] = []
excitations.extend(
generate_fermionic_excitations(
1,
num_spin_orbitals,
num_particles,
alpha_spin=self.include_singles[0],
beta_spin=self.include_singles[1],
)
)
num_electrons = num_particles[0]
beta_index_shift = num_spin_orbitals // 2
# generate alpha-spin orbital indices for occupied and unoccupied ones
alpha_occ = list(range(num_electrons))
alpha_unocc = list(range(num_electrons, beta_index_shift))
# the Cartesian product of these lists gives all possible single alpha-spin excitations
alpha_excitations = list(itertools.product(alpha_occ, alpha_unocc))
logger.debug("Generated list of single alpha excitations: %s", alpha_excitations)
# Find all possible double excitations constructed from the list of single excitations.
# Note, that we use `combinations_with_replacement` here, in order to also get those double
# excitations which excite from the same occupied level twice. We will need those in the
# following post-processing step.
pool = itertools.combinations_with_replacement(alpha_excitations, 2)
for exc in pool:
# find the two excitations (Note: SUCCD only works for double excitations!)
alpha_exc, second_exc = exc[0], exc[1]
# shift the second excitation into the beta-spin orbital index range
beta_exc = (
second_exc[0] + beta_index_shift,
second_exc[1] + beta_index_shift,
)
# add the excitation tuple
occ: Tuple[int, ...]
unocc: Tuple[int, ...]
occ, unocc = zip(alpha_exc, beta_exc)
exc_tuple = (occ, unocc)
excitations.append(exc_tuple)
logger.debug("Added the excitation: %s", exc_tuple)
return excitations
def _validate_num_particles(self, num_particles):
try:
assert num_particles[0] == num_particles[1]
except AssertionError as exc:
raise QiskitNatureError(
"The SUCCD Ansatz only works for singlet-spin systems. However, you specified "
"differing numbers of alpha and beta electrons:",
str(num_particles),
) from exc
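# Illustrative example (a sketch, not part of the library API): for num_spin_orbitals=8
# and num_particles=(2, 2), generate_excitations() pairs each single alpha excitation with
# its spin-flipped beta counterpart, producing double excitations such as ((0, 4), (2, 6)),
# i.e. occupied spin orbitals (0, 4) excited into unoccupied spin orbitals (2, 6).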
|
the-stack_0_11177 | # -*- coding: utf-8 -*-
# This scaffolding model makes your app work on Google App Engine too
# File is released under public domain and you can use without limitations
if request.global_settings.web2py_version < "2.14.1":
    raise HTTP(500, "Requires web2py 2.14.1 or newer")
# if SSL/HTTPS is properly configured and you want all HTTP requests to
# be redirected to HTTPS, uncomment the line below:
# request.requires_https()
# app configuration made easy. Look inside private/appconfig.ini
from gluon.contrib.appconfig import AppConfig
# once in production, remove reload=True to gain full speed
myconf = AppConfig(reload=True)
if not request.env.web2py_runtime_gae:
# if NOT running on Google App Engine use SQLite or other DB
db = DAL(myconf.get('db.uri'),
pool_size=myconf.get('db.pool_size'),
migrate_enabled=myconf.get('db.migrate'),
check_reserved=['all'])
# I like to keep the session in the db.
session.connect(request, response, db=db)
else:
# connect to Google BigTable (optional 'google:datastore://namespace')
db = DAL('google:datastore+ndb')
# store sessions and tickets there
session.connect(request, response, db=db)
#
# or store session in Memcache, Redis, etc.
# from gluon.contrib.memdb import MEMDB
# from google.appengine.api.memcache import Client
# session.connect(request, response, db = MEMDB(Client()))
# by default give a view/generic.extension to all actions from localhost
# none otherwise. a pattern can be 'controller/function.extension'
response.generic_patterns = ['*'] if request.is_local else []
# choose a style for forms
response.formstyle = myconf.get('forms.formstyle') # or 'bootstrap3_stacked' or 'bootstrap2' or other
response.form_label_separator = myconf.get('forms.separator') or ''
# (optional) optimize handling of static files
# response.optimize_css = 'concat,minify,inline'
# response.optimize_js = 'concat,minify,inline'
# (optional) static assets folder versioning
# response.static_version = '0.0.0'
# Here is sample code if you need for
# - email capabilities
# - authentication (registration, login, logout, ... )
# - authorization (role based authorization)
# - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
# - old style crud actions
# (more options discussed in gluon/tools.py)
from gluon.tools import Auth, Service, PluginManager
# host names must be a list of allowed host names (glob syntax allowed)
auth = Auth(db, host_names=myconf.get('host.names'))
service = Service()
plugins = PluginManager()
# create all tables needed by auth if not custom tables
auth.define_tables(username=False, signature=False)
# configure email
mail = auth.settings.mailer
mail.settings.server = 'logging' if request.is_local else myconf.get('smtp.server')
mail.settings.sender = myconf.get('smtp.sender')
mail.settings.login = myconf.get('smtp.login')
mail.settings.tls = myconf.get('smtp.tls') or False
mail.settings.ssl = myconf.get('smtp.ssl') or False
# configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
# More API examples for controllers:
#
# >>> db.mytable.insert(myfield='value')
# >>> rows = db(db.mytable.myfield == 'value').select(db.mytable.ALL)
# >>> for row in rows: print row.id, row.myfield
######################
# Logging
import logging, sys
FORMAT = "%(asctime)s %(levelname)s %(process)s %(thread)s %(funcName)s():%(lineno)d %(message)s"
logging.basicConfig(stream=sys.stderr)
logger = logging.getLogger(request.application)
logger.setLevel(logging.INFO)
# Let's log the request.
logger.info("====> Request: %r %r" % (request.env.request_method, request.env.path_info))
|
the-stack_0_11181 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This package contains a scaffold of a handler."""
from typing import Optional, cast
from aea.protocols.base import Message
from aea.skills.base import Handler
from packages.fetchai.protocols.register.message import RegisterMessage
from packages.fetchai.protocols.signing.message import SigningMessage
from packages.fetchai.skills.registration_aw1.dialogues import (
RegisterDialogue,
RegisterDialogues,
SigningDialogue,
SigningDialogues,
)
from packages.fetchai.skills.registration_aw1.strategy import Strategy
class AW1RegistrationHandler(Handler):
"""This class handles register messages."""
SUPPORTED_PROTOCOL = RegisterMessage.protocol_id
def setup(self) -> None:
"""
Implement the setup.
:return: None
"""
def handle(self, message: Message) -> None:
"""
Implement the reaction to an envelope.
:param message: the message
:return: None
"""
register_msg = cast(RegisterMessage, message)
# recover dialogue
register_dialogues = cast(RegisterDialogues, self.context.register_dialogues)
register_dialogue = cast(
Optional[RegisterDialogue], register_dialogues.update(register_msg)
)
if register_dialogue is None:
self._handle_unidentified_dialogue(register_msg)
return
# handle message
if register_msg.performative is RegisterMessage.Performative.SUCCESS:
self._handle_success(register_msg, register_dialogue)
elif register_msg.performative is RegisterMessage.Performative.ERROR:
self._handle_error(register_msg, register_dialogue)
else:
self._handle_invalid(register_msg, register_dialogue)
def teardown(self) -> None:
"""
Implement the handler teardown.
:return: None
"""
def _handle_unidentified_dialogue(self, register_msg: RegisterMessage) -> None:
"""
Handle an unidentified dialogue.
:param msg: the message
"""
self.context.logger.info(
f"received invalid register_msg message={register_msg}, unidentified dialogue."
)
def _handle_success(
self, register_msg: RegisterMessage, register_dialogue: RegisterDialogue
) -> None:
"""
Handle an register message.
:param register_msg: the register message
:param register_dialogue: the dialogue
:return: None
"""
self.context.logger.debug(
f"received register_msg success message={register_msg} in dialogue={register_dialogue}."
)
self.context.logger.info(
f"received register message success, info={register_msg.info}. Stop me now!"
)
strategy = cast(Strategy, self.context.strategy)
strategy.is_registered = True
strategy.is_registration_pending = False
strategy.is_ready_to_register = False
def _handle_error(
self, register_msg: RegisterMessage, register_dialogue: RegisterDialogue
) -> None:
"""
Handle an register message.
:param register_msg: the register message
:param register_dialogue: the dialogue
:return: None
"""
self.context.logger.debug(
f"received register_msg error message={register_msg} in dialogue={register_dialogue}."
)
self.context.logger.info(
f"received register message error, error_msg={register_msg.error_msg}. Stop me now!"
)
strategy = cast(Strategy, self.context.strategy)
strategy.is_registration_pending = False
strategy.is_ready_to_register = False
def _handle_invalid(
self, register_msg: RegisterMessage, register_dialogue: RegisterDialogue
) -> None:
"""
Handle an register message.
:param register_msg: the register message
:param register_dialogue: the dialogue
:return: None
"""
self.context.logger.warning(
f"cannot handle register_msg message of performative={register_msg.performative} in dialogue={register_dialogue}."
)
class SigningHandler(Handler):
"""Implement the transaction handler."""
SUPPORTED_PROTOCOL = SigningMessage.protocol_id
def setup(self) -> None:
"""Implement the setup for the handler."""
def handle(self, message: Message) -> None:
"""
Implement the reaction to a message.
:param message: the message
:return: None
"""
signing_msg = cast(SigningMessage, message)
# recover dialogue
signing_dialogues = cast(SigningDialogues, self.context.signing_dialogues)
signing_dialogue = cast(
Optional[SigningDialogue], signing_dialogues.update(signing_msg)
)
if signing_dialogue is None:
self._handle_unidentified_dialogue(signing_msg)
return
# handle message
if signing_msg.performative is SigningMessage.Performative.SIGNED_MESSAGE:
self._handle_signed_message(signing_msg, signing_dialogue)
elif signing_msg.performative is SigningMessage.Performative.ERROR:
self._handle_error(signing_msg, signing_dialogue)
else:
self._handle_invalid(signing_msg, signing_dialogue)
def teardown(self) -> None:
"""
Implement the handler teardown.
:return: None
"""
def _handle_unidentified_dialogue(self, signing_msg: SigningMessage) -> None:
"""
Handle an unidentified dialogue.
:param msg: the message
"""
self.context.logger.info(
f"received invalid signing message={signing_msg}, unidentified dialogue."
)
def _handle_signed_message(
self, signing_msg: SigningMessage, signing_dialogue: SigningDialogue
) -> None:
"""
Handle a signed message.
:param signing_msg: the signing message
:param signing_dialogue: the dialogue
:return: None
"""
self.context.logger.debug(
f"received signing message from decision maker, message={signing_msg} in dialogue={signing_dialogue}"
)
self.context.logger.info(
f"received signing message from decision maker, signature={signing_msg.signed_message.body} stored!"
)
strategy = cast(Strategy, self.context.strategy)
strategy.signature_of_ethereum_address = signing_msg.signed_message.body
strategy.is_ready_to_register = True
def _handle_error(
self, signing_msg: SigningMessage, signing_dialogue: SigningDialogue
) -> None:
"""
Handle an oef search message.
:param signing_msg: the signing message
:param signing_dialogue: the dialogue
:return: None
"""
self.context.logger.info(
f"transaction signing was not successful. Error_code={signing_msg.error_code} in dialogue={signing_dialogue}"
)
def _handle_invalid(
self, signing_msg: SigningMessage, signing_dialogue: SigningDialogue
) -> None:
"""
Handle an oef search message.
:param signing_msg: the signing message
:param signing_dialogue: the dialogue
:return: None
"""
self.context.logger.warning(
f"cannot handle signing message of performative={signing_msg.performative} in dialogue={signing_dialogue}."
)
|
the-stack_0_11182 | from rest_framework.views import APIView
from mysystem.models import Users
from apps.oauth.models import OAuthWXUser
from utils.jsonResponse import SuccessResponse,ErrorResponse
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from rest_framework_simplejwt.views import TokenObtainPairView
from utils.common import get_parameter_dic,REGEX_MOBILE
from config import WX_XCX_APPID,WX_XCX_APPSECRET,WX_GZH_APPID,WX_GZH_APPSECRET,WX_GZPT_APPSECRET,WX_GZPT_APPID
import requests
import base64
import json
from Crypto.Cipher import AES
from django.utils.translation import gettext_lazy as _
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework.permissions import IsAuthenticated
import re
import uuid
from django.db import transaction
from django.db.models import F
from django.core.cache import cache
import logging
from django_redis import get_redis_connection
logger = logging.getLogger(__name__)
# Create your views here.
# ================================================= #
# ************** WeChat mini program login view ************** #
# ================================================= #
class XCXLoginSerializer(TokenObtainPairSerializer):
"""
    Login serializer:
    overrides the djangorestframework-simplejwt serializer
"""
@classmethod
def get_token(cls, user):
refresh = super(XCXLoginSerializer,cls).get_token(user)
data = {}
data['openid'] = user.oauthwxuser.xcx_openid
data['userId'] = user.id
data['refresh'] = str(refresh)
data['access'] = str(refresh.access_token)
return data
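# XCXLoginSerializer.get_token returns a payload of the shape (values illustrative):
#     {"openid": "...", "userId": 1, "refresh": "<jwt>", "access": "<jwt>"}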
'''
WeChat Crypt
'''
class WeChatCrypt:
def __init__(self, appId, sessionKey):
self.appId = appId
self.sessionKey = sessionKey
def decrypt(self, encryptedData, iv):
# base64 decode
sessionKey = base64.b64decode(self.sessionKey)
encryptedData = base64.b64decode(encryptedData)
iv = base64.b64decode(iv)
cipher = AES.new(sessionKey, AES.MODE_CBC, iv)
decrypted = json.loads(self._unpad(cipher.decrypt(encryptedData)))
if decrypted['watermark']['appid'] != self.appId:
raise Exception('Invalid Buffer')
return decrypted
def _unpad(self, s):
return s[:-ord(s[len(s)-1:])]
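# Minimal usage sketch (illustrative; session_key, encryptedData and iv come from a
# jscode2session call plus the client-side wx.getPhoneNumber / wx.getUserInfo APIs):
#     crypt = WeChatCrypt(WX_XCX_APPID, session_key)
#     decrypted = crypt.decrypt(encryptedData, iv)   # dict, e.g. containing 'phoneNumber'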
# get the WeChat user's openid and related session info
def get_wechat_login_code_url(jscode):
api_url = 'https://api.weixin.qq.com/sns/jscode2session?appid={0}&secret={1}&js_code={2}&grant_type=authorization_code'
get_url = api_url.format(WX_XCX_APPID,WX_XCX_APPSECRET,jscode)
r = requests.get(get_url)
return r
# WeChat mini program login endpoint
class WeChatXCXLoginAPIView(APIView):
"""
post:
    WeChat mini program login endpoint
    exchanges the mini program code for an openid
"""
permission_classes = []
authentication_classes = []
    @transaction.atomic  # opens a transaction; it is committed automatically when the method completes
def post(self, request):
jscode = get_parameter_dic(request)['code']
        inviter = get_parameter_dic(request).get('inviter')  # the userid of the referrer
if not jscode:
return ErrorResponse(msg="code不能为空")
resp = get_wechat_login_code_url(jscode)
openid = None
session_key = None
unionid = None
if resp.status_code != 200:
return ErrorResponse(msg="服务器到微信网络连接失败,请重试")
# json_data = {'errcode':0,'openid':'111','session_key':'test'}
json_data =json.loads(resp.content)
        if 'errcode' in json_data:  # if the call failed, return the error info
return ErrorResponse(msg=json_data['errmsg'])
openid = json_data['openid']
session_key = json_data['session_key']
if "unionid" in json_data:
unionid = json_data['unionid']
        # check whether the user already exists
try:
wxuser = Users.objects.get(username=openid)
            wxuser.oauthwxuser.session_key = session_key  # lowercase oauthwxuser refers to the related foreign-key object
wxuser.oauthwxuser.xcx_openid = openid
wxuser.oauthwxuser.unionId = unionid
wxuser.oauthwxuser.save()
resdata = XCXLoginSerializer.get_token(wxuser)
return SuccessResponse(data=resdata, msg="success")
except Exception as e:
with transaction.atomic():
savepoint = transaction.savepoint()
user = Users()
user.username = openid
                user.password = uuid.uuid4()  # generate a random password first, so that knowing the openid alone is not enough to log in
                user.identity = [0]  # identity 0 means a regular user
user.save()
OAuthWXUser.objects.create(user=user,session_key=session_key,xcx_openid=openid,unionid=unionid)
                # if inviter: # if an invite code was provided
# integral = FenXiaoManage.objects.filter(type=1, status=True).values_list('content', flat=True).first()
                # if integral: # if the referral-points campaign is still active
# Users.objects.filter(id=inviter).update(integral=F('integral') + int(integral))
# InviteRecord.objects.create(inv_user_id=inviter, invitee_user=user, get_integral=integral)
# IntegralRecord.objects.create(user_id=inviter,type=4,income=1,integral=integral)
                # release the savepoint
transaction.savepoint_commit(savepoint)
resdata = XCXLoginSerializer.get_token(user)
return SuccessResponse(data=resdata, msg="success")
def filter_emoji(desstr, restr=''):
    # filter out emoji characters
try:
res = re.compile(u'[\U00010000-\U0010ffff]')
except re.error:
res = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
return res.sub(restr, desstr)
# WeChat mini program phone-number authorization login endpoint
class WeChatXCXMobileLoginAPIView(APIView):
"""
post:
    WeChat mini program phone-number authorization login endpoint
    exchanges the mini program code for an openid and decrypts the encryptedData (phone number) sent by the front end
"""
permission_classes = []
authentication_classes = []
def post(self, request):
        inviter = get_parameter_dic(request).get('inviter')  # invite code: the userid of the referrer
jscode = get_parameter_dic(request)['code']
iv = get_parameter_dic(request)['iv']
encryptedData = get_parameter_dic(request)['encryptedData']
nickname = get_parameter_dic(request)['nickname']
avatar_url = get_parameter_dic(request)['avatar_url']
gender = get_parameter_dic(request)['gender']
nickname = filter_emoji(nickname, '')
if jscode is None:
return ErrorResponse(msg="code不能为空")
if iv is None:
return ErrorResponse(msg="iv不能为空")
if encryptedData is None:
return ErrorResponse(msg="encryptedData不能为空")
if avatar_url is None:
return ErrorResponse(msg="avatar_url不能为空")
resp = get_wechat_login_code_url(jscode)
openid = None
session_key = None
unionid = ""
if resp.status_code != 200:
return ErrorResponse(msg="服务器到微信网络连接失败,请重试")
# json_data = {'errcode':0,'openid':'111','session_key':'test'}
json_data =json.loads(resp.content)
if 'errcode' in json_data:#如果获取失败返回失败信息
return ErrorResponse(msg=json_data['errmsg'])
openid = json_data['openid']
session_key = json_data['session_key']
if "unionid" in json_data:
unionid = json_data['unionid']
wxdc = WeChatCrypt(WX_XCX_APPID, session_key)
pResult = wxdc.decrypt(encryptedData, iv)
        # check whether the user already exists
try:
wxuser = Users.objects.get(username = openid)
if not wxuser.is_active:
return ErrorResponse(msg="该用户已禁用,请联系管理员")
            wxuser.oauthwxuser.session_key = session_key  # lowercase oauthwxuser refers to the related foreign-key object
wxuser.oauthwxuser.xcx_openid = openid
wxuser.oauthwxuser.unionId = unionid
wxuser.oauthwxuser.avatarUrl=avatar_url
wxuser.oauthwxuser.sex = gender
wxuser.oauthwxuser.mobilePhoneNumber = pResult['phoneNumber']
wxuser.oauthwxuser.nick = nickname
wxuser.oauthwxuser.save()
wxuser.nickname = nickname
wxuser.avatar = avatar_url
wxuser.gender = gender
wxuser.save()
resdata = XCXLoginSerializer.get_token(wxuser)
return SuccessResponse(data=resdata,msg="success")
        except Exception as e:  # new user
with transaction.atomic():
try:
savepoint = transaction.savepoint()
user = Users()
user.username = openid
                    user.password = uuid.uuid4()  # generate a random password first, so that knowing the openid alone is not enough to log in
                    user.identity = [0]  # identity 0 means a regular user
user.nickname = nickname
user.name = nickname
user.avatar = avatar_url
user.mobile = pResult['phoneNumber']
user.save()
OAuthWXUser.objects.create(user=user,session_key=session_key,xcx_openid=openid,avatarUrl=avatar_url,sex=gender,mobilePhoneNumber=pResult['phoneNumber'],nick=nickname)
                    # if inviter: # if an invite code was provided
# integral = FenXiaoManage.objects.filter(type=1,status=True).values_list('content',flat=True).first()
                    # if integral: # if the referral-points campaign is still active
# Users.objects.filter(id=inviter).update(integral=F('integral')+int(integral))
# InviteRecord.objects.create(inv_user_id=inviter,invitee_user=user,get_integral=integral)
# IntegralRecord.objects.create(user_id=inviter, type=4, income=1, integral=integral)
except Exception as e:
transaction.savepoint_rollback(savepoint)
return ErrorResponse(msg=str(e))
                # release the savepoint
transaction.savepoint_commit(savepoint)
resdata = XCXLoginSerializer.get_token(user)
return SuccessResponse(data=resdata, msg="success")
# WeChat mini program: update (fetch) the user info obtained via wx.getUserInfo by decrypting the returned data
class XCXWeChatUserInfoUpdateAPIView(APIView):
"""
post:
    Update the WeChat mini program user info
"""
permission_classes = [IsAuthenticated]
authentication_classes = [JWTAuthentication]
def post(self, request):
encryptedData = get_parameter_dic(request)['encryptedData']
iv = get_parameter_dic(request)['iv']
if not encryptedData:
return ErrorResponse(msg="encryptedData不能为空")
if not iv:
return ErrorResponse(msg="iv不能为空")
wechat_user = OAuthWXUser.objects.filter(user=request.user).first()
if not wechat_user:
return ErrorResponse(msg="无此用户")
pc = WeChatCrypt(WX_XCX_APPID, wechat_user.session_key)
user = pc.decrypt(encryptedData, iv)
wechat_user.nick = user['nickName']
wechat_user.sex = user['gender']
wechat_user.city = user['city']
wechat_user.avatarUrl = user['avatarUrl']
wechat_user.save()
myuser = request.user
myuser.nickname = user['nickName']
myuser.avatar = user['avatarUrl']
        myuser.save()  # persist the updated nickname/avatar on the Users record
        return SuccessResponse(data=user,msg="success")
# ================================================= #
# ************** WeChat mini program: generate promotional QR code view ************** #
# ================================================= #
# get the mini program access_token
"""
Normal response: the access_token is currently valid for 2 hours; requesting a new one invalidates the previously issued access_token
{"access_token":"ACCESS_TOKEN","expires_in":7200}
Error response
{"errcode":40013,"errmsg":"invalid appid"}
"""
def get_wechat_xcx_access_token_url():
api_url = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid={0}&secret={1}"
get_url = api_url.format(WX_XCX_APPID,WX_XCX_APPSECRET)
r = requests.get(get_url)
return r
# this endpoint can generate an unlimited number of QR codes; the QR code is returned as a buffer (only a released mini program can generate it, not the trial version)
"""
Normal response
{
"errcode": 0,
"errmsg": "ok",
"contentType": "image/jpeg",
"buffer": Buffer
}
"""
def get_wechat_qrcode_url(access_token,scene,page,width=430,auto_color=True,is_hyaline=False):
if not page:
page = "pages/index/index"
api_url = 'https://api.weixin.qq.com/wxa/getwxacodeunlimit?access_token={0}'
get_url = api_url.format(access_token)
headers = {"Content-type":"application/json"}
data = dict(scene=scene,page=page,width=width,auto_color=auto_color,is_hyaline=is_hyaline)
r = requests.post(url=get_url,data=json.dumps(data),headers=headers)
return r
class GetXCXShareQrcodeView(APIView):
"""
post:
    WeChat mini program: get a promotional QR code
    scene: the userid of the sharing user
    page: the page to jump to
"""
permission_classes = [IsAuthenticated]
authentication_classes = [JWTAuthentication]
def post(self,request):
        scene = get_parameter_dic(request)['scene']  # userid of the sharing user
page = get_parameter_dic(request)['page']
if scene is None or page is None:
return ErrorResponse("提交参数不能为空")
restoken = get_wechat_xcx_access_token_url()
if restoken.status_code != 200:
return ErrorResponse(msg="服务器到微信网络连接失败,请重试1")
json_data = json.loads(restoken.content)
if 'errcode' in json_data and json_data['errcode'] !=0: # 如果获取失败返回失败信息
return ErrorResponse(msg=json_data['errmsg'])
access_token = json_data['access_token']
res = get_wechat_qrcode_url(access_token,scene,page)
if res.status_code != 200:
return ErrorResponse(msg="服务器到微信网络连接失败,请重试2")
json_data2 = json.loads(res.content)
return SuccessResponse(data=json_data2,msg="success")
# ================================================= #
# ************** WeChat mini program: send service notification messages view ************** #
# ================================================= #
"""
1. form_id: the id of the submitted form (or prepay_id when used with payment)
2. data: the request body to submit
push_data={
"keyword1":{
"value":obj.order_sn
},
"keyword2":{
"value":obj.time
},
}
"""
def send_wx_xcx_message(access_token,openid,template_id,form_id,push_data):
api_url = "https://api.weixin.qq.com/cgi-bin/message/subscribe/send?access_token={0}"
get_url = api_url.format(access_token)
    payload={
        "touser": openid, # the recipient user's openid
        "template_id": template_id, # template id
        "form_id": form_id, # form id or prepay_id
"data": push_data
}
r = requests.post(get_url,json=payload)
return r
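# Minimal usage sketch (illustrative; the template id and push_data keys must match a
# subscribe-message template configured in the WeChat mini program console):
#     push_data = {"keyword1": {"value": "SN123"}, "keyword2": {"value": "2021-01-01"}}
#     r = send_wx_xcx_message(access_token, openid, "TEMPLATE_ID", None, push_data)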
def send_wx_xcx_message_cache(openid,template_id,form_id,push_data):
access_token = cache.get('xcx_access_token')
    if access_token:  # cached
res = send_wx_xcx_message(access_token,openid,template_id,form_id,push_data)
json_data = json.loads(res.content)
        if 'errcode' in json_data:  # if the send failed, handle the error info
if json_data['errcode'] == 40001:
restoken = get_wechat_xcx_access_token_url()
json_data1 = json.loads(restoken.content)
access_token1 = json_data1['access_token']
res1 = send_wx_xcx_message(access_token1, openid, template_id, form_id, push_data)
json_data2 = json.loads(res1.content)
if 'errcode' in json_data2 and json_data2['errcode'] !=0:
logger.error("微信小程序发送消息服务错误,用户openid:%s,template_id:%s,form_id:%s,data:%s,微信返回错误信息:%s"%(openid,template_id,form_id,push_data,json_data2))
return False
cache.set('xcx_access_token', access_token,7000)
return True
    else:  # not cached
restoken = get_wechat_xcx_access_token_url()
json_data1 = json.loads(restoken.content)
access_token1 = json_data1['access_token']
res1 = send_wx_xcx_message(access_token1, openid, template_id, form_id, push_data)
json_data2 = json.loads(res1.content)
if 'errcode' in json_data2 and json_data2['errcode'] !=0:
logger.error("微信小程序发送消息服务错误,用户openid:%s,template_id:%s,form_id:%s,data:%s,微信返回错误信息:%s" % (
openid, template_id, form_id, push_data, json_data2))
return False
cache.set('xcx_access_token', access_token,7000)
return True
# ================================================= #
# ************** WeChat Official Account app authorization login view ************** #
# ================================================= #
# exchange the code for an access_token and openid; the code is obtained by the front end and passed in
"""
Successful response
{
    "access_token": "ACCESS_TOKEN", (valid for 2 hours)
    "expires_in": 7200,
    "refresh_token": "REFRESH_TOKEN", (valid for 30 days)
    "openid": "OPENID",
    "scope": "SCOPE",
    "unionid": "o6_bmasdasdsad6_2sgVt7hMZOPfL"
}
Error response
{
"errcode": 40029,
"errmsg": "invalid code"
}
"""
def get_wechat_access_token_url(code):
api_url = "https://api.weixin.qq.com/sns/oauth2/access_token?appid={0}&secret={1}&code={2}&grant_type=authorization_code"
get_url = api_url.format(WX_GZPT_APPID,WX_GZPT_APPSECRET,code)
r = requests.get(get_url)
return r
# get the WeChat user's public profile information
"""
Successful response
{
"openid": "OPENID",
"nickname": "NICKNAME",
"sex": 1,
"province": "PROVINCE",
"city": "CITY",
"country": "COUNTRY",
"headimgurl": "https://thirdwx.qlogo.cn/mmopen/g3MonUZtNHkdmzicIlibx6iaFqAc56vxLSUfpb6n5WKSYVY0ChQKkiaJSgQ1dZuTOgvLLrhJbERQQ4eMsv84eavHiaiceqxibJxCfHe/0",
"privilege": ["PRIVILEGE1", "PRIVILEGE2"],
"unionid": " o6_bmasdasdsad6_2sgVt7hMZOPfL"
}
Error response
{
"errcode": 40003,
"errmsg": "invalid openid"
}
"""
def getWxUserInfo(access_token,openid):
api_url = "https://api.weixin.qq.com/sns/userinfo?access_token={0}&openid={1}"
get_url = api_url.format(access_token,openid)
r = requests.get(get_url)
return r
# check whether the access_token credential is still valid
"""
Valid response
{
"errcode": 0,
"errmsg": "ok"
}
"""
def is_access_token_valid(access_token, openid):
api_url = "https://api.weixin.qq.com/sns/auth?access_token={0}&openid={1}"
get_url = api_url.format(access_token, openid)
r = requests.get(get_url)
return r
# use the refresh_token to refresh an expired access_token
"""
Valid response
{
"access_token": "ACCESS_TOKEN",
"expires_in": 7200,
"refresh_token": "REFRESH_TOKEN",
"openid": "OPENID",
"scope": "SCOPE"
}
Error response
{
"errcode": 40030,
"errmsg": "invalid refresh_token"
}
"""
def refresh_access_token(refresh_token):
api_url = "https://api.weixin.qq.com/sns/oauth2/refresh_token?appid={0}&grant_type=refresh_token&refresh_token={1}"
get_url = api_url.format(WX_GZPT_APPID,refresh_token)
r = requests.get(get_url)
return r
# WeChat Official Account app login endpoint
class WeChatGZHLoginAPIView(APIView):
"""
post:
    WeChat Official Account login endpoint
    exchanges the Official Account code for an openid and access_token
"""
permission_classes = []
authentication_classes = []
def post(self, request):
jscode = get_parameter_dic(request)['code']
if not jscode:
return ErrorResponse(msg="code不能为空")
resp = get_wechat_access_token_url(jscode)
openid = ""
unionid = ""
access_token = ""
refresh_token = ""
scope = None
if resp.status_code != 200:
return ErrorResponse(msg="服务器到微信网络连接失败,请重试")
json_data =json.loads(resp.content)
        if 'errcode' in json_data and json_data['errcode'] !=0:  # if the call failed, return the error info
logger.error("微信app登录服务错误,用户提交code:%s,微信返回错误信息:%s" % (jscode, json_data))
return ErrorResponse(msg=json_data['errmsg'])
openid = json_data['openid']
access_token = json_data['access_token']
refresh_token = json_data['refresh_token']
scope = json_data['scope']
if "unionid" in json_data:
unionid = json_data['unionid']
        # check whether the user exists (use the openid to determine whether this is a first login)
user = Users.objects.filter(is_active=True,oauthwxuser__gzh_openid=openid).first()
        if not user:  # if the user does not exist, prompt the client to bind an account first
return ErrorResponse(code=301,data={'openid':openid,'is_bind':False},msg="无此用户,请先绑定")
#返回token
resdata = XCXLoginSerializer.get_token(user)
return SuccessResponse(data=resdata,msg="success")
class WeChatGZHBindAPIView(APIView):
"""
Bind a WeChat user
post:
Bind a WeChat user
Parameters: WeChat Official Account openid, mobile (phone number to bind), code (SMS verification code)
"""
permission_classes = []
authentication_classes = []
def post(self,request):
openid = get_parameter_dic(request)['openid']
mobile = get_parameter_dic(request)['mobile']
code = get_parameter_dic(request)['code']
# Validate the phone number format
if not re.match(REGEX_MOBILE, mobile):
return ErrorResponse(msg="Please enter a valid phone number")
# Check whether the SMS verification code is correct
redis_conn = get_redis_connection('verify_codes')
send_flag = redis_conn.get('sms_%s' % mobile)  # send_flag is bytes and must be converted to str with send_flag.decode()
if not send_flag:  # if the flag is missing, the verification code has expired
return ErrorResponse(msg="The SMS verification code has expired")
else:
if str(send_flag.decode()) != str(code):
return ErrorResponse(msg="Incorrect verification code")
user = Users.objects.filter(is_active=True,username=mobile+"app",identity__contains="1",oauthwxuser__isnull=True).first()
if not user:  # user does not exist or is already bound
return ErrorResponse(msg="Cannot bind: no such user or already bound")
OAuthWXUser.objects.create(user=user,gzh_openid=openid)
resdata = XCXLoginSerializer.get_token(user)
return SuccessResponse(data=resdata,msg="success") |
the-stack_0_11183 | #!/usr/bin/env python
# coding: utf-8
import json
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
from scipy.stats import ks_2samp
#import matplotlib.pyplot as plt
import plotly.graph_objs as go
import plotly.figure_factory as ff
from evidently.model.widget import BaseWidgetInfo, AlertStats, AdditionalGraphInfo
from evidently.widgets.widget import Widget
red = "#ed0400"
grey = "#4d4d4d"
class NumTargetCorrWidget(Widget):
def __init__(self, title: str):
super().__init__()
self.title = title
def get_info(self) -> BaseWidgetInfo:
#if self.wi:
return self.wi
#raise ValueError("No prediction data provided")
def calculate(self, reference_data: pd.DataFrame, production_data: pd.DataFrame, column_mapping):
if column_mapping:
date_column = column_mapping.get('datetime')
id_column = column_mapping.get('id')
target_column = column_mapping.get('target')
prediction_column = column_mapping.get('prediction')
num_feature_names = column_mapping.get('numerical_features')
if num_feature_names is None:
num_feature_names = []
else:
num_feature_names = [name for name in num_feature_names if is_numeric_dtype(reference_data[name])]
cat_feature_names = column_mapping.get('categorical_features')
if cat_feature_names is None:
cat_feature_names = []
else:
cat_feature_names = [name for name in cat_feature_names if is_numeric_dtype(reference_data[name])]
else:
date_column = 'datetime' if 'datetime' in reference_data.columns else None
id_column = None
target_column = 'target' if 'target' in reference_data.columns else None
prediction_column = 'prediction' if 'prediction' in reference_data.columns else None
utility_columns = [date_column, id_column, target_column, prediction_column]
num_feature_names = list(set(reference_data.select_dtypes([np.number]).columns) - set(utility_columns))
cat_feature_names = list(set(reference_data.select_dtypes([object]).columns) - set(utility_columns))  # np.object alias is removed in recent numpy
if target_column is not None:
#calculate corr
ref_target_corr = reference_data[num_feature_names + [target_column]].corr()[target_column]
prod_target_corr = production_data[num_feature_names + [target_column]].corr()[target_column]
#plot output correlations
target_corr = go.Figure()
target_corr.add_trace(go.Bar(y = ref_target_corr, x = ref_target_corr.index,
marker_color = grey, name = 'Reference'))
target_corr.add_trace(go.Bar(y = prod_target_corr, x = ref_target_corr.index,
marker_color = red, name = 'Production'))
target_corr.update_layout(xaxis_title = "Features", yaxis_title = "Correlation",
yaxis = dict(
range=(-1, 1),
showticklabels=True
))
target_corr_json = json.loads(target_corr.to_json())
self.wi = BaseWidgetInfo(
title=self.title,
type="big_graph",
details="",
alertStats=AlertStats(),
alerts=[],
alertsPosition="row",
insights=[],
size=1,
params={
"data": target_corr_json['data'],
"layout": target_corr_json['layout']
},
additionalGraphs=[],
)
else:
self.wi = None
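# A minimal usage sketch for the widget above. The DataFrames, column names and
# the direct attribute access on BaseWidgetInfo are illustrative assumptions;
# in a real report the widget is normally assembled by an evidently dashboard
# tab rather than called by hand.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    ref = pd.DataFrame({
        "feature_a": rng.normal(0, 1, 200),
        "feature_b": rng.normal(5, 2, 200),
        "target": rng.normal(0, 1, 200),
    })
    prod = ref.copy()
    prod["feature_a"] = prod["feature_a"] * 1.5  # simulate drift in production
    widget = NumTargetCorrWidget("Numerical target correlations")
    widget.calculate(ref, prod, column_mapping=None)
    info = widget.get_info()
    print(info.title, len(info.params["data"]), "bar traces")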
|
the-stack_0_11184 | # -*- coding: utf-8 -*-
# @Time : 20-6-4 下午4:19
# @Author : zhuying
# @Company : Minivision
# @File : transform.py
# @Software : PyCharm
from __future__ import division
import math
import random
from PIL import Image
try:
import accimage
except ImportError:
accimage = None
import numpy as np
import numbers
import types
from anti_spoof.src.data_io import functional as F
__all__ = ["Compose", "ToTensor", "ToPILImage", "Normalize", "RandomHorizontalFlip",
"Lambda", "RandomResizedCrop", "ColorJitter", "RandomRotation"]
class Compose(object):
"""Composes several transforms together.
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
Example:
>>> transforms.Compose([
>>> transforms.CenterCrop(10),
>>> transforms.ToTensor(),
>>> ])
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img):
for t in self.transforms:
img = t(img)
return img
class ToTensor(object):
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
"""
def __call__(self, pic):
"""
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
return F.to_tensor(pic)
class Lambda(object):
"""Apply a user-defined lambda as a transform.
Args:
lambd (function): Lambda/function to be used for transform.
"""
def __init__(self, lambd):
assert isinstance(lambd, types.LambdaType)
self.lambd = lambd
def __call__(self, img):
return self.lambd(img)
class ToPILImage(object):
"""Convert a tensor or an ndarray to PIL Image.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
1. If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
2. If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
3. If the input has 1 channel, the ``mode`` is determined by the data type (i,e,
``int``, ``float``, ``short``).
.. _PIL.Image mode: http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#modes
"""
def __init__(self, mode=None):
self.mode = mode
def __call__(self, pic):
"""
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
Returns:
PIL Image: Image converted to PIL Image.
"""
return F.to_pil_image(pic, self.mode)
class Normalize(object):
"""Normalize an tensor image with mean and standard deviation.
Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform
will normalize each channel of the input ``torch.*Tensor`` i.e.
``input[channel] = (input[channel] - mean[channel]) / std[channel]``
Args:
mean (sequence): Sequence of means for each channel.
std (sequence): Sequence of standard deviations for each channel.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized Tensor image.
"""
return F.normalize(tensor, self.mean, self.std)
class RandomHorizontalFlip(object):
"""Horizontally flip the given PIL Image randomly with a probability of 0.5."""
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly flipped image.
"""
if random.random() < 0.5:
return F.hflip(img)
return img
class RandomResizedCrop(object):
"""Crop the given PIL Image to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):
if isinstance(size, tuple):
self.size = size
else:
self.size = (size, size)
self.interpolation = interpolation
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
for attempt in range(10):
area = img.size[0] * img.size[1]
target_area = random.uniform(*scale) * area
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback
w = min(img.size[0], img.size[1])
i = (img.size[1] - w) // 2
j = (img.size[0] - w) // 2
return i, j, w, w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: Randomly cropped and resize image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
class ColorJitter(object):
"""Randomly change the brightness, contrast and saturation of an image.
Args:
brightness (float): How much to jitter brightness. brightness_factor
is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
contrast (float): How much to jitter contrast. contrast_factor
is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
saturation (float): How much to jitter saturation. saturation_factor
is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
hue(float): How much to jitter hue. hue_factor is chosen uniformly from
[-hue, hue]. Should be >=0 and <= 0.5.
"""
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
@staticmethod
def get_params(brightness, contrast, saturation, hue):
"""Get a randomized transform to be applied on image.
Arguments are same as that of __init__.
Returns:
Transform which randomly adjusts brightness, contrast and
saturation in a random order.
"""
transforms = []
if brightness > 0:
brightness_factor = np.random.uniform(max(0, 1 - brightness), 1 + brightness)
transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
if contrast > 0:
contrast_factor = np.random.uniform(max(0, 1 - contrast), 1 + contrast)
transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
if saturation > 0:
saturation_factor = np.random.uniform(max(0, 1 - saturation), 1 + saturation)
transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
if hue > 0:
hue_factor = np.random.uniform(-hue, hue)
transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
np.random.shuffle(transforms)
transform = Compose(transforms)
return transform
def __call__(self, img):
"""
Args:
img (PIL Image): Input image.
Returns:
PIL Image: Color jittered image.
"""
transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
return transform(img)
class RandomRotation(object):
"""Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
An optional resampling filter.
See http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#filters
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
def __init__(self, degrees, resample=False, expand=False, center=None):
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
self.resample = resample
self.expand = expand
self.center = center
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = np.random.uniform(degrees[0], degrees[1])
return angle
def __call__(self, img):
"""
img (PIL Image): Image to be rotated.
Returns:
PIL Image: Rotated image.
"""
angle = self.get_params(self.degrees)
return F.rotate(img, angle, self.resample, self.expand, self.center)
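# A small composition sketch for the transforms defined above, assuming
# anti_spoof.src.data_io.functional mirrors torchvision's functional API
# (so ToTensor yields a torch tensor). The crop size, jitter strengths and
# normalization statistics are illustrative choices, not values prescribed
# by this module.
if __name__ == "__main__":
    train_transform = Compose([
        RandomResizedCrop(80, scale=(0.8, 1.0)),
        ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.05),
        RandomHorizontalFlip(),
        RandomRotation(10),
        ToTensor(),
        Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])
    img = Image.new("RGB", (128, 128), color=(128, 64, 32))
    out = train_transform(img)
    print(getattr(out, "shape", None))  # e.g. torch.Size([3, 80, 80])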
|
the-stack_0_11186 | ## Advent of Code 2019: Intcode Computer v2
## https://adventofcode.com/2019
## Jesse Williams | github.com/vblank182
# **Compatible with Day 5, Part 1**
# Changelog:
# - Added IN and OUT instructions
# - Added support for parameter modes
#~# Opcodes #~#
ADD, MUL, IN, OUT = 1, 2, 3, 4
END = 99
#~# Parameter Modes #~#
POS = 0
IMM = 1
# Numbers of expected parameters for each opcode
num_params = {1:3, 2:3, 3:1, 4:1, 99:0}
def loadProgram(inputFile):
''' Loads a program file in "0,1,2,3,..." format and returns a list of integers. '''
with open(inputFile) as f:
initialTapeStrs = f.read()[:-1].split(',')
initialTape = [int(i) for i in initialTapeStrs]
return initialTape
def runProgram(initialTape, input, debugLevel=0):
# Make a copy of the initial tape.
workTape = initialTape.copy()
running = True
output = []
ptr = 0
while running:
# Determine the current opcode and parameter modes
opcode = int( str(workTape[ptr])[-2:] ) # get the opcode from the last 2 digits of the current position
param_modes = [0]*num_params[opcode]
for i in range(num_params[opcode]):
try:
# Set param mode to digit found (scanning right-to-left from opcode)
param_modes[i] = int( str(workTape[ptr])[-3-i] )
except IndexError:
# Default to param mode 0 if no digit is found
param_modes[i] = 0
#:: [1] Addition ::#
if opcode == ADD:
param = [0]*num_params[opcode] # initialize list of parameters
# Param 1 (left addend)
if param_modes[0] == POS:
param[0] = workTape[workTape[ptr+1]] # position mode
elif param_modes[0] == IMM:
param[0] = workTape[ptr+1] # immediate mode
# Param 2 (right addend)
if param_modes[1] == POS:
param[1] = workTape[workTape[ptr+2]] # position mode
elif param_modes[1] == IMM:
param[1] = workTape[ptr+2] # immediate mode
# Param 3 (sum)
if param_modes[2] == POS:
workTape[workTape[ptr+3]] = param[0] + param[1] # set output (position mode)
elif param_modes[2] == IMM:
raise InvalidParameterMode(opcode, 3, param_modes[2], "Immediate mode not supported for output.")
break
ptr += num_params[opcode] + 1 # advance instruction pointer
#:: [2] Multiplication ::#
elif opcode == MUL:
param = [0]*num_params[opcode] # initialize list of parameters
# Param 1 (left multiplicand)
if param_modes[0] == POS:
param[0] = workTape[workTape[ptr+1]] # position mode
elif param_modes[0] == IMM:
param[0] = workTape[ptr+1] # immediate mode
# Param 2 (right multiplicand)
if param_modes[1] == POS:
param[1] = workTape[workTape[ptr+2]] # position mode
elif param_modes[1] == IMM:
param[1] = workTape[ptr+2] # immediate mode
# Param 3 (product)
if param_modes[2] == POS:
workTape[workTape[ptr+3]] = param[0] * param[1] # set output (position mode)
elif param_modes[2] == IMM:
raise InvalidParameterMode(opcode, 3, param_modes[2], "Immediate mode not supported for output.")
break
ptr += num_params[opcode] + 1 # advance instruction pointer
#:: [3] Input ::#
elif opcode == IN:
# Param 1 (position)
if param_modes[0] == POS:
workTape[workTape[ptr+1]] = input # store input at position in parameter (position mode)
elif param_modes[0] == IMM:
raise InvalidParameterMode(opcode, 1, param_modes[0], "Immediate mode not supported for this instruction.")
break
ptr += num_params[opcode] + 1 # advance instruction pointer
#:: [4] Output ::#
elif opcode == OUT:
# Param 1 (position)
if param_modes[0] == POS:
output.append(workTape[workTape[ptr+1]]) # write output (position mode)
elif param_modes[0] == IMM:
output.append(workTape[ptr+1]) # write output (immediate mode)
ptr += num_params[opcode] + 1 # advance instruction pointer
#:: [99] End of Program ::#
elif opcode == END: # Program finished
running = False
else:
raise UnknownOpcode(opcode, ptr, workTape, debugLevel)
return False
return output # output
## Exception Classes ##
class InvalidParameterMode(Exception):
'''Exception raised for an invalid parameter mode.'''
def __init__(self, opcode, position, param_mode, message):
print("[Error] Invalid parameter mode '{}' for parameter {} of opcode {}.\n".format(param_mode, position, opcode))
if message != "":
print(message)
class UnknownOpcode(Exception):
'''Exception raised for an unknown opcode.'''
def __init__(self, opcode, ptr, workTape, debugLevel):
if debugLevel == 1:
print("[Error] Unknown opcode '{}' at location {}. Following instructions: ".format(opcode, ptr, workTape[ptr:ptr+9]))
elif debugLevel == 2:
print("[Error] Unknown opcode '{}' at location {}.".format(opcode, ptr))
print("Current tape state:\n")
print(workTape)
else: # debug level 0
print("[Error] Unknown opcode '{}' at location {}.".format(opcode, ptr))
|
the-stack_0_11187 | from collections import Counter
def read_signals():
file_name = "Data/day8.txt"
file = open(file_name, "r")
signals = []
digits = []
for line in file:
line = line.strip("\n").split(" | ")
signals.append(line[0].split())
digits.append(line[1].split())
return signals, digits
def sort_connections(signals, digits):
output = []
for signal, numbers in zip(signals, digits):
connections = {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0, "g": 0}
letters = {i: set(connections.keys()) for i in range(2,8)}
for segments in signal:
letters[len(segments)] = letters[len(segments)].intersection(set(segments))
connections["a"] = list(letters[3] - letters[2])[0]
connections["f"] = list(letters[6].intersection(letters[2]))[0]
connections["c"] = list(letters[2] - {connections["f"]})[0]
connections["d"] = list(letters[4].intersection(letters[5]))[0]
connections["b"] = list(letters[4].intersection(letters[6]) - {connections["f"]})[0]
connections["g"] = list(letters[5].intersection(letters[6]) - {connections["a"]})[0]
connections["e"] = list(set(connections.keys()) - {connections["a"]}
- {connections["b"]} - {connections["c"]} - {connections["d"]}
- {connections["f"]} - {connections["g"]})[0]
connections = {v: k for k, v in connections.items()}
for number in numbers:
number = set(connections[letter] for letter in number)
if number == {"c", "f"}:
output.append(1)
elif number == {"a", "c", "f"}:
output.append(7)
elif number == {"b", "d", "c", "f"}:
output.append(4)
elif number == {"a", "c", "d", "e", "g"}:
output.append(2)
elif number == {"a", "c", "d", "f", "g"}:
output.append(3)
elif number == {"a", "b", "d", "f", "g"}:
output.append(5)
elif number == {"a", "b", "c", "e", "f", "g"}:
output.append(0)
elif number == {"a", "b", "d", "e", "f", "g"}:
output.append(6)
elif number == {"a", "b", "c", "d", "f", "g"}:
output.append(9)
elif number == {"a", "b", "c", "d", "e", "f", "g"}:
output.append(8)
count = Counter(output)
print(f"Part one: {count[1] + count[4] + count[7] + count[8]}")
total = sum(output[i] * 10**(3 - (i%4)) for i in range(len(output)))
print(f"Part two: {total}")
if __name__ == "__main__":
signals, digits = read_signals()
sort_connections(signals, digits)
|
the-stack_0_11188 | from flask import Flask
from flask_s3_viewer import FlaskS3Viewer
from flask_s3_viewer.aws.ref import Region
import logging
logging.basicConfig(
level=logging.INFO,
format='%(levelname)s: %(asctime)s: %(message)s'
)
app = Flask(__name__)
# For test, disable template caching
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1
app.config['TEMPLATES_AUTO_RELOAD'] = True
# FlaskS3Viewer Init
FS3V_NAMESPACE = 'flask-s3-viewer'
s3viewer = FlaskS3Viewer(
app, # Flask app
namespace=FS3V_NAMESPACE, # namespace be unique
template_namespace='mdl',
object_hostname='http://flask-s3-viewer.com', # file's hostname
config={ # Bucket configs and else
'profile_name': 'test',
'access_key': None,
'secret_key': None,
'region_name': Region.SEOUL.value,
'endpoint_url': None,
'bucket_name': 'hwjeongtest',
'cache_dir': '/tmp/flask_s3_viewer',
'use_cache': True,
'ttl': 86400,
}
)
# Init another one
s3viewer.add_new_one(
object_hostname='http://namespace2.com',
namespace='np2', # namespace be unique
upload_type='presign',
config={
'profile_name': 'test',
'region_name': Region.SEOUL.value,
'bucket_name': 'hwjeongtest'
}
)
# You can see registerd configs
# print(s3viewer.FLASK_S3_VIEWER_BUCKET_CONFIGS)
# You can use boto3's session and client if you want
# print(FlaskS3Viewer.get_boto_client(FS3V_NAMESPACE))
# print(FlaskS3Viewer.get_boto_session(FS3V_NAMESPACE))
# Apply FlaskS3Viewer blueprint
s3viewer.register()
@app.route('/index')
def index ():
return 'Your app index page'
# Usage: python example.py test (run debug mode)
if __name__ == '__main__':
app.run(debug=True, port=3000)
|
the-stack_0_11189 | # -*- coding: utf-8 -*-
from ldap.dn import explode_dn
from node.behaviors import Adopt
from node.behaviors import Alias
from node.behaviors import Attributes
from node.behaviors import DefaultInit
from node.behaviors import NodeChildValidate
from node.behaviors import Nodespaces
from node.behaviors import Nodify
from node.behaviors import OdictStorage
from node.behaviors import Storage
from node.behaviors.alias import DictAliaser
from node.ext.ldap._node import LDAPNode
from node.ext.ldap.base import ensure_text
from node.ext.ldap.interfaces import ILDAPGroupsConfig as IGroupsConfig
from node.ext.ldap.interfaces import ILDAPUsersConfig as IUsersConfig
from node.ext.ldap.scope import BASE
from node.ext.ldap.scope import ONELEVEL
from node.ext.ldap.ugm.defaults import creation_defaults
from node.ext.ldap.ugm.samba import sambaLMPassword
from node.ext.ldap.ugm.samba import sambaNTPassword
from node.ext.ugm import Group as UgmGroup
from node.ext.ugm import Groups as UgmGroups
from node.ext.ugm import Ugm as UgmBase
from node.ext.ugm import User as UgmUser
from node.ext.ugm import Users as UgmUsers
from node.locking import locktree
from node.utils import debug
from plumber import Behavior
from plumber import default
from plumber import finalize
from plumber import override
from plumber import plumb
from plumber import plumbing
from zope.interface import implementer
import ldap
import logging
import six
import time
logger = logging.getLogger('node.ext.ldap')
# group member format
FORMAT_DN = 0
FORMAT_UID = 1
# mapping from object-class to properties
MEMBER_LOOKUP_BY_CLASS = {
'groupOfNames': {
'format': FORMAT_DN,
'attribute': 'member',
},
'groupOfUniqueNames': {
'format': FORMAT_DN,
'attribute': 'uniqueMember',
},
'posixGroup': {
'format': FORMAT_UID,
'attribute': 'memberUid',
},
'group': {
'format': FORMAT_DN,
'attribute': 'member',
},
}
# expiration unit
EXPIRATION_DAYS = 0
EXPIRATION_SECONDS = 1
class AccountExpired(object):
def __nonzero__(self):
return False
__bool__ = __nonzero__
def __repr__(self):
return 'ACCOUNT_EXPIRED'
__str__ = __repr__
ACCOUNT_EXPIRED = AccountExpired()
class PrincipalsConfig(object):
def __init__(
self,
baseDN='',
attrmap={},
scope=ONELEVEL,
queryFilter='',
objectClasses=[],
defaults={},
strict=True,
memberOfSupport=False,
recursiveGroups=False,
memberOfExternalGroupDNs=[],
expiresAttr=None,
expiresUnit=EXPIRATION_DAYS
):
self.baseDN = baseDN
self.attrmap = attrmap
self.scope = scope
self.queryFilter = queryFilter
self.objectClasses = objectClasses
self.defaults = defaults
self.strict = strict
self.memberOfSupport = memberOfSupport
self.recursiveGroups = recursiveGroups
self.memberOfExternalGroupDNs = memberOfExternalGroupDNs
# XXX: currently expiresAttr only gets considered for user
# authentication group and role expiration is not implemented yet.
self.expiresAttr = expiresAttr
self.expiresUnit = expiresUnit
# XXX: member_relation
# self.member_relation = member_relation
@implementer(IUsersConfig)
class UsersConfig(PrincipalsConfig):
"""Define how users look and where they are.
"""
@implementer(IGroupsConfig)
class GroupsConfig(PrincipalsConfig):
"""Define how groups look and where they are.
"""
class RolesConfig(PrincipalsConfig):
"""Define how roles are mapping in LDAP. Basically a role mapping works
like a group mapping, but the id attribute is considered as the role name,
and the members set have this role granted.
"""
@plumbing(
Alias,
NodeChildValidate,
Adopt,
Nodify,
Storage,
)
class PrincipalAliasedAttributes(object):
allow_non_node_children = True
def __init__(self, context, aliaser=None):
"""
:param context: The node whose children to alias
:param aliaser: The aliaser to be used
"""
self.__name__ = context.name
self.__parent__ = None
self.context = context
self.aliaser = aliaser
@property
def storage(self):
return self.context
@property
def changed(self):
return self.context.changed
def __repr__(self):
return "Aliased " + self.context.__repr__()
class AliasedPrincipal(Behavior):
@override
def __init__(self, context, attraliaser):
self.context = context
self.attraliaser = attraliaser
@default
def principal_attributes_factory(self, name=None, parent=None):
aliased_attrs = PrincipalAliasedAttributes(
self.context.attrs,
self.attraliaser
)
return aliased_attrs
attributes_factory = finalize(principal_attributes_factory)
@default
@locktree
def __call__(self):
# add object classes from creation defaults. if missing.
# happens if object classes are added after principals were already
# created with another set of default object classes or if editing
# existing principals from a database not created with this
# API/configuration.
ocs = self.context.attrs['objectClass']
ocs = [ocs] if isinstance(ocs, six.text_type) else ocs
ocsc = len(ocs)
for oc in self.parent.context.child_defaults['objectClass']:
if oc not in ocs:
ocs.append(oc)
# reset object classes only if changed to avoid unnecessary write
# operations to LDAP backend
if ocsc != len(ocs):
self.context.attrs['objectClass'] = ocs
# finally persist
self.context()
class LDAPPrincipal(AliasedPrincipal):
@default
def add_role(self, role):
self.parent.parent.add_role(role, self)
@default
def remove_role(self, role):
self.parent.parent.remove_role(role, self)
@default
@property
def roles(self):
return self.parent.parent.roles(self)
@default
@property
def changed(self):
return self.context.changed
@default
@property
def member_of_attr(self):
"""memberOf is in openldap realized as overlay and in Active
Directory also computed. In case of openldap this attribute is not
delivered in LDAP response unless explicitly queried. Thus a separate
property is used to query memberOf information explicit.
"""
entry = self.context.ldap_session.search(
scope=BASE,
baseDN=self.context.DN,
force_reload=self.context._reload,
attrlist=['memberOf']
)
return entry[0][1].get('memberOf', list())
class LDAPUser(LDAPPrincipal, UgmUser):
@default
@property
def groups(self):
groups = self.parent.parent.groups
return [groups[uid] for uid in self.group_ids if uid in groups]
@default
@property
def group_ids(self):
groups = self.parent.parent.groups
if self.parent.parent.ucfg.memberOfSupport:
group_dns = [groups.context.DN]
group_dns += self.parent.parent.ucfg.memberOfExternalGroupDNs
res = list()
for dn in self.member_of_attr:
dn = ensure_text(dn)
matching_group_dns = {
gdn for gdn in group_dns
if dn.endswith(gdn)
}
if not matching_group_dns:
# Skip DN outside groups base DN
continue
try:
res.append(groups.idbydn(dn))
except KeyError:
# happens if DN is returned which does not fit the groups
# base DN.
pass
else:
member_format = groups._member_format
attribute = groups._member_attribute
# Support LDAP_MATCHING_RULE_IN_CHAIN (recursive/nested groups)
# See https://msdn.microsoft.com/en-us/library/aa746475(v=vs.85).aspx
if self.parent.parent.ucfg.recursiveGroups:
attribute += ':1.2.840.113556.1.4.1941:'
if member_format == FORMAT_DN:
criteria = {attribute: self.context.DN}
elif member_format == FORMAT_UID:
criteria = {attribute: self.context.attrs['uid']}
attrlist = [groups._key_attr]
# if roles configuration points to child of groups container, and
# group configuration has search scope SUBTREE, and groups are
# specified by the same criteria as roles, the search returns the
# role id's as well.
# XXX: such edge cases should be resolved at UGM init time
matches_generator = groups.context.batched_search(
criteria=criteria,
attrlist=attrlist
)
res = [att[groups._key_attr][0] for _, att in matches_generator]
return res
@default
@property
def expired(self):
if not self.parent.expiresAttr:
return False
expires = self.context.attrs.get(self.parent.expiresAttr)
return calculate_expired(self.parent.expiresUnit, expires)
@plumbing(
LDAPUser,
Nodespaces,
Attributes,
Nodify,
)
class User(object):
pass
class LDAPGroupMapping(Behavior):
@override
def __getitem__(self, key):
key = ensure_text(key)
if key not in self:
raise KeyError(key)
return self.related_principals(key)[key]
@override
@locktree
def __delitem__(self, key):
key = ensure_text(key)
if key not in self:
raise KeyError(key)
if self._member_format == FORMAT_DN:
val = self.related_principals(key)[key].context.DN
elif self._member_format == FORMAT_UID:
val = key
# self.context.attrs[self._member_attribute].remove won't work here
# issue in LDAPNodeAttributes, does not recognize changed this way.
members = self.context.attrs[self._member_attribute]
members.remove(val)
self.context.attrs[self._member_attribute] = members
# XXX: call here immediately?
self.context()
@override
def __iter__(self):
return iter(self.member_ids)
@override
def __contains__(self, key):
key = ensure_text(key)
for uid in self:
if uid == key:
return True
return False
@default
@locktree
def add(self, key):
key = ensure_text(key)
if key not in self.member_ids:
val = self.translate_key(key)
# self.context.attrs[self._member_attribute].append won't work here
# issue in LDAPNodeAttributes, does not recognize changed this way.
old = self.context.attrs.get(self._member_attribute, list())
self.context.attrs[self._member_attribute] = old + [val]
# XXX: call here immediately?
# self.context()
@default
@property
def member_ids(self):
ugm = self.parent.parent
if ugm:
# XXX: roles with memberOf use rcfg!
gcfg = ugm.gcfg
if gcfg and gcfg.memberOfSupport:
users = ugm.users
criteria = {'memberOf': self.context.DN}
attrlist = [users._key_attr]
matches_generator = users.context.batched_search(
criteria=criteria,
attrlist=attrlist
)
return [
att[users._key_attr][0] for _, att in matches_generator
]
ret = list()
members = self.context.attrs.get(self._member_attribute, list())
for member in members:
if member in ['nobody', 'cn=nobody']:
continue
ret.append(member)
ret = self.translate_ids(ret)
keys = self.existing_member_ids
ret = [uid for uid in ret if uid in keys]
return ret
@default
@property
def _member_format(self):
return self.parent._member_format
@default
@property
def _member_attribute(self):
return self.parent._member_attribute
class LDAPGroup(LDAPGroupMapping, LDAPPrincipal, UgmGroup):
@default
def related_principals(self, key=None):
return self.parent.parent.users
@default
@property
def users(self):
return [self.parent.parent.users[uid] for uid in self.member_ids]
@default
@property
def existing_member_ids(self):
return self.related_principals().keys()
@default
def translate_ids(self, members):
if self._member_format != FORMAT_DN:
return members
principals = self.related_principals()
translated = list()
for dn in members:
try:
translated.append(principals.idbydn(dn))
except KeyError:
# inexistent DN
pass
return translated
@default
def translate_key(self, key):
ret = None
if self._member_format == FORMAT_DN:
principals = self.related_principals()
# make sure principal is loaded
principal = principals[key]
ret = principal.context.DN
elif self._member_format == FORMAT_UID:
ret = key
return ret
@plumbing(
LDAPGroup,
NodeChildValidate,
Nodespaces,
Attributes,
Nodify,
)
class Group(object):
pass
class LDAPPrincipals(OdictStorage):
principal_attrmap = default(None)
principal_attraliaser = default(None)
@override
def __init__(self, props, cfg):
context = LDAPNode(name=cfg.baseDN, props=props)
context.search_filter = cfg.queryFilter
context.search_scope = int(cfg.scope)
context.child_defaults = dict()
context.child_defaults['objectClass'] = cfg.objectClasses
context.child_defaults.update(cfg.defaults)
for oc in cfg.objectClasses:
for key, val in creation_defaults.get(oc, dict()).items():
if key not in context.child_defaults:
context.child_defaults[key] = val
# if cfg.member_relation:
# context.search_relation = cfg.member_relation
self._rdn_attr = cfg.attrmap['rdn']
self._key_attr = cfg.attrmap['id']
if self._key_attr not in cfg.attrmap:
cfg.attrmap[self._key_attr] = self._key_attr
self._login_attr = cfg.attrmap['id']
if cfg.attrmap.get('login'):
self._login_attr = cfg.attrmap['login']
self.expiresAttr = getattr(cfg, 'expiresAttr', None)
self.expiresUnit = getattr(cfg, 'expiresUnit', None)
self.principal_attrmap = cfg.attrmap
self.principal_attraliaser = DictAliaser(cfg.attrmap, cfg.strict)
self.context = context
@default
def idbydn(self, dn, strict=False):
"""Return a principal's id for a given dn.
Raise KeyError if not enlisted.
"""
# XXX: rename to id_by_dn
# XXX: what was strict good for? remove
# if strict:
# raise KeyError(dn)
try:
search = self.context.ldap_session.search
res = search(baseDN=dn)[0]
return ensure_text(res[1][self._key_attr][0])
except ldap.NO_SUCH_OBJECT:
raise KeyError(dn)
@override
@property
def ids(self):
return list(self.__iter__())
@default
@locktree
def __delitem__(self, key):
principal = self[key]
context = principal.context
del context.parent[context.name]
del self.storage[key]
@default
@locktree
def __getitem__(self, key):
key = ensure_text(key)
try:
return self.storage[key]
except KeyError:
criteria = {self._key_attr: key}
attrlist = ['rdn', self._key_attr]
res = self.context.search(criteria=criteria, attrlist=attrlist)
if not res:
raise KeyError(key)
if len(res) > 1: # pragma: no cover
msg = u'More than one principal with id "{0}" found.'
logger.warning(msg.format(key))
prdn = res[0][1]['rdn']
if prdn in self.context._deleted_children:
raise KeyError(key)
dn = res[0][0]
path = explode_dn(dn)[:len(self.context.DN.split(',')) * -1]
context = self.context
for rdn in reversed(path):
context = context[rdn]
principal = self.principal_factory(
context,
attraliaser=self.principal_attraliaser
)
principal.__name__ = key
principal.__parent__ = self
self.storage[key] = principal
return principal
@default
@locktree
def __iter__(self):
attrlist = ['rdn', self._key_attr]
for principal in self.context.batched_search(attrlist=attrlist):
prdn = principal[1]['rdn']
if prdn in self.context._deleted_children:
continue
yield ensure_text(principal[1][self._key_attr][0])
for principal in self.context._added_children:
yield self.context[principal].attrs[self._key_attr]
@default
@locktree
def __setitem__(self, name, value):
if not isinstance(value, self.principal_factory):
raise ValueError(u"Given value not instance of '{0}'".format(
self.principal_factory.__name__
))
# XXX: check if there is valid user context
exists = False
try:
self[name]
exists = True
except KeyError:
pass
if exists:
raise KeyError(
u"Principal with id '{0}' already exists.".format(name)
)
value.__name__ = name
value.__parent__ = self
self.storage[name] = value
@default
@property
def changed(self):
return self.context.changed
@default
@locktree
def invalidate(self, key=None):
"""Invalidate LDAPPrincipals.
"""
if key is None:
self.context.invalidate()
self.storage.clear()
return
try:
principal = self.storage[key]
principal.context.parent.invalidate(principal.context.name)
del self.storage[key]
except KeyError:
pass
@default
@locktree
def __call__(self):
self.context()
@default
def _alias_dict(self, dct):
ret = dict()
for key, val in six.iteritems(self.principal_attraliaser):
for k, v in six.iteritems(dct):
if val == k:
ret[key] = v
return ret
@default
def _unalias_list(self, lst):
unalias = self.principal_attraliaser.unalias
return [unalias(x) for x in lst]
@default
def _unalias_dict(self, dct):
if dct is None:
return None
unalias = self.principal_attraliaser.unalias
unaliased_dct = dict(
[(unalias(key), val) for key, val in six.iteritems(dct)])
return unaliased_dct
@default
def raw_search(self, criteria=None, attrlist=None,
exact_match=False, or_search=False, or_keys=None,
or_values=None, page_size=None, cookie=None):
search_attrlist = [self._key_attr]
if attrlist is not None and self._key_attr not in attrlist:
search_attrlist += attrlist
try:
results = self.context.search(
criteria=self._unalias_dict(criteria),
attrlist=self._unalias_list(search_attrlist),
exact_match=exact_match,
or_search=or_search,
or_keys=or_keys,
or_values=or_values,
page_size=page_size,
cookie=cookie
)
except ldap.NO_SUCH_OBJECT: # pragma: no cover
logger.debug("LDAPPrincipals.raw_search: ldap.NO_SUCH_OBJECT")
return []
if isinstance(results, tuple):
results, cookie = results
if attrlist is not None:
_results = list()
for _, att in results:
try:
principal_id = att[self._key_attr][0]
except (KeyError, IndexError):
continue
aliased = self._alias_dict(att)
for key in list(aliased.keys()):
if key not in attrlist:
del aliased[key]
_results.append((principal_id, aliased))
results = _results
else:
results = [att[self._key_attr][0] for _, att in results]
if cookie is not None:
return results, cookie
return results
@default
def search(self, criteria=None, attrlist=None,
exact_match=False, or_search=False):
result = []
cookie = ''
while True:
chunk, cookie = self.raw_search(
criteria=criteria,
attrlist=attrlist,
exact_match=exact_match,
or_search=or_search,
page_size=self.context.ldap_session._props.page_size,
cookie=cookie
)
result += chunk
if not cookie:
break
return result
@default
@locktree
def create(self, pid, **kw):
# XXX: mechanism for defining a target container if scope is SUBTREE
# create principal with LDAPNode as context
context = LDAPNode()
principal = self.principal_factory(
context,
attraliaser=self.principal_attraliaser
)
# ensure id on attributes
kw['id'] = pid
# avoid overwriting key attribute if given in kw
if self._key_attr in kw:
del kw[self._key_attr]
# set additional attributes on principal
for k, v in kw.items():
principal.attrs[k] = v
# set principal to self
self[pid] = principal
# if setting principal has been successful, hook up principal context
# to ldap tree
rdn = u'{0}={1}'.format(
self._rdn_attr,
principal.context.attrs[self._rdn_attr]
)
self.context[rdn] = context
# return newly created principal
return self[pid]
def calculate_expired(expiresUnit, expires):
"""Return bool whether expired.
"""
if expires and expires not in ['99999', '-1']:
# check expiration timestamp
expires = int(expires)
# XXX: maybe configurable?
# shadow account specific
# if self.expiresAttr == 'shadowExpire':
# expires += int(user.attrs.get('shadowInactive', '0'))
days = time.time()
if expiresUnit == EXPIRATION_DAYS:
# numer of days since epoch
days /= 86400
if days >= expires:
return True
return False
class LDAPUsers(LDAPPrincipals, UgmUsers):
principal_factory = default(User)
@override
@locktree
def __delitem__(self, key):
user = self[key]
try:
groups = user.groups
except AttributeError:
groups = list()
for group in groups:
del group[user.name]
parent = self.parent
if parent and parent.rcfg is not None:
for role in user.roles:
user.remove_role(role)
context = user.context
del context.parent[context.name]
del self.storage[key]
@default
def id_for_login(self, login):
criteria = {self._login_attr: login}
attrlist = [self._key_attr]
res = self.context.search(criteria=criteria, attrlist=attrlist)
if not res:
return ensure_text(login)
if len(res) > 1: # pragma: no cover
msg = u'More than one principal with login "{0}" found.'
logger.warning(msg.format(login))
return ensure_text(res[0][1][self._key_attr][0])
@default
@debug
def authenticate(self, login=None, pw=None, id=None):
if id is not None:
# bbb. deprecated usage
login = id
user_id = self.id_for_login(login)
criteria = {self._key_attr: user_id}
attrlist = ['dn']
if self.expiresAttr:
attrlist.append(self.expiresAttr)
try:
res = self.context.search(criteria=criteria, attrlist=attrlist)
except ldap.NO_SUCH_OBJECT: # pragma: no cover
return False
if not res:
return False
if len(res) > 1: # pragma: no cover
msg = u'More than one principal with login "{0}" found.'
logger.warning(msg.format(user_id))
if self.expiresAttr:
expires = res[0][1].get(self.expiresAttr)
expires = expires and expires[0] or None
try:
expired = calculate_expired(self.expiresUnit, expires)
except ValueError:
# unknown expires field data
msg = (
u"Accound expiration flag for user '{0}' "
u"contains unknown data"
)
logger.error(msg.format(id))
return False
if expired:
return ACCOUNT_EXPIRED
user_dn = res[0][1]['dn']
session = self.context.ldap_session
authenticated = session.authenticate(user_dn, pw)
return authenticated and user_id or False
@default
@debug
def passwd(self, id, oldpw, newpw):
user_id = self.id_for_login(id)
criteria = {self._key_attr: user_id}
attrlist = ['dn']
if self.expiresAttr:
attrlist.append(self.expiresAttr)
res = self.context.search(criteria=criteria, attrlist=attrlist)
if not res:
raise KeyError(id)
if len(res) > 1: # pragma: no cover
msg = u'More than one principal with login "{0}" found.'
logger.warning(msg.format(user_id))
user_dn = res[0][1]['dn']
self.context.ldap_session.passwd(user_dn, oldpw, newpw)
object_classes = self.context.child_defaults['objectClass']
user_node = self[user_id].context
user_node.attrs.load()
if 'sambaSamAccount' in object_classes:
user_node.attrs['sambaNTPassword'] = sambaNTPassword(newpw)
user_node.attrs['sambaLMPassword'] = sambaLMPassword(newpw)
user_node()
@plumbing(
LDAPUsers,
NodeChildValidate,
Nodespaces,
Adopt,
Attributes,
Nodify,
)
class Users(object):
pass
def member_format(object_classes):
for object_class in MEMBER_LOOKUP_BY_CLASS:
if object_class in object_classes:
return MEMBER_LOOKUP_BY_CLASS[object_class]['format']
raise Exception(
u"Can not lookup member format for object-classes: {0}".format(
object_classes,
)
)
def member_attribute(object_classes):
for object_class in MEMBER_LOOKUP_BY_CLASS:
if object_class in object_classes:
return MEMBER_LOOKUP_BY_CLASS[object_class]['attribute']
raise Exception(
u"Can not lookup member attribute for object-classes: {0}".format(
object_classes,
)
)
class LDAPGroupsMapping(LDAPPrincipals, UgmGroups):
@default
@property
def _member_format(self):
return member_format(self.context.child_defaults['objectClass'])
@default
@property
def _member_attribute(self):
return member_attribute(self.context.child_defaults['objectClass'])
@plumb
def __init__(_next, self, props, cfg):
mem_attr = member_attribute(cfg.objectClasses)
cfg.attrmap[mem_attr] = mem_attr
_next(self, props, cfg)
@plumb
def __setitem__(_next, self, key, value):
# XXX: kick this, dummy member should be created by default value
# callback
if self._member_attribute not in value.attrs:
value.attrs[self._member_attribute] = []
if self._member_format is FORMAT_UID:
value.attrs[self._member_attribute].insert(0, 'nobody')
else:
value.attrs[self._member_attribute].insert(0, 'cn=nobody')
_next(self, key, value)
class LDAPGroups(LDAPGroupsMapping):
principal_factory = default(Group)
@override
@locktree
def __delitem__(self, key):
key = ensure_text(key)
group = self[key]
parent = self.parent
if parent and parent.rcfg is not None:
for role in group.roles:
group.remove_role(role)
context = group.context
del context.parent[context.name]
del self.storage[key]
@plumbing(
LDAPGroups,
NodeChildValidate,
Nodespaces,
Adopt,
Attributes,
Nodify,
)
class Groups(object):
pass
class LDAPRole(LDAPGroupMapping, AliasedPrincipal):
@default
def related_principals(self, key):
ugm = self.parent.parent
if key.startswith('group:'):
return ugm.groups
return ugm.users
@default
@property
def existing_member_ids(self):
ugm = self.parent.parent
users = ugm.users
groups = ugm.groups
ret = [key for key in users]
for key in groups:
ret.append('group:{}'.format(key))
return ret
@default
def translate_ids(self, members):
if self._member_format == FORMAT_DN:
ugm = self.parent.parent
users = ugm.users
groups = ugm.groups
user_members = list()
for dn in members:
try:
user_members.append(users.idbydn(dn, True))
except KeyError:
pass
group_members = list()
for dn in members:
try:
group_members.append('group:{}'.format(groups.idbydn(dn, True)))
except KeyError:
pass
members = user_members + group_members
return members
@default
def translate_key(self, key):
ret = None
if self._member_format == FORMAT_DN:
if key.startswith('group:'):
key = key[6:]
principals = self.parent.parent.groups
else:
principals = self.parent.parent.users
# make sure principal is loaded
principal = principals[key]
ret = principal.context.DN
elif self._member_format == FORMAT_UID:
ret = key
return ret
@override
@locktree
def __getitem__(self, key):
key = ensure_text(key)
if key not in self:
raise KeyError(key)
principals = self.related_principals(key)
if key.startswith('group:'):
key = key[6:]
return principals[key]
@override
@locktree
def __delitem__(self, key):
key = ensure_text(key)
if key not in self:
raise KeyError(key)
principals = self.related_principals(key)
if self._member_format == FORMAT_DN:
real_key = key
if key.startswith('group:'):
real_key = key[6:]
val = principals[real_key].context.DN
elif self._member_format == FORMAT_UID:
val = key
# self.context.attrs[self._member_attribute].remove won't work here
# issue in LDAPNodeAttributes, does not recognize changed this way.
members = self.context.attrs[self._member_attribute]
members.remove(val)
self.context.attrs[self._member_attribute] = members
# XXX: call here immediately?
self.context()
@plumbing(
LDAPRole,
NodeChildValidate,
Nodespaces,
Attributes,
Nodify,
)
class Role(object):
pass
class LDAPRoles(LDAPGroupsMapping):
principal_factory = default(Role)
@plumbing(
LDAPRoles,
NodeChildValidate,
Nodespaces,
Adopt,
Attributes,
Nodify,
)
class Roles(object):
pass
class LDAPUgm(UgmBase):
@override
def __init__(self, name=None, parent=None, props=None,
ucfg=None, gcfg=None, rcfg=None):
"""
:param name: Node name.
:param parent: Node parent.
:param props: LDAPProps instance.
:param ucfg: UsersConfig instance.
:param gcfg: GroupsConfig instance.
:param rcfg: RolesConfig instance.
"""
self.__name__ = name
self.__parent__ = parent
self.props = props
self.ucfg = ucfg
self.gcfg = gcfg
self.rcfg = rcfg
@override
@locktree
def __getitem__(self, key):
if key not in self.storage:
if key == 'users':
self['users'] = Users(self.props, self.ucfg)
elif key == 'groups':
self['groups'] = Groups(self.props, self.gcfg)
return self.storage[key]
@override
@locktree
def __setitem__(self, key, value):
self._chk_key(key)
self.storage[key] = value
@override
def __delitem__(self, key):
raise NotImplementedError(u"Operation forbidden on this node.")
@override
def __iter__(self):
for key in ['users', 'groups']:
yield key
@override
@locktree
def __call__(self):
self.users()
self.groups()
roles_storage = self.roles_storage
if roles_storage is not None:
roles_storage()
@default
@property
def users(self):
return self['users']
@default
@property
def groups(self):
return self['groups']
@default
@property
def roles_storage(self):
return self._roles
@default
@locktree
def roles(self, principal):
uid = self._principal_id(principal)
roles = self._roles
ret = list()
if roles is None:
# XXX: logging
return ret
for role in roles.values():
if uid in role.member_ids:
ret.append(role.name)
return ret
# XXX: Below is the logic for querying roles from LDAP via query. Integrate
# to use this logic whenever roles are queried and the roles node is
# unchanged.
# attribute = roles._member_attribute
# format = roles._member_format
# if format == FORMAT_DN:
# criteria = { attribute: principal.context.DN }
# elif format == FORMAT_UID:
# # XXX: this is hacky. we really need member relations!!!
# if isinstance(principal, Group):
# attrkey = principal.parent.context._rdn_attr
# value = 'group:%s' % principal.context.attrs[attrkey]
# else:
# value = principal.context.attrs['uid']
# criteria = { attribute: value }
# return roles.context.search(criteria=criteria)
@default
@locktree
def add_role(self, rolename, principal):
uid = self._principal_id(principal)
roles = self._roles
if roles is None:
raise ValueError(u"Role support not configured properly")
role = roles.get(rolename)
if role is None:
role = roles.create(rolename)
if uid in role.member_ids:
raise ValueError(u"Principal already has role '{}'".format(rolename))
role.add(uid)
@default
@locktree
def remove_role(self, rolename, principal):
uid = self._principal_id(principal)
roles = self._roles
if roles is None:
raise ValueError(u"Role support not configured properly")
role = roles.get(rolename)
if role is None:
raise ValueError(u"Role not exists '{}'".format(rolename))
if uid not in role.member_ids:
raise ValueError(u"Principal does not has role '{}'".format(rolename))
del role[uid]
if not role.member_ids:
parent = role.parent
del parent[rolename]
@default
@property
def _roles(self):
if 'roles' not in self.storage:
try:
roles = Roles(self.props, self.rcfg)
except Exception:
# XXX: logging
return None
roles.__name__ = 'roles'
roles.__parent__ = self
self.storage['roles'] = roles
return self.storage['roles']
@default
def _principal_id(self, principal):
uid = principal.name
if isinstance(principal, Group):
uid = 'group:{}'.format(uid)
return uid
@default
def _chk_key(self, key):
if key not in ['users', 'groups']:
raise KeyError(key)
@plumbing(
LDAPUgm,
NodeChildValidate,
Nodespaces,
Adopt,
Attributes,
DefaultInit,
Nodify,
OdictStorage,
)
class Ugm(object):
def invalidate(self, key=None):
if key is None:
self.storage.clear()
return
del self.storage[key]
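# A minimal wiring sketch for the plumbed classes above. The LDAPProps import
# path, server coordinates, DNs and attribute names are illustrative
# assumptions; they must match the actual directory layout and are not
# defined in this module.
def example_build_ugm():
    from node.ext.ldap import LDAPProps
    props = LDAPProps(
        uri='ldap://127.0.0.1:12345',
        user='cn=Manager,dc=my-domain,dc=com',
        password='secret',
    )
    ucfg = UsersConfig(
        baseDN='ou=users,dc=my-domain,dc=com',
        attrmap={'id': 'uid', 'rdn': 'uid', 'login': 'uid', 'cn': 'cn', 'mail': 'mail'},
        scope=ONELEVEL,
        queryFilter='(objectClass=inetOrgPerson)',
        objectClasses=['inetOrgPerson'],
    )
    gcfg = GroupsConfig(
        baseDN='ou=groups,dc=my-domain,dc=com',
        attrmap={'id': 'cn', 'rdn': 'cn'},
        scope=ONELEVEL,
        queryFilter='(objectClass=groupOfNames)',
        objectClasses=['groupOfNames'],
    )
    ugm = Ugm(name='ugm', props=props, ucfg=ucfg, gcfg=gcfg, rcfg=None)
    # users and groups behave like mappings keyed by principal id
    for uid in ugm.users:
        print(uid, ugm.users[uid].attrs.get('mail'))
    return ugm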
|
the-stack_0_11192 | import pytest
from diot import Diot
from pyppl import Proc
from pyppl.job import Job
from pyppl.utils import fs
from pyppl.logger import logger, LEVEL_GROUPS
from pyppl_echo import expand_numbers, fileflush, echo_jobs_converter, echo_types_converter, flush, logger_init, job_poll
@pytest.fixture
def fd_fileflush(tmp_path):
tmpfile = tmp_path / 'fileflush.txt'
tmpfile.write_text('')
with open(tmpfile, 'r') as fd_read, open(tmpfile, 'a') as fd_append:
yield fd_read, fd_append
@pytest.fixture(params = range(5))
def fixt_fileflush(request, fd_fileflush):
fd_read, fd_append = fd_fileflush
if request.param == 0:
return Diot(filed = fd_read, residue = '', expt_lines = [], expt_residue = '')
if request.param == 1:
fd_append.write('abcde')
fd_append.flush()
return Diot(filed = fd_read, residue = '', expt_lines = [], expt_residue = 'abcde')
if request.param == 2:
fd_append.write('ccc\ne1')
fd_append.flush()
return Diot(filed = fd_read, residue = 'abcde', expt_lines = ['abcdeccc\n'], expt_residue = 'e1')
if request.param == 3:
fd_append.write('ccc')
fd_append.flush()
return Diot(filed = fd_read, residue = '', end = True, expt_lines = ['ccc\n'], expt_residue = '')
if request.param == 4:
return Diot(filed = fd_read, residue = 'end', end = True, expt_lines = ['end\n'], expt_residue = '')
@pytest.fixture
def job0(tmp_path):
job = Job(0, Proc(
workdir = tmp_path/'pJob',
dirsig = True,
config = Diot(echo_jobs=0, types='stderr')
))
# pretend it's running
job.proc.runtime_config = {'dirsig': True}
fs.mkdir(job.dir)
(job.dir / 'job.script').write_text('')
return job
@pytest.mark.parametrize('numbers,expt',[
('1,2,3,4', [1,2,3,4]),
('1-4', [1,2,3,4]),
('1-4,7,8-10', [1,2,3,4,7,8,9,10]),
])
def test_expand_numbers(numbers, expt):
assert expand_numbers(numbers) == expt
def test_fileflush(fixt_fileflush):
lines, residue = fileflush(
fixt_fileflush.filed, fixt_fileflush.residue, fixt_fileflush.get('end', False))
assert lines == fixt_fileflush.expt_lines
assert residue == fixt_fileflush.expt_residue
@pytest.mark.parametrize('jobs,expected', [
([], []),
([0,1], [0,1]),
(0, [0]),
('0,1', [0,1]),
])
def test_echo_jobs_converter(jobs, expected):
assert echo_jobs_converter(jobs) == expected
@pytest.mark.parametrize('types,expected', [
('', {'stderr': None, 'stdout': None}),
('stderr', {'stderr': None}),
({'all': '^log'}, {'stderr': '^log', 'stdout': '^log'}),
])
def test_echo_types_converter(types, expected):
assert echo_types_converter(types) == expected
def test_flush(job0, caplog):
job0.proc.config.echo_jobs = [1]
flush(job0)
assert '' == caplog.text
assert job0.config.echo_lastout == ''
assert job0.config.echo_lasterr == ''
job0.proc.config.echo_jobs = [0]
job0.proc.config.echo_types = {
'stdout': '', 'stderr': r'^[^&].+$'}
(job0.dir / 'job.stdout').write_text('out: line1\nout: line2')
(job0.dir / 'job.stderr').write_text('err: line1\nerr: line2')
caplog.clear()
flush(job0)
assert 'out: line1' in caplog.text
assert 'err: line1' in caplog.text
assert 'line2' not in caplog.text
assert job0.config.echo_lastout == 'out: line2'
assert job0.config.echo_lasterr == 'err: line2'
(job0.dir / 'job.stderr').write_text(
'err: line1\nerr: line23\n& ignored\npyppl.logger.abc\npyppl.logger.msg: hello world!')
caplog.clear()
job_poll(job0, status = 'done')
#flush(job0, end = True)
assert 'err: line23' in caplog.text
assert '_MSG' in caplog.text
assert '_ABC' in caplog.text
assert 'hello world' in caplog.text
assert 'ignored' not in caplog.text
assert job0.config.echo_lastout == ''
assert job0.config.echo_lasterr == ''
def test_hook():
logger_init(logger)
assert 'STDOUT' in LEVEL_GROUPS['INFO']
assert 'STDERR' in LEVEL_GROUPS['INFO']
|
the-stack_0_11193 | #!/usr/bin/python
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# ltr.py - LISP EID Traceroute Client - Trace the encap/decap paths
#
# Usage: python ltr.py [-s <source-eid>] <destination-EID | DNS-name>
#
# -s: Optional source EID.
# <destination-EID>: required parameter [<iid>] in front is optional
#
# This application is run on an xTR. Typically a ITR or RTR, where the
# encapsulator adds to the ltr message with the RLOC the ITR is encapsulating
# to. Then the decapsulator will decapsulate and swap the source and
# destination addresses to return the packet to the source-EID (running the
# client program). If the ETR is not the EID, then the packet will be re-
# encapsulated in which more data is added to the ltr message.
#
# ltr messages run in UDP on port 2434 (4342 backwards) and are returned
# to the client program.
#
# The LISP-Trace message takes the following path:
#
# (1) ltr sends LISP-TRACE packet from its EID to the EID of the ETR on
# port 2434. It builds a type=9 packet with a nonce and an empty JSON field.
#
# (2) ITR will look up destination EID as part of forwarding logic and add
# RLOC information to LISP-Trace message. The message is encapsulated to
# the ETR.
#
# (3) The ETR (or RTR) will decap the packet. It will add information to the
# LISP-Trace packet. If it is the destination EID, it will send the LISP-Trace
# packet using itself as the source and the original source as the destination.
#
# (4) The local ITR will encapsulate the packet and add RLOC information to
# the LISP-Trace packet. It encapsulates the return packet to the ETR.
#
# (5) The ETR decapsulates the packet and sends it to the ltr client so the
# accumulated JSON data can be displayed for the user.
#
# This functionality works across a chain of encapsulating tunnels to show the
# user which RLOCs are used and the arrival time of the packet at each hop. It
# allows an ltr client to determine not only the path and latency of the
# network, but also whether the encapsulation paths are symmetric or
# asymmetric.
#
# If there is an error along the path, the node detecting the error will return
# the LISP-Trace packet to the RLOC of the originating ITR.
#
# The JSON format of a LISP-Trace packet is an array of dictionary arrays.
# The array will typically have 2 elements, one from the ltr source to the
# destination EID and one for the return path. Each dictionary array is keyed
# with "se", "de", and "paths". The "paths" array holds the node data that is
# appended at each encapsulation hop. Note the example below:
#
# [
# { "se" : "[<iid>]<orig-eid>", "de" : "[<iid>]<dest-eid>", "paths" : a
# [
# { "n" : "ITR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "dts" : "<ts>", "hn" : "<hn>" },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "ETR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>" }, ...
# ] },
#
# { "se" : "[<iid>]<dest-eid>", "de" : "[<iid>]<orig-eid>", "paths" :
# [
# { "n" : "ITR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "dts" : "<ts>", "hn" : "<hn>" },
# { "n" : "RTR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>", "rtts" : [...], "hops" : [...] },
# { "n" : "ETR", "sr" : "<source-rloc>", "dr" : "<dest_rloc>",
# "ets" : "<ts>", "hn" : "<hn>" }, ...
# ] }
# ]
#
# Environment variable LISP_LTR_PORT is used to determine whether the
# connection to the LISP API is made on a particular port. If the port has a
# minus sign in front of it, http rather than https is used to connect to the
# lispers.net API. Environment variables LISP_LTR_USER and LISP_LTR_PW are
# used when the lispers.net API is running with a password on username root.
#
#------------------------------------------------------------------------------
from __future__ import print_function
from future import standard_library
standard_library . install_aliases ( )
from builtins import hex
import sys
import struct
import random
import socket
import json
import time
import os
import binascii
from subprocess import getoutput
if 64 - 64: i11iIiiIii
if 65 - 65: O0 / iIii1I11I1II1 % OoooooooOO - i1IIi
if 73 - 73: II111iiii
if 22 - 22: I1IiiI * Oo0Ooo / OoO0O00 . OoOoOO00 . o0oOOo0O0Ooo / I1ii11iIi11i
if 48 - 48: oO0o / OOooOOo / I11i / Ii1I
if 48 - 48: iII111i % IiII + I1Ii111 / ooOoO0o * Ii1I
if 46 - 46: ooOoO0o * I11i - OoooooooOO
II1iII1i = "https"
oO0oIIII = 8080
if 59 - 59: i1IIi * i1IIi % OOooOOo + II111iiii
II = os . getenv ( "LISP_LTR_PORT" )
if ( II != None ) :
if ( II [ 0 ] == "-" ) :
II1iII1i = "http"
II = II [ 1 : : ]
if 100 - 100: i1IIi . I1Ii111 / IiII * OoooooooOO + I11i * oO0o
if ( II . isdigit ( ) == False ) :
print ( "Invalid value for env variable LISP_LTR_PORT" )
exit ( 1 )
if 99 - 99: iII111i . OOooOOo / iIii1I11I1II1 * iIii1I11I1II1
oO0oIIII = int ( II )
if 11 - 11: oO0o / i1IIi % II111iiii - OoOoOO00
OOo = os . getenv ( "LISP_LTR_USER" )
Ii1IIii11 = os . getenv ( "LISP_LTR_PW" )
if ( OOo == None ) : OOo = "root"
if ( Ii1IIii11 == None ) : Ii1IIii11 = ""
if 55 - 55: iIii1I11I1II1 - I1IiiI . Ii1I * IiII * i1IIi / iIii1I11I1II1
OOo000 = 2434
if 82 - 82: I11i . I1Ii111 / IiII % II111iiii % iIii1I11I1II1 % IiII
if 86 - 86: OoOoOO00 % I1IiiI
if 80 - 80: OoooooooOO . I1IiiI
if 87 - 87: oO0o / ooOoO0o + I1Ii111 - ooOoO0o . ooOoO0o / II111iiii
if 11 - 11: I1IiiI % o0oOOo0O0Ooo - Oo0Ooo
if 58 - 58: i11iIiiIii % I1Ii111
if 54 - 54: OOooOOo % O0 + I1IiiI - iII111i / I11i
if 31 - 31: OoO0O00 + II111iiii
if 13 - 13: OOooOOo * oO0o * I1IiiI
if 55 - 55: II111iiii
if 43 - 43: OoOoOO00 - i1IIi + I1Ii111 + Ii1I
if 17 - 17: o0oOOo0O0Ooo
if 64 - 64: Ii1I % i1IIi % OoooooooOO
if 3 - 3: iII111i + O0
if 42 - 42: OOooOOo / i1IIi + i11iIiiIii - Ii1I
if 78 - 78: OoO0O00
if 18 - 18: O0 - iII111i / iII111i + ooOoO0o % ooOoO0o - IiII
if 62 - 62: iII111i - IiII - OoOoOO00 % i1IIi / oO0o
if 77 - 77: II111iiii - II111iiii . I1IiiI / o0oOOo0O0Ooo
if 14 - 14: I11i % O0
if 41 - 41: i1IIi + I1Ii111 + OOooOOo - IiII
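#
# The LISP-Trace header this client sends is laid out as follows:
#
#   32-bit word: htonl(0x90000000 + <client UDP port>)
#   32-bit word: the local RLOC, in network byte order
#   64-bit word: a random nonce, echoed back in the reply
#
# The sketch below is an illustrative, unused restatement of that layout in
# plain style (it relies on the struct/socket/random imports above); the
# obfuscated helper this client actually calls follows it and builds the same
# header, returning (nonce, packet).
#
def build_trace_header_sketch(rloc, port):
    # Type/port word: 0x9 in the top nibble plus the client's ephemeral port.
    header = struct.pack("I", socket.htonl(0x90000000 + port))
    # Local RLOC packed as a 32-bit value in network byte order.
    o1, o2, o3, o4 = [int(b) for b in rloc.split(".")]
    header += struct.pack("I", socket.htonl((o1 << 24) | (o2 << 16) | (o3 << 8) | o4))
    # 64-bit nonce used to match the reply to this request.
    nonce = random.randint(0, (2 ** 64) - 1)
    header += struct.pack("Q", nonce)
    return (nonce, header)
#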
def oO ( rloc , port ) :
OO0OOooOoO0Oo = socket . htonl ( 0x90000000 + port )
iiIIiIiIi = struct . pack ( "I" , OO0OOooOoO0Oo )
if 38 - 38: Ii1I / Oo0Ooo
OooO0 = rloc . split ( "." )
II11iiii1Ii = int ( OooO0 [ 0 ] ) << 24
II11iiii1Ii += int ( OooO0 [ 1 ] ) << 16
II11iiii1Ii += int ( OooO0 [ 2 ] ) << 8
II11iiii1Ii += int ( OooO0 [ 3 ] )
iiIIiIiIi += struct . pack ( "I" , socket . htonl ( II11iiii1Ii ) )
if 70 - 70: oO0o / iIii1I11I1II1 % ooOoO0o % i11iIiiIii . I1IiiI
O0o0Oo = random . randint ( 0 , ( 2 ** 64 ) - 1 )
iiIIiIiIi += struct . pack ( "Q" , O0o0Oo )
return ( O0o0Oo , iiIIiIiIi )
if 78 - 78: iIii1I11I1II1 - Ii1I * OoO0O00 + o0oOOo0O0Ooo + iII111i + iII111i
if 11 - 11: iII111i - OoO0O00 % ooOoO0o % iII111i / OoOoOO00 - OoO0O00
if 74 - 74: iII111i * O0
if 89 - 89: oO0o + Oo0Ooo
if 3 - 3: i1IIi / I1IiiI % I11i * i11iIiiIii / O0 * I11i
if 49 - 49: oO0o % Ii1I + i1IIi . I1IiiI % I1ii11iIi11i
if 48 - 48: I11i + I11i / II111iiii / iIii1I11I1II1
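#
# Parse the LISP-Trace reply: verify the 0x90000000 type word, skip the RLOC
# word, check that the echoed 64-bit nonce matches the one we sent, and decode
# the trailing JSON path data. Returns {} on any mismatch or decode error.
#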
def i1iiI11I ( nonce , packet ) :
if ( len ( packet ) < 12 ) : return ( False )
if 29 - 29: OoooooooOO
iI = "II"
I1i1I1II = struct . calcsize ( iI )
OO0OOooOoO0Oo , i1 = struct . unpack ( iI , packet [ : I1i1I1II ] )
packet = packet [ I1i1I1II : : ]
if ( socket . ntohl ( OO0OOooOoO0Oo ) != 0x90000000 ) :
print ( "Invalid LISP-Trace message" )
return ( { } )
if 48 - 48: O0 + O0 - I1ii11iIi11i . ooOoO0o / iIii1I11I1II1
if 77 - 77: i1IIi % OoOoOO00 - IiII + ooOoO0o
iI = "Q"
I1i1I1II = struct . calcsize ( iI )
I11iiIiii = struct . unpack ( iI , packet [ : I1i1I1II ] ) [ 0 ]
packet = packet [ I1i1I1II : : ]
if 1 - 1: II111iiii - I11i / I11i
if 46 - 46: Ii1I * OOooOOo - OoO0O00 * oO0o - I1Ii111
if 83 - 83: OoooooooOO
if 31 - 31: II111iiii - OOooOOo . I1Ii111 % OoOoOO00 - O0
if ( I11iiIiii != nonce ) :
print ( "Invalid nonce, sent {}, received {}" . format ( nonce , I11iiIiii ) )
return ( { } )
if 4 - 4: II111iiii / ooOoO0o . iII111i
if 58 - 58: OOooOOo * i11iIiiIii / OoOoOO00 % I1Ii111 - I1ii11iIi11i / oO0o
if ( len ( packet ) == 0 ) :
print ( "No JSON data in payload" )
return ( { } )
if 50 - 50: I1IiiI
if 34 - 34: I1IiiI * II111iiii % iII111i * OoOoOO00 - I1IiiI
if 33 - 33: o0oOOo0O0Ooo + OOooOOo * OoO0O00 - Oo0Ooo / oO0o % Ii1I
if 21 - 21: OoO0O00 * iIii1I11I1II1 % oO0o * i1IIi
if 16 - 16: O0 - I1Ii111 * iIii1I11I1II1 + iII111i
try :
Ii11iII1 = json . loads ( packet )
except :
print ( "Invalid JSON data: '{}'" . format ( packet ) )
return ( { } )
if 51 - 51: II111iiii * OoO0O00 % o0oOOo0O0Ooo * II111iiii % I1ii11iIi11i / ooOoO0o
return ( Ii11iII1 )
if 49 - 49: o0oOOo0O0Ooo
if 35 - 35: OoOoOO00 - OoooooooOO / I1ii11iIi11i % i1IIi
if 78 - 78: I11i
if 71 - 71: OOooOOo + ooOoO0o % i11iIiiIii + I1ii11iIi11i - IiII
if 88 - 88: OoOoOO00 - OoO0O00 % OOooOOo
if 16 - 16: I1IiiI * oO0o % IiII
if 86 - 86: I1IiiI + Ii1I % i11iIiiIii * oO0o . ooOoO0o * I11i
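#
# Pretty-print the per-hop path records: for each direction, show the node
# type (ITR/RTR/ETR), encap or decap timestamp, source/destination RLOCs, and
# hostname, plus the recent RTT, hop-count, and latency stats when present.
#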
def i1I11i1iI ( jd ) :
for I1ii1Ii1 in jd :
iii11 = I1ii1Ii1 [ "se" ] if ( jd . index ( I1ii1Ii1 ) == 0 ) else oOOOOo0 ( I1ii1Ii1 [ "se" ] )
iiII1i1 = oOOOOo0 ( I1ii1Ii1 [ "de" ] ) if ( jd . index ( I1ii1Ii1 ) == 0 ) else I1ii1Ii1 [ "de" ]
if 66 - 66: OOooOOo - I11i
print ( "Path from {} to {}:" . format ( iii11 , iiII1i1 ) )
for I1i1III in I1ii1Ii1 [ "paths" ] :
if ( "ets" in I1i1III ) :
OO0O0OoOO0 = I1i1III [ "ets" ]
iiiI1I11i1 = "encap"
if 49 - 49: I1IiiI % ooOoO0o . ooOoO0o . I11i * ooOoO0o
if ( "dts" in I1i1III ) :
OO0O0OoOO0 = I1i1III [ "dts" ]
iiiI1I11i1 = "decap"
if 97 - 97: Ii1I + o0oOOo0O0Ooo . OOooOOo + I1ii11iIi11i % iII111i
oo0O = I1i1III [ "hn" ]
o0 = I1i1III [ "dr" ]
if ( o0 . find ( "?" ) != - 1 ) : o0 = oo0oOo ( o0 )
if 89 - 89: OoOoOO00
print ( " {} {}: {} -> {}, ts {}, node {}" . format ( I1i1III [ "n" ] , iiiI1I11i1 , I1i1III [ "sr" ] , o0 , OO0O0OoOO0 , OO0oOoOO0oOO0 ( oo0O ) ) )
if 86 - 86: OOooOOo
if 55 - 55: Oo0Ooo + iIii1I11I1II1 / OoOoOO00 * oO0o - i11iIiiIii - Ii1I
if ( "rtts" in I1i1III and "hops" in I1i1III and "lats" in I1i1III ) :
ii1ii1ii = json . dumps ( I1i1III [ "rtts" ] )
ii1ii1ii = ii1ii1ii . replace ( "-1" , "?" )
oooooOoo0ooo = json . dumps ( I1i1III [ "hops" ] )
oooooOoo0ooo = oooooOoo0ooo . replace ( "u" , "" )
oooooOoo0ooo = oooooOoo0ooo . replace ( "'" , "" )
oooooOoo0ooo = oooooOoo0ooo . replace ( '"' , "" )
I1I1IiI1 = json . dumps ( I1i1III [ "lats" ] )
I1I1IiI1 = I1I1IiI1 . replace ( "u" , "" )
I1I1IiI1 = I1I1IiI1 . replace ( "'" , "" )
I1I1IiI1 = I1I1IiI1 . replace ( '"' , "" )
print ( " " , end = ' ' )
print ( "recent-rtts {}, recent-hops {}" . format ( ii1ii1ii , oooooOoo0ooo ) )
print ( " recent-latencies {}" . format ( I1I1IiI1 ) )
if 5 - 5: o0oOOo0O0Ooo * ooOoO0o + OoOoOO00 . OOooOOo + OoOoOO00
if 91 - 91: O0
print ( "" )
if 61 - 61: II111iiii
if 64 - 64: ooOoO0o / OoOoOO00 - O0 - I11i
if 86 - 86: I11i % OoOoOO00 / I1IiiI / OoOoOO00
if 42 - 42: OoO0O00
if 67 - 67: I1Ii111 . iII111i . O0
if 10 - 10: I1ii11iIi11i % I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
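#
# Split an optional "[<iid>]" prefix off an EID string and resolve DNS names
# to addresses. Returns (iid, eid, flag), where the flag is True when no
# explicit instance-ID was supplied and "0" was used as the default.
#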
def o0oO ( eid ) :
IIiIi1iI = True
if 35 - 35: Ii1I % O0 - O0
if 16 - 16: II111iiii % OoOoOO00 - II111iiii + Ii1I
if 12 - 12: OOooOOo / OOooOOo + i11iIiiIii
if 40 - 40: I1IiiI . iIii1I11I1II1 / I1IiiI / i11iIiiIii
if 75 - 75: I11i + o0oOOo0O0Ooo
O0i1II1Iiii1I11 = eid . find ( "]" )
if ( O0i1II1Iiii1I11 == - 1 ) :
IIII = "0"
else :
IIiIi1iI = False
IIII = eid [ 1 : O0i1II1Iiii1I11 ]
eid = eid [ O0i1II1Iiii1I11 + 1 : : ]
if 32 - 32: OoooooooOO / iIii1I11I1II1 - o0oOOo0O0Ooo
if 91 - 91: iII111i % i1IIi % iIii1I11I1II1
if 20 - 20: OOooOOo % Ii1I / Ii1I + Ii1I
if 45 - 45: oO0o - IiII - OoooooooOO - OoO0O00 . II111iiii / O0
if 51 - 51: O0 + iII111i
if ( eid . find ( ":" ) == - 1 ) :
try : eid = socket . gethostbyname ( eid )
except : pass
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
return ( IIII , eid , IIiIi1iI )
if 48 - 48: O0
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
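#
# Return True if the EID falls within eid_prefix/ml. The EID is masked down to
# the mask length and compared with the prefix string; IPv4 and IPv6 are both
# handled.
#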
def i1I1iI1iIi111i ( eid , eid_prefix , ml ) :
iiIi1IIi1I = 2 ** ml - 1
if 84 - 84: ooOoO0o * II111iiii + Oo0Ooo
O0ooO0Oo00o = eid . split ( "." )
if ( len ( O0ooO0Oo00o ) == 1 ) : O0ooO0Oo00o = eid . split ( ":" )
if ( len ( O0ooO0Oo00o ) == 1 ) : return ( False )
if 77 - 77: iIii1I11I1II1 * OoO0O00
if ( len ( O0ooO0Oo00o ) == 4 ) :
iiIi1IIi1I = iiIi1IIi1I << ( 32 - ml )
eid = int ( O0ooO0Oo00o [ 0 ] ) << 24 | int ( O0ooO0Oo00o [ 1 ] ) << 16 | int ( O0ooO0Oo00o [ 2 ] ) << 8 | int ( O0ooO0Oo00o [ 3 ] )
O0ooO0Oo00o = eid & iiIi1IIi1I
eid = "{}.{}.{}.{}" . format ( ( O0ooO0Oo00o >> 24 ) & 0xff , ( O0ooO0Oo00o >> 16 ) & 0xff ,
( O0ooO0Oo00o >> 8 ) & 0xff , O0ooO0Oo00o & 0xff )
else :
iiIi1IIi1I = iiIi1IIi1I << ( 128 - ml )
eid = socket . inet_pton ( socket . AF_INET6 , eid )
eid = int ( binascii . hexlify ( eid ) , 16 )
O0ooO0Oo00o = eid & iiIi1IIi1I
eid = binascii . unhexlify ( hex ( O0ooO0Oo00o ) [ 2 : - 1 ] )
eid = socket . inet_ntop ( socket . AF_INET6 , eid )
if 95 - 95: I1IiiI + i11iIiiIii
return ( eid == eid_prefix )
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
if 83 - 83: I11i . i11iIiiIii + II111iiii . o0oOOo0O0Ooo * I11i
if 53 - 53: II111iiii
if 31 - 31: OoO0O00
if 80 - 80: I1Ii111 . i11iIiiIii - o0oOOo0O0Ooo
if 25 - 25: OoO0O00
if 62 - 62: OOooOOo + O0
if 98 - 98: o0oOOo0O0Ooo
if 51 - 51: Oo0Ooo - oO0o + II111iiii * Ii1I . I11i + oO0o
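#
# Query the lispers.net database-mapping API via curl on localhost. Returns
# the IID, EID, first RLOC, and whether that RLOC is NAT-translated, either
# for the first local EID of the requested address family or, when match_iid
# and match_eid are supplied, for the database entry covering that EID.
#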
def OoO0o ( match_iid , match_eid , user , pw , http , port , v4v6 ) :
oO0o0Ooooo = ( "curl --silent --insecure -u {}:{} {}://localhost:{}/lisp/" + "api/data/database-mapping" ) . format ( user , pw , http , port )
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
oO0 = getoutput ( oO0o0Ooooo )
if 75 - 75: ooOoO0o + OoOoOO00 + o0oOOo0O0Ooo * I11i % oO0o . iII111i
try :
oOI1Ii1I1 = json . loads ( oO0 )
except :
return ( None , None , None , None )
if 28 - 28: O0 * Oo0Ooo - OOooOOo % iIii1I11I1II1 * Ii1I - i11iIiiIii
if 7 - 7: Oo0Ooo + oO0o - I1Ii111 % Ii1I + I1ii11iIi11i
for ooo0OOOoo in oOI1Ii1I1 :
if ( ( "eid-prefix" in ooo0OOOoo ) == False ) : continue
I1Ii1 = ooo0OOOoo [ "eid-prefix" ]
if 46 - 46: O0 + iII111i % I1IiiI / o0oOOo0O0Ooo . IiII * I11i
if 93 - 93: o0oOOo0O0Ooo % i1IIi . Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if ( I1Ii1 . count ( "'" ) == 2 ) : continue
if ( I1Ii1 . count ( "." ) != 3 and I1Ii1 . find ( ":" ) == - 1 ) : continue
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
I1Ii1 , O0oO = I1Ii1 . split ( "/" )
IIII , I1Ii1 , IIiIi1iI = o0oO ( I1Ii1 )
if ( v4v6 and I1Ii1 . find ( "." ) == - 1 ) : continue
if ( v4v6 == False and I1Ii1 . find ( ":" ) == - 1 ) : continue
if 73 - 73: I1ii11iIi11i * i11iIiiIii % oO0o . I1ii11iIi11i
i1 = ooo0OOOoo [ "rlocs" ] [ 0 ] [ "rloc" ]
OOOOo0 = "translated-rloc" in ooo0OOOoo [ "rlocs" ] [ 0 ]
if 49 - 49: II111iiii % O0 . OoOoOO00 + oO0o / I1IiiI
if ( match_iid == None ) : return ( IIII , I1Ii1 , i1 , OOOOo0 )
if 72 - 72: ooOoO0o * Oo0Ooo . I1IiiI - II111iiii + i1IIi
iIi1ii = i1I1iI1iIi111i ( match_eid , I1Ii1 , int ( O0oO ) )
if ( match_iid == IIII and iIi1ii ) :
return ( None , None , i1 , OOOOo0 )
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
return ( None , None , None , None )
if 97 - 97: O0 + OoOoOO00
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
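#
# Query the lispers.net map-cache API and return the RLOC addresses named
# "RTR" from the 0.0.0.0/0 entry. These are the RTRs the NAT-traversal
# LISP-Trace copies are sent to.
#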
def oO00O000oO0 ( user , pw , http , port ) :
oO0o0Ooooo = ( "curl --silent --insecure -u {}:{} {}://localhost:{}/lisp/" + "api/data/map-cache" ) . format ( user , pw , http , port )
if 79 - 79: I11i - OoooooooOO - oO0o - iIii1I11I1II1 * OOooOOo
oO0 = getoutput ( oO0o0Ooooo )
if 4 - 4: i11iIiiIii . OoooooooOO / OoO0O00 % I1Ii111 % I11i * O0
try :
oOI1Ii1I1 = json . loads ( oO0 )
except :
return ( [ ] )
if 14 - 14: OOooOOo / o0oOOo0O0Ooo
if 32 - 32: I1IiiI * Oo0Ooo
O0OooOo0o = [ ]
for ooo0OOOoo in oOI1Ii1I1 :
if ( "group-prefix" in ooo0OOOoo ) : continue
if ( ( "eid-prefix" in ooo0OOOoo ) == False ) : continue
if ( ooo0OOOoo [ "eid-prefix" ] != "0.0.0.0/0" ) : continue
if 29 - 29: I1IiiI % I1IiiI
for i1 in ooo0OOOoo [ "rloc-set" ] :
if ( ( "rloc-name" in i1 ) == False ) : continue
if ( i1 [ "rloc-name" ] != "RTR" ) : continue
if ( ( "address" in i1 ) == False ) : continue
O0OooOo0o . append ( i1 [ "address" ] )
if 94 - 94: iIii1I11I1II1 / Oo0Ooo % iII111i * iII111i * II111iiii
if 29 - 29: OoO0O00 + OoOoOO00 / o0oOOo0O0Ooo / OOooOOo * iIii1I11I1II1
return ( O0OooOo0o )
if 62 - 62: OOooOOo / oO0o - OoO0O00 . I11i
if 11 - 11: I1ii11iIi11i . OoO0O00 * IiII * OoooooooOO + ooOoO0o
if 33 - 33: O0 * o0oOOo0O0Ooo - I1Ii111 % I1Ii111
if 18 - 18: I1Ii111 / Oo0Ooo * I1Ii111 + I1Ii111 * i11iIiiIii * I1ii11iIi11i
if 11 - 11: ooOoO0o / OoOoOO00 - IiII * OoooooooOO + OoooooooOO . OoOoOO00
if 26 - 26: Ii1I % I1ii11iIi11i
if 76 - 76: IiII * iII111i
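#
# ANSI terminal helpers: the three small functions below render strings in
# bold, bold blue, and bold red respectively.
#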
def oOOOOo0 ( string ) :
return ( "\033[1m" + string + "\033[0m" )
if 52 - 52: OOooOOo
if 19 - 19: I1IiiI
if 25 - 25: Ii1I / ooOoO0o
if 31 - 31: OOooOOo . O0 % I1IiiI . o0oOOo0O0Ooo + IiII
if 71 - 71: I1Ii111 . II111iiii
if 62 - 62: OoooooooOO . I11i
if 61 - 61: OoOoOO00 - OOooOOo - i1IIi
def OO0oOoOO0oOO0 ( string ) :
return ( "\033[94m" + oOOOOo0 ( string ) + "\033[0m" )
if 25 - 25: O0 * I11i + I1ii11iIi11i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 58 - 58: I1IiiI
if 53 - 53: i1IIi
if 59 - 59: o0oOOo0O0Ooo
if 81 - 81: OoOoOO00 - OoOoOO00 . iII111i
if 73 - 73: I11i % i11iIiiIii - I1IiiI
if 7 - 7: O0 * i11iIiiIii * Ii1I + ooOoO0o % OoO0O00 - ooOoO0o
def oo0oOo ( string ) :
return ( "\033[91m" + oOOOOo0 ( string ) + "\033[0m" )
if 39 - 39: Oo0Ooo * OOooOOo % OOooOOo - OoooooooOO + o0oOOo0O0Ooo - I11i
if 23 - 23: i11iIiiIii
if 30 - 30: o0oOOo0O0Ooo - i1IIi % II111iiii + I11i * iIii1I11I1II1
if 81 - 81: IiII % i1IIi . iIii1I11I1II1
if 4 - 4: i11iIiiIii % OoO0O00 % i1IIi / IiII
if 6 - 6: iII111i / I1IiiI % OOooOOo - I1IiiI
if 31 - 31: OOooOOo
if 23 - 23: I1Ii111 . IiII
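#
# Reject multicast destination EIDs (224.0.0.0/4 for IPv4, ff00::/8 for IPv6);
# the trace only supports unicast EIDs.
#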
def OO0000o ( deid , v4v6 ) :
if ( v4v6 ) :
i1I1i1 = int ( deid . split ( "." ) [ 0 ] )
if ( i1I1i1 < 224 or i1I1i1 >= 240 ) : return
else :
if ( deid [ 0 : 2 ] . lower ( ) != "ff" ) : return
if 81 - 81: ooOoO0o - iIii1I11I1II1 - i1IIi / I1Ii111 - O0 * I11i
print ( "Multicast EID not supported" )
exit ( 1 )
if 20 - 20: oO0o % IiII
if 19 - 19: I1ii11iIi11i % IiII + ooOoO0o / I1Ii111 . ooOoO0o
if 12 - 12: i1IIi + i1IIi - I1ii11iIi11i * Oo0Ooo % Oo0Ooo - II111iiii
if 52 - 52: ooOoO0o . iII111i + I1Ii111
if 38 - 38: i1IIi - II111iiii . I1Ii111
if 58 - 58: I1IiiI . iII111i + OoOoOO00
if 66 - 66: iII111i / oO0o * OoooooooOO + OoooooooOO % I11i
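#
# Main flow: parse the command line, find the local EID and RLOC through the
# lispers.net API, optionally send copies of the trace to any NAT-traversal
# RTRs, send the LISP-Trace toward the destination EID, wait up to 3 seconds
# for the reply, and print the accumulated per-hop JSON path data.
#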
if ( "-s" in sys . argv ) :
IIii1111 = len ( sys . argv ) != 4
else :
IIii1111 = len ( sys . argv ) != 2
if 42 - 42: I11i / o0oOOo0O0Ooo . oO0o + oO0o % OoOoOO00 + i11iIiiIii
if ( IIii1111 ) :
print ( "Usage: python ltr.py [-s <source-eid>] <destination-EID | DNS-name>" )
exit ( 1 )
if 56 - 56: o0oOOo0O0Ooo
if 28 - 28: iII111i . iII111i % iIii1I11I1II1 * iIii1I11I1II1 . o0oOOo0O0Ooo / iII111i
iII1i1 , O0oOOoooOO0O , IIiIi1iI = o0oO ( sys . argv [ - 1 ] )
if ( iII1i1 == None ) :
print ( "<destinaton-eid> parse error" )
exit ( 1 )
if 86 - 86: o0oOOo0O0Ooo
i1Iii11Ii1i1 = O0oOOoooOO0O . find ( ":" ) == - 1
if 59 - 59: Oo0Ooo % OoooooooOO . iII111i / IiII + I1IiiI
if 76 - 76: ooOoO0o
if 73 - 73: O0 * iII111i + Ii1I + ooOoO0o
if 40 - 40: II111iiii . OoOoOO00 * I1Ii111 + OOooOOo + OOooOOo
if 9 - 9: I11i % OoooooooOO . oO0o % I11i
OO0000o ( O0oOOoooOO0O , i1Iii11Ii1i1 )
if 32 - 32: i11iIiiIii
if 31 - 31: iIii1I11I1II1 / OoO0O00 / I1ii11iIi11i
if 41 - 41: Oo0Ooo
if 10 - 10: Oo0Ooo / Oo0Ooo / I1Ii111 . I1Ii111
if ( "-s" in sys . argv ) :
O0i1II1Iiii1I11 = sys . argv . index ( "-s" ) + 1
OOoo , iIIiiiI , IIiIi1iI = o0oO ( sys . argv [ O0i1II1Iiii1I11 ] )
if ( OOoo == None ) :
print ( "-s <source-eid> parse error" )
exit ( 1 )
if 60 - 60: I1IiiI . I1Ii111
if ( IIiIi1iI ) : OOoo = None
IiI111ii1ii , O0OOo , i1 , OOOOo0 = OoO0o ( OOoo , iIIiiiI , OOo , Ii1IIii11 , II1iII1i , oO0oIIII , i1Iii11Ii1i1 )
if ( i1 == None ) :
print ( "[{}]{} not a local EID, maybe lispers.net API pw/port wrong" . format ( OOoo , iIIiiiI ) )
if 38 - 38: iIii1I11I1II1 + I1ii11iIi11i - OOooOOo - ooOoO0o - OoOoOO00
exit ( 1 )
if 71 - 71: OOooOOo / Ii1I % OoO0O00
else :
OOoo , iIIiiiI , i1 , OOOOo0 = OoO0o ( None , None , OOo , Ii1IIii11 , II1iII1i , oO0oIIII , i1Iii11Ii1i1 )
if ( OOoo == None ) :
print ( "Could not find local EID, maybe lispers.net API pw/port wrong?" )
exit ( 1 )
if 50 - 50: OOooOOo / Ii1I % ooOoO0o . OoOoOO00
if 41 - 41: OOooOOo * Ii1I - IiII + o0oOOo0O0Ooo
if 64 - 64: Ii1I
if 66 - 66: i11iIiiIii - OOooOOo * Oo0Ooo
if 76 - 76: i11iIiiIii + o0oOOo0O0Ooo / I1ii11iIi11i - OoO0O00 - Ii1I + I1ii11iIi11i
if 51 - 51: iIii1I11I1II1 . ooOoO0o + iIii1I11I1II1
iII1i1 = OOoo if iII1i1 == "0" else iII1i1
if ( iII1i1 != OOoo ) :
print ( "Instance-IDs must be the same for source and destination EIDs" )
exit ( 1 )
if 95 - 95: I1IiiI
if 46 - 46: OoOoOO00 + OoO0O00
if 70 - 70: iII111i / iIii1I11I1II1
if 85 - 85: OoooooooOO % i1IIi * OoooooooOO / I1ii11iIi11i
if 96 - 96: OoooooooOO + oO0o
iiII1i11i = socket . socket ( socket . AF_INET6 , socket . SOCK_DGRAM )
iiII1i11i . bind ( ( "0::0" , 0 ) )
iiII1i11i . settimeout ( 3 )
II = iiII1i11i . getsockname ( ) [ 1 ]
if 11 - 11: I1IiiI / II111iiii + o0oOOo0O0Ooo * I1ii11iIi11i - I1ii11iIi11i - I1IiiI
if 85 - 85: I11i % oO0o / iIii1I11I1II1 . iIii1I11I1II1
if 31 - 31: o0oOOo0O0Ooo % OoO0O00
if 14 - 14: oO0o / oO0o % ooOoO0o
O0o0Oo , iiIIiIiIi = oO ( i1 , II )
if 56 - 56: I1IiiI . O0 + Oo0Ooo
if 1 - 1: iII111i
if 97 - 97: OOooOOo + iII111i + O0 + i11iIiiIii
if 77 - 77: o0oOOo0O0Ooo / OoooooooOO
if ( OOOOo0 ) :
O0OooOo0o = oO00O000oO0 ( OOo , Ii1IIii11 , II1iII1i , oO0oIIII )
for IIii11I1i1I in O0OooOo0o :
print ( "Send NAT-traversal LISP-Trace to RTR {} ..." . format ( IIii11I1i1I ) )
iiII1i11i . sendto ( iiIIiIiIi , ( "::ffff:" + IIii11I1i1I , OOo000 ) )
if 99 - 99: iII111i
if 76 - 76: OoO0O00 * I1IiiI
if 82 - 82: Ii1I * iII111i / I1ii11iIi11i
print ( "Send round-trip LISP-Trace between EIDs [{}]{} and [{}]{} ..." . format ( OOoo , iIIiiiI , iII1i1 , O0oOOoooOO0O ) )
if 36 - 36: OoooooooOO - i1IIi . O0 / II111iiii + o0oOOo0O0Ooo
if 33 - 33: II111iiii / ooOoO0o * O0 % Ii1I * I1Ii111
O0o = O0oOOoooOO0O if ( O0oOOoooOO0O . find ( ":" ) != - 1 ) else "::ffff:" + O0oOOoooOO0O
OO0O0OoOO0 = time . time ( )
if 72 - 72: OOooOOo % I1ii11iIi11i + OoO0O00 / oO0o + IiII
if 10 - 10: I1Ii111 / ooOoO0o + i11iIiiIii / Ii1I
if 74 - 74: OOooOOo + O0 + i1IIi - i1IIi + II111iiii
if 83 - 83: I1ii11iIi11i - I1IiiI + OOooOOo
try :
iiII1i11i . sendto ( iiIIiIiIi , ( O0o , OOo000 ) )
except socket . error as O0ooO0Oo00o :
print ( "sock.sendto() failed: {}" . format ( O0ooO0Oo00o ) )
exit ( 1 )
if 5 - 5: Ii1I
if 46 - 46: IiII
if 45 - 45: ooOoO0o
if 21 - 21: oO0o . I1Ii111 . OOooOOo / Oo0Ooo / I1Ii111
if 17 - 17: OOooOOo / OOooOOo / I11i
try :
iiIIiIiIi , ii1 = iiII1i11i . recvfrom ( 9000 )
ii1 = ii1 [ 0 ] . replace ( "::ffff:" , "" )
except socket . timeout :
exit ( 1 )
except socket . error as O0ooO0Oo00o :
print ( "sock.recvfrom() failed, error: {}" . format ( O0ooO0Oo00o ) )
exit ( 1 )
if 1 - 1: ooOoO0o % iIii1I11I1II1 + Oo0Ooo . iIii1I11I1II1 % I1IiiI
if 89 - 89: Ii1I
ooOoOO0OoO00o = round ( time . time ( ) - OO0O0OoOO0 , 3 )
if 11 - 11: Oo0Ooo - I1IiiI * II111iiii . I1ii11iIi11i . oO0o
print ( "Received reply from {}, rtt {} secs" . format ( ii1 , ooOoOO0OoO00o ) )
print ( "" )
Ii11iII1 = i1iiI11I ( O0o0Oo , iiIIiIiIi )
if ( Ii11iII1 == { } ) : exit ( 1 )
if 61 - 61: iII111i % I1IiiI - o0oOOo0O0Ooo - II111iiii % O0
if 90 - 90: iIii1I11I1II1 + I1ii11iIi11i + ooOoO0o - I1Ii111 * IiII . I1ii11iIi11i
if 37 - 37: ooOoO0o % i11iIiiIii % II111iiii . O0 . Ii1I
if 51 - 51: OoO0O00 - O0 % oO0o - II111iiii
i1I11i1iI ( Ii11iII1 )
if 31 - 31: iII111i / Oo0Ooo - iII111i - OOooOOo
iiII1i11i . close ( )
exit ( 0 )
if 7 - 7: iII111i % O0 . OoOoOO00 + I1IiiI - I11i
if 75 - 75: I11i
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
the-stack_0_11194 |
def lantern_fish(filename, days):
# Data structure that will contain occurrences of fish
occurrences = [0, 0, 0, 0, 0, 0, 0, 0, 0]
# Retrieval of data from the input file
with open(filename, 'r', encoding='utf-8') as values:
for value in values:
fish_list = value.split(",")
# Data entry into the data structure of occurrences of fish
for fish in fish_list:
occurrences[int(fish)] += 1
# Solution algorithm
for day in range(1, days+1):
tmp = occurrences[0]
for i in range(0, len(occurrences)-1):
occurrences[i] = occurrences[i+1]
occurrences[8] = tmp
occurrences[6] += tmp
# Returns the number of fish
return sum(occurrences)
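
# Hypothetical usage sketch: the input filename and day counts below are
# assumptions; 80 and 256 days match the two parts of the Advent of Code 2021
# day 6 puzzle this solver appears to target.
if __name__ == '__main__':
    print(lantern_fish('input.txt', 80))
    print(lantern_fish('input.txt', 256))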
|
the-stack_0_11198 | # Copyright (c) Facebook, Inc. and its affiliates.
import logging
import torch
import tqdm
from multimodelity.common.sample import Sample
from multimodelity.datasets.multimodelity_dataset import multimodelityDataset
from multimodelity.utils.distributed import is_master
logger = logging.getLogger(__name__)
class VQA2Dataset(multimodelityDataset):
def __init__(self, config, dataset_type, imdb_file_index, *args, **kwargs):
if "name" in kwargs:
name = kwargs["name"]
elif "dataset_name" in kwargs:
name = kwargs["dataset_name"]
else:
name = "vqa2"
super().__init__(name, config, dataset_type, index=imdb_file_index)
self._should_fast_read = self.config.get("fast_read", False)
self.use_ocr = self.config.use_ocr
self.use_ocr_info = self.config.use_ocr_info
def init_processors(self):
super().init_processors()
if not self._use_features:
self.image_db.transform = self.image_processor
def try_fast_read(self):
# Don't fast read in case of test set.
if self._dataset_type == "test":
return
if hasattr(self, "_should_fast_read") and self._should_fast_read is True:
logger.info(
f"Starting to fast read {self.dataset_name} {self.dataset_type} "
+ "dataset"
)
self.cache = {}
for idx in tqdm.tqdm(
range(len(self.annotation_db)), miniters=100, disable=not is_master()
):
self.cache[idx] = self.load_item(idx)
def __getitem__(self, idx):
if self._should_fast_read is True and self._dataset_type != "test":
return self.cache[idx]
else:
return self.load_item(idx)
def load_item(self, idx):
sample_info = self.annotation_db[idx]
current_sample = Sample()
if "question_tokens" in sample_info:
text_processor_argument = {
"tokens": sample_info["question_tokens"],
"text": sample_info["question_str"],
}
else:
text_processor_argument = {"text": sample_info["question"]}
processed_question = self.text_processor(text_processor_argument)
current_sample.text = processed_question["text"]
if "input_ids" in processed_question:
current_sample.update(processed_question)
current_sample.question_id = torch.tensor(
sample_info["question_id"], dtype=torch.int
)
if isinstance(sample_info["image_id"], int):
current_sample.image_id = torch.tensor(
sample_info["image_id"], dtype=torch.int
)
else:
current_sample.image_id = sample_info["image_id"]
if "question_tokens" in sample_info:
current_sample.text_len = torch.tensor(
len(sample_info["question_tokens"]), dtype=torch.int
)
if self._use_features:
features = self.features_db[idx]
if hasattr(self, "transformer_bbox_processor"):
features["image_info_0"] = self.transformer_bbox_processor(
features["image_info_0"]
)
current_sample.update(features)
else:
image_path = sample_info["image_name"] + ".jpg"
current_sample.image = self.image_db.from_path(image_path)["images"][0]
# Add details for OCR like OCR bbox, vectors, tokens here
current_sample = self.add_ocr_details(sample_info, current_sample)
# Depending on whether we are using soft copy this can add
# dynamic answer space
current_sample = self.add_answer_info(sample_info, current_sample)
return current_sample
def add_ocr_details(self, sample_info, sample):
if self.use_ocr:
# Preprocess OCR tokens
ocr_tokens = [
self.ocr_token_processor({"text": token})["text"]
for token in sample_info["ocr_tokens"]
]
# Get embeddings for tokens
context = self.context_processor({"tokens": ocr_tokens})
sample.context = context["text"]
sample.context_tokens = context["tokens"]
sample.context_feature_0 = context["text"]
sample.context_info_0 = Sample()
sample.context_info_0.max_features = context["length"]
order_vectors = torch.eye(len(sample.context_tokens))
order_vectors[context["length"] :] = 0
sample.order_vectors = order_vectors
if self.use_ocr_info and "ocr_info" in sample_info:
sample.ocr_bbox = self.bbox_processor({"info": sample_info["ocr_info"]})[
"bbox"
]
return sample
def add_answer_info(self, sample_info, sample):
if "answers" in sample_info:
answers = sample_info["answers"]
answer_processor_arg = {"answers": answers}
if self.use_ocr:
answer_processor_arg["tokens"] = sample_info["ocr_tokens"]
processed_soft_copy_answers = self.answer_processor(answer_processor_arg)
# sample.answers = processed_soft_copy_answers["answers"]
sample.targets = processed_soft_copy_answers["answers_scores"]
return sample
def idx_to_answer(self, idx):
return self.answer_processor.convert_idx_to_answer(idx)
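    # Answer indices at or above the fixed answer vocabulary size refer to OCR
    # tokens copied from the image (soft-copy answers); they are mapped back to
    # the corresponding context token below, falling back to "unanswerable"
    # when the token is padding.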
def format_for_prediction(self, report):
answers = report.scores.argmax(dim=1)
predictions = []
answer_space_size = self.answer_processor.get_true_vocab_size()
for idx, question_id in enumerate(report.question_id):
answer_id = answers[idx].item()
if answer_id >= answer_space_size:
answer_id -= answer_space_size
answer = report.context_tokens[idx][answer_id]
if answer == self.context_processor.PAD_TOKEN:
answer = "unanswerable"
else:
answer = self.answer_processor.idx2word(answer_id)
# actual_answer = report.answers[idx]
predictions.append(
{
"question_id": question_id.item(),
"answer": answer,
# "actual_answers": actual_answer,
# "question_tokens": report.question_tokens[idx],
# "image_id": report.image_id[idx].item()
}
)
return predictions
|
the-stack_0_11202 | import argparse, random
parser = argparse.ArgumentParser()
required = parser.add_argument_group('required arguments')
## user inputs required
required.add_argument('-l', '--len', help='length of random sequences', dest='length', required=True)
required.add_argument('-n', '--num', help='number of random sequences', dest='number', required=True)
args = parser.parse_args()
k=int(args.number)
l=int(args.length)
dna = ["A","G","C","T"]
database=[]
while len(database)<k:
randseq=""
for i in range(0,l):
randseq+=random.choice(dna)
if randseq not in database:
database.append(randseq)
print(database)
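# Example invocation (assuming this script is saved as randseq.py):
#   python randseq.py -l 10 -n 5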
|