#!/usr/bin/env python3
# PYTHONPATH is set properly when loading a workspace.
# This package needs to be installed first.
import pyaudio
import numpy
import threading
from datetime import datetime, timedelta
import queue
# ROS
import rospy
from opentera_webrtc_ros_msgs.msg import PeerAudio
p = pyaudio.PyAudio()
output_device_index = 0
class AudioWriter:
def __init__(self, peer_id: str):
self._peer_id = peer_id
self._audio_queue = queue.Queue()
self._quit_event = threading.Event()
self._thread = threading.Thread(target=self._run)
self._lastPushTime = datetime.now()
def get_last_push(self):
return self._lastPushTime
def push_audio(self, audio: PeerAudio, timeout=None):
self._audio_queue.put(audio, timeout=timeout)
# print('PUSH', datetime.now().timestamp(), self._audio_queue.qsize())
self._lastPushTime = datetime.now()
def pull_audio(self, timeout=None):
audio = self._audio_queue.get(timeout=timeout)
# print('PULL', datetime.now().timestamp(), self._audio_queue.qsize())
return audio
def _run(self):
print('Thread_run', self._peer_id)
stream = None
        while not self._quit_event.is_set():
try:
# Write data (should get 10ms frames)
audio = self.pull_audio(timeout=0.010)
if audio:
if audio.frame.format == 'signed_16':
if stream is None:
stream = p.open(format=pyaudio.paInt16,
channels=audio.frame.channel_count,
rate=audio.frame.sampling_frequency,
output_device_index=output_device_index,
frames_per_buffer=int(audio.frame.frame_sample_count * 20),
output=True)
# Fill buffer with zeros ?
# for _ in range(10):
# stream.write(numpy.zeros(audio.frame.frame_sample_count, dtype=numpy.int16))
stream.write(audio.frame.data)
else:
print('Unsupported format: ', audio.frame.format, self._peer_id)
except queue.Empty as e:
# An exception will occur when queue is empty
pass
if stream:
stream.close()
print('Thread done!', self._peer_id)
def start(self):
self._quit_event.clear()
self._thread.start()
def stop(self):
if self._thread.is_alive():
self._quit_event.set()
print('Waiting for thread', self._peer_id)
self._thread.join()
class AudioMixerROS:
def __init__(self):
self._subscriber = rospy.Subscriber('/webrtc_audio', PeerAudio, self._on_peer_audio, queue_size=100)
self._writers = dict()
# Cleanup timer every second
self._timer = rospy.Timer(rospy.Duration(1), self._on_cleanup_timeout)
def shutdown(self):
self._timer.shutdown()
for writer in self._writers:
print('stopping writer', writer)
self._writers[writer].stop()
def _on_cleanup_timeout(self, event):
# Cleanup old threads ...
peers_to_delete = []
# Store since we cannot remove while iterating
for peer_id in self._writers:
if self._writers[peer_id].get_last_push() + timedelta(seconds=15) < datetime.now():
peers_to_delete.append(peer_id)
# Remove old peers
for peer in peers_to_delete:
self._writers[peer].stop()
del self._writers[peer]
def _on_peer_audio(self, audio: PeerAudio):
peer_id = audio.sender.id
if peer_id not in self._writers:
# Create new writer thread
writer = AudioWriter(peer_id)
self._writers[peer_id] = writer
# Start thread
writer.start()
# Push audio
self._writers[peer_id].push_audio(audio)
if __name__ == '__main__':
for index in range(p.get_device_count()):
info = p.get_device_info_by_index(index)
if info['name'] == 'default':
output_device_index = info['index']
# Init ROS
rospy.init_node('opentera_webrtc_audio_mixer', anonymous=True)
mixer = AudioMixerROS()
rospy.spin()
mixer.shutdown()
|
import os
import tkinter
from tkinter import *
class SimpleDialog:
def __init__(self, master,
text='', buttons=[], default=None, cancel=None,
title=None, class_=None):
if class_:
self.root = Toplevel(master, class_=class_)
else:
self.root = Toplevel(master)
if title:
self.root.title(title)
self.root.iconname(title)
self.message = Message(self.root, text=text, aspect=400)
self.message.pack(expand=1, fill=BOTH)
self.frame = Frame(self.root)
self.frame.pack(fill=BOTH)
self.num = default
self.cancel = cancel
self.default = default
self.root.bind('<Return>', self.return_event)
for num in range(len(buttons)):
s = buttons[num]
b = Button(self.frame, text=s,
command=(lambda self=self, num=num: self.done(num)))
b.config(relief=RIDGE, borderwidth=1)
b.pack(side=TOP, fill=BOTH)
self.root.protocol('WM_DELETE_WINDOW', self.wm_delete_window)
self._set_transient(master)
def _set_transient(self, master, relx=0.5, rely=0.3):
widget = self.root
widget.withdraw() # Remain invisible while we figure out the geometry
widget.transient(master)
widget.update_idletasks() # Actualize geometry information
if master.winfo_ismapped():
m_width = master.winfo_width()
m_height = master.winfo_height()
m_x = master.winfo_rootx()
m_y = master.winfo_rooty()
else:
m_width = master.winfo_screenwidth()
m_height = master.winfo_screenheight()
m_x = m_y = 0
w_width = widget.winfo_reqwidth()
w_height = widget.winfo_reqheight()
x = m_x + (m_width - w_width) * relx
y = m_y + (m_height - w_height) * rely
if x+w_width > master.winfo_screenwidth():
x = master.winfo_screenwidth() - w_width
elif x < 0:
x = 0
if y+w_height > master.winfo_screenheight():
y = master.winfo_screenheight() - w_height
elif y < 0:
y = 0
widget.geometry("+%d+%d" % (x, y))
widget.deiconify() # Become visible at the desired location
def go(self):
self.root.wait_visibility()
self.root.grab_set()
self.root.mainloop()
self.root.destroy()
return self.num
def return_event(self, event):
if self.default is None:
self.root.bell()
else:
self.done(self.default)
def wm_delete_window(self):
if self.cancel is None:
self.root.bell()
else:
self.done(self.cancel)
def done(self, num):
self.num = num
self.root.quit()
def finddesktop():
return os.path.join(os.path.expanduser("~"),'Desktop')
def findfile_desktop(end=''):
dtop = os.path.join(os.path.expanduser("~"),'Desktop')
r = []
for i in os.listdir(dtop):
if i.lower().endswith(end.lower()):
r.append(i)
return r
if __name__ == '__main__':
root = Tk()
gifs = findfile_desktop(end='')
s = SimpleDialog(root,buttons=gifs)
v = s.go()
print(gifs[v])
root.mainloop() |
#!/usr/bin/env python3
import xlrd
import csv
import requests
from bs4 import BeautifulSoup
import os
import subprocess as subp
import face_recognition
R = '\033[31m' # red
G = '\033[32m' # green
C = '\033[36m' # cyan
W = '\033[0m' # white
session = requests.session()
session.proxies = {}
session.proxies['http'] = 'socks5h://localhost:9050'
session.proxies['https'] = 'socks5h://localhost:9050'
url_media = []
foln = []
results = []
record = []
fol = ''
def file():
global file
file = input(C + '[+] ' + G + 'Please Enter File Name or Location -> ')
if '.txt' in str(file):
Text()
elif '.csv' in str(file):
Csv()
elif '.xlsx' in str(file):
Excel()
else:
print(R +"[!] File Type Is Not Supported." + W )
def Text():
with open(file, 'r') as torfile:
torfile = torfile.readlines()
url2 = [x.replace('\n', '') for x in torfile]
try:
try:
for res in url2:
print('\n' + C + '[>] ' + G + 'Scraping Url -> ' + R + '{}'.format(res) + W)
                if 'http://' in res:
                    response = session.get(res).text
                else:
                    response = session.get('http://'+res).text
soup = BeautifulSoup(response, 'lxml')
tags = soup.find_all('img')
                if len(tags) < 1:
                    print(R + '[!] No Media Found...' + W)
                    continue
                for tag in tags:
                    urls = tag.get('src')
                    print(C + "[>] " + W + str(urls))
                    media = 'http://' + str(res) + '/' + str(urls)
                    if '//' in media:
                        media = media.replace('//', '/')
                        media = media.replace('http:/', 'http://')
                    url_media.append(media)
except requests.exceptions.ConnectionError as e:
print( '\n' + R +'[!] Connection Error in {}'.format(res) + W)
pass
except requests.exceptions.InvalidURL as e:
print( '\n' + R +'[!] Invalid URL{}'.format(res) + W)
pass
down = input(C + '[+] '+ G + 'Download Media (y/n) -> ')
if down == 'y':
Download()
else:
print(R + '[!] Exiting...' + W)
exit()
def Csv():
with open(file, newline='') as inputfile:
for row in csv.reader(inputfile):
results.append(row[0])
for res in results:
print('\n' + C + '[>] ' + G + 'Scraping Url -> ' + R + '{}'.format(res) + W)
try:
try:
if 'http://' in res:
response = session.get(res).text
else:
response = session.get('http://'+res).text
soup = BeautifulSoup(response, 'lxml' )
tags = soup.find_all('img')
                for tag in tags:
                    urls = tag.get('src')
                    print(C + "[>] " + W + str(urls))
                    media = 'http://' + str(res) + '/' + str(urls)
                    if '//' in media:
                        media = media.replace('//', '/')
                        media = media.replace('http:/', 'http://')
                    url_media.append(media)
except requests.exceptions.ConnectionError as e:
print( '\n' + R +'[!] Connection Error in {}'.format(res) + W)
pass
except requests.exceptions.InvalidURL as e:
print( '\n' + R +'[!] Invalid URL{}'.format(res) + W)
pass
    print('\n' + R + '[>] ' + G + 'Total Images -> {}'.format(len(url_media)) + W)
down = input(C + '[+] '+ G + 'Download Media (y/n) -> ')
if down == 'y':
Download()
else:
print(R + '[!] Exiting...' + W)
exit()
def Excel():
workbook = xlrd.open_workbook('{}'.format(file))
sh = workbook.sheet_names()
print(G + '[>]' + C + ' Sheet Names -> {}'.format(sh) + W)
shn = input(C + '[+] ' + G + 'Please Enter the Sheet Name For Urls -> ' +W)
worksheet = workbook.sheet_by_name('{}'.format(shn))
total_rows = worksheet.nrows
total_cols = worksheet.ncols
for x in range(total_rows):
for y in range(total_cols):
record.append(worksheet.cell(x,y).value)
for res in record:
if '.onion' in res:
print('\n' + C + '[>] ' + G + 'Scraping Url -> ' + R + '{}'.format(res) + W)
try:
try:
if 'http://' in res:
response = session.get(res).text
else:
response = session.get('http://'+res).text
soup = BeautifulSoup(response, 'lxml' )
tags = soup.find_all('img')
                    if len(tags) < 1:
                        print(R + '[!] No Media Found...' + W)
                        continue
                    for tag in tags:
                        urls = tag.get('src')
                        print(C + "[>] " + W + str(urls))
                        media = 'http://' + str(res) + '/' + str(urls)
                        if '//' in media:
                            media = media.replace('//', '/')
                            media = media.replace('http:/', 'http://')
                        url_media.append(media)
except requests.exceptions.ConnectionError as e:
print( '\n' + R +'[!] Connection Error in {}'.format(res) + W)
pass
except requests.exceptions.InvalidURL as e:
print( '\n' + R +'[!] Invalid URL{}'.format(res) + W)
pass
    print('\n' + R + '[>] ' + G + 'Total Images -> {}'.format(len(url_media)) + W)
down = input(C + '[+] '+ G + 'Download Media (y/n) -> ')
if down == 'y':
Download()
else:
print(R + '[!] Exiting...' + W)
exit()
def Download():
global fol
fol = input('\n' + R + '[>] ' + G + 'Enter Folder Name -> ' +W)
    os.makedirs('Media/{}'.format(fol), exist_ok=True)
for item in url_media:
m = item.split('/')[-1]
        if m.endswith(('.png', '.jpg', '.gif')):
r =session.get(item)
with open('Media/{}/{}'.format(fol,m), 'wb') as f:
f.write(r.content)
print('\n' + C + '[>] ' + R + 'All Files Downloaded in Media/{}'.format(fol) +W)
    face = input(C + '[+] '+ G + 'Do You Want To Search Images (y/n) -> ')
if face == 'y':
face_re()
else:
print(R + '[!] Exiting...' + W)
exit()
def face_re():
known_folder = input(C + '[+] '+ G + 'Enter the Known Images Folder Name or Location -> ')
unknown_folder =input(C + '[+] '+ G + 'Enter the Check Images Folder Name or Location -> ')
search = subp.Popen(['face_recognition', '{}'.format(known_folder), '{}'.format(unknown_folder)], stdout=subp.PIPE, stderr=subp.PIPE)
output = search.communicate()[0].decode('utf-8')
output = output.splitlines()
for name in output:
if 'person' in name:
pass
else:
name = name.split(',')[1]
            print('\n' + C + '[>] ' + G + 'Image Found -> ' + R + '{}'.format(name) + W) |
from .naive_bayes import NaiveBayes |
# vim: set tabstop=4 expandtab :
###############################################################################
# Copyright (c) 2019-2021 ams AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Authors:
# - Thomas Winkler, ams AG, [email protected]
import configparser
import glob
import os
import os.path
import platform
import socket
import subprocess
import sys
import types
from ctypes import CDLL
from importlib.machinery import SourceFileLoader
from pathlib import Path
from typing import List, Tuple
from dottmi.dottexceptions import DottException
from dottmi.target_mem import TargetMemModel
from dottmi.utils import log, log_setup, singleton
class DottHooks(object):
_pre_connect_hook: types.FunctionType = None
@classmethod
def set_pre_connect_hook(cls, pre_connect_hook: types.FunctionType) -> None:
cls._pre_connect_hook = pre_connect_hook
@classmethod
def exec_pre_connect_hook(cls) -> None:
if cls._pre_connect_hook is not None:
cls._pre_connect_hook()
# ----------------------------------------------------------------------------------------------------------------------
@singleton
class Dott(object):
def __init__(self) -> None:
self._default_target = None
self._all_targets: List = []
# initialize logging subsystem
log_setup()
# read and pre-process configuration file
DottConf.parse_config()
# the port number used by the internal auto port discovery; discovery starts at config's gdb server port
self._next_gdb_srv_port: int = int(DottConf.conf['gdb_server_port'])
# Hook called before the first debugger connection is made
DottHooks.exec_pre_connect_hook()
self._default_target = self.create_target(DottConf.conf['device_name'], DottConf.conf['jlink_serial'])
def _get_next_srv_port(self, srv_addr: str) -> int:
"""
Find the next triplet of free ("bind-able") TCP ports on the given server IP address.
        Ports are automatically advanced until a free port triplet is found.
Args:
srv_addr: IP address of the server.
Returns:
Returns the first port number of the discovered, free port triplet.
"""
port = self._next_gdb_srv_port
sequentially_free_ports = 0
start_port = 0
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((srv_addr, port))
sequentially_free_ports += 1
if sequentially_free_ports == 1:
start_port = port
except socket.error:
# log.debug(f'Can not bind port {port} as it is already in use.')
sequentially_free_ports = 0
finally:
s.close()
if sequentially_free_ports > 2:
# JLINK GDB server needs 3 free ports in a row
break
port += 1
if port >= 65535:
                raise DottException(f'Unable to find three (consecutive) free ports for IP {srv_addr}!')
self._next_gdb_srv_port = start_port + sequentially_free_ports
if self._next_gdb_srv_port > 65500:
self._next_gdb_srv_port = int(DottConf.conf['gdb_server_port'])
return start_port
def create_gdb_server(self, dev_name: str, jlink_serial: str = None, srv_addr: str = None, srv_port: int = -1) -> 'GdbServer':
"""
        Factory method to create a new GDB server instance. The following parameters are taken from DottConf:
        gdb_server_binary, jlink_interface, device_endianess, jlink_speed, and jlink_server_addr.
Args:
dev_name: Device name as in JLinkDevices.xml
jlink_serial: JLINK serial number.
srv_addr: Server address.
            srv_port: TCP port the GDB server shall listen on; -1 selects the configured or auto-discovered port.
Returns:
The created GdbServer instance.
"""
from dottmi.gdb import GdbServerJLink
if srv_port == -1:
srv_port = int(DottConf.conf['gdb_server_port'])
if srv_addr is None:
srv_addr = DottConf.conf['gdb_server_addr']
if srv_addr is None:
# if gdb server is launched by DOTT, we determine the port ourselves
srv_port = self._get_next_srv_port('127.0.0.1')
gdb_server = GdbServerJLink(DottConf.conf['gdb_server_binary'],
srv_addr,
srv_port,
dev_name,
DottConf.conf['jlink_interface'],
DottConf.conf['device_endianess'],
DottConf.conf['jlink_speed'],
jlink_serial,
DottConf.conf['jlink_server_addr'])
return gdb_server
def create_target(self, dev_name: str, jlink_serial: str = None) -> 'Target':
from dottmi import target
from dottmi.gdb import GdbClient
srv_addr = DottConf.conf['gdb_server_addr']
try:
gdb_server = self.create_gdb_server(dev_name, jlink_serial, srv_addr=srv_addr)
# start GDB Client
gdb_client = GdbClient(DottConf.conf['gdb_client_binary'])
gdb_client.connect()
# create target instance and set GDB server address
target = target.Target(gdb_server, gdb_client)
except TimeoutError:
target = None
# add target to list of created targets to enable proper cleanup on shutdown
if target:
self._all_targets.append(target)
return target
@property
def target(self):
return self._default_target
@target.setter
def target(self, target: object):
raise ValueError('Target can not be set directly.')
def shutdown(self) -> None:
for t in self._all_targets:
t.disconnect()
self._all_targets = []
# ----------------------------------------------------------------------------------------------------------------------
# For backwards compatibility reasons the Dott() singleton can also be accessed via the all lowercase dott function.
def dott() -> Dott:
return Dott()
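# A minimal usage sketch (illustrative only; the import path is an assumption based
# on this module's package name):
#
#     from dottmi.dott import dott
#     target = dott().target   # default target created from the DottConf settings
#     dott().shutdown()        # disconnect all targets created by this singleton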
# ----------------------------------------------------------------------------------------------------------------------
# Central Dott configuration registry. Data is read in from dott ini file. Additional settings can be made via
# project specific conftest files.
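# Illustrative sketch of programmatic configuration (e.g., from a project conftest.py;
# the ELF path below is a placeholder and the import path is an assumption):
#
#     from dottmi.dott import DottConf
#     DottConf.set('app_load_elf', 'build/my_app.elf')
#     DottConf.set('jlink_speed', '4000')
#
# Keys set this way take precedence over dott.ini, since parse_config() only copies
# ini values for keys that are not already present.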
class DottConf:
conf = {}
dott_runtime = None
@staticmethod
def set(key: str, val: str) -> None:
DottConf.conf[key] = val
@staticmethod
def set_runtime_if_unset(dott_runtime_path: str) -> None:
if not os.path.exists(dott_runtime_path):
raise ValueError(f'Provided DOTT runtime path ({dott_runtime_path}) does not exist.')
if os.environ.get('DOTTRUNTIME') is None:
os.environ['DOTTRUNTIME'] = dott_runtime_path
@staticmethod
def get(key: str):
return DottConf.conf[key]
@staticmethod
def _setup_runtime():
DottConf.set('DOTTRUNTIME', None)
dott_runtime_path = sys.prefix + os.sep + 'dott_data'
if os.path.exists(dott_runtime_path):
runtime_version: str = 'unknown'
with Path(dott_runtime_path + '/apps/version.txt').open() as f:
line = f.readline()
while line:
if 'version:' in line:
runtime_version = line.lstrip('version:').strip()
break
line = f.readline()
os.environ['DOTTGDBPATH'] = str(Path(f'{dott_runtime_path}/apps/gdb/bin'))
os.environ['PYTHONPATH27'] = str(Path(f'{dott_runtime_path}/apps/python27/python-2.7.13'))
DottConf.set('DOTTRUNTIME', f'{dott_runtime_path} (dott-runtime package)')
DottConf.set('DOTT_RUNTIME_VER', runtime_version)
DottConf.set('DOTTGDBPATH', str(Path(f'{dott_runtime_path}/apps/gdb/bin')))
DottConf.set('PYTHONPATH27', str(Path(f'{dott_runtime_path}/apps/python27/python-2.7.13')))
            # Linux: check if libpython2.7 and libncurses5 are installed. Windows: They are included in the DOTT runtime.
if platform.system() == 'Linux':
res = os.system(str(Path(f'{dott_runtime_path}/apps/gdb/bin/arm-none-eabi-gdb-py')))
if res != 0:
raise DottException('Unable to start gdb client. This might be caused by missing dependencies.\n'
'Make sure that libpython2.7 and libncurses5 are installed.')
# If DOTTRUNTIME is set in the environment it overrides the integrated runtime in dott_data
if os.environ.get('DOTTRUNTIME') is not None and os.environ.get('DOTTRUNTIME').strip() != '':
dott_runtime_path = os.environ.get('DOTTRUNTIME')
dott_runtime_path = dott_runtime_path.strip()
DottConf.set('DOTTRUNTIME', dott_runtime_path)
if not os.path.exists(dott_runtime_path):
raise ValueError(f'Provided DOTT runtime path ({dott_runtime_path}) does not exist.')
try:
DottConf.dott_runtime = SourceFileLoader('dottruntime', dott_runtime_path + os.sep + 'dottruntime.py').load_module()
DottConf.dott_runtime.setup()
DottConf.set('DOTT_RUNTIME_VER', DottConf.dott_runtime.DOTT_RUNTIME_VER)
except Exception as ex:
                raise Exception('Error setting up DOTT runtime.') from ex
if DottConf.get('DOTTRUNTIME') is None:
raise Exception('Runtime components neither found in DOTT data path nor in DOTTRUNTIME folder.')
@staticmethod
def _get_jlink_path(segger_paths: List[str], segger_lib_name: str, jlink_gdb_server_binary: str) -> Tuple[str, str, int]:
all_libs = {}
for search_path in segger_paths:
libs = glob.glob(os.path.join(search_path, '**', segger_lib_name), recursive=True)
for lib in libs:
try:
if not os.path.exists(f'{os.path.dirname(lib)}{os.path.sep}{jlink_gdb_server_binary}'):
# Skip dirs which contain a JLINK dll but no GDB server executable (e.g., Ozone install folders).
continue
clib = CDLL(lib)
except OSError:
                    # Note: On Linux, Segger provides symlinks in the x86 folder to the 32bit version of the
# JLink library using the 64bit library name. Attempting to load this library on a 64bit system
# results in an exception.
continue
ver = clib.JLINKARM_GetDLLVersion()
all_libs[ver] = lib
jlink_path: str = ''
jlink_version: str = '0'
if len(all_libs) > 0:
jlink_version = (sorted(all_libs.keys())[-1:])[0]
jlink_path = all_libs.get(jlink_version)
jlink_path = os.path.dirname(jlink_path)
# 6.50 6.50b 6.52 6.52a 6.52b 6.52c
known_issue_versions = (65000, 65020, 65200, 65210, 65220, 65230)
if jlink_version in known_issue_versions:
log.warn(f'The J-Link software with the highest version (in {jlink_path}) has known '
f'issues related to SRAM download and STM32 MCUs. Please upgrade to at least v6.52d')
else:
            raise DottException(f'JLink software (esp. {segger_lib_name}) not found in paths {segger_paths}.')
jlink_version = f'{str(jlink_version)[:1]}.{str(jlink_version)[1:3]}{chr(int(str(jlink_version)[-2:]) + 0x60)}'
return jlink_path, segger_lib_name, jlink_version
@staticmethod
def parse_config():
# setup runtime environment
DottConf._setup_runtime()
log.info(f'DOTT runtime: {DottConf.get("DOTTRUNTIME")}')
log.info(f'DOTT runtime version: {DottConf.get("DOTT_RUNTIME_VER")}')
# print working directory
log.info(f'work directory: {os.getcwd()}')
# default ini file
dott_section = 'DOTT'
dott_ini = 'dott.ini'
# JLINK gdb server
if platform.system() == 'Linux':
jlink_default_path = [str(Path('/opt/SEGGER'))]
jlink_gdb_server_binary = 'JLinkGDBServerCLExe'
jlink_lib_name = 'libjlinkarm.so'
else:
jlink_default_path = [str(Path('C:/Program Files (x86)/SEGGER')), str(Path('C:/Program Files/SEGGER'))]
jlink_gdb_server_binary = 'JLinkGDBServerCL.exe'
jlink_lib_name = 'JLink_x64.dll'
# the DOTTJLINKPATH environment variable overrides the default location of the Segger JLink package
if 'DOTTJLINKPATH' in os.environ.keys():
log.info(f'Overriding default JLink path ({jlink_default_path}) with DOTTJLINKPATH ({os.environ["DOTTJLINKPATH"]})')
jlink_default_path = [os.environ['DOTTJLINKPATH']]
# if a dott.ini is found in the working directory then parse it
if os.path.exists(os.getcwd() + os.sep + dott_ini):
# read ini file
ini = configparser.ConfigParser()
ini.read(os.getcwd() + os.sep + dott_ini)
if not ini.has_section(dott_section):
raise Exception(f'Unable to find section DOTT in {dott_ini}')
# create an in-memory copy of the DOTT section of the init file
conf_tmp = dict(ini[dott_section].items())
else:
log.info(f'No dott.ini found in working directory.')
conf_tmp = {}
# only copy items from ini to in-memory config which are not already present (i.e., set programmatically)
for k, v in conf_tmp.items():
if k not in DottConf.conf.keys():
DottConf.conf[k] = v
# Go through the individual config options and set reasonable defaults
# where they are missing (or return an error)
if 'bl_load_elf' not in DottConf.conf:
DottConf.conf['bl_load_elf'] = None
if DottConf.conf['bl_load_elf'] is not None:
if not os.path.exists(DottConf.conf['bl_load_elf']):
raise ValueError(f'{DottConf.conf["bl_load_elf"]} does not exist.')
log.info(f'BL ELF (load): {DottConf.conf["bl_load_elf"]}')
if 'bl_symbol_elf' not in DottConf.conf:
# if no symbol file is specified assume that symbols are contained in the load file
DottConf.conf['bl_symbol_elf'] = DottConf.conf['bl_load_elf']
if DottConf.conf['bl_symbol_elf'] is not None:
if not os.path.exists(DottConf.conf['bl_symbol_elf']):
raise ValueError(f'{DottConf.conf["bl_symbol_elf"]} does not exist.')
log.info(f'BL ELF (symbol): {DottConf.conf["bl_symbol_elf"]}')
if 'bl_symbol_addr' not in DottConf.conf:
DottConf.conf['bl_symbol_addr'] = 0x0
elif DottConf.conf['bl_symbol_addr'].strip() == '':
DottConf.conf['bl_symbol_addr'] = 0x0
else:
DottConf.conf['bl_symbol_addr'] = int(DottConf.conf['bl_symbol_addr'], base=16)
log.info(f'BL ADDR (symbol): 0x{DottConf.conf["bl_symbol_addr"]:x}')
if 'app_load_elf' not in DottConf.conf:
raise Exception(f'app_load_elf not set')
if not os.path.exists(DottConf.conf['app_load_elf']):
raise ValueError(f'{DottConf.conf["app_load_elf"]} does not exist.')
log.info(f'APP ELF (load): {DottConf.conf["app_load_elf"]}')
if 'app_symbol_elf' not in DottConf.conf:
# if no symbol file is specified assume that symbols are contained in the load file
DottConf.conf['app_symbol_elf'] = DottConf.conf['app_load_elf']
if not os.path.exists(DottConf.conf['app_symbol_elf']):
raise ValueError(f'{DottConf.conf["app_symbol_elf"]} does not exist.')
log.info(f'APP ELF (symbol): {DottConf.conf["app_symbol_elf"]}')
if 'device_name' not in DottConf.conf:
DottConf.conf["device_name"] = 'unknown'
log.info(f'Device name: {DottConf.conf["device_name"]}')
if 'device_endianess' not in DottConf.conf:
DottConf.conf['device_endianess'] = 'little'
else:
if DottConf.conf['device_endianess'] != 'little' and DottConf.conf['device_endianess'] != 'big':
raise ValueError(f'device_endianess in {dott_ini} should be either "little" or "big".')
log.info(f'Device endianess: {DottConf.conf["device_endianess"]}')
# determine J-Link path and version
jlink_path, jlink_lib_name, jlink_version = DottConf._get_jlink_path(jlink_default_path, jlink_lib_name, jlink_gdb_server_binary)
DottConf.conf["jlink_path"] = jlink_path
DottConf.conf["jlink_lib_name"] = jlink_lib_name
DottConf.conf["jlink_version"] = jlink_version
log.info(f'J-LINK local path: {DottConf.conf["jlink_path"]}')
log.info(f'J-LINK local version: {DottConf.conf["jlink_version"]}')
# We are connecting to a J-LINK gdb server which was not started by DOTT. Therefore it does not make sense
# to print, e.g., SWD connection parameters.
if 'jlink_interface' not in DottConf.conf:
DottConf.conf['jlink_interface'] = 'SWD'
log.info(f'J-LINK interface: {DottConf.conf["jlink_interface"]}')
if 'jlink_speed' not in DottConf.conf:
DottConf.conf['jlink_speed'] = '15000'
log.info(f'J-LINK speed (set): {DottConf.conf["jlink_speed"]}')
if 'jlink_serial' not in DottConf.conf:
DottConf.conf['jlink_serial'] = None
elif DottConf.conf['jlink_serial'] is not None and DottConf.conf['jlink_serial'].strip() == '':
DottConf.conf['jlink_serial'] = None
if DottConf.conf['jlink_serial'] is not None:
log.info(f'J-LINK serial: {DottConf.conf["jlink_serial"]}')
if 'gdb_client_binary' not in DottConf.conf:
default_gdb = 'arm-none-eabi-gdb-py'
DottConf.conf['gdb_client_binary'] = str(Path(f'{os.environ["DOTTGDBPATH"]}/{default_gdb}'))
log.info(f'GDB client binary: {DottConf.conf["gdb_client_binary"]}')
if 'gdb_server_addr' not in DottConf.conf:
DottConf.conf['gdb_server_addr'] = None
elif DottConf.conf['gdb_server_addr'].strip() == '':
DottConf.conf['gdb_server_addr'] = None
else:
DottConf.conf['gdb_server_addr'] = DottConf.conf['gdb_server_addr'].strip()
log.info(f'GDB server address: {DottConf.conf["gdb_server_addr"]}')
if 'gdb_server_port' not in DottConf.conf or DottConf.conf['gdb_server_port'] is None:
DottConf.conf['gdb_server_port'] = '2331'
elif DottConf.conf['gdb_server_port'].strip() == '':
DottConf.conf['gdb_server_port'] = '2331'
log.info(f'GDB server port: {DottConf.conf["gdb_server_port"]}')
if 'jlink_server_addr' not in DottConf.conf or DottConf.conf['jlink_server_addr'] is None:
DottConf.conf['jlink_server_addr'] = None
elif DottConf.conf['jlink_server_addr'].strip() == '':
DottConf.conf['jlink_server_addr'] = None
if DottConf.conf["jlink_server_addr"] != None:
log.info(f'JLINK server address: {DottConf.conf["jlink_server_addr"]}')
if 'jlink_server_port' not in DottConf.conf or DottConf.conf['jlink_server_port'] is None:
DottConf.conf['jlink_server_port'] = '19020'
elif DottConf.conf['jlink_server_port'].strip() == '':
DottConf.conf['jlink_server_port'] = '19020'
if DottConf.conf["jlink_server_port"] != '19020':
log.info(f'JLINK server port: {DottConf.conf["jlink_server_port"]}')
if DottConf.conf['gdb_server_addr'] is None:
# no (remote) GDB server address given. try to find a local GDB server binary to launch instead
if 'gdb_server_binary' in DottConf.conf:
if not os.path.exists(DottConf.conf['gdb_server_binary']):
raise Exception(f'GDB server binary {DottConf.conf["gdb_server_binary"]} ({dott_ini}) not found!')
elif os.path.exists(jlink_path):
DottConf.conf['gdb_server_binary'] = str(Path(f'{jlink_path}/{jlink_gdb_server_binary}'))
else:
# As a last option we check if the GDB server binary is in PATH
try:
subprocess.check_call((jlink_gdb_server_binary, '-device'))
except subprocess.CalledProcessError:
# Segger gdb server exists and responded with an error since no device was specified
DottConf.conf['gdb_server_binary'] = jlink_gdb_server_binary
except Exception as ex:
raise Exception(f'GDB server binary {jlink_gdb_server_binary} not found! Checked {dott_ini}, '
'default location and PATH. Giving up.') from None
log.info(f'GDB server binary: {DottConf.conf["gdb_server_binary"]}')
else:
log.info('GDB server assumed to be already running (not started by DOTT).')
DottConf.conf['gdb_server_binary'] = None
default_mem_model: TargetMemModel = TargetMemModel.TESTHOOK
if 'on_target_mem_model' not in DottConf.conf:
DottConf.conf['on_target_mem_model'] = default_mem_model
else:
DottConf.conf['on_target_mem_model'] = str(DottConf.conf['on_target_mem_model']).upper()
if DottConf.conf['on_target_mem_model'] not in TargetMemModel.get_keys():
log.warn(f'On-target memory model ({DottConf.conf["on_target_mem_model"]}) from {dott_ini} is unknown. '
f'Falling back to default.')
DottConf.conf['on_target_mem_model'] = default_mem_model
else:
DottConf.conf['on_target_mem_model'] = TargetMemModel[DottConf.conf['on_target_mem_model']]
on_target_mem_prestack_alloc_size: int = 256
if 'on_target_mem_prestack_alloc_size' in DottConf.conf:
if str(DottConf.conf['on_target_mem_prestack_alloc_size']).strip() != '':
on_target_mem_prestack_alloc_size = int(DottConf.conf['on_target_mem_prestack_alloc_size'])
DottConf.conf['on_target_mem_prestack_alloc_size'] = on_target_mem_prestack_alloc_size
on_target_mem_prestack_alloc_location: str = '_main_init'
if 'on_target_mem_prestack_alloc_location' in DottConf.conf:
if str(DottConf.conf['on_target_mem_prestack_alloc_location']).strip() != '':
on_target_mem_prestack_alloc_location = str(DottConf.conf['on_target_mem_prestack_alloc_location'])
DottConf.conf['on_target_mem_prestack_alloc_location'] = on_target_mem_prestack_alloc_location
on_target_mem_prestack_halt_location: str = 'main'
if 'on_target_mem_prestack_halt_location' in DottConf.conf:
if str(DottConf.conf['on_target_mem_prestack_halt_location']).strip() != '':
on_target_mem_prestack_halt_location = str(DottConf.conf['on_target_mem_prestack_halt_location'])
DottConf.conf['on_target_mem_prestack_halt_location'] = on_target_mem_prestack_halt_location
on_target_mem_prestack_total_stack_size: int = None
if 'on_target_mem_prestack_total_stack_size' in DottConf.conf:
if str(DottConf.conf['on_target_mem_prestack_total_stack_size']).strip() != '':
on_target_mem_prestack_total_stack_size = int(DottConf.conf['on_target_mem_prestack_total_stack_size'])
DottConf.conf['on_target_mem_prestack_total_stack_size'] = on_target_mem_prestack_total_stack_size
if DottConf.conf['on_target_mem_model'] == TargetMemModel.PRESTACK:
log.info(f'Std. target mem model for DOTT default fixtures: {DottConf.conf["on_target_mem_model"]} '
f'({on_target_mem_prestack_alloc_size}bytes '
f'@{on_target_mem_prestack_alloc_location}; '
f'halt @{on_target_mem_prestack_halt_location}; '
f'total stack: {on_target_mem_prestack_total_stack_size if on_target_mem_prestack_total_stack_size is not None else "unknown"})')
else:
log.info(f'Std. target mem model for DOTT default fixtures: {DottConf.conf["on_target_mem_model"]}')
|
"""deserialization tools"""
import typing as t
from datetime import datetime
from functools import partial
from toolz import flip
from valuable import load
from . import types
registry = load.PrimitiveRegistry({
datetime: partial(flip(datetime.strptime), '%Y-%m-%dT%H:%M:%SZ'),
**{
c: c for c in [
int,
float,
bool,
str,
types.Issue.State
]
}
}) | load.GenericRegistry({
t.List: load.list_loader
}) | load.get_optional_loader | load.AutoDataclassRegistry()
|
import numpy as np
import unittest
from pythran.typing import *
from pythran.tests import TestEnv
try:
np.float128
has_float128 = True
except AttributeError:
has_float128 = False
class TestConversion(TestEnv):
def test_list_of_uint16(self):
self.run_test('def list_of_uint16(l): return l', [np.uint16(1),np.uint16(2)], list_of_uint16=[List[np.uint16]])
def test_set_of_int32(self):
self.run_test('def set_of_int32(l): return l', {np.int32(1),np.int32(-4)}, set_of_int32=[Set[np.int32]])
def test_dict_of_int64_and_int8(self):
self.run_test('def dict_of_int64_and_int8(l): return l', {1:np.int8(1),2:np.int8(3),3:np.int8(4),-4:np.int8(-5)}, dict_of_int64_and_int8=[Dict[np.int64,np.int8]])
def test_tuple_of_uint8_and_int16(self):
self.run_test('def tuple_of_uint8_and_int16(l): return l', (np.uint8(5), np.int16(-146)), tuple_of_uint8_and_int16=[Tuple[np.uint8, np.int16]])
def test_array_of_uint32(self):
self.run_test('def array_of_uint32(l): return l', np.ones(2,dtype=np.uint32), array_of_uint32=[NDArray[np.uint32, :]])
def test_array_of_uint64_to_uint32(self):
self.run_test('def array_of_uint64_to_uint32(l): import numpy ; return l, numpy.array(l, numpy.uint32)', np.ones(2,dtype=np.uint64), array_of_uint64_to_uint32=[NDArray[np.uint64, :]])
def test_list_of_float64(self):
self.run_test('def list_of_float64(l): return [2. * _ for _ in l]', [1.,2.], list_of_float64=[List[np.float64]])
@unittest.skipIf(not has_float128, "not float128")
def test_list_of_float128(self):
self.run_test('def list_of_float128(l): return [2. * _ for _ in l]', [np.float128(1.),np.float128(2.)], list_of_float128=[List[np.float128]])
@unittest.skipIf(not has_float128, "not float128")
def test_array_of_float128(self):
self.run_test('def array_of_float128(l): return l + 1', np.array([1.,2.], dtype=np.float128), array_of_float128=[NDArray[np.float128, :]])
def test_set_of_float32(self):
""" Check np.float32 conversion. """
code = """
def set_of_float32(l):
return { _ / 2 for _ in l}"""
self.run_test(code, {np.float32(1), np.float32(2)},
set_of_float32=[Set[np.float32]])
def test_dict_of_complex64_and_complex_128(self):
""" Check numpy complex type conversion. """
code = """
def dict_of_complex64_and_complex_128(l):
return l.keys(), l.values()"""
interface = [Dict[np.complex64, np.complex128]]
self.run_test(code, {np.complex64(3.1 + 1.1j): 4.5 + 5.5j},
dict_of_complex64_and_complex_128=interface)
def test_ndarray_bad_dimension(self):
code = 'def ndarray_bad_dimension(a): return a'
with self.assertRaises(BaseException):
self.run_test(code, np.ones((10,10)),
ndarray_bad_dimension=[NDArray[float, :]])
def test_ndarray_bad_dtype(self):
code = 'def ndarray_bad_dtype(a): return a'
with self.assertRaises(BaseException):
self.run_test(code, np.ones((10,10)),
ndarray_bad_dtype=[NDArray[np.uint8, :, :]])
def test_ndarray_bad_stride_type(self):
""" Check an error is raised when pythran input is strided. """
code = 'def ndarray_bad_stride_type(a): return a'
with self.assertRaises(BaseException):
self.run_test(code, np.ones((10, 10), dtype=np.uint8)[::2],
ndarray_bad_stride_type=[NDArray[np.uint8, :, :]])
def test_ndarray_with_stride_type(self):
code = 'def ndarray_with_stride_type(a): return a'
self.run_test(code, np.arange((10), dtype=np.uint8)[::2],
ndarray_with_stride_type=[NDArray[np.uint8, ::-1]])
def test_ndarray_with_stride_and_offset(self):
code = 'def ndarray_with_stride_and_offset(a): return a'
self.run_test(code, np.arange((10), dtype=np.uint8)[1::2],
ndarray_with_stride_and_offset=[NDArray[np.uint8, ::-1]])
def test_ndarray_with_negative_stride(self):
code = 'def ndarray_with_negative_stride(a): return a'
with self.assertRaises(BaseException):
self.run_test(code, np.arange((10), dtype=np.uint8)[::-2],
ndarray_with_negative_stride=[NDArray[np.uint8, ::-1]])
def iexpr_with_strides_and_offsets(self):
code = 'def iexpr_with_strides_and_offsets(a): return a'
self.run_test(code, np.array(np.arange((160), dtype=np.uint8).reshape((4, 5, 8)))[1][1::][:-1],
ndarray_with_strides_and_offsets=[NDArray[np.uint8, :, ::-1]])
def test_ndarray_with_strides_and_offsets(self):
code = 'def ndarray_with_strides_and_offsets(a): return a'
self.run_test(code, np.array(np.arange((128), dtype=np.uint8).reshape((16,8)))[1::3,2::2],
ndarray_with_strides_and_offsets=[NDArray[np.uint8, :, ::-1]])
def test_ndarray_with_stride_and_offset_and_end(self):
code = 'def ndarray_with_stride_and_offset_and_end(a): return a'
self.run_test(code, np.arange((10), dtype=np.uint16)[1:6:2],
ndarray_with_stride_and_offset_and_end=[NDArray[np.uint16, ::-1]])
def test_ndarray_with_multi_strides(self):
code = 'def ndarray_with_multi_strides(a): return a'
self.run_test(code, np.array(np.arange((128), dtype=np.uint8).reshape((16,8)))[:,1::3],
ndarray_with_multi_strides=[NDArray[np.uint8, :, ::-1]])
def test_ndarray_unsupported_reshaped_array_with_stride(self):
code = 'def ndarray_unsupported_reshaped_array_with_stride(a): return a'
with self.assertRaises(BaseException):
self.run_test(code, np.arange((128), dtype=np.uint8).reshape((16,8))[1::3,2::2],
ndarray_unsupported_reshaped_array_with_stride=[NDArray[np.uint8, :, ::-1]])
def test_transposed_arg0(self):
self.run_test("def np_transposed_arg0(a): return a", np.arange(9).reshape(3,3).T, np_transposed_arg0=[NDArray[int, :, :]])
def test_transposed_arg1(self):
self.run_test("def np_transposed_arg1(a): return a", np.arange(12).reshape(3,4).T, np_transposed_arg1=[NDArray[int, :, :]])
def test_transposed_arg2(self):
self.run_test("def np_transposed_arg2(a): return a", np.arange(12, dtype=complex).reshape(3,4).T, np_transposed_arg2=[NDArray[complex, :, :]])
def test_transposed_targ0(self):
self.run_test("def np_transposed_targ0(a): return a.T", np.arange(9).reshape(3,3).T, np_transposed_targ0=[NDArray[int, :, :]])
def test_transposed_targ1(self):
self.run_test("def np_transposed_targ1(a): return a.T", np.arange(12).reshape(3,4).T, np_transposed_targ1=[NDArray[int, :, :]])
def test_transposed_targ2(self):
self.run_test("def np_transposed_targ2(a): return a.T", np.arange(12, dtype=complex).reshape(3,4).T, np_transposed_targ2=[NDArray[complex, :, :]])
def test_transposed_argt0(self):
self.run_test("def np_transposed_argt0(a): return a.T", np.arange(9).reshape(3,3), np_transposed_argt0=[NDArray[int, :, :]])
def test_transposed_argt1(self):
self.run_test("def np_transposed_argt1(a): return a.T", np.arange(12).reshape(3,4), np_transposed_argt1=[NDArray[int, :, :]])
def test_transposed_argt2(self):
self.run_test("def np_transposed_argt2(a): return a.T", np.arange(12, dtype=complex).reshape(3,4), np_transposed_argt2=[NDArray[complex, :, :]])
def test_broadcasted_int8(self):
self.run_test('def broadcasted_int8(l): return l + 4', np.ones(10,dtype=np.int8).reshape(5,2), broadcasted_int8=[NDArray[np.int8, :, :]])
def test_broadcasted_uint8(self):
self.run_test('def broadcasted_uint8(l): return l - 4', np.ones(10,dtype=np.uint8).reshape(5,2), broadcasted_uint8=[NDArray[np.uint8, :, :]])
def test_broadcasted_int16(self):
self.run_test('def broadcasted_int16(l): return l * 4', np.ones(10,dtype=np.int16).reshape(5,2), broadcasted_int16=[NDArray[np.int16, :, :]])
def test_broadcasted_uint16(self):
self.run_test('def broadcasted_uint16(l): return l / 4', np.ones(10,dtype=np.uint16).reshape(5,2), broadcasted_uint16=[NDArray[np.uint16, :, :]])
@unittest.skip("no dynamic type promotion in pythran :-/")
def test_broadcasted_large_int8(self):
self.run_test('def broadcasted_large_int8(l): return l + 400', np.ones(10,dtype=np.int8).reshape(5,2), broadcasted_large_int8=[NDArray[np.int8, :, :]])
|
from pathlib import Path
from setuptools import setup, find_packages
package_dir = 'topic_analysis'
root = Path(__file__).parent.resolve()
# Read in package meta from about.py
about_path = root / package_dir / 'about.py'
with about_path.open('r', encoding='utf8') as f:
about = {}
exec(f.read(), about)
# Get readme
readme_path = root / 'README.md'
with readme_path.open('r', encoding='utf8') as f:
readme = f.read()
install_requires = [
'adjustText',
'joblib',
'matplotlib',
'numpy>=1.16',
'pandas',
'progressbar2',
'psutil',
'pystemmer',
'scipy',
'scikit-learn',
'spacy',
'spacy-lookups-data',
'symspellpy',
'unidecode'
]
tests_require = ['pytest']
def have_compiler():
"""
checks for the existence of a compiler by compiling a small C source file
source: https://charlesleifer.com/blog/misadventures-in-python-packaging-optional-c-extensions/
"""
from distutils.ccompiler import new_compiler
from distutils.errors import CompileError
from distutils.errors import DistutilsExecError
import os
import tempfile
import warnings
fd, fname = tempfile.mkstemp('.c', text=True)
f = os.fdopen(fd, 'w')
f.write('int main(int argc, char** argv) { return 0; }')
f.close()
compiler = new_compiler()
try:
compiler.compile([fname])
except (CompileError, DistutilsExecError):
warnings.warn('compiler not installed')
return False
except Exception as exc:
warnings.warn('unexpected error encountered while testing if compiler '
'available: %s' % exc)
return False
else:
return True
if not have_compiler():
install_requires.remove('pystemmer')
install_requires += ['nltk']
setup(
name=about['__title__'],
description=about['__summary__'],
long_description=readme,
long_description_content_type='text/markdown',
author=about['__author__'],
author_email=about['__email__'],
url=about['__uri__'],
version=about['__version__'],
license=about['__license__'],
packages=find_packages(exclude=('tests*',)),
install_requires=install_requires,
    tests_require=tests_require,
zip_safe=True,
include_package_data=True,
classifiers=[
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Topic :: Text Processing :: General'
]
)
# install spacy languages
import os
import sys
os.system(f'{sys.executable} -m spacy download nl_core_news_sm --user')
os.system(f'{sys.executable} -m spacy download en_core_web_sm --user') |
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import tensorflow.keras.layers as L
from tensorflow.keras import Model
from sklearn.metrics import f1_score
from tensorflow.keras import callbacks
import pickle
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import os
from tensorflow.keras.callbacks import Callback, LearningRateScheduler
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
from tensorflow.keras import losses, models, optimizers
import tensorflow as tf, re, math
import tensorflow_addons as tfa
import efficientnet.tfkeras as efn
from tensorflow.keras.utils import to_categorical
os.environ["CUDA_VISIBLE_DEVICES"]="0"
seq_len=200
EPOCHS = 2
NNBATCHSIZE = 64
LR = 0.0015
lam = np.load('Synthetic_Processed/lamellar.npy')
hex = np.load('Synthetic_Processed/hexagonal.npy')
p = np.load('Synthetic_Processed/P_cubic.npy')
g = np.load('Synthetic_Processed/G_cubic.npy')
d = np.load('Synthetic_Processed/D_cubic.npy')
x = np.vstack((lam,hex,p,g,d))
y = np.hstack(([0]*len(lam), [1]*len(hex), [2]*len(p), [3]*len(g), [4]*len(d)))
y = to_categorical(y)
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
EFNS = [efn.EfficientNetB0, efn.EfficientNetB1, efn.EfficientNetB2, efn.EfficientNetB3,
efn.EfficientNetB4, efn.EfficientNetB5, efn.EfficientNetB6]
def build_model(dim=200, ef=0):
inp1 = tf.keras.layers.Input(shape=(dim,dim,3))
base = EFNS[ef](input_shape=(dim,dim,3),weights='noisy-student',include_top=False)
x = base(inp1)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.Dense(5,activation='softmax')(x)
model = tf.keras.Model(inputs=inp1,outputs=x)
opt = tf.keras.optimizers.Adam(learning_rate=0.001)
    loss = tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.05)
model.compile(optimizer=opt,loss=loss,metrics=['AUC', 'accuracy'])
return model
model = build_model()
print(model.summary())
# train on the synthetic data first
history = model.fit(X_train, y_train,
batch_size=NNBATCHSIZE,
epochs=EPOCHS,
callbacks=[
callbacks.ReduceLROnPlateau()
],
validation_data = (X_test, y_test)
)
model.save('trained_saxs_model.h5') |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class DataapiCommonCode(object):
    # Parameter errors
ILLEGAL_ARGUMENT_EX = "000"
    # Data errors
NO_INPUT_DATA_EX = "101"
NO_OUTPUT_DATA_EX = "111"
    # DB operation errors
# -DSL 40x
SELECT_EX = "401"
INSERT_EX = "402"
UPDATE_EX = "403"
DELETE_EX = "404"
# -DDL 41x
CREATE_EX = "411"
    # -Storage/query interface 42x
CALL_SQ_CREATE_EX = "421"
CALL_SQ_EXISTS_EX = "422"
    # -Metadata interface 43x
CALL_MEATA_EX = "431"
    # Service errors
INNER_SERVER_EX = "500"
OUTTER_SERVER_EX = "501"
    # Status errors
NOT_SUCCESS_EX = "601"
ILLEGAL_STATUS_EX = "602"
    # Resource errors
NOT_ENOUGH_RESOURCE_EX = "701"
OVER_LIMIT_RESOURCE_EX = "702"
LOWER_LIMIT_RESOURCE_EX = "703"
NOT_FOUND_RESOURCE_EX = "704"
    # Unexpected errors
UNEXPECT_EX = "777"
#
|
"""
Decorator utilities
"""
import functools
from requests.exceptions import ConnectionError as RequestsConnectionError
try:
from functools import cached_property
except ImportError: # pragma: no cover
# Python 3.7
from cached_property import cached_property
def retry_on_connection_error(func=None, max_retries=5):
"""
Decorator to automatically retry a function when a `ConnectionError` (such
as a Broken Pipe error) is raised.
"""
def decorate_func(f):
@functools.wraps(f)
def new_func(*args, **kwargs):
retries = 0
while retries < max_retries:
try:
return f(*args, **kwargs)
except RequestsConnectionError:
retries += 1
except Exception:
# Unexpected error, raise the message so it shows up
raise
raise Exception('Maximum retries exceeded')
return new_func
return decorate_func(func) if func else decorate_func
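# Example usage (illustrative sketch; `fetch_status` and the URL are hypothetical):
#
#     import requests
#
#     @retry_on_connection_error(max_retries=3)
#     def fetch_status(url):
#         return requests.get(url).status_code
#
#     fetch_status('https://example.com')  # retried up to 3 times on ConnectionError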
|
# Generated by Django 3.0.5 on 2020-08-21 16:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0002_auto_20200815_1933'),
]
operations = [
migrations.AlterField(
model_name='user',
name='QQ',
field=models.CharField(blank=True, max_length=20, verbose_name='QQ号'),
),
migrations.AlterField(
model_name='user',
name='WeChat',
field=models.CharField(blank=True, max_length=20, verbose_name='微信号'),
),
migrations.AlterField(
model_name='user',
name='apartment',
field=models.CharField(blank=True, max_length=5, verbose_name='寝室楼'),
),
migrations.AlterField(
model_name='user',
name='college',
field=models.CharField(blank=True, max_length=20, verbose_name='学院'),
),
migrations.AlterField(
model_name='user',
name='date_birth',
field=models.CharField(blank=True, max_length=10, verbose_name='生日'),
),
migrations.AlterField(
model_name='user',
name='head_img',
field=models.ImageField(blank=True, upload_to='head', verbose_name='头像'),
),
migrations.AlterField(
model_name='user',
name='nickname',
field=models.CharField(blank=True, max_length=6, verbose_name='昵称'),
),
migrations.AlterField(
model_name='user',
name='phone',
field=models.CharField(blank=True, max_length=11, verbose_name='手机号'),
),
migrations.AlterField(
model_name='user',
name='signature',
field=models.CharField(blank=True, max_length=20, verbose_name='个性签名'),
),
migrations.AlterField(
model_name='user',
name='trading_place',
field=models.CharField(blank=True, max_length=20, verbose_name='交易地点'),
),
]
|
from flask_wtf import FlaskForm
from wtforms import SubmitField, RadioField, BooleanField
class QuizQuestionViewModel(FlaskForm):
question_text: str = ''
answers = []
question_number: int = 0
subject_name: str = ''
answer_selection = RadioField()
is_validation_step = BooleanField()
submit = SubmitField()
|
V_MAJOR = 1
V_MINOR = 0
ENGINE_NAME = "cpuemulate"
PROJECT_NAME = "cpueditor"
TOOLS_DIR = "tools"
import sys, platform
PLATFORM = sys.platform
for platInfo in platform.uname():
if "microsoft" in platInfo.lower():
PLATFORM = "windows"
break
def IsWindows():
return PLATFORM == "windows"
def IsLinux():
return PLATFORM == "linux"
def IsMac():
return PLATFORM == "darwin" |
#######
# Objective: Create a dashboard that takes in two or more
# input values and returns their product as the output.
######
# Perform imports here:
# Launch the application:
# Create a Dash layout that contains input components
# and at least one output. Assign IDs to each component:
# Create a Dash callback:
# Add the server clause:
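# A minimal sketch of one possible solution (assumes Dash 2.x is installed; the
# component IDs, default values, and layout below are arbitrary choices, not part
# of the exercise text):
import dash
from dash import dcc, html
from dash.dependencies import Input, Output

app = dash.Dash(__name__)

# Two numeric inputs and one output element, each with its own ID.
app.layout = html.Div([
    dcc.Input(id='num-1', type='number', value=1),
    dcc.Input(id='num-2', type='number', value=1),
    html.H2(id='product')
])

# Recompute the product whenever either input changes.
@app.callback(Output('product', 'children'),
              [Input('num-1', 'value'), Input('num-2', 'value')])
def update_product(a, b):
    if a is None or b is None:
        return 'Please enter two numbers.'
    return 'Product: {}'.format(a * b)

if __name__ == '__main__':
    app.run_server()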
|
"""To be used in conjunction with stateAnalysis.cpp. This file generates plots of the respective action classes given a specific vehicular state."""
import plotly.express as px
import pandas as pd
import json
import tool as tl
from typing import Tuple, Dict
def load_data() -> Tuple[Dict, pd.DataFrame]:
"""Loads the data generated by actionAnalysis.cpp.
Returns:
Tuple[dict, pd.DataFrame]: A tuple with the raw data and the normalized actions.
"""
data = {}
children = []
with open(f"{tl.file_dir}/output/state_analysis.json") as json_data:
data = json.load(json_data)
for node in data["childMap"]:
children.append(
{
"d_lateral": node[1]["action_set"][0]["lateral_change"],
"d_velocity": node[1]["action_set"][0]["velocity_change"],
"invalid": node[1]["invalid"],
"collision": node[1]["collision"],
}
)
actions = pd.DataFrame(children)
return data, actions
def plot_state_analysis(vehicle: Dict, actions: pd.DataFrame) -> None:
labels = {}
labels["d_velocity"] = tl.l_math(r"\Delta v_\text{lon} [m/s]")
labels["d_lateral"] = tl.l_math(r"\Delta y_\text{lat} [m]")
labels["collision"] = "Collision State"
labels["invalid"] = "Invalid State"
for state in ["collision", "invalid"]:
# stringify for coloring
actions[state] = actions[state].astype(str)
title = f"{labels[state]}"
labels[state] = state.capitalize()
fig = px.scatter(
actions,
x="d_velocity",
y="d_lateral",
hover_data=[state],
title=title,
labels=labels,
width=600,
height=600,
color=state,
color_discrete_map={"True": "red", "False": "green"},
)
fig.update_traces(marker=dict(size=12))
fig.update_layout(
xaxis=dict(tickmode="linear", tick0=min(actions["d_velocity"])),
yaxis=dict(tickmode="linear", tick0=min(actions["d_lateral"])),
font=dict(family=tl.font_family, size=tl.font_size),
template=tl.theme_template
)
tl.generate_output(fig, f"state_analysis_{state}")
if __name__ == "__main__":
# The tool to run.
tool = "proseco_planning_tool_state_analysis"
# The options file to load.
options = "example_options.json"
# The scenario file to load.
scenario = "sc00.json"
tl.create_output_dir()
tl.run_tool(tool, options, scenario)
data, actions = load_data()
plot_state_analysis(data["agents"][0]["vehicle"], actions)
|
import argparse
import os
import random
import time
import warnings
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
from sklearn.metrics import roc_auc_score
from scipy.special import softmax
from .meters import AverageMeter
from .meters import ProgressMeter
from .combiner import detach_tensor
'''
def pred_accuracy(output, target, k):
"""Computes the accuracy over the k top predictions for the specified values of k"""
output = detach_tensor(output)
target = detach_tensor(target)
batch_size = target.size(0)
argsorted_out = np.argsort(output)[:,-k:]
return np.asarray(np.any(argsorted_y.T == target, axis=0).mean(dtype='f')),
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res[0] # Seems like we only want the 1st
'''
def decorator_detach_tensor(function):
def wrapper(*args, **kwargs):
# TODO Find a simple way to handle this business ...
# If is eval, or if fast debug, or
# is train and not heavy, or is train and heavy
output = detach_tensor(args[0])
target = detach_tensor(args[1])
args = args[2:]
result = function(output, target, *args, **kwargs)
return result
return wrapper
@decorator_detach_tensor
def topk_acc(output, target, k):
"""Computes the accuracy over the k top predictions for the specified values of k"""
argsorted_out = np.argsort(output)[:,-k:]
matching = np.asarray(np.any(argsorted_out.T == target, axis=0))
return matching.mean(dtype='f')
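# Illustrative example (hypothetical values; assumes detach_tensor passes NumPy
# arrays through unchanged): for output = np.array([[0.1, 0.9], [0.8, 0.2]]) and
# target = np.array([1, 0]), topk_acc(output, target, 1) returns 1.0.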
@decorator_detach_tensor
def compute_auc_binary(output, target):
#assuming output and target are all vectors for binary case
try:
o = softmax(output, axis=1)
auc = roc_auc_score(target, o[:,1])
    except Exception:
return -1
return auc
class Evaluator:
def __init__(self, model, loss_func, metrics, loaders, args):
self.model = model
self.loss_func = loss_func
self.metrics = metrics
self.loaders = loaders
self.args = args
self.metric_best_vals = {metric: 0 for metric in self.metrics}
def evaluate(self, eval_type, epoch):
print(f'==> Evaluation for {eval_type}, epoch {epoch}')
loader = self.loaders[eval_type]
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
metric_meters = {metric: AverageMeter(metric, self.metrics[metric]['format']) \
for metric in self.metrics}
list_meters = [metric_meters[m] for m in metric_meters]
progress = ProgressMeter(
len(loader),
[batch_time, losses, *list_meters],
prefix=f'{eval_type}@Epoch {epoch}: ')
# switch to evaluate mode
self.model.eval()
all_output = []
all_gt = []
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(loader):
if self.args.gpu is not None:
images = images.cuda(self.args.gpu, non_blocking=True)
target = target.cuda(self.args.gpu, non_blocking=True)
all_gt.append(target.cpu())
# compute output
output = self.model(images)
all_output.append(output.cpu())
loss = self.loss_func(output, target)
# JBY: For simplicity do losses first
losses.update(loss.item(), images.size(0))
for metric in self.metrics:
args = [output, target, *self.metrics[metric]['args']]
metric_func = globals()[self.metrics[metric]['func']]
result = metric_func(*args)
metric_meters[metric].update(result, images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % self.args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
# print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
# .format(top1=top1, top5=top5))
progress.display(i + 1)
all_output = np.concatenate(all_output)
all_gt = np.concatenate(all_gt)
for metric in self.metrics:
args = [all_output, all_gt, *self.metrics[metric]['args']]
metric_func = globals()[self.metrics[metric]['func']]
result = metric_func(*args)
metric_meters[metric].update(result, images.size(0))
self.metric_best_vals[metric] = max(metric_meters[metric].avg,
self.metric_best_vals[metric])
progress.display(i + 1, summary=True) |
# coding:utf-8
import os
import numpy as np
import tensorflow as tf
from cvtron.Base.Trainer import Trainer
from cvtron.data_zoo.segment.SBD import TFRecordConverter
from cvtron.model_zoo.deeplab.deeplabV3 import deeplab_v3
from cvtron.preprocessor import training
from cvtron.preprocessor.read_data import (distort_randomly_image_color,
random_flip_image_and_annotation,
rescale_image_and_annotation_by_factor,
scale_image_with_crop_padding,
tf_record_parser)
from cvtron.utils.logger.Logger import Logger
slim = tf.contrib.slim
class DeepLabTrainer(Trainer):
def __init__(self, config):
Trainer.__init__(self, config)
self.result = []
self.logger = Logger('Deep Lab Train Monitor')
def parseDataset(self, dataset_config):
tfrc = TFRecordConverter(
base_dataset_dir_voc=dataset_config['base_dataset_dir_voc'],
images_folder_name_voc=dataset_config['images_folder_name_voc'],
annotations_folder_name_voc=dataset_config['annotations_folder_name_voc'],
base_dataset_dir_aug_voc=dataset_config['base_dataset_dir_aug_voc'],
images_folder_name_aug_voc=dataset_config['images_folder_name_aug_voc'],
annotations_folder_name_aug_voc=dataset_config['annotations_folder_name_aug_voc']
)
train_images_filename_list, val_images_filename_list = tfrc.shuffle(
dataset_config['shuffle_ratio'], tfrc.get_files_list(dataset_config['filename']))
TRAIN_DATASET_DIR = dataset_config['train_dataset_dir']
TRAIN_FILE = 'train.tfrecords'
VALIDATION_FILE = 'validation.tfrecords'
train_writer = tf.python_io.TFRecordWriter(
os.path.join(TRAIN_DATASET_DIR, TRAIN_FILE))
val_writer = tf.python_io.TFRecordWriter(
os.path.join(TRAIN_DATASET_DIR, VALIDATION_FILE))
tfrc.convert(train_images_filename_list, train_writer)
tfrc.convert(val_images_filename_list, val_writer)
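    # A sketch of the dataset_config dict parseDataset() expects (all paths,
    # folder names and the shuffle ratio are hypothetical):
    #
    #   dataset_config = {
    #       'base_dataset_dir_voc': '/data/VOC2012',
    #       'images_folder_name_voc': 'JPEGImages/',
    #       'annotations_folder_name_voc': 'SegmentationClass/',
    #       'base_dataset_dir_aug_voc': '/data/SBD/benchmark_RELEASE/dataset',
    #       'images_folder_name_aug_voc': 'img/',
    #       'annotations_folder_name_aug_voc': 'cls/',
    #       'filename': 'train.txt',
    #       'shuffle_ratio': 0.9,
    #       'train_dataset_dir': '/data/tfrecords',
    #   }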
def train(self):
training_dataset = tf.data.TFRecordDataset(
self.config['train_filename'])
training_dataset = training_dataset.map(tf_record_parser)
training_dataset = training_dataset.map(
rescale_image_and_annotation_by_factor)
training_dataset = training_dataset.map(distort_randomly_image_color)
training_dataset = training_dataset.map(scale_image_with_crop_padding)
training_dataset = training_dataset.map(
random_flip_image_and_annotation)
training_dataset = training_dataset.repeat()
training_dataset = training_dataset.shuffle(
buffer_size=self.config['train_buffer_size'])
training_dataset = training_dataset.batch(self.config['batch_size'])
validation_dataset = tf.data.TFRecordDataset(
self.config['valid_filename'])
validation_dataset = validation_dataset.map(tf_record_parser)
validation_dataset = validation_dataset.map(
scale_image_with_crop_padding)
validation_dataset = validation_dataset.shuffle(
buffer_size=self.config['valid_buffer_size'])
validation_dataset = validation_dataset.batch(
self.config['batch_size'])
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
handle,
training_dataset.output_types,
training_dataset.output_shapes)
batch_images_tf, batch_labels_tf, _ = iterator.get_next()
training_iterator = training_dataset.make_initializable_iterator()
validation_iterator = validation_dataset.make_initializable_iterator()
class_labels = [v for v in range((self.config['number_of_classes']+1))]
class_labels[-1] = 255
is_training_tf = tf.placeholder(tf.bool, shape=[])
logits_tf = tf.cond(
is_training_tf,
true_fn=lambda: deeplab_v3(
batch_images_tf, self.config, is_training=True, reuse=False),
false_fn=lambda: deeplab_v3(
batch_images_tf, self.config, is_training=False, reuse=True)
)
valid_labels_batch_tf, valid_logits_batch_tf = training.get_valid_logits_and_labels(
annotation_batch_tensor=batch_labels_tf,
logits_batch_tensor=logits_tf,
class_labels=class_labels
)
cross_entropies = tf.nn.softmax_cross_entropy_with_logits(logits=valid_logits_batch_tf,
labels=valid_labels_batch_tf)
cross_entropy_tf = tf.reduce_mean(cross_entropies)
predictions_tf = tf.argmax(logits_tf, axis=3)
tf.summary.scalar('cross_entropy', cross_entropy_tf)
with tf.variable_scope("optimizer_vars"):
global_step = tf.Variable(0, trainable=False)
optimizer = tf.train.AdamOptimizer(
learning_rate=self.config['starting_learning_rate'])
train_step = slim.learning.create_train_op(
cross_entropy_tf, optimizer, global_step=global_step)
process_str_id = str(os.getpid())
merged_summary_op = tf.summary.merge_all()
self.LOG_FOLDER = os.path.join(
self.config['log_folder'], process_str_id)
if not os.path.exists(self.LOG_FOLDER):
os.makedirs(self.LOG_FOLDER)
variables_to_restore = slim.get_variables_to_restore(exclude=[self.config['resnet_model'] + "/logits", "optimizer_vars",
"DeepLab_v3/ASPP_layer", "DeepLab_v3/logits"])
miou, update_op = tf.contrib.metrics.streaming_mean_iou(tf.argmax(valid_logits_batch_tf, axis=1),
tf.argmax(
valid_labels_batch_tf, axis=1),
num_classes=self.config['number_of_classes'])
tf.summary.scalar('miou', miou)
restorer = tf.train.Saver(variables_to_restore)
saver = tf.train.Saver()
current_best_val_loss = np.inf
# start training
with tf.Session() as sess:
train_writer = tf.summary.FileWriter(
os.path.join(self.LOG_FOLDER, 'train'), sess.graph)
test_writer = tf.summary.FileWriter(
os.path.join(self.LOG_FOLDER, 'val'))
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
try:
restorer.restore(
sess, "/home/sfermi/Documents/Programming/project/cv/tmp/" + self.config['resnet_model'] + ".ckpt")
print("Model checkpoits for " +
self.config['resnet_model'] + " restored!")
except FileNotFoundError:
print("Please download " + self.config['resnet_model'] +
" model checkpoints from: https://github.com/tensorflow/models/tree/master/research/slim")
training_handle = sess.run(training_iterator.string_handle())
validation_handle = sess.run(validation_iterator.string_handle())
sess.run(training_iterator.initializer)
validation_running_loss = []
train_steps_before_eval = self.config['train_steps']
validation_steps = self.config['eval_steps']
while True:
training_average_loss = 0
for i in range(train_steps_before_eval):
_, global_step_np, train_loss, summary_string = sess.run([train_step,
global_step, cross_entropy_tf,
merged_summary_op],
feed_dict={is_training_tf: True,
handle: training_handle})
training_average_loss += train_loss
                    if i % self.config['log_per_step'] == 0:
train_writer.add_summary(
summary_string, global_step_np)
training_average_loss /= train_steps_before_eval
sess.run(validation_iterator.initializer)
validation_average_loss = 0
validation_average_miou = 0
for i in range(validation_steps):
val_loss, summary_string, _ = sess.run([cross_entropy_tf, merged_summary_op, update_op],
feed_dict={handle: validation_handle,
is_training_tf: False})
validation_average_loss += val_loss
validation_average_miou += sess.run(miou)
validation_average_loss /= validation_steps
validation_average_miou /= validation_steps
validation_running_loss.append(validation_average_loss)
validation_global_loss = np.mean(validation_running_loss)
if validation_global_loss < current_best_val_loss:
save_path = saver.save(
sess, self.LOG_FOLDER + "/train/model.ckpt")
print("Model checkpoints written! Best average val loss:",
validation_global_loss)
current_best_val_loss = validation_global_loss
print("Global step:", global_step_np, "Average train loss:",
training_average_loss, "\tGlobal Validation Avg Loss:", validation_global_loss,
"MIoU:", validation_average_miou)
result = {
'global_step': str(global_step_np),
'avg_train_loss': str(training_average_loss),
'avg_validation_loss': str(validation_average_loss),
'MIOU': str(validation_average_miou)
}
self.result.append(result)
self.logger.writeLog(self.result, os.path.join(self.LOG_FOLDER,'log.json'))
test_writer.add_summary(summary_string, global_step_np)
train_writer.close()
return '1234'
def getConfig(self):
return {
'batch_norm_epsilon': 1e-5,
'batch_norm_decay': 0.9997,
'number_of_classes': 21,
'l2_regularizer': 0.0001,
'starting_learning_rate': 0.00001,
'multi_grid': [1, 2, 4],
'output_stride': 16,
'gpu_id': 0,
'resnet_model': 'resnet_v2_152',
'train_filename': '/home/sfermi/Documents/Programming/project/cv/tmp/train.tfrecords',
'train_buffer_size': 500,
'batch_size': 1,
'valid_filename': '/home/sfermi/Documents/Programming/project/cv/tmp/validation.tfrecords',
'valid_buffer_size': 100,
'log_folder': '/home/sfermi/Documents/Programming/project/cv/tmp/',
'log_per_step': 10,
'train_steps': 100,
'eval_steps': 100,
}
def getConfigKey(self):
return [
'batch_norm_epsilon',
'batch_norm_decay',
'number_of_classes',
'l2_regularizer',
'starting_learning_rate',
'multi_grid',
'output_stride',
'gpu_id',
'resnet_model',
'train_filename',
'train_buffer_size',
'batch_size',
'valid_filename',
'valid_buffer_size',
'log_folder',
'log_per_step',
'train_steps',
'eval_steps'
]
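# A minimal usage sketch (assumes a config dict with the keys listed by
# getConfigKey(), e.g. the defaults returned by getConfig(); all paths are
# hypothetical and must exist on the training host):
#
#   trainer = DeepLabTrainer(config)
#   trainer.parseDataset(dataset_config)   # optional: (re)build the TFRecords
#   trainer.train()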
|
import argparse
import textwrap
import json
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(prog='PROG', formatter_class=argparse.RawDescriptionHelpFormatter,\
epilog=textwrap.dedent('''\
                                     Here is an example of how to run the script:
                                     python3 create_plots.py --params params.json
'''))
parser.add_argument("--params", action="store", dest="params", default='', \
help="name of the params file with absolute path")
opts = parser.parse_args()
params=opts.params
with open(params) as json_file:
data = json.load(json_file)
INPUT = str(data["out"])
OUTPUT = str(data["plots"])
with open(INPUT) as json_file:
data = json.load(json_file)
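# Expected structure of the results JSON loaded above (values are illustrative):
# one entry per framework, each with per-epoch 'loss' and 'accuracy' lists of
# equal length and a scalar 'trainTime' in seconds, e.g.
#   {"Swift":   {"loss": [...], "accuracy": [...], "trainTime": 123.4},
#    "Keras":   {...},
#    "Pytorch": {...}}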
epochs = len(data['Swift']['loss'])
x_axis=[item for item in range(1,epochs+1)]
plt.figure(figsize=(12, 5))
plt.subplots_adjust(top=1.6)
plt.subplot(221)
plt.plot(x_axis,data['Swift']['loss'], label='Swift')
plt.plot(x_axis,data['Keras']['loss'], label='Keras')
plt.plot(x_axis,data['Pytorch']['loss'], label='Pytorch')
plt.xticks(x_axis,x_axis)
plt.ylabel('loss', weight='bold')
plt.xlabel('# of epochs', weight='bold')
plt.legend()
plt.subplot(222)
plt.plot(x_axis,data['Swift']['accuracy'], label='Swift')
plt.plot(x_axis,data['Keras']['accuracy'], label='Keras')
plt.plot(x_axis,data['Pytorch']['accuracy'], label='Pytorch')
plt.xticks(x_axis,x_axis)
plt.ylabel('Accuracy', weight='bold')
plt.xlabel('# of epochs', weight='bold')
plt.legend()
plt.subplot(223)
plt.bar(['Swift','Keras','Pytorch'],[data['Swift']['trainTime'],data['Keras']['trainTime'],data['Pytorch']['trainTime']])
plt.ylabel('Time for training (s)', weight='bold')
plt.tight_layout()
plt.savefig(OUTPUT) |
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UX server HW info module."""
import platform
import re
import subprocess
from typing import Union
import cpuinfo
import psutil
from lpot.ux.utils.logger import log
from lpot.ux.utils.utils import determine_ip
class HWInfo:
"""Class responsible for gathering information about platform hardware."""
def __init__(self) -> None:
"""Initialize HW Info class and gather information platform hardware."""
cpu_info = cpuinfo.get_cpu_info()
self.sockets: int = get_number_of_sockets()
self.cores: int = psutil.cpu_count(logical=False)
self.cores_per_socket: int = int(psutil.cpu_count(logical=False) / self.sockets)
self.threads_per_socket: int = int(psutil.cpu_count(logical=True) / self.sockets)
self.total_memory: str = f"{psutil.virtual_memory().total / (1024 ** 3):.3f}GB"
self.system: str = get_distribution()
self.ip = determine_ip()
if cpu_info.get("brand"):
self.platform = cpu_info.get("brand")
elif cpu_info.get("brand_raw"):
self.platform = cpu_info.get("brand_raw")
else:
self.platform = ""
self.hyperthreading_enabled: bool = psutil.cpu_count(logical=False) != psutil.cpu_count(
logical=True,
)
self.turboboost_enabled = is_turbo_boost_enabled()
self.bios_version = get_bios_version()
self.kernel: str = get_kernel_version()
try:
min_cpu_freq = int(psutil.cpu_freq(percpu=False).min)
max_cpu_freq = int(psutil.cpu_freq(percpu=False).max)
except Exception:
log.warning("Cannot collect cpu frequency information.")
min_cpu_freq = 0
max_cpu_freq = 0
finally:
if min_cpu_freq == 0:
self.min_cpu_freq = "n/a"
else:
self.min_cpu_freq = f"{min_cpu_freq}Hz"
if max_cpu_freq == 0:
self.max_cpu_freq = "n/a"
else:
self.max_cpu_freq = f"{max_cpu_freq}Hz"
def get_number_of_sockets() -> int:
"""Get number of sockets in platform."""
cmd = "lscpu | grep 'Socket(s)' | cut -d ':' -f 2"
proc = subprocess.Popen(
args=cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=False,
)
proc.wait()
if proc.stdout:
for line in proc.stdout:
return int(line.decode("utf-8", errors="ignore").strip())
return 0
def get_distribution() -> str:
"""
    Return system distribution.
:return: distribution name
:rtype: str
"""
if psutil.WINDOWS:
return f"{platform.system()} {platform.release()}"
elif psutil.LINUX:
try:
return " ".join(platform.dist())
except AttributeError:
return f"{platform.system()} {platform.release()}"
else:
return platform.platform()
def get_bios_version() -> str:
"""Return bios version."""
if psutil.LINUX:
try:
cmd = "cat /sys/class/dmi/id/bios_version"
proc = subprocess.Popen(
args=cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=False,
)
proc.wait()
if proc.stdout and proc.returncode == 0:
for line in proc.stdout:
bios_version = line.decode("utf-8", errors="ignore").strip()
else:
bios_version = "n/a"
cmd = "grep microcode /proc/cpuinfo -m1"
proc = subprocess.Popen(
args=cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=False,
)
proc.wait()
if proc.stdout and proc.returncode == 0:
for line in proc.stdout:
microcode_raw = line.decode("utf-8", errors="ignore").strip()
result = re.search(r"microcode.*: (.*)", microcode_raw)
if result:
microcode = result.group(1)
else:
microcode = "n/a"
return f"BIOS {bios_version} Microcode {microcode}"
except Exception:
return "n/a"
return "n/a"
def is_turbo_boost_enabled() -> Union[str, bool]:
"""
Check if turbo boost is enabled.
Return True if enabled, False if disabled, None if cannot collect info.
"""
if psutil.LINUX:
try:
cmd = "cat /sys/devices/system/cpu/intel_pstate/no_turbo"
proc = subprocess.Popen(
args=cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=False,
)
proc.wait()
if proc.stdout:
for line in proc.stdout:
raw_status = int(line.decode("utf-8", errors="ignore").strip())
if raw_status == 0:
return True
elif raw_status == 1:
return False
else:
return "n/a"
except Exception:
return "n/a"
return "n/a"
def get_kernel_version() -> str:
"""Return kernel version."""
if psutil.LINUX:
return platform.release()
else:
return platform.platform()
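# A minimal usage sketch (a Linux host is assumed; several fields fall back to
# "n/a" on other platforms or when the underlying commands are unavailable):
#
#   info = HWInfo()
#   print(info.platform, info.sockets, info.cores_per_socket, info.total_memory)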
|
from base64 import b64decode
from inspect import getmembers, ismethod
from .constants import *
from .message import (JMSMessageType, _add_required_message_attribute_names,
_encode_jms_message, _encode_jms_messages,
_get_string_attribute)
class JMSClient(object):
def __init__(self, sqs_client):
assert sqs_client, 'sqs_client cannot be None'
assert sqs_client.__class__.__module__ == 'botocore.client' and sqs_client.__class__.__name__ == 'SQS', 'sqs_client must be of type botocore.client.SQS'
self.__sqs_client = sqs_client
for method in getmembers(sqs_client, ismethod):
if not method[0].startswith('_') and method[0] not in ['receive_message', 'send_message', 'send_message_batch']:
setattr(self, method[0], method[1])
def __get_sqs_client(self):
return self.__sqs_client
sqs_client = property(__get_sqs_client)
def receive_jms_message(self, **kwargs):
kwargs['MessageAttributeNames'] = _add_required_message_attribute_names(kwargs.get('MessageAttributeNames') or [])
response = self.sqs_client.receive_message(**kwargs)
messages = response.get('Messages')
if messages:
for message in messages:
if JMS_SQS_MESSAGE_TYPE not in message['MessageAttributes']:
raise ValueError('Message missing attribute {}'.format(JMS_SQS_MESSAGE_TYPE))
message_type = JMSMessageType.get(_get_string_attribute(message['MessageAttributes'].pop(JMS_SQS_MESSAGE_TYPE)))
if message_type == JMSMessageType.BYTE:
message['Body'] = b64decode(message['Body'])
message[JMS_MESSAGE_TYPE] = message_type
if JMS_SQS_CORRELATION_ID in message['MessageAttributes']:
message[JMS_CORRELATION_ID] = _get_string_attribute(message['MessageAttributes'].pop(JMS_SQS_CORRELATION_ID))
if JMS_SQS_REPLY_TO_QUEUE_NAME in message['MessageAttributes'] and JMS_SQS_REPLY_TO_QUEUE_URL in message['MessageAttributes']:
message[JMS_REPLY_TO] = {}
message[JMS_REPLY_TO][QUEUE_NAME] = _get_string_attribute(message['MessageAttributes'].pop(JMS_SQS_REPLY_TO_QUEUE_NAME))
message[JMS_REPLY_TO][QUEUE_URL] = _get_string_attribute(message['MessageAttributes'].pop(JMS_SQS_REPLY_TO_QUEUE_URL))
return response
def send_bytes_message(self, JMSReplyTo=None, JMSCorrelationId=None, **kwargs):
return self.sqs_client.send_message(**_encode_jms_message(JMSMessageType=JMSMessageType.BYTE, JMSReplyTo=JMSReplyTo, JMSCorrelationId=JMSCorrelationId, **kwargs))
def send_jms_message_batch(self, **kwargs):
return self.sqs_client.send_message_batch(**_encode_jms_messages(**kwargs))
def send_text_message(self, JMSReplyTo=None, JMSCorrelationId=None, **kwargs):
return self.sqs_client.send_message(**_encode_jms_message(JMSMessageType=JMSMessageType.TEXT, JMSReplyTo=JMSReplyTo, JMSCorrelationId=JMSCorrelationId, **kwargs))
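# A minimal usage sketch (region and queue URL are hypothetical); JMSClient
# copies every public SQS client method except receive_message, send_message and
# send_message_batch, which are replaced by the JMS variants above:
#
#   import boto3
#   sqs = boto3.client('sqs', region_name='us-east-1')
#   jms = JMSClient(sqs)
#   queue_url = 'https://sqs.us-east-1.amazonaws.com/123456789012/my-queue'
#   jms.send_text_message(QueueUrl=queue_url, MessageBody='hello')
#   response = jms.receive_jms_message(QueueUrl=queue_url)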
|
import yaml
import os, os.path
loc = os.path.dirname(os.path.abspath(__file__))
hosts = yaml.safe_load(open(loc+'/hosts.yml'))
projects = yaml.safe_load(open(loc+'/projects.yml'))
allocations = yaml.safe_load(open(loc+'/allocations.yml'))
users = yaml.safe_load(open(loc+'/users.yml'))
|
import os
from .util import Object
from hardhat.terminal import Terminal
from threading import Thread
try:
from urllib.request import ProxyHandler, build_opener, Request
from urllib.request import install_opener
from urllib.parse import urlparse
from http.server import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from urllib2 import ProxyHandler, build_opener, install_opener, Request
from urlparse import urlparse
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
CACHE_PROXY_PORT = 19992
class Requestor(Object):
def __init__(self):
http_proxy = os.environ.get('http_proxy',
os.environ.get('HTTP_PROXY'))
https_proxy = os.environ.get('https_proxy',
os.environ.get('HTTPS_PROXY'))
ftp_proxy = os.environ.get('ftp_proxy',
os.environ.get('FTP_PROXY'))
proxy = dict()
if http_proxy:
proxy['http'] = http_proxy
if https_proxy:
proxy['https'] = https_proxy
if ftp_proxy:
            proxy['ftp'] = ftp_proxy  # ProxyHandler keys entries by URL scheme
self.proxy_handler = ProxyHandler(proxy)
self.opener = build_opener(self.proxy_handler)
def request(self, url):
request = Request(url)
return self.opener.open(request)
class CacheProxyRequestor(Object):
def __init__(self, host, port):
self.url = 'http://%s:%s' % (host, port)
env = {'http': self.url,
'https': self.url,
'ftp': self.url
}
self.proxy_handler = ProxyHandler(env)
self.opener = build_opener(self.proxy_handler)
def request(self, url):
request = Request(url)
return self.opener.open(request)
class CacheProxyHandler(BaseHTTPRequestHandler):
def _get_filename(self):
dir = self.server.directory
path = urlparse(self.path)
host = path.hostname
path = path.path
while len(path) and path[0] == '/':
path = path[1:]
host_dir = os.path.join(dir, host)
        return os.path.join(host_dir, path)  # host_dir already includes the cache directory
def _check_directory(self, file):
dirname = os.path.dirname(file)
if not os.path.exists(dirname):
os.makedirs(dirname)
def _download_file(self, file):
requestor = self.server.requestor
tmp = file + '.tmp'
with open(tmp, 'wb') as dst:
src = requestor.request(self.path)
while True:
c = src.read(4096)
if not c:
break
dst.write(c)
os.rename(tmp, file)
def do_GET(self):
path = Terminal.light_green(self.path)
print('proxy_path=%s' % (path))
filename = self._get_filename()
self._check_directory(filename)
if not os.path.exists(filename):
ui_path = Terminal.normal_red(self.path)
print('cache miss. downloading: %s' % (ui_path))
self._download_file(filename)
self.send_response(200)
self.end_headers()
with open(filename, 'rb') as f:
while True:
c = f.read(4096)
if not c:
break
self.wfile.write(c)
class CacheProxyServer(HTTPServer):
def __init__(self, directory, address, handler_class):
HTTPServer.__init__(self, address, handler_class)
self.directory = directory
self.requestor = Requestor()
class Proxy(Object):
def __init__(self, directory, port=19992):
self.directory = directory
self.port = port
self.host = '127.0.0.1'
self.requestor = CacheProxyRequestor(self.host, port)
@property
def url(self):
return self.requestor.url
def __enter__(self):
address = (self.host, self.port)
self.proxy = CacheProxyServer(self.directory,
address,
CacheProxyHandler)
self.thread = Thread(target=self._run)
self.thread.daemon = True
self.thread.start()
return self
def _run(self):
self.proxy.serve_forever(poll_interval=.1)
def __exit__(self, x, y, z):
self.proxy.shutdown()
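# A minimal usage sketch (cache directory and URL are hypothetical): run the
# caching proxy and fetch an http URL through it; repeated requests are served
# from the on-disk cache.
#
#   with Proxy('/tmp/hardhat-cache') as proxy:
#       response = proxy.requestor.request('http://example.com/archive.tar.gz')
#       data = response.read()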
|
import pytest
from flask import g, session
from sqlalchemy import text
from flaskr.db import get_db
def test_register(client, app):
assert client.get("/auth/register").status_code == 200
response = client.post(
"/auth/register",
data={"email": "a", "password": "a", "first_name": "a", "last_name": "a"},
)
assert "http://localhost/auth/login" == response.headers["Location"]
with app.app_context():
assert (
get_db()
.execute(
text("SELECT * FROM users WHERE email = 'a'"),
)
.fetchone()
is not None
)
@pytest.mark.parametrize(
("email", "password", "first_name", "last_name", "message"),
(
("", "", "a", "a", b"Email is required."),
("a", "", "a", "a", b"Password is required."),
("[email protected]", "admin", "a", "a", b"already registered"),
),
)
def test_register_validate_input(
client, email, password, first_name, last_name, message
):
response = client.post(
"/auth/register",
data={
"email": email,
"password": password,
"first_name": first_name,
"last_name": last_name,
},
)
assert message in response.data
def test_login(client, auth):
assert client.get("/auth/login").status_code == 200
response = auth.login()
assert response.headers["Location"] == "http://localhost/"
with client:
client.get("/")
assert session["user_id"] == 1
assert g.user["email"] == "[email protected]"
@pytest.mark.parametrize(
("email", "password", "message"),
(
("a", "admin", b"Incorrect email."),
("[email protected]", "a", b"Incorrect password."),
),
)
def test_login_validate_input(auth, email, password, message):
response = auth.login(email, password)
assert message in response.data
|
# coding=utf-8
"""
Ref:
https://github.com/NVIDIA/apex/blob/f5cd5ae937f168c763985f627bbf850648ea5f3f/examples/imagenet/main_amp.py#L256
"""
import torch
# import torch.nn as nn
# import torch.nn.functional as F
class data_prefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
#self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
#self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.mean = self.mean.half()
# self.std = self.std.half()
self.preload()
def preload(self):
try:
self.next_input, self.next_target = next(self.loader)
except StopIteration:
self.next_input = None
self.next_target = None
return
with torch.cuda.stream(self.stream):
self.next_input = self.next_input.cuda(non_blocking=True)
self.next_target = self.next_target.cuda(non_blocking=True)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.next_input = self.next_input.half()
# else:
#self.next_input = self.next_input.float()
#self.next_input = self.next_input.sub_(self.mean).div_(self.std)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
self.preload()
return input, target |
from utils import setup_distrib, disk, square, rank_print, plot_matrix
import torch.distributed as dist
import torch.multiprocessing as mp
import torch
OPERATION = dist.ReduceOp.MAX
def main_process(rank, world_size=2):
device = setup_distrib(rank, world_size)
image = torch.zeros((11, 11), device=device)
input_tensors = []
if rank == 0:
input_tensors.append(disk(image, (4, 5), 2, rank + 1)[0])
input_tensors.append(square(image, (5, 5), 3, rank + 1)[0])
elif rank == 1:
input_tensors.append(disk(image, (7, 6), 2, rank + 1)[0])
input_tensors.append(square(image, (0, 2), 4, rank + 1)[0])
output = torch.zeros_like(image, device=device)
plot_matrix(
input_tensors[0],
rank,
title=f"Rank {rank}",
name="before_reduce_scatter_0",
folder="reduce_scatter",
)
plot_matrix(
input_tensors[1],
rank,
title=f"",
name="before_reduce_scatter_1",
folder="reduce_scatter",
)
plot_matrix(
output, rank, title=f"", name="before_reduce_scatter", folder="reduce_scatter"
)
# The main operation
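    # reduce_scatter applies OPERATION (element-wise MAX here) across ranks to the
    # k-th tensor of every rank's input_tensors list and writes the reduced k-th
    # result into `output` on rank k, so each rank ends up with one reduced tensor.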
dist.reduce_scatter(output, input_tensors, op=OPERATION)
plot_matrix(
output, rank, title=f"", name="after_reduce_scatter", folder="reduce_scatter"
)
if __name__ == "__main__":
mp.spawn(main_process, nprocs=2, args=())
|
import gym
import numpy as np
import torch
from stable_baselines3.common.vec_env import DummyVecEnv as VecEnv
from src.core.reparam_module import ReparamPolicy
from tqdm import tqdm
from src.core.gail import train_discriminator, roll_buffer, TerminalLogger
from dataclasses import dataclass
from src.safe_options.policy_gradient import trpo_step, ppo_step
import torch.nn.functional as F
from src.options.envs import OptionsEnv
from src.safe_options.collisions import feasible
from intersim.envs import IntersimpleLidarFlatIncrementingAgent
from src.util.wrappers import Setobs, TransformObservation
@dataclass
class Buffer:
states: torch.Tensor
actions: torch.Tensor
rewards: torch.Tensor
dones: torch.Tensor
@dataclass
class HLBuffer:
states: torch.Tensor
safe_actions: torch.Tensor
actions: torch.Tensor
rewards: torch.Tensor
dones: torch.Tensor
@dataclass
class OptionsRollout:
hl: HLBuffer
ll: Buffer
def gail(env_fn, expert_data, discriminator, disc_opt, disc_iters, policy, value,
v_opt, v_iters, epochs, rollout_episodes, rollout_steps, gamma,
gae_lambda, delta, backtrack_coeff, backtrack_iters, cg_iters=10, cg_damping=0.1, wasserstein=False, wasserstein_c=None, logger=TerminalLogger(), callback=None):
policy(torch.zeros(env_fn(0).observation_space['observation'].shape), torch.zeros(env_fn(0).observation_space['safe_actions'].shape))
policy = ReparamPolicy(policy)
logger.add_scalar('expert/mean_episode_length', (~expert_data.dones).sum() / expert_data.states.shape[0])
logger.add_scalar('expert/mean_reward_per_episode', expert_data.rewards[~expert_data.dones].sum() / expert_data.states.shape[0])
for epoch in tqdm(range(epochs)):
hl_data, ll_data, collisions = rollout(env_fn, policy, rollout_episodes, rollout_steps)
generator_data = OptionsRollout(HLBuffer(*hl_data), Buffer(*ll_data))
generator_data.ll.actions += 0.1 * torch.randn_like(generator_data.ll.actions)
gen_mean_episode_length = (~generator_data.ll.dones).sum() / generator_data.ll.states.shape[0]
logger.add_scalar('gen/mean_episode_length', gen_mean_episode_length , epoch)
gen_mean_reward_per_episode = generator_data.hl.rewards[~generator_data.hl.dones].sum() / generator_data.hl.states.shape[0]
logger.add_scalar('gen/mean_reward_per_episode', gen_mean_reward_per_episode, epoch)
logger.add_scalar('gen/unsafe_probability_mass', policy.unsafe_probability_mass(policy(generator_data.hl.states[~generator_data.hl.dones], generator_data.hl.safe_actions[~generator_data.hl.dones])).mean(), epoch)
gen_collision_rate = (1. * collisions.any(-1)).mean()
logger.add_scalar('gen/collision_rate', gen_collision_rate, epoch)
discriminator, loss = train_discriminator(expert_data, generator_data.ll, discriminator, disc_opt, disc_iters, wasserstein, wasserstein_c)
if wasserstein:
generator_data.ll.rewards = discriminator(generator_data.ll.states, generator_data.ll.actions)
else:
generator_data.ll.rewards = -F.logsigmoid(discriminator(generator_data.ll.states, generator_data.ll.actions))
logger.add_scalar('disc/final_loss', loss, epoch)
disc_mean_reward_per_episode = generator_data.ll.rewards[~generator_data.ll.dones].sum() / generator_data.ll.states.shape[0]
logger.add_scalar('disc/mean_reward_per_episode', disc_mean_reward_per_episode , epoch)
#assert generator_data.ll.rewards.shape == generator_data.ll.dones.shape
generator_data.hl.rewards = torch.where(~generator_data.ll.dones, generator_data.ll.rewards, torch.tensor(0.)).sum(-1)
value, policy = trpo_step(value, policy, generator_data.hl.states, generator_data.hl.safe_actions, generator_data.hl.actions, generator_data.hl.rewards, generator_data.hl.dones, gamma, gae_lambda, delta, backtrack_coeff, backtrack_iters, v_opt, v_iters, cg_iters, cg_damping)
expert_data = roll_buffer(expert_data, shifts=-3, dims=0)
if callback is not None:
callback({
'epoch': epoch,
'value': value,
'policy': policy,
'gen/mean_episode_length': gen_mean_episode_length.item(),
'gen/mean_reward_per_episode': gen_mean_reward_per_episode.item(),
'gen/collision_rate': gen_collision_rate.item(),
'disc/mean_reward_per_episode': disc_mean_reward_per_episode.item(),
})
return value, policy
def gail_ppo(env_fn, expert_data, discriminator, disc_opt, disc_iters, policy, value,
v_opt, v_iters, epochs, rollout_episodes, rollout_steps, gamma,
gae_lambda, clip_ratio, pi_opt, pi_iters, target_kl=None, max_grad_norm=None, wasserstein=False, wasserstein_c=None, logger=TerminalLogger(), callback=None, lr_schedulers=[]):
logger.add_scalar('expert/mean_episode_length', (~expert_data.dones).sum() / expert_data.states.shape[0])
logger.add_scalar('expert/mean_reward_per_episode', expert_data.rewards[~expert_data.dones].sum() / expert_data.states.shape[0])
for epoch in range(epochs):
hl_data, ll_data, collisions = rollout(env_fn, policy, rollout_episodes, rollout_steps)
generator_data = OptionsRollout(HLBuffer(*hl_data), Buffer(*ll_data))
generator_data.ll.actions += 0.1 * torch.randn_like(generator_data.ll.actions)
gen_mean_episode_length = (~generator_data.ll.dones).sum() / generator_data.ll.states.shape[0]
logger.add_scalar('gen/mean_episode_length', gen_mean_episode_length, epoch)
gen_mean_reward_per_episode = generator_data.hl.rewards[~generator_data.hl.dones].sum() / generator_data.hl.states.shape[0]
logger.add_scalar('gen/mean_reward_per_episode', gen_mean_reward_per_episode, epoch)
logger.add_scalar('gen/unsafe_probability_mass', policy.unsafe_probability_mass(policy(generator_data.hl.states[~generator_data.hl.dones], generator_data.hl.safe_actions[~generator_data.hl.dones])).mean(), epoch)
gen_collision_rate = (1. * collisions.any(-1)).mean()
logger.add_scalar('gen/collision_rate', gen_collision_rate, epoch)
discriminator, loss = train_discriminator(expert_data, generator_data.ll, discriminator, disc_opt, disc_iters, wasserstein, wasserstein_c)
if wasserstein:
generator_data.ll.rewards = discriminator(generator_data.ll.states, generator_data.ll.actions)
else:
generator_data.ll.rewards = -F.logsigmoid(discriminator(generator_data.ll.states, generator_data.ll.actions))
logger.add_scalar('disc/final_loss', loss, epoch)
disc_mean_reward_per_episode = generator_data.ll.rewards[~generator_data.ll.dones].sum() / generator_data.ll.states.shape[0]
logger.add_scalar('disc/mean_reward_per_episode', disc_mean_reward_per_episode, epoch)
#assert generator_data.ll.rewards.shape == generator_data.ll.dones.shape
generator_data.hl.rewards = torch.where(~generator_data.ll.dones, generator_data.ll.rewards, torch.tensor(0.)).sum(-1)
value, policy = ppo_step(value, policy, generator_data.hl.states, generator_data.hl.safe_actions, generator_data.hl.actions, generator_data.hl.rewards, generator_data.hl.dones, clip_ratio, gamma, gae_lambda, pi_opt, pi_iters, v_opt, v_iters, target_kl, max_grad_norm)
expert_data = roll_buffer(expert_data, shifts=-3, dims=0)
if callback is not None:
callback({
'epoch': epoch,
'value': value,
'policy': policy,
'gen/mean_episode_length': gen_mean_episode_length.item(),
'gen/mean_reward_per_episode': gen_mean_reward_per_episode.item(),
'gen/collision_rate': gen_collision_rate.item(),
'disc/mean_reward_per_episode': disc_mean_reward_per_episode.item(),
})
for lr_scheduler in lr_schedulers:
lr_scheduler.step()
return value, policy
def rollout(env_fn, policy, n_episodes, max_steps_per_episode):
env = env_fn(0)
states = torch.zeros(n_episodes, max_steps_per_episode + 1, *env.observation_space['observation'].shape)
safe_actions = torch.zeros(n_episodes, max_steps_per_episode + 1, *env.observation_space['safe_actions'].shape)
actions = torch.zeros(n_episodes, max_steps_per_episode + 1, *env.action_space.shape)
rewards = torch.zeros(n_episodes, max_steps_per_episode + 1)
dones = torch.ones(n_episodes, max_steps_per_episode + 1, dtype=bool)
collisions = torch.zeros(n_episodes, max_steps_per_episode, dtype=bool)
ll_states = torch.zeros(n_episodes, max_steps_per_episode, env.max_plan_length + 1, *env.observation_space['observation'].shape)
ll_actions = torch.zeros(n_episodes, max_steps_per_episode, env.max_plan_length + 1, *env.ll_action_space.shape)
ll_rewards = torch.zeros(n_episodes, max_steps_per_episode, env.max_plan_length + 1)
ll_dones = torch.ones(n_episodes, max_steps_per_episode, env.max_plan_length + 1, dtype=bool)
env = VecEnv(list(map(lambda i: (lambda: env_fn(i)), range(n_episodes))))
obs = env.reset()
states[:, 0] = torch.tensor(obs['observation']).clone().detach()
safe_actions[:, 0] = torch.tensor(obs['safe_actions']).clone().detach()
dones[:, 0] = False
for s in tqdm(range(max_steps_per_episode), 'Rollout'):
actions[:, s] = policy.sample(policy(states[:, s], safe_actions[:, s])).clone().detach()
clipped_actions = actions[:, s]
if isinstance(env.action_space, gym.spaces.Box):
clipped_actions = torch.clamp(clipped_actions, torch.from_numpy(env.action_space.low), torch.from_numpy(env.action_space.high))
o, r, d, info = env.step(clipped_actions)
states[:, s + 1] = torch.tensor(o['observation']).clone().detach()
safe_actions[:, s + 1] = torch.tensor(o['safe_actions']).clone().detach()
rewards[:, s] = torch.tensor(r).clone().detach()
dones[:, s + 1] = torch.tensor(d).clone().detach()
collisions[:, s] = torch.from_numpy(np.stack([
any(k['collision'] for k in i['ll']['infos']) for i in info
])).detach().clone()
ll_states[:, s] = torch.from_numpy(np.stack([i['ll']['observations'] for i in info])).clone().detach()
ll_actions[:, s] = torch.from_numpy(np.stack([i['ll']['actions'] for i in info])).clone().detach()
ll_rewards[:, s] = torch.from_numpy(np.stack([i['ll']['rewards'] for i in info])).clone().detach()
ll_dones[:, s] = torch.from_numpy(np.stack([i['ll']['plan_done'] for i in info])).clone().detach()
dones = dones.cumsum(1) > 0
states = states[:, :max_steps_per_episode]
safe_actions = safe_actions[:, :max_steps_per_episode]
actions = actions[:, :max_steps_per_episode]
rewards = rewards[:, :max_steps_per_episode]
dones = dones[:, :max_steps_per_episode]
return (states, safe_actions, actions, rewards, dones), (ll_states, ll_actions, ll_rewards, ll_dones), collisions
class SafeOptionsEnv(OptionsEnv):
def __init__(self, env, options, safe_actions_collision_method=None, abort_unsafe_collision_method=None):
super().__init__(env, options)
self.safe_actions_collision_method = safe_actions_collision_method
self.abort_unsafe_collision_method = abort_unsafe_collision_method
self.observation_space = gym.spaces.Dict({
'observation': self.observation_space,
'safe_actions': gym.spaces.Box(low=0., high=1., shape=(self.action_space.n,)),
})
def safe_actions(self):
if self.safe_actions_collision_method is None:
return np.ones(len(self.options), dtype=bool)
plans = [self.plan(o) for o in self.options]
plans = [np.pad(p, (0, self.max_plan_length - len(p)), constant_values=np.nan) for p in plans]
plans = np.stack(plans)
safe = feasible(self.env, plans, method=self.safe_actions_collision_method)
if not safe.any():
# action 0 is considered safe fallback
safe[0] = True
return safe
def reset(self, *args, **kwargs):
obs = super().reset(*args, **kwargs)
obs = {
'observation': obs,
'safe_actions': self.safe_actions(),
}
return obs
def step(self, action, render_mode=None):
obs, reward, done, info = super().step(action, render_mode)
obs = {
'observation': obs,
'safe_actions': self.safe_actions(),
}
return obs, reward, done, info
def execute_plan(self, obs, option, render_mode=None):
observations = np.zeros((self.max_plan_length + 1, *self.env.observation_space.shape))
actions = np.zeros((self.max_plan_length + 1, *self.ll_action_space.shape))
rewards = np.zeros((self.max_plan_length + 1,))
env_done = np.ones((self.max_plan_length + 1,), dtype=bool)
plan_done = np.ones((self.max_plan_length + 1,), dtype=bool)
infos = []
plan = self.plan(option)
observations[0] = obs
env_done[0] = False
for k, u in enumerate(plan):
plan_done[k] = False
o, r, d, i = self.env.step(u)
actions[k] = u
rewards[k] = r
env_done[k] = d
infos.append(i)
observations[k+1] = o
if render_mode is not None:
self.env.render(render_mode)
if d:
break
if self.abort_unsafe_collision_method is not None:
if not feasible(self.env, plan[k:], method=self.abort_unsafe_collision_method):
break
n_steps = k + 1
return observations, actions, rewards, env_done, plan_done, infos, n_steps
obs_min = np.array([
[-1000, -1000, 0, -np.pi, -1e-1, 0.],
[0, -np.pi, -20, -20, -np.pi, -1e-1],
[0, -np.pi, -20, -20, -np.pi, -1e-1],
[0, -np.pi, -20, -20, -np.pi, -1e-1],
[0, -np.pi, -20, -20, -np.pi, -1e-1],
[0, -np.pi, -20, -20, -np.pi, -1e-1],
]).reshape(-1)
obs_max = np.array([
[1000, 1000, 20, np.pi, 1e-1, 0.],
[50, np.pi, 20, 20, np.pi, 1e-1],
[50, np.pi, 20, 20, np.pi, 1e-1],
[50, np.pi, 20, 20, np.pi, 1e-1],
[50, np.pi, 20, 20, np.pi, 1e-1],
[50, np.pi, 20, 20, np.pi, 1e-1],
]).reshape(-1)
def NormalizedSafeOptionsEvalEnv(safe_actions_collision_method='circle', abort_unsafe_collision_method='circle', **kwargs):
return SafeOptionsEnv(Setobs(
TransformObservation(IntersimpleLidarFlatIncrementingAgent(
n_rays=5,
**kwargs,
), lambda obs: (obs - obs_min) / (obs_max - obs_min + 1e-10))
), options=[(0, 5), (1, 5), (2, 5), (4, 5), (6, 5), (8, 5), (10, 5)], safe_actions_collision_method=safe_actions_collision_method, abort_unsafe_collision_method=abort_unsafe_collision_method)
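# A minimal usage sketch (constructor kwargs depend on the local intersim setup
# and are omitted here): observations come back as a dict with the normalized
# lidar observation and the per-option safety mask.
#
#   env = NormalizedSafeOptionsEvalEnv()
#   obs = env.reset()
#   assert set(obs.keys()) == {'observation', 'safe_actions'}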
|
from datetime import datetime
from django.core.cache import cache
from django.conf import settings
class AlreadyLocked(Exception):
def __init__(self, lock):
self.lock_obj = lock
super(AlreadyLocked, self).__init__()
class EditLock(object):
"""
Using Django's cache system this class implements edit-locking for
Wiki pages.
    To acquire a lock just instantiate an ``EditLock`` object. If a lock
    for the resource at ``slug`` already exists, an ``AlreadyLocked`` exception
    is raised, which provides access to the existing lock via its ``lock_obj`` attribute.
To release a lock call the release() method on the lock object.
``duration`` is the lock period in seconds.
The lock itself holds a tuple containing the ``owner`` and the creation
time.
"""
def __init__(self, slug, duration, owner, cache_key_prefix=u"rcs.wiki:editlock"):
self.cache_key = "%s%s:%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX,
cache_key_prefix, slug)
lock = cache.get(self.cache_key)
if lock is not None:
# not locked by ourself?
if lock[0] != owner:
raise AlreadyLocked(lock)
else:
cache.set(self.cache_key, (owner, datetime.now()), duration)
def release(self):
cache.delete(self.cache_key)
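# A minimal usage sketch (slug, owner and duration are hypothetical):
#
#   try:
#       lock = EditLock(slug='front-page', duration=300, owner='alice')
#   except AlreadyLocked as exc:
#       owner, locked_since = exc.lock_obj   # (owner, creation time) tuple
#   else:
#       ...  # perform the edit
#       lock.release()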
|
from src.models.db import AggregateDatasetModel, AggregateModelData, AggregateBenchmarkData
class SummarizeDatasetRepo(object):
def __init__(self):
self.aggregateDatasetModel = AggregateDatasetModel()
self.aggregateModel = AggregateModelData()
self.aggregateBenchmark = AggregateBenchmarkData()
#processing requests based on the data type
    def aggregate(self, search_data):
        corpus_stats = []
        count = 0
        if search_data["type"] == "model":
            corpus_stats, count = self.aggregateModel.data_aggregator(search_data)
        elif search_data["type"] == "benchmark":
            corpus_stats, count = self.aggregateBenchmark.data_aggregator(search_data)
        else:
            corpus_stats, count = self.aggregateDatasetModel.data_aggregator(search_data)
        return corpus_stats, count
|
"""
This module contains functionality required for disconnected installation.
"""
import glob
import logging
import os
import tempfile
import yaml
from ocs_ci.framework import config
from ocs_ci.helpers.disconnected import get_oc_mirror_tool, get_opm_tool
from ocs_ci.ocs import constants
from ocs_ci.ocs.exceptions import NotFoundError
from ocs_ci.ocs.resources.catalog_source import CatalogSource, disable_default_sources
from ocs_ci.utility import templating
from ocs_ci.utility.utils import (
create_directory_path,
exec_cmd,
get_latest_ds_olm_tag,
get_ocp_version,
login_to_mirror_registry,
prepare_customized_pull_secret,
wait_for_machineconfigpool_status,
)
from ocs_ci.utility.version import (
get_semantic_ocp_running_version,
VERSION_4_10,
)
logger = logging.getLogger(__name__)
def get_csv_from_image(bundle_image):
"""
Extract clusterserviceversion.yaml file from operator bundle image.
Args:
bundle_image (str): OCS operator bundle image
Returns:
dict: loaded yaml from CSV file
"""
manifests_dir = os.path.join(
config.ENV_DATA["cluster_path"], constants.MANIFESTS_DIR
)
ocs_operator_csv_yaml = os.path.join(manifests_dir, constants.OCS_OPERATOR_CSV_YAML)
create_directory_path(manifests_dir)
with prepare_customized_pull_secret(bundle_image) as authfile_fo:
exec_cmd(
f"oc image extract --registry-config {authfile_fo.name} "
f"{bundle_image} --confirm "
f"--path /manifests/ocs-operator.clusterserviceversion.yaml:{manifests_dir}"
)
try:
with open(ocs_operator_csv_yaml) as f:
return yaml.safe_load(f)
except FileNotFoundError as err:
logger.error(f"File {ocs_operator_csv_yaml} does not exists ({err})")
raise
def mirror_images_from_mapping_file(mapping_file, icsp=None, ignore_image=None):
"""
Mirror images based on mapping.txt file.
Args:
mapping_file (str): path to mapping.txt file
        icsp (dict): ImageContentSourcePolicy used for mirroring (workaround for
            stage images, which point to a different registry than the one they
            are really hosted on)
ignore_image: image which should be ignored when applying icsp
(mirrored index image)
"""
if icsp:
# update mapping.txt file with urls updated based on provided
# imageContentSourcePolicy
with open(mapping_file) as mf:
mapping_file_content = []
for line in mf:
# exclude ignore_image
if ignore_image and ignore_image in line:
continue
# apply any matching policy to all lines from mapping file
for policy in icsp["spec"]["repositoryDigestMirrors"]:
# we use only first defined mirror for particular source,
# because we don't use any ICSP with more mirrors for one
# source and it will make the logic very complex and
# confusing
line = line.replace(policy["source"], policy["mirrors"][0])
mapping_file_content.append(line)
# write mapping file to disk
mapping_file = "_updated".join(os.path.splitext(mapping_file))
with open(mapping_file, "w") as f:
f.writelines(mapping_file_content)
# mirror images based on the updated mapping file
# ignore errors, because some of the images might be already mirrored
# via the `oc adm catalog mirror ...` command and not available on the
# mirror
pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")
exec_cmd(
f"oc image mirror --filter-by-os='.*' -f {mapping_file} "
f"--insecure --registry-config={pull_secret_path} "
"--max-per-registry=2 --continue-on-error=true --skip-missing=true",
timeout=3600,
ignore_error=True,
)
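# A sketch of the ICSP dict shape the `icsp` argument is expected to follow
# (registry names are hypothetical); only the first mirror per source is used:
#
#   icsp = {
#       "spec": {
#           "repositoryDigestMirrors": [
#               {"source": "registry.stage.redhat.io/odf4",
#                "mirrors": ["mirror-registry.example.com:5000/odf4"]},
#           ]
#       }
#   }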
def prune_and_mirror_index_image(
index_image, mirrored_index_image, packages, icsp=None
):
"""
Prune given index image and push it to mirror registry, mirror all related
images to mirror registry and create relevant imageContentSourcePolicy
This uses `opm index prune` command, which supports only sqlite-based
catalogs (<= OCP 4.10), for >= OCP 4.11 use `oc-mirror` tool implemented in
mirror_index_image_via_oc_mirror(...) function.
Args:
index_image (str): index image which will be pruned and mirrored
mirrored_index_image (str): mirrored index image which will be pushed to
mirror registry
packages (list): list of packages to keep
        icsp (dict): ImageContentSourcePolicy used for mirroring (workaround for
            stage images, which point to a different registry than the one they
            are really hosted on)
Returns:
str: path to generated catalogSource.yaml file
"""
get_opm_tool()
pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")
# prune an index image
logger.info(
f"Prune index image {index_image} -> {mirrored_index_image} "
f"(packages: {', '.join(packages)})"
)
cmd = (
f"opm index prune -f {index_image} "
f"-p {','.join(packages)} "
f"-t {mirrored_index_image}"
)
if config.DEPLOYMENT.get("opm_index_prune_binary_image"):
cmd += (
f" --binary-image {config.DEPLOYMENT.get('opm_index_prune_binary_image')}"
)
# opm tool doesn't have --authfile parameter, we have to supply auth
# file through env variable
os.environ["REGISTRY_AUTH_FILE"] = pull_secret_path
exec_cmd(cmd)
# login to mirror registry
login_to_mirror_registry(pull_secret_path)
# push pruned index image to mirror registry
logger.info(f"Push pruned index image to mirror registry: {mirrored_index_image}")
cmd = f"podman push --authfile {pull_secret_path} --tls-verify=false {mirrored_index_image}"
exec_cmd(cmd)
# mirror related images (this might take very long time)
logger.info(f"Mirror images related to index image: {mirrored_index_image}")
cmd = (
f"oc adm catalog mirror {mirrored_index_image} -a {pull_secret_path} --insecure "
f"{config.DEPLOYMENT['mirror_registry']} --index-filter-by-os='.*' --max-per-registry=2"
)
oc_acm_result = exec_cmd(cmd, timeout=7200)
for line in oc_acm_result.stdout.decode("utf-8").splitlines():
if "wrote mirroring manifests to" in line:
break
else:
raise NotFoundError(
"Manifests directory not printed to stdout of 'oc adm catalog mirror ...' command."
)
mirroring_manifests_dir = line.replace("wrote mirroring manifests to ", "")
logger.debug(f"Mirrored manifests directory: {mirroring_manifests_dir}")
if icsp:
# update mapping.txt file with urls updated based on provided
# imageContentSourcePolicy
mapping_file = os.path.join(
f"{mirroring_manifests_dir}",
"mapping.txt",
)
mirror_images_from_mapping_file(mapping_file, icsp, mirrored_index_image)
# create ImageContentSourcePolicy
icsp_file = os.path.join(
f"{mirroring_manifests_dir}",
"imageContentSourcePolicy.yaml",
)
# make icsp name unique - append run_id
with open(icsp_file) as f:
icsp_content = yaml.safe_load(f)
icsp_content["metadata"]["name"] += f"-{config.RUN['run_id']}"
with open(icsp_file, "w") as f:
yaml.dump(icsp_content, f)
exec_cmd(f"oc apply -f {icsp_file}")
wait_for_machineconfigpool_status("all")
cs_file = os.path.join(
f"{mirroring_manifests_dir}",
"catalogSource.yaml",
)
return cs_file
def mirror_index_image_via_oc_mirror(index_image, packages, icsp=None):
"""
Mirror all images required for ODF deployment and testing to mirror
registry via `oc-mirror` tool and create relevant imageContentSourcePolicy.
https://github.com/openshift/oc-mirror
Args:
index_image (str): index image which will be pruned and mirrored
packages (list): list of packages to keep
        icsp (dict): ImageContentSourcePolicy used for mirroring (workaround for
            stage images, which point to a different registry than the one they
            are really hosted on)
Returns:
str: mirrored index image
"""
get_oc_mirror_tool()
pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")
# login to mirror registry
login_to_mirror_registry(pull_secret_path)
# oc mirror tool doesn't have --authfile or similar parameter, we have to
# make the auth file available in the ~/.docker/config.json location
docker_config_file = "~/.docker/config.json"
if not os.path.exists(os.path.expanduser(docker_config_file)):
os.makedirs(os.path.expanduser("~/.docker/"), exist_ok=True)
os.symlink(pull_secret_path, os.path.expanduser(docker_config_file))
# prepare imageset-config.yaml file
imageset_config_data = templating.load_yaml(constants.OC_MIRROR_IMAGESET_CONFIG)
imageset_config_data["storageConfig"]["registry"][
"imageURL"
] = f"{config.DEPLOYMENT['mirror_registry']}/odf-qe-metadata:latest"
_packages = [{"name": package} for package in packages]
imageset_config_data["mirror"]["operators"].append(
{
"catalog": index_image,
"packages": _packages,
}
)
imageset_config_file = os.path.join(
config.ENV_DATA["cluster_path"],
f"imageset-config-{config.RUN['run_id']}.yaml",
)
templating.dump_data_to_temp_yaml(imageset_config_data, imageset_config_file)
# mirror required images
logger.info(
f"Mirror required images to mirror registry {config.DEPLOYMENT['mirror_registry']}"
)
cmd = (
f"oc mirror --config {imageset_config_file} "
f"docker://{config.DEPLOYMENT['mirror_registry']} "
"--dest-skip-tls --ignore-history"
)
exec_cmd(cmd, timeout=7200)
# look for manifests directory with Image mapping, CatalogSource and ICSP
# manifests
mirroring_manifests_dir = glob.glob("oc-mirror-workspace/results-*")
if not mirroring_manifests_dir:
raise NotFoundError(
"Manifests directory created by 'oc mirror ...' command not found."
)
mirroring_manifests_dir.sort(reverse=True)
mirroring_manifests_dir = mirroring_manifests_dir[0]
logger.debug(f"Mirrored manifests directory: {mirroring_manifests_dir}")
if icsp:
# update mapping.txt file with urls updated based on provided
# imageContentSourcePolicy
mapping_file = os.path.join(
f"{mirroring_manifests_dir}",
"mapping.txt",
)
mirror_images_from_mapping_file(mapping_file, icsp)
# create ImageContentSourcePolicy
icsp_file = os.path.join(
f"{mirroring_manifests_dir}",
"imageContentSourcePolicy.yaml",
)
# make icsp name unique - append run_id
with open(icsp_file) as f:
icsp_content = yaml.safe_load(f)
icsp_content["metadata"]["name"] = f"odf-{config.RUN['run_id']}"
with open(icsp_file, "w") as f:
yaml.dump(icsp_content, f)
exec_cmd(f"oc apply -f {icsp_file}")
wait_for_machineconfigpool_status("all")
# get mirrored index image url from prepared catalogSource file
cs_file = glob.glob(
os.path.join(
f"{mirroring_manifests_dir}",
"catalogSource-*.yaml",
)
)
if not cs_file:
raise NotFoundError(
"CatalogSource file not found in the '{mirroring_manifests_dir}'."
)
with open(cs_file[0]) as f:
cs_content = yaml.safe_load(f)
return cs_content["spec"]["image"]
def prepare_disconnected_ocs_deployment(upgrade=False):
"""
Prepare disconnected ocs deployment:
- mirror required images from redhat-operators
- get related images from OCS operator bundle csv
- mirror related images to mirror registry
- create imageContentSourcePolicy for the mirrored images
- disable the default OperatorSources
Args:
upgrade (bool): is this fresh installation or upgrade process
(default: False)
Returns:
str: mirrored OCS registry image prepared for disconnected installation
or None (for live deployment)
"""
if config.DEPLOYMENT.get("stage_rh_osbs"):
raise NotImplementedError(
"Disconnected installation from stage is not implemented!"
)
logger.info(
f"Prepare for disconnected OCS {'upgrade' if upgrade else 'installation'}"
)
# Disable the default OperatorSources
disable_default_sources()
pull_secret_path = os.path.join(constants.TOP_DIR, "data", "pull-secret")
# login to mirror registry
login_to_mirror_registry(pull_secret_path)
# prepare main index image (redhat-operators-index for live deployment or
# ocs-registry image for unreleased version)
if (not upgrade and config.DEPLOYMENT.get("live_deployment")) or (
upgrade
and config.DEPLOYMENT.get("live_deployment")
and config.UPGRADE.get("upgrade_in_current_source", False)
):
index_image = (
f"{config.DEPLOYMENT['cs_redhat_operators_image']}:v{get_ocp_version()}"
)
mirrored_index_image = (
f"{config.DEPLOYMENT['mirror_registry']}/{constants.MIRRORED_INDEX_IMAGE_NAMESPACE}/"
f"{constants.MIRRORED_INDEX_IMAGE_NAME}:v{get_ocp_version()}"
)
else:
if upgrade:
index_image = config.UPGRADE.get("upgrade_ocs_registry_image", "")
else:
index_image = config.DEPLOYMENT.get("ocs_registry_image", "")
ocs_registry_image_and_tag = index_image.rsplit(":", 1)
image_tag = (
ocs_registry_image_and_tag[1]
if len(ocs_registry_image_and_tag) == 2
else None
)
if not image_tag:
image_tag = get_latest_ds_olm_tag(
upgrade=False if upgrade else config.UPGRADE.get("upgrade", False),
latest_tag=config.DEPLOYMENT.get("default_latest_tag", "latest"),
)
index_image = f"{config.DEPLOYMENT['default_ocs_registry_image'].split(':')[0]}:{image_tag}"
mirrored_index_image = f"{config.DEPLOYMENT['mirror_registry']}{index_image[index_image.index('/'):]}"
logger.debug(f"index_image: {index_image}")
if get_semantic_ocp_running_version() <= VERSION_4_10:
# For OCP 4.10 and older, we have to use `opm index prune ...` and
# `oc adm catalog mirror ...` approach
prune_and_mirror_index_image(
index_image,
mirrored_index_image,
constants.DISCON_CL_REQUIRED_PACKAGES,
)
else:
# For OCP 4.11 and higher, we have to use new tool `oc-mirror`, because
# the `opm index prune ...` doesn't support file-based catalog image
# The `oc-mirror` tool is a technical preview in OCP 4.10, so we might
# try to use it also there.
# https://cloud.redhat.com/blog/how-oc-mirror-will-help-you-reduce-container-management-complexity
mirrored_index_image = mirror_index_image_via_oc_mirror(
index_image,
constants.DISCON_CL_REQUIRED_PACKAGES_PER_ODF_VERSION[get_ocp_version()],
)
logger.debug(f"mirrored_index_image: {mirrored_index_image}")
# in case of live deployment, we have to create the mirrored
# redhat-operators catalogsource
if config.DEPLOYMENT.get("live_deployment"):
# create redhat-operators CatalogSource
catalog_source_data = templating.load_yaml(constants.CATALOG_SOURCE_YAML)
catalog_source_manifest = tempfile.NamedTemporaryFile(
mode="w+", prefix="catalog_source_manifest", delete=False
)
catalog_source_data["spec"]["image"] = f"{mirrored_index_image}"
catalog_source_data["metadata"]["name"] = constants.OPERATOR_CATALOG_SOURCE_NAME
catalog_source_data["spec"]["displayName"] = "Red Hat Operators - Mirrored"
# remove ocs-operator-internal label
catalog_source_data["metadata"]["labels"].pop("ocs-operator-internal", None)
templating.dump_data_to_temp_yaml(
catalog_source_data, catalog_source_manifest.name
)
exec_cmd(
f"oc {'replace' if upgrade else 'apply'} -f {catalog_source_manifest.name}"
)
catalog_source = CatalogSource(
resource_name=constants.OPERATOR_CATALOG_SOURCE_NAME,
namespace=constants.MARKETPLACE_NAMESPACE,
)
# Wait for catalog source is ready
catalog_source.wait_for_state("READY")
if (not upgrade and config.DEPLOYMENT.get("live_deployment")) or (
upgrade
and config.DEPLOYMENT.get("live_deployment")
and config.UPGRADE.get("upgrade_in_current_source", False)
):
return None
else:
return mirrored_index_image
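# A minimal usage sketch (assumes the ocs-ci framework config has already been
# initialized with mirror registry and pull-secret details):
#
#   mirrored_image = prepare_disconnected_ocs_deployment()
#   # Returns None for live deployments (the mirrored catalog source is enough);
#   # otherwise the returned image is used as the OCS registry image.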
|
# Defines a day aggregator object
import datetime
"""Node and Leaf classes that help to build a date tree structure; could be expanded easily."""
class MainContainer(object):
"""
Container class holding all derived instances
"""
def __init__(self, date=None, _type="", sensor="", _id="DummyID", name="container"):
self.id = _id
self.type = _type
self.sensor = sensor
self.children = []
self.date = self.correct_date(date)
self.value = None
self.min_value = None
self.max_value = None
self.actual_child = 0
self.name = name
def __str__(self):
return "Date: %s, Type: %s, Sensor: %s" % (str(self.date), self.type, self.sensor)
def __eq__(self, other):
return True
def __add__(self, other):
return self.value + other.value
def __iadd__(self, other):
self.value += other.value
return self
#Recursively calculates the value
def aggregate(self):
value = 0
for child in self.children:
value += child.aggregate()
result = float(value) / len(self.children)
self.value = result
return result
def correct_date(self, date):
return date
def get_value(self):
if self.value is not None:
return self.value
else:
self.aggregate()
return self.value
def get_min_value(self):
if self.min_value is not None:
return self.min_value
else:
instance_min = float("inf")
for child in self.children:
local_min = child.get_min_value()
if local_min < instance_min:
instance_min = local_min
self.min_value = instance_min
return instance_min
def get_max_value(self):
if self.max_value is not None:
return self.max_value
else:
instance_max = 0
for child in self.children:
local_max = child.get_max_value()
if local_max > instance_max:
instance_max = local_max
self.max_value = instance_max
return instance_max
def add_child(self, timestamp):
self.children.append(Day(date=timestamp))
self.actual_child += 1
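# A minimal usage sketch (timestamp and values are hypothetical): build the date
# tree top-down and append raw numeric values at the Minute leaves before
# aggregating.
#
#   now = datetime.datetime(2020, 1, 1, 12, 30)
#   root = MainContainer(date=now, _type="temperature", sensor="sensor-1")
#   root.add_child(now)                                   # Day
#   root.children[0].add_child(now)                       # Hour
#   root.children[0].children[0].add_child(now)           # Minute
#   root.children[0].children[0].children[0].add_child([21.5, 22.0])
#   print(root.get_value(), root.get_min_value(), root.get_max_value())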
class Day(MainContainer):
"""Day class, representing a day instance"""
def __init__(self, date):
super().__init__(date=date)
self.name = "day"
# Magic methods, allowing comparison of datetime objects in the resolution of the Container items
def __eq__(self, other):
other_date = self.correct_date(other)
if self.date == other_date:
return True
else:
return False
def __lt__(self, other):
other_date = self.correct_date(other)
if self.date < other_date:
return True
else:
return False
def __gt__(self, other):
other_date = self.correct_date(other)
if self.date > other_date:
return True
else:
return False
def correct_date(self, date):
date = date.replace(hour=0, minute=0)
return date
def add_child(self, timestamp):
self.children.append(Hour(date=timestamp))
self.actual_child += 1
class Hour(Day):
def __init__(self, date):
super().__init__(date=date)
self.name = "hour"
def correct_date(self, date):
date = date.replace(minute=0)
return date
def add_child(self, timestamp):
self.children.append(Minute(date=timestamp))
self.actual_child += 1
class Minute(Hour):
"""Leaf node class, overrides several methods as a result of its leaf node function"""
def __init__(self, date):
super().__init__(date=date)
self.name = "minute"
def aggregate(self):
try:
value = 0
for child in self.children:
value += child
result = float(value) / len(self.children)
self.value = result
return result
except (ValueError, TypeError):
raise AssertionError('Extracted Values are not numeric, cannot calculate mean')
def get_min_value(self):
try:
instance_min = min(self.children)
self.min_value = instance_min
return instance_min
except (ValueError, TypeError):
raise AssertionError('Extracted Values are not numeric, cannot calculate minimum')
def get_max_value(self):
try:
instance_max = max(self.children)
self.max_value = instance_max
return instance_max
except (ValueError, TypeError):
raise AssertionError('Extracted Values are not numeric, cannot calculate maximum')
def get_raw_values(self):
return self.children
def correct_date(self, date):
return date
def add_child(self, value):
for i in value:
self.children.append(i) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Docstring
"""Butyi's Python3 Auto-DJ RadioPlayer. It plays programme of my radio station."""
# Authorship information
__author__ = "Janos BENCSIK"
__copyright__ = "Copyright 2020, butyi.hu"
__credits__ = "James Robert (jiaaro) for pydub (https://github.com/jiaaro/pydub)"
__license__ = "GPL"
__version__ = "0.0.5"
__maintainer__ = "Janos BENCSIK"
__email__ = "[email protected]"
__status__ = "Prototype"
# Import section
import time, glob, random, os, threading, datetime, sys
from pydub import AudioSegment
from pydub.playback import play
from threading import Thread
from threading import Event
import configparser
import ftplib, io
# Subfunctions, classes
class MusicPlayer(Thread):
def __init__(self, song):
Thread.__init__(self)
self.song = song
self._stopper = Event()
self.setName('MusicThread')
def run(self):
play(self.song)
def stop(self):
self._stopper.set()
def UpdateSongInfo():
print("\n\n" + infotext + "\n")
# Write infotext into file
if 0 < len(TextOutFile): # If song info is configured to write into a file too
if 0 < len(TextOutFTPhost) and 0 < len(TextOutFTPuser) and 0 < len(TextOutFTPpass): # FTP write is configured
try:
ftpsession = ftplib.FTP()
ftpsession.connect(TextOutFTPhost,TextOutFTPport)
ftpsession.login(TextOutFTPuser,TextOutFTPpass)
if 0 < len(TextOutFTPpath):
ftpsession.cwd(TextOutFTPpath)
ftpsession.storbinary('STOR '+TextOutFile,io.BytesIO(bytearray(infotext,'utf-8'))) # Update file content
ftpsession.quit() # Close FTP
except ftplib.all_errors as e:
if 0 < len(ErrLogFile): # Log file into history
with open(ErrLogFile, 'a+') as f:
f.write(str(datetime.datetime.today())+" -> ERROR! FTP update failed: "+str(e)+"\r\n")
else: # Simple local filesystem write
with open(TextOutFile, 'w') as f:
f.write(infotext)
# define global variables
TextOutFile = "" # Default path if not defined
HistoryFile = "" # Default path if not defined
ErrLogFile = "" # Default path if not defined
TextOutFTPhost = ""
TextOutFTPport = 21
TextOutFTPpath = ""
TextOutFTPuser = ""
TextOutFTPpass = ""
Programme = "";
Paths = [] # Empty list for paths of songs
Jingles = [] # Empty list for jingles
JinglePath = ""
JinglePeriod = 15
Songs = [] # Empty list for songs
CurrentProgramme = "Not yet read in";
SongName = "Jingle.mp3"
CurrentSong = False
NextSong = False
JingleOverlap = 1 # Overlap of jingle and following song in secs
RecentlyPlayed = [] # Empty list for recently played songs to prevent soon repeat
LastJingleTimestamp = 0 # Start with a jingle
GaindB = 0 # Increase (or decrease) volume a bit; may be needed for direct USB connection of the transmitter board
TargetGain = 0 # dynamic gain, different for each song
Normalize = False
LowPassFilterHz = 0
Artists = [] # Empty list for Artists
DebugConfigRead = False # Set True if you want to debug reading and evaluation of config.ini
ProgrammeCheckDateTime = datetime.datetime.today()
CurrentSongLength = 0
ProgrammeStartJingleRequested = False
CfgPath = os.path.dirname(sys.argv[0])+"/config.ini"
if not os.path.exists(CfgPath):
print("ERROR! Cannot open ini file "+CfgPath)
exit(1)
#
# CrossFade timings:
#
# volume
# 100% ^-----oldsong--\ |--newsong-----
# | \|
# | |\
# | | \
# 0% | | \------.
# |--------------------------------> time
# End of old song |
# Start of new song |
# DropEnd |------|
# FadeOut |-------|
# Overlap |----|
#
FadeOut = 8
DropEnd = 0
Overlap = 6
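# Worked example (sketch, using the defaults above): a 200 s song with DropEnd=0
# keeps its full length, is faded out over its last FadeOut=8 s, and the next
# song is started Overlap=6 s before that end, i.e. at t = 200 - 0 - 6 = 194 s.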
# Start playing
if __name__ == '__main__':
# Start stopwatch to measure below code execution
start = time.time()
while True: # Play forever (exit: Ctrl+C)
        # Put the programme name into the info text here, because it may have changed for the next song
        infotext = CurrentProgramme
        # Calculate the start of the next song; this moment is used to check the current programme
if CurrentSong != False:
CurrentSongLength = (len(CurrentSong)/1000) - Overlap
ProgrammeCheckDateTime = datetime.datetime.today() + datetime.timedelta(seconds=CurrentSongLength)
# Read config to know which programme to be played
c = configparser.ConfigParser(inline_comment_prefixes=';')
c.read(CfgPath)
for section in c._sections :
if DebugConfigRead:
print("Section "+section)
if section == "Settings":
if c.has_option(section, 'textoutfile'):
TextOutFile = c.get(section, 'textoutfile')
if c.has_option(section, 'historyfile'):
HistoryFile = c.get(section, 'historyfile')
if c.has_option(section, 'errlogfile'):
ErrLogFile = c.get(section, 'errlogfile')
if c.has_option(section, 'textoutftphost'): TextOutFTPhost = c.get(section, 'textoutftphost')
if c.has_option(section, 'textoutftpport'):
TextOutFTPport = c.getint(section, 'textoutftpport')
if c.has_option(section, 'textoutftppath'):
TextOutFTPpath = c.get(section, 'textoutftppath')
if c.has_option(section, 'textoutftpuser'):
TextOutFTPuser = c.get(section, 'textoutftpuser')
if c.has_option(section, 'textoutftppass'):
TextOutFTPpass = c.get(section, 'textoutftppass')
if c.has_option(section, 'lowpassfilterhz'):
LowPassFilterHz = c.getint(section, 'lowpassfilterhz')
if c.has_option(section, 'gaindb'):
GaindB = c.getint(section, 'gaindb')
if c.has_option(section, 'normalize'):
Normalize = True
continue
if c.has_option(section, 'months'):
if str(ProgrammeCheckDateTime.month) not in c.get(section, 'months').split():
continue
if c.has_option(section, 'days'):
if str(ProgrammeCheckDateTime.day) not in c.get(section, 'days').split():
continue
if c.has_option(section, 'weekdays'):
if str(ProgrammeCheckDateTime.weekday()) not in c.get(section, 'weekdays').split():
continue
if c.has_option(section, 'hours'):
if str(ProgrammeCheckDateTime.time().hour) not in c.get(section, 'hours').split():
continue
if not c.has_option(section, 'path1' ):
continue
Programme = section;
del Paths[:];
for x in range(1,10):
if c.has_option(section, 'path'+str(x) ):
Paths.append( c.get(section, 'path'+str(x) ) )
if c.has_option(section, 'fadeout'):
FadeOut = c.getint(section, 'fadeout')
if c.has_option(section, 'dropend'):
DropEnd = c.getint(section, 'dropend')
if c.has_option(section, 'overlap'):
Overlap = c.getint(section, 'overlap')
if c.has_option(section, 'jinglepath'):
JinglePath = c.get(section, 'jinglepath')
if c.has_option(section, 'jingleperiod'):
JinglePeriod = c.getint(section, 'jingleperiod')
if c.has_option(section, 'jingleoverlap'):
JingleOverlap = c.getint(section, 'jingleoverlap')
if DebugConfigRead:
print(" Program: "+Programme)
print(" Paths: "+str(Paths))
print(" DropEnd: "+str(DropEnd))
print(" FadeOut: "+str(FadeOut))
print(" Overlap: "+str(Overlap))
print(" JinglePath: "+JinglePath)
print(" JinglePeriod: "+str(JinglePeriod))
if Programme != CurrentProgramme: # When programme changed
CurrentProgramme = Programme
if JinglePeriod == 1:
ProgrammeStartJingleRequested = True
# Read jingles in
if 0 < len(JinglePath):
for cp, folders, files in os.walk(JinglePath):
for fi in files:
if fi.endswith(".mp3"):
Jingles.append(os.path.join(cp, fi))
# Clear list for songs
del Songs[:]
# Go through all defined Paths to parse songs
for Path in Paths:
# Recursive walk in folder
#Songs = glob.glob("**/*.mp3", recursive=True) # for <= Python 3.5
for cp, folders, files in os.walk(Path):
for fi in files:
if fi.endswith(".mp3"):
Songs.append(os.path.join(cp, fi))
            # Shrink the recently-played list when switching to a folder that contains fewer songs than before
while (len(Songs)/2) < len(RecentlyPlayed): # If list is longer than half of songs
RecentlyPlayed.pop(0) # Drop oldest elements
# Update infotext
infotext += "\n" + os.path.basename(SongName)[:-4]
if CurrentSong != False:
if 0 < len(HistoryFile) and SongName != "": # Log file into history
with open(HistoryFile, 'a+') as f:
f.write(str(datetime.datetime.today())+" -> "+os.path.basename(SongName)[:-4]+"\r\n")
RecentlyPlayed.append(os.path.basename(SongName)) # Add now playing
if (len(Songs)/2) < len(RecentlyPlayed): # If list is full
RecentlyPlayed.pop(0) # Drop oldest element
while True: # Search a song not in RecentlyPlayed list
SongName = Songs[random.randrange(0,len(Songs))]
if os.path.basename(SongName) not in RecentlyPlayed:
                # Ensure we do not play two consecutive songs from the same artist
                NewArtists = os.path.basename(SongName).split(" - ")[0].split(" Ft. ")
                Common = set(Artists) & set(NewArtists) # artists shared with the previous song
                if 0 == len(Common): # If the artist is different
                    Artists = NewArtists # Save this artist(s) for the next check
                    break # Found the next song
# Continue to prepare info text
infotext += "\n" + os.path.basename(SongName)[:-4]
infotext += "\n" + str(int(CurrentSongLength)) + "\n"
if CurrentSong != False:
# Write infotext to stdout or FTP
UpdateSongInfo()
# Pre-load mp3 to eliminate delay
try:
NextSong = AudioSegment.from_mp3(SongName) # Load song
except:
if 0 < len(ErrLogFile): # Log file into history
with open(ErrLogFile, 'a+') as f:
f.write(str(datetime.datetime.today())+" -> ERROR! Cannot open file: '"+SongName+"'\r\n")
time.sleep(10); # To prevent the log file become large too fast
continue
if 0 < DropEnd:
NextSong = NextSong[:(len(NextSong)-(DropEnd*1000))] # drop end of song
NextSong = NextSong.fade_out(FadeOut*1000) # Fade out at end
# Cut high frequency (from 12 kHz) to not disturb 19kHz pilot signal.
        # This is slow and causes high resource usage for 10-20 s per song on my laptop.
        # I am afraid it will take even more time on smaller hardware like a Raspberry Pi.
# So, instead I propose to prepare all mp3 files with some low pass filter.
if 0 < LowPassFilterHz:
NextSong = NextSong.low_pass_filter(LowPassFilterHz)
TargetGain = GaindB
if Normalize:
TargetGain -= NextSong.dBFS;
if 0 != TargetGain:
NextSong = NextSong.apply_gain(TargetGain)
# Wait till start of next song
if CurrentSong != False:
SleepTime = (len(CurrentSong)/1000) - Overlap - int((time.time()-start))
if 0 < SleepTime:
time.sleep(SleepTime)
        # If there has been no jingle for JinglePeriod minutes, play one before the next song
if 0 < len(Jingles):
if (1 < JinglePeriod and (LastJingleTimestamp+(60*JinglePeriod)) < int(time.time())) or ProgrammeStartJingleRequested:
ProgrammeStartJingleRequested = False
rnd = int(time.time()) % len(Jingles)
jin = Jingles[rnd]; # Choose a jingle
if 0 < len(HistoryFile): # Log file into history
with open(HistoryFile, 'a+') as f:
f.write(str(datetime.datetime.today())+" -> "+os.path.basename(jin)[:-4]+"\r\n")
infotext = CurrentProgramme
infotext += "\n" + os.path.basename(jin)[:-4]
infotext += "\n" + os.path.basename(SongName)[:-4]
try:
                    jin = AudioSegment.from_mp3(jin) # Load the chosen jingle
except:
if 0 < len(ErrLogFile): # Log file into history
with open(ErrLogFile, 'a+') as f:
f.write(str(datetime.datetime.today())+" -> ERROR! Cannot open file: '"+jin+"'\r\n")
time.sleep(10); # To prevent the log file become large too fast
jin = False
if False != jin:
infotext += "\n" + str(int(len(jin)/1000)) + "\n"
UpdateSongInfo()
TargetGain = GaindB
if Normalize:
TargetGain -= jin.dBFS;
TargetGain -= 3 # Be a bit less loud than the music
if 0 != TargetGain:
jin = jin.apply_gain(TargetGain)
MusicPlayer(jin).start() # Play jingle in a separate thread
time.sleep((len(jin)/1000)-JingleOverlap) # wait to finish the jingle
LastJingleTimestamp = time.time()
# Start stopwatch to measure below code execution
start = time.time()
# Start playing the next song in a separate thread
CurrentSong = NextSong
MusicPlayer(CurrentSong).start()
|
import DBLib as db
exit = False
while exit != True:
print ("\nCurrent database actions:\n-Add Entry = Adds a new entry to database\n-Delete Entry = Deletes an entry from the database\n-Show Entries = Shows the whole entries")
answer = input("\nDatabase Action: ")
if answer == 'Add Entry':
uName = input("\nUser Name: ")
uDiscriminator = input("\nUser Discriminator: ")
uCurrency = input("\nUser Currency: ")
msgCount = input("\nMessage Count: ")
db.saveEntry(uName,uDiscriminator,uCurrency,msgCount)
if answer == 'Delete Entry':
uNum = input("\nUser Number: ")
db.deleteEntry(uNum)
if answer == 'len':
message = input("\nMessage: ")
print (len(message))
if answer == 'userData':
print(db.getUserData('9600'))
if answer == 'Test Mode':
db.addExp('9600', 120)
print ("Done")
if answer == 'Show Entries':
print(db.getEntries())
if answer == "Exit":
exit = True
|
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import uuid
import ddt
import falcon
import mock
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from testtools import matchers
from zaqar import tests as testing
from zaqar.tests.unit.transport.wsgi import base
from zaqar.transport import validation
@ddt.ddt
class TestMessagesMongoDB(base.V2Base):
config_file = 'wsgi_mongodb.conf'
@testing.requires_mongodb
def setUp(self):
super(TestMessagesMongoDB, self).setUp()
self.default_message_ttl = self.boot.transport._defaults.message_ttl
if self.conf.pooling:
uri = self.mongodb_url
for i in range(4):
db_name = "zaqar_test_pools_" + str(i)
# NOTE(dynarro): we need to create a unique uri.
uri = "%s/%s" % (uri, db_name)
options = {'database': db_name}
doc = {'weight': 100, 'uri': uri, 'options': options}
self.simulate_put(self.url_prefix + '/pools/' + str(i),
body=jsonutils.dumps(doc))
self.assertEqual(falcon.HTTP_201, self.srmock.status)
self.project_id = '7e55e1a7e'
self.headers.update({
'Client-ID': str(uuid.uuid4()),
'X-Project-ID': self.project_id
})
# TODO(kgriffs): Add support in self.simulate_* for a "base path"
# so that we don't have to concatenate against self.url_prefix
# all over the place.
self.queue_path = self.url_prefix + '/queues/fizbit'
self.messages_path = self.queue_path + '/messages'
doc = '{"_ttl": 60}'
self.simulate_put(self.queue_path, body=doc, headers=self.headers)
def tearDown(self):
self.simulate_delete(self.queue_path, headers=self.headers)
if self.conf.pooling:
for i in range(4):
self.simulate_delete(self.url_prefix + '/pools/' + str(i),
headers=self.headers)
super(TestMessagesMongoDB, self).tearDown()
def test_name_restrictions(self):
sample_messages = [
{'body': {'key': 'value'}, 'ttl': 200},
]
messages_path = self.url_prefix + '/queues/%s/messages'
sample_doc = jsonutils.dumps({'messages': sample_messages})
self.simulate_post(messages_path % 'Nice-Boat_2',
body=sample_doc, headers=self.headers)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
self.simulate_post(messages_path % 'Nice-Bo@t',
body=sample_doc, headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.simulate_post(messages_path % ('_niceboat' * 8),
body=sample_doc, headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def _test_post(self, sample_messages):
sample_doc = jsonutils.dumps({'messages': sample_messages})
result = self.simulate_post(self.messages_path,
body=sample_doc, headers=self.headers)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
result_doc = jsonutils.loads(result[0])
msg_ids = self._get_msg_ids(self.srmock.headers_dict)
self.assertEqual(len(sample_messages), len(msg_ids))
expected_resources = [six.text_type(self.messages_path + '/' + id)
for id in msg_ids]
self.assertEqual(expected_resources, result_doc['resources'])
# NOTE(kgriffs): As of v1.1, "partial" is no longer given
# in the response document.
self.assertNotIn('partial', result_doc)
self.assertEqual(len(sample_messages), len(msg_ids))
lookup = dict([(m['ttl'], m['body']) for m in sample_messages])
# Test GET on the message resource directly
# NOTE(cpp-cabrera): force the passing of time to age a message
timeutils_utcnow = 'oslo_utils.timeutils.utcnow'
now = timeutils.utcnow() + datetime.timedelta(seconds=10)
with mock.patch(timeutils_utcnow) as mock_utcnow:
mock_utcnow.return_value = now
for msg_id in msg_ids:
message_uri = self.messages_path + '/' + msg_id
headers = self.headers.copy()
headers['X-Project-ID'] = '777777'
# Wrong project ID
self.simulate_get(message_uri, headers=headers)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
# Correct project ID
result = self.simulate_get(message_uri, headers=self.headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# Check message properties
message = jsonutils.loads(result[0])
self.assertEqual(message_uri, message['href'])
self.assertEqual(lookup[message['ttl']], message['body'])
self.assertEqual(msg_id, message['id'])
# no negative age
# NOTE(cpp-cabrera): testtools lacks GreaterThanEqual on py26
self.assertThat(message['age'],
matchers.GreaterThan(-1))
# Test bulk GET
query_string = 'ids=' + ','.join(msg_ids)
result = self.simulate_get(self.messages_path,
query_string=query_string,
headers=self.headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
result_doc = jsonutils.loads(result[0])
expected_ttls = set(m['ttl'] for m in sample_messages)
actual_ttls = set(m['ttl'] for m in result_doc['messages'])
self.assertFalse(expected_ttls - actual_ttls)
actual_ids = set(m['id'] for m in result_doc['messages'])
self.assertFalse(set(msg_ids) - actual_ids)
def test_exceeded_payloads(self):
# Get a valid message id
self._post_messages(self.messages_path)
msg_id = self._get_msg_id(self.srmock.headers_dict)
# Bulk GET restriction
query_string = 'ids=' + ','.join([msg_id] * 21)
self.simulate_get(self.messages_path,
query_string=query_string, headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
# Listing restriction
self.simulate_get(self.messages_path,
query_string='limit=21',
headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
# Bulk deletion restriction
query_string = 'ids=' + ','.join([msg_id] * 22)
self.simulate_delete(self.messages_path,
query_string=query_string, headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_post_single(self):
sample_messages = [
{'body': {'key': 'value'}, 'ttl': 200},
]
self._test_post(sample_messages)
def test_post_multiple(self):
sample_messages = [
{'body': 239, 'ttl': 100},
{'body': {'key': 'value'}, 'ttl': 200},
{'body': [1, 3], 'ttl': 300},
]
self._test_post(sample_messages)
def test_post_optional_ttl(self):
sample_messages = {
'messages': [
{'body': 239},
{'body': {'key': 'value'}, 'ttl': 200},
],
}
# Manually check default TTL is max from config
sample_doc = jsonutils.dumps(sample_messages)
result = self.simulate_post(self.messages_path,
body=sample_doc, headers=self.headers)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
result_doc = jsonutils.loads(result[0])
href = result_doc['resources'][0]
result = self.simulate_get(href, headers=self.headers)
message = jsonutils.loads(result[0])
self.assertEqual(self.default_message_ttl, message['ttl'])
def test_post_to_non_ascii_queue(self):
# NOTE(kgriffs): This test verifies that routes with
# embedded queue name params go through the validation
# hook, regardless of the target resource.
path = self.url_prefix + u'/queues/non-ascii-n\u0153me/messages'
if six.PY2:
path = path.encode('utf-8')
self._post_messages(path)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_post_with_long_queue_name(self):
# NOTE(kgriffs): This test verifies that routes with
# embedded queue name params go through the validation
# hook, regardless of the target resource.
queues_path = self.url_prefix + '/queues/'
game_title = 'v' * validation.QUEUE_NAME_MAX_LEN
self.addCleanup(
self.simulate_delete, queues_path + game_title,
headers=self.headers)
self._post_messages(queues_path + game_title + '/messages')
self.assertEqual(falcon.HTTP_201, self.srmock.status)
game_title += 'v'
self._post_messages(queues_path + game_title + '/messages')
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_post_to_missing_queue(self):
self.addCleanup(
self.simulate_delete, self.url_prefix + '/queues/nonexistent',
headers=self.headers)
self._post_messages(self.url_prefix + '/queues/nonexistent/messages')
self.assertEqual(falcon.HTTP_201, self.srmock.status)
def test_post_using_queue_default_message_ttl(self):
queue_path = self.url_prefix + '/queues/test_queue1'
messages_path = queue_path + '/messages'
doc = '{"_default_message_ttl": 999}'
self.simulate_put(queue_path, body=doc, headers=self.headers)
self.addCleanup(self.simulate_delete, queue_path, headers=self.headers)
sample_messages = {
'messages': [
{'body': {'key': 'value'}},
],
}
sample_doc = jsonutils.dumps(sample_messages)
result = self.simulate_post(messages_path,
body=sample_doc, headers=self.headers)
result_doc = jsonutils.loads(result[0])
href = result_doc['resources'][0]
result = self.simulate_get(href, headers=self.headers)
message = jsonutils.loads(result[0])
self.assertEqual(999, message['ttl'])
def test_post_using_queue_max_messages_post_size(self):
queue_path = self.url_prefix + '/queues/test_queue2'
messages_path = queue_path + '/messages'
doc = '{"_max_messages_post_size": 1023}'
self.simulate_put(queue_path, body=doc, headers=self.headers)
self.addCleanup(self.simulate_delete, queue_path, headers=self.headers)
sample_messages = {
'messages': [
{'body': {'key': 'a' * 1204}},
],
}
sample_doc = jsonutils.dumps(sample_messages)
self.simulate_post(messages_path,
body=sample_doc, headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_get_from_missing_queue(self):
body = self.simulate_get(self.url_prefix +
'/queues/nonexistent/messages',
headers=self.headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self._empty_message_list(body)
@ddt.data('', '0xdeadbeef', '550893e0-2b6e-11e3-835a-5cf9dd72369')
def test_bad_client_id(self, text_id):
self.simulate_post(self.queue_path + '/messages',
body='{"ttl": 60, "body": ""}',
headers={'Client-ID': text_id})
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.simulate_get(self.queue_path + '/messages',
query_string='limit=3&echo=true',
headers={'Client-ID': text_id})
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(None, '[', '[]', '{}', '.')
def test_post_bad_message(self, document):
self.simulate_post(self.queue_path + '/messages',
body=document,
headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data(-1, 59, 1209601)
def test_unacceptable_ttl(self, ttl):
doc = {'messages': [{'ttl': ttl, 'body': None}]}
self.simulate_post(self.queue_path + '/messages',
body=jsonutils.dumps(doc),
headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_exceeded_message_posting(self):
# Total (raw request) size
doc = {'messages': [{'body': "some body", 'ttl': 100}] * 20}
body = jsonutils.dumps(doc, indent=4)
max_len = self.transport_cfg.max_messages_post_size
long_body = body + (' ' * (max_len - len(body) + 1))
self.simulate_post(self.queue_path + '/messages',
body=long_body,
headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
@ddt.data('{"overflow": 9223372036854775808}',
'{"underflow": -9223372036854775809}')
def test_unsupported_json(self, document):
self.simulate_post(self.queue_path + '/messages',
body=document,
headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_delete(self):
self._post_messages(self.messages_path)
msg_id = self._get_msg_id(self.srmock.headers_dict)
target = self.messages_path + '/' + msg_id
self.simulate_get(target, headers=self.headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self.simulate_delete(target, headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
self.simulate_get(target, headers=self.headers)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
# Safe to delete non-existing ones
self.simulate_delete(target, headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
def test_bulk_delete(self):
path = self.queue_path + '/messages'
self._post_messages(path, repeat=5)
[target, params] = self.srmock.headers_dict['location'].split('?')
# Deleting the whole collection is denied
self.simulate_delete(path, headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.simulate_delete(target, query_string=params, headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
self.simulate_get(target, query_string=params, headers=self.headers)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
# Safe to delete non-existing ones
self.simulate_delete(target, query_string=params, headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
# Even after the queue is gone
self.simulate_delete(self.queue_path, headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
self.simulate_delete(target, query_string=params, headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
def test_list(self):
path = self.queue_path + '/messages'
self._post_messages(path, repeat=10)
query_string = 'limit=3&echo=true'
body = self.simulate_get(path,
query_string=query_string,
headers=self.headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
cnt = 0
while jsonutils.loads(body[0])['messages'] != []:
contents = jsonutils.loads(body[0])
[target, params] = contents['links'][0]['href'].split('?')
for msg in contents['messages']:
self.simulate_get(msg['href'], headers=self.headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
body = self.simulate_get(target,
query_string=params,
headers=self.headers)
cnt += 1
self.assertEqual(4, cnt)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self._empty_message_list(body)
# Stats
body = self.simulate_get(self.queue_path + '/stats',
headers=self.headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
message_stats = jsonutils.loads(body[0])['messages']
# NOTE(kgriffs): The other parts of the stats are tested
# in tests.storage.base and so are not repeated here.
expected_pattern = self.queue_path + '/messages/[^/]+$'
for message_stat_name in ('oldest', 'newest'):
self.assertThat(message_stats[message_stat_name]['href'],
matchers.MatchesRegex(expected_pattern))
# NOTE(kgriffs): Try to get messages for a missing queue
body = self.simulate_get(self.url_prefix +
'/queues/nonexistent/messages',
headers=self.headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self._empty_message_list(body)
def test_list_with_bad_marker(self):
path = self.queue_path + '/messages'
self._post_messages(path, repeat=5)
query_string = 'limit=3&echo=true&marker=sfhlsfdjh2048'
body = self.simulate_get(path,
query_string=query_string,
headers=self.headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
self._empty_message_list(body)
def test_no_uuid(self):
headers = {
'Client-ID': "textid",
'X-Project-ID': '7e7e7e'
}
path = self.queue_path + '/messages'
self.simulate_post(path, body='[{"body": 0, "ttl": 100}]',
headers=headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.simulate_get(path, headers=headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_get_claimed_contains_claim_id_in_href(self):
path = self.queue_path
res = self._post_messages(path + '/messages', repeat=5)
for url in jsonutils.loads(res[0])['resources']:
message = self.simulate_get(url)
self.assertNotIn('claim_id', jsonutils.loads(message[0])['href'])
self.simulate_post(path + '/claims',
body='{"ttl": 100, "grace": 100}',
headers=self.headers)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
for url in jsonutils.loads(res[0])['resources']:
message = self.simulate_get(url)
self.assertIn('claim_id', jsonutils.loads(message[0])['href'])
# NOTE(cpp-cabrera): regression test against bug #1210633
def test_when_claim_deleted_then_messages_unclaimed(self):
path = self.queue_path
self._post_messages(path + '/messages', repeat=5)
# post claim
self.simulate_post(path + '/claims',
body='{"ttl": 100, "grace": 100}',
headers=self.headers)
self.assertEqual(falcon.HTTP_201, self.srmock.status)
location = self.srmock.headers_dict['location']
# release claim
self.simulate_delete(location, headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
# get unclaimed messages
self.simulate_get(path + '/messages',
query_string='echo=true',
headers=self.headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# NOTE(cpp-cabrera): regression test against bug #1203842
def test_get_nonexistent_message_404s(self):
path = self.url_prefix + '/queues/notthere/messages/a'
self.simulate_get(path, headers=self.headers)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def test_get_multiple_invalid_messages_404s(self):
path = self.url_prefix + '/queues/notthere/messages'
self.simulate_get(path, query_string='ids=a,b,c',
headers=self.headers)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
def test_delete_multiple_invalid_messages_204s(self):
path = self.url_prefix + '/queues/notthere/messages'
self.simulate_delete(path, query_string='ids=a,b,c',
headers=self.headers)
self.assertEqual(falcon.HTTP_204, self.srmock.status)
def test_delete_message_with_invalid_claim_doesnt_delete_message(self):
path = self.queue_path
resp = self._post_messages(path + '/messages', 1)
location = jsonutils.loads(resp[0])['resources'][0]
self.simulate_delete(location, query_string='claim_id=invalid',
headers=self.headers)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
self.simulate_get(location, headers=self.headers)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
def test_no_duplicated_messages_path_in_href(self):
"""Test for bug 1240897."""
path = self.queue_path + '/messages'
self._post_messages(path, repeat=1)
msg_id = self._get_msg_id(self.srmock.headers_dict)
query_string = 'ids=%s' % msg_id
body = self.simulate_get(path,
query_string=query_string,
headers=self.headers)
messages = jsonutils.loads(body[0])
self.assertNotIn(self.queue_path + '/messages/messages',
messages['messages'][0]['href'])
def _post_messages(self, target, repeat=1):
doc = {'messages': [{'body': 239, 'ttl': 300}] * repeat}
body = jsonutils.dumps(doc)
return self.simulate_post(target, body=body, headers=self.headers)
def _get_msg_id(self, headers):
return self._get_msg_ids(headers)[0]
def _get_msg_ids(self, headers):
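        # The Location header looks like ".../messages?ids=<id1>,<id2>,...";
        # take everything after the last '=' and split on commas.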
return headers['location'].rsplit('=', 1)[-1].split(',')
@ddt.data(1, 2, 10)
def test_pop(self, message_count):
self._post_messages(self.messages_path, repeat=message_count)
msg_id = self._get_msg_id(self.srmock.headers_dict)
target = self.messages_path + '/' + msg_id
self.simulate_get(target, self.project_id)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
query_string = 'pop=' + str(message_count)
result = self.simulate_delete(self.messages_path, self.project_id,
query_string=query_string)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
result_doc = jsonutils.loads(result[0])
self.assertEqual(message_count, len(result_doc['messages']))
self.simulate_get(target, self.project_id)
self.assertEqual(falcon.HTTP_404, self.srmock.status)
@ddt.data('', 'pop=1000000', 'pop=10&ids=1', 'pop=-1')
def test_pop_invalid(self, query_string):
self.simulate_delete(self.messages_path, self.project_id,
query_string=query_string)
self.assertEqual(falcon.HTTP_400, self.srmock.status)
def test_pop_empty_queue(self):
query_string = 'pop=1'
result = self.simulate_delete(self.messages_path, self.project_id,
query_string=query_string)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
result_doc = jsonutils.loads(result[0])
self.assertEqual([], result_doc['messages'])
def test_pop_single_message(self):
self._post_messages(self.messages_path, repeat=5)
msg_id = self._get_msg_id(self.srmock.headers_dict)
target = self.messages_path + '/' + msg_id
self.simulate_get(target, self.project_id)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# Pop Single message from the queue
query_string = 'pop=1'
result = self.simulate_delete(self.messages_path, self.project_id,
query_string=query_string)
self.assertEqual(falcon.HTTP_200, self.srmock.status)
# Get messages from the queue & verify message count
query_string = 'echo=True'
result = self.simulate_get(self.messages_path, self.project_id,
query_string=query_string,
headers=self.headers)
result_doc = jsonutils.loads(result[0])
actual_msg_count = len(result_doc['messages'])
expected_msg_count = 4
self.assertEqual(expected_msg_count, actual_msg_count)
class TestMessagesMongoDBPooled(TestMessagesMongoDB):
config_file = 'wsgi_mongodb_pooled.conf'
# TODO(cpp-cabrera): remove this skipTest once pooled queue
# listing is implemented
def test_list(self):
self.skipTest("Need to implement pooled queue listing.")
class TestMessagesFaultyDriver(base.V2BaseFaulty):
config_file = 'wsgi_faulty.conf'
def test_simple(self):
project_id = 'xyz'
path = self.url_prefix + '/queues/fizbit/messages'
body = '{"messages": [{"body": 239, "ttl": 100}]}'
headers = {
'Client-ID': str(uuid.uuid4()),
'X-Project-ID': project_id
}
self.simulate_post(path,
body=body,
headers=headers)
self.assertEqual(falcon.HTTP_500, self.srmock.status)
self.simulate_get(path,
headers=headers)
self.assertEqual(falcon.HTTP_503, self.srmock.status)
self.simulate_get(path + '/nonexistent', headers=headers)
self.assertEqual(falcon.HTTP_503, self.srmock.status)
self.simulate_delete(path + '/nada', headers=headers)
self.assertEqual(falcon.HTTP_503, self.srmock.status)
|
# coding = utf-8
from appium import webdriver
import yaml
import logging
"""
@Author: Allison Liu
@Date: 07/06/2019
"""
def desired_caps():
with open('../config/xueqiu_caps.yaml', 'r', encoding='utf-8') as f:
        data = yaml.safe_load(f)
des_caps = {
'platformName': data['platformName'],
'platformVersion': data['platformVersion'],
'deviceName': data['deviceName'],
'udid': data['udid'],
'appPackage': data['appPackage'],
'appActivity': data['appActivity']
}
logging.info("Start xueqiu app....")
driver = webdriver.Remote("http://"+str(data['ip'])+':'+ str(data['port']) + '/wd/hub',des_caps)
driver.implicitly_wait(8)
return driver
if __name__ == '__main__':
desired_caps() |
'''
Given an array nums of integers, return how many of them contain an even number of digits.
Example 1:
Input: nums = [12,345,2,6,7896]
Output: 2
Explanation:
12 contains 2 digits (even number of digits).
345 contains 3 digits (odd number of digits).
2 contains 1 digit (odd number of digits).
6 contains 1 digit (odd number of digits).
7896 contains 4 digits (even number of digits).
Therefore only 12 and 7896 contain an even number of digits.
Example 2:
Input: nums = [555,901,482,1771]
Output: 1
Explanation:
Only 1771 contains an even number of digits.
'''
class Solution(object):
def findNumbers(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return len([num for num in nums if len(str(num))%2 == 0]) |
"""
爬取悦音台mv排行榜
使用requests --- bs4 技术
"""
import requests
import bs4
import random
def get_html(url):
try:
r = requests.get(url, timeout=30)
        r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
return "Something wrong!"
def get_content(url):
    if url[-2:] == "ML":
        print("Mainland China chart")
    elif url[-2:] == "HT":
        print("Hong Kong chart")
    elif url[-2:] == "US":
        print("Western (Europe/US) chart")
    elif url[-2:] == "KR":
        print("South Korea chart")
    else:
        print("Japan chart")
html = get_html(url)
soup = bs4.BeautifulSoup(html, 'lxml')
li_list = soup.find_all('li', attrs={'name': 'dmvLi'})
for li in li_list:
        match = {}
        try:
            if li.find('h3', class_='desc_score'):
                match['score'] = li.find('h3', class_='desc_score').text
            else:
                match['score'] = li.find('h3', class_='asc_score').text
            match['rank'] = li.find('div', class_='top_num').text
            match['name'] = li.find('a', class_='mvname').text
            match['release time'] = li.find('p', class_='c9').text
            match['artist'] = li.find('a', class_='special').text
        except:
            return "Parsing error!"
        print(match)
def main():
base_url = "http://vchart.yinyuetai.com/vchart/trends?area="
suffix = ['ML', 'HT', 'US', 'JP', 'KR']
for suff in suffix:
url = base_url + suff
get_content(url)
if __name__ == '__main__':
main()
"""
# This block works around anti-crawler measures by spoofing the User-Agent header when fetching the page source
def get_agent():
'''
    Spoof the User-Agent field of the request header;
    return a random user-agent as a dict key/value pair
'''
agents = ['Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv,2.0.1) Gecko/20100101 Firefox/4.0.1',
'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)']
fakeheader = {}
    fakeheader['User-agent'] = agents[random.randint(0, len(agents) - 1)]
return fakeheader
def get_proxy():
'''
    A simple mock proxy pool;
    return a dict key/value pair
'''
proxy = ["http://116.211.143.11:80",
"http://183.1.86.235:8118",
"http://183.32.88.244:808",
"http://121.40.42.35:9999",
"http://222.94.148.210:808"]
fakepxs = {}
    fakepxs['http'] = proxy[random.randint(0, len(proxy) - 1)]
return fakepxs
""" |
import requests
HOST = "http://192.168.77.47:3000"
# Create a new case
params = {"engineer_id": "1", "case_name": "sample_case", "elevator_tag": "ELEVATOR_001", "description": "What's wrong?"}
r = requests.post("{}/service/create_case".format(HOST), params)
print(r.text)
# Create problem for the new case
files = {"photo": open("sample.jpg", "rb")}
params = {"case_name": "sample_case", "label": "Cable broken.", "description": "Red cable broken."}
r = requests.post("{}/service/create_problem".format(HOST), params, files=files)
print(r.text)
|
import os
def is_aws_credentials_not_set():
return not (
"AWS_ACCESS_KEY_ID" in os.environ and
"AWS_SECRET_ACCESS_KEY" in os.environ and
"AWS_DEFAULT_REGION" in os.environ and
"AWS_ROLE" in os.environ
)
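# Usage sketch (assumption: this helper gates AWS-dependent tests), e.g. with pytest:
# @pytest.mark.skipif(is_aws_credentials_not_set(), reason="AWS credentials are not configured")
# def test_something_that_needs_aws(): ...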
|
from temboo.Library.SendGrid.WebAPI.InvalidEmails.DeleteInvalidAddress import DeleteInvalidAddress, DeleteInvalidAddressInputSet, DeleteInvalidAddressResultSet, DeleteInvalidAddressChoreographyExecution
from temboo.Library.SendGrid.WebAPI.InvalidEmails.RetrieveInvalidEmails import RetrieveInvalidEmails, RetrieveInvalidEmailsInputSet, RetrieveInvalidEmailsResultSet, RetrieveInvalidEmailsChoreographyExecution
|
from unittest import TestCase
from obihai_helper import ObihaiHelper
class TestObihaiHelper(TestCase):
def test_add_caller_to_history(self):
config_folder = "./"
self.helper = ObihaiHelper(config_folder)
self.helper.add_caller_to_history("port2", "11234567891", "John Smith", 1, "12:23:45 06/21")
self.helper.add_caller_to_history("port2", "11234567890", "John Smith", 1, "12:23:45 06/22")
self.helper.add_caller_to_history("port2", "11234567890", "John Smith", 1, "12:23:45 06/23")
self.helper.add_caller_to_history("port2", "11234567890", "John Smith", 1, "12:23:45 06/24")
def test_get_caller_name_from_opencnam(self):
config_folder = "./"
self.helper = ObihaiHelper(config_folder)
self.helper.get_caller_name_from_opencnam("11234567890")
|
# No.  Method and description
# 1. os.access(path, mode): Check access to path using the given permission mode
# 2. os.chdir(path): Change the current working directory
# 3. os.chflags(path, flags): Set the flags of path to the numeric flags
# 4. os.chmod(path, mode): Change the permissions of path
# 5. os.chown(path, uid, gid): Change the owner of the file
# 6. os.chroot(path): Change the root directory of the current process
# 7. os.close(fd): Close file descriptor fd
# 8. os.closerange(fd_low, fd_high): Close all file descriptors from fd_low (inclusive) to fd_high (exclusive), ignoring errors
# 9. os.dup(fd): Duplicate file descriptor fd
# 10. os.dup2(fd, fd2): Duplicate file descriptor fd onto fd2
# 11. os.fchdir(fd): Change the current working directory via a file descriptor
# 12. os.fchmod(fd, mode): Change the access permissions of the file given by fd; mode is a Unix file access mode
# 13. os.fchown(fd, uid, gid): Change the ownership (user ID and group ID) of the file given by file descriptor fd
# 14. os.fdatasync(fd): Force the file given by fd to be written to disk, without forcing an update of its metadata
# 15. os.fdopen(fd[, mode[, bufsize]]): Create and return a file object from file descriptor fd
# 16. os.fpathconf(fd, name): Return system configuration information for an open file; name is the configuration value to retrieve, a string defined in various standards (POSIX.1, Unix 95, Unix 98, and others)
# 17. os.fstat(fd): Return the status of file descriptor fd, like stat()
# 18. os.fstatvfs(fd): Return information about the file system containing the file given by fd; Python 3.3 equivalent of statvfs()
# 19. os.fsync(fd): Force the file given by file descriptor fd to be written to disk
# 20. os.ftruncate(fd, length): Truncate the file given by fd so that it is at most length bytes in size
# 21. os.getcwd(): Return the current working directory
# 22. os.getcwdu(): Return the current working directory as a Unicode object
# 23. os.isatty(fd): Return True if fd is open and connected to a tty(-like) device, otherwise False
# 24. os.lchflags(path, flags): Set the flags of path to the numeric flags, like chflags(), but without following symlinks
# 25. os.lchmod(path, mode): Change the permissions of a link
# 26. os.lchown(path, uid, gid): Change the file owner, like chown, but without following links
# 27. os.link(src, dst): Create a hard link named dst pointing to src
# 28. os.listdir(path): Return a list with the names of the files and folders contained in the folder given by path
# 29. os.lseek(fd, pos, how): Set the current position of file descriptor fd to pos, interpreted according to how: SEEK_SET or 0 counts from the start of the file, SEEK_CUR or 1 from the current position, os.SEEK_END or 2 from the end of the file. Valid on Unix and Windows
# 30. os.lstat(path): Like stat(), but without following symlinks
# 31. os.major(device): Extract the device major number from a raw device number (using st_dev or st_rdev from stat)
# 32. os.makedev(major, minor): Compose a raw device number from major and minor device numbers
# 33. os.makedirs(path[, mode]): Recursive directory creation; like mkdir(), but all intermediate-level directories are created as well
# 34. os.minor(device): Extract the device minor number from a raw device number (using st_dev or st_rdev from stat)
# 35. os.mkdir(path[, mode]): Create a directory named path with numeric mode; the default mode is 0777 (octal)
# 36. os.mkfifo(path[, mode]): Create a named pipe; mode is numeric and defaults to 0666 (octal)
# 37. os.mknod(filename[, mode=0600, device]): Create a filesystem node named filename (file, special device file, or named pipe)
# 38. os.open(file, flags[, mode]): Open a file with the required open flags; the mode argument is optional
# 39. os.openpty(): Open a new pseudo-terminal pair; return the file descriptors for the pty and the tty
# 40. os.pathconf(path, name): Return system configuration information relevant to the named file
# 41. os.pipe(): Create a pipe; return a pair of file descriptors (r, w) for reading and writing
# 42. os.popen(command[, mode[, bufsize]]): Open a pipe to or from a command
# 43. os.read(fd, n): Read at most n bytes from file descriptor fd; return a string with the bytes read, or an empty string if the end of the file has been reached
# 44. os.readlink(path): Return the file that the symbolic link points to
# 45. os.remove(path): Remove the file at path; raises OSError if path is a directory (see rmdir() below for removing a directory)
# 46. os.removedirs(path): Remove directories recursively
# 47. os.rename(src, dst): Rename a file or directory from src to dst
# 48. os.renames(old, new): Recursively rename directories; also works for files
# 49. os.rmdir(path): Remove the empty directory given by path; raises OSError if the directory is not empty
# 50. os.stat(path): Get status information for path, equivalent to the stat() system call in the C API
# 51. os.stat_float_times([newvalue]): Determine whether stat_result represents timestamps as float objects
# 52. os.statvfs(path): Get file system statistics for the given path
# 53. os.symlink(src, dst): Create a symbolic link
# 54. os.tcgetpgrp(fd): Return the process group associated with the terminal given by fd (an open file descriptor returned by os.open())
# 55. os.tcsetpgrp(fd, pg): Set the process group associated with the terminal given by fd (an open file descriptor returned by os.open()) to pg
# 56. os.tempnam([dir[, prefix]]): Removed in Python 3. Return a unique path name for creating a temporary file
# 57. os.tmpfile(): Removed in Python 3. Return an open file object in (w+b) mode; the file has no directory entry and no file descriptor and is deleted automatically
# 58. os.tmpnam(): Removed in Python 3. Return a unique path for creating a temporary file
# 59. os.ttyname(fd): Return a string naming the terminal device associated with file descriptor fd; raises an exception if fd is not associated with a terminal device
# 60. os.unlink(path): Remove the file at path
# 61. os.utime(path, times): Set the access and modification times of the file given by path
# 62. os.walk(top[, topdown=True[, onerror=None[, followlinks=False]]]): Generate the file names in a directory tree by walking the tree either top-down or bottom-up
# 63. os.write(fd, str): Write the string to file descriptor fd; return the number of bytes actually written
# 64. os.path module: Utilities for getting file attribute information
# 65. os.pardir: The parent-directory reference of the current directory, as a string
import os
# Create a 'haha' directory under the current path; raises an error if it already exists
os.mkdir('haha')
# Return the current working directory
os.getcwd()
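# A few more illustrative calls for the entries listed above (sketch, not executed here):
# os.makedirs('haha/sub/dir')              # entry 33: recursive directory creation
# for root, dirs, files in os.walk('.'):   # entry 62: walk a directory tree
#     print(root, len(files))
# os.rmdir('haha')                         # entry 49: remove an empty directory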
|
from redwind import db
from redwind.models import Venue
db.create_all()
db.engine.execute(
'ALTER TABLE location ADD COLUMN venue_id INTEGER REFERENCES venue(id)')
db.engine.execute(
'ALTER TABLE post ADD COLUMN venue_id INTEGER REFERENCES venue(id)')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import sys
import traceback
import os
import threading
import time
import socket
import select
descriptors = list()
Desc_Skel = {}
_Worker_Thread = None
_Lock = threading.Lock() # synchronization lock
Debug = False
def dprint(f, *v):
if Debug:
print >>sys.stderr, "iface: " + f % v
def floatable(str):
try:
float(str)
return True
except:
return False
class UpdateMetricThread(threading.Thread):
def __init__(self, params):
threading.Thread.__init__(self)
self.running = False
self.shuttingdown = False
self.refresh_rate = params["refresh_rate"]
self.mp = params["metrix_prefix"]
self.metric = {}
self.last_metric = {}
def shutdown(self):
self.shuttingdown = True
if not self.running:
return
self.join()
def run(self):
self.running = True
while not self.shuttingdown:
_Lock.acquire()
updated = self.update_metric()
_Lock.release()
if not updated:
time.sleep(0.2)
else:
if "time" in self.last_metric:
dprint("metric delta period %.3f" % (self.metric['time'] - self.last_metric['time']))
self.running = False
def update_metric(self):
if "time" in self.metric:
if (time.time() - self.metric['time']) < self.refresh_rate:
return False
dprint("updating metrics")
self.last_metric = self.metric.copy()
try:
f = open('/proc/net/dev', 'r')
except IOError:
dprint("unable to open /proc/net/dev")
return False
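        # Each /proc/net/dev data line is "<iface>: <8 receive counters> <8 transmit counters>",
        # so after splitting, tokens[1:9] are rx fields and tokens[9:17] are tx fields.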
for line in f:
if re.search(':', line):
tokens = re.split('[:\s]+', line.strip())
iface = tokens[0].strip(':')
self.metric.update({
'time' : time.time(),
'%s_%s_%s' % (self.mp, iface, 'rx_bytes') : int(tokens[1]),
'%s_%s_%s' % (self.mp, iface, 'rx_packets') : int(tokens[2]),
'%s_%s_%s' % (self.mp, iface, 'rx_errs') : int(tokens[3]),
'%s_%s_%s' % (self.mp, iface, 'rx_drop') : int(tokens[4]),
'%s_%s_%s' % (self.mp, iface, 'rx_fifo') : int(tokens[5]),
'%s_%s_%s' % (self.mp, iface, 'rx_frame') : int(tokens[6]),
'%s_%s_%s' % (self.mp, iface, 'rx_compressed') : int(tokens[7]),
'%s_%s_%s' % (self.mp, iface, 'rx_multicast') : int(tokens[8]),
'%s_%s_%s' % (self.mp, iface, 'tx_bytes') : int(tokens[9]),
'%s_%s_%s' % (self.mp, iface, 'tx_packets') : int(tokens[10]),
'%s_%s_%s' % (self.mp, iface, 'tx_errs') : int(tokens[11]),
'%s_%s_%s' % (self.mp, iface, 'tx_drop') : int(tokens[12]),
'%s_%s_%s' % (self.mp, iface, 'tx_fifo') : int(tokens[13]),
'%s_%s_%s' % (self.mp, iface, 'tx_frame') : int(tokens[14]),
'%s_%s_%s' % (self.mp, iface, 'tx_compressed') : int(tokens[15]),
'%s_%s_%s' % (self.mp, iface, 'tx_multicast') : int(tokens[16]),
})
return True
def metric_delta(self, name):
val = 0
if name in self.metric and name in self.last_metric:
_Lock.acquire()
if self.metric['time'] - self.last_metric['time'] != 0:
val = (self.metric[name] - self.last_metric[name]) / (self.metric['time'] - self.last_metric['time'])
_Lock.release()
return float(val)
def metric_init(params):
global descriptors, Desc_Skel, _Worker_Thread, Debug
# initialize skeleton of descriptors
Desc_Skel = {
'name' : 'XXX',
'call_back' : metric_delta,
'time_max' : 60,
'value_type' : 'float',
'format' : '%.0f',
'units' : 'XXX',
'slope' : 'XXX', # zero|positive|negative|both
'description' : 'XXX',
'groups' : 'network'
}
params["refresh_rate"] = params["refresh_rate"] if "refresh_rate" in params else 15
params["metrix_prefix"] = params["metrix_prefix"] if "metrix_prefix" in params else "iface"
Debug = params["debug"] if "debug" in params else False
dprint("debugging has been turned on")
_Worker_Thread = UpdateMetricThread(params)
_Worker_Thread.start()
mp = params["metrix_prefix"]
try:
f = open("/proc/net/dev", 'r')
except IOError:
return
for line in f:
if re.search(':', line):
tokens = re.split('[:\s]+', line.strip())
iface = tokens[0].strip(':')
for way in ('tx', 'rx'):
descriptors.append(create_desc(Desc_Skel, {
"name" : '%s_%s_%s_%s' % (mp, iface, way, 'bytes'),
"units" : "bytes/s",
"slope" : "both",
"description": 'Interface %s %s bytes per seconds' % (iface, way.upper())
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : '%s_%s_%s_%s' % (mp, iface, way, 'packets'),
"units" : "packets/s",
"slope" : "both",
"description": 'Interface %s %s packets per seconds' % (iface, way.upper())
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : '%s_%s_%s_%s' % (mp, iface, way, 'errs'),
"units" : "errs/s",
"slope" : "both",
"description": 'Interface %s %s errors per seconds' % (iface, way.upper())
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : '%s_%s_%s_%s' % (mp, iface, way, 'drop'),
"units" : "drop/s",
"slope" : "both",
"description": 'Interface %s %s drop per seconds' % (iface, way.upper())
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : '%s_%s_%s_%s' % (mp, iface, way, 'fifo'),
"units" : "fifo/s",
"slope" : "both",
"description": 'Interface %s %s fifo per seconds' % (iface, way.upper())
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : '%s_%s_%s_%s' % (mp, iface, way, 'frame'),
"units" : "frame/s",
"slope" : "both",
"description": 'Interface %s %s frame per seconds' % (iface, way.upper())
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : '%s_%s_%s_%s' % (mp, iface, way, 'compressed'),
"units" : "compressed/s",
"slope" : "both",
"description": 'Interface %s %s compressed per seconds' % (iface, way.upper())
}))
descriptors.append(create_desc(Desc_Skel, {
"name" : '%s_%s_%s_%s' % (mp, iface, way, 'multicast'),
"units" : "multicast/s",
"slope" : "both",
"description": 'Interface %s %s multicast per seconds' % (iface, way.upper())
}))
return descriptors
def create_desc(skel, prop):
d = skel.copy()
for k,v in prop.iteritems():
d[k] = v
return d
def metric_delta(name):
return _Worker_Thread.metric_delta(name)
def metric_cleanup():
_Worker_Thread.shutdown()
if __name__ == '__main__':
params = {
"debug" : True,
"refresh_rate" : 15
}
try:
metric_init(params)
while True:
time.sleep(params['refresh_rate'])
for d in descriptors:
v = d['call_back'](d['name'])
print ('value for %s is ' + d['format']) % (d['name'], v)
except KeyboardInterrupt:
time.sleep(0.2)
os._exit(1)
except:
traceback.print_exc()
os._exit(1)
|
from conans import ConanFile, CMake
import os
# This makes it easy to copy the package to another user or channel
channel = os.getenv("CONAN_CHANNEL", "testing")
username = os.getenv("CONAN_USERNAME", "demo")
class HelloReuseConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
requires = ("hello/0.1@%s/%s" % (username, channel),
"gtest/1.8.0@lasote/stable")
generators = "cmake"
default_options = "gtest:shared=False"
def build(self):
# For following issue https://github.com/conan-io/conan/issues/475
if (self.settings.compiler == "Visual Studio" and
self.settings.build_type == "Debug" and
not self.settings.compiler.runtime.value.endswith("d")):
self.settings.compiler.runtime.value += "d"
cmake = CMake(self.settings)
self.run('cmake "%s" %s' % (self.conanfile_directory, cmake.command_line))
self.run("cmake --build . %s" % cmake.build_config)
def test(self):
# equal to ./bin/greet, but portable win: .\bin\greet
self.run(os.sep.join([".","bin", "test_hello"]))
|
import numpy as np
from scipy.linalg import expm
import leg_controllers.hopper as hopper
import leg_controllers.model as model
A = np.array([
[0., 1., 0.],
[-hopper.omega**2, 0., -model.g],
[0.,0.,0.]
])
def reference(E,y0,t):
# calculate initial velocity from E,y0
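    # Energy balance assumed here: E = 0.5*v0**2 + 0.5*(omega*y0)**2 + g*y0,
    # solved for v0 and taking the negative (downward) root.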
v0 = -np.sqrt(2*(E-.5*(hopper.omega**2)*(y0**2)-model.g*y0))
x0 = np.array([y0,v0,1.])
    return np.array([(expm(A*ti)@x0)[0] for ti in t])
|
from fastapi import APIRouter
from . import auth, challenges, roles, users
router = APIRouter(prefix="/v1")
router.include_router(auth.router)
router.include_router(challenges.router)
router.include_router(roles.router)
router.include_router(users.router)
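# Usage sketch (assumed application wiring, not part of this module):
# from fastapi import FastAPI
# app = FastAPI()
# app.include_router(router)   # exposes the /v1/... endpoints from the routers above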
|
#!/usr/bin/python
"""
Examples for how to use cluster.py
"""
import argparse
import os.path
import time
import yaml
import pylib.mps.cluster.packages as packages
def push(p, packages, args):
p.push(args.local_root, args.package, args.version)
def f_import(p, packages, args):
if '' != args.location:
src = args.location
else:
src = packages[args.package]['src']
version = p.f_import(src, args.package, args.version)
print(version)
def list(p, packages, args):
versions = p.get_versions(args.package)
current = p.get_current(args.package)
maxlen = 0
for v in versions:
if maxlen < len(v):
maxlen = len(v)
for v in versions:
if args.pretty:
t, Null, ver = v.partition('_')
print("%s %s %s %s" % ('*' if current == v else ' ',
v + (maxlen-len(v)) * ' ', time.asctime(time.localtime(int(t))), ver))
else:
print(v)
def remove(p, packages, args):
p.remove(args.package, args.version)
def stop(p, packages, args):
p.stop(args.package)
def start(p, packages, args):
p.start(args.package)
def activate(p, packages, args):
p.activate(args.package, args.version)
def get_current(p, packages, args):
current = p.get_current(args.package)
if current:
print(current)
def set_current(p, packages, args):
p.set_current(args.package, args.version)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Cluster Library')
parser.add_argument('--dry-run', action = 'store_true')
parser.add_argument('--host', required = True)
parser.add_argument('--package_def',
default = os.path.join(os.path.dirname(__file__), 'packages.yaml'))
parser.add_argument('-u', '--user', default = 'walle')
parser.add_argument('-i', '--key',
default = '/home/share/aws/r77_aws_keypair')
parser.add_argument('-r', '--root', default = '/home/share/repo')
subparsers = parser.add_subparsers(help = "Sub Commands:")
p = subparsers.add_parser('import', help = 'import package to repository')
p.add_argument('-p', '--package', required = True)
p.add_argument('-v', '--version', default = '')
p.add_argument('--location', default = '')
p.set_defaults(func = f_import)
p = subparsers.add_parser('push', help = 'copy package to the remote node')
p.add_argument('-p', '--package', required = True)
p.add_argument('-v', '--version', default = '')
p.add_argument('--local-root', default = '/home/share/repo')
p.set_defaults(func = push)
p = subparsers.add_parser('list', help = 'list all versions for a given package')
p.add_argument('-p', '--package', required = True)
p.add_argument('--pretty', dest = 'pretty', action = 'store_true',
default = False)
p.set_defaults(func = list)
p = subparsers.add_parser('remove', help = 'delete version of the package')
p.add_argument('-p', '--package', required = True)
p.add_argument('-v', '--version', required = True)
p.set_defaults(func = remove)
p = subparsers.add_parser('stop', help = 'stop running package')
p.add_argument('-p', '--package', required = True)
p.set_defaults(func = stop)
p = subparsers.add_parser('start', help = 'start running package')
p.add_argument('-p', '--package', required = True)
p.set_defaults(func = start)
p = subparsers.add_parser('activate', help = 'stop, set_current, start in one command')
p.add_argument('-p', '--package', required = True)
p.add_argument('-v', '--version', required = True)
p.set_defaults(func = activate)
p = subparsers.add_parser('get_current', help = 'get current version of the package')
p.add_argument('-p', '--package', required = True)
p.set_defaults(func = get_current)
p = subparsers.add_parser('set_current', help = "change 'current' symlink")
p.add_argument('-p', '--package', required = True)
p.add_argument('-v', '--version', required = True)
p.set_defaults(func = set_current)
args = parser.parse_args()
pkg = packages.Packages(args.host, user = args.user,
root = args.root, key = args.key, dry_run = args.dry_run)
packages = {}
if os.path.isfile(args.package_def):
        packages = yaml.safe_load(open(args.package_def, 'r'))
args.func(pkg, packages, args)
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_wids_profile
short_description: Configure wireless intrusion detection system (WIDS) profiles in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify wireless_controller feature and wids_profile category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
member_path:
type: str
description:
- Member attribute path to operate on.
            - Delimited by a slash character if there is more than one attribute.
            - Parameters marked with member_path are legitimate for member operations.
member_state:
type: str
description:
- Add or delete a member under specified attribute path.
- When member_state is specified, the state option is ignored.
choices:
- present
- absent
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
wireless_controller_wids_profile:
description:
- Configure wireless intrusion detection system (WIDS) profiles.
default: null
type: dict
suboptions:
ap_auto_suppress:
description:
- Enable/disable on-wire rogue AP auto-suppression .
type: str
choices:
- enable
- disable
ap_bgscan_disable_day:
description:
- Optionally turn off scanning for one or more days of the week. Separate the days with a space. By default, no days are set.
type: str
choices:
- sunday
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
ap_bgscan_disable_end:
description:
- 'End time, using a 24-hour clock in the format of hh:mm, for disabling background scanning .'
type: str
ap_bgscan_disable_schedules:
description:
- Firewall schedules for turning off FortiAP radio background scan. Background scan will be disabled when at least one of the schedules is
valid. Separate multiple schedule names with a space.
type: list
suboptions:
name:
description:
- Schedule name. Source firewall.schedule.group.name firewall.schedule.recurring.name firewall.schedule.onetime.name.
required: true
type: str
ap_bgscan_disable_start:
description:
- 'Start time, using a 24-hour clock in the format of hh:mm, for disabling background scanning .'
type: str
ap_bgscan_duration:
description:
- Listening time on a scanning channel (10 - 1000 msec).
type: int
ap_bgscan_idle:
description:
- Waiting time for channel inactivity before scanning this channel (0 - 1000 msec).
type: int
ap_bgscan_intv:
description:
- Period of time between scanning two channels (1 - 600 sec).
type: int
ap_bgscan_period:
description:
- Period of time between background scans (60 - 3600 sec).
type: int
ap_bgscan_report_intv:
description:
- Period of time between background scan reports (15 - 600 sec).
type: int
ap_fgscan_report_intv:
description:
- Period of time between foreground scan reports (15 - 600 sec).
type: int
ap_scan:
description:
- Enable/disable rogue AP detection.
type: str
choices:
- disable
- enable
ap_scan_passive:
description:
- Enable/disable passive scanning. Enable means do not send probe request on any channels .
type: str
choices:
- enable
- disable
ap_scan_threshold:
description:
- Minimum signal level/threshold in dBm required for the AP to report detected rogue AP (-95 to -20).
type: str
asleap_attack:
description:
- Enable/disable asleap attack detection .
type: str
choices:
- enable
- disable
assoc_flood_thresh:
description:
- The threshold value for association frame flooding.
type: int
assoc_flood_time:
description:
- Number of seconds after which a station is considered not connected.
type: int
assoc_frame_flood:
description:
- Enable/disable association frame flooding detection .
type: str
choices:
- enable
- disable
auth_flood_thresh:
description:
- The threshold value for authentication frame flooding.
type: int
auth_flood_time:
description:
- Number of seconds after which a station is considered not connected.
type: int
auth_frame_flood:
description:
- Enable/disable authentication frame flooding detection .
type: str
choices:
- enable
- disable
comment:
description:
- Comment.
type: str
deauth_broadcast:
description:
- Enable/disable broadcasting de-authentication detection .
type: str
choices:
- enable
- disable
deauth_unknown_src_thresh:
description:
- 'Threshold value per second to deauth unknown src for DoS attack (0: no limit).'
type: int
eapol_fail_flood:
description:
- Enable/disable EAPOL-Failure flooding (to AP) detection .
type: str
choices:
- enable
- disable
eapol_fail_intv:
description:
- The detection interval for EAPOL-Failure flooding (1 - 3600 sec).
type: int
eapol_fail_thresh:
description:
- The threshold value for EAPOL-Failure flooding in specified interval.
type: int
eapol_logoff_flood:
description:
- Enable/disable EAPOL-Logoff flooding (to AP) detection .
type: str
choices:
- enable
- disable
eapol_logoff_intv:
description:
- The detection interval for EAPOL-Logoff flooding (1 - 3600 sec).
type: int
eapol_logoff_thresh:
description:
- The threshold value for EAPOL-Logoff flooding in specified interval.
type: int
eapol_pre_fail_flood:
description:
- Enable/disable premature EAPOL-Failure flooding (to STA) detection .
type: str
choices:
- enable
- disable
eapol_pre_fail_intv:
description:
- The detection interval for premature EAPOL-Failure flooding (1 - 3600 sec).
type: int
eapol_pre_fail_thresh:
description:
- The threshold value for premature EAPOL-Failure flooding in specified interval.
type: int
eapol_pre_succ_flood:
description:
- Enable/disable premature EAPOL-Success flooding (to STA) detection .
type: str
choices:
- enable
- disable
eapol_pre_succ_intv:
description:
- The detection interval for premature EAPOL-Success flooding (1 - 3600 sec).
type: int
eapol_pre_succ_thresh:
description:
- The threshold value for premature EAPOL-Success flooding in specified interval.
type: int
eapol_start_flood:
description:
- Enable/disable EAPOL-Start flooding (to AP) detection .
type: str
choices:
- enable
- disable
eapol_start_intv:
description:
- The detection interval for EAPOL-Start flooding (1 - 3600 sec).
type: int
eapol_start_thresh:
description:
- The threshold value for EAPOL-Start flooding in specified interval.
type: int
eapol_succ_flood:
description:
- Enable/disable EAPOL-Success flooding (to AP) detection .
type: str
choices:
- enable
- disable
eapol_succ_intv:
description:
- The detection interval for EAPOL-Success flooding (1 - 3600 sec).
type: int
eapol_succ_thresh:
description:
- The threshold value for EAPOL-Success flooding in specified interval.
type: int
invalid_mac_oui:
description:
- Enable/disable invalid MAC OUI detection.
type: str
choices:
- enable
- disable
long_duration_attack:
description:
- Enable/disable long duration attack detection based on user configured threshold .
type: str
choices:
- enable
- disable
long_duration_thresh:
description:
- Threshold value for long duration attack detection (1000 - 32767 usec).
type: int
name:
description:
- WIDS profile name.
required: true
type: str
null_ssid_probe_resp:
description:
- Enable/disable null SSID probe response detection .
type: str
choices:
- enable
- disable
sensor_mode:
description:
- Scan WiFi nearby stations .
type: str
choices:
- disable
- foreign
- both
spoofed_deauth:
description:
- Enable/disable spoofed de-authentication attack detection .
type: str
choices:
- enable
- disable
weak_wep_iv:
description:
- Enable/disable weak WEP IV (Initialization Vector) detection .
type: str
choices:
- enable
- disable
wireless_bridge:
description:
- Enable/disable wireless bridge detection .
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- collections:
- fortinet.fortios
connection: httpapi
hosts: fortigate01
vars:
ansible_httpapi_port: 443
ansible_httpapi_use_ssl: true
ansible_httpapi_validate_certs: false
vdom: root
tasks:
- name: fortios_wireless_controller_wids_profile
fortios_wireless_controller_wids_profile:
vdom: root
state: present
wireless_controller_wids_profile:
ap_auto_suppress: disable
ap_bgscan_duration: 20
ap_bgscan_idle: 0
ap_bgscan_intv: 1
ap_bgscan_period: 600
ap_bgscan_report_intv: 30
ap_fgscan_report_intv: 15
ap_scan: disable
ap_scan_passive: disable
ap_scan_threshold: '-90'
asleap_attack: disable
assoc_flood_thresh: 30
assoc_flood_time: 10
assoc_frame_flood: disable
auth_flood_thresh: 30
auth_flood_time: 10
auth_frame_flood: disable
deauth_broadcast: disable
deauth_unknown_src_thresh: 10
eapol_fail_flood: disable
eapol_fail_intv: 1
eapol_fail_thresh: 10
eapol_logoff_flood: disable
eapol_logoff_intv: 1
eapol_logoff_thresh: 10
eapol_pre_fail_flood: disable
eapol_pre_fail_intv: 1
eapol_pre_fail_thresh: 10
eapol_pre_succ_flood: disable
eapol_pre_succ_intv: 1
eapol_pre_succ_thresh: 10
eapol_start_flood: disable
eapol_start_intv: 1
eapol_start_thresh: 10
eapol_succ_flood: disable
eapol_succ_intv: 1
eapol_succ_thresh: 10
invalid_mac_oui: disable
long_duration_attack: disable
long_duration_thresh: 8200
name: terr-test-rdmstr
null_ssid_probe_resp: disable
sensor_mode: disable
spoofed_deauth: disable
weak_wep_iv: disable
wireless_bridge: disable
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import is_same_comparison
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import serialize
def filter_wireless_controller_wids_profile_data(json):
option_list = ['ap_auto_suppress', 'ap_bgscan_disable_day', 'ap_bgscan_disable_end',
'ap_bgscan_disable_schedules', 'ap_bgscan_disable_start', 'ap_bgscan_duration',
'ap_bgscan_idle', 'ap_bgscan_intv', 'ap_bgscan_period',
'ap_bgscan_report_intv', 'ap_fgscan_report_intv', 'ap_scan',
'ap_scan_passive', 'ap_scan_threshold', 'asleap_attack',
'assoc_flood_thresh', 'assoc_flood_time', 'assoc_frame_flood',
'auth_flood_thresh', 'auth_flood_time', 'auth_frame_flood',
'comment', 'deauth_broadcast', 'deauth_unknown_src_thresh',
'eapol_fail_flood', 'eapol_fail_intv', 'eapol_fail_thresh',
'eapol_logoff_flood', 'eapol_logoff_intv', 'eapol_logoff_thresh',
'eapol_pre_fail_flood', 'eapol_pre_fail_intv', 'eapol_pre_fail_thresh',
'eapol_pre_succ_flood', 'eapol_pre_succ_intv', 'eapol_pre_succ_thresh',
'eapol_start_flood', 'eapol_start_intv', 'eapol_start_thresh',
'eapol_succ_flood', 'eapol_succ_intv', 'eapol_succ_thresh',
'invalid_mac_oui', 'long_duration_attack', 'long_duration_thresh',
'name', 'null_ssid_probe_resp', 'sensor_mode',
'spoofed_deauth', 'weak_wep_iv', 'wireless_bridge']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def wireless_controller_wids_profile(data, fos, check_mode=False):
vdom = data['vdom']
state = data['state']
wireless_controller_wids_profile_data = data['wireless_controller_wids_profile']
filtered_data = underscore_to_hyphen(filter_wireless_controller_wids_profile_data(wireless_controller_wids_profile_data))
# check_mode starts from here
if check_mode:
mkey = fos.get_mkey('wireless_controller', 'wids_profile', filtered_data, vdom=vdom)
current_data = fos.get('wireless_controller', 'wids_profile', vdom=vdom, mkey=mkey)
is_existed = current_data and current_data.get('http_status') == 200 \
and isinstance(current_data.get('results'), list) \
and len(current_data['results']) > 0
        # If the record exists and state is 'present', compare current settings with the desired ones.
if state == 'present' or state is True:
if mkey is None:
return False, True, filtered_data
            # mkey exists, so compare the existing record with the desired settings
            # to decide whether applying the change would modify anything.
if is_existed:
is_same = is_same_comparison(
serialize(current_data['results'][0]), serialize(filtered_data))
return False, not is_same, filtered_data
# record does not exist
return False, True, filtered_data
if state == 'absent':
if mkey is None:
return False, False, filtered_data
if is_existed:
return False, True, filtered_data
return False, False, filtered_data
return True, False, {'reason: ': 'Must provide state parameter'}
if state == "present" or state is True:
return fos.set('wireless-controller',
'wids-profile',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('wireless-controller',
'wids-profile',
mkey=filtered_data['name'],
vdom=vdom)
else:
fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(resp):
    return ('status' in resp and resp['status'] == 'success') or \
        ('http_status' in resp and resp['http_status'] == 200) or \
        ('http_method' in resp and resp['http_method'] == "DELETE" and resp['http_status'] == 404)
def fortios_wireless_controller(data, fos, check_mode):
fos.do_member_operation('wireless_controller_wids_profile')
if data['wireless_controller_wids_profile']:
resp = wireless_controller_wids_profile(data, fos, check_mode)
else:
fos._module.fail_json(msg='missing task body: %s' % ('wireless_controller_wids_profile'))
if check_mode:
return resp
return not is_successful_status(resp), \
is_successful_status(resp) and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
versioned_schema = {
"type": "list",
"children": {
"comment": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_pre_succ_flood": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_succ_flood": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_succ_thresh": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ap_bgscan_disable_end": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": False,
"v7.0.1": False,
"v6.4.0": False,
"v6.4.1": False,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"eapol_start_intv": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_logoff_thresh": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_logoff_intv": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ap_bgscan_intv": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"assoc_flood_thresh": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"null_ssid_probe_resp": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"assoc_frame_flood": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ap_bgscan_disable_day": {
"type": "string",
"options": [
{
"value": "sunday",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "monday",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "tuesday",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "wednesday",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "thursday",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "friday",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
},
{
"value": "saturday",
"revisions": {
"v6.0.11": True,
"v6.0.0": True,
"v6.0.5": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": False,
"v7.0.1": False,
"v6.4.0": False,
"v6.4.1": False,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"eapol_fail_thresh": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ap_bgscan_disable_schedules": {
"type": "list",
"children": {
"name": {
"type": "string",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
}
},
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"ap_scan": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"deauth_broadcast": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"wireless_bridge": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ap_scan_threshold": {
"type": "string",
"revisions": {
"v7.0.1": True,
"v7.0.0": True,
"v6.4.4": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True
}
},
"assoc_flood_time": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"long_duration_thresh": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_pre_fail_flood": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_logoff_flood": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ap_bgscan_duration": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_pre_succ_thresh": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ap_bgscan_idle": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ap_bgscan_report_intv": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"auth_frame_flood": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ap_fgscan_report_intv": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ap_bgscan_period": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_pre_succ_intv": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_succ_intv": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_pre_fail_thresh": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_fail_flood": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"auth_flood_time": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ap_scan_passive": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"weak_wep_iv": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ap_auto_suppress": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"invalid_mac_oui": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"name": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"asleap_attack": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_start_flood": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"deauth_unknown_src_thresh": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"long_duration_attack": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"spoofed_deauth": {
"type": "string",
"options": [
{
"value": "enable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_start_thresh": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"eapol_pre_fail_intv": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"auth_flood_thresh": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"ap_bgscan_disable_start": {
"type": "string",
"revisions": {
"v6.0.0": True,
"v7.0.0": False,
"v6.0.5": True,
"v6.4.4": False,
"v7.0.1": False,
"v6.4.0": False,
"v6.4.1": False,
"v6.2.0": False,
"v6.2.3": False,
"v6.2.5": False,
"v6.2.7": False,
"v6.0.11": True
}
},
"eapol_fail_intv": {
"type": "integer",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
"sensor_mode": {
"type": "string",
"options": [
{
"value": "disable",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "foreign",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
},
{
"value": "both",
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
],
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
},
"revisions": {
"v6.0.0": True,
"v7.0.0": True,
"v6.0.5": True,
"v6.4.4": True,
"v7.0.1": True,
"v6.4.0": True,
"v6.4.1": True,
"v6.2.0": True,
"v6.2.3": True,
"v6.2.5": True,
"v6.2.7": True,
"v6.0.11": True
}
}
def main():
module_spec = schema_to_module_spec(versioned_schema)
mkeyname = 'name'
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"enable_log": {"required": False, "type": bool},
"vdom": {"required": False, "type": "str", "default": "root"},
"member_path": {"required": False, "type": "str"},
"member_state": {
"type": "str",
"required": False,
"choices": ["present", "absent"]
},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"wireless_controller_wids_profile": {
"required": False, "type": "dict", "default": None,
"options": {
}
}
}
for attribute_name in module_spec['options']:
fields["wireless_controller_wids_profile"]['options'][attribute_name] = module_spec['options'][attribute_name]
if mkeyname and mkeyname == attribute_name:
fields["wireless_controller_wids_profile"]['options'][attribute_name]['required'] = True
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=True)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
if 'enable_log' in module.params:
connection.set_option('enable_log', module.params['enable_log'])
else:
connection.set_option('enable_log', False)
fos = FortiOSHandler(connection, module, mkeyname)
versions_check_result = check_schema_versioning(fos, versioned_schema, "wireless_controller_wids_profile")
is_error, has_changed, result = fortios_wireless_controller(module.params, fos, module.check_mode)
else:
module.fail_json(**FAIL_SOCKET_MSG)
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
# coding: utf-8
class Vertice:
def __init__(self, nome_cidade, distancia_objetivo):
self.nome_cidade = nome_cidade
self.visitado = False
self.distancia_objetivo = distancia_objetivo
self.cidades = []
def adicionar_cidade(self, cidade):
self.cidades.append(cidade)
def mostra_cidade(self):
for i in self.cidades:
print(i.vertice.nome_cidade, i.custo)
class Cidade:
def __init__(self, vertice, custo):
self.vertice = vertice
self.custo = custo
class Grafo:
portoUniao = Vertice("Porto União", 203)
pauloFrontin = Vertice("Paulo Frontin", 172)
canoinhas = Vertice("Canoinhas", 141)
irati = Vertice("Irati", 139)
palmeira = Vertice("Palmeira", 59)
campoLargo = Vertice("Campo Largo", 27)
curitiba = Vertice("Curitiba", 0)
balsaNova = Vertice("Balsa Nova", 41)
araucaria = Vertice("Araucária", 23)
saoJose = Vertice("São José dos Pinhais", 13)
contenda = Vertice("Contenda", 39)
mafra = Vertice("Mafra", 94)
tijucas = Vertice("Tijucas do Sul", 56)
lapa = Vertice("Lapa", 74)
saoMateus = Vertice("São Mateus do Sul", 123)
tresBarras = Vertice("Três Barras", 131)
portoUniao.adicionar_cidade(Cidade(pauloFrontin, 46))
portoUniao.adicionar_cidade(Cidade(canoinhas, 78))
portoUniao.adicionar_cidade(Cidade(saoMateus, 87))
pauloFrontin.adicionar_cidade(Cidade(portoUniao, 46))
pauloFrontin.adicionar_cidade(Cidade(irati, 75))
canoinhas.adicionar_cidade(Cidade(portoUniao, 78))
canoinhas.adicionar_cidade(Cidade(tresBarras, 12))
canoinhas.adicionar_cidade(Cidade(mafra, 66))
irati.adicionar_cidade(Cidade(pauloFrontin, 75))
irati.adicionar_cidade(Cidade(palmeira, 75))
irati.adicionar_cidade(Cidade(saoMateus, 57))
palmeira.adicionar_cidade(Cidade(irati, 75))
palmeira.adicionar_cidade(Cidade(saoMateus, 77))
palmeira.adicionar_cidade(Cidade(campoLargo, 55))
campoLargo.adicionar_cidade(Cidade(palmeira, 55))
campoLargo.adicionar_cidade(Cidade(balsaNova, 22))
campoLargo.adicionar_cidade(Cidade(curitiba, 29))
curitiba.adicionar_cidade(Cidade(campoLargo, 29))
curitiba.adicionar_cidade(Cidade(araucaria, 37))
curitiba.adicionar_cidade(Cidade(saoJose, 15))
balsaNova.adicionar_cidade(Cidade(curitiba, 51))
balsaNova.adicionar_cidade(Cidade(campoLargo, 22))
balsaNova.adicionar_cidade(Cidade(contenda, 19))
araucaria.adicionar_cidade(Cidade(curitiba, 37))
araucaria.adicionar_cidade(Cidade(contenda, 18))
saoJose.adicionar_cidade(Cidade(curitiba, 15))
saoJose.adicionar_cidade(Cidade(tijucas, 49))
contenda.adicionar_cidade(Cidade(balsaNova, 19))
contenda.adicionar_cidade(Cidade(araucaria, 18))
contenda.adicionar_cidade(Cidade(lapa, 26))
mafra.adicionar_cidade(Cidade(tijucas, 99))
mafra.adicionar_cidade(Cidade(lapa, 57))
mafra.adicionar_cidade(Cidade(canoinhas, 66))
tijucas.adicionar_cidade(Cidade(mafra, 99))
tijucas.adicionar_cidade(Cidade(saoJose, 49))
lapa.adicionar_cidade(Cidade(contenda, 26))
lapa.adicionar_cidade(Cidade(saoMateus, 60))
lapa.adicionar_cidade(Cidade(mafra, 57))
saoMateus.adicionar_cidade(Cidade(palmeira, 77))
saoMateus.adicionar_cidade(Cidade(irati, 57))
saoMateus.adicionar_cidade(Cidade(lapa, 60))
saoMateus.adicionar_cidade(Cidade(tresBarras, 43))
saoMateus.adicionar_cidade(Cidade(portoUniao, 87))
tresBarras.adicionar_cidade(Cidade(saoMateus, 43))
tresBarras.adicionar_cidade(Cidade(canoinhas, 12))
grafo = Grafo()
class VetorOrdenado:
def __init__(self, capacidade):
self.capacidade = capacidade
self.ultima_posicao = -1
self.valores = list(range(self.capacidade))
def insere(self, vertice):
if self.ultima_posicao == self.capacidade - 1:
print('Capacidade máxima atingida')
return
posicao = 0
for i in range(self.ultima_posicao + 1):
posicao = i
if self.valores[i].distancia_objetivo > vertice.distancia_objetivo:
break
if i == self.ultima_posicao:
posicao = i + 1
x = self.ultima_posicao
while x >= posicao:
self.valores[x + 1] = self.valores[x]
x -= 1
self.valores[posicao] = vertice
self.ultima_posicao += 1
def imprime(self):
if self.ultima_posicao == -1:
print('O vetor está vazio')
else:
for i in range(self.ultima_posicao + 1):
print(f'[ {i} ] - {self.valores[i].nome_cidade} a {self.valores[i].distancia_objetivo}')
class Gulosa:
def __init__(self, objetivo):
self.objetivo = objetivo
self.encontrado = False
def buscar(self, atual):
print(f'Passando pela cidade : {atual.nome_cidade}')
atual.visitado = True
if atual == self.objetivo:
self.encontrado = True
else:
vetor_ordenado = VetorOrdenado(len(atual.cidades))
            for cidade in atual.cidades:
                if not cidade.vertice.visitado:
                    cidade.vertice.visitado = True
                    vetor_ordenado.insere(cidade.vertice)
            vetor_ordenado.imprime()
            if vetor_ordenado.ultima_posicao != -1 and vetor_ordenado.valores[0] is not None:
self.buscar(vetor_ordenado.valores[0])
busca_gulosa = Gulosa(grafo.curitiba)
busca_gulosa.buscar(grafo.portoUniao)
|
"""
Managing Attack Logs.
========================
"""
from csv import writer
from textattack.metrics.attack_metrics import (
AttackQueries,
AttackSuccessRate,
WordsPerturbed,
)
from textattack.metrics.quality_metrics import Perplexity, USEMetric
from . import CSVLogger, FileLogger, VisdomLogger, WeightsAndBiasesLogger
class AttackLogManager:
"""Logs the results of an attack to all attached loggers."""
def __init__(self):
self.loggers = []
self.results = []
self.enable_advance_metrics = False
def enable_stdout(self):
self.loggers.append(FileLogger(stdout=True))
def enable_visdom(self):
self.loggers.append(VisdomLogger())
def enable_wandb(self, **kwargs):
self.loggers.append(WeightsAndBiasesLogger(**kwargs))
def disable_color(self):
self.loggers.append(FileLogger(stdout=True, color_method="file"))
def add_output_file(self, filename, color_method):
self.loggers.append(FileLogger(filename=filename, color_method=color_method))
def add_output_csv(self, filename, color_method):
self.loggers.append(CSVLogger(filename=filename, color_method=color_method))
def log_result(self, result):
"""Logs an ``AttackResult`` on each of `self.loggers`."""
self.results.append(result)
for logger in self.loggers:
logger.log_attack_result(result)
def log_results(self, results):
"""Logs an iterable of ``AttackResult`` objects on each of
`self.loggers`."""
for result in results:
self.log_result(result)
self.log_summary()
def log_summary_rows(self, rows, title, window_id):
for logger in self.loggers:
logger.log_summary_rows(rows, title, window_id)
def log_sep(self):
for logger in self.loggers:
logger.log_sep()
def flush(self):
for logger in self.loggers:
logger.flush()
def log_attack_details(self, attack_name, model_name):
# @TODO log a more complete set of attack details
attack_detail_rows = [
["Attack algorithm:", attack_name],
["Model:", model_name],
]
self.log_summary_rows(attack_detail_rows, "Attack Details", "attack_details")
def log_summary(self):
total_attacks = len(self.results)
if total_attacks == 0:
return
# Default metrics - calculated on every attack
attack_success_stats = AttackSuccessRate().calculate(self.results)
words_perturbed_stats = WordsPerturbed().calculate(self.results)
attack_query_stats = AttackQueries().calculate(self.results)
# @TODO generate this table based on user input - each column in specific class
# Example to demonstrate:
# summary_table_rows = attack_success_stats.display_row() + words_perturbed_stats.display_row() + ...
summary_table_rows = [
[
"Number of successful attacks:",
attack_success_stats["successful_attacks"],
],
["Number of failed attacks:", attack_success_stats["failed_attacks"]],
["Number of skipped attacks:", attack_success_stats["skipped_attacks"]],
[
"Original accuracy:",
str(attack_success_stats["original_accuracy"]) + "%",
],
[
"Accuracy under attack:",
str(attack_success_stats["attack_accuracy_perc"]) + "%",
],
[
"Attack success rate:",
str(attack_success_stats["attack_success_rate"]) + "%",
],
[
"Average perturbed word %:",
str(words_perturbed_stats["avg_word_perturbed_perc"]) + "%",
],
[
"Average num. words per input:",
words_perturbed_stats["avg_word_perturbed"],
],
]
        # Model name and attack name may be added to this row later.
result_row = [attack_success_stats["successful_attacks"],
attack_success_stats["failed_attacks"],
str(attack_success_stats["original_accuracy"]),
str(attack_success_stats["attack_accuracy_perc"]),
str(attack_success_stats["attack_success_rate"]),
str(words_perturbed_stats["avg_word_perturbed_perc"]),
words_perturbed_stats["avg_word_perturbed"]]
        # Append the summary row to a CSV file.
        filename = "test_results.csv"
        with open(filename, 'a', newline='') as f_object:
            # csv.writer handles quoting and row termination; the context
            # manager closes the file, so no explicit close() is needed.
            writer_object = writer(f_object)
            writer_object.writerow(result_row)
summary_table_rows.append(
["Avg num queries:", attack_query_stats["avg_num_queries"]]
)
if self.enable_advance_metrics:
perplexity_stats = Perplexity().calculate(self.results)
use_stats = USEMetric().calculate(self.results)
summary_table_rows.append(
[
"Average Original Perplexity:",
perplexity_stats["avg_original_perplexity"],
]
)
summary_table_rows.append(
[
"Average Attack Perplexity:",
perplexity_stats["avg_attack_perplexity"],
]
)
summary_table_rows.append(
["Average Attack USE Score:", use_stats["avg_attack_use_score"]]
)
self.log_summary_rows(
summary_table_rows, "Attack Results", "attack_results_summary"
)
# Show histogram of words changed.
numbins = max(words_perturbed_stats["max_words_changed"], 10)
for logger in self.loggers:
logger.log_hist(
words_perturbed_stats["num_words_changed_until_success"][:numbins],
numbins=numbins,
title="Num Words Perturbed",
window_id="num_words_perturbed",
)
|
#!/usr/bin/env python3
import select, socket
response = b'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\nHello World!'
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv.bind(('0.0.0.0', 9000))
serv.listen(16)
serv.setblocking(0)
rlist = [ serv ]
try:
while True:
rl, wl, el = select.select(rlist, [], [])
for sock in rl:
if sock == serv:
client, address = serv.accept()
rlist.append(client)
print('[SERVER] Client Connected %s:%s' % address )
else:
data = sock.recv(1024)
if data:
sock.send(response)
rlist.remove(sock)
sock.close()
except KeyboardInterrupt:
serv.close()
    # Close any client sockets that are still open.
    for client in rlist:
        if client is not serv:
            client.close()
|
# Write a program, which sums the ASCII codes of N characters and prints the sum on the console.
# On the first line, you will receive N – the number of lines.
# On the following N lines – you will receive a letter per line. Print the total sum in the following format: "The sum equals: {total_sum}".
# Note: n will be in the interval [1…20].
n = int(input())
ascii_sum = 0
for _ in range(n):
char = input()
ascii_sum += ord(char)
print(f"The sum equals: {ascii_sum}")
|
"""
Pointers
--------
Ex:
a house that you want to sell;
a few Python functions that work with images, so you pass high-resolution image data between your functions.
Those large image files remain in one single place in memory. What you do is create variables that hold the locations
of those images in memory. These variables are small and can easily be passed around between different functions.
Pointers: allow you to point to a potentially large segment of memory with just a simple memory address.
In Python, you don't manipulate pointers directly (unlike C/Pascal).
s = set()
We would normally say that s is a variable of the type set. That is, s is a set. This is not strictly true, however.
The variable s is rather a reference (a "safe" pointer) to a set. The set constructor creates a set somewhere in memory
and returns the memory location where that set starts. This is what gets stored in s.
Python hides this complexity from us. We can safely assume that s is a set and that everything works fine.
Array
------
- a sequential list of data;
- sequential = each element is stored right after the previous one in memory
If the array is really big & you're low on memory => it might be impossible to find a contiguous block large enough to fit the entire array
Benefits:
Arrays are very fast: Since each element follows from the previous one in memory, there is no need to jump around
between different memory locations => important when choosing between a list and an array in real-world applications.
Pointer structures
------------------
- Contrary to arrays, pointer structures are lists of items that can be spread out in memory.
- Each item contains one or more links to other items in the structure
- Type of links are dependent on the type of structure:
for linked lists => links to the next (and possibly previous) items in the structure,
for a tree => parent-child links as well as sibling links
    in a tile-based game with a game map built up of hexes, each node will have links to up to 6 adjacent map cells.
Benefits:
They don't require sequential storage space;
They can start small and grow arbitrarily as you add more nodes to the structure
But: for a list of ints, each node needs the space of an int & an additional int for storing the pointer to next node.
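
Illustrative sketch (names made up for this note, not taken from any library) of the
simplest pointer structure, a singly linked list node:

    class Node:
        def __init__(self, value):
            self.value = value   # payload stored in this node
            self.next = None     # reference ("safe" pointer) to the next node, or None at the tail

    # Build 1 -> 2 -> 3 and walk it; each Node object can live anywhere in memory.
    head = Node(1)
    head.next = Node(2)
    head.next.next = Node(3)
    node = head
    while node is not None:
        print(node.value)
        node = node.next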
""" |
#!/usr/bin/python3 -B
import re
import os
import sys
import json
import shutil
import base64
import subprocess
from time import sleep
from rich.progress_bar import ProgressBar
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
PIPE = subprocess.PIPE
DESCRIPTION = b'Clt5ZWxsb3ddZnJvbVsveWVsbG93XVtyZWRdOlsvcmVkXQogICAgW2JsdWVdQHBzaF90YW1bL2JsdWVdIFtjeWFuXSMgSGFja2VyTW9kZVsvY3lhbl0KClt5ZWxsb3ddZXhhbXBsZVsveWVsbG93XVtyZWRdOlsvcmVkXQogICAgW3JlZF0kWy9yZWRdIFtncmVlbl1wYWNrYWdlLWhlbHBlclsvZ3JlZW5dIFtibHVlXWluZm9bL2JsdWVdIFt2aW9sZXRdbm1hcFsvdmlvbGV0XQogICAgW3JlZF0kWy9yZWRdIFtncmVlbl1wYWNrYWdlLWhlbHBlclsvZ3JlZW5dIFtibHVlXXNlYXJjaFsvYmx1ZV0gW3Zpb2xldF12aW1bL3Zpb2xldF0KICAgIFtyZWRdJFsvcmVkXSBbZ3JlZW5dcGFja2FnZS1oZWxwZXJbL2dyZWVuXSBbYmx1ZV1pbnN0YWxsWy9ibHVlXSBbdmlvbGV0XXB5dGhvbjJbL3Zpb2xldF0KClt5ZWxsb3ddaW5mb1sveWVsbG93XVtyZWRdOlsvcmVkXQogICAgW3NlYV9ncmVlbjFdZ2V0IGFsbCBpbmZvcm1hdGlvbiBieSBwYWNrYWdlWy9zZWFfZ3JlZW4xXQogICAgW2JsdWVdaW5mb1svYmx1ZV0gW3JlZF08Wy9yZWRdW3Zpb2xldF1wYWNrYWdlX25hbWVbL3Zpb2xldF1bcmVkXT5bL3JlZF0KW3llbGxvd11zZWFyY2hbL3llbGxvd11bcmVkXTpbL3JlZF0KICAgIFtzZWFfZ3JlZW4xXVNlYXJjaCBtb3JlIHRoYW4gMTQwMCBwYWNrYWdlc1svc2VhX2dyZWVuMV0KICAgIFtibHVlXXNlYXJjaFsvYmx1ZV0gW3JlZF08Wy9yZWRdW3Zpb2xldF1wYXR0cmVuWy92aW9sZXRdW3JlZF0+Wy9yZWRdClt5ZWxsb3ddaW5zdGFsbFsveWVsbG93XVtyZWRdOlsvcmVkXQogICAgW3NlYV9ncmVlbjFdaW5zdGFsbCBwYWNrYWdlICFbL3NlYV9ncmVlbjFdCiAgICBbYmx1ZV1pbnN0YWxsWy9ibHVlXSBbcmVkXTxbL3JlZF1bdmlvbGV0XXBhY2thZ2VfbmFtZVsvdmlvbGV0XVtyZWRdPlsvcmVkXQo='
class PackagesHelper:
def _popen(self, command: str) -> str:
return subprocess.Popen(
command,
stdout=PIPE,
stdin=PIPE,
stderr=PIPE,
shell=True,
).communicate()[0].decode()
def _info_package_as_json(self, package: str) -> dict:
lines: list = self._popen(f'pkg show {package}').split('\n')
data: dict = {}
for line in lines:
if line:
                find = re.findall(r"([\w\W\S]*):\ (.*)", line)
if find:
data[find[0][0]] = find[0][1]
if not data:
data["Error"] = "No packages found"
data["Exists"] = True if shutil.which(package) else False
return data
def _print(self, text: str) -> None:
Console().print(text)
def _panel(self, description: str, title: str, **kwargs) -> None:
expand = kwargs.get('expand')
        if expand is not None:
kwargs.pop('expand')
else:
expand = True
self._print(
Panel(
description,
expand=expand,
title=title,
**kwargs
)
)
def info(self, package: str) -> None:
package_info = self._info_package_as_json(package)
_package = package_info.get('Package')
_version = package_info.get("Version")
title = ("[white]" + _package if _package else "[red]None") + (" [green]" + _version if _version else "")
description = json.dumps({f'[blue]{k}[/blue]': v for k, v in package_info.items()}, indent=3,).replace('"', '')
self._panel(
description,
title,
border_style="cyan",
)
def install(self, package: str) -> None:
installed_size = self._info_package_as_json(package).get('Installed-Size')
installed_size = "[green]" + (installed_size if installed_size else "0.0 KB")
exists = True if shutil.which(package) else False
self._print(
("[cyan]# package is [yellow]exists[/yellow] [red]![/cyan][/red]\n" if exists else "")
+ f"[cyan]# Installing {installed_size}[/green][/cyan]"
)
sleep(2)
os.system(f"pkg install {package}")
self._print(
"[blue]# Done [green]✓[/green][/blue]"
if shutil.which(package) else
"[yellow]# Error [red]✗[/yellow][/red]"
)
def search(self, pattren: str) -> None:
console = Console()
bar = ProgressBar(width=shutil.get_terminal_size()[0] - 10, total=100)
packages = re.findall(r"\n([\w\-\_\S]*)/", self._popen(f"pkg search {pattren}"))
get_item = lambda key, data: data.get(key) if key in data else '[red]None[/red]'
table = Table(
title="Search for Packages",
caption=f"{len(packages)} Packages.",
caption_justify="right",
expand=True,
)
table.add_column("Package", style="cyan", header_style="bright_cyan")
table.add_column("Size", style="green", header_style="bright_green")
table.add_column("Exists", style="yellow", header_style="bright_yellow")
table.add_column("HomePage", style="blue", header_style="bright_blue")
i = 0
console.show_cursor(False)
for pkg in packages:
information = self._info_package_as_json(pkg)
_exists = True if shutil.which(pkg) else False
table.add_row(
get_item("Package", information),
get_item("Installed-Size", information),
(str(_exists) if _exists else f"[red]{_exists}[/red]"),
get_item("Homepage", information),
style="dim" if _exists else "none"
)
point = round(i/len(packages)*100)
bar.update(point)
console.print(bar)
console.file.write(f' {point}%\r')
i+=1
self._print(table)
console.show_cursor(True)
if __name__ == "__main__":
argv = sys.argv[1:]
packageshelper = PackagesHelper()
help_msg = lambda: packageshelper._panel(base64.b64decode(DESCRIPTION).decode(), "[white]package[red]-[/red]helper", expand=False, border_style="cyan")
if len(argv) >= 2:
try:
packageshelper.__getattribute__(argv[0])(argv[1])
except AttributeError:
help_msg()
except Exception as e:
print(f"# {e.__class__.__name__}: {str(e)}")
help_msg()
else:
help_msg()
|
import copy
from cgi import parse_header
from functools import reduce, wraps
from heapq import nsmallest
from io import BytesIO
import json
from operator import itemgetter
import os.path
from lxml.html import document_fromstring
from PIL import Image
from twisted.internet import defer, protocol, reactor
from twisted.web.client import Agent, RedirectAgent, ContentDecoderAgent, GzipDecoder
from twisted.web.http_headers import Headers
from twisted.web.iweb import UNKNOWN_LENGTH
class JSONConfig(object):
config = {}
def __init__(self, filename='config.json', default=None):
self.filename = filename
if not os.path.isfile(self.filename) and default is not None:
self.config = default
self.save()
else:
self.load()
def __getitem__(self, key):
return self.config[key]
def __setitem__(self, key, value):
self.config[key] = value
def __delitem__(self, key):
del self.config[key]
def load(self):
with open(self.filename, 'r') as fd:
self.config = json.load(fd)
def save(self):
with open(self.filename, 'w') as fd:
json.dump(self.config, fd, indent=' ', sort_keys=True)
# Some convenience functions for the lazy
def get(self, key):
return reduce(lambda c, k: c[k], key.split('.'), self.config)
def isset(self, key):
try:
reduce(lambda c, k: c[k], key.split('.'), self.config)
except KeyError:
return False
return True
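    # Illustrative usage (hypothetical keys): dotted paths walk nested dicts.
    #   cfg = JSONConfig(default={'irc': {'nick': 'bot'}})
    #   cfg.get('irc.nick')    -> 'bot'   (same as cfg['irc']['nick'])
    #   cfg.isset('irc.port')  -> False   (missing keys return False instead of raising)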
class User:
def __init__(self, mask, modes='', is_self=False):
self.nick, _, mask = mask.partition('!')
self.user, _, self.host = mask.partition('@')
self.modes = modes
self.is_self = is_self
def __str__(self):
return self.nick
def isIdentified(self):
return 'r' in self.modes
def isSecure(self):
return 'z' in self.modes
def isOper(self):
return 'o' in self.modes
def setMode(self, added, mode, arg=None):
if added and mode not in self.modes:
self.modes += mode
elif not added and mode in self.modes:
self.modes = self.modes.replace(mode, '')
class Channel:
def __init__(self, name, modes=''):
self.name = str(name)
self.users = []
self.modes = modes
for c in list('qsahv'):
setattr(self, c + 'ops', [])
self.bans = []
self.isUser = not self.name.startswith('#')
def __str__(self):
return self.name
def isQOP(self, user):
if type(user) is User:
user = user.nick
return user.lower() in self.qops
def isSOP(self, user):
if type(user) is User:
user = user.nick
return user.lower() in self.sops
def isAOP(self, user):
if type(user) is User:
user = user.nick
return user.lower() in self.aops
isOP = isAOP
def isHOP(self, user):
if type(user) is User:
user = user.nick
return user.lower() in self.hops
isHalfOP = isHOP
def isVOP(self, user):
if type(user) is User:
user = user.nick
return user.lower() in self.vops
isVoice = isVOP
def addUser(self, user):
if type(user) is User:
user = user.nick
if user.lower() not in self.users:
self.users.append(user.lower())
def renameUser(self, user, new):
if type(user) is User:
user = user.nick
for attr in ['users', 'qops', 'sops', 'aops', 'hops', 'vops']:
if hasattr(self, attr) and user.lower() in getattr(self, attr):
getattr(self, attr).remove(user.lower())
getattr(self, attr).append(new.lower())
def removeUser(self, user):
if type(user) is User:
user = user.nick
# The user is leaving the channel so they're not here at all
for attr in ['users', 'qops', 'sops', 'aops', 'hops', 'vops']:
if hasattr(self, attr) and user.lower() in getattr(self, attr):
getattr(self, attr).remove(user.lower())
def setMode(self, added, mode, arg=None):
op_translations = {
'q': 'q',
'a': 's',
'o': 'a',
'h': 'h',
'v': 'v'
}
if added:
if arg is not None:
if mode in op_translations.keys() and hasattr(self, op_translations[mode] + 'ops') and arg.lower() not in getattr(self, op_translations[mode] + 'ops'):
getattr(self, op_translations[mode] + 'ops').append(arg.lower())
elif mode == 'b' and arg.lower() not in self.bans:
self.bans.append(arg.lower())
elif mode not in self.modes:
self.modes += mode
else:
if arg is not None:
if mode in op_translations.keys() and hasattr(self, op_translations[mode] + 'ops') and arg.lower() in getattr(self, op_translations[mode] + 'ops'):
getattr(self, op_translations[mode] + 'ops').remove(arg.lower())
elif mode == 'b' and arg.lower() in self.bans:
self.bans.remove(arg.lower())
elif mode in self.modes:
self.modes = self.modes.replace(mode, '')
class Receiver(protocol.Protocol):
def __init__(self, response, finished):
self.response = response
self.charset = response.headers['Content-Type'][1]['charset']
self.finished = finished
self.buffer = ''
self.remaining = 1024 * 1024 * 4
if self.response.length is UNKNOWN_LENGTH:
self.response.length = 0
self.obtain_length = True
else:
self.obtain_length = False
def dataReceived(self, data):
if self.remaining > 0:
data = data[:self.remaining]
self.buffer += data.decode(self.charset)
self.remaining -= len(data)
if self.obtain_length:
self.response.length += len(data)
def connectionLost(self, reason):
self.finished.callback(self.buffer)
class ImageReceiver(Receiver):
def __init__(self, response, finished):
super().__init__(response, finished)
self.buffer = BytesIO()
def dataReceived(self, data):
if self.remaining > 0:
data = data[:self.remaining]
self.buffer.write(data)
self.remaining -= len(data)
if self.obtain_length:
self.response.length += len(data)
def connectionLost(self, reason):
# Return the image since it's the body
self.finished.callback(Image.open(self.buffer))
class HTMLReceiver(Receiver):
def connectionLost(self, reason):
self.finished.callback(document_fromstring(self.buffer))
class JSONReceiver(Receiver):
def connectionLost(self, reason):
self.finished.callback(json.loads(self.buffer))
# Inspired by cyclone.httpclient.HTTPClient
class HTTPClient:
user_agent = b'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0'
def __init__(self):
self.agent = ContentDecoderAgent(RedirectAgent(Agent(reactor)), [(b'gzip', GzipDecoder)])
@defer.inlineCallbacks
def fetch(self, url, receiver=None):
resp = yield self.agent.request(b'GET', url.encode('utf-8'), Headers({b'User-Agent': [self.user_agent]}))
resp.error = None
resp.headers = dict(resp.headers.getAllRawHeaders())
for k, v in resp.headers.copy().items():
resp.headers[k.decode('utf-8')] = parse_header(v[0].decode('utf-8'))
del resp.headers[k]
if 'Content-Type' not in resp.headers.keys():
resp.headers['Content-Type'] = ['application/octet-stream', {'charset': 'utf-8'}]
elif 'charset' not in resp.headers['Content-Type'][1].keys():
resp.headers['Content-Type'][1]['charset'] = 'utf-8'
if receiver is None:
mime_type = resp.headers['Content-Type'][0]
if mime_type.startswith('image'):
receiver = ImageReceiver
elif mime_type == 'application/json':
receiver = JSONReceiver
elif mime_type == 'text/html':
receiver = HTMLReceiver
else:
receiver = Receiver
d = defer.Deferred()
resp.receiver = receiver
resp.deliverBody(resp.receiver(resp, d))
resp.body = yield d
if resp.length is UNKNOWN_LENGTH:
resp.length = None # A null value serves as a good unknown
defer.returnValue(resp)
# Thanks e000 for the original snippet of this code
def deferred_lfu_cache(maxsize=100):
class Counter(dict):
def __missing__(self, key):
return 0
def decorator(func):
cache = {}
use_count = Counter()
kwargs_mark = object()
waiting = {}
@wraps(func)
def wrapper(*args, **kwargs):
key = args
if kwargs:
key += (kwargs_mark,) + tuple(sorted(kwargs.items()))
use_count[key] += 1
if key in cache:
wrapper.hits += 1
return defer.succeed((cache[key], True))
elif key in waiting:
d = defer.Deferred()
waiting[key].append(d)
return d
else:
def success(result, key):
wrapper.misses += 1
cache[key] = result
if len(cache) > maxsize:
                        # evict the ~10% least-frequently-used entries
                        for old_key, _ in nsmallest(maxsize // 10, use_count.items(), key=itemgetter(1)):
                            del cache[old_key], use_count[old_key]
wrapper.hits -= 1
for d in waiting[key]:
wrapper.hits += 1
d.callback((result, False))
del waiting[key]
def error(err, key):
for d in waiting[key]:
d.errback(err)
del waiting[key]
defer.maybeDeferred(func, *args, **kwargs).addCallback(success, key).addErrback(error, key)
d = defer.Deferred()
waiting[key] = [d]
return d
def clear():
cache.clear()
use_count.clear()
wrapper.hits = wrapper.misses = 0
wrapper.hits = wrapper.misses = 0
wrapper.clear = clear
wrapper.size = lambda: len(cache)
wrapper.waiting = lambda: len(waiting)
wrapper.maxsize = maxsize
return wrapper
return decorator
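# Illustrative usage sketch (hypothetical function, not part of this module): the decorated
# callable should return a Deferred (or a plain value); callers receive (result, was_cached).
#
#   @deferred_lfu_cache(maxsize=50)
#   def fetch_page(url):
#       return HTTPClient().fetch(url)
#
#   fetch_page('http://example.com/').addCallback(
#       lambda res: print('cached' if res[1] else 'fetched'))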
|
# ParentID: 1002002
# Character field ID when accessed: 109090300
# ObjectID: 1000005
# Object Position Y: -724
# Object Position X: -142
|
from random import randint
####################################
# DEVELOPER : VIKRAM SINGH #
# TECHNOLOGY STACK : PYTHON #
####################################
# Evaluation Definitions
X_WINS = 1000
O_WINS = -1000
DRAW = 0
class TicTacToe():
'''
ALGO: MIN-MAX
    After making a move, look at all of the opponent's possible replies: if the opponent has even
    a single winning reply, that move of mine is a loss; if every reply leaves the opponent losing,
    that move of mine is a win.
'''
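    # Illustrative reading of the `possibilities` list built by _findMove when playing 'x'
    # (the board state behind these numbers is made up):
    #   [-1, 1000, 0, ...]  ->  square 0 is already occupied, square 1 forces a win for 'x',
    #                           square 2 can at best be drawn with optimal play.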
def __init__(self):
self.outcomes = []
self.gameBoard = ['.'] * 9
self.coinsType = ['x', 'o']
self.gameOn = True
self.computerPlay = 0
self.computerType = '-'
self.humanType = '-'
def _checkIfWinner(self, playerType):
# check verticals
if self.gameBoard[0] == self.gameBoard[3] and self.gameBoard[0] == self.gameBoard[6] and self.gameBoard[0] == playerType:
return True
if self.gameBoard[1] == self.gameBoard[4] and self.gameBoard[1] == self.gameBoard[7] and self.gameBoard[1] == playerType:
return True
if self.gameBoard[2] == self.gameBoard[5] and self.gameBoard[2] == self.gameBoard[8] and self.gameBoard[2] == playerType:
return True
# check horizontals
if self.gameBoard[0] == self.gameBoard[1] and self.gameBoard[0] == self.gameBoard[2] and self.gameBoard[0] == playerType:
return True
if self.gameBoard[3] == self.gameBoard[4] and self.gameBoard[3] == self.gameBoard[5] and self.gameBoard[3] == playerType:
return True
if self.gameBoard[6] == self.gameBoard[7] and self.gameBoard[6] == self.gameBoard[8] and self.gameBoard[6] == playerType:
return True
# check diagonal
if self.gameBoard[0] == self.gameBoard[4] and self.gameBoard[8] == self.gameBoard[0] and self.gameBoard[0] == playerType:
return True
if self.gameBoard[2] == self.gameBoard[4] and self.gameBoard[2] == self.gameBoard[6] and self.gameBoard[2] == playerType:
return True
return False
def _switchPlayerType(self, playerType):
if playerType == 'x':
return 'o'
return 'x'
def _findPlayerTypeWins(self, playerType):
if playerType == 'x':
return X_WINS
return O_WINS
def _isBoardFull(self):
if '.' in self.gameBoard:
return False
return True
def _checkGameOver(self):
if self._checkIfWinner('x'):
return (True, 'x')
if self._checkIfWinner('o'):
return (True, 'o')
if self._isBoardFull():
return (True, 'd')
return (False, '.')
def _validPositionToEnterCoin(self, pos):
return self.gameBoard[pos] == '.'
def _positionEvaluation(self, playerType):
if self._checkIfWinner(playerType):
return self._findPlayerTypeWins(playerType)
if self._checkIfWinner(self._switchPlayerType(playerType)):
return self._findPlayerTypeWins(self._switchPlayerType(playerType))
return DRAW
def _findMove(self, depth, playerType):
ans = 0
possibilities = [-1] * 9
if self._positionEvaluation(playerType) != DRAW:
return (possibilities, self._positionEvaluation(playerType))
if self._isBoardFull():
return (possibilities, self._positionEvaluation(playerType))
for idx, e in enumerate(self.gameBoard):
if e == '.':
self.gameBoard[idx] = playerType
res = self._findMove(depth + 1, self._switchPlayerType(playerType))
possibilities[idx] = res[1]
self.gameBoard[idx] = '.'
if self._findPlayerTypeWins(playerType) in possibilities:
ans = self._findPlayerTypeWins(playerType)
elif DRAW in possibilities:
ans = DRAW
else:
ans = self._findPlayerTypeWins(self._switchPlayerType(playerType))
return (possibilities, ans)
def _printGameBoard(self):
print "\n 0 1 2 | " + self.gameBoard[0] + " " + self.gameBoard[1] + " " + self.gameBoard[2]
print " 3 4 5 | " + self.gameBoard[3] + " " + self.gameBoard[4] + " " + self.gameBoard[5]
print " 6 7 8 | " + self.gameBoard[6] + " " + self.gameBoard[7] + " " + self.gameBoard[8]
print "\n"
return
def _getPossibilitiesInterpretation(self, playerType):
possibilities = ["You Win" if possibility == self._findPlayerTypeWins(playerType) else possibility for possibility in self.outcomes]
possibilities = ["You Loose" if possibility == self._findPlayerTypeWins(self._switchPlayerType(playerType)) else possibility for possibility in possibilities]
interpreatation = zip([i for i in range(9)], ["Draw" if possibility == 0 else possibility for possibility in possibilities])
return interpreatation
def _printPossibilitiesInterpretation(self, playerType):
interpreatation = self._getPossibilitiesInterpretation(playerType)
for tup in interpreatation:
print tup[0], " : ", tup[1]
print
return
def _getBestPossibleIndexForPlaying(self, playerType):
interpreatation = self._getPossibilitiesInterpretation(playerType)
# first check if winning possible
res = [tup[0] for tup in interpreatation if tup[1] == "You Win"]
if res:
return res[randint(0, len(res)-1)]
# then check if draw possible
res = [tup[0] for tup in interpreatation if tup[1] == "Draw"]
if res:
return res[randint(0, len(res)-1)]
# last possible
        res = [tup[0] for tup in interpreatation if tup[1] == "You Lose"]
if res:
return res[randint(0, len(res)-1)]
return -1
def _startGameLoop(self):
# Game Loop till it ends or crashes
while(self.gameOn):
humanValidPos = False
self._printGameBoard()
pos = input("Enter position for `" + self.humanType + "` : ")
while not humanValidPos:
if not self._validPositionToEnterCoin(pos):
pos = input("Please enter valid position for `" + self.humanType + "` : ")
else:
humanValidPos = True
self.gameBoard[pos] = self.humanType
# Checking If game over
result = self._checkGameOver()
if result[0]:
self._printGameBoard()
if result[1] == 'd':
print "Game Over ! Result => DRAW"
else:
if self.computerType == result[1]:
print "Game Over ! Result => Computer Wins"
else:
print "Game Over ! Result => Human Wins, Actually just writing, even though it will never happen"
return 1
self.outcomes = self._findMove(1, self.computerType)[0]
print
print self.outcomes
self._printPossibilitiesInterpretation(self.computerType)
pos = self._getBestPossibleIndexForPlaying(self.computerType)
self.gameBoard[pos] = self.computerType
# Checking If game over
result = self._checkGameOver()
if result[0]:
self._printGameBoard()
if result[1] == 'd':
print "Game Over ! Result => DRAW"
else:
if self.computerType == result[1]:
print "Game Over ! Result => Computer Wins"
else:
print "Game Over ! Result => Human Wins, Actually just writing, even though it will never happen"
return 1
return 0
def startNewGameOnTerminal(self):
        self.gameBoard = ['.'] * 9
self.computerPlay = input("\nWho starts first ?\n 1: Computer\n 2: Player\n\n ")
if self.computerPlay == 1:
self.computerType = self.coinsType[randint(0,1)] # Random Selection
self.humanType = self._switchPlayerType(self.computerType)
print "\nComputer Selects : ", self.computerType
print "You are now : ", self.humanType
# Computer Making First Move
firstPossibilities = [0, 2, 4, 6, 8] #best possible moves, though all are equal if played optimally.
pos = firstPossibilities[randint(0, len(firstPossibilities)-1)]
self.gameBoard[pos] = self.computerType
else:
self.humanType = raw_input("Choose your coin type [`x` / `o`]: ")
self.computerType = self._switchPlayerType(self.humanType)
print "\nComputer is : ", self.computerType
print "You are : ", self.humanType
self._startGameLoop()
return
def robotsNextMove(self, humanMove):
# while not humanValidPos:
# if not self._validPositionToEnterCoin(pos):
# pos = input("Please enter valid position for `" + self.humanType + "` : ")
# else:
# humanValidPos = True
response = ""
returnMove = -1
self.gameBoard[humanMove] = self.humanType
self._printGameBoard()
# Checking If game over
result = self._checkGameOver()
if result[0]:
if result[1] == 'd':
print "Game Over ! Result => DRAW"
response = "DR"
else:
if self.computerType == result[1]:
print "Game Over ! Result => Computer Wins"
response = "CW"
else:
print "Game Over ! Result => Human Wins, Actually just writing, even though it will never happen"
response = "HW"
return (True, returnMove, response)
self.outcomes = self._findMove(1, self.computerType)[0]
print
print self.outcomes
self._printPossibilitiesInterpretation(self.computerType)
pos = self._getBestPossibleIndexForPlaying(self.computerType)
returnMove = pos
self.gameBoard[pos] = self.computerType
self._printGameBoard()
# Checking If game over
result = self._checkGameOver()
if result[0]:
if result[1] == 'd':
print "Game Over ! Result => DRAW"
response = "DR"
else:
if self.computerType == result[1]:
print "Game Over ! Result => Computer Wins"
response = "CW"
else:
print "Game Over ! Result => Human Wins, Actually just writing, even though it will never happen"
response = "HW"
return (True, returnMove, response)
return (False, returnMove, response)
def startNewGameWithRobot(self, playerOne, playerType):
self.gameBoard = ['.'] * 9
pos = -1
if playerOne == "COMPUTER":
self.computerType = self.coinsType[randint(0,1)] # Random Selection
self.humanType = self._switchPlayerType(self.computerType)
print "\nComputer Selects : ", self.computerType
print "You are now : ", self.humanType
# Computer Making First Move
firstPossibilities = [0, 2, 4, 6, 8] #best possible moves, though all are equal if played optimally.
pos = firstPossibilities[randint(0, len(firstPossibilities)-1)]
self.gameBoard[pos] = self.computerType
self._printGameBoard()
if playerOne == "HUMAN":
self.humanType = playerType
self.computerType = self._switchPlayerType(self.humanType)
print "\nComputer is : ", self.computerType
print "You are : ", self.humanType
self._printGameBoard()
dic = {}
dic["computerMove"] = pos
dic["computerType"] = self.computerType
dic["humanType"] = self.humanType
return dic
def run(self):
self.startNewGameOnTerminal()
return
if __name__ == "__main__":
game = TicTacToe()
game.run()
|
# -*- coding: utf-8 -*-
import sys
from .error import Invalid
import warnings
import logging, inspect
log = logging.getLogger(__name__)
_python3 = sys.version_info[0]>=3
class PASS:
pass
class __MISSING__( str ):
def __str__(self):
return ''
__unicode__ = __str__
MISSING = __MISSING__()
def _append_list(klass, key, data):
key = "__%s__" % key
setattr\
( klass
, key
, list(getattr(klass,key,[])) + list(data)
)
def _merge_dict(klass, key, data):
log.debug('merge dict %s in %s' % ( key, klass ))
fields = dict(getattr\
( klass
, '__%s__' % key
, {}
))
fields.update( data )
setattr\
( klass
, "__%s__" % key
, fields
)
    log.debug('%s.__%s__ = %s' % ( klass, key, getattr( klass, '__%s__' % key ) ))
def _merge_fields(klass, key, fields):
if (len(fields)%2 != 0) or (len(fields)<2):
raise SyntaxError("Invalid number of fields supplied (%s). Use: %s(key, value, key, value, …)" % (len(fields),key))
prev_fields = getattr\
( klass
, '__%s__' % key
, []
)
newfields = list(prev_fields)
field_index = {}
pos = 0
for (name, value) in prev_fields:
field_index[name] = pos
pos+=1
pos = 0
for value in fields:
if pos%2 != 0:
if name in field_index:
newfields[field_index[name]] = (name,value)
else:
newfields.append((name,value))
else:
name = value
pos += 1
setattr\
( klass
, '__%s__' % key
, newfields
)
def _callback(klass):
advice_data = klass.__dict__['__advice_data__']
for key,(data, callback) in advice_data.items():
callback( klass, key, data)
del klass.__advice_data__
return klass
def defaultErrorFormatter( context, error ):
return error.message % error.extra
# TODO: don't set context.value to None when in fact "" is given
class Context( dict ):
__value__ = MISSING
__error__ = MISSING
__result__ = MISSING
parent = None
root = None
key = '/'
isValidated = False
isValidating = False
taggedValidators = {}
indexKeyRelation = {}
numValues = 0
def __init__(self, validator=None, value=MISSING, key='/', parent=None):
if parent is not None:
self.parent = parent
self.root = parent.root
self.key = key
sep = self.root is not parent and '.' or ''
self['path'] = '%s%s%s' % (parent.path,sep,key)
else:
self.root = self
self.errorFormatter = defaultErrorFormatter
self['path'] = key
self.validator = validator
self.value = value
@property
def path(self):
return self['path']
@property
def childs(self):
warnings.warn("Context.childs is deprecated. Please context.children instead", DeprecationWarning, stacklevel=2)
return self.children
@property
def children(self):
children = self.get('children',None)
if children is None:
children = self[ 'children' ] = {}
return children
@property
def errorlist(self):
errorlist = self.get('errorlist',None)
if errorlist is None:
errorlist = self[ 'errorlist' ] = []
return errorlist
@property
def updates(self):
updates = self.get('updates',None)
if updates is None:
updates = self[ 'updates' ] = []
return updates
@property
def value(self):
return self.get('value',self.__value__)
@value.setter
def value( self, value):
if value is self.value:
return
if self.root.isValidating:
self['value'] = value
self.root.updates.append( self.path )
return
        if value == '' or value == [] or value == {}:
value = None
self.__value__ = value
self.clear()
@property
def result(self):
return self.validate()
@property
def error(self):
return self.__error__.__unicode__()
@error.setter
def error( self, error ):
self.__error__ = error
error.context = self
message = error.validator.__messages__[error.key]
if message is not None:
extra = error.data['extra']
value = error.value
data = error.data
data['message'] = message
if hasattr(error,'realkey'):
data['key'] = error.realkey
extra['value.type'] = getattr(value, '__class__', None) is not None \
and getattr(value.__class__,'__name__', False) or 'unknown'
value_str = value
decode_utf8 = False
if not isinstance(value, str):
if _python3 and isinstance( value, bytes ):
decode_utf8 = True
else:
try:
value_str = str(value)
except:
value_str = ''
elif not _python3:
decode_utf8 = True
if decode_utf8:
try:
value_str = value.decode('utf-8')
except UnicodeDecodeError:
value_str = ''
extra['value'] = value_str
cache = getattr( self, 'cache', None)
if cache is not None:
extra.update( cache )
self['error'] = self.__error__.data
if self.__error__.context.path not in self.root.errorlist:
self.root.errorlist.append( self.__error__.context.path )
@property
def validator(self):
if not hasattr(self, '__validator__'):
return None
return self.__validator__
@validator.setter
def validator(self,value):
self.__validator__ = value
self.clear()
def setIndexFunc( self, func ):
self.__result__ = MISSING
self.__error__ = MISSING
self.indexKeyRelation = {}
if func is not None:
self.indexFunc = func
elif hasattr( self, 'indexFunc'):
del self.indexFunc
def getKeyByIndex( self, index ):
key = self.indexKeyRelation.get( index, None )
if key is not None:
return key
indexFunc = getattr(self,'indexFunc',None)
if indexFunc:
if not self.indexKeyRelation:
self.numValues = len(self.children)
self.indexKeyRelation[ index ] = indexFunc( index )
return self.indexKeyRelation[ index ]
else:
raise SyntaxError('Context %s has no children supporting indexing' % self.path)
def clear( self, force=False ):
if not self.isValidated and not force:
return
dict.clear( self )
if self.parent is not None and self.parent.path:
self['path'] = '%s.%s' % (self.parent.path,self.key)
else:
self['path'] = self.key
self.isValidated = False
self.__result__ = MISSING
self.__error__ = MISSING
def validate( self ):
if self.isValidated:
if self.__error__ is not MISSING:
raise self.__error__
return self.__result__
self.isValidating = True
if self.parent is not None:
if not self.parent.isValidated and not self.parent.isValidating:
self.parent.validate()
if self.validator is None:
raise AttributeError("No validator set for context '%s'" % self.path )
try:
result = self.validator.validate( self, self.__value__)
except Invalid as e:
self.error = e
raise e
else:
if result is not PASS:
self.__result__ = result
else:
self.__result__ = self.__value__
return self.__result__
finally:
self.isValidated = True
self.isValidating = False
def __call__( self, path ):
if path.__class__ is int:
if path < 0:
path = self.numValues+path
return self( self.getKeyByIndex( path ) )
elif not path:
raise SyntaxError('Path cannot be empty')
path = path.split('.',1)
try:
child = self.children[path[0]]
except KeyError:
child = self.children[path[0]] = Context( key=path[0], parent=self )
if len(path) == 1:
return child
else:
path=path[1]
return child(path)
from .util import varargs2kwargs
# Some kind of 'clonable' object -
# we reinitialize child objects with inherited kwargs merged with new ones.
# This allows us to alter just a few specific parameters in child objects.
# without the need for implementors of validators to provide setters or too
# much specification for their attributes or how they are provided.
# * the setParameters function will be inspected, so that it will use
# named parameters as kwargs, regardless if they are provided as *args
# ( you cannot use *varargs in setParameters )
# * you can also define a setArguments function, which will be called before
# setParameters, using the provided *varargs. keywords defined in
# setParameters will not be moved from *args to **kwargs when setArguments
# is defined. You can use it for attributes you only want to initialize
# once. It also allows you to 'name' *varargs in the function definition.
# * __inherit__ specifies what attributes should be copied to child instances.
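# A minimal illustrative sketch (the Word validator below is hypothetical, not part of this module):
#
#   class Word(Parameterized):
#       def setParameters(self, minLength=1, maxLength=64):
#           self.minLength, self.maxLength = minLength, maxLength
#
#   short = Word(minLength=2, maxLength=8)   # root instance
#   shorter = short(maxLength=4)             # child call: keeps minLength=2, overrides maxLength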
class Parameterized:
__kwargs__ = {}
__inherit__ = [ ]
__isRoot__ = True
__ignoreClassParameters__ = []
def __init__( self, *args, **kwargs ):
parent = kwargs.pop( '_parent', None )
if not hasattr( self, 'setArguments') and args:
func = getattr( self, 'setParameters', None)
if func is not None:
( args, kwargs, shifted ) = varargs2kwargs( func, args, kwargs )
if parent is not None:
self.__isRoot__ = False
newkwargs = dict(parent.__kwargs__ )
newkwargs.update(kwargs)
kwargs = newkwargs
for key in self.__inherit__:
setattr(self, key, getattr(parent, key))
else:
for key in self.__getParameterNames__():
if hasattr(self.__class__,key)\
and not key in self.__ignoreClassParameters__\
and not key in kwargs:
kwargs[key] = getattr(self.__class__, key)
if args or (parent is None):
if hasattr( self, 'setArguments' ):
self.setArguments( *args )
elif args:
raise SyntaxError('%s takes no further arguments' % self.__class__.__name__)
if hasattr( self, 'setParameters' ):
try:
self.setParameters( **kwargs )
except TypeError as e:
                raise TypeError(self.__class__.__name__+': '+e.args[0])
elif kwargs:
raise SyntaxError('%s takes no parameters' % self.__class__.__name__)
self.__kwargs__ = kwargs
def __call__( self, *args, **kwargs):
kwargs['_parent'] = self
return self.__class__( *args, **kwargs )
@classmethod
def __getParameterNames__( cls ):
if not hasattr( cls, '__parameterNames__'):
if not hasattr( cls, 'setParameters'):
names = ()
else:
spec = inspect.getargspec( cls.setParameters )
if spec.varargs:
raise SyntaxError('Cannot use *varargs in setParameters, please use %s.setArguments' % cls.__name__)
names = spec.args[1:]
setattr\
( cls,'__parameterNames__'
, names
)
return cls.__parameterNames__
def inherit( *members ):
def decorate( klass ):
klassInherits = list(getattr(klass,'__inherit__', [] ))
klassInherits += members
setattr( klass, '__inherit__', klassInherits )
return klass
return decorate
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
var1 = 'Hello World!'
var2 = "Python Runoob"
print "var1[0]: ", var1[0]
print "var2[1:5]: ", var2[1:5]
print "更新字符串 :- ", var1[:6] + 'Runoob!'
a = "Hello"
b = "Python"
print "a + b 输出结果:", a + b
print "a * 2 输出结果:", a * 2
print "a[1] 输出结果:", a[1]
print "a[1:4] 输出结果:", a[1:4]
if( "H" in a) :
print "H 在变量 a 中"
else :
print "H 不在变量 a 中"
if( "M" not in a) :
print "M 不在变量 a 中"
else :
print "M 在变量 a 中"
print r'\n'
#print R'\n' # this raises an error, not sure why
# Python string formatting
print "My name is %s and weight is %d kg!" % ('Zara', 21)
hi = '''hi
there'''
print hi # repr()
print hi # str()
errHTML = '''
<HTML><HEAD><TITLE>
Friends CGI Demo</TITLE></HEAD>
<BODY><H3>ERROR</H3>
<B>%s</B><P>
<FORM><INPUT TYPE=button VALUE=Back
ONCLICK="window.history.back()"></FORM>
</BODY></HTML>
'''
print errHTML
"""
# using a cursor to operate on a database from Python
import MySQLdb
cursor.execute('''
CREATE TABLE users (
login VARCHAR(8),
uid INTEGER,
prid INTEGER)
''')
"""
# Unicode strings
# Defining a Unicode string in Python is as simple as defining an ordinary string:
# The lowercase "u" before the quotes indicates that a Unicode string is being created. If you want to include a special character, you can use Python's Unicode-Escape encoding, as shown below:
# The \u0020 escape inserts the Unicode character with code value 0x0020 (a space) at the given position.
print u'Hello World !'
print u'Hello\u0020World !'
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the Tag Manager server."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.util import apis
TAGS_API_VERSION = 'v3'
def TagClient():
"""Returns a client instance of the CRM Tags service."""
return apis.GetClientInstance('cloudresourcemanager', TAGS_API_VERSION)
def TagMessages():
"""Returns the messages module for the Tags service."""
return apis.GetMessagesModule('cloudresourcemanager', TAGS_API_VERSION)
def TagKeysService():
"""Returns the tag keys service class."""
client = TagClient()
return client.tagKeys
def TagValuesService():
"""Returns the tag values service class."""
client = TagClient()
return client.tagValues
def TagBindingsService():
"""Returns the tag bindings service class."""
client = TagClient()
return client.tagBindings
def TagHoldsService():
"""Returns the tag holds service class."""
client = TagClient()
return client.tagValues_tagHolds
def OperationsService():
"""Returns the operations service class."""
client = TagClient()
return client.operations
|
import difflib
import discord
import pandas as pd
from gamestonk_terminal.stocks.screener.finviz_model import get_screener_data
import discordbot.config_discordbot as cfg
from discordbot.helpers import pagination
from discordbot.stocks.screener import screener_options as so
async def ownership_command(ctx, preset="template", sort="", limit="5", ascend="False"):
"""Displays stocks based on own share float and ownership data [Finviz]"""
try:
# Check for argument
if preset == "template" or preset not in so.all_presets:
raise Exception("Invalid preset selected!")
# Debug
if cfg.DEBUG:
print(f"!stocks.scr.ownership {preset} {sort} {limit} {ascend}")
# Check for argument
if not limit.lstrip("-").isnumeric():
raise Exception("Number has to be an integer")
limit = int(limit)
if limit < 0:
raise Exception("Number has to be above 0")
if ascend.lower() == "false":
ascend = False
elif ascend.lower() == "true":
ascend = True
else:
raise Exception("ascend argument has to be true or false")
# Output Data
df_screen = get_screener_data(
preset,
"ownership",
limit,
ascend,
)
description = ""
if isinstance(df_screen, pd.DataFrame):
if df_screen.empty:
return []
df_screen = df_screen.dropna(axis="columns", how="all")
if sort:
if " ".join(sort) in so.d_cols_to_sort["ownership"]:
df_screen = df_screen.sort_values(
by=[" ".join(sort)],
ascending=ascend,
na_position="last",
)
else:
similar_cmd = difflib.get_close_matches(
" ".join(sort),
so.d_cols_to_sort["ownership"],
n=1,
cutoff=0.7,
)
if similar_cmd:
description = f"Replacing '{' '.join(sort)}' by '{similar_cmd[0]}' so table can be sorted.\n\n"
df_screen = df_screen.sort_values(
by=[similar_cmd[0]],
ascending=ascend,
na_position="last",
)
else:
raise ValueError(
f"Wrong sort column provided! Select from: {', '.join(so.d_cols_to_sort['ownership'])}"
)
df_screen = df_screen.fillna("")
future_column_name = df_screen["Ticker"]
df_screen = df_screen.head(n=limit).transpose()
df_screen.columns = future_column_name
df_screen.drop("Ticker")
columns = []
initial_str = description + "Page 0: Overview"
i = 1
for column in df_screen.columns.values:
initial_str = initial_str + "\nPage " + str(i) + ": " + column
i += 1
columns.append(
discord.Embed(
title="Stocks: [Finviz] Ownership Screener",
description=initial_str,
colour=cfg.COLOR,
).set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
)
for column in df_screen.columns.values:
columns.append(
discord.Embed(
title="Stocks: [Finviz] Ownership Screener",
description="```"
+ df_screen[column].fillna("").to_string()
+ "```",
colour=cfg.COLOR,
).set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
)
await pagination(columns, ctx)
except Exception as e:
embed = discord.Embed(
title="ERROR Stocks: [Finviz] Ownership Screener",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed)
|
from fontTools.ttLib import newTable
from fontTools.ttLib.tables import otTables as ot
from fontTools.colorLib import builder
from fontTools.colorLib.errors import ColorLibError
import pytest
def test_buildCOLR_v0():
color_layer_lists = {
"a": [("a.color0", 0), ("a.color1", 1)],
"b": [("b.color1", 1), ("b.color0", 0)],
}
colr = builder.buildCOLR(color_layer_lists)
assert colr.tableTag == "COLR"
assert colr.version == 0
assert colr.ColorLayers["a"][0].name == "a.color0"
assert colr.ColorLayers["a"][0].colorID == 0
assert colr.ColorLayers["a"][1].name == "a.color1"
assert colr.ColorLayers["a"][1].colorID == 1
assert colr.ColorLayers["b"][0].name == "b.color1"
assert colr.ColorLayers["b"][0].colorID == 1
assert colr.ColorLayers["b"][1].name == "b.color0"
assert colr.ColorLayers["b"][1].colorID == 0
def test_buildCPAL_v0():
palettes = [
[(0.68, 0.20, 0.32, 1.0), (0.45, 0.68, 0.21, 1.0)],
[(0.68, 0.20, 0.32, 0.6), (0.45, 0.68, 0.21, 0.6)],
[(0.68, 0.20, 0.32, 0.3), (0.45, 0.68, 0.21, 0.3)],
]
cpal = builder.buildCPAL(palettes)
assert cpal.tableTag == "CPAL"
assert cpal.version == 0
assert cpal.numPaletteEntries == 2
assert len(cpal.palettes) == 3
assert [tuple(c) for c in cpal.palettes[0]] == [
(82, 51, 173, 255),
(54, 173, 115, 255),
]
assert [tuple(c) for c in cpal.palettes[1]] == [
(82, 51, 173, 153),
(54, 173, 115, 153),
]
assert [tuple(c) for c in cpal.palettes[2]] == [
(82, 51, 173, 76),
(54, 173, 115, 76),
]
def test_buildCPAL_palettes_different_lengths():
with pytest.raises(ColorLibError, match="have different lengths"):
builder.buildCPAL([[(1, 1, 1, 1)], [(0, 0, 0, 1), (0.5, 0.5, 0.5, 1)]])
def test_buildPaletteLabels():
name_table = newTable("name")
name_table.names = []
name_ids = builder.buildPaletteLabels(
[None, "hi", {"en": "hello", "de": "hallo"}], name_table
)
assert name_ids == [0xFFFF, 256, 257]
assert len(name_table.names) == 3
assert str(name_table.names[0]) == "hi"
assert name_table.names[0].nameID == 256
assert str(name_table.names[1]) == "hallo"
assert name_table.names[1].nameID == 257
assert str(name_table.names[2]) == "hello"
assert name_table.names[2].nameID == 257
def test_build_CPAL_v1_types_no_labels():
palettes = [
[(0.1, 0.2, 0.3, 1.0), (0.4, 0.5, 0.6, 1.0)],
[(0.1, 0.2, 0.3, 0.6), (0.4, 0.5, 0.6, 0.6)],
[(0.1, 0.2, 0.3, 0.3), (0.4, 0.5, 0.6, 0.3)],
]
paletteTypes = [
builder.ColorPaletteType.USABLE_WITH_LIGHT_BACKGROUND,
builder.ColorPaletteType.USABLE_WITH_DARK_BACKGROUND,
builder.ColorPaletteType.USABLE_WITH_LIGHT_BACKGROUND
| builder.ColorPaletteType.USABLE_WITH_DARK_BACKGROUND,
]
cpal = builder.buildCPAL(palettes, paletteTypes=paletteTypes)
assert cpal.tableTag == "CPAL"
assert cpal.version == 1
assert cpal.numPaletteEntries == 2
assert len(cpal.palettes) == 3
assert cpal.paletteTypes == paletteTypes
assert cpal.paletteLabels == [cpal.NO_NAME_ID] * len(palettes)
assert cpal.paletteEntryLabels == [cpal.NO_NAME_ID] * cpal.numPaletteEntries
def test_build_CPAL_v1_labels():
palettes = [
[(0.1, 0.2, 0.3, 1.0), (0.4, 0.5, 0.6, 1.0)],
[(0.1, 0.2, 0.3, 0.6), (0.4, 0.5, 0.6, 0.6)],
[(0.1, 0.2, 0.3, 0.3), (0.4, 0.5, 0.6, 0.3)],
]
paletteLabels = ["First", {"en": "Second", "it": "Seconda"}, None]
paletteEntryLabels = ["Foo", "Bar"]
with pytest.raises(TypeError, match="nameTable is required"):
builder.buildCPAL(palettes, paletteLabels=paletteLabels)
with pytest.raises(TypeError, match="nameTable is required"):
builder.buildCPAL(palettes, paletteEntryLabels=paletteEntryLabels)
name_table = newTable("name")
name_table.names = []
cpal = builder.buildCPAL(
palettes,
paletteLabels=paletteLabels,
paletteEntryLabels=paletteEntryLabels,
nameTable=name_table,
)
assert cpal.tableTag == "CPAL"
assert cpal.version == 1
assert cpal.numPaletteEntries == 2
assert len(cpal.palettes) == 3
assert cpal.paletteTypes == [cpal.DEFAULT_PALETTE_TYPE] * len(palettes)
assert cpal.paletteLabels == [256, 257, cpal.NO_NAME_ID]
assert cpal.paletteEntryLabels == [258, 259]
assert name_table.getDebugName(256) == "First"
assert name_table.getDebugName(257) == "Second"
assert name_table.getDebugName(258) == "Foo"
assert name_table.getDebugName(259) == "Bar"
def test_invalid_ColorPaletteType():
with pytest.raises(ValueError, match="not a valid ColorPaletteType"):
builder.ColorPaletteType(-1)
with pytest.raises(ValueError, match="not a valid ColorPaletteType"):
builder.ColorPaletteType(4)
with pytest.raises(ValueError, match="not a valid ColorPaletteType"):
builder.ColorPaletteType("abc")
def test_buildCPAL_v1_invalid_args_length():
with pytest.raises(ColorLibError, match="Expected 2 paletteTypes, got 1"):
builder.buildCPAL([[(0, 0, 0, 0)], [(1, 1, 1, 1)]], paletteTypes=[1])
with pytest.raises(ColorLibError, match="Expected 2 paletteLabels, got 1"):
builder.buildCPAL(
[[(0, 0, 0, 0)], [(1, 1, 1, 1)]],
paletteLabels=["foo"],
nameTable=newTable("name"),
)
with pytest.raises(ColorLibError, match="Expected 1 paletteEntryLabels, got 0"):
cpal = builder.buildCPAL(
[[(0, 0, 0, 0)], [(1, 1, 1, 1)]],
paletteEntryLabels=[],
nameTable=newTable("name"),
)
def test_buildCPAL_invalid_color():
with pytest.raises(
ColorLibError,
match=r"In palette\[0\]\[1\]: expected \(R, G, B, A\) tuple, got \(1, 1, 1\)",
):
builder.buildCPAL([[(1, 1, 1, 1), (1, 1, 1)]])
with pytest.raises(
ColorLibError,
match=(
r"palette\[1\]\[0\] has invalid out-of-range "
r"\[0..1\] color: \(1, 1, -1, 2\)"
),
):
builder.buildCPAL([[(0, 0, 0, 0)], [(1, 1, -1, 2)]])
def test_buildColor():
c = builder.buildColor(0)
assert c.PaletteIndex == 0
assert c.Transparency.value == 0.0
assert c.Transparency.varIdx == 0
c = builder.buildColor(1, transparency=0.5)
assert c.PaletteIndex == 1
assert c.Transparency.value == 0.5
assert c.Transparency.varIdx == 0
c = builder.buildColor(3, transparency=builder.VariableFloat(0.5, varIdx=2))
assert c.PaletteIndex == 3
assert c.Transparency.value == 0.5
assert c.Transparency.varIdx == 2
def test_buildSolidColorPaint():
p = builder.buildSolidColorPaint(0)
assert p.Format == 1
assert p.Color.PaletteIndex == 0
assert p.Color.Transparency.value == 0.0
assert p.Color.Transparency.varIdx == 0
p = builder.buildSolidColorPaint(1, transparency=0.5)
assert p.Format == 1
assert p.Color.PaletteIndex == 1
assert p.Color.Transparency.value == 0.5
assert p.Color.Transparency.varIdx == 0
p = builder.buildSolidColorPaint(
3, transparency=builder.VariableFloat(0.5, varIdx=2)
)
assert p.Format == 1
assert p.Color.PaletteIndex == 3
assert p.Color.Transparency.value == 0.5
assert p.Color.Transparency.varIdx == 2
def test_buildColorStop():
s = builder.buildColorStop(0.1, 2)
assert s.StopOffset == builder.VariableFloat(0.1)
assert s.Color.PaletteIndex == 2
assert s.Color.Transparency == builder._DEFAULT_TRANSPARENCY
s = builder.buildColorStop(offset=0.2, paletteIndex=3, transparency=0.4)
assert s.StopOffset == builder.VariableFloat(0.2)
assert s.Color == builder.buildColor(3, transparency=0.4)
s = builder.buildColorStop(
offset=builder.VariableFloat(0.0, varIdx=1),
paletteIndex=0,
transparency=builder.VariableFloat(0.3, varIdx=2),
)
assert s.StopOffset == builder.VariableFloat(0.0, varIdx=1)
assert s.Color.PaletteIndex == 0
assert s.Color.Transparency == builder.VariableFloat(0.3, varIdx=2)
def test_buildColorLine():
stops = [(0.0, 0), (0.5, 1), (1.0, 2)]
cline = builder.buildColorLine(stops)
assert cline.Extend == builder.ExtendMode.PAD
assert cline.StopCount == 3
assert [
(cs.StopOffset.value, cs.Color.PaletteIndex) for cs in cline.ColorStop
] == stops
cline = builder.buildColorLine(stops, extend="pad")
assert cline.Extend == builder.ExtendMode.PAD
cline = builder.buildColorLine(stops, extend=builder.ExtendMode.REPEAT)
assert cline.Extend == builder.ExtendMode.REPEAT
cline = builder.buildColorLine(stops, extend=builder.ExtendMode.REFLECT)
assert cline.Extend == builder.ExtendMode.REFLECT
cline = builder.buildColorLine([builder.buildColorStop(*s) for s in stops])
assert [
(cs.StopOffset.value, cs.Color.PaletteIndex) for cs in cline.ColorStop
] == stops
stops = [
{"offset": (0.0, 1), "paletteIndex": 0, "transparency": (0.5, 2)},
{"offset": (1.0, 3), "paletteIndex": 1, "transparency": (0.3, 4)},
]
cline = builder.buildColorLine(stops)
assert [
{
"offset": cs.StopOffset,
"paletteIndex": cs.Color.PaletteIndex,
"transparency": cs.Color.Transparency,
}
for cs in cline.ColorStop
] == stops
def test_buildPoint():
pt = builder.buildPoint(0, 1)
assert pt.x == builder.VariableInt(0)
assert pt.y == builder.VariableInt(1)
pt = builder.buildPoint(
builder.VariableInt(2, varIdx=1), builder.VariableInt(3, varIdx=2)
)
assert pt.x == builder.VariableInt(2, varIdx=1)
assert pt.y == builder.VariableInt(3, varIdx=2)
# float coords are rounded
pt = builder.buildPoint(x=-2.5, y=3.5)
assert pt.x == builder.VariableInt(-2)
assert pt.y == builder.VariableInt(4)
# tuple args are cast to VariableInt namedtuple
pt = builder.buildPoint((1, 2), (3, 4))
assert pt.x == builder.VariableInt(1, varIdx=2)
assert pt.y == builder.VariableInt(3, varIdx=4)
def test_buildAffine2x2():
matrix = builder.buildAffine2x2(1.5, 0, 0.5, 2.0)
assert matrix.xx == builder.VariableFloat(1.5)
assert matrix.xy == builder.VariableFloat(0.0)
assert matrix.yx == builder.VariableFloat(0.5)
assert matrix.yy == builder.VariableFloat(2.0)
def test_buildLinearGradientPaint():
color_stops = [
builder.buildColorStop(0.0, 0),
builder.buildColorStop(0.5, 1),
builder.buildColorStop(1.0, 2, transparency=0.8),
]
color_line = builder.buildColorLine(color_stops, extend=builder.ExtendMode.REPEAT)
p0 = builder.buildPoint(x=100, y=200)
p1 = builder.buildPoint(x=150, y=250)
gradient = builder.buildLinearGradientPaint(color_line, p0, p1)
assert gradient.Format == 2
assert gradient.ColorLine == color_line
assert gradient.p0 == p0
assert gradient.p1 == p1
assert gradient.p2 == gradient.p1
assert gradient.p2 is not gradient.p1
gradient = builder.buildLinearGradientPaint({"stops": color_stops}, p0, p1)
assert gradient.ColorLine.Extend == builder.ExtendMode.PAD
assert gradient.ColorLine.ColorStop == color_stops
gradient = builder.buildLinearGradientPaint(color_line, p0, p1, p2=(150, 230))
assert gradient.p2 == builder.buildPoint(x=150, y=230)
assert gradient.p2 != gradient.p1
def test_buildRadialGradientPaint():
color_stops = [
builder.buildColorStop(0.0, 0),
builder.buildColorStop(0.5, 1),
builder.buildColorStop(1.0, 2, transparency=0.8),
]
color_line = builder.buildColorLine(color_stops, extend=builder.ExtendMode.REPEAT)
c0 = builder.buildPoint(x=100, y=200)
c1 = builder.buildPoint(x=150, y=250)
r0 = builder.VariableInt(10)
r1 = builder.VariableInt(5)
gradient = builder.buildRadialGradientPaint(color_line, c0, c1, r0, r1)
assert gradient.Format == 3
assert gradient.ColorLine == color_line
assert gradient.c0 == c0
assert gradient.c1 == c1
assert gradient.r0 == r0
assert gradient.r1 == r1
assert gradient.Affine is None
gradient = builder.buildRadialGradientPaint({"stops": color_stops}, c0, c1, r0, r1)
assert gradient.ColorLine.Extend == builder.ExtendMode.PAD
assert gradient.ColorLine.ColorStop == color_stops
matrix = builder.buildAffine2x2(2.0, 0.0, 0.0, 2.0)
gradient = builder.buildRadialGradientPaint(
color_line, c0, c1, r0, r1, affine=matrix
)
assert gradient.Affine == matrix
gradient = builder.buildRadialGradientPaint(
color_line, c0, c1, r0, r1, affine=(2.0, 0.0, 0.0, 2.0)
)
assert gradient.Affine == matrix
def test_buildLayerV1Record():
layer = builder.buildLayerV1Record("a", 2)
assert layer.LayerGlyph == "a"
assert layer.Paint.Format == 1
assert layer.Paint.Color.PaletteIndex == 2
layer = builder.buildLayerV1Record("a", builder.buildSolidColorPaint(3, 0.9))
assert layer.Paint.Format == 1
assert layer.Paint.Color.PaletteIndex == 3
assert layer.Paint.Color.Transparency.value == 0.9
layer = builder.buildLayerV1Record(
"a",
builder.buildLinearGradientPaint(
{"stops": [(0.0, 3), (1.0, 4)]}, (100, 200), (150, 250)
),
)
assert layer.Paint.Format == 2
assert layer.Paint.ColorLine.ColorStop[0].StopOffset.value == 0.0
assert layer.Paint.ColorLine.ColorStop[0].Color.PaletteIndex == 3
assert layer.Paint.ColorLine.ColorStop[1].StopOffset.value == 1.0
assert layer.Paint.ColorLine.ColorStop[1].Color.PaletteIndex == 4
assert layer.Paint.p0.x.value == 100
assert layer.Paint.p0.y.value == 200
assert layer.Paint.p1.x.value == 150
assert layer.Paint.p1.y.value == 250
layer = builder.buildLayerV1Record(
"a",
builder.buildRadialGradientPaint(
{
"stops": [
(0.0, 5),
{"offset": 0.5, "paletteIndex": 6, "transparency": 0.8},
(1.0, 7),
]
},
(50, 50),
(75, 75),
30,
10,
),
)
assert layer.Paint.Format == 3
assert layer.Paint.ColorLine.ColorStop[0].StopOffset.value == 0.0
assert layer.Paint.ColorLine.ColorStop[0].Color.PaletteIndex == 5
assert layer.Paint.ColorLine.ColorStop[1].StopOffset.value == 0.5
assert layer.Paint.ColorLine.ColorStop[1].Color.PaletteIndex == 6
assert layer.Paint.ColorLine.ColorStop[1].Color.Transparency.value == 0.8
assert layer.Paint.ColorLine.ColorStop[2].StopOffset.value == 1.0
assert layer.Paint.ColorLine.ColorStop[2].Color.PaletteIndex == 7
assert layer.Paint.c0.x.value == 50
assert layer.Paint.c0.y.value == 50
assert layer.Paint.c1.x.value == 75
assert layer.Paint.c1.y.value == 75
assert layer.Paint.r0.value == 30
assert layer.Paint.r1.value == 10
def test_buildLayerV1Record_from_dict():
layer = builder.buildLayerV1Record("a", {"format": 1, "paletteIndex": 0})
assert layer.LayerGlyph == "a"
assert layer.Paint.Format == 1
assert layer.Paint.Color.PaletteIndex == 0
layer = builder.buildLayerV1Record(
"a",
{
"format": 2,
"colorLine": {"stops": [(0.0, 0), (1.0, 1)]},
"p0": (0, 0),
"p1": (10, 10),
},
)
assert layer.Paint.Format == 2
assert layer.Paint.ColorLine.ColorStop[0].StopOffset.value == 0.0
layer = builder.buildLayerV1Record(
"a",
{
"format": 3,
"colorLine": {"stops": [(0.0, 0), (1.0, 1)]},
"c0": (0, 0),
"c1": (10, 10),
"r0": 4,
"r1": 0,
},
)
assert layer.Paint.Format == 3
assert layer.Paint.r0.value == 4
def test_buildLayerV1Array():
layers = [
("a", 1),
("b", {"format": 1, "paletteIndex": 2, "transparency": 0.5}),
(
"c",
{
"format": 2,
"colorLine": {"stops": [(0.0, 3), (1.0, 4)], "extend": "repeat"},
"p0": (100, 200),
"p1": (150, 250),
},
),
(
"d",
{
"format": 3,
"colorLine": {
"stops": [
{"offset": 0.0, "paletteIndex": 5},
{"offset": 0.5, "paletteIndex": 6, "transparency": 0.8},
{"offset": 1.0, "paletteIndex": 7},
]
},
"c0": (50, 50),
"c1": (75, 75),
"r0": 30,
"r1": 10,
},
),
builder.buildLayerV1Record("e", builder.buildSolidColorPaint(8)),
]
layersArray = builder.buildLayerV1Array(layers)
assert layersArray.LayerCount == len(layersArray.LayerV1Record)
assert all(isinstance(l, ot.LayerV1Record) for l in layersArray.LayerV1Record)
def test_buildBaseGlyphV1Record():
baseGlyphRec = builder.buildBaseGlyphV1Record("a", [("b", 0), ("c", 1)])
assert baseGlyphRec.BaseGlyph == "a"
assert isinstance(baseGlyphRec.LayerV1Array, ot.LayerV1Array)
layerArray = builder.buildLayerV1Array([("b", 0), ("c", 1)])
baseGlyphRec = builder.buildBaseGlyphV1Record("a", layerArray)
assert baseGlyphRec.BaseGlyph == "a"
assert baseGlyphRec.LayerV1Array == layerArray
def test_buildBaseGlyphV1Array():
colorGlyphs = {
"a": [("b", 0), ("c", 1)],
"d": [
("e", {"format": 1, "paletteIndex": 2, "transparency": 0.8}),
(
"f",
{
"format": 3,
"colorLine": {"stops": [(0.0, 3), (1.0, 4)], "extend": "reflect"},
"c0": (0, 0),
"c1": (0, 0),
"r0": 10,
"r1": 0,
},
),
],
"g": builder.buildLayerV1Array([("h", 5)]),
}
glyphMap = {
".notdef": 0,
"a": 4,
"b": 3,
"c": 2,
"d": 1,
"e": 5,
"f": 6,
"g": 7,
"h": 8,
}
baseGlyphArray = builder.buildBaseGlyphV1Array(colorGlyphs, glyphMap)
assert baseGlyphArray.BaseGlyphCount == len(colorGlyphs)
assert baseGlyphArray.BaseGlyphV1Record[0].BaseGlyph == "d"
assert baseGlyphArray.BaseGlyphV1Record[1].BaseGlyph == "a"
assert baseGlyphArray.BaseGlyphV1Record[2].BaseGlyph == "g"
baseGlyphArray = builder.buildBaseGlyphV1Array(colorGlyphs)
assert baseGlyphArray.BaseGlyphCount == len(colorGlyphs)
assert baseGlyphArray.BaseGlyphV1Record[0].BaseGlyph == "a"
assert baseGlyphArray.BaseGlyphV1Record[1].BaseGlyph == "d"
assert baseGlyphArray.BaseGlyphV1Record[2].BaseGlyph == "g"
def test_splitSolidAndGradientGlyphs():
colorGlyphs = {
"a": [
("b", 0),
("c", 1),
("d", {"format": 1, "paletteIndex": 2}),
("e", builder.buildSolidColorPaint(paletteIndex=3)),
]
}
colorGlyphsV0, colorGlyphsV1 = builder._splitSolidAndGradientGlyphs(colorGlyphs)
assert colorGlyphsV0 == {"a": [("b", 0), ("c", 1), ("d", 2), ("e", 3)]}
assert not colorGlyphsV1
colorGlyphs = {
"a": [("b", builder.buildSolidColorPaint(paletteIndex=0, transparency=1.0))]
}
colorGlyphsV0, colorGlyphsV1 = builder._splitSolidAndGradientGlyphs(colorGlyphs)
assert not colorGlyphsV0
assert colorGlyphsV1 == colorGlyphs
colorGlyphs = {
"a": [("b", 0)],
"c": [
("d", 1),
(
"e",
{
"format": 2,
"colorLine": {"stops": [(0.0, 2), (1.0, 3)]},
"p0": (0, 0),
"p1": (10, 10),
},
),
],
}
colorGlyphsV0, colorGlyphsV1 = builder._splitSolidAndGradientGlyphs(colorGlyphs)
assert colorGlyphsV0 == {"a": [("b", 0)]}
assert "a" not in colorGlyphsV1
assert "c" in colorGlyphsV1
assert len(colorGlyphsV1["c"]) == 2
layer_d = colorGlyphsV1["c"][0]
assert layer_d[0] == "d"
assert isinstance(layer_d[1], ot.Paint)
assert layer_d[1].Format == 1
layer_e = colorGlyphsV1["c"][1]
assert layer_e[0] == "e"
assert isinstance(layer_e[1], ot.Paint)
assert layer_e[1].Format == 2
class BuildCOLRTest(object):
def test_automatic_version_all_solid_color_glyphs(self):
colr = builder.buildCOLR({"a": [("b", 0), ("c", 1)]})
assert colr.version == 0
assert hasattr(colr, "ColorLayers")
assert colr.ColorLayers["a"][0].name == "b"
assert colr.ColorLayers["a"][1].name == "c"
def test_automatic_version_no_solid_color_glyphs(self):
colr = builder.buildCOLR(
{
"a": [
(
"b",
{
"format": 3,
"colorLine": {
"stops": [(0.0, 0), (1.0, 1)],
"extend": "repeat",
},
"c0": (1, 0),
"c1": (10, 0),
"r0": 4,
"r1": 2,
},
),
("c", {"format": 1, "paletteIndex": 2, "transparency": 0.8}),
],
"d": [
(
"e",
{
"format": 2,
"colorLine": {
"stops": [(0.0, 2), (1.0, 3)],
"extend": "reflect",
},
"p0": (1, 2),
"p1": (3, 4),
"p2": (2, 2),
},
)
],
}
)
assert colr.version == 1
assert not hasattr(colr, "ColorLayers")
assert hasattr(colr, "table")
assert isinstance(colr.table, ot.COLR)
assert colr.table.BaseGlyphRecordCount == 0
assert colr.table.BaseGlyphRecordArray is None
assert colr.table.LayerRecordCount == 0
assert colr.table.LayerRecordArray is None
def test_automatic_version_mixed_solid_and_gradient_glyphs(self):
colr = builder.buildCOLR(
{
"a": [("b", 0), ("c", 1)],
"d": [
(
"e",
{
"format": 2,
"colorLine": {"stops": [(0.0, 2), (1.0, 3)]},
"p0": (1, 2),
"p1": (3, 4),
"p2": (2, 2),
},
)
],
}
)
assert colr.version == 1
assert not hasattr(colr, "ColorLayers")
assert hasattr(colr, "table")
assert isinstance(colr.table, ot.COLR)
assert colr.table.VarStore is None
assert colr.table.BaseGlyphRecordCount == 1
assert isinstance(colr.table.BaseGlyphRecordArray, ot.BaseGlyphRecordArray)
assert colr.table.LayerRecordCount == 2
assert isinstance(colr.table.LayerRecordArray, ot.LayerRecordArray)
assert isinstance(colr.table.BaseGlyphV1Array, ot.BaseGlyphV1Array)
assert colr.table.BaseGlyphV1Array.BaseGlyphCount == 1
assert isinstance(
colr.table.BaseGlyphV1Array.BaseGlyphV1Record[0], ot.BaseGlyphV1Record
)
assert colr.table.BaseGlyphV1Array.BaseGlyphV1Record[0].BaseGlyph == "d"
assert isinstance(
colr.table.BaseGlyphV1Array.BaseGlyphV1Record[0].LayerV1Array,
ot.LayerV1Array,
)
assert (
colr.table.BaseGlyphV1Array.BaseGlyphV1Record[0]
.LayerV1Array.LayerV1Record[0]
.LayerGlyph
== "e"
)
def test_explicit_version_0(self):
colr = builder.buildCOLR({"a": [("b", 0), ("c", 1)]}, version=0)
assert colr.version == 0
assert hasattr(colr, "ColorLayers")
def test_explicit_version_1(self):
colr = builder.buildCOLR({"a": [("b", 0), ("c", 1)]}, version=1)
assert colr.version == 1
assert not hasattr(colr, "ColorLayers")
assert hasattr(colr, "table")
assert isinstance(colr.table, ot.COLR)
assert colr.table.VarStore is None
|
import threading
import logging
from defensics.coap_proxy import CoapProxy
from defensics.tcp_server import TCPServer
class DataHandler(threading.Thread):
def __init__(self, proxy: CoapProxy, tcp_server: TCPServer):
super().__init__()
self.proxy = proxy
self.tcp_server = tcp_server
logging.debug('local proxy: ' + str(self.proxy) + 'global proxy: ' + str(proxy))
def run(self):
rc = 1
while rc != 0:
rc = self.proxy.run()
logging.debug('Proxy retry')
logging.debug(str(self.proxy.device_iface) + str(self.proxy.req_char_iface) + str(self.proxy.rsp_char_iface))
self.tcp_server.start()
for data in self.tcp_server.recv():
if self.proxy.is_ready():
try:
rsp = self.proxy.send(data)
self.tcp_server.send(rsp)
except TimeoutError:
logging.debug("Response timeout!")
# because proxy isn't run in separate process, it can easily be restarted
self.proxy.run()
logging.debug("Reconnecting")
else:
logging.error('proxy not ready')
self.proxy.run()
|
# This is a comment
import os
import sys
from PySide2.QtWidgets import QApplication, QMainWindow
from PySide2.QtCore import QFile
from PySide2.Qt3DExtras import Qt3DExtras
from PySide2.Qt3DCore import Qt3DCore
from PySide2.QtGui import QVector3D, QColor
from PySide2.Qt3DRender import Qt3DRender
from ui_mainwindow import Ui_MainWindow
from PySide2.QtCore import QObject, Signal, Slot
import numpy as np
class MainWindow(QMainWindow):
def __init__(self, simulator):
super(MainWindow, self).__init__()
self.simulator = simulator
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.pushButton.clicked.connect(self.echo)
@Slot()
def echo(self):
self.simulator.run()
class Simulator(object):
def __init__(self):
pass
def run(self):
print('Running simulation')
if __name__ == "__main__":
app = QApplication(sys.argv)
simulator = Simulator()
window = MainWindow(simulator)
window.show()
sys.exit(app.exec_())
|
import zmq
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")
example_container_id = "346cddf529f3a92d49d6d2b6a8ceb2154eff14709c10123ef1432029e4f2864a"
while True:
message = socket.recv_string()
socket.send_string(example_container_id)
|
#!/usr/bin/env python
from argparse import ArgumentParser
from os.path import isfile, isdir, join
from sys import exit
import numpy as np
#from pleiopred import prior_generating, coord_trimmed, pre_sumstats
import pred_main_bi_rho
# Create the master argparser and returns the argparser object
def get_argparser():
parser = ArgumentParser(prog="PleioPred",
description="Genetic Risk Prediction by joint modeling of multiple diseases and functional annotation.")
## Input Files
## Parameters
parser.add_argument('--N1', required=True, type=int,
help="Sample size of the first disease GWAS")
parser.add_argument('--N2', required=True, type=int,
help="Sample size of the second disease GWAS")
parser.add_argument('--rho', required=True, type=float,
help="Tuning parameter in (-1,1)"
", the genetic correlation between diseases")
parser.add_argument('--alpha', required=True, type=str,
help="hyperparameter for the prior of PV")
parser.add_argument('--init_PV', required=True, type=str,
help="hyperparameter for the prior of PV")
parser.add_argument('--init_betas', required=True,
help="path to initial values (AnnoPred-inf scores)")
parser.add_argument('--zero_jump_prob', required=True, type=float,
help="shrinkage level")
parser.add_argument('--num_iter', type=int, default=60,
help="Number of iterations for MCMC, default to 60.")
parser.add_argument('--burn_in', type=int, default=10,
help="burn-in for MCMC, default to 10.")
parser.add_argument('--local_ld_prefix', required=True,
help="A local LD file name prefix"
", will be created if not present")
parser.add_argument('--hfile', required=True,
help="per-SNP heritability estimation")
parser.add_argument('--ld_radius', type=int,
help="If not provided, will use the number of SNPs in"
" common divided by 3000")
parser.add_argument('--coord_D1', required=True,
help="Output H5 File for coord_genotypes of D1")
parser.add_argument('--coord_D2', required=True,
help="Output H5 File for coord_genotypes of D2")
parser.add_argument('--out', default="PleioPred_out",
help="Output filename prefix for AnnoPred")
parser.add_argument('--user_h1', type=float,
help="User-provided heritability estimation for D1")
parser.add_argument('--user_h2', type=float,
help="User-provided heritability estimation for D2")
return parser
def process_args(args):
pdict = {}
pdict['coord_D1'] = args.coord_D1
pdict['coord_D2'] = args.coord_D2
pdict['N1'] = args.N1
pdict['N2'] = args.N2
if (args.rho>-1 and args.rho<1):
pdict['rho'] = args.rho
else:
exit("Tuning parameter needs to be in (-1,1)!")
pdict['ld_radius'] = args.ld_radius
pdict['local_ld_prefix'] = args.local_ld_prefix
pdict['hfile'] = args.hfile
pdict['out'] = args.out
pdict['alpha'] = [float(item) for item in args.alpha.split(',')]
pdict['zero_jump_prob'] = args.zero_jump_prob
pdict['num_iter'] = args.num_iter
pdict['burn_in'] = args.burn_in
pdict['init_betas'] = args.init_betas
pdict['init_PV'] = [float(item) for item in args.init_PV.split(',')]
pdict['user_h1'] = args.user_h1
pdict['user_h2'] = args.user_h2
return pdict
def main(pdict):
print(pdict)
pred_main_bi_rho.main(pdict)
if __name__ == '__main__':
args = get_argparser().parse_args()
main(process_args(args))
|
from .sr04 import SR04
|
from django.apps import AppConfig
class FlippyConfig(AppConfig):
name = "flippy"
|
"""
This middleware can be used when a known proxy is fronting the application,
and is trusted to be properly setting the `X-Forwarded-Proto`,
`X-Forwarded-Host` and `X-Forwarded-Prefix` headers.
Modifies the `host`, `root_path` and `scheme` information accordingly.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies
Original source: https://github.com/encode/uvicorn/blob/master/uvicorn/middleware/proxy_headers.py
Altered to accommodate x-forwarded-host instead of x-forwarded-for
Altered: 27-01-2022
"""
from typing import List, Optional, Tuple, Union
from starlette.types import ASGIApp, Receive, Scope, Send
Headers = List[Tuple[bytes, bytes]]
class ProxyHeadersMiddleware:
    def __init__(self, app: ASGIApp, trusted_hosts: Union[List[str], str] = "127.0.0.1") -> None:
self.app = app
if isinstance(trusted_hosts, str):
self.trusted_hosts = {item.strip() for item in trusted_hosts.split(",")}
else:
self.trusted_hosts = set(trusted_hosts)
self.always_trust = "*" in self.trusted_hosts
def remap_headers(self, src: Headers, before: bytes, after: bytes) -> Headers:
remapped = []
before_value = None
after_value = None
for header in src:
k, v = header
if k == before:
before_value = v
continue
elif k == after:
after_value = v
continue
remapped.append(header)
if after_value:
remapped.append((before, after_value))
elif before_value:
remapped.append((before, before_value))
return remapped
    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if scope["type"] in ("http", "websocket"):
client_addr: Optional[Tuple[str, int]] = scope.get("client")
client_host = client_addr[0] if client_addr else None
if self.always_trust or client_host in self.trusted_hosts:
headers = dict(scope["headers"])
if b"x-forwarded-proto" in headers:
# Determine if the incoming request was http or https based on
# the X-Forwarded-Proto header.
x_forwarded_proto = headers[b"x-forwarded-proto"].decode("latin1")
scope["scheme"] = x_forwarded_proto.strip() # type: ignore[index]
if b"x-forwarded-host" in headers:
# Setting scope["server"] is not enough because of https://github.com/encode/starlette/issues/604#issuecomment-543945716
scope["headers"] = self.remap_headers(
scope["headers"], b"host", b"x-forwarded-host"
)
if b"x-forwarded-prefix" in headers:
x_forwarded_prefix = headers[b"x-forwarded-prefix"].decode("latin1")
scope["root_path"] = x_forwarded_prefix
return await self.app(scope, receive, send) |
from typing import List
class Solution:
def kthSmallest(self, mat: List[List[int]], k: int) -> int:
        # Fold the rows in one at a time, keeping only the k smallest pairwise
        # sums so far; after the last row, h holds the k smallest array sums.
        h = mat[0][:]
        for row in mat[1:]:
            h = sorted([i + j for i in row for j in h])[:k]
return h[k-1]
|
# Copyright 2019 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from beakerx.tabledisplay import TableDisplay
class TestTableDisplayAPI_time_zone(unittest.TestCase):
def test_default_time_zone(self):
# given
mapList4 = [
{"a": 1, "b": 2, "c": 3},
{"a": 4, "b": 5, "c": 6},
{"a": 7, "b": 8, "c": 5}
]
# when
tabledisplay = TableDisplay(mapList4)
# then
self.assertTrue("timeZone" not in tabledisplay.model)
def test_should_set_time_zone(self):
# given
timezone = "TZ1"
mapList4 = [
{"a": 1, "b": 2, "c": 3},
{"a": 4, "b": 5, "c": 6},
{"a": 7, "b": 8, "c": 5}
]
tabledisplay = TableDisplay(mapList4)
# when
tabledisplay.setTimeZone(timezone)
# then
self.assertEqual(tabledisplay.model["timeZone"], timezone)
|
input_number: float = float(input())
if input_number == 0:
print('zero')
exit(0)
input_number_sign: str = 'positive' if input_number > 0 else 'negative'
input_number_abs: float = abs(input_number)
if input_number_abs < 1:
print(f'small {input_number_sign}')
elif input_number_abs > 1000000:
print(f'large {input_number_sign}')
else:
print(input_number_sign)
|
from typing import Callable
def chain_functions(*functions_list: Callable) -> Callable:
    """Chain the given functions into a single pipeline.
    A helper function that creates another one that invokes
    all the given functions (defined in functions_list) in
    a waterfall way.
    The given functions should have the same input/output
    interface in order to run properly as a pipeline.
    :param functions_list: The functions to chain, applied in order.
    :type functions_list: Callable
    :return: A function that runs the whole pipeline.
    :rtype: Callable
    """
    def wrapper_function(*input_args: object) -> object:
res = input_args
for single_function in functions_list:
args_as_list = object_as_tuple(res)
res = single_function(*args_as_list)
return res
return wrapper_function
def object_as_tuple(obj: object) -> tuple:
"""Transform an object into a tuple.
    If the given object is already a tuple, just return it.
:param obj: A given object
:type obj: object
:return: The object as a tuple.
:rtype: tuple
"""
if isinstance(obj, tuple):
return obj
else:
return (obj,)
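# Illustrative usage sketch (the toy functions below are assumptions):
# chain_functions(f, g)(x) computes g(f(x)).
if __name__ == "__main__":
    def double(x: int) -> int:
        return x * 2
    def increment(x: int) -> int:
        return x + 1
    pipeline = chain_functions(double, increment)
    assert pipeline(3) == 7  # increment(double(3))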
|
import sys, itertools
# sys.stdin = open('input.txt', 'r')
n, m = map(int, sys.stdin.readline().strip().split())
# temp = itertools.combinations(range(n), m)
for temp in itertools.permutations(range(1, n+1), m):
print(*temp) |
import asyncio
import time
import numpy as np
from mavfleetcontrol.craft import Craft
from mavfleetcontrol.actions.point import FlyToPoint
from mavfleetcontrol.actions.percision_land import PercisionLand
from mavfleetcontrol.actions.arm import Arm
from mavfleetcontrol.actions.disarm import Disarm
from mavfleetcontrol.actions.land import land
from mavfleetcontrol.actions.circle import Circle
import zmq
from zmq.asyncio import Context, Poller
"""Reciever to be run on the companion computer of the drone
using zmq with asyncio with pub/sub and dealer/router"""
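# Illustrative sender sketch (an assumption about the peer, which is not shown
# in this module): commands arrive as single-frame multipart messages on a PUSH
# socket bound to url2; e.g. sending [b'OA3'] triggers the FlyToPoint action
# handled below, while a bare b'.' frame acts as a heartbeat.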
url = 'tcp://127.0.0.1:5555'
url2 = 'tcp://127.0.0.1:5556'
ctx = Context.instance()
async def heartbeat(drone):
while True:
await asyncio.sleep(1)
# drone.override_action(land)
async def receiver(drone):
"""receive messages with polling"""
pull = ctx.socket(zmq.PULL)
pull.connect(url2)
poller = Poller()
poller.register(pull, zmq.POLLIN)
while True:
try:
events = await poller.poll()
if pull in dict(events):
# print("recving", events)
msg = await pull.recv_multipart()
# print((msg[0]))
# print(msg.type)
                if msg[0] == b'.':
                    # heartbeat(drone)
                    pass
                if msg[0][:2] == b'OA':
                    # print(msg[0][2])
                    if msg[0][2] == 49:
                        drone.add_action(Arm())
                    if msg[0][2] == 50:
                        drone.add_action(Disarm())
                    if msg[0][2] == 51:
                        drone.add_action(FlyToPoint(np.array([0, 0, -1]), tolerance=2.5))
                    if msg[0][2] == 52:
                        drone.add_action(land())
                    if msg[0][2] == 53:
                        drone.add_action(PercisionLand(1.0, np.array([1, 1])))
                    if msg[0][2] == 54:
                        drone.add_action(FlyToPoint(np.array([0, 0, -10]), tolerance=1))
                    if msg[0][2] == 55:
                        drone.add_action(Circle(velocity=20.0, radius=8.0, angle=0.0))
                    if msg[0][2] == 57:
                        drone.override_action(Spin())
                    if msg[0][2] == 57:
                        drone.override_action(Killing())
except Exception as E:
print(E)
#----------------------------------------------------------------------
if __name__ == "__main__":
# drone = Craft("drone1","serial:///dev/serial0:1000000")
drone = Craft("drone1", "udp://:14540")
drone.start()
asyncio.ensure_future(receiver(drone))
asyncio.get_event_loop().run_forever()
|
from max_sequence import max_sequence
import unittest
class Test(unittest.TestCase):
def test_1(self):
result = max_sequence([-2, 1, -3, 4, -1, 2, 1, -5, 4])
self.assertEqual(result, [4, -1, 2, 1])
def test_2(self):
result = max_sequence([-2, -1, -3, -4, -1, -2, -1, -5, -4])
self.assertEqual(result, [])
if __name__ == "__main__":
unittest.main()
|
# NOTE: For stable output, this should be run on a Python version that
# guarantees dict order (3.7+).
import re
import os.path
import sys
from enum import Flag, auto
from collections import defaultdict
from pprint import pprint
if len(sys.argv) < 3:
print("Need input and output file names")
sys.exit(1)
fn = os.path.expanduser(sys.argv[1])
out = os.path.expanduser(sys.argv[2])
print("Building %s from %s" % (out, fn))
# In a Source config file (does the format have a name?), data consists
# of alternating keys and values. Keys are always strings; values are
# either strings or mappings. A string starts with a double quote, ends
# with a double quote, and... what happens if it contains one? TODO.
# A mapping starts with an open brace, contains one or more (TODO: can
# it contain zero?) key+value pairs, and ends with a close brace.
# Between any two values, any amount of whitespace is found, possibly
# including comments, which start with "//" and end at EOL.
# 20210521: People call it "VDF", I guess that works. And yes, a mapping
# can contain zero kv pairs, which should be fine for this code. Still
# don't know about strings containing quotes. Other notes on parsing this
# format can be seen in my shed - parsevdf.pike and its grammar.
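# Illustrative example (a made-up snippet, not taken from the Valve files):
#   "root" { "key" "value" // trailing comment
#            "nested" { } }
# is parsed by parse_cfg() below into {"key": "value", "nested": {}}; the
# top-level title string is consumed and the inner mapping is returned.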
# Skip whitespace and comments
RE_SKIP = re.compile(r'(\s*//[^\n]*\n)*\s*')
# Read a single string
RE_STRING = re.compile(r'"([^"\n]*)"')
def merge_mappings(m1, m2, path):
"""Recursively merge the contents of m2 into m1"""
if type(m1) is not type(m2): raise ValueError("Cannot merge %s and %s" % (type(m1), type(m2)))
if type(m1) is str:
if m1 == m2: return m1 # Attempting to merge "foo" into "foo" just produces "foo"
# Actually.... the Valve-provided files have dirty data in them.
# We can't make assertions like this. Sigh.
# raise ValueError("Cannot merge different strings %r and %r --> %s" % (m1, m2, path))
return m1 # Keep the one from m1... I think??
for k,v in m2.items():
if k in m1: merge_mappings(m1[k], v, path + "." + k)
else: m1[k] = v
def parse_cfg(data):
pos = 0
def skip_ws():
nonlocal pos
pos = RE_SKIP.match(data, pos).end()
def parse_str():
nonlocal pos
m = RE_STRING.match(data, pos)
if not m: raise ValueError("Unable to parse string at pos %d" % pos)
pos = m.end()
return m.group(1)
def parse_mapping(path):
nonlocal pos
pos += 1 # Skip the initial open brace
ret = {}
while "moar stuffo":
skip_ws()
if data[pos] == '}': break
key = parse_str()
value = parse_value(path + "." + key)
if key in ret:
# Sometimes there are duplicates. I don't know what the deal is.
merge_mappings(value, ret[key], path + "." + key)
ret[key] = value
pos += 1 # Skip the final close brace
return ret
def parse_value(path):
skip_ws()
if data[pos] == '"': return parse_str()
if data[pos] == '{': return parse_mapping(path)
raise ValueError("Unexpected glyph '%s' at pos %d" % (data[pos], pos))
skip_ws()
assert data[pos] == '"' # The file should always start with a string
title = parse_str()
return parse_value(title)
with open(fn) as f: data = f.read()
info = parse_cfg(data)
class Cat(Flag):
Pistol = auto()
Shotgun = auto()
SMG = auto()
AR = auto()
Sniper = auto()
LMG = auto()
Grenade = auto() # Not currently being listed
Equipment = auto() # Not currently being listed
# ----- The first eight categories define the weapon type. Others are flags. Note that
# the number of categories above this line is hard-coded as WEAPON_TYPE_CATEGORIES below.
Automatic: "fully Automatic gun" = auto()
Scoped: "Scoped weapon" = auto()
Starter: "Starter pistol" = auto()
NonDamaging = auto() # Not currently detected (will only be on grenade/equip)
# Create some aliases used by the weapon_type lookup
SubMachinegun = SMG
Rifle = AR
SniperRifle = Sniper
Machinegun = LMG
demo_items = dict(
# - "How many total Shotguns do I have here?" -- just count 'em (7)
nova=1,
mag7=3,
sawedoff=3,
xm1014=0,
# - "Find my largest magazine fully Automatic gun. How many shots till I reload?" -- it's a Galil (35)
galilar=1,
bizon=0,
p90=0,
m249=0,
negev=0,
# - "How many distinct Pistols do I have here?" -- count unique items (5)
# The actual selection here is arbitrary and could be randomized.
deagle=-3,
elite=-3,
fiveseven=-3,
glock=0,
hkp2000=-3,
p250=0,
cz75a=0,
tec9=-3,
usp_silencer=0,
revolver=0,
# - "This is my SMG. There are none quite like it. How well does it penetrate armor?" -- it's an MP9 (60)
mp9=1,
mac10=-2,
mp7=-2,
mp5sd=-2,
ump45=-2,
# - "This is my Shotgun. There are none quite like it. How many shots till I reload?" -- it's a Nova (8)
# (covered above)
)
arrays = defaultdict(list)
arrays["categories"] = [c.name for c in Cat]
arrays["category_descr"] = [c.__annotations__.get(c.name, c.name) for c in Cat]
for weapon, data in info["prefabs"].items():
if data.get("prefab") == "grenade":
print("Got a nade:", weapon)
if "item_class" not in data or "attributes" not in data: continue
# This is a sneaky way to restrict it to just "normal weapons", since
# you can't apply a sticker to your fists or your tablet :)
if "stickers" not in data: continue
weap = weapon.replace("_prefab", "")
arrays["item_name"].append(weap) # NOTE: This isn't always the same as the weapon_class (cf CZ75a).
weap = weap.replace("weapon_", "")
for attr, dflt in {
"primary clip size": "-1",
"primary reserve ammo max": "-1",
"in game price": "-1",
"kill award": "300",
"range modifier": "0.98",
}.items():
arrays[attr.replace(" ", "_")].append(float(data["attributes"].get(attr, dflt)))
arrays["armor_pen"].append(float(data["attributes"]["armor ratio"]) * 50)
# The data file has two speeds available. For scoped weapons, the main
# speed is unscoped and the alternate is scoped (SG556 has 210 / 150), but
# for the stupid Revolver, the alternate speed is your real speed, and
# the "base" speed is how fast you move while charging your shot. Since
# logically the shot-charging is the alternate, we just pick the higher
# speed in all cases, so we'll call the SG556 "210" and the R8 "220".
spd = data["attributes"].get("max player speed", "260")
spd2 = data["attributes"].get("max player speed alt", "260")
arrays["max_player_speed"].append(max(float(spd), float(spd2)))
arrays["demo_quantity"].append(demo_items.get(weap, -1))
cat = Cat[data["visuals"]["weapon_type"]]
if int(data["attributes"].get("bullets", "1")) > 1: cat |= Cat.Shotgun # Probably don't actually need this
if int(data["attributes"].get("is full auto", "0")): cat |= Cat.Automatic
if int(data["attributes"].get("zoom levels", "0")): cat |= Cat.Scoped
if data["item_class"] in {"weapon_hkp2000", "weapon_glock"}: cat |= Cat.Starter
# TODO: Suppressed weapons
arrays["category"].append(cat.value)
# Get a quick dump of which weapons are in which categories
# for c in Cat:
# if cat & c: arrays["cat_" + c.name].append(weap)
# pprint(list(info["prefabs"]))
with open(out, "w") as f:
print("//Autogenerated file, do not edit", file=f)
print("#define WEAPON_TYPE_CATEGORIES 8", file=f)
for name, arr in arrays.items():
if name in {"item_name", "categories", "category_descr"} or name.startswith("cat_"): # String fields
print(f"char weapondata_{name}[][] = {{", file=f)
for val in arr:
print(f'\t"{val}",', file=f) # Don't have quotes in them. K?
else: # Numeric fields
t = {"category": "int", "demo_quantity": "int"}.get(name, "float")
print(f"{t} weapondata_{name}[] = {{", file=f)
for val in arr:
print(f"\t{val},", file=f)
print("};", file=f)
print(f"//Autogenerated from {fn}", file=f)
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.linguistic2
import typing
from abc import abstractmethod, ABC
if typing.TYPE_CHECKING:
from ..lang.locale import Locale as Locale_70d308fa
class XLanguageGuessing(ABC):
"""
This interface allows to guess the language of a text.
The current set of supported languages is:
**since**
OOo 2.2
See Also:
`API XLanguageGuessing <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1linguistic2_1_1XLanguageGuessing.html>`_
"""
__ooo_ns__: str = 'com.sun.star.linguistic2'
__ooo_full_ns__: str = 'com.sun.star.linguistic2.XLanguageGuessing'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.linguistic2.XLanguageGuessing'
@abstractmethod
def disableLanguages(self, aLanguages: 'typing.Tuple[Locale_70d308fa, ...]') -> None:
"""
allows to explicitly discard some languages from the set of languages possibly returned.
By default all languages are enabled.
Raises:
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
@abstractmethod
def enableLanguages(self, aLanguages: 'typing.Tuple[Locale_70d308fa, ...]') -> None:
"""
allows to explicitly re-enable some languages that got previously disabled.
By default all languages are enabled.
Raises:
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
@abstractmethod
def getAvailableLanguages(self) -> 'typing.Tuple[Locale_70d308fa, ...]':
"""
returns a list of all supported languages.
This should be the same as the mathematical union of all enabled and disabled languages.
"""
@abstractmethod
def getDisabledLanguages(self) -> 'typing.Tuple[Locale_70d308fa, ...]':
"""
returns the list of all disabled languages
"""
@abstractmethod
def getEnabledLanguages(self) -> 'typing.Tuple[Locale_70d308fa, ...]':
"""
returns the list of all enabled languages
"""
@abstractmethod
def guessPrimaryLanguage(self, aText: str, nStartPos: int, nLen: int) -> 'Locale_70d308fa':
"""
determines the single most probable language of a sub-string.
Please note that because statistical analysis is part of the algorithm the likelihood to get the correct result increases with the length of the sub-string. A word is much less likely guessed correctly compared to a sentence or even a whole paragraph.
Also note that some languages are that \"close\" to each other that it will be quite unlikely to find a difference in them, e.g. English (UK), English (IE) and English (AUS) and most likely English (US) as well. And thus the result may be arbitrary.
Raises:
com.sun.star.lang.IllegalArgumentException: ``IllegalArgumentException``
"""
__all__ = ['XLanguageGuessing']
|
DEPS = [
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/step',
]
# TODO(phajdan.jr): provide coverage (http://crbug.com/693058).
DISABLE_STRICT_COVERAGE = True
|
clinvar = open('/lgc/datasets/dbsnp/141/clinvar_20140702.vcf')
variants = 0
variants_before_129 = 0
pathogenic_before_129 = 0
for line in clinvar:
if not line.startswith('#'):
variants += 1
row = line.split('\t')
info = row[7].split(';')
#get dbsnpbuild
dbsnpbuild = 0
for item in info:
if item.startswith('dbSNPBuildID'):
dbsnpbuild = int(item.split('=')[1])
# print 'dbsnpbuild', dbsnpbuild
if dbsnpbuild <= 129:
variants_before_129 += 1
#check if it's pathogenic
if item.startswith('CLNSIG'):
# print item
significance = item.split('=')[1]
# 5 is pathogenic
# print 'significance', significance
if dbsnpbuild <= 129 and significance == '5':
pathogenic_before_129 += 1
                    print(line)
print('total variants: ', variants)
print('total variants_before_129: ', variants_before_129)
print('total pathogenic_before_129: ', pathogenic_before_129)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 22 23:55:22 2021
@author: Reza
"""
class InternalServerError(Exception):
pass
class SchemaValidationError(Exception):
pass
class DataNotExistsError(Exception):
pass
class KeyError(Exception):  # NOTE: shadows the built-in KeyError in this module
    pass
class ThresholdError(Exception):
pass
errors = {
"InternalServerError": {
"message": "Something went wrong",
"status": 500
},
"SchemaValidationError": {
"message": "Request is missing required fields",
"status": 400
},
"DataNotExistsError": {
"message": "Data does not exists",
"status": 400
},
"KeyError": {
"message": "Search parameter not found in data",
"status": 400
},
"ThresholdError": {
"message": "Threshold is missing",
"status": 400
}
} |
import time
subscriptions = set()
def subscribe_for_events(sub):
subscriptions.add(sub)
def unsubscribe_from_events(sub):
subscriptions.remove(sub)
def put_event(event):
for subscription in subscriptions:
subscription.put(event)
def app_state_event(app_state):
put_event({
'type': 'app_state',
'unix_timestamp': time.time(),
'app_state': app_state,
})
def image_event(device_name, image_path):
# path is relative to /static/
put_event({
'type': 'image',
'unix_timestamp': time.time(),
'device_name': device_name,
'image_path': image_path,
})
def log_event(text):
put_event({
'type': 'log',
'unix_timestamp': time.time(),
'text': text,
})
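# Illustrative usage sketch (assumption: a subscriber is any object exposing a
# put() method, e.g. queue.Queue).
if __name__ == "__main__":
    import queue
    q = queue.Queue()
    subscribe_for_events(q)
    log_event("hello")
    print(q.get())  # {'type': 'log', 'unix_timestamp': <float>, 'text': 'hello'}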
|
import functools
import json
import logging
import re
import traceback
import urllib.parse
from apispec import yaml_utils
from flask import Blueprint, current_app, jsonify, make_response, request
from flask_babel import lazy_gettext as _
import jsonschema
from marshmallow import ValidationError
from marshmallow_sqlalchemy.fields import Related, RelatedList
import prison
from sqlalchemy.exc import IntegrityError
from werkzeug.exceptions import BadRequest
import yaml
from flask_appbuilder.api.convert import Model2SchemaConverter
from flask_appbuilder.api.schemas import get_info_schema, get_item_schema, get_list_schema
from flask_appbuilder._compat import as_unicode
from flask_appbuilder.const import (
API_ADD_COLUMNS_RES_KEY,
API_ADD_COLUMNS_RIS_KEY,
API_ADD_TITLE_RES_KEY,
API_ADD_TITLE_RIS_KEY,
API_DESCRIPTION_COLUMNS_RES_KEY,
API_DESCRIPTION_COLUMNS_RIS_KEY,
API_EDIT_COLUMNS_RES_KEY,
API_EDIT_COLUMNS_RIS_KEY,
API_EDIT_TITLE_RES_KEY,
API_EDIT_TITLE_RIS_KEY,
API_FILTERS_RES_KEY,
API_FILTERS_RIS_KEY,
API_LABEL_COLUMNS_RES_KEY,
API_LABEL_COLUMNS_RIS_KEY,
API_LIST_COLUMNS_RES_KEY,
API_LIST_COLUMNS_RIS_KEY,
API_LIST_TITLE_RES_KEY,
API_LIST_TITLE_RIS_KEY,
API_ORDER_COLUMN_RIS_KEY,
API_ORDER_COLUMNS_RES_KEY,
API_ORDER_COLUMNS_RIS_KEY,
API_ORDER_DIRECTION_RIS_KEY,
API_PAGE_INDEX_RIS_KEY,
API_PAGE_SIZE_RIS_KEY,
API_PERMISSIONS_RES_KEY,
API_PERMISSIONS_RIS_KEY,
API_RESULT_RES_KEY,
API_SELECT_COLUMNS_RIS_KEY,
API_SHOW_COLUMNS_RES_KEY,
API_SHOW_COLUMNS_RIS_KEY,
API_SHOW_TITLE_RES_KEY,
API_SHOW_TITLE_RIS_KEY,
API_URI_RIS_KEY,
PERMISSION_PREFIX,
)
from flask_appbuilder.exceptions import FABException, InvalidOrderByColumnFABException
from flask_appbuilder.security.decorators import permission_name, protect, has_access
from flask_appbuilder.api import BaseModelApi, BaseApi, ModelRestApi
from myapp import app, appbuilder, db, event_logger
conf = app.config
log = logging.getLogger(__name__)
def get_error_msg():
if current_app.config.get("FAB_API_SHOW_STACKTRACE"):
return traceback.format_exc()
return "Fatal error"
def safe(f):
def wraps(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except BadRequest as e:
return self.response_error(400,message=str(e))
except Exception as e:
logging.exception(e)
return self.response_error(500,message=get_error_msg())
return functools.update_wrapper(wraps, f)
def rison(schema=None):
"""
Use this decorator to parse URI *Rison* arguments to
a python data structure, your method gets the data
structure on kwargs['rison']. Response is HTTP 400
if *Rison* is not correct::
class ExampleApi(BaseApi):
@expose('/risonjson')
@rison()
def rison_json(self, **kwargs):
return self.response(200, result=kwargs['rison'])
You can additionally pass a JSON schema to
validate Rison arguments::
schema = {
"type": "object",
"properties": {
"arg1": {
"type": "integer"
}
}
}
class ExampleApi(BaseApi):
@expose('/risonjson')
@rison(schema)
def rison_json(self, **kwargs):
return self.response(200, result=kwargs['rison'])
"""
def _rison(f):
def wraps(self, *args, **kwargs):
value = request.args.get(API_URI_RIS_KEY, None)
kwargs["rison"] = dict()
if value:
try:
kwargs["rison"] = prison.loads(value)
except prison.decoder.ParserException:
if current_app.config.get("FAB_API_ALLOW_JSON_QS", True):
# Rison failed try json encoded content
try:
kwargs["rison"] = json.loads(
urllib.parse.parse_qs(f"{API_URI_RIS_KEY}={value}").get(
API_URI_RIS_KEY
)[0]
)
except Exception:
return self.response_error(400,message="Not a valid rison/json argument"
)
else:
return self.response_error(400,message="Not a valid rison argument")
if schema:
try:
jsonschema.validate(instance=kwargs["rison"], schema=schema)
except jsonschema.ValidationError as e:
return self.response_error(400,message=f"Not a valid rison schema {e}")
return f(self, *args, **kwargs)
return functools.update_wrapper(wraps, f)
return _rison
def expose(url="/", methods=("GET",)):
"""
Use this decorator to expose API endpoints on your API classes.
:param url:
Relative URL for the endpoint
:param methods:
Allowed HTTP methods. By default only GET is allowed.
"""
def wrap(f):
if not hasattr(f, "_urls"):
f._urls = []
f._urls.append((url, methods))
return f
return wrap
# Add extra fields and data to the response body
def merge_response_func(func, key):
"""
Use this decorator to set a new merging
response function to HTTP endpoints
candidate function must have the following signature
and be childs of BaseApi:
```
def merge_some_function(self, response, rison_args):
```
:param func: Name of the merge function where the key is allowed
:param key: The key name for rison selection
:return: None
"""
def wrap(f):
if not hasattr(f, "_response_key_func_mappings"):
f._response_key_func_mappings = dict()
f._response_key_func_mappings[key] = func
return f
return wrap
import pysnooper
# @pysnooper.snoop(depth=5)
# Exposes url + view functions. The view functions are overridden, and the exposed urls are overridden as well.
class MyappModelRestApi(ModelRestApi):
api_type = 'json'
allow_browser_login = True
base_filters = []
page_size = 100
old_item = {}
datamodel=None
post_list=None
pre_json_load=None
# @pysnooper.snoop()
def merge_add_field_info(self, response, **kwargs):
_kwargs = kwargs.get("add_columns", {})
response[API_ADD_COLUMNS_RES_KEY] = self._get_fields_info(
self.add_columns,
self.add_model_schema,
self.add_query_rel_fields,
**_kwargs,
)
def merge_edit_field_info(self, response, **kwargs):
_kwargs = kwargs.get("edit_columns", {})
response[API_EDIT_COLUMNS_RES_KEY] = self._get_fields_info(
self.edit_columns,
self.edit_model_schema,
self.edit_query_rel_fields,
**_kwargs,
)
def merge_search_filters(self, response, **kwargs):
# Get possible search fields and all possible operations
search_filters = dict()
dict_filters = self._filters.get_search_filters()
for col in self.search_columns:
search_filters[col] = [
{"name": as_unicode(flt.name), "operator": flt.arg_name}
for flt in dict_filters[col]
]
response[API_FILTERS_RES_KEY] = search_filters
def merge_add_title(self, response, **kwargs):
response[API_ADD_TITLE_RES_KEY] = self.add_title
def merge_edit_title(self, response, **kwargs):
response[API_EDIT_TITLE_RES_KEY] = self.edit_title
def merge_label_columns(self, response, **kwargs):
_pruned_select_cols = kwargs.get(API_SELECT_COLUMNS_RIS_KEY, [])
if _pruned_select_cols:
columns = _pruned_select_cols
else:
# Send the exact labels for the caller operation
if kwargs.get("caller") == "list":
columns = self.list_columns
elif kwargs.get("caller") == "show":
columns = self.show_columns
else:
columns = self.label_columns # pragma: no cover
response[API_LABEL_COLUMNS_RES_KEY] = self._label_columns_json(columns)
def merge_list_label_columns(self, response, **kwargs):
self.merge_label_columns(response, caller="list", **kwargs)
def merge_show_label_columns(self, response, **kwargs):
self.merge_label_columns(response, caller="show", **kwargs)
# @pysnooper.snoop()
def merge_show_columns(self, response, **kwargs):
_pruned_select_cols = kwargs.get(API_SELECT_COLUMNS_RIS_KEY, [])
if _pruned_select_cols:
response[API_SHOW_COLUMNS_RES_KEY] = _pruned_select_cols
else:
response[API_SHOW_COLUMNS_RES_KEY] = self.show_columns
def merge_description_columns(self, response, **kwargs):
_pruned_select_cols = kwargs.get(API_SELECT_COLUMNS_RIS_KEY, [])
if _pruned_select_cols:
response[API_DESCRIPTION_COLUMNS_RES_KEY] = self._description_columns_json(
_pruned_select_cols
)
else:
            # Send all column descriptions when no column selection was requested
response[API_DESCRIPTION_COLUMNS_RES_KEY] = self._description_columns_json(
self.description_columns
)
def merge_list_columns(self, response, **kwargs):
_pruned_select_cols = kwargs.get(API_SELECT_COLUMNS_RIS_KEY, [])
if _pruned_select_cols:
response[API_LIST_COLUMNS_RES_KEY] = _pruned_select_cols
else:
response[API_LIST_COLUMNS_RES_KEY] = self.list_columns
def merge_order_columns(self, response, **kwargs):
_pruned_select_cols = kwargs.get(API_SELECT_COLUMNS_RIS_KEY, [])
if _pruned_select_cols:
response[API_ORDER_COLUMNS_RES_KEY] = [
order_col
for order_col in self.order_columns
if order_col in _pruned_select_cols
]
else:
response[API_ORDER_COLUMNS_RES_KEY] = self.order_columns
def merge_list_title(self, response, **kwargs):
response[API_LIST_TITLE_RES_KEY] = self.list_title
def merge_show_title(self, response, **kwargs):
response[API_SHOW_TITLE_RES_KEY] = self.show_title
def response_error(self,code,message='error',status=1,result={}):
back_data = {
'result': result,
"status": status,
'message': message
}
return self.response(code, **back_data)
@expose("/_info", methods=["GET"])
@merge_response_func(BaseApi.merge_current_user_permissions, API_PERMISSIONS_RIS_KEY)
@merge_response_func(merge_add_field_info, API_ADD_COLUMNS_RIS_KEY)
@merge_response_func(merge_edit_field_info, API_EDIT_COLUMNS_RIS_KEY)
@merge_response_func(merge_search_filters, API_FILTERS_RIS_KEY)
@merge_response_func(merge_show_label_columns, API_LABEL_COLUMNS_RIS_KEY)
@merge_response_func(merge_show_columns, API_SHOW_COLUMNS_RIS_KEY)
@merge_response_func(merge_list_label_columns, API_LABEL_COLUMNS_RIS_KEY)
@merge_response_func(merge_list_columns, API_LIST_COLUMNS_RIS_KEY)
@merge_response_func(merge_list_title, API_LIST_TITLE_RIS_KEY)
@merge_response_func(merge_show_title, API_SHOW_TITLE_RIS_KEY)
@merge_response_func(merge_add_title, API_ADD_TITLE_RIS_KEY)
@merge_response_func(merge_edit_title, API_EDIT_TITLE_RIS_KEY)
@merge_response_func(merge_description_columns, API_DESCRIPTION_COLUMNS_RIS_KEY)
@merge_response_func(merge_order_columns, API_ORDER_COLUMNS_RIS_KEY)
def api_info(self, **kwargs):
_response = dict()
_args = kwargs.get("rison", {})
self.set_response_key_mappings(_response, self.api_info, _args, **_args)
return self.response(200, **_response)
@expose("/<int:pk>", methods=["GET"])
# @pysnooper.snoop()
def api_get(self, pk, **kwargs):
item = self.datamodel.get(pk, self._base_filters)
if not item:
return self.response_error(404, "Not found")
_response = dict()
_args = kwargs.get("rison", {})
select_cols = _args.get(API_SELECT_COLUMNS_RIS_KEY, [])
_pruned_select_cols = [col for col in select_cols if col in self.show_columns]
self.set_response_key_mappings(
_response,
self.get,
_args,
**{API_SELECT_COLUMNS_RIS_KEY: _pruned_select_cols},
)
if _pruned_select_cols:
_show_model_schema = self.model2schemaconverter.convert(_pruned_select_cols)
else:
_show_model_schema = self.show_model_schema
_response['data'] = _show_model_schema.dump(item, many=False).data # item.to_json()
_response['data']["id"] = pk
self.pre_get(_response)
back_data = {
'result': _response['data'],
"status": 0,
'message': "success"
}
return self.response(200, **back_data)
@expose("/", methods=["GET"])
# @pysnooper.snoop(watch_explode=('_args'))
def api_list(self, **kwargs):
_response = dict()
if self.pre_json_load:
json = self.pre_json_load(request.json)
else:
json = request.json
_args = json or {}
_args.update(request.args)
# handle select columns
select_cols = _args.get(API_SELECT_COLUMNS_RIS_KEY, [])
_pruned_select_cols = [col for col in select_cols if col in self.list_columns]
self.set_response_key_mappings(
_response,
self.get_list,
_args,
**{API_SELECT_COLUMNS_RIS_KEY: _pruned_select_cols},
)
if _pruned_select_cols:
_list_model_schema = self.model2schemaconverter.convert(_pruned_select_cols)
else:
_list_model_schema = self.list_model_schema
# handle filters
try:
            # The short parameter names are defined in each filter's arg_name
from flask_appbuilder.models.sqla.filters import FilterEqualFunction, FilterStartsWith
joined_filters = self._handle_filters_args(_args)
except FABException as e:
return self.response_error(400,message=str(e))
# handle base order
try:
order_column, order_direction = self._handle_order_args(_args)
except InvalidOrderByColumnFABException as e:
return self.response_error(400,message=str(e))
# handle pagination
page_index, page_size = self._handle_page_args(_args)
# Make the query
query_select_columns = _pruned_select_cols or self.list_columns
count, lst = self.datamodel.query(
joined_filters,
order_column,
order_direction,
page=page_index,
page_size=page_size,
select_columns=query_select_columns,
)
if self.post_list:
lst = self.post_list(lst)
# pks = self.datamodel.get_keys(lst)
# import marshmallow.schema
import marshmallow.marshalling
_response['data'] = _list_model_schema.dump(lst, many=True).data # [item.to_json() for item in lst]
# _response["ids"] = pks
_response["count"] = count # 这个是总个数
for index in range(len(lst)):
_response['data'][index]['id']= lst[index].id
self.pre_get_list(_response)
back_data = {
'result': _response['data'],
"status": 0,
'message': "success"
}
return self.response(200, **back_data)
# @pysnooper.snoop()
def json_to_item(self,data):
class Back:
pass
back = Back()
try:
item = self.datamodel.obj(**data)
# for key in data:
# if hasattr(item,key):
# setattr(item,key,data[key])
setattr(back,'data',item)
except Exception as e:
setattr(back, 'data', data)
setattr(back, 'errors', str(e))
return back
# @expose("/add", methods=["POST"])
# def add(self):
@expose("/", methods=["POST"])
# @pysnooper.snoop(watch_explode=('item', 'data'))
def api_add(self):
self.old_item = {}
if not request.is_json:
return self.response_error(400,message="Request is not JSON")
try:
if self.pre_json_load:
json = self.pre_json_load(request.json)
else:
json = request.json
item = self.add_model_schema.load(json)
# item = self.add_model_schema.load(data)
except ValidationError as err:
return self.response_error(422,message=err.messages)
# This validates custom Schema with custom validations
if isinstance(item.data, dict):
return self.response_error(422,message=item.errors)
try:
self.pre_add(item.data)
self.datamodel.add(item.data, raise_exception=True)
self.post_add(item.data)
result_data = self.add_model_schema.dump(
item.data, many=False
).data
result_data['id'] = self.datamodel.get_pk_value(item.data)
back_data={
'result': result_data,
"status":0,
'message':"success"
}
return self.response(
200,
**back_data,
)
except IntegrityError as e:
return self.response_error(422,message=str(e.orig))
except Exception as e1:
return self.response_error(500, message=str(e1))
@expose("/<pk>", methods=["PUT"])
# @pysnooper.snoop(watch_explode=('item','data'))
def api_edit(self, pk):
item = self.datamodel.get(pk, self._base_filters)
self.old_item = item.to_json()
if not request.is_json:
return self.response_error(400, message="Request is not JSON")
if not item:
return self.response_error(404,message='Not found')
try:
if self.pre_json_load:
json = self.pre_json_load(request.json)
else:
json = request.json
data = self._merge_update_item(item, json)
item = self.edit_model_schema.load(data, instance=item)
except ValidationError as err:
return self.response_error(422,message=err.messages)
# This validates custom Schema with custom validations
if isinstance(item.data, dict):
return self.response_error(422,message=item.errors)
self.pre_update(item.data)
try:
self.datamodel.edit(item.data, raise_exception=True)
self.post_update(item.data)
result = self.edit_model_schema.dump(
item.data, many=False
).data
result['id'] = self.datamodel.get_pk_value(item.data)
back_data={
"status":0,
"message":"success",
"result":result
}
return self.response(
200,
**back_data,
)
except IntegrityError as e:
return self.response_error(422,message=str(e.orig))
@expose("/<pk>", methods=["DELETE"])
# @pysnooper.snoop()
def api_delete(self, pk):
item = self.datamodel.get(pk, self._base_filters)
if not item:
return self.response_error(404,message='Not found')
self.pre_delete(item)
try:
self.datamodel.delete(item, raise_exception=True)
self.post_delete(item)
back_data={
"status":0,
"message":"success",
"result":item.to_json()
}
return self.response(200, **back_data)
except IntegrityError as e:
return self.response_error(422,message=str(e.orig))
"""
------------------------------------------------
HELPER FUNCTIONS
------------------------------------------------
"""
def _handle_page_args(self, rison_args):
"""
Helper function to handle rison page
arguments, sets defaults and impose
FAB_API_MAX_PAGE_SIZE
:param rison_args:
:return: (tuple) page, page_size
"""
page = rison_args.get(API_PAGE_INDEX_RIS_KEY, 0)
page_size = rison_args.get(API_PAGE_SIZE_RIS_KEY, self.page_size)
return self._sanitize_page_args(page, page_size)
def _sanitize_page_args(self, page, page_size):
_page = page or 0
_page_size = page_size or self.page_size
max_page_size = self.max_page_size or current_app.config.get(
"FAB_API_MAX_PAGE_SIZE"
)
# Accept special -1 to uncap the page size
if max_page_size == -1:
if _page_size == -1:
return None, None
else:
return _page, _page_size
if _page_size > max_page_size or _page_size < 1:
_page_size = max_page_size
return _page, _page_size
def _handle_order_args(self, rison_args):
"""
Help function to handle rison order
arguments
:param rison_args:
:return:
"""
order_column = rison_args.get(API_ORDER_COLUMN_RIS_KEY, "")
order_direction = rison_args.get(API_ORDER_DIRECTION_RIS_KEY, "")
if not order_column and self.base_order:
return self.base_order
if not order_column:
return "", ""
elif order_column not in self.order_columns:
raise InvalidOrderByColumnFABException(
f"Invalid order by column: {order_column}"
)
return order_column, order_direction
def _handle_filters_args(self, rison_args):
self._filters.clear_filters()
self._filters.rest_add_filters(rison_args.get(API_FILTERS_RIS_KEY, []))
return self._filters.get_joined_filters(self._base_filters)
# @pysnooper.snoop(watch_explode=("column"))
def _description_columns_json(self, cols=None):
"""
Prepares dict with col descriptions to be JSON serializable
"""
ret = {}
cols = cols or []
d = {k: v for (k, v) in self.description_columns.items() if k in cols}
for key, value in d.items():
ret[key] = as_unicode(_(value).encode("UTF-8"))
edit_form_extra_fields = self.edit_form_extra_fields
for col in edit_form_extra_fields:
column = edit_form_extra_fields[col]
if hasattr(column, 'kwargs') and column.kwargs:
description = column.kwargs.get('description','')
if description:
ret[col] = description
return ret
def _label_columns_json(self, cols=None):
"""
Prepares dict with labels to be JSON serializable
"""
ret = {}
cols = cols or []
d = {k: v for (k, v) in self.label_columns.items() if k in cols}
for key, value in d.items():
ret[key] = as_unicode(_(value).encode("UTF-8"))
if hasattr(self.datamodel.obj,'label_columns') and self.datamodel.obj.label_columns:
for col in self.datamodel.obj.label_columns:
ret[col] = self.datamodel.obj.label_columns[col]
return ret
# @pysnooper.snoop(watch_explode=("field",'datamodel','column','default'))
def _get_field_info(self, field, filter_rel_field, page=None, page_size=None):
"""
Return a dict with field details
ready to serve as a response
:param field: marshmallow field
:return: dict with field details
"""
ret = dict()
ret["name"] = field.name
if self.datamodel:
list_columns = self.datamodel.list_columns
if field.name in list_columns:
column = list_columns[field.name]
default = column.default
if default:
ret['default']=default.arg
ret["label"] = _(self.label_columns.get(field.name, ""))
ret["description"] = _(self.description_columns.get(field.name, ""))
# if field.name in self.edit_form_extra_fields:
# if hasattr(self.edit_form_extra_fields[field.name],'label'):
# ret["label"] = self.edit_form_extra_fields[field.name].label
# if hasattr(self.edit_form_extra_fields[field.name], 'description'):
# ret["description"] = self.edit_form_extra_fields[field.name].description
# Handles related fields
if isinstance(field, Related) or isinstance(field, RelatedList):
ret["count"], ret["values"] = self._get_list_related_field(
field, filter_rel_field, page=page, page_size=page_size
)
if field.validate and isinstance(field.validate, list):
ret["validate"] = [str(v) for v in field.validate]
elif field.validate:
ret["validate"] = [str(field.validate)]
ret["type"] = field.__class__.__name__
ret["required"] = field.required
# When using custom marshmallow schemas fields don't have unique property
ret["unique"] = getattr(field, "unique", False)
return ret
def _get_fields_info(self, cols, model_schema, filter_rel_fields, **kwargs):
"""
Returns a dict with fields detail
from a marshmallow schema
:param cols: list of columns to show info for
:param model_schema: Marshmallow model schema
:param filter_rel_fields: expects add_query_rel_fields or
edit_query_rel_fields
:param kwargs: Receives all rison arguments for pagination
:return: dict with all fields details
"""
ret = list()
for col in cols:
page = page_size = None
col_args = kwargs.get(col, {})
if col_args:
page = col_args.get(API_PAGE_INDEX_RIS_KEY, None)
page_size = col_args.get(API_PAGE_SIZE_RIS_KEY, None)
ret.append(
self._get_field_info(
model_schema.fields[col],
filter_rel_fields.get(col, []),
page=page,
page_size=page_size,
)
)
return ret
def _get_list_related_field(
self, field, filter_rel_field, page=None, page_size=None
):
"""
Return a list of values for a related field
:param field: Marshmallow field
:param filter_rel_field: Filters for the related field
:param page: The page index
:param page_size: The page size
:return: (int, list) total record count and list of dict with id and value
"""
ret = list()
if isinstance(field, Related) or isinstance(field, RelatedList):
datamodel = self.datamodel.get_related_interface(field.name)
filters = datamodel.get_filters(datamodel.get_search_columns_list())
page, page_size = self._sanitize_page_args(page, page_size)
order_field = self.order_rel_fields.get(field.name)
if order_field:
order_column, order_direction = order_field
else:
order_column, order_direction = "", ""
if filter_rel_field:
filters = filters.add_filter_list(filter_rel_field)
count, values = datamodel.query(
filters, order_column, order_direction, page=page, page_size=page_size
)
for value in values:
ret.append({"id": datamodel.get_pk_value(value), "value": str(value)})
return count, ret
def _merge_update_item(self, model_item, data):
"""
        Merge a model with a python data structure.
        This is useful for turning a PUT request into a PATCH as well.
:param model_item: SQLA Model
:param data: python data structure
:return: python data structure
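        Illustrative example (hypothetical values): with edit_columns
        ["name", "description"] and a stored description of "old",
        _merge_update_item(item, {"name": "new"}) returns
        {"name": "new", "description": "old"}, so a partial PUT behaves
        like a PATCH.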
"""
data_item = self.edit_model_schema.dump(model_item, many=False).data
for _col in self.edit_columns:
if _col not in data.keys():
data[_col] = data_item[_col]
return data
|
from argh import arg
from six import iteritems
import pnc_cli.common as common
import pnc_cli.cli_types as types
import pnc_cli.utils as utils
from pnc_cli.swagger_client import ProductRest
from pnc_cli.pnc_api import pnc_api
__author__ = 'thauser'
def create_product_object(**kwargs):
created_product = ProductRest()
for key, value in iteritems(kwargs):
setattr(created_product, key, value)
return created_product
@arg("name", help="Name for the Product", type=types.unique_product_name)
@arg("abbreviation", help="The abbreviation or \"short name\" of the new Product", type=types.unique_product_abbreviation)
@arg("-d", "--description", help="Detailed description of the new Product")
@arg("-p", "--product-code", help="The Product code for the new Product")
@arg("-sn", "--pgm-system-name", help="The system code for the new Product")
@arg("-pvids", "--product-version-ids", type=types.existing_product_version, nargs='+',
help="Space separated list of associated ProductVersion ids.")
def create_product(name, abbreviation, **kwargs):
"""
Create a new Product
"""
data = create_product_raw(name, abbreviation, **kwargs)
if data:
return utils.format_json(data)
def create_product_raw(name, abbreviation, **kwargs):
product = create_product_object(name=name, abbreviation=abbreviation, **kwargs)
response = utils.checked_api_call(pnc_api.products, 'create_new', body=product)
if response:
return response.content
@arg("product-id", help="ID of the Product to update", type=types.existing_product_id)
@arg("-n", "--name", help="New name for the Product", type=types.unique_product_name)
@arg("-d", "--description", help="New Product description")
@arg("-a", "--abbreviation", help="New abbreviation")
@arg("-p", "--product-code", help="New Product code")
@arg("-sn", "--pgm-system-name", help="New system name")
@arg("--product-version-ids", type=types.existing_product_version, nargs='+',
help="Space separated list of associated ProductVersion ids.")
def update_product(product_id, **kwargs):
"""
Update a Product with new information
"""
content = update_product_raw(product_id, **kwargs)
if content:
return utils.format_json(content)
def update_product_raw(product_id, **kwargs):
to_update = pnc_api.products.get_specific(id=product_id).content
for key, value in iteritems(kwargs):
if value is not None:
setattr(to_update, key, value)
response = utils.checked_api_call(
pnc_api.products, 'update', id=product_id, body=to_update)
if response:
return response.content
@arg("-i", "--id", help="ID of the Product to retrieve", type=types.existing_product_id)
@arg("-n", "--name", help="Name of the Product to retrieve", type=types.existing_product_name)
def get_product(id=None, name=None):
"""
Get a specific Product by name or ID
"""
content = get_product_raw(id, name)
if content:
return utils.format_json(content)
def get_product_raw(id=None, name=None):
prod_id = common.set_id(pnc_api.products, id, name)
if prod_id is None:
return None
response = utils.checked_api_call(pnc_api.products, 'get_specific', id=prod_id)
if response:
return response.content
@arg("-i", "--id", help="ID of the Product to retrieve versions from", type=types.existing_product_id)
@arg("-n", "--name", help="Name of the Product to retrieve versions from", type=types.existing_product_name)
@arg("-p", "--page-size", help="Limit the amount of Product Versions returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_versions_for_product(id=None, name=None, page_size=200, page_index=0, sort='', q=''):
"""
List all ProductVersions for a given Product
"""
content = list_versions_for_product_raw(id, name, page_size, page_index, sort, q)
if content:
return utils.format_json_list(content)
def list_versions_for_product_raw(id=None, name=None, page_size=200, page_index=0, sort='', q=''):
prod_id = common.set_id(pnc_api.products, id, name)
response = utils.checked_api_call(
pnc_api.products, 'get_product_versions', id=prod_id, page_size=page_size, page_index=page_index, sort=sort, q=q)
if response:
return response.content
@arg("-p", "--page-size", help="Limit the amount of Products returned", type=int)
@arg("--page-index", help="Select the index of page", type=int)
@arg("-s", "--sort", help="Sorting RSQL")
@arg("-q", help="RSQL query")
def list_products(page_size=200, page_index=0, sort="", q=""):
"""
List all Products
"""
content = list_products_raw(page_size, page_index, sort, q)
if content:
return utils.format_json_list(content)
def list_products_raw(page_size=200, page_index=0, sort='', q=''):
response = utils.checked_api_call(pnc_api.products, 'get_all', page_size=page_size, page_index=page_index, q=q, sort=sort)
if response:
return response.content
|
import os
import argparse
import pickle
import tensorflow as tf
import numpy as np
import pandas as pd
from augmentation import processing_data_functions, AUTO, partial
from skimage.measure import label
from google.protobuf.descriptor import Error
from sklearn.metrics import accuracy_score, f1_score
import segmentation_models as sm
from training import get_Xy
from dynamic_watershed import post_process
from metric.from_hover import get_fast_aji_plus
def options():
parser = argparse.ArgumentParser(description="setting up training")
parser.add_argument("--path", type=str)
parser.add_argument("--meta", type=str, default="meta.pkl")
parser.add_argument("--weights", type=str, default="model_weights.h5")
parser.add_argument("--alpha", type=float, default=5)
parser.add_argument("--beta", type=float, default=0.5)
parser.add_argument("--history", type=str)
parser.add_argument("--param", type=str, required=False)
parser.add_argument('--aji', dest='aji', action='store_true')
parser.add_argument('--no_aji', dest='aji', action='store_false')
args = parser.parse_args()
if args.param:
f = open(args.param, "r").read()
d = dict(x.split("=") for x in f.split(": ")[1].split("; "))
args.param = d
args.type = args.param["type"]
args.model = args.param["model"]
args.backbone = args.param["backbone"]
if args.type == "binary":
activation = "sigmoid"
elif args.type == "distance":
activation = "relu"
else:
raise Error(f"unknown type: {args.type}, not implemented")
args.activation = activation
args.classes = 1
if args.model == "Unet":
model_f = sm.Unet
elif args.model == "FPN":
model_f = sm.FPN
elif args.model == "Linknet":
model_f = sm.Linknet
elif args.model == "PSPNet":
model_f = sm.PSPNet
else:
raise Error(f"unknown backbone: {args.model}, not implemented")
args.model_f = model_f
return args
def load_meta(file_name):
with open(file_name, "rb") as handle:
d = pickle.load(handle)
mean, std = d["mean"], d["std"]
return mean, std
def setup_data(path, mean, std, backbone, batch_size=1, image_size=224):
preprocess_input = sm.get_preprocessing(backbone)
x_val, y_val = get_Xy(path)
x_val = preprocess_input(x_val)
y_labeled = np.load(path)["labeled_y"]
validation_ds = (
tf.data.Dataset.from_tensor_slices((x_val, y_val))
.batch(batch_size, drop_remainder=True)
.map(
partial(
processing_data_functions(
key="validation",
size=image_size,
p=None,
mean=mean,
std=std
),
bs=batch_size,
),
num_parallel_calls=AUTO,
)
.prefetch(AUTO)
)
if y_labeled.shape[1] != image_size:
pad = (y_labeled.shape[1] - image_size) // 2
y_labeled = y_labeled[:, pad:-pad, pad:-pad]
return validation_ds, y_labeled
def load_model(opt):
model = opt.model_f(
opt.backbone,
classes=opt.classes,
activation=opt.activation,
encoder_weights=None
)
model.load_weights(opt.weights)
return model
def main():
opt = options()
mean, std = load_meta(opt.meta)
ds_val, y_labeled = setup_data(opt.path, mean, std, opt.param['backbone'])
model = load_model(opt)
pred = model.predict(ds_val)
# aji computation
if opt.aji:
ajis = []
n = pred.shape[0]
for i in range(n):
if opt.type == "binary":
pred_i = post_process(
pred[i, :, :, 0],
opt.alpha / 255,
thresh=opt.beta
)
else:
pred_i = post_process(
pred[i, :, :, 0],
opt.alpha,
thresh=opt.beta
)
gt_i = y_labeled[i]
ajis.append(get_fast_aji_plus(label(gt_i), pred_i))
aji = np.mean(ajis)
# accuracy, f1,
y_true = (y_labeled > 0).flatten()
y_pred = (pred > opt.beta).flatten()
acc = accuracy_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred)
table_training = pd.read_csv(opt.history, index_col=0)
if opt.type == "binary":
name_acc_train = "binary_accuracy"
name_f1_train = "f1-score"
name_acc_val = "val_binary_accuracy"
name_f1_val = "val_f1-score"
elif opt.type == "distance":
name_acc_train = "accuracy_d"
name_f1_train = "f1_score_d"
name_acc_val = "val_accuracy_d"
name_f1_val = "val_f1_score_d"
acc_from_history = table_training[name_acc_train].max()
f1_from_history = table_training[name_f1_train].max()
val_acc_from_history = table_training[name_acc_val].max()
val_f1_from_history = table_training[name_f1_val].max()
dic = {
"val_acc": acc,
"val_f1": f1,
"history_acc": acc_from_history,
"history_f1": f1_from_history,
"history_val_acc": val_acc_from_history,
"history_val_f1": val_f1_from_history,
"alpha": opt.alpha,
"beta": opt.beta,
"weights": os.readlink(opt.weights),
"meta": os.readlink(opt.meta),
}
if opt.aji:
dic["aji"] = aji
if opt.type == "binary":
dic["history_val_auc"] = table_training["val_auc"].max()
dic["history_val_iou"] = table_training["val_iou_score"].max()
else:
dic["history_val_auc"] = np.nan
dic["history_val_iou"] = np.nan
if opt.param:
dic.update(opt.param)
df = pd.DataFrame(dic, index=[0])
df.to_csv("score.csv", index=False)
if __name__ == "__main__":
main()
|
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from databricks import koalas as ks
from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils
class SparkFrameMethodsTest(ReusedSQLTestCase, SQLTestUtils):
def test_frame_apply_negative(self):
with self.assertRaisesRegex(
ValueError, "The output of the function.* pyspark.sql.DataFrame.*int"
):
ks.range(10).spark.apply(lambda scol: 1)
|
import numpy as np
from sklearn.preprocessing import LabelBinarizer
def dcg_score(y_true, y_score, k=5):
"""Discounted cumulative gain (DCG) at rank K.
Reference:
https://www.kaggle.com/davidgasquez/ndcg-scorer
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (true relevance labels).
y_score : array, shape = [n_samples, n_classes]
Predicted scores.
k : int
Rank.
Returns
-------
score : float
"""
order = np.argsort(y_score)[::-1]
y_true = np.take(y_true, order[:k])
gain = 2 ** y_true - 1
discounts = np.log2(np.arange(len(y_true)) + 2)
return np.sum(gain / discounts)
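# Worked example (added for illustration):
#   y_true = [2, 1, 0], y_score = [0.1, 0.9, 0.3], k = 2
#   order[::-1][:k] keeps indices [1, 2], so the taken y_true is [1, 0]
#   gain = [2**1 - 1, 2**0 - 1] = [1, 0], discounts = log2([2, 3]) ~ [1.0, 1.585]
#   DCG = 1/1.0 + 0/1.585 = 1.0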
def ndcg_score(ground_truth, predictions, k=5):
"""Normalized discounted cumulative gain (NDCG) at rank K.
Normalized Discounted Cumulative Gain (NDCG) measures the performance of a
recommendation system based on the graded relevance of the recommended
entities. It varies from 0.0 to 1.0, with 1.0 representing the ideal
ranking of the entities.
Reference:
https://www.kaggle.com/davidgasquez/ndcg-scorer
Parameters
----------
ground_truth : array, shape = [n_samples]
        Ground truth (true labels represented as integers).
predictions : array, shape = [n_samples, n_classes]
Predicted probabilities.
k : int
Rank.
Returns
-------
score : float
Example
-------
    >>> ground_truth = [1, 0, 2]
    >>> predictions = [[0.15, 0.55, 0.2], [0.7, 0.2, 0.1], [0.06, 0.04, 0.9]]
    >>> ndcg_score(ground_truth, predictions, k=2)
    1.0
    >>> predictions = [[0.9, 0.5, 0.8], [0.7, 0.2, 0.1], [0.06, 0.04, 0.9]]
    >>> ndcg_score(ground_truth, predictions, k=2)
    0.6666666666
"""
lb = LabelBinarizer()
lb.fit(range(len(predictions) + 1))
T = lb.transform(ground_truth)
scores = []
# Iterate over each y_true and compute the DCG score
for y_true, y_score in zip(T, predictions):
actual = dcg_score(y_true, y_score, k)
best = dcg_score(y_true, y_true, k)
score = float(actual) / float(best)
scores.append(score)
return np.mean(scores)
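# Minimal usage sketch (added for illustration): runs the example from the
# docstring above when this file is executed directly.
if __name__ == "__main__":
    ground_truth = [1, 0, 2]
    predictions = [[0.15, 0.55, 0.2], [0.7, 0.2, 0.1], [0.06, 0.04, 0.9]]
    print(ndcg_score(ground_truth, predictions, k=2))
    predictions = [[0.9, 0.5, 0.8], [0.7, 0.2, 0.1], [0.06, 0.04, 0.9]]
    print(ndcg_score(ground_truth, predictions, k=2))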
|
'''
Code for generating and applying unified diff patches to
files being modified.
Goal is to be able to make relatively clean edits to the existing
x3 scripts and other source files without having to copy the
contents.
Initial version is not expected to support multiple patches to
the same file through this method.
This will operate on files in the patches folder.
This folder should generally have three versions of any given file:
file_name
file_name.patch
[source file from the x3 directory]
The bare file is the modified version, eg. out of Exscriptor, kept
without a suffix for easier editing (eg. retain a .xml suffix).
The .patch version contains the unified diff, and is the component
to generally upload to the repository and distribute.
The source file will generally be read from the x3 directory,
after setting up the path using Set_Path() as with normal transforms.
To make a patch from a modified file, call Generate_Patch(file_name).
To rebuild a modified file from a patch, call Generate_Original(file_name).
These should be called using the flow for normal transforms, after
setting up paths appropriately for finding the source files.
Problem:
Exscriptor/XStudio write their xml output in a different format than that
used in the x3 source files, eg. with different indentation, and also do
not include the sourceplaintext section, plus there is a possibility
that element attributes will be given in a different order (when it
is arbitrary), and the text gets a bit jumbled between nodes.
Because of this, the diffs tend to include the full file contents, and
are duplicated with the original and modified encodings.
Possible solutions:
1) Switch to using xml parsing and a diff generator, which should
handle the indentation and attribute order inconsistencies.
-Requires other external libraries.
    -Does not handle sourceplaintext discrepancy automatically.
2) Edit the source file when reading it in to adjust it closer
to the exscriptor version, removing the sourceplaintext section
and increasing indents.
-Messy to implement, and fragile to unexpected format changes.
3) Load both xml files using elementtree, then convert them back
to text, then take the diff.
-Patch application will need to follow a similar procedure.
-Should result in matched indentation and attribute ordering.
-Still needs removal of sourceplaintext, but this is easy
since it is just an xml element to delete.
Go with option (3), which should work out the best overall, albeit
slightly slowing down patch application since the source files need
to be xml parsed whenever a patch is applied (eg. every time the
manager is run).
Update: solution doesn't really deal with the text jumbling, where
the x3 version may have some characters in a var node, and the
other tools move those chars to a text node.
Also, the x3 version often uses encoded characters to avoid spaces
and some other special chars, where the tools just put in spaces/etc
directly, further troubling the diff.
New solution:
For these script edits, try to do them in game and save there.
While awkward, this should get the file format to sync up properly.
Initial testing looks great (except the in game editor usage part).
Middle ground: edit externally using X-Studio, make a version of the
file without the ! (need to edit title and internals, eg. with a
replace-all in notepad++), open in game, insert/delete a line,
resave, put the ! back.
Update:
When editing scripts from mods, they may not be saved from the
game, or could be from a much older version.
In such cases, normalizing the file format somewhat may provide
some relief.
This will be done on a per-file basis.
For applying the diff as a patch, can use third party code.
This is mit licensed, but designed to work on files, not raw text.
https://github.com/techtonik/python-patch
Another, seemingly much simpler option would be this, but it lacks a license.
https://gist.github.com/noporpoise/16e731849eb1231e86d78f9dfeca3abc
This code has an mit license and works on text, but when tried out
it had some bugs (was not applying all patch changes, eg. skipping
the signature removal).
https://github.com/danielmoniz/merge_in_memory
Ultimately, a very simple patch application function will be written,
since it can be debugged easily that way.
TODO:
Consider support for applying multiple patches to a single file, perhaps
by doing some sort of patch join and applying a single unified patch (so
that original file line numbers aren't changed by prior patches).
'''
import os
import difflib
import xml.etree.ElementTree
from xml.dom import minidom
import re
from .File_Paths import *
from . import Misc
from .. import Common
def Make_Patch(virtual_path, verify = False, reformat_xml = False):
'''
Generates a patch for the given file in the patches folder.
* virtual_path
- Path of the file to be patched, existing in a modified
form in the patches folder, to be compared against
the one loaded from the game files.
- The patch will be the same name suffixed with .patch.
* verify
- Bool, if True, a call to Apply_Patch will be made, and
the resulting text compared to the original modified file
to verify a match.
* reformat_xml
- Bool, if True, this will parse xml files, strip out
whitespace, and reconvert them to strings, to somewhat
standardize formatting between inputs.
'''
print('Making patch for {}'.format(virtual_path))
# Error if the modified file not found.
modified_path = Virtual_Path_to_Project_Patch_Path(virtual_path)
if not os.path.exists(modified_path):
if Common.Settings.developer:
print('Error: patch for file {} not found in /patches'.format(virtual_path))
else:
raise Common.File_Missing_Exception()
return
# Get the modified text.
with open(modified_path, 'r') as file:
modified_file_text = file.read()
# Search out the source file.
try:
source_file_text = Misc.Load_File(virtual_path, return_text = True)
except Common.File_Missing_Exception as ex:
if Common.Settings.developer:
print('Error: source for file {} not found'.format(virtual_path))
else:
raise ex
return
# Do some extra handling of xml to standardize format.
if virtual_path.endswith('.xml'):
# Look up the encoding on the source file, to be safe.
# This is generally expected to be whatever was used as default
# for scripts, which don't specify encoding; eg. utf-8.
encoding = Misc.Load_File(virtual_path, return_game_file = True).encoding
# Optionally try to reformat.
# This will probably not end up being used, since attempts to
# match up formatting didn't pan out due to too many integrated
# differences.
if reformat_xml:
source_file_text = Standardize_XML_Format(source_file_text, encoding)
modified_file_text = Standardize_XML_Format(modified_file_text, encoding)
else:
# Use some default encoding.
encoding = None
# From these, can get the diff.
# This requires lists of strings as input, and works line-by-line,
# doing full line replacements. The lines in the lists should
# end in newlines to work properly.
source_file_lines = list(source_file_text.splitlines(keepends=True))
modified_file_lines = list(modified_file_text.splitlines(keepends=True))
unified_diff = difflib.unified_diff(
source_file_lines,
modified_file_lines,
# 'n' is the number of context lines to include around the
# block sections changed. In general, want this to be 0 to
# avoid including excess input lines, though with these it
# may be possible to patch a file that doesn't match the
# exact original source (eg. with changed line numbers).
n = 0)
# Write this out as-is to act as a patch.
patch_path = modified_path + '.patch'
with open(patch_path, 'w') as file:
file.writelines(unified_diff)
if verify:
# Apply the patch, get the modified file back.
patched_file_text = Apply_Patch(virtual_path, reformat_xml)
# Compare the patched file to the original modified file.
if patched_file_text != modified_file_text:
print('Error: patch did not reproduce the original modified input. '
'Writing result to {}.patched for viewing.'.format(virtual_path))
# Make a copy of the attempted patched file which had
# the error.
with open(modified_path + '.patched', 'w') as file:
file.write(patched_file_text)
return
def Apply_Patch(virtual_path, reformat_xml = False):
'''
Reads and applies a patch to the original text, producing the
modified text, and updates the File_Manager object accordingly.
Returns the modified text.
Primarily intended for xml files, though should work on any
file with Get_Text and Update_From_Text methods.
'''
# Error if the patch file not found.
patch_path = Virtual_Path_to_Project_Patch_Path(virtual_path) + '.patch'
if not os.path.exists(patch_path):
if Common.Settings.developer:
print('Error: patch for file {} not found in /patches'.format(virtual_path))
else:
raise Common.File_Missing_Exception()
return
# Get the patch text.
with open(patch_path, 'r') as file:
patch_file_text = file.read()
# Search out the source file.
source_game_file = Misc.Load_File(virtual_path, return_game_file = True)
source_file_text = source_game_file.Get_Text()
# Do some extra handling of xml to standardize format.
if virtual_path.endswith('.xml'):
# Look up the encoding on the source file, to be safe.
# This is generally expected to be whatever was used as default
# for scripts, which don't specify encoding; eg. utf-8.
encoding = source_game_file.encoding
# Optionally try to reformat.
if reformat_xml:
source_file_text = Standardize_XML_Format(source_file_text,
encoding)
# To apply the patch manually, can traverse the changed blocks,
# get their source file start line, blindly apply deletions and
# insertions (- and + lines in the patch), and should end up
# with everything correct.
# Operate on a list of source lines, for easy deletion and insertion.
# Split manually on newline, to ensure any final last empty line is
# kept (so any verification match doesn't get a false error).
modified_file_lines = list(source_file_text.split('\n'))
# Keep track of insertions and deletions, to correct source line indices
# from the patch based on prior changes.
line_offset = 0
# Track if an error occurs.
error = False
# Loop over the patch lines.
for patch_line in patch_file_text.splitlines():
# Skip the file name definitions, prefixed by --- and +++, which
# should be empty anyway since not specified earlier.
if patch_line.startswith('---') or patch_line.startswith('+++'):
continue
# A line with @@ has this format:
# @@ -source_line_start,source_line_count +dest_line_start,dest_line_count @@
# or
# @@ -source_line_start +dest_line_start @@
# The latter appears when only 1 line is changed.
# Only the source_line_start is really needed, since line counts
# are implicit in how many - and + lines are present below the tag
# if the patch file is well formed.
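        # Example: a header of '@@ -12,2 +12,3 @@' means the change starts at
        # source line 12 and replaces 2 source lines (the '-' lines below it)
        # with 3 new lines (the '+' lines).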
elif patch_line.startswith('@@'):
# Isolate it by splitting on the - and , or ' ' surrounding it.
post_dash = patch_line.split('-')[1]
source_terms = post_dash.split(' ')[0]
if ',' in source_terms:
line_number = int(source_terms.split(',')[0])
source_count = int(source_terms.split(',')[1])
else:
line_number = int(source_terms)
# Default is 1 line changed.
source_count = 1
# Note: patch line numbers start from 1, but the list is 0 indexed,
# so decrement by 1 to get the proper index.
line_number -= 1
# In the special case of a line not being deleted, and only
# insertions happening, those insertions should be placed after
# the line. To handle this nicely, check this case and bump
# the line number when found.
if source_count == 0:
line_number += 1
# Apply the line offset based on prior changes.
line_number += line_offset
continue
# Delete a line if needed.
elif patch_line.startswith('-'):
# Pop off the line, and verify it matches the reference
# in the patch.
line_text = modified_file_lines.pop(line_number)
ref_text = patch_line.replace('-','',1)
if line_text != ref_text:
# Problem found.
# When developing, it can be useful to let this continue
# so that a rebuilt text file can be checked to see how
# it differs from expected.
# Otherwise, can just raise an exception.
                if not Common.Settings.developer:
raise Common.Text_Patch_Exception()
error = True
print(('File patcher mismatch: line {}, original {},'
' expected {}').format(
line_number,
line_text,
ref_text
))
# After the pop, line_number points to the next line (which
# moved down an index), so leaving it unchanged should
# support another pop following this.
# Decrease the overall offset for future patch blocks.
line_offset -= 1
elif patch_line.startswith('+'):
# Isolate the patch text and insert it.
ref_text = patch_line.replace('+','',1)
modified_file_lines.insert(line_number, ref_text)
# The line number should now advance, so that another insertion
# goes to the next line.
line_number += 1
# Increase the overall offset for future patch blocks.
line_offset += 1
# Any other lines in the patch are likely just context, and
# can be safely ignored.
# Rejoin the list into a text block, adding back the newlines.
modified_file_text = '\n'.join(modified_file_lines)
if error:
print('Skipping {} due to patch error'.format(virtual_path))
else:
# Update the file object directly.
source_game_file.Update_From_Text(modified_file_text)
# Also return a copy of the new text if desired.
return modified_file_text
def Standardize_XML_Format(xml_text, encoding):
'''
Standardize the newlines, indentation, and attribute ordering
for an xml text block.
'''
element_root = xml.etree.ElementTree.fromstring(xml_text)
# Note: excess indentation can arise from the text or tail of each element,
# eg. when it is a newline followed by spaces that prefix the next
    # element when printed, or a newline in a text field preceding a
# subelement.
for element in element_root.iter():
# If nothing is left after splitting on the whitespace, can
# replace with an empty string.
if element.tail:
            if not ''.join(re.split(r'\s+', element.tail)):
element.tail = ''
if element.text:
            if not ''.join(re.split(r'\s+', element.text)):
element.text = ''
# Get rid of exscriptor "linenr" attributes from elements, which aren't
# present in source scripts.
for element in element_root.iter():
element.attrib.pop('linenr', None)
# For source scripts, remove the sourceplaintext element that is not
# present in exscriptor scripts.
source_plain_text = element_root.find('sourceplaintext')
    if source_plain_text is not None:
element_root.remove(source_plain_text)
# Getting standard format of lines/indents appears to require
# the minidom package instead.
# Examples online just feed the elementtree text output into
# this and remake the text.
# Note: elementtree output is a byte string, but minidom output
# appears to be a normal python string.
modified_xml_text = xml.etree.ElementTree.tostring(element_root)
minidom_version = minidom.parseString(modified_xml_text)
# Make sure the final string is encoded the same way as the input,
# else oddities can arise with eg. how spaces are given.
modified_xml_text = minidom_version.toprettyxml(indent="\t")
    return modified_xml_text
|
import subprocess
def build_message_database(code):
if code:
return 'Database deploy failed.'
return 'Database ok.'
def build_message_deploy(code):
if code:
return 'Composition deploy failed'
return 'Composition deployed'
def deploy_backend():
print('deploying database')
exec_code = subprocess.run(
[
"docker-compose",
"-f",
".docker/docker-compose.prod.yml",
"up",
"--detach",
"database"
]).returncode
message = build_message_database(exec_code)
if exec_code:
return exec_code, message
print('deploying composition')
exec_code = subprocess.run(
[
"docker-compose",
"-f",
".docker/docker-compose.prod.yml",
"up",
"--detach",
]).returncode
message = '\n'.join([message, build_message_deploy(exec_code)])
if exec_code:
return exec_code, message
print('making a migration')
exec_code = subprocess.run(
[
"docker-compose",
"-f",
".docker/docker-compose.prod.yml",
"exec",
"-T",
"backend",
"sh", "-c",
"python manage.py migrate --noinput",
]).returncode
message = '\n'.join([message, 'migrations failed.' if exec_code else 'migrations applied successfully'])
if exec_code:
return exec_code, message
    print('collecting static')
exec_code = subprocess.run(
[
"docker-compose",
"-f",
".docker/docker-compose.prod.yml",
"exec",
"-T",
"backend",
"sh", "-c",
"python manage.py collectstatic -c --noinput",
]).returncode
    message = '\n'.join([message, 'collectstatic failed.' if exec_code else 'collectstatic succeeded'])
return exec_code, message
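# Minimal usage sketch (added for illustration): run the full deploy and exit
# with the status code of the first failing step (0 on success).
if __name__ == "__main__":
    import sys
    code, message = deploy_backend()
    print(message)
    sys.exit(code)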
|
#!/usr/bin/env python
#coding:utf-8
from gevent import monkey
monkey.patch_all()
import web
from web.httpserver import StaticMiddleware
from socketio import server
urls = (
    '/', 'IndexHandler',  # return home page
'/topic', 'TopicHandler',
'/topic/(\d+)', 'TopicHandler',
'/message', 'MessageHandler',
'/user', 'UserHandler',
'/user/(\d+)', 'UserHandler',
'/login', 'LoginHandler',
'/logout', 'LogoutHandler',
'/socket.io/.*', 'SocketHandler',
)
app = web.application(urls, globals())
application = app.wsgifunc(StaticMiddleware)
if web.config.get('_session') is None:
session = web.session.Session(
app,
web.session.DiskStore('sessions'),
initializer={'login': False, 'user': None}
)
web.config._session = session
web.config.debug = False
from handlers.index import IndexHandler # noqa
from handlers.user import UserHandler, LoginHandler, LogoutHandler # noqa
from handlers.topic import TopicHandler # noqa
from handlers.message import MessageHandler # noqa
from handlers.socket import SocketHandler # noqa
if __name__ == "__main__":
import sys
PORT = 8080
if len(sys.argv) > 1:
PORT = int(sys.argv[1])
print 'http://localhost:%s' % PORT
server.SocketIOServer(
('0.0.0.0', PORT),
application,
resource="socket.io",
policy_server=True,
policy_listener=('0.0.0.0', 10843),
).serve_forever()
|
import errno
import os
import sys
import jmespath
from termcolor import colored
from datetime import datetime
from botocore.compat import json, six
def milis2iso(milis):
res = datetime.utcfromtimestamp(milis/1000.0).isoformat()
return (res + ".000")[:23] + 'Z'
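# Example: milis2iso(1500000000000) -> '2017-07-14T02:40:00.000Z'
# (milliseconds are truncated to three digits and a literal 'Z' is appended)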
class LogPrinter(object):
def __init__(self, log_group_name, max_stream_length, **kwargs):
self.log_group_name = log_group_name
self.max_stream_length = max_stream_length
self.color_enabled = kwargs.get('color_enabled')
self.output_stream_enabled = kwargs.get('output_stream_enabled')
self.output_group_enabled = kwargs.get('output_group_enabled')
self.output_timestamp_enabled = kwargs.get('output_timestamp_enabled')
self.output_ingestion_time_enabled = kwargs.get('output_ingestion_time_enabled')
self.query = kwargs.get('query')
if self.query:
self.query_expression = jmespath.compile(self.query)
def print_log(self, event):
output = []
group_length = len(self.log_group_name)
if self.output_group_enabled:
output.append(
self.__color(
self.log_group_name.ljust(group_length, ' '),
'green'
)
)
if self.output_stream_enabled:
output.append(
self.__color(
event['logStreamName'].ljust(self.max_stream_length,
' '),
'cyan'
)
)
if self.output_timestamp_enabled:
output.append(
self.__color(
milis2iso(event['timestamp']),
'yellow'
)
)
if self.output_ingestion_time_enabled:
output.append(
self.__color(
milis2iso(event['ingestionTime']),
'blue'
)
)
message = event['message']
if self.query is not None and message[0] == '{':
parsed = json.loads(event['message'])
message = self.query_expression.search(parsed)
if not isinstance(message, six.string_types):
message = json.dumps(message)
output.append(message.rstrip())
print(' '.join(output))
try:
sys.stdout.flush()
except IOError as e:
if e.errno == errno.EPIPE:
# SIGPIPE received, so exit
os._exit(0)
else:
# We don't want to handle any other errors from this
raise
def __color(self, text, color):
"""Returns coloured version of ``text`` if ``color_enabled``."""
if self.color_enabled:
return colored(text, color)
return text
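# Minimal usage sketch (added for illustration; the event fields mirror what a
# CloudWatch Logs event looks like, the values here are made up).
if __name__ == "__main__":
    printer = LogPrinter(
        "my-log-group",
        max_stream_length=12,
        color_enabled=True,
        output_stream_enabled=True,
        output_group_enabled=True,
        output_timestamp_enabled=True,
        output_ingestion_time_enabled=False,
        query=None,
    )
    printer.print_log({
        "logStreamName": "stream-1",
        "timestamp": 1500000000000,
        "ingestionTime": 1500000001000,
        "message": "hello from the log stream",
    })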
|
# -*- coding: utf-8 -*-
from datetime import timedelta
from influxdb import SeriesHelper
from redtrics.api.influxdbapi import InfluxDBApi
from redtrics.api.github import GithubApi
from redtrics.utils.dateutils import DateUtils
from redtrics.metrics.registry import MetricRegistry
from .base import BaseMetric
class CommitsLastWeekSeriesHelper(SeriesHelper):
class Meta:
series_name = 'commits_last_week'
fields = ['commits', 'additions', 'deletions', 'biggest']
tags = ['base']
@MetricRegistry.register
class CommitsLastWeek(BaseMetric):
name = 'commits_last_week'
description = "Commits Last week"
def __init__(self):
BaseMetric.__init__(self)
self.gh = GithubApi()
self.influxdb = InfluxDBApi()
def _retrieve_data(self):
self.commits = []
for repo in self.gh.repos():
self.logger.debug("Retrieving commit info for: repo {} - branch {}".format(repo, self.base))
try:
for commit in repo.iter_commits(self.base, since=DateUtils.one_week_ago() + timedelta(days=1)):
self.commits.append(self.gh.commit(repo.name, commit.sha))
            except Exception:
self.logger.debug("{0} is empty in {1}".format(repo, self.base))
def _compute(self):
self.results = {
'commits': len(self.commits),
'additions': 0,
'deletions': 0,
'biggest': 0
}
for c in self.commits:
self.results['additions'] += c.additions
self.results['deletions'] += c.deletions
self.results['biggest'] = max(self.results['biggest'], c.additions + c.deletions)
def _write_results(self):
CommitsLastWeekSeriesHelper(base=self.base, **self.results)
CommitsLastWeekSeriesHelper.commit(self.influxdb.client)
|
from Disruptor import Disruptor
from Scrambler import Scrambler
import Receiver
from Descrambler import Descrambler
# Class representing the transmission channel for a scrambled/unscrambled signal
class Channel:
    def __init__(self, sygnal, algorythm, channel):
        disrupter = Disruptor(sygnal)  # create the disruption based on the initial signal
        scrambler = Scrambler(sygnal)  # create the scrambler based on the signal
        if(channel == "Main"):
            signal1 = disrupter.disruption2(algorythm)  # disrupt the signal without scrambling
        if(channel == "BSC"):
            signal1 = disrupter.bscDistruption()
        if(channel == "Gilbert"):
            signal1 = disrupter.gilbertDistruption()
        signal2 = scrambler.scramble(algorythm)  # scramble the signal with the given algorithm
        disrupter2 = Disruptor(signal2)  # create a disruptor based on the scrambled signal
        signal3 = disrupter2.disruption(algorythm)  # disrupt the scrambled signal
        descr = Descrambler(signal3)  # create a descrambler based on the disrupted scrambled signal
        signal4 = descr.descramble(algorythm)
        Receiver.receiver(sygnal, signal1, signal4, algorythm)  # send all signals to the receiver
|
from scipy import misc
import tensorflow as tf
import align.detect_face
import matplotlib.pyplot as plt
import numpy as np
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
gpu_memory_fraction = 1.0
# function pick = nms(boxes,threshold,type)
# Non-maximum suppression: remove duplicate detection boxes
def nms(boxes, threshold, method):
if boxes.size==0:
return np.empty((0,3))
    # coordinates of the restored boxes
    print("entering nms non-maximum suppression")
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
print(x1,y1,x2,y2)
    # score values, i.e. the confidence that each box is a face
    s = boxes[:,4]
    print(s)
    area = (x2-x1+1) * (y2-y1+1)
    print(area)
    # sort from small to large; returns the indices
I = np.argsort(s)
#print(I)
pick = np.zeros_like(s, dtype=np.int16)
#print(pick)
counter = 0
s = 0
while I.size>0:
i = I[-1]
s = s+1
        print("entering while %d" % s)
print(i)
pick[counter] = i
counter += 1
idx = I[0:-1]
#print(idx)
#print(type(idx))
#x22= np.array([17.,18.,19.])
#print(x22[idx])
#print( x1[idx])
#print( y1[idx])
#print( x2[idx])
#print( y2[idx])
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
#print(xx1)
#print(yy1)
#print(xx2)
#print(yy2)
w = np.maximum(0.0, xx2-xx1+1)
h = np.maximum(0.0, yy2-yy1+1)
inter = w * h
#print(inter)
#print(area[idx])
#print(area[i])
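        # 'Min' normalizes the overlap by the smaller box area,
        # while 'Union' (the default here) uses the standard IoU denominator.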
        if method == 'Min':
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
#print(o)
#print(threshold)
I = I[np.where(o<=threshold)]
#print(I)
pick = pick[0:counter]
print(pick)
print("_________________________")
return pick
def generateBoundingBox(imap, reg, scale, t):
"""Use heatmap to generate bounding boxes"""
stride = 2
cellsize = 12
    # get the x1, y1, x2, y2 coordinates
    print("entering generate")
#print(imap.shape)
imap = np.transpose(imap)
print(imap.shape)
#print(type(imap))
dx1 = np.transpose(reg[:, :, 0])
dy1 = np.transpose(reg[:, :, 1])
dx2 = np.transpose(reg[:, :, 2])
dy2 = np.transpose(reg[:, :, 3])
    print("entering reg")
#print(reg[:, :, 0].shape)
print(dx1)
print(dy1)
print(dx2)
print(dy2)
    # get the coordinates of face boxes whose confidence exceeds the threshold
print(imap)
y, x = np.where(imap >= t)
print(y)
print(x)
#print(type(y))
#print(y.shape)
#print(y.shape[0])
    # the case where only one candidate matches
    if y.shape[0] == 1:
        #print("entering if branch")
        dx1 = np.flipud(dx1)  # flip the matrix
dy1 = np.flipud(dy1)
dx2 = np.flipud(dx2)
dy2 = np.flipud(dy2)
    # select the boxes that satisfy the condition
print("_____________")
# a= imap[(y,x)]
# print(a)
score = imap[(y, x)]
print(score)
print("_____________")
#print(dx1[(y, x)].shape)
print([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])
print((np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])).shape)
print("_____________")
reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
print(reg.shape)
    if reg.size == 0:
        #print("entering if")
        reg = np.empty((0, 3))
    # restore to the original scale
print("_____________")
#print(np.vstack([y,x]))
bb = np.transpose(np.vstack([y, x]))
print(bb)
    print('entering the computation part')
#print(stride * bb)
print(scale)
# #print((stride * bb + 1))
#print((stride * bb + 1) / scale)
q1 = np.fix((stride * bb + 1) / scale)
q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)
print(q1)
print(q2)
# shape(None, 9)
#print(np.expand_dims(score, 0))
#print(np.expand_dims(score, 1))
boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
#print(boundingbox)
return boundingbox, reg
# In the returned boxes, the first 4 values are the face box coordinates restored to the original scale,
# the 5th value is the probability that the box contains a face, and the last 4 are the unrestored coordinates
# inter-scale nms
# non-maximum suppression: remove duplicate detection boxes
print('Creating networks and loading parameters')
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
image_path = 'C:\\Users\\rjx\\PycharmProjects\\untitled1\\facenet-master\\data\\test\\test4.jpg'
img = misc.imread(image_path)
#print(img.shape)
bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0] # number of faces
#print('number of faces found: {}'.format(nrof_faces))
print(_.shape)
print(bounding_boxes.shape)
#print(type(bounding_boxes))
print(bounding_boxes[:,:4])
det = bounding_boxes[:,0:4]
# store all the face boxes
det_arr = []
#print(type(det_arr))
# original image size
img_size = np.asarray(img.shape)[0:2]
#print(img_size)
# for i in range(nrof_faces):
# #print(det[i])
# print(np.squeeze(det[i]))
# det_arr.append(np.squeeze(det[i]))
# print(det_arr)
# even if there are multiple faces, only one is needed
# get the size of each face box
bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
print(bounding_box_size)
# center coordinates of the original image
img_center = img_size / 2
#print(img_center)
# compute the offset of each face box center relative to the image center;
# the coordinates formed by (det[:,0]+det[:,2])/2 and (det[:,1]+det[:,3])/2 are the face box centers
offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
#print([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
#print(offsets)
# sum of squared offsets from each face box center to the image center
# e.g. if offsets=[[ 4.20016056 145.02849352 -134.53862838] [ -22.14250919 -26.74770141 -30.76835772]]
# then offset_dist_squared=[ 507.93206189 21748.70346425 19047.33436466]
offset_dist_squared = np.sum(np.power(offsets,2.0),0)
#print(offset_dist_squared)
# subtract twice the squared offset from the face box size; the box with the largest result is chosen,
# i.e. position and size are considered together, preferring large boxes close to the image center
index = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
#print(bounding_box_size-offset_dist_squared*2.0)
#print(index)
det_arr.append(det[index,:])
print("______________________________")
#print(det_arr)
#print(enumerate(det_arr))
for i, det in enumerate(det_arr):
    # [4,] expand the bounding box by a margin and crop
det = np.squeeze(det)
#print(i)
#print(det)
bb = np.zeros(4, dtype=np.int32)
    # crop margin around the bounding box: the face crop is a bit larger than the box
    # returned by MTCNN; how much larger is controlled by the margin parameter
# print(bb)
bb[0] = np.maximum(det[0] - 32 / 2, 0)
bb[1] = np.maximum(det[1] - 32 / 2, 0)
bb[2] = np.minimum(det[2] + 32 / 2, img_size[1])
bb[3] = np.minimum(det[3] + 32 / 2, img_size[0])
# print(np.max(det[0] - 32 / 2, 0))
# print(det[1] - 32 / 2)
# print(det[2] + 32 / 2)
# print(det[3] + 32 / 2)
#print(bb)
    # crop the face box, then resize
    cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
    #print(cropped)
    # resize to the target size, then save the image and the bounding box position info
scaled = misc.imresize(cropped, (160, 160), interp='bilinear')
#nrof_successfully_aligned += 1
#filename_base, file_extension = os.path.splitext(output_filename)
#if args.detect_multiple_faces:
# output_filename_n = "{}_{}{}".format(filename_base, i, file_extension)
#else:
# output_filename_n = "{}{}".format(filename_base, file_extension)
    # save the image
    #misc.imsave(output_filename_n, scaled)
    # record the info into the bounding_boxes_XXXXX.txt file
#text_file.write('%s %d %d %d %d\n' % (output_filename_n, bb[0], bb[1], bb[2], bb[3]))
###########################################################################################
factor_count=0
total_boxes=np.empty((0,9))
points=np.empty(0)
#print(type(total_boxes))
print(total_boxes)
print("showing total_boxes")
#print(points)
#print(type(points))
# get the width and height of the input image
h=img.shape[0]
w=img.shape[1]
print(h)
print(w)
# take the smaller of width/height, e.g. 250*250
minl=np.amin([h, w])
#print(minl)
m=12.0/minsize#P Net 12*12 12/20=0.6
minl=minl*m#250*0.6=150
#print(minl)
# create scale pyramid
# build the scale pyramid
scales=[]
while minl>=12:
scales += [m*np.power(factor, factor_count)]
minl = minl*factor
#print(minl)
factor_count += 1
#print(factor_count)
print(scales)
# display the image
plt.figure()
scale_img = img.copy()
# Step 1: resize the image to different scales to form an "image pyramid"
# then run it through the P-Net network
# first stage
i=0
for scale in scales:
    # round width and height up to integers
    hs = int(np.ceil(h * scale))
    ws = int(np.ceil(w * scale))
    print(hs)
    print(ws)
    # resize the image using an opencv-based resample helper
im_data = align.detect_face.imresample(img, (hs, ws))
print(im_data.shape)
    print("im_data is ready")
#plt.imshow(scale_img)
#plt.show()
    # visualize the effect of the "image pyramid"
    # -- added by Wei Fang
#plt.imshow(img)
#plt.show()
#plt.imshow(im_data)
#plt.show()
#scale_img[0:im_data.shape[0], 0:im_data.shape[1]] = 0
#scale_img[0:im_data.shape[0], 0:im_data.shape[1]] = im_data[0:im_data.shape[0], 0:im_data.shape[1]]
# plt.imshow(scale_img)
# plt.show()
# print('im_data.shape[0]', im_data.shape[0])
# print('im_data.shape[1]', im_data.shape[1])
    # # normalize the image data to [-1,1]
# #print(im_data.shape)
im_data = (im_data - 127.5) * 0.0078125
print("---------------------")
#print(im_data.shape)
    # add a batch size dimension; only one image is processed at a time, so batch size is 1
img_x = np.expand_dims(im_data, 0)
#print(img_x.shape)
img_y = np.transpose(img_x, (0, 2, 1, 3))
#print(img_y.shape)
    # feed it into the P-Net network
    # assume img_y.shape=(1, 150, 150, 3)
    # P-Net has three 3*3 conv layers with stride 1*1 and one pooling layer with stride 2*2,
    # so the conv4-2 layer output shape is (1, 70, 70, 4)
    # 70 comes from: (150-3+1)/1=148, after pooling 148/2=74,
    # after another conv layer (74-3+1)/1=72, and another conv layer (72-3+1)/1=70
    # see this blog for the calculation: https://blog.csdn.net/rookie_wei/article/details/80146620
    # the prob1 layer output shape is (1, 70, 70, 2)
out = pnet(img_y)
#print(type(out))
#print(out[0].shape)
#print(out[1].shape)
    # transpose back
    # out0 has shape (1, 70, 70, 4)
    # it holds the coordinates of candidate face boxes
    out0 = np.transpose(out[0], (0, 2, 1, 3))
    # out1 has shape (1, 70, 70, 2)
    # it holds the confidence for each box in out0; the 2nd value is the probability of being a face
    out1 = np.transpose(out[1], (0, 2, 1, 3))
    print("shape of out")
print(out0.shape)
print(out1.shape)
print("-----------------")
#print(out0[:,:,:,:].shape)
print(out0[0,:,:,:].shape)
print("-----------------")
#print(out1[:,:,:,1].shape)
print(out1[0,:,:,1].shape)
    # out1[0,:,:,1]: box confidence; only one value is needed because the two values sum to exactly 1,
    #                so we just take the probability that the box IS a face
    # out0[0,:,:,:]: face boxes
    # scales: image scaling ratios
    # threshold: threshold, 0.6 here
    # in the returned boxes, the first 4 values are the scale-restored box coordinates, the 5th is the
    # probability that the box contains a face, and the last 4 are the unrestored box coordinates
boxes, _ = generateBoundingBox(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, threshold[0])
    # # confidence corresponding to the face box coordinates
    # print('before processing:', out1[0, :, :, 1])
    # print('------------------')
    # s = boxes[:, 4]
    # print('after processing:', s)
    #
    # # # display the face boxes
    # print('------------------')
# x1 = boxes[:, 0]
# y1 = boxes[:, 1]
# x2 = boxes[:, 2]
# y2 = boxes[:, 3]
# print(len(boxes))
# print('------------------')
# for i in range(len(boxes)):
# print(x1[i])
# print(y1[i])
# print(x2[i])
# print(y2[i])
# print('------------------')
# print(i)
# plt.gca().add_patch(plt.Rectangle((x1[i], y1[i]), x2[i] - x1[i], y2[i] - y1[i], edgecolor='w',facecolor='none'))
    # -- added by Wei Fang
# plt.imshow(scale_img)
# plt.show()
# exit()
# inter-scale nms
    # non-maximum suppression: remove duplicate detection boxes
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
total_boxes = np.append(total_boxes, boxes, axis=0)
# x1 = boxes[:, 0]
# y1 = boxes[:, 1]
# x2 = boxes[:, 2]
# y2 = boxes[:, 3]
# for i in range(len(boxes)):
# print(x1[i], y1[i], x2[i], y2[i])
# plt.gca().add_patch(
# plt.Rectangle((x1[i], y1[i]), x2[i] - x1[i], y2[i] - y1[i], edgecolor='w', facecolor='none'))
    # -- added by Wei Fang
#plt.imshow(scale_img)
#plt.show()
#exit()
# after the image has gone through all scales, we have all bbs on the original image from the
# different scales; run NMS once more over these bbs, this time with a higher threshold
numbox = total_boxes.shape[0]
if numbox > 0:
    # run nms again to filter out lower-confidence face boxes
    pick = nms(total_boxes.copy(), 0.7, 'Union')
    total_boxes = total_boxes[pick, :]
    # get the width and height of each face box
regw = total_boxes[:, 2] - total_boxes[:, 0]
regh = total_boxes[:, 3] - total_boxes[:, 1]
# x1 = total_boxes[:, 0]
# y1 = total_boxes[:, 1]
# x2 = total_boxes[:, 2]
# y2 = total_boxes[:, 3]
# for i in range(len(total_boxes)):
# print(x1[i], y1[i], x2[i], y2[i])
# plt.gca().add_patch(
# plt.Rectangle((x1[i], y1[i]), x2[i] - x1[i], y2[i] - y1[i], edgecolor='w', facecolor='none'))
    # refine the face box coordinates to make the boxes tighter
qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
# x1 = qq1
# y1 = qq2
# x2 = qq3
# y2 = qq4
# for i in range(len(total_boxes)):
# print('lll', x1[i], y1[i], x2[i], y2[i])
# plt.gca().add_patch(
# plt.Rectangle((x1[i], y1[i]), x2[i] - x1[i], y2[i] - y1[i], edgecolor='r', facecolor='none'))
    # -- added by Wei Fang
# plt.imshow(scale_img)
# plt.show()
# exit()
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
total_boxes = align.detect_face.rerec(total_boxes.copy())
total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = align.detect_face.pad(total_boxes.copy(), w, h)
# R-Net
numbox = total_boxes.shape[0]
if numbox > 0:
    # second stage R-Net: resize the bbs output by P-Net to 24x24
tempimg = np.zeros((24, 24, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
            # R-Net input size is 24*24, so resize to that
tempimg[:, :, :, k] = align.detect_face.imresample(tmp, (24, 24))
#else:
# return np.empty()
    # normalize to [-1,1]
    tempimg = (tempimg - 127.5) * 0.0078125
    # transpose to [n,24,24,3]
    tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
    # run through the R-Net network
out = rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
ipass = np.where(score > threshold[1])
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
if total_boxes.shape[0] > 0:
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick, :]
total_boxes = align.detect_face.bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))
total_boxes = align.detect_face.rerec(total_boxes.copy())
# Step 3: run through the O-Net network
numbox = total_boxes.shape[0]
if numbox > 0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = align.detect_face.pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48, 48, 3, numbox))
for k in range(0, numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
            # O-Net input size is 48*48, so resize to that
tempimg[:, :, :, k] = align.detect_face.imresample(tmp, (48, 48))
#else:
# return np.empty()
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
    # run through the O-Net network
out = onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
ipass = np.where(score > threshold[2])
points = points[:, ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
w = total_boxes[:, 2] - total_boxes[:, 0] + 1
h = total_boxes[:, 3] - total_boxes[:, 1] + 1
points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1
if total_boxes.shape[0] > 0:
total_boxes = align.detect_face.bbreg(total_boxes.copy(), np.transpose(mv))
pick = nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[pick, :]
points = points[:, pick]
    # display the face boxes and landmarks
for i in range(len(total_boxes)):
x1 = total_boxes[:, 0]
y1 = total_boxes[:, 1]
x2 = total_boxes[:, 2]
y2 = total_boxes[:, 3]
print('lll', x1[i], y1[i], x2[i], y2[i])
plt.gca().add_patch(
plt.Rectangle((x1[i], y1[i]), x2[i] - x1[i], y2[i] - y1[i], edgecolor='r', facecolor='none'))
plt.scatter(points[0], points[5], c='red')
plt.scatter(points[1], points[6], c='red')
plt.scatter(points[2], points[7], c='red')
plt.scatter(points[3], points[8], c='red')
plt.scatter(points[4], points[9], c='red')
plt.imshow(scale_img)
plt.show()
exit()
|
"""A library for describing and applying affine transforms to PIL images."""
import numpy as np
import PIL.Image
class RGBTransform(object):
"""A description of an affine transformation to an RGB image.
This class is immutable.
Methods correspond to matrix left-multiplication/post-application:
for example,
RGBTransform().multiply_with(some_color).desaturate()
describes a transformation where the multiplication takes place first.
Use rgbt.applied_to(image) to return a converted copy of the given image.
For example:
grayish = RGBTransform.desaturate(factor=0.5).applied_to(some_image)
"""
def __init__(self, matrix=None):
self._matrix = matrix if matrix is not None else np.eye(4)
def _then(self, operation):
return RGBTransform(np.dot(_embed44(operation), self._matrix))
def desaturate(self, factor=1.0, weights=(0.299, 0.587, 0.114)):
"""Desaturate an image by the given amount.
A factor of 1.0 will make the image completely gray;
a factor of 0.0 will leave the image unchanged.
The weights represent the relative contributions of each channel.
They should be a 1-by-3 array-like object (tuple, list, np.array).
In most cases, their values should sum to 1.0
(otherwise, the transformation will cause the image
to get lighter or darker).
"""
weights = _to_rgb(weights, "weights")
# tile: [wr, wg, wb] ==> [[wr, wg, wb], [wr, wg, wb], [wr, wg, wb]]
desaturated_component = factor * np.tile(weights, (3, 1))
saturated_component = (1 - factor) * np.eye(3)
operation = desaturated_component + saturated_component
return self._then(operation)
def multiply_with(self, base_color, factor=1.0):
"""Multiply an image by a constant base color.
The base color should be a 1-by-3 array-like object
representing an RGB color in [0, 255]^3 space.
For example, to multiply with orange,
the transformation
RGBTransform().multiply_with((255, 127, 0))
might be used.
The factor controls the strength of the multiplication.
A factor of 1.0 represents straight multiplication;
other values will be linearly interpolated between
the identity (0.0) and the straight multiplication (1.0).
"""
component_vector = _to_rgb(base_color, "base_color") / 255.0
new_component = factor * np.diag(component_vector)
old_component = (1 - factor) * np.eye(3)
operation = new_component + old_component
return self._then(operation)
def mix_with(self, base_color, factor=1.0):
"""Mix an image by a constant base color.
The base color should be a 1-by-3 array-like object
representing an RGB color in [0, 255]^3 space.
For example, to mix with orange,
the transformation
RGBTransform().mix_with((255, 127, 0))
might be used.
The factor controls the strength of the color to be added.
If the factor is 1.0, all pixels will be exactly the new color;
if it is 0.0, the pixels will be unchanged.
"""
base_color = _to_rgb(base_color, "base_color")
operation = _embed44((1 - factor) * np.eye(3))
operation[:3, 3] = factor * base_color
return self._then(operation)
def get_matrix(self):
"""Get the underlying 3-by-4 matrix for this affine transform."""
return self._matrix[:3, :]
def applied_to(self, image):
"""Apply this transformation to a copy of the given RGB* image.
The image should be a PIL image with at least three channels.
Specifically, the RGB and RGBA modes are both supported, but L is not.
Any channels past the first three will pass through unchanged.
The original image will not be modified;
a new image of the same mode and dimensions will be returned.
"""
# PIL.Image.convert wants the matrix as a flattened 12-tuple.
# (The docs claim that they want a 16-tuple, but this is wrong;
# cf. _imaging.c:767 in the PIL 1.1.7 source.)
matrix = tuple(self.get_matrix().flatten())
channel_names = image.getbands()
channel_count = len(channel_names)
if channel_count < 3:
raise ValueError("Image must have at least three channels!")
elif channel_count == 3:
return image.convert('RGB', matrix)
else:
# Probably an RGBA image.
# Operate on the first three channels (assuming RGB),
# and tack any others back on at the end.
channels = list(image.split())
rgb = PIL.Image.merge('RGB', channels[:3])
transformed = rgb.convert('RGB', matrix)
new_channels = transformed.split()
channels[:3] = new_channels
return PIL.Image.merge(''.join(channel_names), channels)
def applied_to_pixel(self, color):
"""Apply this transformation to a single RGB* pixel.
In general, you want to apply a transformation to an entire image.
But in the special case where you know that the image is all one color,
you can save cycles by just applying the transformation to that color
and then constructing an image of the desired size.
For example, in the result of the following code,
image1 and image2 should be identical:
rgbt = create_some_rgb_tranform()
white = (255, 255, 255)
size = (100, 100)
image1 = rgbt.applied_to(PIL.Image.new("RGB", size, white))
image2 = PIL.Image.new("RGB", size, rgbt.applied_to_pixel(white))
The construction of image2 will be faster for two reasons:
first, only one PIL image is created; and
second, the transformation is only applied once.
The input must have at least three channels;
the first three channels will be interpreted as RGB,
and any other channels will pass through unchanged.
To match the behavior of PIL,
the values of the resulting pixel will be rounded (not truncated!)
to the nearest whole number.
"""
color = tuple(color)
channel_count = len(color)
extra_channels = tuple()
if channel_count < 3:
raise ValueError("Pixel must have at least three channels!")
elif channel_count > 3:
color, extra_channels = color[:3], color[3:]
color_vector = np.array(color + (1, )).reshape(4, 1)
result_vector = np.dot(self._matrix, color_vector)
result = result_vector.flatten()[:3]
full_result = tuple(result) + extra_channels
rounded = tuple(int(round(x)) for x in full_result)
return rounded
def _embed44(matrix):
"""Embed a 4-by-4 or smaller matrix in the upper-left of I_4."""
result = np.eye(4)
r, c = matrix.shape
result[:r, :c] = matrix
return result
def _to_rgb(thing, name="input"):
"""Convert an array-like object to a 1-by-3 numpy array, or fail."""
thing = np.array(thing)
assert thing.shape == (3, ), (
"Expected %r to be a length-3 array-like object, but found shape %s" %
(name, thing.shape))
    return thing
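# Minimal usage sketch (added for illustration): tint an image toward orange and
# slightly desaturate it. "input.png" / "tinted.png" are placeholder paths.
if __name__ == "__main__":
    image = PIL.Image.open("input.png").convert("RGB")
    tinted = (RGBTransform()
              .mix_with((255, 127, 0), factor=0.3)
              .desaturate(factor=0.2)
              .applied_to(image))
    tinted.save("tinted.png")
|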
from roboclaw import *
print "set pid started"
current_pid = readM1pidq(128)
print "M1 before p,i,d,qpps: ", current_pid
SetM1pidq(128,22000,80000,16351,180000)
new_pid = readM1pidq(128)
print "M1 after p,i,d,qpps: ", new_pid
current_pid = readM2pidq(128)
print "M2 before p,i,d,qpps: ", current_pid
SetM2pidq(128,22000,80000,16351,180000)
new_pid = readM2pidq(128)
print "M2 after p,i,d,qpps: ", new_pid
current_pid = readM2pidq(129)
print "M3 before p,i,d,qpps: ", current_pid
SetM2pidq(129,22000,80000,16351,180000)
new_pid = readM2pidq(129)
print "M3 after p,i,d,qpps: ", new_pid
|
# (C) StackState 2020
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from ..base.utils.persistent_state import *
|
from fastapi import FastAPI
import os
import sys
from pathlib import Path
DIR = Path(__file__).absolute().parent
sys.path.append(str(DIR))
from data import ReadDatas
data = ReadDatas()
data.main()
root_path = os.getenv("ROOT_PATH", "")
app = FastAPI(
title="子育て施設一覧(保育園・幼稚園・認定こども園等) API",
root_path=root_path
)
@app.get("/")
def hello():
return "Hello! Please access /docs"
@app.get("/list/")
def get_data():
return data.df.T
@app.get("/query/")
def do_query(q=None):
return data.query(q).T
@app.get("/version/")
def get_version():
    return {"version": data.get_version()}
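# How to run locally (illustrative; the module name depends on the actual file name):
#   uvicorn main:app --reload
# Example requests against the routes above:
#   GET /list/        -> all rows
#   GET /query/?q=... -> rows filtered by ReadDatas.query
#   GET /version/     -> {"version": ...}
|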
import os
import sys
import yaml
import json
import subprocess
sys.path.insert(0, os.path.abspath('./functions'))
from create_canned_tasks import main
from jira_api import get_all_tasks_in_epic
# load JIRA username and password
jira_creds = yaml.load(open('./user_dev.yml'), Loader=yaml.SafeLoader)
username = jira_creds['user']['name']
password = jira_creds['user']['password']
# point to JIRA development environment
url = 'https://10.221.100.4'
def test_main():
"""Integration test of main method."""
    output = subprocess.check_output("./run_create_canned_tasks.sh", shell=True)
# TO-DO: Refactor this mess!!!
# parse create_canned_tasks_output
output_index = str(output).find('AI-')
epic_key = str(output)[output_index:][:-3]
key = epic_key.split('-')[0]
id = epic_key.split('-')[1]
# get all tasks associated with epic_key
response = get_all_tasks_in_epic(url, username, password, key, id)
assert response['status_code'] == 200
# # confirm that tasks match with ./tasks/tests/
# number_of_tasks = json.loads(response['response'])['total']
# assert number_of_tasks == 'number_of_tasks_in_tests'
# issues = json.loads(response['response'])['issues']
# issue_summaries = list(map(lambda x: x['fields']['summary'], issues))
# assert issue_summaries == 'summaries in JSON in tests'
|