metadata | text
---|---
{
"source": "jferrarot/routingPlanning",
"score": 3
}
|
#### File: jferrarot/routingPlanning/router.py
```python
import pandas as pd
import pathlib
import math
import googlemaps
from tkinter import filedialog
from tkinter import *
from datetime import datetime
from pandas.core.frame import DataFrame
GMAPS_CLIENT = googlemaps.Client(key=<API_KEY>)
LINK_TEMPLATE="https://www.google.com/maps/search/?api=1&query=<lat>,<lng>"
ORIG_COORDENATES = (<LAT_ORIG>,<LNG_ORIG>)
DEST_COORDENATES = (<LAT_DEST>,<LNG_DEST>)
LAT_MIN_VALID = -13.00
LNG_MIN_VALID = -78.00
LAT_MAX_VALID = -10.00
LNG_MAX_VALID = -75.00
TODAY_STRING = datetime.now().strftime("%d%m%Y_%H%M%S")
OUTPUT_FILE_1 = TODAY_STRING+"_resultFormatted.xlsx"
OUTPUT_FILE_2 = TODAY_STRING+"_resultOrderBiker.xlsx"
OUTPUT_FILE_3 = TODAY_STRING+"_OrderByBiker.txt"
MAX_PERMITTED_ROUTINGS = 10
class Point:
def __init__(self,lat,lng):
self.lat=lat
self.lng=lng
#These functions can be used as a replacement for the Google distance calculator. The orders are first sorted by their relative distance to the starting point.
def rad(x):
return x*math.pi/180
def getDistance(p1,p2):
R = 6378137 #earth mean radius in meters
dLat = rad(p2.lat - p1.lat)
dLong = rad(p2.lng - p1.lng)
a = math.sin(dLat / 2) * math.sin(dLat / 2) + math.cos(rad(p1.lat)) * math.cos(rad(p2.lat)) * math.sin(dLong / 2) * math.sin(dLong / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = R*c
return d
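# Added usage sketch (not part of the original script): sorting delivery points
# by their haversine distance to the origin; the coordinates below are made-up
# Lima-area values.
#   origin = Point(-12.05, -77.04)
#   stops = [Point(-12.10, -77.00), Point(-12.02, -77.10)]
#   stops.sort(key=lambda p: getDistance(origin, p))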
#You can adjust the latitude and longitude bounds if your project has a fixed delivery range.
def validLatLng(lat,lng):
return lat > LAT_MIN_VALID and lat < LAT_MAX_VALID and lng > LNG_MIN_VALID and lng < LNG_MAX_VALID
#This function creates the Google Maps links, but first it validates the latitude & longitude (Lima, Perú only) and that a path exists connecting the starting point and the end point.
def createMapsLinks(addressList):
rows = []
for address in addressList:
row = []
lat,lng,error,link="","","",""
distance = 0
geocodeResult = GMAPS_CLIENT.geocode(address)
if(len(geocodeResult)>0):
lat = geocodeResult[0]['geometry']['location']['lat']
lng = geocodeResult[0]['geometry']['location']['lng']
if(validLatLng(lat,lng)):
geoDirections = GMAPS_CLIENT.directions(origin=ORIG_COORDENATES,destination=[lat,lng])
if(len(geoDirections)>0):
distance = geoDirections[0]['legs'][0]['distance']['value']
error = 'NO'
link = LINK_TEMPLATE.replace("<lat>",str(lat)).replace("<lng>",str(lng))
else:
distance = 0
error = 'Validar dirección: Latitud y longitud no corresponden a Peru.'
link = ""
else:
distance = 0
error = 'Validar dirección: Latitud y longitud no corresponden a Lima.'
link=""
else:
lat=0.00
lng=0.00
distance=0
error="Direccion no valida."
link=""
row.append(lat)
row.append(lng)
row.append(error)
row.append(link)
row.append(distance)
rows.append(row)
dfl = pd.DataFrame(rows,columns=["LAT","LNG","ERROR","LINK","DISTANCE"])
return dfl
#The method works with either file type, xls or xlsx. The file holds the information of a normal order (address, client info, info of the ordered product, etc.). Using pandas,
#it creates a dataframe for easier manipulation, calls the method that creates the map links, writes the result to a new file and returns the dataframe including the new columns:
#latitude (LAT), longitude (LNG), error [if the address wasn't found], link, distance [distance from the origin to that place].
def readerFormatter(fileWithPath,extension):
if(extension==".xlsx"):
excelData = pd.read_excel(fileWithPath, engine='openpyxl')
elif(extension==".xls"):
excelData = pd.read_excel(fileWithPath)
else:
excelData = pd.DataFrame()
if(not excelData.empty):
data = pd.DataFrame(excelData)
addressList = data['FULL ADRESS'].tolist()
dfl = createMapsLinks(addressList)
finalDataFrame=data.reset_index(drop=True).join(dfl.reset_index(drop=True),how='left')
finalDataFrame.to_excel(pathlib.Path(__file__).parent/("output/"+OUTPUT_FILE_1),engine='openpyxl',index=False)
return finalDataFrame
else:
return None
#This method uses Google's route optimization to create the best delivery path for a biker. It returns the dataframe with an extra column holding the visiting order.
def generateFastestDeliveryRoutes(df,nRoutes):
nOrders = len(df.index)
nDeliveries = nOrders//nRoutes
nRestOrders = nOrders%nRoutes
extra =0
if(nRestOrders>0):
extra+=1
totalDeliveries = nDeliveries+extra
start = 0
end = nRoutes
orderList = []
for i in range(totalDeliveries):
dfSlice = df.iloc[start:end]
latitudes = dfSlice['LAT'].tolist()
longitudes = dfSlice['LNG'].tolist()
coordenates = list(zip(latitudes,longitudes))
result=GMAPS_CLIENT.directions(origin=ORIG_COORDENATES,waypoints=coordenates,optimize_waypoints=True,destination=DEST_COORDENATES)
tempList = result[0]['waypoint_order']
tempList = [item+start for item in tempList]
start = end
end = end + nRoutes
orderList = orderList + tempList
df['ID_ROUTE'] = orderList
dfSorted=df.sort_values(by=['ID_ROUTE'])
return dfSorted
#It generates the routing and assigns which biker has to deliver which orders.
def router(df,nBikers,nOrders):
nValidOrders = (df["ERROR"]=="NO").sum() #count orders without errors
validDf = df[df["ERROR"]=="NO"] #new dataFrame
validDf=generateFastestDeliveryRoutes(validDf,nOrders)
nDeliveries = nValidOrders//nOrders
nRestOrders = nValidOrders%nOrders
extraDelivery = 0
if(nRestOrders>0):
extraDelivery+=1
nTotalDeliveries = nDeliveries + extraDelivery
validDf.insert(1,'ASSIGNED_BIKE','NN') #create a new column with the id of a biker
start = 0
end = nOrders
for m in range(nBikers):
validDf.iloc[start:end,validDf.columns.get_loc('ASSIGNED_BIKE')] = m + 1 #assign the biker
start = end
end = end + nOrders
return validDf
#it creates a dictionary of deliveries for easy manipulation
def createDeliverDictionary(dfRoutedFiltered):
ordersByBikers = {"NN":{"quantity":0,"orders":[]}}
for row in dfRoutedFiltered.itertuples():
key = str(row[5])
clientNames = row[6]
telephone = row[7]
link = row[10]
fullAddress = row[8]
order = {"client":clientNames,"telephone":telephone,"link":link,"fullAddress":fullAddress}
if(key in ordersByBikers):
ordersByBikers[key]['quantity'] = ordersByBikers[key]['quantity']+1
ordersByBikers[key]['orders'].append(order)
else:
ordersByBikers[key] = {"quantity":0,"orders":[]}
ordersByBikers[key]['quantity'] = ordersByBikers[key]['quantity']+1
ordersByBikers[key]['orders'].append(order)
return ordersByBikers
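# Added illustration (not part of the original): for two orders assigned to
# biker 1 the resulting dictionary looks roughly like
#   {"NN": {"quantity": 0, "orders": []},
#    "1": {"quantity": 2, "orders": [{"client": ..., "telephone": ...,
#                                     "link": ..., "fullAddress": ...}, ...]}}
# The positional indices used above (row[5] = ASSIGNED_BIKE, row[6] = CONTACT,
# row[7] = PHONE, row[8] = FULL ADRESS, row[10] = LINK) match the column order
# of the filtered dataframe built in main().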
#Uses the created dictionary to print a txt file with the orders grouped by biker.
def printOrdersByBikers(ordersByBikers):
with open(pathlib.Path(__file__).parent/("output/"+OUTPUT_FILE_3),'a') as f:
for key in ordersByBikers:
if(ordersByBikers[key]['quantity']>0):
print(" \tMOTORIZADO "+key+"\tNUMERO ENTREGAS: "+str(ordersByBikers[key]['quantity']), file=f)
listaPedidos = ordersByBikers[key]['orders']
index = 1
for pedido in listaPedidos:
print(str(index)+".\tCLIENTE: "+pedido["client"], file=f)
print(" \tTELEFONO: "+str(pedido["telephone"]), file=f)
print(" \tDIRECCION: "+pedido["fullAddress"], file=f)
print(" \tLINK DIRECCION: "+pedido["link"], file=f)
print("\n",file=f)
index+=1
print("---------------------------------------------------------------------------------------------------------------------------------------", file=f)
#main method
def main():
root = Tk()
root.withdraw()
fileName = filedialog.askopenfilename(title="Seleccione archivo con pedidos:",filetypes=(("archivos xlsx","*.xlsx"),("archivos xls","*.xls")))
root.update()
if(fileName):
fileExtension = pathlib.Path(fileName).suffix
nBikers = 0
nOrdersbyBikers = 0
try:
nBikers = int(input("Ingrese numero de motorizados (mayor a 0): "))
if(nBikers<1):
print("Ingrese un valor valido para motorizados. (mayor a 0)")
raise Exception
nOrdersbyBikers = int(input("Ingrese numero de productos que puede llevar cada motorizado (1 a "+str(MAX_PERMITTED_ROUTINGS)+"): "))
if(nOrdersbyBikers<1 or nOrdersbyBikers>MAX_PERMITTED_ROUTINGS):
print("Ingrese un número de productos por motorizado valido. (1 a "+str(MAX_PERMITTED_ROUTINGS)+")")
raise Exception
try:
df=readerFormatter(fileName,fileExtension)
if(type(df)==DataFrame):
if(not df.empty):
dfSorted=df.sort_values(by=['DISTANCE'])
dfRouted=router(dfSorted,nBikers,nOrdersbyBikers)
dfEnrutadoFiltered = dfRouted[["#","ID","ID CC","ID_ROUTE","ASSIGNED_BIKE","CONTACT","PHONE","FULL ADRESS","COMMENT","LINK","LAT","LNG"]]
dfEnrutadoFiltered.to_excel(pathlib.Path(__file__).parent/("output/"+OUTPUT_FILE_2),engine='openpyxl',index=False)
formatoImpresion = createDeliverDictionary(dfEnrutadoFiltered)
printOrdersByBikers(formatoImpresion)
except Exception:
print("Ha habido un error durante la ejecución del archivo base, revise que las columnas estén correctas o no se hayan cambiado de nombre.")
input("Presione Enter para salir...")
except ValueError:
print("No es un valor entero válido.")
input("Presione Enter para salir...")
except Exception:
input("Presione Enter para salir...")
else:
print("No se ha seleccionado archivo.")
input("Presione Enter para salir...")
if __name__ == "__main__":
main()
```
|
{
"source": "jferreira23/tutorial-pybr",
"score": 3
}
|
#### File: tutorial-pybr/api_pedidos/magalu_api.py
```python
import os
from http import HTTPStatus
from uuid import UUID
import httpx
from api_pedidos.esquema import Item
from api_pedidos.excecao import (
FalhaDeComunicacaoError,
PedidoNaoEncontradoError,
)
# tenant and apikey hard-coded only for demonstrations
APIKEY = os.environ.get("APIKEY", "coloque aqui sua apikey")
TENANT_ID = os.environ.get("TENANT_ID", "21fea73c-e244-497a-<PASSWORD>6")
MAGALU_API_URL = "https://alpha.api.magalu.com"
MAESTRO_SERVICE_URL = f"{MAGALU_API_URL}/maestro/v1"
def _recupera_itens_por_pacote(uuid_do_pedido, uuid_do_pacote):
response = httpx.get(
f"{MAESTRO_SERVICE_URL}/orders/{uuid_do_pedido}/packages/{uuid_do_pacote}/items",
headers={"X-Api-Key": APIKEY, "X-Tenant-Id": TENANT_ID},
)
response.raise_for_status()
return [
Item(
sku=item["product"]["code"],
# fields that use the get function are optional
description=item["product"].get("description", ""),
image_url=item["product"].get("image_url", ""),
reference=item["product"].get("reference", ""),
quantity=item["quantity"],
)
for item in response.json()
]
def recuperar_itens_por_pedido(identificacao_do_pedido: UUID) -> list[Item]:
try:
response = httpx.get(
f"{MAESTRO_SERVICE_URL}/orders/{identificacao_do_pedido}",
headers={"X-Api-Key": APIKEY, "X-Tenant-Id": TENANT_ID},
)
response.raise_for_status()
pacotes = response.json()["packages"]
itens = []
for pacote in pacotes:
itens.extend(
_recupera_itens_por_pacote(
identificacao_do_pedido, pacote["uuid"]
)
)
return itens
except httpx.HTTPStatusError as exc:
# other errors, such as authentication failures, could be handled here
if exc.response.status_code == HTTPStatus.NOT_FOUND:
raise PedidoNaoEncontradoError() from exc
except httpx.HTTPError as exc:
raise FalhaDeComunicacaoError() from exc
```
|
{
"source": "jferrl/Strimko-Resolver",
"score": 4
}
|
#### File: Strimko-Resolver/proyect/Strimko.py
```python
import sys
from constraint import Problem, AllDifferentConstraint
matrixOfStrimko = None
class Matrix:
def __init__(self,size):
self.size = size
#matrix of the game
self.matrix = [[0 for x in range(self.size)] for y in range(self.size)]
# routes of the game are saved here
self.routes = []
def addRoute(self,route):
if len(self.routes) == self.size:
return
if route.getRouteSize() > self.size:
return
self.routes.append(route)
def printMatrix(self):
print(self.matrix)
def printRoutes(self):
for i in range(self.size):
print(self.routes[i].printRoute())
class Route:
#elements are a list of boxes
def __init__(self,elements):
self.elements = elements
def printRoute(self):
for i in range(len(self.elements)):
print(self.elements[i])
def getRouteSize(self):
return len(self.elements)
def solve():
problem = Problem()
size = matrixOfStrimko.size + 1
# Define the variables: rows of x variables ranging over 1...x
# x = size of the matrix
for i in range(1, size):
problem.addVariables(range(i * 10 + 1, i * 10 + size), range(1, size))
#Each row has different values
for i in range(1, size):
problem.addConstraint(AllDifferentConstraint(), range(i * 10 + 1, i * 10 + size ))
# Each column has different values
for i in range(1, size):
problem.addConstraint(AllDifferentConstraint(), range(10 + i, 10 * size + i, 10))
#Each route has different values
for i in range(0, size - 1):
problem.addConstraint(AllDifferentConstraint(),matrixOfStrimko.routes[i].elements)
return problem.getSolutions()
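# Added illustration (not part of the original solver): each cell is encoded as
# the integer row*10 + column, so for a 3x3 Strimko the variables are
# 11, 12, 13 / 21, 22, 23 / 31, 32, 33, each with domain 1..3. Row 2 is the
# range 21..23, column 2 is 12, 22, 32 (step 10), and a route box entered as
# "2 3" on the console is stored as the same encoded cell, 23, which is why the
# route elements can be passed straight to AllDifferentConstraint.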
def getRouteFromUserImput(size):
elementsOfRoute = []
#get the elements of the route from console
for i in range(size):
inputOfBox = input()
inputOfBox = inputOfBox.split()
#convert it to integer
box = inputOfBox[0] + inputOfBox[1]
#store the box into the elements of the Route
elementsOfRoute.append(int(box))
return elementsOfRoute
def printSolutions(solutions):
for solution in solutions:
for i in range(1, matrixOfStrimko.size + 1):
for j in range(1, matrixOfStrimko.size + 1):
index = i * 10 + j
sys.stdout.write("%s " % solution[index])
print("")
print("")
def main():
global matrixOfStrimko
sizeOfMatrix = int(input())
if sizeOfMatrix <2:
print("Invalid input: the size of the strimko must be 2 or higher")
return
matrix = Matrix(sizeOfMatrix)
for i in range(sizeOfMatrix):
matrix.addRoute(Route(getRouteFromUserImput(sizeOfMatrix)))
input()
matrixOfStrimko = matrix
solutions = solve()
#print the number of solutions of the strimko
print(len(solutions))
#uncomment to show the matrices with the possible solutions
#printSolutions(solutions)
if __name__ == "__main__":
main()
```
|
{
"source": "jferroaq/Tarea7z",
"score": 3
}
|
#### File: jferroaq/Tarea7z/hello.py
```python
import kivy
from kivy.app import App
from kivy.uix.button import Label
from kivy.uix.colorpicker import ColorPicker
from kivy.graphics import Color, Ellipse, Triangle
from kivy.properties import StringProperty, ObjectProperty
class Titulo(Label):
cadena=StringProperty("Jesus te ama...")
triangle=ObjectProperty(None)
def __init__(self, **kwargs):
super(Titulo, self).__init__(**kwargs)
with self.canvas:
self.triangle=Triangle(points= [40, 40, 200, 200, 160, 40])
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
self.cadena="Collide: "+str(touch.pos)
print("on_touch_down-->Collide")
return True
return super(Titulo, self).on_touch_down(touch)
def on_cadena(self, obj, pos):
print("Se ha actualizado 'Cadena'")
def on_triangle(self, obj, pos):
print("Se ha actualizado 'triangle'")
class SaludoApp(App):
def build(self):
self.paleta=ColorPicker()
self.pintor=Titulo()
self.pintor.bind(on_touch_down=self.dentro)
return self.pintor
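# Added note (not part of the original): dentro() below walks the triangle's
# edges and uses the sign of the 2D cross product (b*c - a*d) to check that the
# touch point lies on the same side of every edge, i.e. inside the triangle;
# only then is the color picker added to the widget.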
def dentro(self, obj, st):
lista=self.pintor.triangle.points
tu=st.x, st.y
rpta = True
py=lista[-1]
px=lista[-2]
for i in range(0, len(lista), 2):
px0=px
py0=py
px=lista[i]
py=lista[i+1]
a=px - px0
b=py - py0
c=tu[0] - px0
d=tu[1] - py0
if (b*c - a*d) < 0:
rpta = False
print(rpta)
break
if rpta == True:
self.pintor.add_widget(self.paleta)
return rpta
def eleccion(self, obj, st):
print("Pos X: %g, Pos Y: %g" %(st.x, st.y))
ca,cb,cc = .5, .5, .6
a,b = 150,45
radio = 50
with self.pintor.canvas:
Color(ca, cb, cc, mode = 'hsv' )
Triangle(
points = [0, 0, 100, 100, 80, 20])
if __name__ in ["__main__", "__android__"]:
SaludoApp().run()
```
|
{
"source": "JFEscobarM/Proyecto_final",
"score": 3
}
|
#### File: JFEscobarM/Proyecto_final/app.py
```python
from flask import Flask, render_template, request, redirect, url_for, session
from flask_mysqldb import MySQL,MySQLdb
from os import path
from notifypy import Notify
app = Flask(__name__)
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'proyecto_p'
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'
mysql = MySQL(app)
@app.route('/')
def home():
return render_template("contenido.html")
@app.route('/layout', methods = ["GET", "POST"])
def layout():
session.clear()
return render_template("contenido.html")
@app.route('/login', methods= ["GET", "POST"])
def login():
notificacion = Notify()
if request.method == 'POST':
email = request.form['email']
password = request.form['password']
cur = mysql.connection.cursor()
cur.execute("SELECT * FROM users WHERE email=%s",(email,))
user = cur.fetchone()
cur.close()
if user!=None:
if password == user["password"]:
session['name'] = user['name']
session['email'] = user['email']
session['tipo'] = user['id_tip_usu']
if session['tipo'] == 1:
return render_template("docente/home.html")
elif session['tipo'] == 2:
return render_template("estudiante/homeTwo.html")
else:
notificacion.title = "Error de Acceso"
notificacion.message="Correo y/o contraseña incorrectos"
notificacion.send()
return render_template("login.html")
else:
notificacion.title = "Error de Acceso"
notificacion.message="El usuario no se encuentra registrado"
notificacion.send()
return render_template("login.html")
else:
return render_template("login.html")
@app.route('/registro', methods = ["GET", "POST"])
def registro():
cur = mysql.connection.cursor()
cur.execute("SELECT * FROM tip_usu")
tipo = cur.fetchall()
notificacion = Notify()
if request.method == 'GET':
return render_template("registro.html", tipo = tipo)
else:
name = request.form['name']
email = request.form['email']
password = request.form['password']
tip = request.form['tipo']
cur = mysql.connection.cursor()
cur.execute("INSERT INTO users (name, email, password, id_tip_usu) VALUES (%s,%s,%s,%s)", (name, email, password,tip,))
mysql.connection.commit()
notificacion.title = "Registro Exitoso"
notificacion.message="Ya se encuentra registrado, por favor inicie sesión para ingresar a la plataforma"
notificacion.send()
return redirect(url_for('login'))
if __name__ == '__main__':
app.secret_key = "sllave"
app.run(debug=True)
```
#### File: notifypy/os_notifiers/linux.py
```python
from loguru import logger
import subprocess
import shlex
from ..exceptions import BinaryNotFound, NotificationFailure, LinuxDbusException
from ._base import BaseNotifier
try:
from jeepney import DBusAddress, new_method_call
from jeepney.io.blocking import open_dbus_connection
import os
# check if dbus is available
_dbus_address = os.getenv("DBUS_SESSION_BUS_ADDRESS")
if _dbus_address:
logger.info("Jeepney and Dbus is available. Using DBUS for notifications..")
USE_LEGACY = False
else:
logger.error(
"Jeepney is available but DBUS is not. Using legacy notification instead."
)
USE_LEGACY = True
except ImportError:
logger.error("DBUS suppport not installed. Using libnotify for notifications!")
USE_LEGACY = True
class LinuxNotifierLibNotify(BaseNotifier):
def __init__(self):
"""Main Linux Notification Class
This uses libnotify's tool of notify-send.
I'll add support for (and probably use as first choice) sending
through dbus.
"""
call_find_notify_send = self._find_installed_notify_send()
if not call_find_notify_send:
logger.error("Unable to find notify-send.")
raise BinaryNotFound("notify-send")
if call_find_notify_send:
self._notify_send_binary = call_find_notify_send
call_find_aplay = self._find_installed_aplay()
if not call_find_aplay:
# no Aplay is available.
self._aplay_binary = False
else:
self._aplay_binary = call_find_aplay
@staticmethod
def _find_installed_aplay():
"""Function to find the path for aplay"""
try:
run_which_for_aplay = subprocess.check_output(["which", "aplay"])
return run_which_for_aplay.decode("utf-8")
except subprocess.CalledProcessError:
logger.exception("Unable to find aplay.")
return False
except Exception:
logger.exception("Unhandled exception for finding aplay.")
return False
@staticmethod
def _find_installed_notify_send():
"""Function to find the path for notify-send"""
try:
run_which_for_notify_send = subprocess.check_output(
["which", "notify-send"]
)
return run_which_for_notify_send.decode("utf-8")
except subprocess.CalledProcessError:
logger.exception("Unable to find notify-send.")
return False
except Exception:
logger.exception("Unhandled exception for finding notify-send.")
return False
def send_notification(
self,
notification_title,
notification_subtitle,
notification_icon,
notification_audio,
**kwargs,
):
try:
notification_title = " " if notification_title == "" else notification_title
notification_subtitle = (
" " if notification_subtitle == "" else notification_subtitle
)
generated_command = [
self._notify_send_binary.strip(),
notification_title,
notification_subtitle,
]
if notification_icon:
generated_command.append(f"--icon={shlex.quote(notification_icon)}")
if kwargs.get("application_name"):
generated_command.append(
f"--app-name={shlex.quote(kwargs.get('application_name'))}"
)
logger.debug(f"Generated command: {generated_command}")
if notification_audio:
if self._aplay_binary == False:
raise BinaryNotFound("aplay (Alsa)")
subprocess.Popen(
[self._aplay_binary.strip(), notification_audio],
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
subprocess.check_output(generated_command)
return True
except subprocess.CalledProcessError:
logger.exception("Unable to send notification.")
return False
except Exception:
logger.exception("Unhandled exception for sending notification.")
return False
class LinuxNotifier(BaseNotifier):
def __init__(self, **kwargs):
"""Main Linux Notification Class (Dbus)
This uses jeepney library as the dbus communicator
"""
self._dbus_notifications = DBusAddress(
"/org/freedesktop/Notifications",
bus_name="org.freedesktop.Notifications",
interface="org.freedesktop.Notifications",
)
call_find_aplay = self._find_installed_aplay()
if not call_find_aplay:
# no Aplay is available.
self._aplay_binary = False
logger.debug("aplay binary not installed.. audio will not work!")
else:
self._aplay_binary = call_find_aplay
if kwargs.get("linux_fallback_libnotify"):
self._fallback_to_libnotify = True
else:
self._fallback_to_libnotify = False
@staticmethod
def _find_installed_aplay():
"""Function to find the path for aplay"""
try:
run_which_for_aplay = subprocess.check_output(["which", "aplay"])
return run_which_for_aplay.decode("utf-8")
except subprocess.CalledProcessError:
logger.exception("Unable to find aplay.")
return False
except Exception:
logger.exception("Unhandled exception for finding aplay.")
return False
def send_notification(
self,
notification_title,
notification_subtitle,
notification_icon,
notification_audio,
**kwargs,
):
try:
_attempt_to_open_dbus_connection = open_dbus_connection(bus="SESSION")
logger.debug("linux: opened dbus connection")
except Exception:
logger.exception("issue with opening DBUS connection!")
if self._fallback_to_libnotify == True:
logger.debug("falling back to libnotify!")
return LinuxNotifierLibNotify().send_notification(
notification_title,
notification_subtitle,
notification_icon,
notification_audio,
**kwargs,
)
else:
logger.exception(
"there was an exception trying to open the dbus connection. fallback was not enabled, therefore this will return False."
)
return False
try:
notification_title = " " if notification_title == "" else notification_title
notification_subtitle = (
" " if notification_subtitle == "" else notification_subtitle
)
if notification_audio:
# TODO: https://specifications.freedesktop.org/notification-spec/latest/ar01s09.html
# use sound param instead of relying on alsa?
if self._aplay_binary == False:
raise BinaryNotFound("aplay (Alsa)")
subprocess.Popen(
[self._aplay_binary.strip(), notification_audio],
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
create_notification = new_method_call(
self._dbus_notifications,
"Notify",
"susssasa{sv}i",
(
kwargs.get("application_name"), # App name
0, # Not replacing any previous notification
notification_icon if notification_icon else "", # Icon
notification_title, # Summary
notification_subtitle,
[],
{}, # Actions, hints
-1, # expire_timeout (-1 = default)
),
)
reply = _attempt_to_open_dbus_connection.send_and_get_reply(
create_notification, timeout=2
)
logger.debug(f"confirmed notification sent! id: {reply}")
_attempt_to_open_dbus_connection.close()
return True
except Exception:
logger.exception("issue with sending through dbus!")
if self._fallback_to_libnotify == True:
logger.debug("falling back to libnotify!")
return LinuxNotifierLibNotify().send_notification(
notification_title,
notification_subtitle,
notification_icon,
notification_audio,
**kwargs,
)
return False
```
#### File: notifypy/os_notifiers/windows.py
```python
import pathlib
import os
import subprocess
from xml.etree import ElementTree
import tempfile
import uuid
import codecs
from loguru import logger
from ._base import BaseNotifier
class WindowsNotifier(BaseNotifier):
def __init__(self):
"""Main Notification System for Windows. Basically ported from go-toast/toast"""
# Create the base
self._top_ps1_script = f"""
[Windows.UI.Notifications.ToastNotificationManager, Windows.UI.Notifications, ContentType = WindowsRuntime] | Out-Null
[Windows.UI.Notifications.ToastNotification, Windows.UI.Notifications, ContentType = WindowsRuntime] | Out-Null
[Windows.Data.Xml.Dom.XmlDocument, Windows.Data.Xml.Dom.XmlDocument, ContentType = WindowsRuntime] | Out-Null
"""
def _generate_notification_xml(
self,
application_id,
notification_title,
notification_subtitle,
notification_icon,
notification_audio,
):
# Create the top <toast> element
top_element = ElementTree.Element("toast")
# set the duration for the top element
top_element.set("duration", "short")
# create the <visual> element
visual_element = ElementTree.SubElement(top_element, "visual")
# create <binding> element
binding_element = ElementTree.SubElement(visual_element, "binding")
# add the required attribute for this.
# For some reason, go-toast set the template attribute to "ToastGeneric"
# but it never worked for me.
binding_element.set("template", "ToastImageAndText02")
# create <image> element
image_element = ElementTree.SubElement(binding_element, "image")
# add an Id
image_element.set("id", "1")
# add the src
image_element.set("src", notification_icon)
# add the message and title
title_element = ElementTree.SubElement(binding_element, "text")
title_element.set("id", "1")
title_element.text = notification_title
message_element = ElementTree.SubElement(binding_element, "text")
message_element.set("id", "2")
message_element.text = notification_subtitle
if notification_audio:
# the user has provided his own audio file, no need to play the default sound.
audio_element = ElementTree.SubElement(top_element, "audio")
audio_element.set("silent", "true")
# Great we have a generated XML notification.
# We need to create the rest of the .ps1 file and dump it to the temporary directory
generated_ps1_file = f"""
{self._top_ps1_script}
$APP_ID = "{application_id}"
$template = @"
{ElementTree.tostring(top_element, encoding="utf-8").decode('utf-8')}
"@
$xml = New-Object Windows.Data.Xml.Dom.XmlDocument
$xml.LoadXml($template)
$toast = New-Object Windows.UI.Notifications.ToastNotification $xml
[Windows.UI.Notifications.ToastNotificationManager]::CreateToastNotifier($APP_ID).Show($toast)
"""
return generated_ps1_file
def send_notification(
self,
notification_title,
notification_subtitle,
notification_icon,
application_name,
notification_audio,
):
generated_file = self._generate_notification_xml(
notification_title=notification_title,
notification_subtitle=notification_subtitle,
notification_icon=notification_icon,
application_id=application_name,
notification_audio=notification_audio,
)
if notification_audio:
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
subprocess.Popen(
[
"Powershell",
f'(New-Object Media.SoundPlayer "{notification_audio}").playsync()',
],
startupinfo=startupinfo,
)
# open the temporary directory
with tempfile.TemporaryDirectory() as temp_dir:
generated_uuid_file = str(uuid.uuid4())
with codecs.open(
f"{temp_dir}/{generated_uuid_file}.ps1", "w", "utf_8_sig"
) as ps1_file:
ps1_file.write(generated_file)
# execute the file
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
subprocess.Popen(
[
"Powershell",
"-ExecutionPolicy",
"Bypass",
"-File",
f"{generated_uuid_file}.ps1",
],
cwd=temp_dir,
startupinfo=startupinfo,
).wait()
return True
```
|
{
"source": "jfeser/castor",
"score": 2
}
|
#### File: bench/demomatch/run-bench.py
```python
import configparser
import logging
import psycopg2
import os
import shutil
import json
import shlex
from subprocess import check_call, check_output
log = logging.getLogger(name=__file__)
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(logging.DEBUG)
def rpath(p):
return os.path.join(os.path.dirname(os.path.abspath(__file__)), p)
CONFIG = configparser.ConfigParser()
CONFIG.read(rpath('../../config'))
COMPILE_EXE = CONFIG['default']['build_root'] + '/bin/compile.exe'
DB = CONFIG['default']['demomatch_db']
PRESENT_QUERIES = 10
ABSENT_QUERIES = 10
TABLE_SIZE = 10000000
BENCHMARKS = [
rpath('example1.txt'),
rpath('example2.txt'),
rpath('example3.txt'),
]
def bench_dir(bench_file):
return os.path.splitext(bench_file)[0]
PORT = CONFIG['default'].get('port', 5432)  # PORT was undefined in the original; assumed to come from the config (defaulting to 5432)
conn = psycopg2.connect("dbname='{}' port='{}'".format(DB, PORT))
# Generate benchmark table.
log.debug('Generating benchmark table ({} rows).'.format(TABLE_SIZE))
c = conn.cursor()
c.execute('drop table if exists log_bench')
c.execute('drop index if exists idx_id')
c.execute('drop index if exists idx_counter')
c.execute('drop index if exists idx_succ')
c.execute('create table log_bench as (select * from swing_components_progressbardemo.log order by random() limit {})'.format(TABLE_SIZE))
log.debug('Generating benchmark id index.')
c.execute('create index idx_id on log_bench (id)')
log.debug('Generating benchmark counter index.')
c.execute('create index idx_counter on log_bench (counter)')
log.debug('Generating benchmark succ index.')
c.execute('create index idx_succ on log_bench (succ)')
conn.commit()
log.info('Generating benchmark table done.')
for bench in BENCHMARKS:
# Make benchmark dir.
benchd = bench_dir(bench)
if os.path.isdir(benchd):
shutil.rmtree(benchd)
os.mkdir(benchd)
# Build, saving the log.
with open(benchd + '/build.log', 'w') as b_log:
cmd = [
COMPILE_EXE,
'-v',
'-o', benchd,
'-db', DB,
'-p', 'id_p:string',
'-p', 'id_c:string',
bench
]
cmd_str = shlex.quote(' '.join(cmd))
log.debug('Building %s in %s.', cmd_str, os.getcwd())
check_call(cmd, stdout=b_log, stderr=b_log)
log.info('Done building %s.', bench)
# Run benchmarks
times = []
c.execute('select * from (select lp.id, lc.id from log_bench as lp, log_bench as lc where lp.counter < lc.counter and lc.counter < lp.succ order by random()) as t limit %d' % PRESENT_QUERIES)
present_ids = c.fetchall()
for (lp_id, lc_id) in present_ids:
ts = []
query = "explain (format json, analyze) select lp.counter from log_bench as lp, log_bench as lc where lp.counter < lc.counter and lc.counter < lp.succ and lp.id = '%s' and lc.id = '%s'" % (lp_id, lc_id)
log.debug('Running Postgres query: %s', query)
c.execute(query)
pg_time = c.fetchall()[0][0][0]['Execution Time']
ts.append(pg_time)
log.info('Done running Postgres query.')
for bench_num, bench in enumerate(BENCHMARKS):
benchd = bench_dir(bench)
os.chdir(benchd)
cmd = ['./scanner.exe', '-t', '1', 'data.bin', str(lp_id), str(lc_id)]
cmd_str = shlex.quote(' '.join(cmd))
try:
log.debug('Running %s in %s.', cmd_str, os.getcwd())
out = check_output(cmd, encoding='utf-8')  # check_output already captures stdout; capture_output is not a valid argument here
ts.append(float(out.split(' ')[0][:-2]))
except:
log.exception('Running %s failed.', cmd)
ts.append(None)
# try:
# log.debug('Profiling %s in %s.', cmd_str, os.getcwd())
# run(cmd, env={'CPUPROFILE': '%d.prof' % bench_num})
# except:
# log.exception('Profiling %s failed.', cmd_str)
os.chdir('..')
log.debug(ts)
times.append(ts)
logging.shutdown()
```
|
{
"source": "jfeser/castor-opt",
"score": 3
}
|
#### File: castor-opt/bin/logtime.py
```python
from datetime import datetime
import re
import sys
def main(fn):
with open(fn, 'r') as f:
for line in f:
m = re.search(r'^\[[A-Z]+\] \[(.+?)\]', line)
if m is not None:
first_date = datetime.fromisoformat(m.group(1))
break
else:
print('Error: No date found.')
exit(1)
for line in reversed(list(f)):
m = re.search(r'^\[[A-Z]+\] \[(.+?)\]', line)
if m is not None:
last_date = datetime.fromisoformat(m.group(1))
break
else:
print('Error: No date found.')
exit(1)
print(last_date - first_date)
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Usage: logtime.py LOG')
exit(1)
main(sys.argv[1])
```
|
{
"source": "jfeser/earley",
"score": 4
}
|
#### File: jfeser/earley/ambiguate.py
```python
import sys
def parse_grammar(fn):
grammar = {}
with open(fn, 'r') as grammar_file:
for line in grammar_file:
line = line.strip() # Strip trailing whitespace
line = line.split('#')[0] # Stop reading after comment starts
if len(line) == 0: continue
# Split into component parts
line = line.split()
lhs = line[0]
rhs = line[1:]
# Add to grammar
if lhs not in grammar:
grammar[lhs] = []
grammar[lhs].append(rhs)
return grammar
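# Added example (not part of the original): a grammar file line such as
#   S  NP VP   # a sentence is a noun phrase followed by a verb phrase
# is parsed into grammar['S'] == [['NP', 'VP']]; further lines with the same
# left-hand side append more alternatives to that list, and '#' starts a
# comment that is ignored.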
def print_grammar(grammar):
for (lhs, rhs) in grammar.items():
for rule in rhs:
print('%s\t%s' % (lhs, ' '.join(rule)))
def symbols(g):
res = set([])
for (lhs, rhs) in g.items():
res.add(lhs)
for syms in rhs:
res |= set(syms)
return res
def nonterminals(g):
return set(g.keys())
def terminals(g):
return symbols(g) - nonterminals(g)
def main(grammar_fn, n):
grammar = parse_grammar(grammar_fn)
for i in range(n-1):
mapping = { sym : '%s_%d' % (sym, i) for sym in nonterminals(grammar) }
for (lhs, rhs) in list(grammar.items()):
new_lhs = mapping[lhs]
new_rhs = [[mapping.get(sym, sym) for sym in rule] for rule in rhs]
grammar[new_lhs] = new_rhs
for (s1, s2) in mapping.items():
grammar[s1].append([s2])
print_grammar(grammar)
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Usage: ambiguate.py GRAMMAR N')
exit(1)
main(sys.argv[1], int(sys.argv[2]))
```
#### File: jfeser/earley/reference.py
```python
from pprint import pprint
import sys
def parse_grammar(grammar_file):
grammar = {}
# Parse grammar
for line in grammar_file:
line = line.strip() # Strip trailing whitespace
line = line.split('#')[0] # Stop reading after comment starts
if len(line) == 0: continue
# Split into component parts
line = line.split()
lhs = line[0]
rhs = line[1:]
# Add to grammar
if lhs not in grammar:
grammar[lhs] = []
grammar[lhs].append(rhs)
return grammar
class StateSet(object):
def __init__(self, states=[]):
self._states = set(states)
self._work = list(self._states)
def __str__(self):
return '{%s}' % ', '.join([str(s) for s in self._states])
__repr__ = __str__
def add(self, s):
if s not in self._states:
self._states.add(s)
self._work.append(s)
def has_next(self):
return self._work != []
def next(self):
return self._work.pop(0)
class State(object):
def __init__(self, lhs, rhs, pos=0, origin=0):
self.lhs = lhs
self.rhs = tuple(rhs)
self.pos = pos
self.origin = origin
def __str__(self):
rhs_dot = list(self.rhs)
rhs_dot.insert(self.pos, '•')
rhs_str = ' '.join(rhs_dot)
return '%s -> %s' % (self.lhs, rhs_str)
__repr__ = __str__
def __eq__(self, other):
return (self.lhs, self.rhs, self.pos, self.origin) == \
(other.lhs, other.rhs, other.pos, other.origin)
def __hash__(self):
return hash((self.lhs, self.rhs, self.pos, self.origin))
def finished(self):
return self.pos == len(self.rhs)
def next_elem(self):
return self.rhs[self.pos]
def incr_pos(self):
return State(self.lhs, self.rhs, self.pos + 1, self.origin)
def parse(grammar, words):
# Create chart.
chart = [StateSet() for _ in range(len(words) + 1)]
def predictor(state, k):
lhs = state.next_elem()
for rhs in grammar[lhs]:
chart[k].add(State(lhs, rhs, origin=k))
def scanner(state, k):
if k >= len(words):
return
if words[k] == state.next_elem():
chart[k+1].add(state.incr_pos())
def completer(state, k):
assert state.origin != k
for s in chart[state.origin]._states:
if not (s.finished()) and s.rhs[s.pos] == state.lhs:
chart[k].add(s.incr_pos())
# Initialize.
for rhs in grammar['START']:
chart[0].add(State('START', rhs))
for k in range(len(words) + 1):
while chart[k].has_next():
state = chart[k].next()
if not state.finished():
if state.next_elem() in grammar:
predictor(state, k)
else:
scanner(state, k)
else:
completer(state, k)
return chart
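# Added note (illustration, not part of the original): chart[k] holds the
# Earley states that are live after reading the first k words. A state
# LHS -> a b . c with origin j sitting in chart[k] means that "a b" derived
# words[j:k] and "c" is still expected; states expecting a nonterminal feed the
# predictor, states expecting a terminal feed the scanner, and finished states
# (dot at the end) feed the completer.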
def main(grammar_file, in_file):
grammar = parse_grammar(grammar_file)
words = in_file.read().split()
chart = parse(grammar, words)
for (k, states) in enumerate(chart):
for state in states._states:
print("(0, ({}, {}, {}))".format(k, state, state.origin))
if __name__ == '__main__':
if len(sys.argv) > 3 or len(sys.argv) < 2:
print('Usage: earley.py GRAMMAR [FILE]')
exit(1)
grammar_file = open(sys.argv[1], 'r')
if len(sys.argv) == 3:
in_file = open(sys.argv[2], 'r')
else:
in_file = sys.stdin
main(grammar_file, in_file)
```
|
{
"source": "jfeser/ImputeDB",
"score": 2
}
|
#### File: test/experiments/run_experiments.py
```python
from __future__ import print_function
from generate_big_joins import create_join_workload
import os
import subprocess
import tempfile
executable_default = ["java","-Xmx3200m","-jar","../../dist/simpledb.jar"]
executable_longimpute = ["java","-Xmx3200m","-Dsimpledb.ImputeSlow","-jar","../../dist/simpledb.jar"]
output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
catalog_default = "../../catalog.txt"
queries_default = "queries.txt"
def run_large05_experiment():
iters = 220
min_alpha = 0.00
max_alpha = 1.00
step = 0.499999
queries = queries_default
this_output_dir = os.path.join(output_dir, "regression_tree")
run_experiment(this_output_dir, iters, min_alpha, max_alpha, step, queries =
queries, executable = executable_longimpute, imputationMethod =
"REGRESSION_TREE")
this_output_dir = os.path.join(output_dir, "mean")
run_experiment(this_output_dir, iters, min_alpha, max_alpha, step, queries =
queries, executable = executable_longimpute, imputationMethod =
"MEAN")
this_output_dir = os.path.join(output_dir, "hot_deck")
run_experiment(this_output_dir, iters, min_alpha, max_alpha, step, queries =
queries, executable = executable_longimpute, imputationMethod =
"HOTDECK")
def run_large05_experiment_count():
iters = 220
min_alpha = 0.00
max_alpha = 1.00
step = 0.499999
queries = "queries_count.txt"
this_output_dir = os.path.join(output_dir, "regression_tree")
run_experiment(this_output_dir, iters, min_alpha, max_alpha, step, queries =
queries, executable = executable_longimpute, imputationMethod =
"REGRESSION_TREE")
this_output_dir = os.path.join(output_dir, "mean")
run_experiment(this_output_dir, iters, min_alpha, max_alpha, step, queries =
queries, executable = executable_longimpute, imputationMethod =
"MEAN")
this_output_dir = os.path.join(output_dir, "hot_deck")
run_experiment(this_output_dir, iters, min_alpha, max_alpha, step, queries =
queries, executable = executable_longimpute, imputationMethod =
"HOTDECK")
def run_acs_experiment():
catalog = catalog_default
executable = executable_default
this_output_dir = os.path.join(output_dir, "acs")
# Impute on base table
(f, acs_query) = tempfile.mkstemp()
os.write(f, b"SELECT AVG(c0) FROM acs_dirty;\n")
iters = 1
print("Running acs base...")
executable_max_heap = [executable[0]] + ["-Xmx3200m", "-Dsimpledb.ImputeSlow"] + executable[1:]
cmd = executable_max_heap + \
["experiment", catalog, acs_query, this_output_dir, str(iters), "--base"]
print(cmd)
subprocess.call(cmd)
print("Running acs base...done.")
# Impute using ImputeDB
iters = 220
min_alpha = 0.00
max_alpha = 1.00
step = 1.00
subprocess.call(executable +
["experiment", catalog, acs_query, this_output_dir,
str(iters), str(min_alpha), str(max_alpha), str(step)])
os.close(f)
def run_join_experiments():
join_output_dir = os.path.join(output_dir, "joins")
iters = 20
min_alpha = 0.00
max_alpha = 1.00
step = 1.00
# parameters specific to join workload
# number of queries to generate and evaluate per size of join
nqueries = 5
# minimum number of joins
min_njoins = 2
# maximum number of joins
max_njoins = 8
# evaluate each size of join separately
for njoins in range(min_njoins, max_njoins + 1):
print("Running join experiments. N-joins %d" % njoins)
# create sub directory for each size of joins
this_output_dir = os.path.join(join_output_dir, str(njoins))
# create workload, written out to base directory
workload = create_join_workload(njoins, nqueries)
# local file with queries
queries = "joins_n%d_queries.txt" % njoins
with open(queries, 'w') as f:
f.write(workload)
# execute this size of n joins
run_experiment(this_output_dir, iters, min_alpha, max_alpha, step, queries = queries, plan_only = True)
def run_experiment(this_output_dir, iters, min_alpha, max_alpha, step,
queries=None, executable=None, plan_only=False, imputationMethod=""):
if not os.path.isdir(this_output_dir):
os.makedirs(this_output_dir)
if not queries:
queries = queries_default
if not executable:
executable = executable_default
catalog = catalog_default
if imputationMethod:
imputationMethodOpt = ["--imputationMethod={}".format(imputationMethod)]
else:
imputationMethodOpt = []
planOnlyOpt = ["--planOnly={}".format(plan_only)]
# Timing using ImputeDB
subprocess.call(executable +
["experiment", catalog, queries, this_output_dir,
str(iters), str(min_alpha), str(max_alpha), str(step)] + planOnlyOpt +
imputationMethodOpt)
# Timing using impute on base table
if not plan_only:
subprocess.call(executable + ["experiment", catalog, queries,
this_output_dir, str(iters), "--base"] +
imputationMethodOpt)
if __name__ == "__main__":
import fire
fire.Fire({
"large05" : run_large05_experiment,
"large05_count" : run_large05_experiment_count,
"acs" : run_acs_experiment,
"joins" : run_join_experiments,
})
print("Done.")
```
|
{
"source": "jfeser/loopfinder",
"score": 2
}
|
#### File: jfeser/loopfinder/ffmpeg_writer.py
```python
import subprocess as sp
import os
import numpy as np
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
from moviepy.config import get_setting
from moviepy.tools import verbose_print
class FFMPEG_VideoWriter:
""" A class for FFMPEG-based video writing.
A class to write videos using ffmpeg. ffmpeg will write in a large
choice of formats.
Parameters
-----------
filename
Any filename like 'video.mp4' etc. but if you want to avoid
complications it is recommended to use the generic extension
'.avi' for all your videos.
size
Size (width,height) of the output video in pixels.
fps
Frames per second in the output video file.
codec
FFMPEG codec. It seems that in terms of quality the hierarchy is
'rawvideo' = 'png' > 'mpeg4' > 'libx264'
'png' manages the same lossless quality as 'rawvideo' but yields
smaller files. Type ``ffmpeg -codecs`` in a terminal to get a list
of accepted codecs.
Note for default 'libx264': by default the pixel format yuv420p
is used. If the video dimensions are not both even (e.g. 720x405)
another pixel format is used, and this can cause problem in some
video readers.
audiofile
Optional: The name of an audio file that will be incorporated
to the video.
preset
Sets the time that FFMPEG will take to compress the video. The slower,
the better the compression rate. Possibilities are: ultrafast,superfast,
veryfast, faster, fast, medium (default), slow, slower, veryslow,
placebo.
bitrate
Only relevant for codecs which accept a bitrate. "5000k" offers
nice results in general.
withmask
Boolean. Set to ``True`` if there is a mask in the video to be
encoded.
"""
def __init__(self, filename, size, fps, codec="libx264", audiofile=None,
preset="medium", bitrate=None, withmask=False,
logfile=None, threads=None, ffmpeg_params=None):
if logfile is None:
logfile = sp.PIPE
self.filename = filename
self.codec = codec
self.ext = self.filename.split(".")[-1]
# order is important
cmd = [
get_setting("FFMPEG_BINARY"),
'-y',
'-loglevel', 'error' if logfile == sp.PIPE else 'info',
'-f', 'rawvideo',
'-vcodec', 'rawvideo',
'-s', '%dx%d' % (size[0], size[1]),
'-pix_fmt', 'rgba' if withmask else 'rgb24',
'-r', '%.02f' % fps,
'-i', '-', '-an',
]
if audiofile is not None:
cmd.extend([
'-i', audiofile,
'-acodec', 'copy'
])
cmd.extend([
'-vcodec', codec,
'-preset', preset,
])
if ffmpeg_params is not None:
cmd.extend(ffmpeg_params)
if bitrate is not None:
cmd.extend([
'-b', bitrate
])
if threads is not None:
cmd.extend(["-threads", str(threads)])
if ((codec == 'libx264') and
(size[0] % 2 == 0) and
(size[1] % 2 == 0)):
cmd.extend([
'-pix_fmt', 'yuv420p'
])
cmd.extend([
filename
])
popen_params = {"stdout": DEVNULL,
"stderr": logfile,
"stdin": sp.PIPE}
# This was added so that no extra unwanted window opens on windows
# when the child process is created
if os.name == "nt":
popen_params["creationflags"] = 0x08000000
self.proc = sp.Popen(cmd, **popen_params)
def write_frame(self, img_array):
""" Writes one frame in the file."""
try:
self.proc.stdin.write(img_array.tostring())
except IOError as err:
ffmpeg_error = self.proc.stderr.read()
error = (str(err) + ("\n\nMoviePy error: FFMPEG encountered "
"the following error while writing file %s:"
"\n\n %s" % (self.filename, ffmpeg_error)))
if "Unknown encoder" in ffmpeg_error:
error = error+("\n\nThe video export "
"failed because FFMPEG didn't find the specified "
"codec for video encoding (%s). Please install "
"this codec or change the codec when calling "
"write_videofile. For instance:\n"
" >>> clip.write_videofile('myvid.webm', codec='libvpx')")%(self.codec)
elif "incorrect codec parameters ?" in ffmpeg_error:
error = error+("\n\nThe video export "
"failed, possibly because the codec specified for "
"the video (%s) is not compatible with the given "
"extension (%s). Please specify a valid 'codec' "
"argument in write_videofile. This would be 'libx264' "
"or 'mpeg4' for mp4, 'libtheora' for ogv, 'libvpx for webm. "
"Another possible reason is that the audio codec was not "
"compatible with the video codec. For instance the video "
"extensions 'ogv' and 'webm' only allow 'libvorbis' (default) as a"
"video codec."
)%(self.codec, self.ext)
elif "encoder setup failed" in ffmpeg_error:
error = error+("\n\nThe video export "
"failed, possibly because the bitrate you specified "
"was too high or too low for the video codec.")
elif "Invalid encoder type" in ffmpeg_error:
error = error + ("\n\nThe video export failed because the codec "
"or file extension you provided is not a video")
raise IOError(error)
def close(self):
self.proc.stdin.close()
if self.proc.stderr is not None:
self.proc.stderr.close()
self.proc.wait()
del self.proc
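# Added usage sketch (illustrative only, not part of the original module); the
# file name and the black frames are made up:
#   writer = FFMPEG_VideoWriter("out.mp4", size=(640, 480), fps=24)
#   for _ in range(48):  # two seconds of black video
#       writer.write_frame(np.zeros((480, 640, 3), dtype="uint8"))
#   writer.close()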
def ffmpeg_write_video(clip, filename, fps, codec="libx264", bitrate=None,
preset="medium", withmask=False, write_logfile=False,
audiofile=None, verbose=True, threads=None, ffmpeg_params=None):
""" Write the clip to a videofile. See VideoClip.write_videofile for details
on the parameters.
"""
if write_logfile:
logfile = open(filename + ".log", 'w+')
else:
logfile = None
verbose_print(verbose, "[MoviePy] Writing video %s\n"%filename)
writer = FFMPEG_VideoWriter(filename, clip.size, fps, codec = codec,
preset=preset, bitrate=bitrate, logfile=logfile,
audiofile=audiofile, threads=threads,
ffmpeg_params=ffmpeg_params)
nframes = int(clip.duration*fps)
for t,frame in clip.iter_frames(progress_bar=False, with_times=True,
fps=fps, dtype="uint8"):
if withmask:
mask = (255*clip.mask.get_frame(t))
if mask.dtype != "uint8":
mask = mask.astype("uint8")
frame = np.dstack([frame,mask])
writer.write_frame(frame)
writer.close()
if write_logfile:
logfile.close()
verbose_print(verbose, "[MoviePy] Done.\n")
def ffmpeg_write_image(filename, image, logfile=False):
""" Writes an image (HxWx3 or HxWx4 numpy array) to a file, using
ffmpeg. """
if image.dtype != 'uint8':
image = image.astype("uint8")
cmd = [ get_setting("FFMPEG_BINARY"), '-y',
'-s', "%dx%d"%(image.shape[:2][::-1]),
"-f", 'rawvideo',
'-pix_fmt', "rgba" if (image.shape[2] == 4) else "rgb24",
'-i','-', filename]
if logfile:
log_file = open(filename + ".log", 'w+')
else:
log_file = sp.PIPE
popen_params = {"stdout": DEVNULL,
"stderr": log_file,
"stdin": sp.PIPE}
if os.name == "nt":
popen_params["creationflags"] = 0x08000000
proc = sp.Popen(cmd, **popen_params)
out, err = proc.communicate(image.tostring())
if proc.returncode:
err = "\n".join(["[MoviePy] Running : %s\n" % cmd,
"WARNING: this command returned an error:",
err.decode('utf8')])
raise IOError(err)
del proc
def write_gif(clip, filename, fps=None, program= 'ffmpeg',
opt="OptimizeTransparency", fuzz=1, verbose=True, withmask=True,
loop=0, dispose=True, colors=None):
""" Write the VideoClip to a GIF file, without temporary files.
Converts a VideoClip into an animated GIF using ImageMagick
or ffmpeg.
Parameters
-----------
filename
Name of the resulting gif file.
fps
Number of frames per second (see note below). If it
isn't provided, then the function will look for the clip's
``fps`` attribute (VideoFileClip, for instance, have one).
program
Software to use for the conversion, either 'ImageMagick' or
'ffmpeg'.
opt
(ImageMagick only) optimization to apply, either
'optimizeplus' or 'OptimizeTransparency'.
fuzz
(ImageMagick only) Compresses the GIF by considering that
the colors that are less than fuzz% different are in fact
the same.
Notes
-----
The gif will be playing the clip in real time (you can
only change the frame rate). If you want the gif to be played
slower than the clip you will use ::
>>> # slow down clip 50% and make it a gif
>>> myClip.speedx(0.5).write_gif('myClip.gif')
"""
#
# We use processes chained with pipes.
#
# if program == 'ffmpeg'
# frames --ffmpeg--> gif
#
# if program == 'ImageMagick' and optimize == (None, False)
# frames --ffmpeg--> bmp frames --ImageMagick--> gif
#
#
# if program == 'ImageMagick' and optimize != (None, False)
# frames -ffmpeg-> bmp frames -ImagMag-> gif -ImagMag-> better gif
#
if fps is None:
fps = clip.fps
DEVNULL = open(os.devnull, 'wb')
delay= 100.0/fps
if clip.mask is None:
withmask = False
cmd1 = [get_setting("FFMPEG_BINARY"), '-y', '-loglevel', 'error',
'-f', 'rawvideo',
'-vcodec','rawvideo', '-r', "%.02f"%fps,
'-s', "%dx%d"%(clip.w, clip.h),
'-pix_fmt', ('rgba' if withmask else 'rgb24'),
'-i', '-']
popen_params = {"stdout": DEVNULL,
"stderr": DEVNULL,
"stdin": DEVNULL}
if os.name == "nt":
popen_params["creationflags"] = 0x08000000
if program == "ffmpeg":
popen_params["stdin"] = sp.PIPE
popen_params["stdout"] = DEVNULL
proc1 = sp.Popen(cmd1+[ '-pix_fmt', ('rgba' if withmask else 'rgb24'),
'-r', "%.02f"%fps, filename], **popen_params)
else:
popen_params["stdin"] = sp.PIPE
popen_params["stdout"] = sp.PIPE
proc1 = sp.Popen(cmd1+ ['-f', 'image2pipe', '-vcodec', 'bmp', '-'],
**popen_params)
if program == 'ImageMagick':
cmd2 = [get_setting("IMAGEMAGICK_BINARY"), '-delay', "%.02f"%(delay),
"-dispose" ,"%d"%(2 if dispose else 1),
'-loop', '%d'%loop, '-', '-coalesce']
if (opt in [False, None]):
popen_params["stdin"] = proc1.stdout
popen_params["stdout"] = DEVNULL
proc2 = sp.Popen(cmd2+[filename], **popen_params)
else:
popen_params["stdin"] = proc1.stdout
popen_params["stdout"] = sp.PIPE
proc2 = sp.Popen(cmd2+['gif:-'], **popen_params)
if opt:
cmd3 = [get_setting("IMAGEMAGICK_BINARY"), '-', '-layers', opt,
'-fuzz', '%d'%fuzz+'%'
]+(["-colors", "%d"%colors] if colors is not None else [])+[
filename]
popen_params["stdin"] = proc2.stdout
popen_params["stdout"] = DEVNULL
proc3 = sp.Popen(cmd3, **popen_params)
# We send all the frames to the first process
verbose_print(verbose, "\n[MoviePy] >>>> Building file %s\n"%filename)
verbose_print(verbose, "[MoviePy] Generating GIF frames...\n")
try:
for t,frame in clip.iter_frames(fps=fps, progress_bar=False,
with_times=True, dtype="uint8"):
if withmask:
mask = 255 * clip.mask.get_frame(t)
frame = np.dstack([frame, mask]).astype('uint8')
proc1.stdin.write(frame.tostring())
except IOError as err:
error = ("[MoviePy] Error: creation of %s failed because "
"of the following error:\n\n%s.\n\n."%(filename, str(err)))
if program == "ImageMagick":
error = error + ("This can be due to the fact that "
"ImageMagick is not installed on your computer, or "
"(for Windows users) that you didn't specify the "
"path to the ImageMagick binary in file conf.py." )
raise IOError(error)
if program == 'ImageMagick':
verbose_print(verbose, "[MoviePy] Optimizing the GIF with ImageMagick...\n")
proc1.stdin.close()
proc1.wait()
if program == 'ImageMagick':
proc2.wait()
if opt:
proc3.wait()
verbose_print(verbose, "[MoviePy] >>>> File %s is ready !"%filename)
```
|
{
"source": "jfeser/ocaml-tree-diff",
"score": 2
}
|
#### File: jfeser/ocaml-tree-diff/reference_test.py
```python
import os
import subprocess
logpath = '/home/<user>/log.txt'
timepath = '/usr/bin/time'
timeout_sec = 2*60*60 # 2 hr command time limit.
def test_cmd(cmd, label):
print("testing", label)
cmdlet = '{} --format="%M %E" --output={} -a {}'.format(timepath, logpath, cmd)
interrupted = False
try:
completed = subprocess.run(cmdlet, shell=True, timeout=timeout_sec)
if completed.returncode != 0:
interrupted = True
except subprocess.TimeoutExpired:
interrupted = True
if not interrupted:
with open(logpath, "a") as myfile:
myfile.write("{}\n\n".format(label))
return interrupted
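# Added note (not part of the original): test_cmd wraps the command with
# /usr/bin/time --format="%M %E" so the peak resident memory (in KB) and the
# wall-clock time are appended to logpath; a non-zero exit code or hitting the
# 2-hour timeout marks the run as interrupted, in which case the label line is
# not written.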
def test_mine(f1, f2, file_label):
label = "mine-{}".format(file_label)
cmd = './reference.exe -- -s {} -t {}'.format(f1,f2)
return test_cmd(cmd, label)
def test_jsondiff(f1, f2, file_label):
label = "jsondiff-{}".format(file_label)
cmd = "python3 ../jsondiff/cli.py {} {}".format(f1,f2)
res = test_cmd(cmd, label)
print("")
return res
def main():
rel_data_path = "../data/"
targets = range(1,9)
if os.path.exists(logpath):
os.remove(logpath)
os.system("dune build reference/reference.exe")
os.system("cp _build/default/reference/reference.exe .")
for target in targets:
f1 = "{}{}a.json".format(rel_data_path,target)
f2 = "{}{}b.json".format(rel_data_path,target)
file_label = "{}-{}-{}".format(target, os.path.getsize(f1), os.path.getsize(f2))
ctd_mine = True
ctd_jsondiff = True
for i in range(3):
if ctd_mine and test_mine(f1, f2, file_label):
ctd_mine = False
print("mine-{} timed out, skipping in future".format(file_label))
if ctd_jsondiff and test_jsondiff(f1, f2, file_label):
ctd_jsondiff = False
print("jsondiff-{} timed out, skipping in future".format(file_label))
os.system("rm reference.exe")
main()
```
|
{
"source": "jfeudeline/advent-of-code-2019",
"score": 4
}
|
#### File: jfeudeline/advent-of-code-2019/day-01.py
```python
def fuel_requirement(mass):
fuel = mass // 3 - 2
return fuel if fuel>0 else 0
# Part 1
with open("input-01.txt") as f:
total_fuel_requirement = sum(fuel_requirement(int(mass)) for mass in f)
print(total_fuel_requirement)
# Part 2
def rec_fuel_requirement(mass):
if mass <= 0:
return 0
else:
fuel = fuel_requirement(mass)
return fuel + rec_fuel_requirement(fuel)
with open("input-01.txt") as f:
total_rec_fuel_requirement = sum(rec_fuel_requirement(int(mass)) for mass in f)
print(total_rec_fuel_requirement)
```
#### File: jfeudeline/advent-of-code-2019/day-03.py
```python
with open("input-03.txt") as f:
wires = []
for line in f:
wires.append(line.split(','))
# Compute the coordinates visited by the two wires
def get_coords_line(start, move):
"Compute the coordinates visited along one line segment"
directions = {
'R' : (0, 1),
'L' : (0, -1),
'U' : (1, 1),
'D' : (1, -1)
}
dir = directions[move[0]]
line = []
now = list(start)
for step in range(int(move[1:])):
now[dir[0]] += dir[1]
line.append(tuple(now))
return line
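# Added example (not part of the original): get_coords_line((0, 0), 'R3')
# returns [(1, 0), (2, 0), (3, 0)]; every unit step is recorded, which is what
# lets Part 2 recover wire lengths with list.index() further down.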
def get_coords_path(wire):
"Compute the coordinates visited along a full path"
path = [(0,0)]
for step in wire:
path += get_coords_line(path[-1], step)
return path
# Coordinates of the points visited by each path
paths=[]
for wire in wires:
paths.append(get_coords_path(wire))
# Coordinates of the intersections of the two paths
intersections = set(paths[0][1:]).intersection(set(paths[1][1:]))
# Part 1
intersections_distances = (abs(x) + abs(y) for x, y in intersections)
print(min(intersections_distances))
# Part 2
intersections_lengths = (paths[0].index(coord) + paths[1].index(coord) for coord in intersections)
print(min(intersections_lengths))
```
#### File: jfeudeline/advent-of-code-2019/day-07.py
```python
import itertools
from intcode import run_intcode
def start_amp(program, phase):
amp = run_intcode(program)
next(amp)
amp.send(phase)
return amp
def start_amps(program, phases):
amps = []
for phase in phases:
amps.append(start_amp(program, phase))
return amps
def run_amps(amps, start_input):
next_input = start_input
for amp in amps:
next_input = amp.send(next_input)
try :
next(amp)
except StopIteration:
pass
return next_input
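# Added note (assumption about the intcode generator protocol, not stated in
# the original): run_amps feeds start_input through the chain; each amplifier
# receives the previous output via send() and the value it returns becomes the
# next amplifier's input, while the extra next() call advances the generator
# until it either waits for its next input or halts (StopIteration).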
with open("input-07.txt") as f:
input_code = [int(code) for code in f.readline().split(',')]
# Part 1
phases_permutations = list(itertools.permutations(range(5)))
signals = (run_amps(start_amps(input_code, phases), 0) for phases in phases_permutations)
print(f"Highest signal Part 1 : {max(signals)}")
# Part 2
def iterative_run_amps(amps, start_input):
next_input = start_input
while True:
try:
next_input = run_amps(amps, next_input)
except StopIteration:
return next_input
phases_permutations = list(itertools.permutations(range(5,10)))
signals = (iterative_run_amps(start_amps(input_code, phases), 0) for phases in phases_permutations)
print(f"Highest signal Part 2 : {max(signals)}")
```
#### File: jfeudeline/advent-of-code-2019/day-19.py
```python
from intcode import run_intcode
with open("input-19.txt") as f:
code = [int(x) for x in f.readline().strip().split(',')]
def test_beam(point):
drone = run_intcode(code)
next(drone)
drone.send(point[0])
return drone.send(point[1])
# Part 1
number_of_beam_points = sum(test_beam((x,y)) for x in range(50) for y in range(50))
print(number_of_beam_points)
# Part 2
x, y = 0, 0
while True:
if test_beam((x, y+99)):
if test_beam((x+99, y)):
break
else:
y += 1
else:
x += 1
print(10000 * x + y)
```
|
{
"source": "JFF-Bohdan/dupfilesremover",
"score": 2
}
|
#### File: dupfilesremover/command_line/dupfilesremover.py
```python
import datetime
import sys
from dupfilesremover.app import DuplicateImagesRemoverApplication
from loguru import logger
def main():
tm_begin = datetime.datetime.utcnow()
logger.remove()
logger.add(sys.stderr, format="{time} {level} {message}", level="INFO")
app = DuplicateImagesRemoverApplication(logger)
app.run()
tm_end = datetime.datetime.utcnow()
logger.info("app finished @ {}".format(tm_end - tm_begin))
if __name__ == "__main__":
main()
```
|
{
"source": "JFF-Bohdan/item_lookup",
"score": 2
}
|
#### File: JFF-Bohdan/item_lookup/baseline_initial_loading_time.py
```python
import sys
import time
from typing import Dict
import humanize
from loguru import logger
from shared.command_line_args_support import base_arg_parser
from shared.filtering_and_transformation import convert_to_two_columns_line, \
convert_tuple_of_strings_to_tuple_of_integers, is_valid_format
from shared.memory_usage import get_current_memory_usage
from terminaltables import AsciiTable
from tqdm import tqdm
valid_entries_count = 0
invalid_entries_count = 0
def count_records_stat(input_file: str):
global invalid_entries_count
global valid_entries_count
with open(input_file, "r", encoding="utf-8") as input_file:
for index, line in enumerate(tqdm(input_file)):
# skipping header
if not index:
continue
two_columns_tuple = convert_to_two_columns_line(line)
if not two_columns_tuple:
invalid_entries_count += 1
continue
if not is_valid_format(two_columns_tuple):
invalid_entries_count += 1
continue
_ = convert_tuple_of_strings_to_tuple_of_integers(two_columns_tuple)
valid_entries_count += 1
def render_stat(stat_dict: Dict) -> str:
data = [["Parameter", "Value"]]
for k, v in stat_dict.items():
data.append([k, v])
table = AsciiTable(data)
return table.table
def main():
logger.info("Application started")
parser = base_arg_parser()
args = parser.parse_args()
logger.debug(f"args: {args}")
timestamp_begin = time.perf_counter_ns()
count_records_stat(args.input_file)
timestamp_end = time.perf_counter_ns()
execution_time_ns = (timestamp_end - timestamp_begin)
execution_time_ms = execution_time_ns / 1_000_000
human_readable_memory_usage = humanize.naturalsize(get_current_memory_usage())
stat_data = {
"Valid entries count": valid_entries_count,
"Invalid entries count": invalid_entries_count,
"Execution time (ms)": round(execution_time_ms, 3),
"Execution time (ns)": execution_time_ns,
"Current memory usage": human_readable_memory_usage
}
stat_output = render_stat(stat_data)
logger.info(f"Execution stat:\n{stat_output}")
logger.info("Application finished")
if __name__ == "__main__":
main()
sys.exit(0)
```
#### File: item_lookup/shared/valid_records_generator.py
```python
from typing import Iterator, Tuple
from .file_reader import yield_strings_from_file
from .filtering_and_transformation import convert_to_two_columns_line, convert_tuple_of_strings_to_tuple_of_integers, \
is_valid_format
def get_all_valid_records(
input_file: str,
logger
) -> Iterator[Tuple[int, int]]:
file_content = yield_strings_from_file(
file_name=input_file,
logger=logger,
records_to_skip=1
)
wrong_lines_count = 0
for line in file_content:
line = line.strip()
two_columns_tuple = convert_to_two_columns_line(line)
if not two_columns_tuple:
logger.debug(f"Wrong line '{line}': non two columns line")
wrong_lines_count += 1
continue
if not is_valid_format(two_columns_tuple):
logger.debug(f"Wrong line '{line}': invalid format")
wrong_lines_count += 1
continue
two_columns_tuple = convert_tuple_of_strings_to_tuple_of_integers(two_columns_tuple)
yield two_columns_tuple
logger.info(f"Wrong lines count: {wrong_lines_count}")
```
#### File: JFF-Bohdan/item_lookup/tuned_pipelined_loading_time.py
```python
import sys
import time
from typing import Dict, Iterator, Tuple
import humanize
from loguru import logger
from shared.command_line_args_support import base_arg_parser
from shared.memory_usage import get_current_memory_usage
from terminaltables import AsciiTable
from tqdm import tqdm
DATA_LIMIT = None
class DataProcessingPipeline(object):
def __init__(self, file_name: str, data_limit=None):
self._file_name = file_name
self._valid_entries_count = 0
self._invalid_entries_count = 0
self._data_limit = data_limit
def run(self):
for _ in tqdm(self._convert_numbers_to_integers()):
self._valid_entries_count += 1
def _convert_numbers_to_integers(self) -> Iterator[Tuple[int, int]]:
for row in self._filter_for_lines_with_two_columns():
try:
result = tuple(int(column) for column in row)
if (
(result[0] > 9999) or
(result[1] > 999999) or
(str(result[0]).rjust(4, "0") != row[0]) or
(str(result[1]).rjust(6, "0") != row[1])
):
self._invalid_entries_count += 1
continue
yield result
except ValueError:
self._invalid_entries_count += 1
def _filter_for_lines_with_two_columns(self) -> Iterator[Tuple[str, str]]:
for line in self._yield_strings_from_file():
line = str(line).strip() if line else line
if not line:
self._invalid_entries_count += 1
continue
split_data = line.split(",")
if len(split_data) != 2:
self._invalid_entries_count += 1
continue
yield tuple(split_data)
def _yield_strings_from_file(self) -> Iterator[str]:
logger.debug(f"Loading data from file '{self._file_name}'")
with open(self._file_name, "r", encoding="utf-8") as input_file:
for index, line in enumerate(input_file):
# skip header
if not index:
continue
if self._data_limit and (index >= self._data_limit):
break
yield line
@property
def valid_entries_count(self):
return self._valid_entries_count
@property
def invalid_entries_count(self):
return self._invalid_entries_count
def render_stat(stat_dict: Dict) -> str:
data = [["Parameter", "Value"]]
for k, v in stat_dict.items():
data.append([k, v])
table = AsciiTable(data)
return table.table
def main():
logger.info("Application started")
parser = base_arg_parser()
args = parser.parse_args()
logger.debug(f"args: {args}")
pipeline = DataProcessingPipeline(args.input_file, DATA_LIMIT)
timestamp_begin = time.perf_counter_ns()
pipeline.run()
timestamp_end = time.perf_counter_ns()
execution_time_ns = (timestamp_end - timestamp_begin)
execution_time_ms = execution_time_ns / 1_000_000
execution_time_secs = execution_time_ms / 1_000
human_readable_memory_usage = humanize.naturalsize(get_current_memory_usage())
human_readable_execution_time = humanize.naturaldelta(execution_time_secs)
stat_data = {
"Valid entries count": pipeline.valid_entries_count,
"Invalid entries count": pipeline.invalid_entries_count,
"Execution time (ms)": round(execution_time_ms, 3),
"Execution time (ns)": execution_time_ns,
"Execution time (human readable)": human_readable_execution_time,
"Current memory usage": human_readable_memory_usage
}
stat_output = render_stat(stat_data)
logger.info(f"Execution stat:\n{stat_output}")
logger.info("Application finished")
if __name__ == "__main__":
main()
sys.exit(0)
```
|
{
"source": "JFF-Bohdan/large_files_parsing",
"score": 3
}
|
#### File: data_cleaner/support_functions/support_functions.py
```python
import codecs
import csv
import logging
def init_logger(log_name=__name__, log_level=logging.DEBUG):
logger = logging.getLogger(log_name)
logger.setLevel(log_level)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch = logging.StreamHandler()
ch.setLevel(log_level)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.propagate = False
return logger
def iterate_over_csv_input_data(input_file_name, delimiter, quote_char):
with codecs.open(input_file_name, "r", "utf-8") as input_csvfile:
data_reader = csv.reader(input_csvfile, delimiter=delimiter, quotechar=quote_char)
for row_data in data_reader:
yield row_data
```
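A minimal usage sketch for the two helpers above, assuming the module is importable from the path shown in the file header; the CSV file name and dialect are illustrative only.
```python
# Hypothetical usage of init_logger() and iterate_over_csv_input_data();
# "data.csv" and the dialect parameters are illustrative, not from the project.
from support_functions.support_functions import init_logger, iterate_over_csv_input_data

logger = init_logger("csv_demo")
for row in iterate_over_csv_input_data("data.csv", delimiter=",", quote_char='"'):
    # each row is a list of column values read from the UTF-8 encoded file
    logger.debug("row: %s", row)
```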
|
{
"source": "JFF-Bohdan/nqueens",
"score": 3
}
|
#### File: nqueens/system/logs_support.py
```python
import config
import logging
import sys
def init_logger(mainLoggerName=__name__):
logger = logging.getLogger(mainLoggerName)
# create formatter
formatter = logging.Formatter(config.LOG_FORMAT)
# adding console handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(config.LOG_LEVEL)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
logger.setLevel(config.LOG_LEVEL)
return logger
```
#### File: nqueens/system/solver_impl.py
```python
from system.shared import LastErrorHolder
class NQueensProblemRecursiveSolver(LastErrorHolder):
"""
N-Queens problem solver. Recursive implementation
"""
def __init__(self, dimensions):
if dimensions <= 0:
raise ValueError("dimenstions must be > 0")
super().__init__()
self.dimensions = dimensions
# initializing empty board
self.board = [[0] * self.dimensions for stub in range(self.dimensions)]
def __canSet(self, row, col):
"""
        Checks whether a queen can be placed in the given row and column
        :param row: row number
        :param col: column number
        :return: True when a queen can be placed in the given row and column, otherwise False
"""
if self.board[row][col]:
return False
# checking in this row on left side
for i in range(0, col):
if self.board[row][i]:
return False
# checking upper diagonal on left side
i = row
j = col
while (i >= 0) and (j >= 0):
if self.board[i][j]:
return False
i -= 1
j -= 1
# checking lower diagonal on left side
i = row
j = col
while (j >= 0) and (i < self.dimensions):
if self.board[i][j]:
return False
i += 1
j -= 1
return True
def __solve_worker(self, col=0):
"""
Low level generator of solutions
:param col: column to calculate
:return: solutions
"""
if col == self.dimensions:
yield self.board
return
for i in range(0, self.dimensions):
if self.__canSet(i, col):
self.board[i][col] = 1
yield from self.__solve_worker(col + 1)
self.board[i][col] = 0
def solve(self):
"""
        Solutions generator. Calculates all solutions and yields them
:return: problem solutions
"""
yield from self.__solve_worker(0)
class NQueensProblemSolverWrapper(LastErrorHolder):
"""
    Wrapper for the N-Queens problem solver. Finds all solutions, counts the total
    number of solutions and can execute a callback function.
    The callback can process each solution, for example: print it to a log, write it
    to a file or save it into a database.
"""
def __init__(self, logger, dimensions, cls=NQueensProblemRecursiveSolver):
"""
Initializer for wrapper
:param logger: logger
:param dimensions: chessboard dimensions
:param cls: worker class
"""
super().__init__()
self.dimensions = dimensions
self.worker = cls(self.dimensions)
self.logger = logger
self.total_solutions_count = 0
self.solution_handler = None
self.solution_handler_kwargs = None
def __handle_solution(self, solution):
if self.solution_handler is None:
return
if self.solution_handler_kwargs is None:
self.solution_handler(solution)
return
self.solution_handler(solution, **self.solution_handler_kwargs)
def process(self):
self.logger.debug("solver initialized for chessboard size: {}".format(self.dimensions))
have_callback = (self.solution_handler is not None)
for solution in self.worker.solve():
if have_callback:
self.__handle_solution(solution)
self.total_solutions_count += 1
if self.total_solutions_count == 0:
return self.setError("no solutions found")
self.logger.info("solutions search complete, found solutions count: {}".format(self.total_solutions_count))
return True
```
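A small wiring sketch for the solver wrapper above, assuming the `system.solver_impl` import path implied by the file location; the print callback is invented for illustration.
```python
# Hedged usage sketch: wire NQueensProblemSolverWrapper to a print callback.
# The import path mirrors the file location above and is an assumption.
import logging

from system.solver_impl import NQueensProblemSolverWrapper


def print_solution(solution):
    # solution is a list of rows, each row a list of 0/1 cells
    for row in solution:
        print(" ".join("Q" if cell else "." for cell in row))
    print()


logging.basicConfig(level=logging.INFO)
wrapper = NQueensProblemSolverWrapper(logging.getLogger("nqueens"), dimensions=6)
wrapper.solution_handler = print_solution
wrapper.process()
print("solutions found: {}".format(wrapper.total_solutions_count))
```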
#### File: nqueens/system/stopwatch.py
```python
import time
class Stopwatch:
"""
Calculates work time
"""
def __init__(self,
auto_start=False,
interval_print=False,
reset_timer_on_interval_print=False):
"""
        :param auto_start: if True, start measuring time immediately
        :param interval_print: if True, __str__ reports the interval since start instead of the stopped time
        :param reset_timer_on_interval_print: if True, reset the timer after an interval print
"""
self.start_at = 0
self.stop_at = 0
self.interval_print = interval_print
self.prefix = "DONE at "
self.reset_timer_on_interval_print = reset_timer_on_interval_print
if auto_start:
self.start()
def running(self):
"""
Returns True when stopwatch running
:return: True when running, otherwise False
"""
return self.start_at > 0
def stopped(self):
"""
Returns True when timer stopped
:return: True when stopped, otherwise False
"""
return self.stop_at > 0
def start(self):
"""
Starts timer
:return: None
"""
self.start_at = time.time()
self.stop_at = 0
def stop(self):
"""
Stops timer
:return: None
"""
self.stop_at = time.time()
def secs(self):
"""
Returns elapsed seconds count from previous timer start
:return:
"""
return round(self.stop_at - self.start_at, 2)
def interval_sec(self):
"""
Returns elapsed seconds count from start to CURRENT moment
:return:
"""
ret_value = round((time.time() - self.start_at), 2)
if self.reset_timer_on_interval_print:
self.start()
return ret_value
@staticmethod
def print_seconds_nice(seconds, prefix=""):
"""
Static method for interval print in human readable format
:param seconds: seconds count
:param prefix: prefix for print
:return: string which contains human readable representation of interval
"""
if seconds < 60:
return "{}{}s".format(prefix, seconds)
minutes = seconds // 60
seconds -= minutes * 60
if minutes < 60:
seconds = round(seconds, 2)
return "{}{}m {}s".format(prefix, minutes, seconds)
hours = minutes // 60
minutes -= hours * 60
if hours < 24:
minutes = int(minutes)
seconds = round(seconds, 2)
return "{}{}h {}m {}s".format(prefix, hours, minutes, seconds)
days = hours // 24
hours -= days * 24
seconds = round(seconds, 2)
return "{}{}d {}h {}m {}s".format(prefix, days, hours, minutes, seconds)
def __str__(self):
if self.interval_print:
seconds = round(self.interval_sec(), 2)
else:
seconds = round(self.secs(), 2)
return Stopwatch.print_seconds_nice(seconds)
```
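A quick usage sketch for the Stopwatch class above; the import path is assumed from the file location.
```python
# Minimal Stopwatch usage sketch; "system.stopwatch" is assumed from the file path.
import time

from system.stopwatch import Stopwatch

sw = Stopwatch(auto_start=True)
time.sleep(1.2)  # simulated work
sw.stop()
print(sw)  # roughly "1.2s"
print(Stopwatch.print_seconds_nice(3725))  # "1h 2m 5s"
```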
|
{
"source": "JFF-Bohdan/pygsmmodule",
"score": 2
}
|
#### File: pygsmmodule/tests/test_imei_support.py
```python
from pygsmmodule.imei.imei import ImeiSupport
def test_can_check_valid_imeis():
valid_imeis = [
356938035643809,
490154203237518,
"356938035643809",
358065019104265,
"357805023984942",
356938035643809
]
for imei in valid_imeis:
assert ImeiSupport.is_valid(imei)
def test_can_detect_invalid_imeis():
invalid_imeis = [
358065019104263,
"357805023984941",
356938035643801
]
for imei in invalid_imeis:
assert not ImeiSupport.is_valid(imei)
def test_generates_valid_imeis():
imeis_to_test_count = 100
for _ in range(imeis_to_test_count):
value = ImeiSupport.generate_new()
assert value is not None
assert len(str(value)) == 16
assert ImeiSupport.is_valid(value)
def test_generates_valid_sequental_imeis():
current_imei = ImeiSupport.generate_new()
assert ImeiSupport.is_valid(current_imei)
imeis_to_test_count = 100
for _ in range(imeis_to_test_count):
current_imei = ImeiSupport.next(current_imei)
assert ImeiSupport.is_valid(current_imei)
def test_generates_predicted_sequental_ids():
expected_results = [
(358065019104273, 358065019104281),
(357805023984942, 357805023984959),
(356938035643809, 356938035643817)
]
for current_value, next_value in expected_results:
generated_value = ImeiSupport.next(current_value)
assert generated_value == next_value
```
|
{
"source": "JFF-Bohdan/pyimei",
"score": 3
}
|
#### File: JFF-Bohdan/pyimei/test.py
```python
from pyimei import ImeiSupport
def checkImeisArray(imeis):
for imei in imeis:
if ImeiSupport.isValid(imei):
print("IMEI: '{}' is valid".format(imei))
else:
print("IMEI '{}' is NOT valid".format(imei))
#testing classes
ImeiSupport.test()
valid_imeis = [
356938035643809,
490154203237518,
"356938035643809"
]
invalid_imeis = [
358065019104263,
"357805023984941",
356938035643801
]
checkImeisArray(valid_imeis)
checkImeisArray(invalid_imeis)
print("Generating independent FAKE imeis...")
RANDOM_IMEIS_QTY = 5
for i in range(RANDOM_IMEIS_QTY):
print("\tfake IMEI[{}] = {}".format(i+1, ImeiSupport.generateNew()))
print("Generating sequental FAKE imeis:")
DEP_RANDOM_IMEIS_QTY = 5
startImei = ImeiSupport.generateNew()
currentImei = startImei
print("start IMEI: {}".format(startImei))
for i in range(DEP_RANDOM_IMEIS_QTY):
currentImei = ImeiSupport.next(currentImei)
print("\tfake IMEI[{}] = {}".format(i+1, currentImei))
print("DONE")
```
|
{
"source": "JFF-Bohdan/reqlog",
"score": 3
}
|
#### File: reqlog/dbschema/shared.py
```python
import ksuid
def get_string_ksuid():
return str(ksuid.ksuid())
def get_base62_ksuid():
return ksuid.ksuid().toBase62()
```
#### File: reqlog/dbschema/tbl_data_collecting_device.py
```python
import datetime
from reqlog.app import Base
import sqlalchemy
from sqlalchemy import BigInteger, Boolean, Column, DateTime, Integer, Sequence, String, Text
from .shared import get_base62_ksuid, get_string_ksuid
class DataCollectingDevice(Base):
__tablename__ = "data_collecting_device"
    # TODO: add a constraint that the token must be unique
    # TODO: add a constraint that the uid must be unique
dcd_id = Column(BigInteger().with_variant(Integer, "sqlite"), Sequence("gen_data_collecting_device"), primary_key=True)
dcn_id = Column(
BigInteger().with_variant(Integer, "sqlite"),
sqlalchemy.ForeignKey("data_collecting_nodes.dcn_id"),
nullable=False
)
dcd_uid = Column(String(255), nullable=False, default=get_string_ksuid)
write_token = Column(String(255), nullable=True, default=get_base62_ksuid)
read_token = Column(String(255), nullable=True)
dcd_name = Column(String(255), nullable=False)
adding_dts = Column(DateTime, nullable=False, default=datetime.datetime.utcnow)
update_dts = Column(DateTime, nullable=False, default=datetime.datetime.utcnow)
last_activity = Column(DateTime, nullable=True)
description = Column(Text, nullable=True, default="")
is_in_use = Column(Boolean, nullable=False, default=True)
def __init__(self):
pass
```
#### File: support/datatablessupport/datatablessupport.py
```python
import re
class DataTableParserSupportFunctions:
def __init__(self): # pragma: no coverage
pass
@staticmethod
def convert_to_integer(value, defaultValue=None):
if value is None:
return None
if not str(value).isnumeric():
return defaultValue
return int(value)
@staticmethod
def find_middle_value(value, prefix, suffix):
pattern = "{}(.+?){}".format(prefix, suffix)
m = re.search(pattern, value)
if not m:
return None
found = m.group(1)
return found
@staticmethod
def db_bool_to_bool(value):
if value is None:
return False
if not str(value).isnumeric():
return False
if int(value) != 0:
return True
return False
@staticmethod
def scan_boolean(value):
if value is None:
return False
value = str(value)
if value.isnumeric():
return DataTableParserSupportFunctions.db_bool_to_bool(value)
value = value.lower()
if value == "true":
return True
return False
@staticmethod
def safe_log_error(logger, message):
if not logger:
return
logger.error(message)
class BaseAdapter(object):
def __init__(self, request=None): # pragma: no coverage
self.request = request
def get_all_keys(self):
pass
def get_value(self, field_name, field_default_value=None):
pass
class DataTablesBottleRequestAdapter(BaseAdapter):
def __init__(self, request):
super().__init__(request)
def get_all_keys(self):
return self.request.params.keys()
def get_value(self, field_name, field_default_value=None):
return self.request.params.get(field_name, field_default_value)
class DataTablesFlaskRequestAdapter(BaseAdapter):
def __init__(self, request):
super().__init__(request)
def get_all_keys(self):
return list(self.request.values.keys())
def get_value(self, field_name, field_default_value=None):
return self.request.values.get(field_name, field_default_value)
```
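A few illustrative checks for the static helpers above; the import path follows the file header and is an assumption.
```python
# Behaviour checks for DataTableParserSupportFunctions, inferred from the code above.
from support.datatablessupport.datatablessupport import DataTableParserSupportFunctions as F

assert F.convert_to_integer("42") == 42
assert F.convert_to_integer("oops", defaultValue=0) == 0
assert F.find_middle_value("columns[3][data]", r"columns\[", r"\]") == "3"
assert F.scan_boolean("true") is True
assert F.scan_boolean("0") is False
assert F.scan_boolean(None) is False
```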
#### File: support/db_init/db_init.py
```python
import datetime
import random
import ksuid
from reqlog.dbschema import DataCollectingDevice, DataCollectingNode, DcAvailableScope, LinkUserToScope, LoggedRequest, RequestParameters, User
from reqlog.support.shared import create_hash
from .initialization_consts import ADMIN_USER_EMAIL, ADMIN_USER_LOGIN, ADMIN_USER_PASSWORD, \
DEMO_CONFIGURATION, DEMO_USER_EMAIL, DEMO_USER_LOGIN, DEMO_USER_NAME, DEMO_USER_PASSWORD, \
DEMO_USER_UID, POSSIBLE_PARAMS_VALUE, POSSIBLE_TEST_PARAMS_NAME
def init_database(logger, session):
add_initial_users(session)
session.flush()
add_dictionaries_data(session)
session.flush()
give_all_scopes_to_admin(session)
session.flush()
dcns_qty, dcds_qty = add_demo_nodes_and_devices(session)
logger.info("added DCNs count - {}".format(dcns_qty))
logger.info("added DCDs count - {}".format(dcds_qty))
session.flush()
session.commit()
def add_initial_users(session):
if not is_user_exists_by_login(session, ADMIN_USER_LOGIN):
user = User()
user.password_hash, user.password_salt = create_hash(ADMIN_USER_PASSWORD)
user.user_name = ADMIN_USER_LOGIN
user.user_email = ADMIN_USER_EMAIL
user.user_login = ADMIN_USER_LOGIN
session.add(user)
if not is_user_exists_by_login(session, DEMO_USER_LOGIN):
user = User()
user.password_hash, user.password_salt = create_hash(DEMO_USER_PASSWORD)
user.user_name = DEMO_USER_NAME
user.user_email = DEMO_USER_EMAIL
user.user_login = DEMO_USER_LOGIN
user.user_uid = DEMO_USER_UID
session.add(user)
def is_user_exists_by_login(session, login):
user_id = session.query(
User.user_id
).filter(
User.user_login == login
).scalar()
return user_id is not None
def add_demo_nodes_and_devices(session):
user = session.query(
User
).filter(
User.user_login == DEMO_USER_LOGIN
).scalar()
assert user is not None
dcns_qty = 0
dcds_qty = 0
for dcn_item in DEMO_CONFIGURATION:
node = session.query(
DataCollectingNode
).filter(
DataCollectingNode.owner_id == user.user_id,
DataCollectingNode.dcn_name == dcn_item["dcn_name"]
).scalar()
if node is None:
node = DataCollectingNode()
node.dcn_name = dcn_item["dcn_name"]
node.dcn_uid = str(ksuid.ksuid())
node.owner_id = user.user_id
node.description = dcn_item["dcn_description"]
session.add(node)
session.flush()
dcns_qty += 1
for dcd_item in dcn_item["devices"]:
dcd = session.query(
DataCollectingDevice
).filter(
DataCollectingDevice.dcn_id == node.dcn_id,
DataCollectingDevice.dcd_name == dcd_item["dcd_name"],
DataCollectingDevice.write_token == dcd_item["write_token"],
DataCollectingDevice.read_token == dcd_item["read_token"]
).scalar()
if dcd is not None:
continue
dcd = DataCollectingDevice()
dcd.dcn_id = node.dcn_id
dcd.dcd_uid = str(ksuid.ksuid())
dcd.write_token = dcd_item["write_token"]
dcd.read_token = dcd_item["read_token"]
dcd.dcd_name = dcd_item["dcd_name"]
dcd.description = dcd_item["dcd_description"]
session.add(dcd)
session.flush()
dcds_qty += 1
return dcns_qty, dcds_qty
def add_dictionaries_data(session):
for (scope_code, scope_name) in DcAvailableScope.get_all_possible_scopes():
stub_id = session.query(
DcAvailableScope.scope_id
).filter(
DcAvailableScope.scope_code == scope_code
).scalar()
if stub_id is not None:
continue
item = DcAvailableScope()
item.scope_code = scope_code
item.scope_name = scope_name
item.scope_description = scope_name
session.add(item)
def give_all_scopes_to_admin(session):
user_id = session.query(
User.user_id
).filter(
User.user_login == ADMIN_USER_LOGIN
).scalar()
assert user_id is not None
data = session.query(
DcAvailableScope.scope_id
).filter(
DcAvailableScope.is_in_use == True
).all()
for (scope_id, ) in data:
stub_id = session.query(
LinkUserToScope.row_id
).filter(
LinkUserToScope.user_id == user_id,
LinkUserToScope.scope_id == scope_id
).scalar()
if stub_id is not None:
continue
link = LinkUserToScope()
link.user_id = user_id
link.scope_id = scope_id
session.add(link)
def gen_fake_requests_for_demo_user(logger, session):
random.seed()
user = session.query(
User
).filter(
User.user_login == DEMO_USER_LOGIN
).scalar()
assert user is not None
dcns = session.query(
DataCollectingNode.dcn_id
).filter(
DataCollectingNode.owner_id == user.user_id
).subquery()
avail_dcd_list = session.query(
DataCollectingDevice.dcd_id,
DataCollectingDevice.dcn_id
).filter(
DataCollectingDevice.dcn_id.in_(dcns)
).all()
max_requests_count = random.randint(100, 1000)
logger.info("devices count in Demo organization - {}".format(len(avail_dcd_list)))
params_to_add = []
dcn_last_activity_dts = {}
dcd_last_activity_dts = {}
for _ in range(max_requests_count):
req = LoggedRequest()
req.method = random.choice(["get", "post"])
dcd_info = random.choice(avail_dcd_list)
req.dcd_id = dcd_info[0]
dcn_last_activity_dts[dcd_info[1]] = datetime.datetime.utcnow()
dcd_last_activity_dts[dcd_info[0]] = datetime.datetime.utcnow()
session.add(req)
session.flush()
for _ in range(random.randint(3, 10)):
param = RequestParameters()
param.request_id = req.request_id
param.parameter_name = random.choice(POSSIBLE_TEST_PARAMS_NAME)
v = random.choice(POSSIBLE_PARAMS_VALUE)
if callable(v):
v = v()
param.parameter_value = v
params_to_add.append(param)
session.bulk_save_objects(params_to_add)
session.flush()
for dcn_id in dcn_last_activity_dts.keys():
session.query(
DataCollectingNode
).filter(
DataCollectingNode.dcn_id == dcn_id
).update(
{
DataCollectingNode.last_activity_dts: dcn_last_activity_dts[dcn_id]
},
synchronize_session=False
)
for dcd_id in dcd_last_activity_dts.keys():
session.query(
DataCollectingDevice
).filter(
DataCollectingDevice.dcd_id == dcd_id
).update(
{
                DataCollectingDevice.last_activity: dcd_last_activity_dts[dcd_id]
},
synchronize_session=False
)
logger.info("added requests count - {}".format(max_requests_count))
logger.info("added params count - {}".format(len(params_to_add)))
```
#### File: tests/functional/api_collect_test.py
```python
from reqlog.support.db_init.initialization_consts import DEMO_DEVICE_WRITE_TOKEN
from tests.functional.func_shared import setup_application
def test_returns_401_on_no_access_token_sent():
app = setup_application(force_recreate_database=False, force_initialize_database=False)
params = {
"foo": "bar",
"bizz": "bazz",
"boo": "poo"
}
resp = app.post("/collect", params=params, expect_errors=True)
assert resp.status_code == 401
def test_returns_403_when_wrong_token_sent_in_request_header():
app = setup_application(force_recreate_database=True, force_initialize_database=False)
params = {
"foo": "bar",
"bizz": "bazz",
"boo": "poo",
}
headers = {
"access_token": "bad"
}
resp = app.post("/collect", params=params, headers=headers, expect_errors=True)
assert resp.status_code == 403
def test_returns_403_when_wrong_token_sent_in_request_parameters():
app = setup_application(force_recreate_database=True, force_initialize_database=False)
params = {
"foo": "bar",
"bizz": "bazz",
"boo": "poo",
"t": "bad"
}
resp = app.post("/collect", params=params, expect_errors=True)
assert resp.status_code == 403
def test_successfully_adds_values():
app = setup_application()
params = {
"foo": "bar",
"bizz": "bazz",
"boo": "poo",
"t": DEMO_DEVICE_WRITE_TOKEN
}
resp = app.post("/collect", params=params)
assert resp.status_code == 200
assert resp.json is not None
json_resp = resp.json
assert "method" in json_resp
assert "dcd_id" in json_resp
assert "request" in json_resp
request_info = json_resp["request"]
assert type(request_info) == dict
assert "id" in request_info
assert "uid" in request_info
assert "params" in request_info
request_id = request_info["id"]
assert request_id is not None
assert str(request_id).isnumeric()
assert request_id == 1
response_params = request_info["params"]
for item in params:
if item == "t":
continue
assert item in response_params
def test_successfully_adds_values_again():
app = setup_application(force_recreate_database=True, force_initialize_database=True)
params = {
"foo": "bar",
"bizz": "bazz",
"boo": "poo",
"t": DEMO_DEVICE_WRITE_TOKEN
}
resp = app.post("/collect", params=params)
assert resp.status_code == 200
assert resp.json is not None
json_resp = resp.json
request_id = json_resp["request"]["id"]
assert str(request_id).isnumeric()
assert request_id == 1
def test_successfully_adds_values_with_access_token_in_header():
app = setup_application()
headers = {
"access_token": DEMO_DEVICE_WRITE_TOKEN
}
expected_t_value = "foo_bar"
params = {
"foo": "bar",
"bizz": "bazz",
"boo": "poo",
"t": expected_t_value
}
resp = app.post("/collect", params=params, headers=headers)
assert resp.status_code == 200
assert resp.json is not None
json_resp = resp.json
assert "method" in json_resp
assert "dcd_id" in json_resp
assert "request" in json_resp
request_info = json_resp["request"]
assert type(request_info) == dict
assert "id" in request_info
assert "uid" in request_info
assert "params" in request_info
request_id = request_info["id"]
assert request_id is not None
assert str(request_id).isnumeric()
assert request_id > 0
added_params = request_info["params"]
assert "t" in added_params
assert added_params["t"] == expected_t_value
response_params = request_info["params"]
for item in params:
assert item in response_params
```
#### File: tests/functional/api_view_all_test.py
```python
import ksuid
from reqlog.support.db_init.initialization_consts import DEMO_DEVICE_READ_TOKEN, DEMO_DEVICE_WRITE_TOKEN
from tests.functional.func_shared import setup_application
def test_returns_401_on_no_access_token_sent():
app = setup_application(force_recreate_database=True, force_initialize_database=False)
resp = app.get("/view_all", expect_errors=True)
assert resp.status_code == 401
def test_returns_403_when_wrong_token_sent_in_request_parameters():
app = setup_application(force_recreate_database=True, force_initialize_database=False)
params = {
"t": "bad"
}
resp = app.get("/view_all", params=params, expect_errors=True)
assert resp.status_code == 403
def test_returns_403_when_wrong_token_sent_in_request_header():
app = setup_application(force_recreate_database=True, force_initialize_database=False)
headers = {
"access_token": "bad"
}
resp = app.get("/view_all", headers=headers, expect_errors=True)
assert resp.status_code == 403
def test_adds_and_returns_valid_events():
app = setup_application(force_recreate_database=True, force_initialize_database=True)
headers = {
"access_token": DEMO_DEVICE_WRITE_TOKEN
}
expected_uid = ksuid.ksuid().toBase62()
params = {
"id": expected_uid,
"foo": "bar",
"bizz": "bazz",
"boo": "poo",
"t": "special"
}
resp = app.post("/collect", params=params, headers=headers)
assert resp.status_code == 200
assert resp.json is not None
json_resp = resp.json
requies_uid = json_resp["request"]["uid"]
assert requies_uid is not None
headers = {
"access_token": DEMO_DEVICE_READ_TOKEN
}
resp = app.get("/view_all", headers=headers)
assert resp.status_code == 200
assert resp.json is not None
json_resp = resp.json
assert "data" in json_resp
json_data = json_resp["data"]
assert type(json_data) == list
found = False
for item in json_data:
assert "request_uid" in item
assert "request_id" in item
assert "request_uid" in item
assert "adding_dts" in item
assert "method" in item
assert "parameters" in item
if item["request_uid"] == requies_uid:
v = {item["name"]: item["value"] for item in item["parameters"]}
assert v == params
found = True
break
assert found
```
#### File: tests/functional/version_route_test.py
```python
from reqlog.version import __app_name__, __version__
from tests.functional.func_shared import setup_application
def test_version_route_returns_200():
app = setup_application(False, False)
resp = app.get("/version")
assert resp.status_code == 200
def test_version_route_returns_valid_values():
app = setup_application(False, False)
resp = app.get("/version")
assert resp.status_code == 200
js_response = resp.json
assert js_response is not None
assert "version" in js_response
assert "app_name" in js_response
assert js_response["version"] == __version__
assert js_response["app_name"] == __app_name__
```
|
{
"source": "JFF-Bohdan/tamaku",
"score": 3
}
|
#### File: tamaku/tests/test_solver_implementation.py
```python
from solver.solver_impl import find_best_step, game_result_to_string, play_game_bool
def test_solver_implementation_by_fixture():
with open("./tests/valid_output_fixture.txt", "rt") as file:
for line in file:
if line is None:
break
            line = str(line).strip()
if len(line) == 0:
continue
task = [str(item).strip() for item in line.split() if len(str(item).strip()) > 0]
assert len(task) == 2
assert str(task[0]).isnumeric()
value = int(task[0])
assert game_result_to_string(play_game_bool(value)) == task[1]
def test_solver_low_level_func():
assert find_best_step(17) == 8
```
|
{
"source": "JFF-Bohdan/tdd-kata",
"score": 3
}
|
#### File: tdd-kata/work_classes/recently_used_list.py
```python
class RecentlyUsedList(object):
def __init__(self, max_list_size=5):
self._items = []
self._max_list_size = max_list_size
def __len__(self):
return len(self._items)
def insert(self, item):
if item is None:
raise Exception("Null not allowed")
if (type(item) is str) and (len(item) == 0):
raise Exception("Empty strings not allowed")
if item in self._items:
self._items.remove(item)
if (self._max_list_size is not None) and (len(self._items) >= self._max_list_size):
self._items.pop(-1)
self._items.insert(0, item)
def __getitem__(self, index):
if index < 0:
raise IndexError
return self._items[index]
```
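A short behaviour sketch for the most-recently-used list above; the import path is assumed from the file header.
```python
# MRU semantics inferred from the implementation above.
from work_classes.recently_used_list import RecentlyUsedList

rul = RecentlyUsedList(max_list_size=3)
for item in ["a", "b", "c"]:
    rul.insert(item)

rul.insert("b")   # re-inserting an existing item moves it to the front
assert rul[0] == "b"

rul.insert("d")   # at capacity, the least recently used item ("a") is evicted
assert len(rul) == 3
assert [rul[i] for i in range(len(rul))] == ["d", "b", "c"]
```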
|
{
"source": "JFF-Bohdan/vrc_t70_demo",
"score": 2
}
|
#### File: JFF-Bohdan/vrc_t70_demo/daemon.py
```python
import queue
import signal
import sys
import threading
from functools import partial
from demo_impl.daemon.events_processor import EventProcessor
from demo_impl.daemon.threaded_daemon import ThreadedVrcT70Daemon
from demo_impl.shared.models.devices import VrcT70Device # noqa
from demo_impl.shared.models.sensors import VrcT70Sensor # noqa
from demo_impl.shared.models.shared import Base
from demo_impl.shared.support.config_helper import DaemonConfigHelper
from demo_impl.shared.support.config_support import get_config
from loguru import logger as initial_logger
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
def initialize_database(connection_uri, echo=False):
engine = create_engine(connection_uri, echo=echo)
return engine
def init_logger(config):
initial_logger.remove()
log_format = "{time:YYYY-MM-DD at HH:mm:ss} {level} {file}:{line} {function}() : {message}"
initial_logger.add(
sys.stderr,
format=log_format,
level=DaemonConfigHelper.get_daemon_log_level(config),
backtrace=DaemonConfigHelper.get_daemon_log_backtrace(config),
diagnose=DaemonConfigHelper.get_daemon_log_diagnose(config)
)
log_file_name = DaemonConfigHelper.get_daemon_log_file_name(config)
if log_file_name:
initial_logger.add(
log_file_name,
format=log_format,
level=DaemonConfigHelper.get_daemon_log_level(config),
rotation=DaemonConfigHelper.get_daemon_log_file_rotation(config),
compression=DaemonConfigHelper.get_daemon_log_file_compression(config),
backtrace=DaemonConfigHelper.get_daemon_log_backtrace(config),
diagnose=DaemonConfigHelper.get_daemon_log_diagnose(config)
)
return initial_logger
def signal_handler(sig, frame, daemon, need_stop, logger):
logger.warning("!!!!!! want to stop daemon !!!!!!")
need_stop.set()
if not daemon:
sys.exit(0)
logger.warning("going to shutdown with Ctrl-C")
daemon.stop()
def main():
need_stop = threading.Event()
config = get_config()
if not config:
initial_logger.error("can't initialize, can't read config file")
return -1
logger = init_logger(config)
logger.info("daemon started")
db_uri = DaemonConfigHelper.get_database_connection_uri(config)
logger.debug("db_uri: '{}'".format(db_uri))
logger.info("initializing database connection")
engine = initialize_database(db_uri)
logger.info("recreating all tables (if required)")
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
uart_name = DaemonConfigHelper.get_uart_name(config)
uart_speed = DaemonConfigHelper.get_uart_speed(config)
devices_addresses = DaemonConfigHelper.get_devices_address(config)
logger.info("going connect to port {} with speed {}".format(uart_name, uart_speed))
logger.info("will poll for devices with addresses: {}".format(devices_addresses))
if not devices_addresses:
logger.error("no device addresses for polling, going out")
return -1
events_queue = queue.Queue()
threaded_daemon = ThreadedVrcT70Daemon(events_queue, logger, uart_name, uart_speed, devices_addresses)
events_processor = EventProcessor(logger, session)
partial_handler = partial(signal_handler, daemon=threaded_daemon, need_stop=need_stop, logger=logger)
signal.signal(signal.SIGINT, partial_handler)
threaded_daemon.start()
while not need_stop.is_set():
try:
event = events_queue.get(block=True, timeout=0.5)
except queue.Empty:
event = None
if not event:
continue
logger.debug("event registered: {}".format(event))
events_processor.process_event(event)
threaded_daemon.join()
while not events_queue.empty():
event = events_queue.get()
logger.debug("event skipped at end: {}".format(event))
session.commit()
logger.info("daemon finished")
return 0
if __name__ == "__main__":
res = main()
exit(res)
```
#### File: demo_impl/daemon/threaded_daemon.py
```python
import threading
from .support import init_serial
from .vrc_t70_daemon import VrcT70Daemon
class ThreadedVrcT70Daemon(threading.Thread):
def __init__(
self,
events_queue,
logger,
uart_name,
uart_speed,
devices_addresses,
*args
):
super().__init__(*args)
self._events_queue = events_queue
self.logger = logger
self.uart_name = uart_name
self.uart_speed = uart_speed
self.devices_addresses = devices_addresses
uart = init_serial(self.uart_name, self.uart_speed)
self._daemon = VrcT70Daemon(
self._events_queue,
uart,
self.devices_addresses,
self.logger
)
self._daemon.pre_queuing_func = self._port_info_adder_function
def run(self):
self._daemon.init()
self._daemon.run()
def stop(self):
self.logger.warning("stopping daemon for uart '{}'".format(self.uart_name))
self._daemon.stop()
def _port_info_adder_function(self, event):
event.device_port_name = str(self.uart_name).strip().lower()
return event
```
#### File: demo_impl/daemon/vrc_t70_daemon.py
```python
import datetime
import random
import time
from .data_types import DeviceLastCommunicationTouch, SensorReadData
from .utils import hexlify_to_string
from .vrc_t70_stateful_communicator import VrcT70StatefulCommunicator
class VrcT70Daemon(object):
DEFAULT_POLLING_SLEEP_INTERVAL = 1
DEFAULT_TEMP_ROUND_PRESISSION = 3
def __init__(self, events_queue, serial, devices_addresses, logger): # , db_session
self._serial = serial
self._devices_addresses = devices_addresses
self._communicators = []
self.logger = logger
self._stop = False
self._polling_sleep_interval = VrcT70Daemon.DEFAULT_POLLING_SLEEP_INTERVAL
# self.db_session = db_session
self._events_queue = events_queue
# self._map_device_addresses_to_ids = dict()
self._round_precission = VrcT70Daemon.DEFAULT_TEMP_ROUND_PRESISSION
self.pre_queuing_func = None
def init(self):
for device_address in self._devices_addresses:
hex_device_address = "0x{0:02x}".format(device_address)
self.logger.info("creating new communicator for device {}".format(hex_device_address))
communicator = VrcT70StatefulCommunicator(self._serial, controller_address=device_address, logger=self.logger)
communicator.ping()
new_session_id = random_byte_array(4)
self.logger.debug("initializing session id with {}".format(hexlify_to_string(new_session_id)))
r = communicator.set_session_id(new_session_id)
assert r.session_id() == new_session_id
self.logger.debug("scanning for sensors on trunks...")
communicator.rescan_sensors_on_trunks()
self.logger.info(
"sensors count per trunk for {}: {}".format(
hex_device_address,
communicator.get_sensors_per_trunk_count()
)
)
self._communicators.append(communicator)
self._on_communicator_registration(communicator)
def run(self):
while not self._stop:
for communicator in self._communicators:
if self._stop:
break
self.logger.debug("updating data for controller {}".format(communicator.hex_device_address_for_communicator()))
events = communicator.update_temperatures()
self.logger.debug("events: {}".format(events))
self._on_events_from_device_received(communicator, events)
if self._stop:
break
self.logger.debug("going to sleep for {} second(s)".format(self._polling_sleep_interval))
time.sleep(self._polling_sleep_interval)
def stop(self):
self._stop = True
def _on_communicator_registration(self, communicator):
event = DeviceLastCommunicationTouch(device_address=communicator.controller_address)
if self.pre_queuing_func:
event = self.pre_queuing_func(event)
self._events_queue.put(event)
    def _on_events_from_device_received(self, communicator, events):
        events_list = list()
        for event in events:
            # create a fresh event object for every reading; reusing a single
            # instance would make all queued items alias the same object
            external_event = SensorReadData()
            external_event.device_address = communicator.controller_address
            external_event.sensor_address = event.sensor_address
            external_event.trunk_number = event.trunk_number
            external_event.sensor_index = event.sensor_index
            external_event.is_connected = event.is_connected
            external_event.temperature = round(event.temperature, self._round_precission) \
                if event.temperature is not None else None
            if self.pre_queuing_func:
                external_event = self.pre_queuing_func(external_event)
            events_list.append(external_event)
            self._events_queue.put(external_event)
def random_byte_array(length):
return bytearray((random.getrandbits(8) for _ in range(length)))
def utc_now():
return datetime.datetime.utcnow()
```
#### File: demo_impl/daemon/vrc_t70_stateful_communicator.py
```python
from vrc_t70 import MAX_TRUNKS_COUNT, VrcT70Communicator
from .data_types import SensorReadData
from .utils import hexlify_to_string
class VrcT70StatefulCommunicator(VrcT70Communicator):
def __init__(self, serial, controller_address, logger):
super().__init__(serial, controller_address)
self._sensors_per_trunk_count = dict()
self.logger = logger
self._sensors_addresses_per_trunk = dict()
self._map_sensor_address_to_trunk_number_and_index = dict()
self._map_sensor_address_to_temperature = dict()
self._map_trunk_number_and_index_to_sensor_address = dict()
def rescan_sensors_on_trunks(self):
self._sensors_addresses_per_trunk = dict()
self._map_sensor_address_to_trunk_number_and_index = dict()
self._map_sensor_address_to_temperature = dict()
self._map_trunk_number_and_index_to_sensor_address = dict()
hex_device_address = self.hex_device_address_for_communicator()
self.logger.info("rescanning devices on trunks for {}".format(hex_device_address))
for trunk_number in range(1, MAX_TRUNKS_COUNT + 1):
self.logger.info("scanning trunk {} for {}".format(trunk_number, hex_device_address))
self.logger.debug("rescanning trunk")
res = self.rescan_sensors_on_trunk(trunk_number)
assert res.trunk_number() == trunk_number
sensors_count = res.sensors_count()
self._sensors_per_trunk_count[trunk_number] = sensors_count
self.logger.info("retrieving sensors addresses")
addresses = self.get_sensors_unique_addresses_on_trunk(trunk_number)
assert sensors_count == addresses.sensors_count()
addresses_on_trunk = []
for sensor_index in range(sensors_count):
assert not addresses.is_error_detected(sensor_index)
unique_address = addresses.sensor_unique_address(sensor_index)
unique_address = hexlify_to_string(unique_address)
addresses_on_trunk.append(unique_address)
sensor_position = (trunk_number, sensor_index)
self._map_sensor_address_to_trunk_number_and_index[unique_address] = sensor_position
self._map_trunk_number_and_index_to_sensor_address[sensor_position] = unique_address
self._sensors_addresses_per_trunk[trunk_number] = addresses_on_trunk
self.logger.info(
"sensors addresses for trunk {}: {}".format(
trunk_number,
self.get_sensors_addresses_on_trunk(trunk_number)
)
)
return self.get_sensors_per_trunk_count()
def update_temperatures(self):
updated_data = []
for trunk_number in range(1, MAX_TRUNKS_COUNT + 1):
sensors_count = self._sensors_per_trunk_count[trunk_number]
temperatures = self.get_temperature_on_trunk(trunk_number)
assert temperatures.temperatures_count() == sensors_count
for sensor_index in range(sensors_count):
is_connected = temperatures.is_connected(sensor_index)
if is_connected:
temperature = temperatures.temperature(sensor_index)
else:
temperature = None
sensor_position = (trunk_number, sensor_index)
unique_address = self._map_trunk_number_and_index_to_sensor_address[sensor_position]
self._map_sensor_address_to_temperature[unique_address] = temperature
event = SensorReadData(
device_address=self.controller_address,
sensor_address=unique_address,
trunk_number=trunk_number,
sensor_index=sensor_index,
is_connected=is_connected,
temperature=temperature
)
updated_data.append(event)
return updated_data
def get_sensors_addresses_on_trunk(self, trunk_number):
return self._sensors_addresses_per_trunk[trunk_number]
def get_sensors_per_trunk_count(self):
return [self._sensors_per_trunk_count[trunk_number] for trunk_number in sorted(self._sensors_per_trunk_count.keys())]
def hex_device_address_for_communicator(self):
return "0x{0:02x}".format(self.controller_address)
```
#### File: shared/support/config_support.py
```python
import argparse
import configparser
import os
from loguru import logger
def get_config():
parser = argparse.ArgumentParser()
parser.add_argument(
"--config",
metavar="FILE",
help="configuration file path"
)
args = parser.parse_args()
config = configparser.RawConfigParser()
if (not args.config) or (not os.path.exists(args.config)):
logger.error("can't find file: {}".format(args.config))
return None
try:
config.read(args.config)
except BaseException as e:
logger.exception("can't read config: {}".format(e))
return None
return config
```
#### File: api_blueprint/routes/supplementary.py
```python
from demo_impl.webui.__version__ import __version__
from flask import jsonify
from loguru import logger
from ..api_blueprint import api_blueprint
@api_blueprint.route("/version")
@api_blueprint.route("/liveness")
def version_endpoint():
res = {
"version": __version__,
"short_name": "VRC-T70 Demo Application",
"long_name": "Flask based demo application for VRC-T70 Python package (SQLite db used)"
}
logger.info("test-2")
return jsonify(res)
```
|
{
"source": "JFF-Bohdan/vrc_t70",
"score": 3
}
|
#### File: vrc_t70/tools_shared/cmd_line_parser.py
```python
import argparse
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-u",
"--uart",
action="store",
dest="uart_name",
type=str,
help="uart name",
required=True
)
parser.add_argument(
"-s",
"--speed",
action="store",
dest="uart_speed",
help="uart speed",
type=int,
default=115200
)
parser.add_argument(
"-a",
"--address",
action="store",
dest="device_address",
help="device address",
type=int,
default=0x01
)
return parser.parse_args()
def get_scaner_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-u",
"--uart",
action="store",
dest="uart_name",
type=str,
help="uart name",
required=True
)
parser.add_argument(
"-s",
"--speed",
action="store",
dest="uart_speed",
help="uart speed",
type=int,
default=115200
)
parser.add_argument(
"-d",
"--delay",
action="store",
dest="wait_delay",
help="wait delay",
type=float,
default=0.15
)
parser.add_argument(
"-m",
"--min",
action="store",
dest="min_address",
help="min address for search",
type=int,
default=1
)
parser.add_argument(
"-x",
"--max",
action="store",
dest="max_address",
help="max address for search",
type=int,
default=0xff - 1
)
return parser.parse_args()
```
#### File: vrc_t70/vrc_t70/response.py
```python
import struct
import crcmod
class VrcT70Response(object):
def __init__(self, other=None):
if other:
self._assign_from_other(other)
return
self.address = None
self.id_event = None
self.sequence_id = None
self.processing_result = None
self.data = None
self.crc = None
self._crc_func = None
def is_crc_valid(self):
if self._crc_func is None:
self._crc_func = crcmod.mkCrcFun(poly=0x1D5, initCrc=0, rev=False, xorOut=0)
data_length = len(self.data) if self.data else 0
data = bytearray([
self.address & 0xff,
self.id_event & 0xff,
(self.sequence_id & 0xff00) >> 8,
self.sequence_id & 0xff,
self.processing_result & 0xff,
data_length & 0xff
]
)
if self.data:
data.extend(self.data)
data.append(self.crc)
return self._crc_func(data) == 0x00
def _assign_from_other(self, other):
d = other.__dict__
for k, v in d.items():
setattr(self, k, v)
class TrunkSensortsCountResponse(VrcT70Response):
def __init__(self, data):
super().__init__(data)
def trunk_number(self):
return self.data[0]
def sensors_count(self):
return self.data[1]
class TemperatureOnSensorResponse(VrcT70Response):
def __init__(self, data):
super().__init__(data)
def trunk_number(self):
return self.data[0]
def sensor_index(self):
return self.data[1]
def is_connected(self):
return self.data[2] == 1
def temperature(self):
res, = struct.unpack("<f", self.data[3:])
return res
class SensorUniqueIdResponse(VrcT70Response):
def __init__(self, data):
super().__init__(data)
def trunk_number(self):
return self.data[0]
def sensor_index(self):
return self.data[1]
def unique_address(self):
return self.data[2:]
class TemperatureOnTrunkResponse(VrcT70Response):
def __init__(self, data):
super().__init__(data)
def trunk_number(self):
return self.data[0]
def temperatures_count(self):
return (len(self.data) - 1) // 5
def is_connected(self, sensor_index):
return self.data[1 + sensor_index * (1 + 4)] == 1
def temperature(self, sensor_index):
offset = 1 + sensor_index * (1 + 4) + 1
res, = struct.unpack("<f", self.data[offset: offset + 4])
return res
class SensorUniqueAddressOnTrunkResponse(VrcT70Response):
def __init__(self, data):
super().__init__(data)
def trunk_number(self):
return self.data[0]
def sensors_count(self):
return (len(self.data) - 1) // 9
def is_error_detected(self, sensor_index):
offset = 1 + sensor_index * (8 + 1) + 8
return self.data[offset] == 1
def sensor_unique_address(self, sensor_index):
offset = 1 + sensor_index * (8 + 1)
return self.data[offset: offset + 8]
class SessionIdResponse(VrcT70Response):
def __init__(self, data):
super().__init__(data)
def session_id(self):
return self.data
class ControllerNewAddressResponse(VrcT70Response):
def __init__(self, data):
super().__init__(data)
def new_address(self):
return self.data[0]
```
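A hedged round-trip sketch for the CRC check above; all field values are made up, and the CRC byte is computed with the same crcmod polynomial the class itself uses.
```python
# Illustrative frame assembly + CRC validation; field values are arbitrary.
import crcmod

from vrc_t70.response import VrcT70Response  # import path assumed from the file location

crc8 = crcmod.mkCrcFun(poly=0x1D5, initCrc=0, rev=False, xorOut=0)

resp = VrcT70Response()
resp.address = 0x01
resp.id_event = 0x02
resp.sequence_id = 0x1234
resp.processing_result = 0x00
resp.data = bytearray([0x05, 0x07])

header = bytearray([
    resp.address, resp.id_event,
    (resp.sequence_id >> 8) & 0xff, resp.sequence_id & 0xff,
    resp.processing_result, len(resp.data),
])
# appending the CRC of the frame makes the overall CRC evaluate to zero
resp.crc = crc8(bytes(header + resp.data))

assert resp.is_crc_valid()
```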
|
{
"source": "JFF-Bohdan/yabtool",
"score": 3
}
|
#### File: yabtool/shared/jinja2_helpers.py
```python
import os
from jinja2 import BaseLoader, Environment, StrictUndefined
def jinja2_custom_filter_extract_year_four_digits(value):
return value.strftime("%Y")
def jinja2_custom_filter_extract_month_two_digits(value):
return value.strftime("%m")
def jinja2_custom_filter_extract_day_two_digits(value):
return value.strftime("%d")
def create_rendering_environment():
env = Environment(loader=BaseLoader, undefined=StrictUndefined)
env.filters["extract_year_four_digits"] = jinja2_custom_filter_extract_year_four_digits
env.filters["extract_month_two_digits"] = jinja2_custom_filter_extract_month_two_digits
env.filters["extract_day_two_digits"] = jinja2_custom_filter_extract_day_two_digits
env.filters["base_name"] = os.path.basename
return env
```
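A small rendering sketch for the environment above; the template string and values are illustrative.
```python
# Usage sketch for create_rendering_environment(); import path assumed from the file header.
import datetime

from yabtool.shared.jinja2_helpers import create_rendering_environment

env = create_rendering_environment()
template = env.from_string(
    "{{ dt | extract_year_four_digits }}-{{ dt | extract_month_two_digits }}-"
    "{{ dt | extract_day_two_digits }}/{{ path | base_name }}"
)
print(template.render(dt=datetime.date(2020, 3, 7), path="/var/backups/db.fbk"))
# -> 2020-03-07/db.fbk
```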
#### File: yabtool/supported_steps/base.py
```python
import datetime
import os
class DryRunExecutionError(Exception):
pass
class TransmissionError(Exception):
pass
class WrongParameterTypeError(Exception):
pass
class StepContextData(object):
def __init__(self):
self.name = None
self.description = None
def pretty_time_delta(seconds):
sign_string = "-" if seconds < 0 else ""
seconds = abs(seconds)
days, seconds = divmod(seconds, 86400)
days = int(days)
hours, seconds = divmod(seconds, 3600)
hours = int(hours)
minutes, seconds = divmod(seconds, 60)
minutes = int(minutes)
seconds = round(seconds, 3)
if days > 0:
return "{}{}d{}h{}m{}s".format(sign_string, days, hours, minutes, seconds)
elif hours > 0:
return "{}{}h{}m{}s".format(sign_string, hours, minutes, seconds)
elif minutes > 0:
return "{}{}m{}s".format(sign_string, minutes, seconds)
else:
return "{}{:.3f}s".format(sign_string, seconds)
def time_interval(timestamp_start, timestamp_end):
    time_elapsed = (timestamp_end - timestamp_start)
    # total_seconds() already includes the fractional (microseconds) part,
    # so it must not be added a second time
    return round(time_elapsed.total_seconds(), 3)
class BaseFlowStep(object):
BYTES_IN_MEGABYTE = 1024 * 1024
def __init__(
self,
logger,
rendering_context,
step_context,
secret_context,
rendering_environment,
):
self.logger = logger
self.rendering_context = rendering_context
self.step_context = step_context
self.rendering_environment = rendering_environment
self.secret_context = secret_context
self.additional_output_context = None
@classmethod
def step_name(cls):
pass
@property
def mixed_context(self):
return self._get_mixed_context()
def run(self, stat_entry, dry_run=False):
output_variables = self._generate_output_variables()
self.logger.debug("output_variables: {}".format(output_variables))
return output_variables
def vote_for_flow_execution_skipping(self):
return None
def _render_parameter(self, parameter_name, context=None):
if not context:
context = self.mixed_context
template = context[parameter_name]
self.logger.debug("'{}'@template: '{}'".format(parameter_name, template))
res = self._render_result(template)
self.logger.debug("'{}'@value: '{}'".format(parameter_name, res))
return res
def _get_mixed_context(self):
mixed_context = self.rendering_context.to_context()
mixed_context = {**mixed_context, **self._get_step_context()}
return mixed_context
def _get_step_context(self):
return {**self.secret_context, **self.step_context}
def _render_result(self, template, additional_context=None):
if not template:
return ""
mixed_context = self.mixed_context
if additional_context:
mixed_context = {**mixed_context, **additional_context}
return self._render_from_template_and_context(template, mixed_context)
def _render_from_template_and_context(self, template, context):
jinja2_template = self.rendering_environment.from_string(template)
return jinja2_template.render(**context)
def _generate_output_variables(self):
res = dict()
if "generates" not in self.step_context:
return res
for requested_value_name, requested_value_template in self.step_context["generates"].items():
res[requested_value_name] = self._render_result(requested_value_template, self.additional_output_context)
return res
def _get_metric_by_name(self, stat_entry, metric_name, initial_value=None, units_name=None):
return stat_entry.metrics.get_metric(metric_name, initial_value=initial_value, units_name=units_name)
def _get_file_size_in_mibs(self, file_name):
file_size = os.path.getsize(file_name)
size_in_megs = (file_size / BaseFlowStep.BYTES_IN_MEGABYTE)
return size_in_megs
def _get_current_timestamp(self):
return datetime.datetime.utcnow()
```
#### File: yabtool/supported_steps/factory.py
```python
from .step_calculate_file_hash_and_save_to_file import StepCalculateFileHashAndSaveToFile
from .step_compress_file_with_7z import StepCompressFileWith7Z
from .step_make_directory_for_backup import StepMakeDirectoryForBackup
from .step_make_firebird_database_backup import StepMakeFirebirdDatabaseBackup, StepMakeFirebirdLinuxDatabaseBackup
from .step_make_healthchecks_ping import StepMakeHealthchecksPing
from .step_make_pg_win_database_backup import StepMakePgDatabaseWinBackup
from .step_s3_multipart_upload_with_rotation import StepS3MultipartUploadWithRotation
from .step_s3_strict_uploader import StepS3StrictUploader
from .step_validate_7z_archive import StepValidate7ZArchive
class StepsFactory(object):
def __init__(self):
self._known_steps = dict()
def register_class(self, cls):
self._known_steps[cls.step_name()] = cls
def create_object(self, step_name, **kwargs):
return self._known_steps[step_name](**kwargs)
def is_step_known(self, step_name):
return step_name in self._known_steps
def create_steps_factory():
factory = StepsFactory()
factory.register_class(StepMakeDirectoryForBackup)
factory.register_class(StepMakeFirebirdDatabaseBackup)
factory.register_class(StepMakeFirebirdLinuxDatabaseBackup)
factory.register_class(StepCalculateFileHashAndSaveToFile)
factory.register_class(StepCompressFileWith7Z)
factory.register_class(StepValidate7ZArchive)
factory.register_class(StepS3MultipartUploadWithRotation)
factory.register_class(StepS3StrictUploader)
factory.register_class(StepMakePgDatabaseWinBackup)
factory.register_class(StepMakeHealthchecksPing)
return factory
```
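A hedged sketch of registering a project-specific step with the factory above; `StepPrintMessage` is invented for illustration and is not part of yabtool.
```python
# Hypothetical custom step registration; StepPrintMessage is invented for this sketch.
from yabtool.supported_steps.factory import StepsFactory


class StepPrintMessage(object):
    def __init__(self, logger=None, **kwargs):
        self.logger = logger

    @classmethod
    def step_name(cls):
        return "print_message"

    def run(self, stat_entry=None, dry_run=False):
        print("hello from a custom step")
        return {}


factory = StepsFactory()
factory.register_class(StepPrintMessage)
assert factory.is_step_known("print_message")

step = factory.create_object("print_message", logger=None)
step.run()
```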
#### File: yabtool/supported_steps/s3boto_client.py
```python
import os
import threading
from boto3.s3.transfer import MB, S3Transfer, TransferConfig
from botocore.exceptions import ClientError
from .base import WrongParameterTypeError
class ProgressPercentage(object):
def __init__(self, logger, filename):
self._filename = filename
self._bytes_to_transmit = float(os.path.getsize(filename))
self._transmitted_bytes_count = 0
self._lock = threading.Lock()
self._logger = logger
def __call__(self, bytes_amount):
with self._lock:
self._transmitted_bytes_count += bytes_amount
percentage = (self._transmitted_bytes_count / self._bytes_to_transmit) * 100
percentage = round(percentage, 2)
self._logger.info("transmitted: {}%".format(percentage))
@staticmethod
def _get_file_size(file_name):
return os.path.getsize(file_name)
class S3BasicBotoClient(object):
DEFAULT_TRANSMISSION_CHUNK_SIZE = 8 * MB
DEFAULT_NOTIFICATION_THRESHHOLD = 1 * MB
DEFAULT_TRANSMISSION_MAX_THREADS = 20
DEFAULT_MAX_TRANSMISSION_ATTEMPTS = 5
def __init__(self, logger, s3_client):
self.logger = logger
self._client = s3_client
def create_bucket(self, bucket_name, region=None):
try:
if region is None:
self._client.create_bucket(Bucket=bucket_name)
else:
location = {"LocationConstraint": region}
self._client.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration=location
)
except ClientError as e:
self.logger.error(e)
return False
return True
def is_object_exists(self, bucket_name, object_name):
try:
self._client.head_object(Bucket=bucket_name, Key=object_name)
except ClientError:
return False
return True
def is_bucket_exists(self, bucket_name):
try:
_ = self._client.head_bucket(Bucket=bucket_name) # noqa
except ClientError as e:
self.logger.debug(e)
return False
return True
def upload_file(
self,
dest_bucket_name,
dest_object_name,
source_file_name,
transfer_config=None
):
if transfer_config is None:
transfer_config = TransferConfig(
multipart_threshold=S3BasicBotoClient.DEFAULT_NOTIFICATION_THRESHHOLD,
max_concurrency=S3BasicBotoClient.DEFAULT_TRANSMISSION_MAX_THREADS,
multipart_chunksize=S3BasicBotoClient.DEFAULT_TRANSMISSION_CHUNK_SIZE,
num_download_attempts=S3BasicBotoClient.DEFAULT_MAX_TRANSMISSION_ATTEMPTS,
use_threads=True
)
transfer = S3Transfer(self._client, config=transfer_config)
transfer.upload_file(
source_file_name,
dest_bucket_name,
dest_object_name,
callback=ProgressPercentage(self.logger, source_file_name),
)
return True
def copy_file_from_one_bucket_to_another(
self,
src_bucket_name,
src_object_name,
dest_bucket_name,
dest_object_name,
):
copy_source = {
"Bucket": src_bucket_name,
"Key": src_object_name
}
self._client.copy(copy_source, dest_bucket_name, dest_object_name)
def put_object(self, dest_bucket_name, dest_object_name, src_data):
"""Add an object to an Amazon S3 bucket
The src_data argument must be of type bytes or a string that references
a file specification.
:param dest_bucket_name: string
:param dest_object_name: string
:param src_data: bytes of data or string reference to file spec
:return: True if src_data was added to dest_bucket/dest_object, otherwise
False
"""
object_data = None
need_close = False
try:
if isinstance(src_data, bytes):
object_data = src_data
elif isinstance(src_data, str):
need_close = True
object_data = open(src_data, "rb")
else:
msg = "Type of {} for the argument 'src_data' is not supported.".format(str(type(src_data)))
self.logger.error(msg)
raise WrongParameterTypeError(msg)
self._put_object(dest_bucket_name, dest_object_name, object_data)
finally:
if need_close:
object_data.close()
def list_files_in_folder(self, bucket_name, folder=""):
response = self._client.list_objects(Bucket=bucket_name, Prefix=folder)
return [content.get("Key") for content in response.get("Contents", [])]
def delete_object(self, bucket_name, key):
self._client.delete_object(Bucket=bucket_name, Key=key)
def get_object_tags(self, bucket_name, key):
ret = {}
resp = self._client.get_object_tagging(Bucket=bucket_name, Key=key)
if "TagSet" not in resp:
return ret
tags_set = resp["TagSet"]
for tags_set_item in tags_set:
ret[tags_set_item["Key"]] = tags_set_item["Value"]
return ret
def set_object_tags(self, bucket_name, key, tags):
tags_list = [{"Key": str(key), "Value": str(value)} for key, value in tags.items()]
self._client.put_object_tagging(Bucket=bucket_name, Key=key, Tagging={"TagSet": tags_list})
def delete_object_tags(self, bucket_name, key):
        self._client.delete_object_tagging(Bucket=bucket_name, Key=key)
def _put_object(self, dest_bucket_name, dest_object_name, object_data):
# Put the object
try:
self._client.put_object(Bucket=dest_bucket_name, Key=dest_object_name, Body=object_data)
except Exception as e:
# AllAccessDisabled error == bucket not found
# NoSuchKey or InvalidRequest error == (dest bucket/obj == src bucket/obj)
self.logger.error(e)
raise
```
#### File: yabtool/supported_steps/step_make_directory_for_backup.py
```python
import os
from .base import BaseFlowStep
class StepMakeDirectoryForBackup(BaseFlowStep):
def run(self, stat_entry, dry_run=False):
res = self._render_parameter("generation_mask")
res = os.path.normpath(os.path.abspath(res))
if not dry_run:
os.makedirs(res)
self.additional_output_context = {"result": res}
return super().run(dry_run)
@classmethod
def step_name(cls):
return "mkdir_for_backup"
```
#### File: yabtool/yabtool/yabtool_flow_orchestrator.py
```python
import codecs
import copy
import datetime
import os
import uuid
import terminaltables
from yabtool.shared.jinja2_helpers import create_rendering_environment
from yaml import safe_load
from .supported_steps import create_steps_factory
from .supported_steps.base import pretty_time_delta, time_interval
from .yabtool_stat import StepExecutionStatisticEntry
DEFAULT_CONFIG_RELATIVE_NAME = "./config/config.yaml"
class ConfigurationValidationException(BaseException):
pass
class RenderingContext(object):
def __init__(self):
self.config_file_name = None
self.secrets_file_name = None
self.config_context = dict()
self.secrets_context = dict()
self.target_name = None
self.flow_name = None
self.basic_values = dict()
self.previous_steps_values = list()
self.temporary_folder = None
self.root_temporary_folder = None
self.remove_temporary_folder = None
self.perform_dry_run = None
self.unknown_args = None
def to_context(self):
res = self.basic_values
for item in self.previous_steps_values:
res = {**res, **item}
return res
class YabtoolFlowOrchestrator(object):
def __init__(self, logger):
self.rendering_context = RenderingContext()
self.logger = logger
self._steps_factory = None
self._backup_start_timestamp = datetime.datetime.utcnow()
self._skip_flow_execution_voting_result = None
self.skip_voting_enabled = True
self.dry_run_statistics = []
self.active_run_statistics = []
def initialize(self, args, unknown_args):
self.rendering_context.config_file_name = self._get_config_file_name(args)
self.logger.debug(
"config_file_name: '{}'".format(self.rendering_context.config_file_name)
)
if not self.rendering_context.config_file_name:
raise ConfigurationValidationException("No configuration file specified")
if not os.path.exists(self.rendering_context.config_file_name):
raise ConfigurationValidationException(
"Configuration file does not exists. Path: '{}'".format(
self.rendering_context.config_file_name
)
)
self.rendering_context.config_context = self._load_yaml_file(
self.rendering_context.config_file_name
)
self.rendering_context.unknown_args = unknown_args
self.rendering_context.secrets_file_name = self._get_secrets_file_name(args)
if not self.rendering_context.secrets_file_name:
raise ConfigurationValidationException(
"Secrets file is not specified"
)
if not os.path.exists(self.rendering_context.secrets_file_name):
raise ConfigurationValidationException(
"Secrets file does not exists. Path: '{}'".format(
self.rendering_context.secrets_file_name
)
)
self.logger.debug("loading secrets from: '{}'".format(self.rendering_context.secrets_file_name))
self.rendering_context.secrets_context = self._load_yaml_file(
self.rendering_context.secrets_file_name
)
self.rendering_context.target_name = self._get_target_name(args)
self.logger.debug("target_name: '{}'".format(self.target_name))
self.rendering_context.flow_name = self._get_flow_name(args)
self.logger.debug("flow_name: '{}'".format(self.rendering_context.flow_name))
self._override_config_parameters_with_secrets()
self.rendering_context.remove_temporary_folder = self.config_context["parameters"]["remove_temporary_folder"]
self.rendering_context.perform_dry_run = self.config_context["parameters"]["perform_dry_run"] or args.dry_run
self.rendering_context.root_temporary_folder = self._get_temporary_folder(args)
self.rendering_context.temporary_folder = os.path.join(
self.rendering_context.root_temporary_folder,
self._create_folder_name_for_execution(),
)
os.makedirs(self.rendering_context.temporary_folder)
self.logger.debug(
"temporary_folder: '{}'".format(self.rendering_context.temporary_folder)
)
self._steps_factory = create_steps_factory()
self.rendering_context.basic_values = self._init_basic_values()
self.logger.debug(
"basic_values: {}".format(self.rendering_context.basic_values)
)
return True
def dry_run(self):
self.logger.warning("performing dry run")
if self.rendering_context.perform_dry_run:
self._run(dry_run=True)
def run(self):
self.logger.warning("performing active run")
self._run(dry_run=False)
def print_stat(self):
if self.dry_run_statistics:
stat_data = self.produce_exeuction_stat(self.dry_run_statistics)
self.logger.info("{}:\n{}".format("Dry run statistics:", stat_data))
metrics_data_list = self.produce_execution_metrics(self.dry_run_statistics)
for step_name, metrics_data_item in metrics_data_list:
self.logger.info("Metrics for '{}':\n{}".format(step_name, metrics_data_item))
if self.active_run_statistics:
stat_data = self.produce_exeuction_stat(self.active_run_statistics)
self.logger.info("{}:\n{}".format("Execution statistics:", stat_data))
metrics_data_list = self.produce_execution_metrics(self.active_run_statistics)
for step_name, metrics_data_item in metrics_data_list:
self.logger.info("Metrics for '{}':\n{}".format(step_name, metrics_data_item))
if (not self.dry_run_statistics) and (not self.active_run_statistics):
self.logger.info("No execution statistics")
def produce_exeuction_stat(self, stat_source):
header = ["Step Name", "Exexcution start timestamp", "Execution end timestamp", "Time elapsed "]
data = [header]
total_time_elapsed_seconds = 0
max_length = len(data[0]) if data else 0
print_format = "%Y-%m-%d %H:%M:%S"
for statistics_item in stat_source:
# step_name = "{} ({})".format(statistics_item.step_human_readable_name, statistics_item.step_name)
step_name = statistics_item.step_name
time_elapsed_in_seconds = time_interval(
statistics_item.execution_start_timestamp,
statistics_item.execution_end_timestamp,
)
data_row = [
step_name,
statistics_item.execution_start_timestamp.strftime(print_format),
statistics_item.execution_end_timestamp.strftime(print_format),
pretty_time_delta(time_elapsed_in_seconds)
]
data.append(data_row)
max_length = max_length if max_length <= len(data_row) else len(data_row)
total_time_elapsed_seconds += time_elapsed_in_seconds
data_row = [""] * (max_length - 2)
data_row.append("Total")
data_row.append(pretty_time_delta(total_time_elapsed_seconds))
data.append(data_row)
table = terminaltables.AsciiTable(data)
return table.table
def produce_execution_metrics(self, stat_source):
res = []
for statistics_item in stat_source:
step_name = "{} ({})".format(statistics_item.step_human_readable_name, statistics_item.step_name)
metrics = statistics_item.metrics
if metrics.is_empty():
continue
metrics_data = [["Name", "Value"]]
for metric_name in metrics.get_all_metrics():
metric = metrics.get_metric(metric_name)
metrics_data.append(
[
metric.metric_name,
"{} {}".format(
metric.value,
metric.units_name
)
]
)
table = terminaltables.AsciiTable(metrics_data)
res_item = (step_name, table.table)
res.append(res_item)
return res
def _get_config_file_name(self, args):
config_file_name = args.config
if not config_file_name:
config_file_name = DEFAULT_CONFIG_RELATIVE_NAME
src_path = os.path.abspath(os.path.dirname(__file__))
assert os.path.exists(src_path)
self.logger.debug("src_path: '{}'".format(src_path))
config_file_name = os.path.join(src_path, config_file_name)
config_file_name = os.path.abspath(config_file_name)
config_file_name = os.path.normpath(config_file_name)
if not os.path.exists(config_file_name):
self.logger.error("can't find configuration file in '{}'".format(config_file_name))
assert os.path.exists(config_file_name), config_file_name
else:
config_file_name = os.path.abspath(config_file_name)
config_file_name = os.path.normpath(config_file_name)
return config_file_name
def _run(self, dry_run):
assert self.rendering_context.flow_name
if dry_run:
self._skip_flow_execution_voting_result = None
flow_data = self.rendering_context.config_context["flows"][self.flow_name]
flow_description = flow_data["description"]
self.logger.info("flow_description: '{}'".format(flow_description))
self.rendering_context.previous_steps_values = []
rendering_environment = create_rendering_environment()
secret_targets_context = self.rendering_context.secrets_context["targets"][self.target_name]
self._execute_steps(dry_run, flow_data, rendering_environment, secret_targets_context)
def _execute_steps(self, dry_run, flow_data, rendering_environment, secret_targets_context):
assert self._steps_factory
statistics_list = self.dry_run_statistics if dry_run else self.active_run_statistics
statistics_list.clear()
if self._need_skip_voting():
self.logger.warning("Want skip flow execution")
return
positive_votes_for_flow_execution_skipping = []
for step_context in flow_data["steps"]:
step_name = step_context["name"]
step_human_readable_name = step_context.get("human_readable_name", step_name)
step_description = step_context.get("description", "<no description>")
self.logger.debug(
"validating step '{}': {}".format(step_name, step_description)
)
if not self._steps_factory.is_step_known(step_name):
raise ConfigurationValidationException(
"Unknown step '{}'".format(step_name)
)
if dry_run:
self.logger.debug("performing dry run for step '{}'".format(step_name))
else:
self.logger.debug("performing active run for step '{}'".format(step_name))
secret_context = dict()
relative_secrets = step_context.get("relative_secrets", [])
required_secrets = [step_name]
required_secrets.extend(relative_secrets)
for required_secret in required_secrets:
if (
("steps_configuration" in secret_targets_context) and # noqa
(required_secret in secret_targets_context["steps_configuration"])
):
secret_context = {
**secret_context,
**secret_targets_context["steps_configuration"][required_secret]
}
step_object = self._steps_factory.create_object(
step_name,
logger=self.logger,
rendering_context=self.rendering_context,
step_context=step_context,
secret_context=secret_context,
rendering_environment=rendering_environment,
)
if dry_run:
self.logger.info("initializing dry run for step: '{}'".format(step_name))
self.logger.debug("checking for decision for flow skipping")
self._check_for_flow_execution_skipping(step_object, positive_votes_for_flow_execution_skipping)
else:
self.logger.info("initializing active run for step: '{}'".format(step_name))
stat_entry = StepExecutionStatisticEntry(
step_name=step_name,
step_human_readable_name=step_human_readable_name,
execution_start_timestamp=datetime.datetime.utcnow()
)
additional_variables = step_object.run(stat_entry, dry_run=dry_run)
stat_entry.execution_end_timestamp = datetime.datetime.utcnow()
statistics_list.append(stat_entry)
self.logger.debug("additional_variables: {}".format(additional_variables))
self.rendering_context.previous_steps_values.append(additional_variables)
if dry_run and positive_votes_for_flow_execution_skipping:
self.logger.info(
"Flow execution can be SKIPPED.\n\tThese steps voted to skip flow execution: {}".format(
positive_votes_for_flow_execution_skipping
)
)
self._skip_flow_execution_voting_result = True
def _check_for_flow_execution_skipping(self, step_object, positive_votes_for_flow_execution_skipping):
step_name = step_object.step_name()
if self._skip_flow_execution_voting_result is not None:
self.logger.debug(
"decision for flow execution skipping already made. Can skip flow execution: {}".format(
self._skip_flow_execution_voting_result
)
)
return
vote = step_object.vote_for_flow_execution_skipping()
if vote is None:
self.logger.debug("step '{}' do not want to vote for flow execution skipping".format(step_name))
return
self.logger.debug("step '{}' voted for flow execution skipping".format(step_name))
if not vote:
self._skip_flow_execution_voting_result = False
self.logger.debug("step '{}' voted against flow execution skipping".format(step_name))
else:
positive_votes_for_flow_execution_skipping.append(step_name)
self.logger.debug("step '{}' voted for flow execution skipping".format(step_name))
def _get_target_name(self, args):
if args.target:
return args.target
assert self.rendering_context.secrets_context
return self.rendering_context.secrets_context["defaults"]["target"]
def _get_flow_name(self, args):
if args.flow:
return args.flow
assert self.rendering_context.secrets_context
assert self.rendering_context.target_name
targets_context = self.rendering_context.secrets_context["targets"][
self.rendering_context.target_name
]
return targets_context["flow_type"]
def _override_config_parameters_with_secrets(self):
if "parameters" in self.secrets_context:
override_params = self.secrets_context["parameters"]
for key, value in override_params.items():
self.config_context["parameters"][key] = value
secret_targets_context = self.secrets_context["targets"][self.target_name]
if "config_patch" in secret_targets_context:
config_patch = secret_targets_context["config_patch"]
patched_steps_count = 0
for step_patch in config_patch["steps"]:
step_patch_data = copy.deepcopy(step_patch)
self.logger.debug("step_patch_data: {}".format(step_patch_data))
del step_patch_data["name"]
patched_steps_count += self._patch_step_in_flow(step_patch["name"], step_patch_data)
if patched_steps_count:
flow_data = self.rendering_context.config_context["flows"][self.flow_name]
self.logger.debug("flow_data:\n{}".format(flow_data))
def _patch_step_in_flow(self, step_name, step_patch_data):
flow_data = self.rendering_context.config_context["flows"][self.flow_name]
flow_steps = flow_data["steps"]
patched_steps_count = 0
flow_steps_count = len(flow_steps)
for index in range(flow_steps_count):
flow_step = flow_steps[index]
if flow_step["name"] != step_name:
continue
self.logger.warning(
"patching step '{}' for flow '{}' with:\n{}".format(
step_name,
self.flow_name,
step_patch_data
)
)
flow_steps[index] = {**flow_step, **step_patch_data}
patched_steps_count += 1
return patched_steps_count
def _get_temporary_folder(self, args):
if args.temporary_folder:
return args.temporary_folder
temporary_folder = self.rendering_context.secrets_context["defaults"]["temporary_folder"]
if temporary_folder:
return temporary_folder
return self.rendering_context.config_context["defaults"]["temporary_folder"]
def _need_skip_voting(self):
if not self.skip_voting_enabled:
return False
if (self._skip_flow_execution_voting_result is not None) and self._skip_flow_execution_voting_result:
return True
return False
def _create_folder_name_for_execution(self):
res = "{}_{}".format(self._backup_start_timestamp.isoformat(), str(uuid.uuid4()))
res = res.replace(":", "")
res = res.replace("-", "")
return res
@staticmethod
def _load_yaml_file(file_name, codec="utf-8"):
with codecs.open(file_name, "r", codec) as input_file:
return safe_load(input_file.read())
@staticmethod
def _get_secrets_file_name(args):
secrets_file_name = args.secrets
if not secrets_file_name:
return None
secrets_file_name = os.path.abspath(secrets_file_name)
secrets_file_name = os.path.normpath(secrets_file_name)
return secrets_file_name
def _init_basic_values(self):
res = self._get_additional_rendering_variables()
res["main_target_name"] = self.rendering_context.target_name
res["week_day_short_name"] = self._backup_start_timestamp.strftime("%a")
res["week_number"] = self._backup_start_timestamp.strftime("%U")
res["month_short_name"] = self._backup_start_timestamp.strftime("%b")
res["month_two_digit_number"] = self._backup_start_timestamp.strftime("%m")
res["backup_start_timestamp"] = self._backup_start_timestamp
res["flow_name"] = self.flow_name
res["yabtool_exec_folder"] = self.rendering_context.temporary_folder
res["current_year"] = self._backup_start_timestamp.strftime("%Y")
res["current_month"] = res["month_two_digit_number"]
res["current_day_of_month"] = self._backup_start_timestamp.strftime("%d")
res["current_date"] = self._backup_start_timestamp.strftime("%Y-%m-%d")
res["current_time"] = self._backup_start_timestamp.strftime("%H%M%S")
res["lower"] = str.lower
res["upper"] = str.upper
return res
def _get_additional_rendering_variables(self):
targets_context = self.rendering_context.secrets_context["targets"][self.target_name]
res = targets_context.get("additional_variables", {})
self.logger.info("additional variables: {}".format(res))
return res
@property
def config_context(self):
return self.rendering_context.config_context
@property
def secrets_context(self):
return self.rendering_context.secrets_context
@property
def flow_name(self):
return self.rendering_context.flow_name
@property
def target_name(self):
return self.rendering_context.target_name
@property
def backup_start_timestamp(self):
return self._backup_start_timestamp
```
|
{
"source": "jffifa/algo-solution",
"score": 3
}
|
#### File: algo-solution/ural/1013.py
```python
N = int(input())
K = int(input())
M = int(input())
def mul(m1, m2):
m = [[0,0],[0,0]]
for i in range(0,2):
for j in range(0,2):
for k in range(0,2):
m[i][j] = m[i][j]+m1[i][k]*m2[k][j]
m[i][j] = m[i][j] % M
return m
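# mpow raises the 2x2 matrix m to the power p (mod M) by exponentiation by squaring,
# consuming the exponent bit by bit, so only O(log p) matrix multiplications are needed.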
def mpow(m, p):
res = [[1,0],[0,1]]
while p > 0:
if p & 1:
res = mul(res, m)
m = mul(m, m)
p = p >> 1
return res
m = [[K-1,K-1],[1,0]]
r = mpow(m, N)
print(r[0][0])
```
|
{
"source": "jffifa/kotoribuy",
"score": 2
}
|
#### File: kotoribuy/expo/views.py
```python
from django.shortcuts import render
from expo.models import Tag, Booth
from django.core.paginator import Paginator
# Create your views here.
def index(request):
return tag_filter(request)
def booth_detail(request, booth_id=None):
context = {}
booth = Booth.objects.get(pk=booth_id)
booth_tags = booth.tag_set.all()
context['booth'] = booth
context['booth_tag'] = booth_tags
return render(request, 'expo/booth_detail.html', context)
def tag_filter(request, tag_id=None):
tag_query = None
if tag_id:
tag_id = int(tag_id)
elif request.method == 'POST' and 'tag_query' in request.POST:
tag_query = request.POST['tag_query']
context = {}
if tag_query:
tags = Tag.objects.filter(name__icontains=tag_query)
context['tag_query'] = tag_query
elif tag_id:
tags = Tag.objects.filter(id=tag_id)
context['tag_query'] = tags[0].name
else:
tags = Tag.objects.all()
tags = tags.order_by('name')
context['tags'] = tags
if tag_query or tag_id:
booth_map = {}
for tag in tags:
for b in tag.booth_set.all():
if b.id not in booth_map:
booth_map[b.id] = b
booths = booth_map.values()
else:
booths = Booth.objects.all()
context['booths'] = booths
return render(request, 'expo/tag_booth_list.html', context)
```
|
{
"source": "jffifa/kyotogang-toolset",
"score": 3
}
|
#### File: kyotogang-toolset/kotori/session.py
```python
import cookielib
import urllib
import urllib2
import xmlparser
import time
from gconf import GConf as gconf
class Session(object):
"""stage1st session
"""
def __init__(self, username, password):
self.username = username
self.password = password
self.loginUrl = gconf.LOGIN_URL
self.keepConnUrl = gconf.KEEP_CONN_URL
self.httpHeader = gconf.LOGIN_HEADER
self.cookie = cookielib.CookieJar()
self.stream = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie))
self.stream.addheaders = self.httpHeader.items()
self.status = gconf.SESSION_STATUS_INIT
self.lastRes = None # last time connect response
self.mutex = False
def get_cookie(self):
return self.cookie # cookielib is thread safe itself
# thread safe urlopen?
def open(self, url, data=None):
while self.mutex:
            time.sleep(0.1)
self.mutex = True
while True:
try:
                res = self.stream.open(url, data)
break
except:
pass
s = res.read()
self.mutex = False
return s
def login(self):
postData = {
'fastloginfield':'username',
'username':self.username,
'password':self.password,
'quickforward':'yes',
'handlekey':'ls',
}
encPostData = urllib.urlencode(postData)
while self.mutex:
            time.sleep(0.1)
self.mutex = True
try:
self.lastRes = self.stream.open(self.loginUrl, encPostData)
resStr = self.lastRes.read()
except urllib2.URLError as e:
if gconf.DEBUG:
print e.reason
except Exception as e:
if gconf.DEBUG:
print e
if (self.lastRes is not None) and (xmlparser.verify_login(resStr)):
self.status = gconf.SESSION_STATUS_LOGIN
else:
self.status = gconf.SESSION_STATUS_LOGOUT
self.mutex = False
if gconf.DEBUG:
print resStr
def keep_connect(self):
if gconf.DEBUG:
print self.username, self.status
while self.mutex:
            time.sleep(0.1)
self.mutex = True
try:
if self.status == gconf.SESSION_STATUS_LOGIN:
self.lastRes = self.stream.open(self.keepConnUrl)
except urllib2.URLError as e:
#self.status = gconf.SESSION_STATUS_LOGOUT
if gconf.DEBUG:
print e.reason
except Exception as e:
if gconf.DEBUG:
print e
self.mutex = False
if gconf.DEBUG:
for item in self.cookie:
print item.name, item.value
print self.lastRes.read()
def logout(self):
pass
# test case
if __name__ == '__main__':
s = Session('jffifa', '123456')
s.login()
import time
time.sleep(5)
s.keep_connect()
```
|
{
"source": "jffifa/trpg",
"score": 2
}
|
#### File: trpg/registration/utility.py
```python
from django.conf import settings
def decrypt(text):
half_len = len(text)//2
return text[half_len:]
```
#### File: trpg/views/auth.py
```python
from .base import JSONView
from django.contrib.auth import authenticate, login, logout
from django.urls import reverse
from ..registration.utility import decrypt as password_decrypt
class LoginView(JSONView):
def get_context_data(self, **kwargs):
username = self.request.POST['username']
password = self.request.POST['password']
password = password_decrypt(password)
user = authenticate(self.request, username=username, password=password)
if user is not None:
login(self.request, user)
return {'succ': True}
else:
return {'succ': False}
class LogoutView(JSONView):
def get_context_data(self, **kwargs):
logout(self.request)
return {
'succ': True,
'redirect': reverse('hall'),
}
```
#### File: trpg/views/base.py
```python
from django.views.generic.base import ContextMixin, View
from django.http import JsonResponse
class JSONView(ContextMixin, View):
def get_json_object(self, context):
"""
Returns an object that will be serialized as JSON by json.dumps().
"""
# Note: This is *EXTREMELY* naive; in reality, you'll need
# to do much more complex handling to ensure that arbitrary
# objects -- such as Django model instances or querysets
# -- can be serialized as JSON.
return context
def render_to_response(self, context, **response_kwargs):
return JsonResponse(self.get_json_object(context), **response_kwargs)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
```
|
{
"source": "jfftck/Sqeezz_v4",
"score": 2
}
|
#### File: sqeezz/libs/modules.py
```python
from importlib import import_module
from importlib.util import find_spec, module_from_spec, spec_from_file_location
from os import path
from sys import modules
from ..sqeezzutils import LazyLoad
_base_package = modules['__main__']
def lazy_module(mod: str):
def __inner():
return module(mod)
return LazyLoad(__inner)
def module(mod: str):
global _base_package
if find_spec(mod) is not None:
return import_module(mod)
root = path.dirname(path.abspath(_base_package.__file__))
spec = spec_from_file_location(mod, path.join(root, '{}.py'.format(path.join(*mod.split('.')))))
module = module_from_spec(spec)
modules[mod] = module
spec.loader.exec_module(module)
return module
```
|
{
"source": "jf-galtenco/Adafruit_Python_PCA9685",
"score": 2
}
|
#### File: Adafruit_Python_PCA9685/examples/v2Int.py
```python
def v2Int(v,vmax,Imax):
from math import floor
    # Convert the voltage setpoint into a PWM setpoint between 0 and 4095.
    # Note: this is not extremely precise, but it is good enough inside a relative control loop.
return floor(Imax*v/vmax)
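# Illustrative example (hypothetical values): with vmax = 5.0 V and Imax = 4095,
# v2Int(2.5, 5.0, 4095) = floor(4095 * 2.5 / 5.0) = 2047, i.e. roughly half duty cycle.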
```
|
{
"source": "Jfghanimah/fpga-miner",
"score": 2
}
|
#### File: Jfghanimah/fpga-miner/sha.py
```python
import timeit
W = 32 #Number of bits in word
M = 1 << W
FF = M - 1 #0xFFFFFFFF (for performing addition mod 2**32)
#Constants from SHA256 definition
K_t = (0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2)
#Initial values for compression func
H_t = (0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19)
#Block Padding
padding = (
0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
# 32-bit bitwise rotate right
def RR(x, b):
return ((x >> b) | (x << (W - b))) & FF
# Pads a message and converts to byte array
def Pad(W):
mdi = len(W) % 64
L = (len(W) << 3).to_bytes(8, 'big') #Binary of len(W) in bits
npad = 55 - mdi if mdi < 56 else 119 - mdi #Pad so 64 | len; add 1 block if needed
return bytes(W, 'ascii') + b'\x80' + (b'\x00' * npad) + L #64 | 1 + npad + 8 + len(W)
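# Worked example of the padding arithmetic: for a 14-byte message, mdi = 14, npad = 55 - 14 = 41,
# so the padded length is 14 + 1 + 41 + 8 = 64 bytes, exactly one 512-bit block.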
# Compression Function
def Sha256CF(Wt, Kt, A, B, C, D, E, F, G, H):
Ch = (E & F) ^ (~E & G)
Ma = (A & B) ^ (A & C) ^ (B & C) #Major
S0 = RR(A, 2) ^ RR(A, 13) ^ RR(A, 22) #Sigma_0
S1 = RR(E, 6) ^ RR(E, 11) ^ RR(E, 25) #Sigma_1
T1 = H + S1 + Ch + Wt + Kt
return (T1 + S0 + Ma) & FF, A, B, C, (D + T1) & FF, E, F, G
def Sha256(M):
'''
Performs SHA256 on an input string
M: The string to process
return: A 32 byte array of the binary digest
'''
M = Pad(M) #Pad message so that length is divisible by 64
DG = list(H_t) #Digest as 8 32-bit words (A-H)
for j in range(0, len(M), 64): #Iterate over message in chunks of 64
S = M[j:j + 64] #Current chunk
W = [0] * 64
W[0:16] = [int.from_bytes(S[i:i + 4], 'big') for i in range(0, 64, 4)]
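        # Expand the 16 block words into the full 64-entry message schedule W[16..63]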
for i in range(16, 64):
s0 = RR(W[i - 15], 7) ^ RR(W[i - 15], 18) ^ (W[i - 15] >> 3)
s1 = RR(W[i - 2], 17) ^ RR(W[i - 2], 19) ^ (W[i - 2] >> 10)
W[i] = (W[i - 16] + s0 + W[i-7] + s1) & FF
A, B, C, D, E, F, G, H = DG #State of the compression function
for i in range(64):
A, B, C, D, E, F, G, H = Sha256CF(W[i], K_t[i], A, B, C, D, E, F, G, H)
DG = [(X + Y) & FF for X, Y in zip(DG, (A, B, C, D, E, F, G, H))]
return b''.join(Di.to_bytes(4, 'big') for Di in DG) #Convert to byte array
if __name__ == "__main__":
print('\n'*10)
print("Running Benchmark for software\n")
time = timeit.timeit("Sha256('Bitcoin Miner!')", number=10000, globals=globals())
print(f'Python Software Encryption Speed: {10000/time} H/s\n')
while(1):
msg = input("Enter msg:")
bd = Sha256(msg)
print(''.join('{:02x}'.format(i) for i in bd))
```
|
{
"source": "Jfghanimah/g3-bot",
"score": 3
}
|
#### File: g3-bot/app/pump_trader.py
```python
import os
import json
import requests
import math
from dotenv import load_dotenv
load_dotenv()
TD_ACCOUNT_NUMBER = os.getenv('TD_ACCOUNT_NUMBER')
TD_CONSUMER_KEY = os.getenv('TD_CONSUMER_KEY')
TD_REFRESH_TOKEN = os.getenv('TD_REFRESH_TOKEN')
URL_ENCODED_REDIRECT_URI = "https%3A%2F%2Flocalhost%3A8080%2Fcallback"
CAPITAL_ALLOCATION_RATE = 0.20
ROLLING_STOP_LOSS = 0.05
# This token really only needs to be refreshed about every 30 minutes, but nothing breaks if we request a new one for every call.
def get_access_token():
endpoint = 'https://api.tdameritrade.com/v1/oauth2/token'
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
data = {
'grant_type': 'refresh_token',
'refresh_token': TD_REFRESH_TOKEN,
'access_type': '',
'code': '',
'client_id': TD_CONSUMER_KEY,
'redirect_uri': ''
}
response = requests.post(endpoint, headers=headers, data=data)
content = json.loads(response.content)
access_token = content["access_token"]
return access_token
def get_stock_data(ticker, access_token=None):
if access_token == None:
endpoint = f'https://api.tdameritrade.com/v1/marketdata/{ticker}/quotes?apikey={TD_CONSUMER_KEY}'
response = requests.get(url=endpoint)
content = json.loads(response.content)
stock_data = content[ticker]
else:
endpoint = f'https://api.tdameritrade.com/v1/marketdata/{ticker}/quotes'
headers = {
'Authorization':f'Bearer {access_token}'
}
response = requests.get(url=endpoint, headers=headers)
content = json.loads(response.content)
stock_data = content[ticker]
return stock_data
def get_accounts(access_token):
endpoint = "https://api.tdameritrade.com/v1/accounts"
headers = {"authorization": f"Bearer {access_token}"}
response = requests.get(url=endpoint, headers=headers)
content = json.loads(response.content)
return content
def send_order(ticker, amount, access_token):
endpoint = f'https://api.tdameritrade.com/v1/accounts/{TD_ACCOUNT_NUMBER}/orders'
headers = {"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json"
}
data = {
"complexOrderStrategyType": "NONE",
"orderType": "TRAILING_STOP",
"session": "NORMAL",
"stopPriceLinkBasis": "BID",
"stopPriceLinkType": "PERCENT",
"stopPriceOffset": int(100*ROLLING_STOP_LOSS),
"duration": "DAY",
"orderStrategyType": "SINGLE",
"orderLegCollection": [
{
"instruction": "BUY",
"quantity": amount,
"instrument": {
"symbol": ticker,
"assetType": "EQUITY"
}
}
]
}
data = json.dumps(data)
response = requests.post(url=endpoint, headers=headers, data=data)
content = json.loads(response.content)
print(content)
return content
def set_position(ticker):
token = get_access_token()
ask_price = get_stock_data(ticker, token)["askPrice"]
#accounts = get_accounts(token)
#available_cash = accounts[0]["securitiesAccount"]["currentBalances"]["cashAvailableForTrading"]
log = {}
with open('performance.json', newline='\n') as f:
log = json.load(f)
available_cash = log[-1]["AVAILABLE CASH"]-log[-1]["COST"]
trade_allocation = available_cash * CAPITAL_ALLOCATION_RATE
total_shares = math.floor(trade_allocation/ask_price)
print("------------------")
details = {
"TICKER": ticker,
"ASK PRICE": ask_price,
"AVAILABLE CASH": available_cash,
"ALLOCATION RATE": CAPITAL_ALLOCATION_RATE,
"TRADE ALLOCATION": trade_allocation,
"BUYING": total_shares,
"COST": total_shares*ask_price
}
for key, value in details.items():
print(f"{key}: {value}")
log.append(details)
with open('performance.json', 'w', newline='\n') as f:
json.dump(log, f)
```
|
{
"source": "Jfghanimah/josephghanimah.com",
"score": 2
}
|
#### File: josephghanimah.com/app/__init__.py
```python
import os
from flask import Flask
from flask_mail import Mail
from decouple import config
app = Flask(__name__)
mail = Mail()
class Config:
SECRET_KEY = config('SECRET_KEY')
GOOGLE_RECAPTCHA_SECRET_KEY = config('GOOGLE_RECAPTCHA_SECRET_KEY')
# Email configuration
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = config("MAIL_USERNAME")
MAIL_PASSWORD = config("<PASSWORD>")
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(Config)
mail.init_app(app)
from app.main.routes import main
from app.errors.handlers import errors
app.register_blueprint(main)
app.register_blueprint(errors)
return app
```
#### File: app/main/routes.py
```python
import urllib
import json
from datetime import datetime
from flask import (Blueprint, render_template, redirect, flash ,
url_for, send_from_directory, request, current_app)
from flask_mail import Message
from app.main.forms import ContactForm
from app import mail
main = Blueprint('main', __name__)
# this STS is to for HTTPS connections
@main.after_request
def apply_caching(response):
response.headers["Strict-Transport-Security"] = 'max-age=63072000; includeSubDomains; preload'
response.headers
return response
@main.route('/robots.txt')
@main.route('/sitemap.xml')
@main.route('/favicon.ico')
def static_from_root():
return send_from_directory(current_app.static_folder, request.path[1:])
@main.route("/index")
@main.route("/home")
@main.route("/main")
def redirect_home():
return redirect(url_for('main.home'))
@main.route("/", methods=['GET', 'POST'])
def home():
form = ContactForm()
if form.validate_on_submit():
if verify_reCAPTCHA():
form_answer = request.form['question']
correct_answer = datetime.today().day
if not (form_answer == str(correct_answer)):
print(form_answer, correct_answer)
flash("Your response to the math question is wrong!", "red")
else:
name = form.name.data
email = form.email.data
subject = form.subject.data
body = form.message.data
send_email(name=name, subject=subject, email=email, body=body, answer=form_answer)
flash("Your message has been sent!", "green")
return redirect(url_for("main.home")) #This resets the page entirely
else:
flash("Invalid reCAPTCHA. Please try again.", "red")
if form.email.errors:
flash(f"There was an error with your information: {', '.join(form.email.errors)}", "red")
return render_template("home.html", form=form)
@main.route("/video")
def video():
return render_template("video.html", video_name="WHOS-NEXT.mp4")
@main.route("/github")
def redirect_github():
return redirect("https://github.com/Jfghanimah")
@main.route("/linkedin")
def redirect_linkedin():
return redirect("https://www.linkedin.com/in/joseph-ghanimah/")
def send_email(name, email, subject, body, answer):
msg = Message(subject, sender=("Josephghanimah.com","<EMAIL>"))
msg.recipients=["<EMAIL>"]
msg.body = f"Name: {name}\nEmail: {email}\nMessage: {body}\nAnswer: {answer}"
mail.send(msg)
# Returns True or False depending on the google recaptcha api call
def verify_reCAPTCHA():
recaptcha_response = request.form.get('g-recaptcha-response')
url = 'https://www.google.com/recaptcha/api/siteverify'
values = {
'secret': current_app.config['GOOGLE_RECAPTCHA_SECRET_KEY'],
'response': recaptcha_response
}
data = urllib.parse.urlencode(values).encode()
req = urllib.request.Request(url, data=data)
response = urllib.request.urlopen(req)
result = json.loads(response.read().decode())
return result['success']
```
|
{
"source": "jfgonzalez99/bigDATA",
"score": 3
}
|
#### File: bigDATA/bigDATA/matrix.py
```python
from numpy import array, diag, mean, shape, sign, zeros
from numpy.linalg import det, eig, inv, norm
from numpy.linalg import solve as npsolve
from numpy.random import rand
from scipy.linalg import lu, svd
import matplotlib.pyplot as plt
def evectors(matrix):
""" Returns the eigenvectors of a given matrix.
Args
---
`matrix : np.array` A numpy matrix
Returns
---
`vecs : np.array` A numpy matrix containing the eigenvectors
"""
vals_vecs = eig(matrix)
return vals_vecs[1]
def evalues(matrix):
""" Returns the eigenvalues of a given matrix.
Args
---
`matrix : np.array` A numpy matrix
Returns
---
`vals : np.array` A numpy vector containing the eigenvalues
"""
vals_vecs = eig(matrix)
return vals_vecs[0]
def inverse(matrix):
""" Returns the inverse of a given matrix.
Args
---
`matrix : np.array` A numpy matrix
Returns
---
`inv : np.array` The inverse matrix
"""
invMatrix = inv(matrix)
return invMatrix
def covarianceMatrix(A, B):
""" Returns the covariance matrix of two given matrices `A` and `B`.
Args
---
`A : np.array` A `m` x `n` numpy matrix
`B : np.array` A `m` x `n` numpy matrix
Returns
---
`cMatrix : np.array` The covariance matrix
"""
N = len(A[0])
C = (A @ B.T) / N
return C
def SVDecomp(matrix):
""" Performs a singlular value decomposition a given matrix.
Args
---
`matrix : np.array` An `m` x `n` numpy matrix
Returns
---
`U : np.array` An `m` x `m` orthonormal matrix whose columns are the
eigenvectors of `matrix @ matrix.T`
`S : np.array` An `m` x `n` matrix with the singular values (square roots
of the non-zero eigenvalues of `matrix @ matrix.T`) along diagonal
`V : np.array` An `n` x `n` orthonormal matrix whose columns are the
eigenvectors of `matrix.T @ matrix`
"""
U, S, V = svd(matrix)
S = diag(S)
V = V.T
return U, S, V
def LUDecomp(matrix):
""" Performs an LU decomposition a given matrix.
Args
---
`matrix : np.array` A numpy matrix
Returns
---
`P : np.array` The permutation matrix
`L : np.array` Lower triangular matrix
`U : np.array` Upper triangular matrix
"""
P, L, U = lu(matrix)
P = P.T
return P, L, U
def polarDecomp(matrix):
""" Performs a polar decomposition on a given matrix and breaks down the matrix into its rotating and stretching components.
Args
---
`matrix : np.array` A numpy matrix
Returns
---
`rotate : np.array` The rotation matrix
`stretch : np.array` The stretch matrix
"""
U,S,V = SVDecomp(matrix)
    # Left polar decomposition: matrix = stretch @ rotate, where rotate = U @ V.T is
    # orthogonal and stretch = U @ S @ U.T is symmetric positive semi-definite.
    rotate = U @ V.T
    stretch = U @ S @ U.T
return rotate, stretch
def random(height, width):
""" Returns a random matrix of a given size.
Args
---
`height : int` The height of the returned matrix
`width : int` The width of the returned matrix
Returns
---
`randomMatrix : np.array` A random matrix of the desired height and width
"""
randomMatrix = rand(height, width)
return randomMatrix
def solve(A, b):
""" Solve for `x` a system of linear equations in the form of `Ax=b`.
Args
---
`A : np.array` The left hand matrix
`b : np.array` The right hand vector
Returns
---
`x : np.array` The solution vector
"""
x = npsolve(A,b)
return x
def solveMany(A, B):
""" Solves for many `x`s in a system of linear equations in the form of `Ax=b` where multiple `b`'s are given. Uses LU decomposition to bring time down from `O(n^3/3)` to `O(n^2)`.
Args
---
`A : np.array` The left hand matrix
`B : np.array` A matrix whose columns are composed of all the right hand vectors
Returns
---
`X : np.array` A matrix whose columns are composed of all the solution vectors that correspond with their respective column in `B`
"""
P,L,U = LUDecomp(A)
N = len(B[0])
X = zeros(shape(B))
for i in range(N):
c = solve(L, P @ B[:,i])
x_i = solve(U, c)
X[:,i] = x_i
return X
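# Minimal usage sketch for solveMany (hypothetical values): with A = diag(2, 4) and B whose
# columns are (2, 4) and (4, 8), the returned X has columns (1, 1) and (2, 2); each column of X
# solves A @ x = b for the matching column b of B.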
def perturb(A, b, delta_b):
""" Perturbs the system `Ax=b` by `delta_b`.
Args
---
`A : np.array` The left hand matrix
`b : np.array` The right hand vector
`delta_b : np.array` The perturbing vector
Returns
---
`relError : float` The relative error to the solution caused by `delta_b`
`relPerturbation : float` The relative perturbation of `b` caused by `delta_b`
"""
x1 = solve(A, b)
x2 = solve(A, b + delta_b)
delta_x = x2 - x1
relError = norm(delta_x)/norm(x1)
relPerturbation = norm(delta_b)/norm(b)
return relError, relPerturbation
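# For reference: the relative error returned by perturb is bounded by cond(A) times the relative
# perturbation, so the two quantities stay close for well-conditioned matrices A.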
def optimalFit(X, Y, plot=False):
""" Given two sets of points, finds the optimal shift and rotation to fit the points in matrix `X` onto `Y`.
Args
---
`X : np.array` An `m` x `n` matrix that represents the set of points to be shifted and rotated
`Y : np.array` An `m` x `n` matrix that represents the desired set of points to be shifted onto
`plot : bool` If set to true and data is 2-dimensional will plot the points and ideal transformation
Returns
---
`X_Translation : np.array` An `m` x `1` vector that is the optimal translation of `X` onto `Y`
`X_Rotation : np.array` An `m` x `m` matrix that is the optimal rotation of `X` onto `Y`
`X_Translated_Rotated : np.array` `X` after the optimal shift and rotation has been applied
"""
M = len(X[:,0])
N = len(X[0])
# Find center of mass of X and Y
X_Center = array([mean(X[i]) for i in range(M)])
Y_Center = array([mean(Y[i]) for i in range(M)])
# Optimal shift of X
X_Translation = Y_Center - X_Center
# Shift X to optimal position
X_Translated = zeros(shape(X))
for i in range(N):
X_Translated[:,i] = X[:,i] + X_Translation
# Find optimal rotation of X
C = Y @ X_Translated.T
SVD = SVDecomp(C)
U,V = SVD[0], SVD[2]
X_Rotation = U @ V.T
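    # This is the orthogonal Procrustes (Kabsch-style) rotation that best maps the centred X onto Y.
    # Note that no det() sign correction is applied, so the result may include a reflection.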
# Rotate X to optimal position
X_Translated_Rotated = zeros(shape(X))
for i in range(N):
X_Translated_Rotated[:,i] = X_Rotation @ X_Translated[:,i]
if plot and (M == 2):
# Plot original points
subplt1 = plt.subplot(1, 2, 1)
hl1, = plt.plot(X[0], X[1], '.', color="red", markersize=7)
hl2, = plt.plot(Y[0], Y[1], '.', color="blue", markersize=7)
plt.legend([hl1, hl2], ['X', 'Y'])
plt.title("Original Points")\
# Plot tranformed points
plt.subplot(1, 2, 2, sharex=subplt1, sharey=subplt1)
hl3, = plt.plot(X_Translated_Rotated[0], X_Translated_Rotated[1], '.',
color="red", markersize=7)
hl4, = plt.plot(Y[0], Y[1], '.', color="blue", markersize=7)
plt.legend([hl3, hl4], ['X_Translated_Rotated', 'Y'])
plt.title("Optimal Transformation")
plt.show()
return X_Translation, X_Rotation, X_Translated_Rotated
```
|
{
"source": "jfgreen-tw/fake-mesh",
"score": 3
}
|
#### File: jfgreen-tw/fake-mesh/mesh_client_test.py
```python
from __future__ import absolute_import, print_function
from unittest import TestCase, main
import random
import signal
import sys
import traceback
from mesh_client import MeshClient, MeshError, default_ssl_opts
def print_stack_frames(signum=None, frame=None):
for frame in sys._current_frames().values():
traceback.print_stack(frame)
print()
signal.signal(signal.SIGUSR1, print_stack_frames)
class TestError(Exception):
pass
class MeshClientTest(TestCase):
uri = 'https://localhost:8829'
def setUp(self):
self.alice_mailbox = str(random.randint(0, 1000000000000))
self.bob_mailbox = str(random.randint(0, 1000000000000))
self.alice = MeshClient(
self.uri,
self.alice_mailbox,
'password',
max_chunk_size=5,
**default_ssl_opts)
self.bob = MeshClient(
self.uri,
self.bob_mailbox,
'password',
max_chunk_size=5,
**default_ssl_opts)
def test_handshake(self):
alice = self.alice
hand_shook = alice.handshake()
self.assertEqual(hand_shook, b"hello")
def test_send_receive(self):
alice = self.alice
bob = self.bob
message_id = alice.send_message(self.bob_mailbox, b"Hello Bob 1")
self.assertEqual([message_id], bob.list_messages())
msg = bob.retrieve_message(message_id)
self.assertEqual(msg.read(), b"Hello Bob 1")
self.assertEqual(msg.sender, self.alice_mailbox)
self.assertEqual(msg.recipient, self.bob_mailbox)
msg.acknowledge()
self.assertEqual([], bob.list_messages())
def test_line_by_line(self):
alice = self.alice
bob = self.bob
message_id = alice.send_message(self.bob_mailbox, b"Hello Bob 1\nHello Bob 2")
self.assertEqual([message_id], bob.list_messages())
msg = bob.retrieve_message(message_id)
self.assertEqual(list(iter(msg)), [b"Hello Bob 1\n", b"Hello Bob 2"])
def test_readline(self):
alice = self.alice
bob = self.bob
message_id = alice.send_message(self.bob_mailbox, b"Hello Bob 1\nHello Bob 2")
self.assertEqual([message_id], bob.list_messages())
msg = bob.retrieve_message(message_id)
self.assertEqual(msg.readline(), b"Hello Bob 1\n")
def test_readlines(self):
alice = self.alice
bob = self.bob
message_id = alice.send_message(self.bob_mailbox, b"Hello Bob 1\nHello Bob 2")
self.assertEqual([message_id], bob.list_messages())
msg = bob.retrieve_message(message_id)
self.assertEqual(msg.readlines(), [b"Hello Bob 1\n", b"Hello Bob 2"])
def test_transparent_compression(self):
alice = self.alice
bob = self.bob
print("Sending")
alice._transparent_compress = True
message_id = alice.send_message(
self.bob_mailbox, b"Hello Bob Compressed")
self.assertEqual([message_id], bob.list_messages())
print("Receiving")
msg = bob.retrieve_message(message_id)
self.assertEqual(msg.read(), b"Hello Bob Compressed")
self.assertEqual(msg.mex_header('from'), self.alice_mailbox)
msg.acknowledge()
self.assertEqual([], bob.list_messages())
def test_iterate_and_context_manager(self):
alice = self.alice
bob = self.bob
alice.send_message(self.bob_mailbox, b"Hello Bob 2")
alice.send_message(self.bob_mailbox, b"Hello Bob 3")
messages_read = 0
for (msg, expected) in zip(bob.iterate_all_messages(),
[b"Hello Bob 2", b"Hello Bob 3"]):
with msg:
self.assertEqual(msg.read(), expected)
messages_read += 1
self.assertEqual(2, messages_read)
self.assertEqual([], bob.list_messages())
def test_context_manager_failure(self):
alice = self.alice
bob = self.bob
message_id = alice.send_message(self.bob_mailbox, b"Hello Bob 4")
try:
with bob.retrieve_message(message_id) as msg:
self.assertEqual(msg.read(), b"Hello Bob 4")
raise TestError()
except TestError:
pass
self.assertEqual([message_id], bob.list_messages())
def test_optional_args(self):
alice = self.alice
bob = self.bob
message_id = alice.send_message(
self.bob_mailbox,
b"Hello Bob 5",
subject="Hello World",
filename="upload.txt",
local_id="12345",
message_type="DATA",
process_id="321",
workflow_id="111",
encrypted=False,
compressed=False)
with bob.retrieve_message(message_id) as msg:
self.assertEqual(msg.subject, "Hello World")
self.assertEqual(msg.filename, "upload.txt")
self.assertEqual(msg.local_id, "12345")
self.assertEqual(msg.message_type, "DATA")
self.assertEqual(msg.process_id, "321")
self.assertEqual(msg.workflow_id, "111")
self.assertFalse(msg.encrypted)
self.assertFalse(msg.compressed)
message_id = alice.send_message(
self.bob_mailbox, b"Hello Bob 5", encrypted=True, compressed=True)
with bob.retrieve_message(message_id) as msg:
self.assertTrue(msg.encrypted)
self.assertTrue(msg.compressed)
if __name__ == "__main__":
main()
```
#### File: src/fake_mesh/wsgi_helpers.py
```python
from __future__ import print_function
import logging
import sys
from wsgiref.headers import Headers
from wsgiref.util import request_uri
class DebugMiddleware(object):
def __init__(self, app):
self._app = app
def __call__(self, environ, start_response):
request_line = environ['REQUEST_METHOD'] + ' ' + request_uri(environ)
print(request_line, file=sys.stderr)
if 'CONTENT_TYPE' in environ:
print('Content-Type:', environ['CONTENT_TYPE'], file=sys.stderr)
for k, v in environ.items():
if k.startswith('HTTP_'):
print(k[5:].lower().replace('_', '-') + ': ' + v,
file=sys.stderr)
if 'wsgi.input' in environ:
body = environ['wsgi.input']
old_body_read = body.read
def read(*args):
result = old_body_read(*args)
print(result.decode('iso-8859-1', 'replace'), file=sys.stderr)
return result
body.read = read
def inner_start_response(status, headers, exc_info=None):
print(file=sys.stderr)
print(status, file=sys.stderr)
print(Headers(headers), file=sys.stderr)
print(file=sys.stderr)
if exc_info is None:
return start_response(status, headers)
else:
return start_response(status, headers, exc_info)
for data in self._app(environ, inner_start_response):
sys.stderr.write(data.decode('iso-8859-1', 'replace'))
yield data
print(file=sys.stderr)
class LoggingMiddleware(object):
def __init__(self, app, logger='wsgi_request'):
self._app = app
self._logger = logging.getLogger(logger)
def __call__(self, environ, start_response):
method = environ['REQUEST_METHOD']
uri = request_uri(environ)
def inner_start_response(status, headers, exc_info=None):
inner_start_response.status = status
if exc_info is None:
return start_response(status, headers)
else:
return start_response(status, headers, exc_info)
result = self._app(environ, inner_start_response)
self._logger.info("%s %s -> %s",
method, uri, inner_start_response.status)
return result
```
|
{
"source": "jfharden/post-timestamp-app-poc",
"score": 2
}
|
#### File: lambdas/dynamo_inserter/lambda_handler.py
```python
import boto3
import os
from dynamo_inserter.dynamo_inserter import DynamoInserter
def lambda_handler(event, context):
boto3_session = boto3.Session()
dynamo_table = os.environ["DYNAMO_TABLE"]
dynamo_inserter = DynamoInserter(boto3_session.client("dynamodb"), dynamo_table)
dynamo_inserter.insert(event, context)
return
```
#### File: sqs_to_s3/sqs_to_s3/sqs_to_s3.py
```python
import hashlib
import json
from base64 import b64encode
from datetime import datetime, timezone
class SQSToS3:
"""Accepts sqs events and writes the messages as files to s3
"""
def __init__(self, s3_client, s3_bucket):
"""Initialise
Args:
s3_client (boto3.S3.Client): Boto3 S3 Client
s3_bucket (string): Name of the s3 bucket to deliver to
"""
self._s3_client = s3_client
self._s3_bucket = s3_bucket
def deliver(self, event, context):
"""Deliver the records contained withtin the event to S3
Args:
event (dict): Event JSON as delivered by SQS
context (LambdaContext): Request Context as provided by SQS
Returns:
dict: The s3 PutObject response
"""
# Ignoring encoding errors since we know we are dealing with a bad message, and that could be
# corrupted encoding
event_bytes = json.dumps(event).encode("utf-8", errors="ignore")
md5_digest = hashlib.md5(event_bytes).digest()
event_md5sum = b64encode(md5_digest).decode("utf-8")
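        # S3 expects the Content-MD5 header as the base64-encoded binary MD5 digest (not the hex digest)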
# Not using isoformat time so we get more compatible filenames on s3
s3_key = "{timestamp}_{request_id}.json".format(
timestamp=datetime.now(timezone.utc).strftime("%Y-%m-%dT%H-%M-%SZ"),
request_id=context.aws_request_id,
)
return self._s3_client.put_object(
Bucket=self._s3_bucket,
Key=s3_key,
Body=event_bytes,
ContentMD5=event_md5sum,
)
```
|
{
"source": "jfharden/simple-web-crawler",
"score": 3
}
|
#### File: simple-web-crawler/crawler/crawler.py
```python
import logging
from crawler.site_map import SiteMap
from crawler.links.link import Link
from crawler.pages.page_fetcher import PageFetcher
class Crawler:
"""Crawls the given domain
Attributes:
site_map: The site map of the crawled domain.
"""
site_map = None
def __init__(self, start_domain):
"""Initialiser
Args:
start_domain (string): The domain to start crawling
"""
self._start_link = Link(start_domain, "/")
self.site_map = SiteMap()
self._links_to_visit = set()
def crawl(self):
"""Crawl the domain
"""
logging.info("Fetching: {}".format(self._start_link))
start_page = PageFetcher.get(self._start_link)
self.site_map.add_page(start_page)
self._links_to_visit = self._determine_links_to_visit(start_page)
while len(self._links_to_visit) != 0:
self._crawl_remaining_pages()
def _crawl_remaining_pages(self):
"""After the first page has been crawled, spider out and crawl all out links within our subdomain which haven't
yet been visited
"""
new_links_to_visit = set()
visited_links = set()
for link in self._links_to_visit:
logging.info("Fetching: {}".format(link))
page = PageFetcher.get(link)
visited_links.add(link)
self.site_map.add_page(page)
new_links_to_visit.update(self._determine_links_to_visit(page))
self._links_to_visit.update(new_links_to_visit)
# Remove the now visited links
self._links_to_visit.difference_update(visited_links)
def _determine_links_to_visit(self, page):
"""Get the links we still need to visit from the page.
Args:
page (crawler.pages.page.Page): The page to extract the links from
Returns:
list: List of crawler.links.link.Link, only includes links which are inside the subdomain and still
not yet visited
"""
links_to_visit = set()
for link in page.out_links:
if link.in_crawled_domain() and not self.site_map.link_already_visited(link):
links_to_visit.add(link)
return links_to_visit
```
#### File: crawler/pages/page_fetcher.py
```python
import requests
from crawler.pages.page import Page
class PageFetcher:
"""Gets pages
"""
    @staticmethod
    def get(link):
"""Get the page at the specified link
Args:
link (crawler.links.link.Link): The link to fetch
Returns:
crawler.pages.page: The page
"""
response = requests.get(link.url)
return Page(link, response.text)
```
|
{
"source": "jfhbrook/notion-tools",
"score": 2
}
|
#### File: notion-tools/notion/ctx.py
```python
from notion.client import NotionClient
from notion.settings import Settings
class Context:
def __init__(self):
self.settings = Settings.from_file()
self._client = None
def get_client(self):
if not self._client:
self.settings.validate()
self._client = NotionClient(token_v2=self.settings.token, monitor=False)
return self._client
def update_settings(self, **kwargs):
self.settings = self.settings.update(**kwargs)
```
|
{
"source": "jfhilliard/TestProject",
"score": 3
}
|
#### File: Sandbox/test/test_ffts.py
```python
import unittest
import numpy as np
from numpy.testing import assert_allclose
from Sandbox.utils.ffts import ffts, iffts, ffts2, iffts2
class TestFFTUtils(unittest.TestCase):
def test_ffts_odd(self):
"""Test FFTS with odd length"""
x = [1, 1, 1]
y = ffts(x)
expected_y = [0, 3, 0]
assert_allclose(y, expected_y)
def test_ffts_even(self):
"""Test FFTS with even length"""
x = [1, 1, 1, 1]
y = ffts(x)
expected_y = [0, 0, 4, 0]
assert_allclose(y, expected_y)
def test_iffts_odd(self):
"""Test IFFTS with odd length"""
x = [1, 1, 1]
y = iffts(x)
expected_y = [0, 1, 0]
assert_allclose(y, expected_y)
def test_iffts_even(self):
"""Test IFFTS with even length"""
x = [1, 1, 1, 1]
y = iffts(x)
expected_y = [0, 0, 1, 0]
assert_allclose(y, expected_y)
def test_1d_inverses(self):
"""Test that FFTS and IFFTS are inverses of each other"""
def check_inverses(x):
y = ffts(x)
z = iffts(y)
assert_allclose(x, z)
y = iffts(x)
z = ffts(y)
assert_allclose(x, z)
x = [1, 1, 1, 1]
check_inverses(x)
x = [1, 1, 1]
check_inverses(x)
x = [1, 2, 3]
check_inverses(x)
x = [1, 1, 2, 3]
check_inverses(x)
def test_ffts2_odd_odd(self):
"""Test FFTS2 with odd x odd input"""
x = np.ones((3, 3))
y = ffts2(x)
expected_y = [[0, 0, 0],
[0, 9, 0],
[0, 0, 0]]
assert_allclose(y, expected_y)
def test_ffts2_odd_even(self):
"""Test FFTS2 with odd x even input"""
x = np.ones((3, 4))
y = ffts2(x)
expected_y = [[0, 0, 0, 0],
[0, 0, 12, 0],
[0, 0, 0, 0]]
assert_allclose(y, expected_y)
y = ffts2(x.T)
assert_allclose(y.T, expected_y)
def test_ffts2_even_even(self):
"""Test FFTS2 with even x even input"""
x = np.ones((4, 4))
y = ffts2(x)
expected_y = [[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 16, 0],
[0, 0, 0, 0]]
assert_allclose(y, expected_y)
def test_iffts2_odd_odd(self):
"""Test IFFTS2 with odd x odd input"""
x = np.ones((3, 3))
y = iffts2(x)
expected_y = [[0, 0, 0],
[0, 1, 0],
[0, 0, 0]]
assert_allclose(y, expected_y)
def test_iffts2_odd_even(self):
"""Test IFFTS2 with even x even input"""
x = np.ones((3, 4))
y = iffts2(x)
expected_y = [[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]]
assert_allclose(y, expected_y)
y = iffts2(x.T)
assert_allclose(y.T, expected_y)
def test_iffts2_even_even(self):
"""Test IFFTS2 with even x even input"""
x = np.ones((4, 4))
y = iffts2(x)
expected_y = [[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 0]]
assert_allclose(y, expected_y)
def test_2d_inverses(self):
"""Test that FFTS and IFFTS are inverses of each other"""
def check_inverses(x):
y = ffts2(x)
z = iffts2(y)
assert_allclose(x, z)
y = iffts2(x)
z = ffts2(y)
assert_allclose(x, z)
x = np.ones((3, 3))
check_inverses(x)
x = np.ones((3, 4))
check_inverses(x)
x = np.ones((4, 4))
check_inverses(x)
if __name__ == '__main__':
unittest.main(verbosity=2)
```
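The `Sandbox.utils.ffts` module itself is not shown here. The expected values above are consistent with "centered" FFT wrappers built from `fftshift`/`ifftshift`; a minimal sketch that satisfies the same assertions, assuming that is the intent:
```python
# Sketch of shifted-FFT helpers consistent with the test expectations above
# (not the actual Sandbox.utils.ffts implementation, just one that matches it).
import numpy as np

def ffts(x):
    """FFT with the zero-frequency bin moved to the centre of the output."""
    return np.fft.fftshift(np.fft.fft(np.fft.ifftshift(x)))

def iffts(x):
    """Inverse of ffts: un-shift, inverse FFT, then re-centre."""
    return np.fft.fftshift(np.fft.ifft(np.fft.ifftshift(x)))

def ffts2(x):
    """2-D counterpart of ffts."""
    return np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(x)))

def iffts2(x):
    """2-D counterpart of iffts."""
    return np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(x)))
```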
#### File: Sandbox/test/test_playground.py
```python
import unittest
from Sandbox.playground.playground import sum_list
class TestPlayground(unittest.TestCase):
"""Test my playground functions"""
def test_add_list2(self):
"""Test addition of list of 2 elements"""
x = [1, 2]
y = sum_list(x)
expected_value = 3
self.assertEqual(y, expected_value)
def test_add_list3(self):
"""Test addition of list of 3 elements"""
x = [1, 2, 3]
y = sum_list(x)
expected_value = 6
self.assertEqual(y, expected_value)
def test_with_tuple(self):
"""Test that it works with a tuple input"""
x = (1, 2, 3, 4)
y = sum_list(x)
expected_value = 10
self.assertEqual(y, expected_value)
if __name__ == '__main__':
unittest.main(verbosity=2)
```
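`Sandbox.playground.playground.sum_list` is not included in this excerpt; judging from the assertions it simply sums any iterable of numbers, so a sketch like the following would pass the same tests:
```python
# Sketch only; the real playground module is not shown here.
def sum_list(values):
    """Return the sum of a list or tuple of numbers."""
    total = 0
    for value in values:
        total += value
    return total
```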
|
{
"source": "JFHoelscher/cblearn",
"score": 2
}
|
#### File: datasets/tests/test_musician_similarity.py
```python
import numpy as np
import pytest
from cblearn.datasets import fetch_musician_similarity
@pytest.mark.remote_data
def test_fetch_musician_similarity(tmp_path):
data_home = tmp_path / 'cblearn_datasets'
bunch = fetch_musician_similarity(data_home=data_home, shuffle=False)
assert bunch.data.shape == (131_970, 3)
assert bunch.judgement_id.shape == (131_970, )
assert bunch.user.shape == (131_970, )
assert bunch.survey_or_game.shape == (131_970, )
assert bunch.artist_name.shape == (448, )
assert bunch.artist_id.shape == (448, )
assert bunch.artist_name[bunch.data][0, 0] == 'queen'
assert tuple(bunch.artist_id[bunch.data][0]) == (4325, 1735, 3295)
assert tuple(bunch.artist_id[bunch.data][-1]) == (3603, 4913, 4948)
triplets = fetch_musician_similarity(data_home=data_home, shuffle=False, return_triplets=True)
np.testing.assert_equal(bunch.data, triplets)
np.testing.assert_equal(bunch.artist_name[triplets], bunch.artist_name[bunch.data])
shuffle_bunch = fetch_musician_similarity(data_home=data_home, random_state=42)
assert not np.all(shuffle_bunch.data == bunch.data)
assert not np.all(shuffle_bunch.user == bunch.user)
np.testing.assert_equal(shuffle_bunch.user.sort(), bunch.user.sort())
```
#### File: cblearn/datasets/_things_similarity.py
```python
import csv
import io
from pathlib import Path
import logging
import joblib
import os
from os.path import join
from typing import Optional, Union
from urllib.request import urlretrieve
import zipfile
import numpy as np
from sklearn.datasets import _base
from sklearn.utils import check_random_state, Bunch
ARCHIVE = _base.RemoteFileMetadata(
filename='osfstorage-archive.zip',
url='https://files.osf.io/v1/resources/z2784/providers/osfstorage/?zip=',
checksum=('cannot check - zip involves randomness'))
logger = logging.getLogger(__name__)
def fetch_things_similarity(data_home: Optional[os.PathLike] = None, download_if_missing: bool = True,
shuffle: bool = True, random_state: Optional[np.random.RandomState] = None,
return_data: bool = False) -> Union[Bunch, np.ndarray]:
""" Load the things similarity dataset (odd-one-out).
=================== =====================
Trials 146,012
Objects (Things) 1,854
Query 3 images, odd one out
=================== =====================
See :ref:`things_similarity_dataset` for a detailed description.
>>> dataset = fetch_things_similarity(shuffle=True) # doctest: +REMOTE_DATA
>>> dataset.word[[0, -1]].tolist() # doctest: +REMOTE_DATA
['aardvark', 'zucchini']
>>> dataset.data.shape # doctest: +REMOTE_DATA
(146012, 3)
Args:
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : optional, default=True
shuffle: default = True
Shuffle the order of triplet constraints.
random_state: optional, default = None
Initialization for shuffle random generator
return_data : boolean, default=False.
If True, returns the query array instead of a Bunch object.
Returns:
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray, shape (n_query, 3)
Each row corresponds to an odd-one-out query; entries are object indices.
The first column is the selected odd one.
word : (n_objects,)
Single word associated with the thing objects.
synset : (n_objects,)
Wordnet Synset associated with the thing objects.
wordnet_id : (n_objects,)
Wordnet Id associated with the thing objects.
thing_id : (n_objects,)
Unique Id string associated with the thing objects.
DESCR : string
Description of the dataset.
data : numpy array (n_query, 3)
Returned instead of the Bunch when `return_data=True`.
Raises:
IOError: If the data is not locally available, but download_if_missing=False
"""
data_home = Path(_base.get_data_home(data_home=data_home))
if not data_home.exists():
data_home.mkdir()
filepath = Path(_base._pkl_filepath(data_home, 'things_similarity.pkz'))
if not filepath.exists():
if not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
logger.info('Downloading things similarity data from {} to {}'.format(ARCHIVE.url, data_home))
archive_path = (ARCHIVE.filename if data_home is None
else join(data_home, ARCHIVE.filename))
urlretrieve(ARCHIVE.url, archive_path)
with zipfile.ZipFile(archive_path) as zf:
with zf.open('data/data1854_batch5_test10.txt', 'r') as f:
data = np.loadtxt(f, delimiter=' ')
with zf.open('items1854names.tsv', 'r') as f:
objects = np.array(list(csv.reader(io.TextIOWrapper(f), dialect='excel-tab'))[1:]).T
joblib.dump((data, objects), filepath, compress=6)
os.remove(archive_path)
else:
(data, objects) = joblib.load(filepath)
if shuffle:
random_state = check_random_state(random_state)
data = random_state.permutation(data)
if return_data:
return data
module_path = Path(__file__).parent
with module_path.joinpath('descr', 'things_similarity.rst').open() as rst_file:
fdescr = rst_file.read()
return Bunch(data=data,
word=objects[0],
synset=objects[1],
wordnet_id=objects[2],
thing_id=objects[5],
DESCR=fdescr)
```
#### File: cblearn/embedding/_mlds.py
```python
from typing import Union
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state
from sklearn.linear_model import LogisticRegression
from scipy.stats import norm
from scipy.optimize import minimize
from cblearn import utils
from cblearn.embedding._base import TripletEmbeddingMixin
class MLDS(BaseEstimator, TripletEmbeddingMixin):
""" A maximum-likelihood difference scaling (MLDS) estimator .
MLDS [1]_ is limited to monotonic, one-dimensional embeddings.
note::
This method assumes, that the objects can be embedded in a one-dimensional space
and that the object indices are consistent to their order in this space.
Attributes:
embedding_: The final embedding, shape (n_objects, 1)
log_likelihood_: The final log-likelihood of the embedding.
n_iter_: Optimization iterations
>>> from cblearn import datasets
>>> true_embedding = sorted(np.random.uniform(1, 2, (15, 1)))
>>> triplets = datasets.make_random_triplets(true_embedding, size=400, monotonic=True, result_format='list-order')
>>> triplets.shape, np.unique(triplets).shape
((400, 3), (15,))
>>> estimator = MLDS(random_state=42).fit(triplets)
>>> estimator.embedding_.shape
(15, 1)
>>> estimator.score(triplets) > 0.9
True
>>> estimator = MLDS(method='optim', random_state=42).fit(triplets)
>>> estimator.score(triplets) > 0.9
True
References
----------
.. [1] <NAME>., & <NAME>. (2012). Modeling Psychophysical Data in R.
Springer New York. https://doi.org/10.1007/978-1-4614-4475-6
"""
def __init__(self, n_components: int = 1, random_state: Union[None, int, np.random.RandomState] = None,
method='glm', verbose: int = 0, max_iter: int = 1000):
"""
Args:
n_components: Embedding dimension for api compatibility. Only 1 is supported for MLDS.
random_state: The seed of the pseudo random number generator used to initialize the optimization.
method: Optimizer method, either 'glm' or 'optim'.
verbose: Enable verbose output.
max_iter: Maximum number of optimization iterations.
"""
if n_components != 1:
raise ValueError(f"MLDS expects n_components=1, got {n_components}")
self.n_components = n_components
self.random_state = random_state
self.method = method
self.verbose = verbose
self.max_iter = max_iter
def _log_likelihood(self, x, quadruplet, answer, float_min=np.finfo(float).tiny):
prob = norm.cdf((x[quadruplet[:, 0]] - x[quadruplet[:, 1]])
- (x[quadruplet[:, 2]] - x[quadruplet[:, 3]]))
log_likelihood = (np.log(np.maximum(prob ** answer, float_min))
+ np.log(np.maximum((1 - prob) ** (1 - answer), float_min)))
return log_likelihood.sum()
def fit(self, X: utils.Query, y: np.ndarray = None) -> 'MLDS':
"""Computes the embedding.
Args:
X: The training input samples, shape (n_samples, 3)
y: Ignored
Returns:
This estimator
"""
random_state = check_random_state(self.random_state)
n_objects = X.max() + 1
triplets, answer = utils.check_query_response(X, y, result_format='list-boolean')
quads = triplets[:, [1, 0, 0, 2]]
if self.method.lower() == 'glm':
X01, rows = np.zeros((len(quads), n_objects)), np.arange(len(triplets))
X01[rows, quads[:, 0]] += 1
X01[rows, quads[:, 3]] += 1
X01[rows, quads[:, 1]] -= 1
X01[rows, quads[:, 2]] -= 1
glm = LogisticRegression(verbose=self.verbose, max_iter=self.max_iter,
fit_intercept=False, random_state=random_state)
glm.fit(X01, answer.astype(int))
self.embedding_ = glm.coef_.reshape(-1, 1)
self.log_likelihood_ = glm.predict_log_proba(X01)[rows, answer.astype(int)].mean()
self.n_iter_ = glm.n_iter_
elif self.method.lower() == 'optim':
def objective(*args):
return -self._log_likelihood(*args)
init = np.linspace(0, 1, n_objects)
result = minimize(objective, init, args=(quads, answer),
method='L-BFGS-B', options=dict(maxiter=self.max_iter, disp=self.verbose))
if self.verbose and not result.success:
print(f"MLDS's optimization failed with reason: {result.message}.")
self.embedding_ = result.x.reshape(-1, 1)
self.log_likelihood_ = -result.fun
self.n_iter_ = result.nit
else:
raise ValueError(f"Expects optimizer method in {{glm, optim}}, got {self.method}")
self.embedding_ -= self.embedding_.min()
return self
```
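The 'glm' branch of `fit` rewrites each triplet (i, j, k) as the quadruplet (j, i, i, k) and encodes it as a signed indicator row, so the fitted logistic-regression coefficients play the role of the one-dimensional scale values. A small worked illustration of that encoding (hypothetical triplet, five objects):
```python
# Illustration of the design-matrix row built in MLDS.fit's 'glm' branch.
# Columns j and k get +1 and column i gets -2, so the row's linear predictor is
# x[j] + x[k] - 2*x[i] = (x[j] - x[i]) - (x[i] - x[k]), the same quantity used
# in _log_likelihood (which applies a probit rather than a logistic link).
import numpy as np

n_objects = 5
triplets = np.array([[2, 0, 4]])      # hypothetical triplet (i, j, k)
quads = triplets[:, [1, 0, 0, 2]]     # -> (j, i, i, k) = (0, 2, 2, 4)

X01 = np.zeros((len(quads), n_objects))
rows = np.arange(len(quads))
X01[rows, quads[:, 0]] += 1
X01[rows, quads[:, 3]] += 1
X01[rows, quads[:, 1]] -= 1
X01[rows, quads[:, 2]] -= 1
print(X01)                            # [[ 1.  0. -2.  0.  1.]]
```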
|
{
"source": "jfhucka/Configator",
"score": 2
}
|
#### File: splunk_ansible_code-master/backend/change_index.py
```python
lockBitLocation = "lockDirectory"
logFileLocation = "logs"
credential_name = "BreakMainSheet_credentials.json"
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Google Sheets API Python Break Main'
debug=True
import argparse
import os
import time
import urllib2
import yaml
import subprocess
import re
import uuid
import glob
import shutil
import httplib2
# sudo pip install --upgrade google-api-python-client
from apiclient import discovery
from apiclient.discovery import build
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
#######
#
# Setting the lock bit prevents the system from running two simultaneous processes to update the same
# host at the same time
# Note: This does not protect against multiple Ansible servers accessing the same host. In which
# case a lock bit must be set by the Ansible playbook on the target host.
#
#######
def set_lock(target_hostname, cwd):
lockFileName = target_hostname+".lck"
lockDir = cwd+"/"+lockBitLocation
lockFileFullPath = lockDir+"/"+lockFileName
if not os.path.isdir(lockDir):
os.makedirs(lockDir)
if (debug): print "Created lock directory "+lockDir
if os.path.isfile(lockFileFullPath):
if (debug): print "ERROR. Lock file exists. Host is already being updated."
exit(0)
else:
with open(lockFileFullPath, "w") as f:
f.write("")
f.close()
return(lockFileFullPath)
#######
#
# Remove the lock file so that other processes can update the same host
#
#######
def remove_lock(lockFileFullPath):
try:
os.remove(lockFileFullPath)
if (debug): print "Lock file removed"
except:
if (debug): print "ERROR. Not able to remove the lock file: "+lockFileFullPath
exit(0)
def stop_logging(fileHandle):
# Populate the logfile with an closing event ..
timestamp = time.strftime("%Y-%m-%d %H:%M:%S")
fileHandle.write(timestamp+" message=Stopped Logging.\n")
fileHandle.close()
return()
######
#
# Check that another process is not pushing or pulling down configs from the splunk git production repo
# If local bit is already set, then wait until unlocked.
# When unlocked, immediately set the lock bit with a unique guid name. Check that this process got the lock and then move on ....
#
#####
def check_set_repo_lock(cwd):
j=0
while j<10 :
lockFileName_wildcard = "git_repo*.lck"
lockDir = cwd+"/"+lockBitLocation
lockFileFullPath_wildcard = lockDir+"/"+lockFileName_wildcard
id = uuid.uuid1()
lockFileName = "git_repo_"+str(id)+".lck"
lockFileFullPath = lockDir+"/"+lockFileName
i = 0
x = glob.glob(lockFileFullPath_wildcard)
while i<100 and len(x)>0:
time.sleep(5)
x = glob.glob(lockFileFullPath_wildcard)
if (debug): print "Waiting for git_repo.lck bit to be cleared."
i=i+1
if i==100:
if (debug): print "git_repo.lck bit never cleared."
exit(0)
with open(lockFileFullPath, "w") as f:
f.write("")
f.close()
# Check to make sure that we got the lock
if os.path.isfile(lockFileFullPath):
if (debug): print("Created repo lock file "+lockFileFullPath)
return(lockFileFullPath)
else:
j=j+1
if (debug): print "Giving up. Not able to create lock file."
exit(0)
def get_splunk_home(cwd,target_hostname):
# Find all the instances of Splunk in the /var/directory/ directory.
# Try to get a "running" response from any one of those instances.
# ssh splunk1-dev.snc1 'ls -1 /var/directory/splunk*/bin/splunk'
command = ['ssh','-o','StrictHostKeyChecking no',target_hostname,'ls -1 /var/directory/splunk*/bin/splunk']
if (debug): print str(command)
output=subprocess.Popen(command,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = output.communicate()
splunk_lines = stdout.split()
for item in splunk_lines:
command = ['ssh',target_hostname,'sudo '+item+' status']
#if (debug): print str(command)
output=subprocess.Popen(command,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = output.communicate()
status_split = stdout.split("\n")
if "splunkd is running" in status_split[0]:
segments = item.split("/")
splunk_home = "/"+segments[1]+"/"+segments[2]+"/"+segments[3]+"/"
return(splunk_home)
return("")
def scp_input_files(cwd,target_hostname,splunk_home,whoiam):
# Find all the "inputs.conf" files in the Splunk working directory
# cp the inputs.conf file to "/tmp/etc-system-local-inputs.conf" and chown to the user's name
# scp all the files down to the local machine
command = ['ssh','-o','StrictHostKeyChecking no',target_hostname,'sudo find '+splunk_home+'etc -name inputs.conf']
#print str(command)
output=subprocess.Popen(command,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = output.communicate()
splunk_lines = stdout.split()
#print str(splunk_lines)
# Create a directory on the host computer to hold all the inputs.conf files
command = ['ssh',target_hostname,'mkdir /tmp/inputs_repo']
#print str(command)
output=subprocess.Popen(command,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = output.communicate()
#print str(stdout)
# Put all the inputs.conf files into /tmp/inputs_repo
dst_list = []
for input_file in splunk_lines:
if splunk_home in input_file:
dst_filename = input_file.replace("/","_")
dst_path = "/tmp/inputs_repo/"+dst_filename
dst_list.append(dst_path)
command = ['ssh',target_hostname,'sudo cp '+input_file+' '+dst_path]
#print str(command)
output=subprocess.Popen(command,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = output.communicate()
#print str(stdout)
# Chown the /tmp/inputs_repo so that is readable
command = ['ssh',target_hostname,'sudo chown -R '+whoiam+":"+whoiam+" /tmp/inputs_repo"]
#print str(command)
output=subprocess.Popen(command,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = output.communicate()
#print str(stdout)
# Make local inputs_repo to hold the incoming inputs.conf file
command = ['mkdir',cwd+"/inputs_repo"]
#print str(command)
output=subprocess.Popen(command,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = output.communicate()
#print str(stdout)
# scp the files inputs.conf files from the host into the local inputs_repo
command = ['scp',target_hostname+":/tmp/inputs_repo/*",cwd+"/inputs_repo"]
#print str(command)
output=subprocess.Popen(command,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = output.communicate()
#print str(stdout)
print "All inputs.conf downloaded to local repo "+cwd+"/inputs_repo"
return(True)
def process_local_input_files(cwd,target_hostname,splunk_home,whoiam,index_map):
# Only edit "local" files
# Read in the entire file and write out to a new file stanza-by-stanza
# 1. Write out everything up to the first stanza
# 2. Look at all data in the current stanza
# 3. find the sourcetype (if any)
# 4. Find the index (if any)
# 5. Look in the index map
# 6. Write out the new stanza
# 7. Rinse and repeat
#
# Return a list of new files (if any)
# Get list of all inputs.conf in inputs_repo
repo_path = cwd+"/inputs_repo/*inputs.conf"
input_files = glob.glob(repo_path)
new_files=[]
for input_file_name in input_files:
print ""
print "Processing file "+input_file_name
print "================"
if "local" in input_file_name:
print "This is a local file"
with open(input_file_name,'r') as content_file:
inputs_contents = content_file.read()
inputs_list = inputs_contents.split("\n")
#print "Read in the file "+input_file_name
file_size = len(inputs_list)
#print "File size is "+str(file_size)
file_index = 0
modified=0
new_inputs_list = []
first_stanza = 0
bad=0
# Read the entire inputs_list and build a new_inputs_list
while 1==1:
input_line = inputs_list[file_index]
#print ""
#print input_line
found_stanza = 0
if ("[" in input_line) and ("]" in input_line):
# Found a stanza. Look for a sourcetype or index KV pair
stanza_buffer = []
found_sourcetype = 0
found_stanza = 1
found_index=0
stanza_position = 0
index_name = ""
stanza_buffer.append(input_line)
#print "input_line="+str(input_line)+" stanza_buffer="+str(stanza_buffer)
file_index=file_index+1
input_line = inputs_list[file_index]
while not (("[" in input_line) and ("]" in input_line)):
if "index" in input_line:
# Found an index KV pair in the stanza. Parse it.
input_line_split = input_line.split("=")
index_name = "".join(input_line_split[1].split())
#print "Found index name "+index_name
index_index = stanza_position
found_index=1
elif "sourcetype" in input_line:
# Found a sourcetype KV pair in the stanza. Parse it.
input_line_split = input_line.split("=")
sourcetype_name = "".join(input_line_split[1].split())
#print "Found sourcetype named "+sourcetype_name
found_sourcetype = 1
stanza_buffer.append(input_line)
stanza_position = stanza_position+1
file_index=file_index+1
if file_index+1 >= file_size:
break
input_line = inputs_list[file_index]
# At this point, the entire stanza has been copied and parsed.
# Check to see if the recorded stanza had a sourcetype
if (found_sourcetype == 1):
# Check to see if we need to change the index or inject a new index KV pair
if index_name == "":
# The stanza did not have a declared index.
# Declare one and append to the stanza_buffer
try:
new_index = index_map[sourcetype_name]
if new_index == "":
print "ERROR. Found "+sourcetype_name+" in the csv sourcetype <-> index map, buthe NEW_INDEX value was missing."
except:
print "ERROR. The sourcetype was found in "+input_file_name
print "ERROR. .... but an assigned index for "+sourcetype_name+" was not found in the spreadsheet csv file."
bad=1
#return(bad)
index_kv = "index = "+new_index
stanza_buffer.append(index_kv)
modified=1
else:
# The stanza did have a declared index.
# Check to see if it is correct and modify if needed
try:
new_index = index_map[sourcetype_name]
if new_index == "":
print "ERROR. Found "+sourcetype_name+" in the csv sourcetype <-> index map, buthe NEW_INDEX balue was missing."
except:
print "ERROR. The sourcetype was found in "+input_file_name
print "ERROR. .... but an assigned index for "+sourcetype_name+" was not found in the spreadsheet csv file."
bad=1
#return(bad)
if index_name == new_index:
# The sourcetype has the correct index.
pass
else:
# The sourcetype has the wrong index. Replace the line with the correct index
index_kv = "index = "+new_index
stanza_buffer[index_index+1]=index_kv
modified=1
# Copy over the stanza
for item in stanza_buffer:
new_inputs_list.append(item)
elif (found_sourcetype == 0) and (found_index==1):
# The stanza had an index KV pair but not a sourcetype
print "WARN. A monitor in "+input_file_name+" had an index "+index_name+" but no soucetype."
for item in stanza_buffer:
new_inputs_list.append(item)
else:
# Found a stanza but did not see a sourcetype in the stanza. So just copy over. No edits.
for item in stanza_buffer:
new_inputs_list.append(item)
else:
# This is a line outside of a stanza
new_inputs_list.append(input_line)
file_index=file_index+1
# Exit out of loop if we have reached the end of the file
#print "file_index+1="+str(file_index+1)+" file_size="+str(file_size)
if file_index+1 >= file_size:
break
# Outside of the while loop
# Finished reading and parsing the file
if (modified == 1) and (bad==0):
# Dump the contents into a new file
new_file_name = input_file_name+".new"
new_file = open(new_file_name,"w")
for item in new_inputs_list:
item_scrub = item.replace("\n","")
new_file.write(item+"\n")
new_file.close()
new_files.append(new_file_name)
print "A new file was created =>"+new_file_name
else:
print "No changes to the file "+input_file_name
else:
print "This is not a local file. Moving on ...."
print "Advancing to the next inputs.conf file ..."
print "Finished processing all the inputs.conf files."
return(new_files)
def get_credentials():
#print str(tools.argparser)
#print str(argparse.ArgumentParser(parents=[tools.argparser]))
#print str(argparse.ArgumentParser(parents=[tools.argparser]).parse_args())
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
print str(flags)
client_secret_file = cwd+"/"+credential_name
if not os.path.exists(client_secret_file):
print "ERROR. The Google Sheet secret file "+client_secret_file+" does not exist. Not ablwe to get sourcetype <-> index mapping"
exit()
print "client_secret_file="+client_secret_file
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-break-main.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
print "Create credential file"
flow = client.flow_from_clientsecrets(client_secret_file, SCOPES)
flow.user_agent = APPLICATION_NAME
#credentials = tools.run_flow(flow, store, flags)
credentials = tools.run_flow(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
#
# python ./read_google_sheet.py --noauth_local_webserver
#
def read_index_map(cwd):
# python ./read_google_sheet.py --noauth_local_webserver
read_google_sheet_location = cwd+"/read_google_sheet.py"
command = ['python',read_google_sheet_location,'--noauth_local_webserver']
#print str(command)
output=subprocess.Popen(command,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = output.communicate()
#print "stdout="+str(stdout)
row_split = stdout.split("\n")
try:
sourcetype_dict = {}
sourcetype_list = []
sourcetype_kv_pair = row_split[0]
sourcetype_kv_split = sourcetype_kv_pair.split("=")
sourcetype_kv_values = sourcetype_kv_split[1]
sourcetype_cell_list = sourcetype_kv_values.split(",")
first = 1
for sourcetype_cell in sourcetype_cell_list:
#print "Here0"
sourcetype_cell_clean1 = sourcetype_cell.replace("[u'","")
sourcetype_cell_clean2 = sourcetype_cell_clean1.replace("']","")
sourcetype_cell_clean3 = sourcetype_cell_clean2.replace("[","")
#print "Here 0.5"
sourcetype_cell_clean4 = sourcetype_cell_clean3.replace(" ","")
sourcetype_cell_clean5 = sourcetype_cell_clean4.replace("]","")
#print "Here1"
if first == 1:
first=0
#print "Here2"
if sourcetype_cell_clean5 != "SOURCETYPE":
print "ERROR. Can not parse the sourcetype row returned by "+str(command)
print " "+str(sourcetype_cell_clean5)
print" Was expecting first cell value to be SOURCETYPE"
print str(sourcetype_kv_pair)
exit()
continue
#print "sourcetype cell = "+sourcetype_cell_clean5
sourcetype_dict[sourcetype_cell_clean5]=""
#print "Here3"
sourcetype_list.append(sourcetype_cell_clean5)
except:
print "ERROR. Can not parse the sourcetype row returned by "+str(command)
print str(sourcetype_kv_pair)
exit()
try:
index_kv_pair = row_split[1]
index_kv_split = index_kv_pair.split("=")
index_kv_values = index_kv_split[1]
index_cell_list = index_kv_values.split(",")
first = 1
index=0
for index_cell in index_cell_list:
index_cell_clean1 = index_cell.replace("[u'","")
index_cell_clean2 = index_cell_clean1.replace("']","")
index_cell_clean3 = index_cell_clean2.replace("[","")
index_cell_clean4 = index_cell_clean3.replace("]","")
index_cell_clean5 = index_cell_clean4.replace(" ","")
if first == 1:
first=0
if index_cell_clean5 != "NEW_INDEX":
print "ERROR. Can not parse the new new index returned by "+str(command)
print" Was expecting first cell value to be NEW_INDEX"
print str(index_kv_pair)
exit()
continue
sourcetype_name = sourcetype_list[index]
#print "index cell = "+index_cell_clean5+" index="+str(index)+" sourcetype_name="+sourcetype_name
sourcetype_dict[sourcetype_name]=index_cell_clean5
index=index+1
except:
print "ERROR. Can not parse the new_index returned by "+str(command)
#print str(index_kv_pair)
exit()
return(sourcetype_dict)
def scp_new_input_files(cwd,target_hostname,splunk_home,whoiam,new_files):
# Upload each new file into the /tmp directory
# chown each file
# move the file into the respective directory but KEEP the .new extention
# A separate script will back up the old inputs.conf and replace it with the inputs.conf.new
print "Important ... newly created inputs.conf.new files are being deposited onto "+target_hostname
print " These .new files do not take effect until they replace the inputs.conf files and Splunk is restarted\n"
for new_file in new_files:
command = ['scp',new_file,target_hostname+':/tmp/inputs_repo/']
#print str(command)
output=subprocess.Popen(command,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = output.communicate()
#print "stdout="+str(stdout)
command = ['ssh',target_hostname,'sudo chown splunk:splunk /tmp/inputs_repo/*.new']
#print str(command)
output=subprocess.Popen(command,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = output.communicate()
#print "stdout="+str(stdout)
new_file_split = new_file.split("/")
new_file_name = new_file_split[-1]
new_file_path = new_file_name.replace("_","/")
command = ['ssh',target_hostname,'sudo -u splunk cp /tmp/inputs_repo/'+new_file_name+' '+new_file_path]
#print str(command)
output=subprocess.Popen(command,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = output.communicate()
#print "stdout="+str(stdout)
print "Deposited "+new_file_path
return()
if __name__ == "__main__":
# Given a specific host, this script will :
# 1. read the index.map file and create an internal mapping between sourcetype and index
# 2. Find all the inputs.conf file in the working Splunk instance
# 3. scp all the inputs.conf files locally
# 4. process all the inputs.conf files and change/insert the index=_____
# 5. scp each inputs.conf back into its original directory.
# Parse arguments
parse = argparse.ArgumentParser(usage='%(prog)s hostname copy_new_files_to_host(T/F)', description='Change the "index=" KV pair for every sourcetype on the specified host according to the spreadsheet in Google Doc.')
parse.add_argument('hostname', nargs=1, help='The name of the host that has the Splunk FWDer configs. e.g. myhost.snc1')
#parse.add_argument('index_map',nargs=1, help='The name of the csv file that has the sourcetype-to-indexName mappsing')
parse.add_argument('copy_new_files_to_host',nargs=1, help='T or F to copy any newly created inputs.conf files up to the target host.')
args = parse.parse_args()
target_hostname = args.hostname[0]
#index_map = args.index_map[0]
copy_new_files_to_host = args.copy_new_files_to_host[0]
#target_hostname = "orders-app3.snc1"
#index_map = "ff"
#copy_new_files_to_host = "F"
# Create a lock file to prevent more than one process working on the same host at the same time
cwd = os.getcwd()
lockFile = set_lock(target_hostname,cwd)
# Read in and build the index_lut
index_lut = read_index_map(cwd)
print "\nFound "+str(len(index_lut))+" sourcetype in the sourcetype Google Sheet.\n"
if len(index_lut) != 0:
# Get to the target host and find Splunk Home. Home is were "/bin/splunk status" is running.
splunk_home = get_splunk_home(cwd,target_hostname)
if splunk_home != "":
if (debug): print "Splunk Home is "+splunk_home
command = ['whoami']
output=subprocess.Popen(command,stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = output.communicate()
whoiam = "".join(stdout.split())
print "Welcome "+whoiam
# Find all the "inputs.conf" files in the Splunk working directory
# cp the inputs.conf file to "/tmp/etc-system-local-inputs.conf" and chown to the user's name
# scp all the files down to the local machine
#result = scp_input_files(cwd,target_hostname,splunk_home,whoiam)
# process all the inputs.conf files and change/insert the index=_____
new_files = process_local_input_files(cwd,target_hostname,splunk_home,whoiam,index_lut)
if len(new_files) > 0:
# scp all the new files back up to the host and into their respective directories.
# Save the original inputs.conf in the directory with inputs.conf-YYYY-MM-DD-HH-MM-SS
if copy_new_files_to_host == "T":
print "\n\nGonna upload "+str(new_files)
scp_new_input_files(cwd,target_hostname,splunk_home,whoiam,new_files)
print "\nDone"
else:
print "\n\nNew inputs.conf files have been been created. But you have elected to NOT upload them to the target host."
else:
print "There are no inputs.conf files in local directories on host "+target_hostname+" that need to be updated."
else:
print "Can not proceed without a sourcetype <-> index map. Bye."
remove_lock(lockFile)
exit(0)
```
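Stripped of the locking, ssh/scp plumbing and Google Sheet lookup, the per-stanza rewrite that `process_local_input_files` performs is: find the stanza's `sourcetype`, look it up in the mapping, then insert or overwrite the `index =` line. A simplified Python 3 sketch of just that step (made-up stanza and mapping):
```python
# Simplified sketch of the per-stanza index rewrite described above; the stanza
# text and the sourcetype -> index mapping below are made-up examples.
def rewrite_stanza(stanza_lines, index_map):
    """Return stanza lines with 'index = <new>' injected or corrected."""
    sourcetype = None
    index_pos = None
    for pos, line in enumerate(stanza_lines):
        key = line.split("=")[0].strip()
        if key == "sourcetype":
            sourcetype = line.split("=", 1)[1].strip()
        elif key == "index":
            index_pos = pos
    if sourcetype is None or sourcetype not in index_map:
        return stanza_lines                      # nothing to change
    new_line = "index = " + index_map[sourcetype]
    if index_pos is None:
        return stanza_lines + [new_line]         # inject a missing index
    stanza_lines[index_pos] = new_line           # correct an existing index
    return stanza_lines

stanza = ["[monitor:///var/log/app.log]", "sourcetype = app_log", "index = old_idx"]
print(rewrite_stanza(stanza, {"app_log": "new_idx"}))
# ['[monitor:///var/log/app.log]', 'sourcetype = app_log', 'index = new_idx']
```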
|
{
"source": "JFiedler23/PyInvaders",
"score": 3
}
|
#### File: PyInvaders/Source/Invaders.py
```python
import pygame
import sys
import os
import random
import game_data
from player import *
from laser import *
from alien import *
pygame.init()
my_path = os.path.abspath(os.path.dirname(__file__))
#<----------PYGAME SPECIFIC OBJECTS---------->
#basic screen setup
screenSize = screenWidth, screenHeight = 640, 480
screen = pygame.display.set_mode(screenSize)
pygame.display.set_caption("Invaders!")
#colors that are used frequently
black = (0,0,0)
white = (255,255,255)
#creating font
game_font_path = os.path.join(my_path, '../Fonts/Atari.ttf')
gameFont = pygame.font.Font(game_font_path, 28)
title_font_path = os.path.join(my_path, "../Fonts/SPACEBAR.ttf")
title_font = pygame.font.Font(title_font_path, 32)
#framerate clock
clock = pygame.time.Clock()
#game data object
data = game_data.GameData()
#<----------Main Menu---------->
def MainMenu():
data.score = 0
data.curr_level = 1
data.alien_speed = 850
data.alien_y = 40
icon_path = os.path.join(my_path, '../Images/icon.png')
icon = pygame.image.load(icon_path)
pygame.display.set_icon(icon)
startButton = pygame.Rect(190, 140, 256, 64)
high_score_button = pygame.Rect(190, 224, 256, 64)
while True:
clock.tick(56)
mx, my = pygame.mouse.get_pos()
title_text = title_font.render("INVADERS", 1, white)
startButtonText = title_font.render("Start", 1, black)
high_score_button_text = title_font.render("Scores", 1, black)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
if startButton.collidepoint((mx, my)):
HowToPlay()
elif high_score_button.collidepoint((mx, my)):
DisplayHighScores()
screen.fill(black)
pygame.draw.rect(screen, (69, 180, 186), startButton, 0)
pygame.draw.rect(screen, (69, 180, 186), high_score_button, 0)
screen.blit(title_text, (210, 20))
screen.blit(startButtonText, (250, 150))
screen.blit(high_score_button_text, (235, 235))
pygame.display.update()
#<----------GAME OVER SCREEN---------->
def GameOver():
SaveHighScores(data.score)
game_over_sound_path = os.path.join(my_path, "../Sounds/Game_Over.wav")
game_over_sound = pygame.mixer.Sound(game_over_sound_path)
game_over_sound.play()
while True:
clock.tick(60)
gameOverText = title_font.render("GAME OVER", 1, white)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
MainMenu()
screen.fill(black)
screen.blit(gameOverText, (210, 20))
pygame.display.update()
#<----------CHECKING/SAVING HIGH SCORES---------->
def SaveHighScores(score):
high_scores = []
index = -1
#Getting current high scores list
with open("high_scores.txt", "r") as f:
high_scores_data = f.readlines()
#splitting entries between name and high score
for item in high_scores_data:
high_scores.append(item.split())
#Checking if player set a new high score
for i in high_scores:
#if so grabbing index of beaten high score
if score >= int(i[1]):
index = high_scores.index(i)
break
#if we have new high score
if index > -1:
name = GetPlayerName()
high_scores.pop()
new_entry = [name, str(score)]
#scores before and after index
top = high_scores[:index]
top.append(new_entry)
bottom = high_scores[index:]
#Creating new high scores list
new_high_scores = top + bottom
#writing new high scores to file
with open("high_scores.txt", "w") as f:
for i in new_high_scores:
entry = i[0] + " " + i[1] + "\n"
f.write(entry)
DisplayHighScores()
#<----------HIGH SCORE SCREEN---------->
def DisplayHighScores():
x, y = 225, 70
yIncrease = 0
title_text = title_font.render("High Scores", 0, (white))
#Getting current high scores list
with open("high_scores.txt", "r") as f:
high_scores_data = f.readlines()
while True:
clock.tick(56)
screen.fill(black)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
MainMenu()
for entry in high_scores_data:
score_text = gameFont.render(entry[:-1], 0, (white))
screen.blit(score_text, (x, y+yIncrease))
yIncrease += 40
yIncrease = 0
screen.blit(title_text, (170, 20))
pygame.display.update()
#<----------NEW HIGH SCORE SCREEN---------->
def GetPlayerName():
name = ""
#input_field = pygame.Rect(190, 140, 256, 64)
title_text = title_font.render("New High Score!", 0, white)
input_header = gameFont.render("Enter your name: ", 0, white)
#Getting player name
while True:
name_text = gameFont.render(name, 0, white)
clock.tick(56)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
return name
elif event.key == pygame.K_BACKSPACE:
name = name[:-1]
else:
name += event.unicode
screen.fill(black)
#pygame.draw.rect(screen, (69, 180, 186), input_field, 2)
screen.blit(title_text, (140, 20))
screen.blit(input_header, (170, 150))
screen.blit(name_text, (395, 150))
pygame.display.update()
#<----------HOW TO PLAY SCREEN---------->
def HowToPlay():
button_font = pygame.font.Font(game_font_path, 20)
title_text = gameFont.render("How to play", 1, white)
arrow_text = gameFont.render("Movement: ", 1, white)
spacebar_header_text = gameFont.render("Shoot (Hold): ", 1, white)
spacebar_text = button_font.render("Space", 1, white)
continue_text = gameFont.render("Press enter to begin...", 1, white)
arrow_key_path = os.path.join(my_path, "../Images/lr_arrow_keys.png")
arrow_key_img = pygame.image.load(arrow_key_path)
spacebar_path = os.path.join(my_path, "../Images/spacebar.png")
spacebar_img = pygame.image.load(spacebar_path)
while True:
clock.tick(56)
screen.fill(black)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
game(data)
screen.blit(title_text, (235, 10))
screen.blit(arrow_text, (50, 120))
screen.blit(spacebar_header_text, (50, 270))
screen.blit(arrow_key_img, (250, 100))
screen.blit(spacebar_img, (250, 250))
screen.blit(spacebar_text, (290, 270))
screen.blit(continue_text, (150, 400))
pygame.display.update()
#<----------WIN SCREEN---------->
def Win():
win_header = gameFont.render("Congratulations!", 1, white)
win_text = gameFont.render("You saved the planet from the alien invasion.", 1, white)
thank_you_text = gameFont.render("Thank you for playing my game.", 1, white)
name_text = gameFont.render("Written by <NAME>", 1, white)
while True:
screen.fill(black)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
MainMenu()
screen.blit(win_header, (210, 100))
screen.blit(win_text, (5, 150))
screen.blit(thank_you_text, (120, 200))
screen.blit(name_text, (180, 250))
pygame.display.update()
#<----------GAME---------->
def game(data):
#Creating sound objects
laser_sound_path = os.path.join(my_path, "../Sounds/Laser_1.wav")
laser_sound = pygame.mixer.Sound(laser_sound_path)
explosion_sound_path = os.path.join(my_path, "../Sounds/Explosion.wav")
explosion_sound = pygame.mixer.Sound(explosion_sound_path)
#loading player image
spaceship_path = os.path.join(my_path, "../Images/spaceship.png")
ssImage = pygame.image.load(spaceship_path)
ssImage = ssImage.convert()
#loading laser image
laser_path = os.path.join(my_path, "../Images/laser_bullet.png")
laserImg = pygame.image.load(laser_path)
laserImg = laserImg.convert()
#loading alien Image
alien_path = os.path.join(my_path, "../Images/alien_ship.png")
alienImg = pygame.image.load(alien_path)
alienImg = alienImg.convert()
explosionImg = [pygame.image.load(os.path.join(my_path, "../Images/explosion_1.png")), \
pygame.image.load(os.path.join(my_path, "../Images/explosion_2.png")), \
pygame.image.load(os.path.join(my_path, "../Images/explosion_3.png")), \
pygame.image.load(os.path.join(my_path, "../Images/explosion_4.png")), \
pygame.image.load(os.path.join(my_path, "../Images/explosion_5.png")), \
pygame.image.load(os.path.join(my_path, "../Images/explosion_6.png")), \
pygame.image.load(os.path.join(my_path, "../Images/explosion_7.png"))]
#creating player shoot event
RELOAD_SPEED = 350
RELOADED_EVENT = pygame.USEREVENT + 1
reloaded = True
pygame.time.set_timer(RELOADED_EVENT, RELOAD_SPEED)
#creating alien movement event
move_speed = data.alien_speed
ALIEN_MOVE_EVENT = pygame.USEREVENT + 2
pygame.time.set_timer(ALIEN_MOVE_EVENT, move_speed)
#creating alien shoot event
ALIEN_SHOOT_SPEED = 900
ALIEN_SHOOT_EVENT = pygame.USEREVENT + 3
pygame.time.set_timer(ALIEN_SHOOT_EVENT, ALIEN_SHOOT_SPEED)
#explosion event
EXPLOSION_SPEED = 200
EXPLOSION_EVENT = pygame.USEREVENT + 4
pygame.time.set_timer(EXPLOSION_EVENT, EXPLOSION_SPEED)
#<----------INITIALIZING GAME OBJECTS---------->
#initializing Player
mainPlayer = Player(64, 64, (screenWidth // 2), (screenHeight - 64))
#alien initialization variables
aliens = []
aliensInRow = (screenWidth // 64) - 2
numRows = 4
#initializing all aliens
for i in range(aliensInRow):
for j in range(numRows):
aliens.append(Alien(64, 64, (70 * i+1), (64 * j)+data.alien_y))
#<----------GAMEPLAY FUNCTIONS---------->
#image draw
def redrawGameWindow():
scoreText = gameFont.render("Score: "+ str(data.score), 1, white)
levelText = gameFont.render("Level: "+ str(data.curr_level), 1, (255, 255, 255))
screen.blit(scoreText, (480, 10))
screen.blit(levelText, (10, 10))
mainPlayer.draw(screen, ssImage)
for alien in aliens:
alien.draw(screen, alienImg)
for laser in playerLasers:
laser.draw(screen, laserImg)
for laser in alienLasers:
laser.draw(screen, laserImg)
def playerDestory():
#checking for alien-player collision
for alien in aliens:
if (mainPlayer.hitbox[1] < alien.hitbox[1] + alien.hitbox[3]) and (mainPlayer.hitbox[1] + mainPlayer.hitbox[3] > alien.hitbox[1]):
if (mainPlayer.hitbox[0] < alien.hitbox[0] + alien.hitbox[2]) and (mainPlayer.hitbox[0] + mainPlayer.hitbox[2] > alien.hitbox[0]):
return True
#<----------GAMEPLAY VARIABLES---------->
run = True
movingRight = True #alien movement direction
animCount = 0
explosion = False
playerLasers = []
alienLasers = []
numAliens = len(aliens)
#<----------GAME LOOP---------->
while run:
screen.fill(black)
largestX = 0
smallestX = 1000
clock.tick(56)
#Getting all key presses
keys = pygame.key.get_pressed()
#Checking for custom events
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
#player reload event
if event.type == RELOADED_EVENT:
mainPlayer.shoot(keys, playerLasers, laser_sound)
#Alien movement
if event.type == ALIEN_MOVE_EVENT:
#Keeping track of smallest and largest x values
for alien in aliens:
if alien.x > largestX:
largestX = alien.x
if alien.x < smallestX:
smallestX = alien.x
#Checking boundaries
for alien in aliens:
#moving right
if largestX < (screenWidth - alien.width - alien.moveDistance) and movingRight:
alien.x += alien.moveDistance
#Moving down when right edge of screen is reached
elif largestX+5 == (screenWidth - alien.width - alien.moveDistance):
alien.y += alien.moveDistance
else:
movingRight = False
#moving left
if smallestX >= alien.moveDistance and not movingRight:
alien.x -= alien.moveDistance
else:
movingRight = True
#Alien shooting
if event.type == ALIEN_SHOOT_EVENT:
if len(aliens) > 0:
choice = random.randint(0, len(aliens)-1)
aliens[choice].shoot(alienLasers, laser_sound)
if playerDestory():
explosion_sound.play()
run = False
GameOver()
#Player laser manager
for laser in playerLasers:
#Checking if an alien has been hit
for alien in aliens:
if (laser.hitbox[1]) < (alien.hitbox[1] + alien.hitbox[3]) and (laser.hitbox[1] + laser.hitbox[3]) > alien.hitbox[1]:
if (laser.hitbox[0] - laser.hitbox[2]) < (alien.hitbox[0] + alien.hitbox[2]) and (laser.hitbox[0] + laser.hitbox[2]) > alien.hitbox[0]:
explosion_sound.play()
data.score += 100
tempX, tempY = alien.x, alien.y
aliens.pop(aliens.index(alien))
explosion = True
#just in case two aliens are hit at once
try:
playerLasers.pop(playerLasers.index(laser))
except ValueError:
continue
if laser.y > 0 and laser.y < screenHeight:
laser.y -= laser.velocity
else:
playerLasers.pop(playerLasers.index(laser))
#Alien laser manager
for laser in alienLasers:
#checking if player has been hit
if (laser.hitbox[1] < mainPlayer.hitbox[1] + mainPlayer.hitbox[3]) and (laser.hitbox[1] + laser.hitbox[3] > mainPlayer.hitbox[1]):
if (laser.hitbox[0] - laser.hitbox[2] < mainPlayer.hitbox[0] + mainPlayer.hitbox[2]) and (laser.hitbox[0] + laser.hitbox[2] > mainPlayer.hitbox[0]):
explosion_sound.play()
run = False
GameOver()
if laser.y < screenHeight and laser.y > 0:
laser.y += laser.velocity
else:
alienLasers.pop(alienLasers.index(laser))
#player controller
mainPlayer.controller(screenWidth, keys)
#explosion animation
if animCount + 1 >= 56:
animCount = 0
explosion = False
if explosion:
screen.blit(explosionImg[animCount//8], (tempX, tempY))
animCount += 1
#player wins level, start new level.
if len(aliens) <= 0 and data.curr_level <= 10:
data.curr_level += 1
data.alien_y += 5
data.alien_speed -= 5
game(data)
elif data.curr_level > 10:
Win()
redrawGameWindow()
pygame.display.update()
MainMenu()
```
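The collision tests in `playerDestory` and in the laser loops above are hand-rolled axis-aligned bounding-box (AABB) overlap checks on `(x, y, width, height)` hitboxes. The same test can be expressed with `pygame.Rect.colliderect`; a small sketch, assuming the hitboxes really are `(x, y, w, h)` tuples as used above:
```python
# Equivalent AABB overlap check via pygame.Rect (sketch; assumes (x, y, w, h)
# hitbox tuples like those used in the game code above).
import pygame

def hitboxes_collide(hitbox_a, hitbox_b):
    """Return True if the two (x, y, w, h) boxes overlap."""
    return pygame.Rect(hitbox_a).colliderect(pygame.Rect(hitbox_b))

# Made-up hitboxes for illustration:
print(hitboxes_collide((10, 10, 64, 64), (40, 40, 16, 16)))    # True
print(hitboxes_collide((10, 10, 64, 64), (200, 200, 16, 16)))  # False
```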
|
{
"source": "jfigueroama/simulator-mhcc",
"score": 3
}
|
#### File: jfigueroama/simulator-mhcc/gendata.py
```python
import random
from datetime import datetime, time
from pyrsistent import m, pmap
import simulators
import utils
from simulators import meter_new_context, pv_new_context, update_meter_context, update_pv_change, simulate_linear_meter, simulate_pv, pv_sba, rand_meter_context
random.seed()
def main():
meter = meter_new_context()
pv = pv_new_context().set('curve_start', time(7,0))
dt = datetime.now()
utils.print_headers()
for hour in range(0, 24):
for minute in range(0, 60):
for second in filter(lambda x: x % 2 == 0, range(0,60)):
dt = dt.replace(dt.year, dt.month, dt.day, hour, minute, second)
ti = time(hour, minute, second)
meter = rand_meter_context(meter, random)
pv = (update_pv_change(pv, random.random())
.set('rnd_changep', random.random())
.set('dt', dt))
meter = simulate_linear_meter(meter)
pv = simulate_pv(pv_sba, pv)
utils.print_result(meter['value'], pv['value'], dt)
if __name__ == "__main__":
main()
```
#### File: jfigueroama/simulator-mhcc/utils.py
```python
def headers():
return ("captured-at" + "\t"
"consumption(W)" + "\t"
"pvpower(kW)" + "\t"
"consumption+pvpower(W)" + "\t"
"consumption-pvpower(W)")
def write_result(fname, consuption, power, dt):
with open(fname, "a") as f:
line = (dt.strftime("%Y-%m-%dT%H:%M:%S") + "\t"
+ str(consuption) + "\t"
+ str(power) + "\t"
+ str(consuption + (power*1000)) + "\t"
+ str(consuption - (power*1000)) + "\n")
f.write(line)
def write_headers(fname):
with open(fname, "a") as f:
f.write(headers())
def print_headers():
print(headers())
def print_result(consuption, power, dt):
line = (dt.strftime("%Y-%m-%dT%H:%M:%S") + "\t"
+ str(consuption) + "\t"
+ str(power) + "\t"
+ str(consuption + (power*1000)) + "\t"
+ str(consuption - (power*1000)))
print(line)
```
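For reference, a quick illustration of the TSV row these helpers emit (made-up values; note that the power argument is in kW and is converted to W for the sum and difference columns):
```python
# Illustration only; the values below are made up.
from datetime import datetime
import utils

utils.print_headers()
utils.print_result(1500.0, 2.5, datetime(2024, 1, 1, 12, 0, 0))
# Output (tab-separated):
# captured-at  consumption(W)  pvpower(kW)  consumption+pvpower(W)  consumption-pvpower(W)
# 2024-01-01T12:00:00  1500.0  2.5  4000.0  -1000.0
```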
|
{
"source": "jfigui/pyrad",
"score": 3
}
|
#### File: EGG-INFO/scripts/main_extract_trt.py
```python
import datetime
import argparse
import atexit
import os
import numpy as np
from pyrad.io import get_trtfile_list, read_trt_data, write_trt_cell_data
print(__doc__)
def main():
"""
"""
# parse the arguments
parser = argparse.ArgumentParser(
description='Entry to Pyrad processing framework')
# positional arguments
parser.add_argument(
'start_times', type=str,
help=('Start times of the data to process. Format YYYYMMDDhhmmss.' +
'Comma separated'))
parser.add_argument(
'end_times', type=str,
help=('End times of the data to process. Format YYYYMMDDhhmmss.' +
'Comma separated'))
# keyword arguments
parser.add_argument(
'--raw_trtbase', type=str,
default='/store/msrad/radar/rad4alp/TRT/',
help='name of folder containing the TRT cell data')
parser.add_argument(
'--proc_trtbase', type=str,
default='/store/msrad/radar/trt/',
help='name of folder containing the TRT cell data')
parser.add_argument(
'--nsteps_min', type=int,
default=3,
help=('Minimum number of time steps to consider the TRT cell ' +
'worth processing'))
args = parser.parse_args()
print("====== TRT cell extraction started: %s" %
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
atexit.register(_print_end_msg,
"====== TRT cell extraction finished: ")
start_time_list = args.start_times.split(',')
end_time_list = args.end_times.split(',')
for i, start_time_str in enumerate(start_time_list):
end_time_str = end_time_list[i]
starttime = datetime.datetime.strptime(start_time_str, '%Y%m%d%H%M%S')
endtime = datetime.datetime.strptime(end_time_str, '%Y%m%d%H%M%S')
data_input_path = (
args.raw_trtbase+starttime.strftime('%y%j/TRTC%y%j/'))
data_output_path = (
args.proc_trtbase+starttime.strftime('%Y-%m-%d')+'/TRTC_cell/')
if not os.path.isdir(data_output_path):
os.makedirs(data_output_path)
flist = get_trtfile_list(data_input_path, starttime, endtime)
if flist is None:
continue
traj_ID = np.array([], dtype=int)
yyyymmddHHMM = np.array([], dtype=datetime.datetime)
lon = np.array([], dtype=float)
lat = np.array([], dtype=float)
ell_L = np.array([], dtype=float)
ell_S = np.array([], dtype=float)
ell_or = np.array([], dtype=float)
area = np.array([], dtype=float)
vel_x = np.ma.array([], dtype=float)
vel_y = np.ma.array([], dtype=float)
det = np.ma.array([], dtype=float)
RANKr = np.array([], dtype=int)
CG_n = np.array([], dtype=int)
CG_p = np.array([], dtype=int)
CG = np.array([], dtype=int)
CG_percent_p = np.ma.array([], dtype=float)
ET45 = np.ma.array([], dtype=float)
ET45m = np.ma.array([], dtype=float)
ET15 = np.ma.array([], dtype=float)
ET15m = np.ma.array([], dtype=float)
VIL = np.ma.array([], dtype=float)
maxH = np.ma.array([], dtype=float)
maxHm = np.ma.array([], dtype=float)
POH = np.ma.array([], dtype=float)
RANK = np.ma.array([], dtype=float)
Dvel_x = np.ma.array([], dtype=float)
Dvel_y = np.ma.array([], dtype=float)
cell_contour = []
for fname in flist:
print('Reading TRT file '+fname)
(traj_ID_aux, yyyymmddHHMM_aux, lon_aux, lat_aux, ell_L_aux,
ell_S_aux, ell_or_aux, area_aux, vel_x_aux, vel_y_aux, det_aux,
RANKr_aux, CG_n_aux, CG_p_aux, CG_aux, CG_percent_p_aux,
ET45_aux, ET45m_aux, ET15_aux, ET15m_aux, VIL_aux, maxH_aux,
maxHm_aux, POH_aux, RANK_aux, Dvel_x_aux, Dvel_y_aux,
cell_contour_aux) = read_trt_data(fname)
if traj_ID_aux is None:
continue
traj_ID = np.append(traj_ID, traj_ID_aux)
yyyymmddHHMM = np.append(yyyymmddHHMM, yyyymmddHHMM_aux)
lon = np.append(lon, lon_aux)
lat = np.append(lat, lat_aux)
ell_L = np.append(ell_L, ell_L_aux)
ell_S = np.append(ell_S, ell_S_aux)
ell_or = np.append(ell_or, ell_or_aux)
area = np.append(area, area_aux)
vel_x = np.append(vel_x, vel_x_aux)
vel_y = np.append(vel_y, vel_y_aux)
det = np.append(det, det_aux)
RANKr = np.append(RANKr, RANKr_aux)
CG_n = np.append(CG_n, CG_n_aux)
CG_p = np.append(CG_p, CG_p_aux)
CG = np.append(CG, CG_aux)
CG_percent_p = np.append(CG_percent_p, CG_percent_p_aux)
ET45 = np.append(ET45, ET45_aux)
ET45m = np.append(ET45m, ET45m_aux)
ET15 = np.append(ET15, ET15_aux)
ET15m = np.append(ET15m, ET15m_aux)
VIL = np.append(VIL, VIL_aux)
maxH = np.append(maxH, maxH_aux)
maxHm = np.append(maxHm, maxHm_aux)
POH = np.append(POH, POH_aux)
RANK = np.append(RANK, RANK_aux)
Dvel_x = np.append(Dvel_x, Dvel_x_aux)
Dvel_y = np.append(Dvel_y, Dvel_y_aux)
cell_contour.extend(cell_contour_aux)
traj_ID_unique_list = np.unique(traj_ID)
print('Total Number of cells: '+str(traj_ID_unique_list.size))
ncells = 0
for traj_ID_unique in traj_ID_unique_list:
ind = np.where(traj_ID == traj_ID_unique)[0]
if ind.size < args.nsteps_min:
continue
traj_ID_cell = traj_ID[ind]
yyyymmddHHMM_cell = yyyymmddHHMM[ind]
lon_cell = lon[ind]
lat_cell = lat[ind]
ell_L_cell = ell_L[ind]
ell_S_cell = ell_S[ind]
ell_or_cell = ell_or[ind]
area_cell = area[ind]
vel_x_cell = vel_x[ind]
vel_y_cell = vel_y[ind]
det_cell = det[ind]
RANKr_cell = RANKr[ind]
CG_n_cell = CG_n[ind]
CG_p_cell = CG_p[ind]
CG_cell = CG[ind]
CG_percent_p_cell = CG_percent_p[ind]
ET45_cell = ET45[ind]
ET45m_cell = ET45m[ind]
ET15_cell = ET15[ind]
ET15m_cell = ET15m[ind]
VIL_cell = VIL[ind]
maxH_cell = maxH[ind]
maxHm_cell = maxHm[ind]
POH_cell = POH[ind]
RANK_cell = RANK[ind]
Dvel_x_cell = Dvel_x[ind]
Dvel_y_cell = Dvel_y[ind]
cell_contour_cell = []
for ind_el in ind:
cell_contour_cell.append(cell_contour[ind_el])
fname = data_output_path+str(traj_ID_unique)+'.trt'
fname = write_trt_cell_data(
traj_ID_cell, yyyymmddHHMM_cell, lon_cell, lat_cell,
ell_L_cell, ell_S_cell, ell_or_cell, area_cell, vel_x_cell,
vel_y_cell, det_cell, RANKr_cell, CG_n_cell, CG_p_cell,
CG_cell, CG_percent_p_cell, ET45_cell, ET45m_cell, ET15_cell,
ET15m_cell, VIL_cell, maxH_cell, maxHm_cell, POH_cell,
RANK_cell, Dvel_x_cell,
Dvel_y_cell, cell_contour_cell, fname)
print('Written individual TRT cell file '+fname)
ncells += 1
print('Number of cells with '+str(args.nsteps_min) +
' or more time steps: '+str(ncells))
def _print_end_msg(text):
"""
prints end message
Parameters
----------
text : str
the text to be printed
Returns
-------
Nothing
"""
print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
# ---------------------------------------------------------
# Start main:
# ---------------------------------------------------------
if __name__ == "__main__":
main()
```
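The selection logic above keeps only TRT cells observed for at least `nsteps_min` time steps before writing a per-cell file. In isolation, that grouping and filtering pattern looks like this (toy IDs, not real TRT output):
```python
# Toy illustration of the per-cell grouping and minimum-length filter used above.
import numpy as np

traj_ID = np.array([11, 11, 11, 22, 22, 33, 33, 33, 33])
nsteps_min = 3
for cell_id in np.unique(traj_ID):
    ind = np.where(traj_ID == cell_id)[0]
    if ind.size < nsteps_min:
        continue                 # cell 22 is skipped: only 2 time steps
    print(cell_id, ind.size)     # prints "11 3" and then "33 4"
```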
#### File: EGG-INFO/scripts/main_precipitation_comparison.py
```python
import datetime
import atexit
import glob
from warnings import warn
import os
import numpy as np
from pyrad.io import read_ts_cum
from pyrad.graph import plot_scatter_comp
from pyrad.util import compute_1d_stats
print(__doc__)
def main():
"""
"""
param_vec = ['RR_Z', 'RR_hydro']
smn_station_vec = ['CIM', 'MAG', 'OTL']
tstart = '20180401'
tend = '20180430'
np_radar_min = 6
np_sensor_min = 6
min_val = 0.2
fbase = '/data/pyrad_products/mals_loc_dataquality/'
img_ext = 'png'
avg_time = 3600
print("====== precipitation comparison started: %s" %
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
atexit.register(_print_end_msg,
"====== comparison finished: ")
startdate = datetime.datetime.strptime(tstart, '%Y%m%d')
enddate = datetime.datetime.strptime(tend, '%Y%m%d')
ndays = (enddate - startdate).days + 1
print('Number of days to process: '+str(ndays)+'\n\n')
for param in param_vec:
ts_vec = np.array([])
val_radar = np.ma.array([])
np_radar = np.array([])
val_sensor = np.ma.array([])
np_sensor = np.array([])
for station in smn_station_vec:
for day in range(ndays):
current_date = startdate + datetime.timedelta(days=day)
day_dir = current_date.strftime("%Y-%m-%d")
daybase = current_date.strftime("%Y%m%d")
fpath = (fbase+day_dir+'/rg'+station+'_'+param+'/RRcum' +
str(avg_time)+'s/')
fname = glob.glob(
fpath+daybase+'_'+str(avg_time) +
's_acc_ts_comp_POINT_MEASUREMENT_*.csv')
if not fname:
warn('No file found in '+fpath)
continue
else:
(ts_aux, np_radar_aux, radar_value_aux, np_sensor_aux,
sensor_value_aux) = read_ts_cum(fname[0])
ts_vec = np.append(ts_vec, ts_aux)
val_radar = np.ma.append(val_radar, radar_value_aux)
np_radar = np.append(np_radar, np_radar_aux)
val_sensor = np.ma.append(val_sensor, sensor_value_aux)
np_sensor = np.append(np_sensor, np_sensor_aux)
# filter out undesired data
ind = np.where(np.logical_and(
np.logical_and(
np_radar >= np_radar_min, np_sensor >= np_sensor_min),
np.logical_and(val_sensor >= min_val, val_radar >= min_val)))[0]
val_sensor = val_sensor[ind]
val_radar = val_radar[ind]
# compute statistics
stats = compute_1d_stats(val_sensor, val_radar)
# create output image
fpath = fbase+'RR/'
if os.path.isdir(fpath):
pass
else:
os.makedirs(fpath)
figfname = [
startdate.strftime('%Y%m%d')+'-'+enddate.strftime('%Y%m%d')+'_' +
str(avg_time)+'s_acc_ts_comp_'+param+'.'+img_ext]
for i in range(len(figfname)):
figfname[i] = fpath+figfname[i]
labelx = 'RG (mm)'
labely = 'Radar (mm)'
titl = (str(avg_time)+' s Acc. Comp. '+startdate.strftime('%Y%m%d') +
'-'+enddate.strftime('%Y%m%d'))
metadata = (
'npoints: '+str(stats['npoints'])+'\n' +
'NB: '+'{:.2f}'.format(float(stats['NB']))+'\n' +
'corr: '+'{:.2f}'.format(float(stats['corr']))+'\n' +
'RMS: '+'{:.2f}'.format(float(stats['RMS']))+'\n' +
'Nash: '+'{:.2f}'.format(float(stats['Nash']))+'\n')
plot_scatter_comp(
val_sensor, val_radar, figfname, labelx=labelx,
labely=labely, titl=titl, axis='equal', metadata=metadata,
dpi=300)
def _print_end_msg(text):
"""
prints end message
Parameters
----------
text : str
the text to be printed
Returns
-------
Nothing
"""
print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
# ---------------------------------------------------------
# Start main:
# ---------------------------------------------------------
if __name__ == "__main__":
main()
```
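The heart of the comparison is the joint filtering of the accumulated radar and rain-gauge samples before `compute_1d_stats` is called. A minimal, self-contained sketch of that filtering step, with made-up numbers and thresholds (an illustration, not part of the script), is shown below.
```python
import numpy as np

# Toy per-station/per-day sample counts and accumulations.
np_radar = np.array([3, 10, 12, 8])
np_sensor = np.array([7, 9, 15, 2])
val_radar = np.ma.array([0.1, 1.4, 2.2, 0.9])
val_sensor = np.ma.array([0.3, 1.1, 2.5, 1.0])

np_radar_min = np_sensor_min = 6
min_val = 0.2

# Keep only the pairs where both instruments have enough samples and both
# accumulations exceed the minimum value, as in the script above.
ind = np.where(np.logical_and(
    np.logical_and(np_radar >= np_radar_min, np_sensor >= np_sensor_min),
    np.logical_and(val_sensor >= min_val, val_radar >= min_val)))[0]

print(val_sensor[ind])  # [1.1 2.5]
print(val_radar[ind])   # [1.4 2.2]
```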
#### File: EGG-INFO/scripts/main_process_data_birds.py
```python
import datetime
import argparse
import atexit
import os
import glob
from warnings import warn
from pyrad.flow.flow_control import main as pyrad_main
from pyrad.io import get_fieldname_pyart
from pyrad.io import read_profile_ts
from pyrad.graph import get_field_name, _plot_time_range
from pyart.config import get_metadata
print(__doc__)
def main():
"""
"""
# parse the arguments
parser = argparse.ArgumentParser(
description='Entry to Pyrad processing framework')
# positional arguments
parser.add_argument(
'proc_cfgfile', type=str, help='name of main configuration file')
parser.add_argument(
'starttime', type=str,
help=('starting time of the data to be processed. ' +
'Format ''YYYYMMDDhhmmss'''))
parser.add_argument(
'endtime', type=str,
help='end time of the data to be processed. Format ''YYYYMMDDhhmmss''')
# keyword arguments
parser.add_argument(
'--cfgpath', type=str,
default=os.path.expanduser('~')+'/pyrad/config/processing/',
help='configuration file path')
parser.add_argument(
'--storepath', type=str,
default='/store/msrad/radar/pyrad_products/rad4alp_birds_PHA/',
help='Base data storing path')
parser.add_argument(
'--hres', type=int, default=200, help='Height resolution [m]')
args = parser.parse_args()
print("====== PYRAD data processing started: %s" %
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
atexit.register(_print_end_msg,
"====== PYRAD data processing finished: ")
print('config path: '+args.cfgpath)
print('config file: '+args.proc_cfgfile)
print('start time: '+args.starttime)
print('end time: '+args.endtime)
proc_starttime = datetime.datetime.strptime(
args.starttime, '%Y%m%d%H%M%S')
proc_endtime = datetime.datetime.strptime(
args.endtime, '%Y%m%d%H%M%S')
cfgfile_proc = args.cfgpath+args.proc_cfgfile
pyrad_main(cfgfile_proc, starttime=proc_starttime, endtime=proc_endtime)
# Plot time-height
file_base = args.storepath
hres = args.hres
datatype_list = [
'dBZc', 'eta_h', 'bird_density', 'WIND_SPEED', 'WIND_DIRECTION',
'wind_vel_h_u', 'wind_vel_h_v', 'wind_vel_v']
startdate = proc_starttime.replace(hour=0, minute=0, second=0, microsecond=0)
enddate = proc_endtime.replace(hour=0, minute=0, second=0, microsecond=0)
ndays = int((enddate-startdate).days)+1
for datatype in datatype_list:
flist = []
for i in range(ndays):
time_dir = (
proc_starttime+datetime.timedelta(days=i)).strftime('%Y-%m-%d')
filepath = (
file_base+time_dir+'/VAD/PROFILE_WIND/' +
'*_wind_profile_VAD_WIND_hres'+str(hres)+'.csv')
labels = [
'u_wind', 'std_u_wind', 'np_u_wind',
'v_wind', 'std_v_wind', 'np_v_wind',
'w_wind', 'std_w_wind', 'np_w_wind',
'mag_h_wind', 'dir_h_wind']
label_nr = 0
if datatype == 'dBZc':
filepath = (
file_base+time_dir+'/velFilter/PROFILE_dBZc/' +
'*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv')
labels = [
'50.0-percentile', '25.0-percentile', '75.0-percentile']
# dBZ mean data
# filepath = (
# file_base+time_dir+'/velFilter/PROFILE_dBZc_mean/' +
# '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv')
# labels = [
# 'Mean', 'Min', 'Max']
# dBZ linear mean data
# filepath = (
# file_base+time_dir+'/velFilter/PROFILE_dBZc_linear_mean/' +
# '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv')
# labels = [
# 'Mean', 'Min', 'Max']
# dBZ before filtering with fitted velocity
# filepath = (
# file_base+time_dir+'/echoFilter/PROFILE_dBZc/' +
# '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv')
# labels = [
# '50.0-percentile', '25.0-percentile', '75.0-percentile']
#
# dBZ before filtering with fitted velocity. Linear mean
# filepath = (
# file_base+time_dir+'/echoFilter/PROFILE_dBZc_linear_mean/' +
# '*_rhi_profile_*_dBZc_hres'+str(hres)+'.csv')
# labels = [
# 'Mean', 'Min', 'Max']
elif datatype == 'eta_h':
filepath = (
file_base+time_dir+'/vol_refl/PROFILE/' +
'*_rhi_profile_*_eta_h_hres'+str(hres)+'.csv')
labels = [
'50.0-percentile', '25.0-percentile', '75.0-percentile']
# mean data
# filepath = (
# file_base+time_dir+'/vol_refl/PROFILE_mean/' +
# '*_rhi_profile_*_eta_h_hres'+str(hres)+'.csv')
# labels = [
# 'Mean', 'Min', 'Max']
elif datatype == 'bird_density':
filepath = (
file_base+time_dir+'/bird_density/PROFILE/' +
'*_rhi_profile_*_bird_density_hres'+str(hres)+'.csv')
labels = [
'50.0-percentile', '25.0-percentile', '75.0-percentile']
# mean data
# filepath = (
# file_base+time_dir+'/bird_density/PROFILE_mean/' +
# '*_rhi_profile_*_bird_density_hres'+str(hres)+'.csv')
# labels = [
# 'Mean', 'Min', 'Max']
elif datatype == 'WIND_SPEED':
label_nr = 9
elif datatype == 'WIND_DIRECTION':
label_nr = 10
elif datatype == 'wind_vel_h_u':
label_nr = 0
elif datatype == 'wind_vel_h_v':
label_nr = 3
elif datatype == 'wind_vel_v':
label_nr = 6
flist_aux = glob.glob(filepath)
if not flist_aux:
warn('No profile files found in '+filepath)
continue
flist.extend(flist_aux)
if not flist:
warn('No profile files found')
continue
flist.sort()
field_name = get_fieldname_pyart(datatype)
field_dict = get_metadata(field_name)
titl = 'bird retrieval '+args.starttime+'\n'+get_field_name(
field_dict, field_name)
tbin_edges, hbin_edges, np_ma, data_ma, t_start = read_profile_ts(
flist, labels, hres=hres, label_nr=label_nr)
basepath_out = os.path.dirname(flist[0])
fname = (
basepath_out+'/'+args.starttime+'_TIME_HEIGHT_' +
datatype+'_hres'+str(hres)+'.png')
vmin = vmax = None
_plot_time_range(
tbin_edges, hbin_edges/1000., data_ma, field_name, [fname],
titl=titl, figsize=[10, 8], vmin=vmin, vmax=vmax, dpi=72)
print("----- plot to '%s'" % fname)
# Plot number of points
field_dict = get_metadata('number_of_samples')
titl = 'bird retrieval '+args.starttime+'\n'+get_field_name(
field_dict, 'number_of_samples')
fname = (
basepath_out+'/'+args.starttime+'_TIME_HEIGHT_' +
datatype+'nsamples_hres'+str(hres)+'.png')
vmin = vmax = None
_plot_time_range(
tbin_edges, hbin_edges/1000., np_ma, 'number_of_samples', [fname],
titl=titl, figsize=[10, 8], vmin=vmin, vmax=vmax, dpi=72)
print("----- plot to '%s'" % fname)
def _print_end_msg(text):
"""
prints end message
Parameters
----------
text : str
the text to be printed
Returns
-------
Nothing
"""
print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
# ---------------------------------------------------------
# Start main:
# ---------------------------------------------------------
if __name__ == "__main__":
main()
```
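Inside the datatype loop the script collects one profile CSV per day by globbing a dated sub-directory. A stripped-down sketch of that collection pattern follows; the helper name and the example paths are illustrative, not part of the script.
```python
import datetime
import glob
from warnings import warn

def collect_daily_files(base, subpath, pattern, startdate, ndays):
    """Glob `pattern` under one dated sub-directory per day (hypothetical helper)."""
    flist = []
    for i in range(ndays):
        day_dir = (startdate + datetime.timedelta(days=i)).strftime('%Y-%m-%d')
        flist_aux = glob.glob(base + day_dir + subpath + pattern)
        if not flist_aux:
            warn('No profile files found in ' + base + day_dir + subpath)
            continue
        flist.extend(flist_aux)
    flist.sort()
    return flist

# Example call (paths and resolution are made up):
# collect_daily_files('/store/.../rad4alp_birds_PHA/', '/VAD/PROFILE_WIND/',
#                     '*_wind_profile_VAD_WIND_hres200.csv',
#                     datetime.datetime(2018, 4, 1), 3)
```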
#### File: EGG-INFO/scripts/main_process_trt_data.py
```python
import datetime
import argparse
import atexit
import glob
import os
from shutil import copy
from warnings import warn
import numpy as np
from pyrad.io import read_trt_traj_data, write_trt_cell_scores
from pyrad.io import write_trt_cell_lightning
from pyrad.util import belongs_roi_indices
from pyrad.graph import plot_timeseries, plot_scatter_comp, plot_pos
print(__doc__)
def main():
"""
"""
# parse the arguments
parser = argparse.ArgumentParser(
description='Entry to Pyrad processing framework')
# positional arguments
parser.add_argument(
'days', nargs='+', type=str,
help='Dates to process. Format YYYY-MM-DD')
# keyword arguments
parser.add_argument(
'--trtbase', type=str,
default='/store/msrad/radar/trt/',
help='name of folder containing the TRT cell data')
parser.add_argument(
'--lon', type=str,
default='8.9000010,9.2000000,9.4999970,9.4999970,8.9000010',
help=('longitude of the points defining the perimeter of the area ' +
'of interest'))
parser.add_argument(
'--lat', type=str,
default='47.0000030,47.0000030,47.0000030,47.5999930,47.5999930',
help=('latitude of the points defining the perimeter of the area ' +
'of interest'))
args = parser.parse_args()
print("====== TRT cell processing started: %s" %
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
atexit.register(_print_end_msg,
"====== comparison finished: ")
time_dir_list = args.days
lons = args.lon.split(',')
lats = args.lat.split(',')
if np.size(lons) != np.size(lats):
warn(
str(np.size(lons))+' longitudes but '+str(np.size(lats)) +
' latitudes. Their number must be equal')
return
lon_list = []
lat_list = []
for i, lon in enumerate(lons):
lon_list.append(float(lon))
lat_list.append(float(lats[i]))
roi = {
'lon': lon_list,
'lat': lat_list
}
# List for collection of max data
cell_ID_max_list = []
nflashes_max_list = []
area_flash_max_list = []
flash_density_max_list = []
time_flash_density_max_list = []
flash_density_max_rank_list = []
rank_max_list = []
time_rank_max_list = []
# List for collection of flashes data
cell_ID_list = np.ma.asarray([], dtype=int)
time_list = np.ma.asarray([], dtype=datetime.datetime)
lon_list = np.ma.asarray([], dtype=float)
lat_list = np.ma.asarray([], dtype=float)
flash_density_list = np.ma.asarray([], dtype=float)
rank_flash_density_list = np.ma.asarray([], dtype=float)
area_list = np.ma.asarray([], dtype=float)
nflash_list = np.ma.asarray([], dtype=int)
for i, time_dir in enumerate(time_dir_list):
data_input_path = args.trtbase+time_dir+'/TRTC_cell/'
data_output_base = args.trtbase+time_dir+'/TRTC_cell_plots/'
flist = glob.glob(data_input_path+'*.trt')
for fname in flist:
print('Reading TRT trajectory file '+fname)
(traj_ID, yyyymmddHHMM, lon, lat, _, _, _, area, vel_x, vel_y,
det, RANKr, CG_n, CG_p, CG, _, ET45, ET45m, ET15, ET15m, VIL,
maxH, maxHm, POH, _, _, _, _) = read_trt_traj_data(fname)
inds, is_roi = belongs_roi_indices(lat, lon, roi)
if is_roi == 'None':
continue
elif is_roi == 'Some' and len(lat[inds]) < 3:
continue
data_output_path = data_output_base+is_roi+'/'
if not os.path.isdir(data_output_path):
os.makedirs(data_output_path)
# copy file
copy(fname, data_output_path)
            # general characteristics
flash_density = CG/area
cell_ID_max_list.append(traj_ID[0])
flash_density_max_list.append(np.max(flash_density))
nflashes_max_list.append(CG[np.argmax(flash_density)])
area_flash_max_list.append(area[np.argmax(flash_density)])
time_flash_density_max_list.append(
yyyymmddHHMM[np.argmax(flash_density)])
flash_density_max_rank_list.append(
RANKr[np.argmax(flash_density)])
rank_max_list.append(np.max(RANKr))
time_rank_max_list.append(yyyymmddHHMM[np.argmax(RANKr)])
cell_ID_list = np.append(cell_ID_list, traj_ID)
time_list = np.append(time_list, yyyymmddHHMM)
lon_list = np.append(lon_list, lon)
lat_list = np.append(lat_list, lat)
flash_density_list = np.append(flash_density_list, flash_density)
rank_flash_density_list = np.append(
rank_flash_density_list, RANKr)
area_list = np.append(area_list, area)
nflash_list = np.append(nflash_list, CG)
# Time series plots
figfname = data_output_path+str(traj_ID[0])+'_flash_density.png'
plot_timeseries(
yyyymmddHHMM, [flash_density], [figfname], labelx='Time UTC',
labely='Flash density [flashes/km2]',
title=str(traj_ID[0])+' flash density')
figfname = data_output_path+str(traj_ID[0])+'_area.png'
plot_timeseries(
yyyymmddHHMM, [area], [figfname], labelx='Time UTC',
labely='area [km2]', title=str(traj_ID[0])+' cell area')
figfname = data_output_path+str(traj_ID[0])+'_vel.png'
plot_timeseries(
yyyymmddHHMM, [vel_x, vel_y], [figfname], labelx='Time UTC',
labely='Velocity [km/h]', labels=['x speed', 'y speed'],
title=str(traj_ID[0])+' cell velocity')
figfname = data_output_path+str(traj_ID[0])+'_det.png'
plot_timeseries(
yyyymmddHHMM, [det], [figfname], labelx='Time UTC',
labely='Detection threshold [dBZ]',
title=str(traj_ID[0])+' cell detection threshold')
figfname = data_output_path+str(traj_ID[0])+'_rank.png'
plot_timeseries(
yyyymmddHHMM, [RANKr], [figfname], labelx='Time UTC',
labely='Rank [-]', title=str(traj_ID[0])+' cell rank')
figfname = data_output_path+str(traj_ID[0])+'_lightning.png'
plot_timeseries(
yyyymmddHHMM, [CG_n, CG_p, CG], [figfname], labelx='Time UTC',
labely='N flash [-]', labels=['CG-', 'CG+', 'CG'],
title=str(traj_ID[0])+' flashes in cell')
figfname = data_output_path+str(traj_ID[0])+'_ET.png'
plot_timeseries(
yyyymmddHHMM, [ET45, ET45m, ET15, ET15m], [figfname],
labelx='Time UTC', labely='Echo Top [km]',
labels=['ET45', 'ET45m', 'ET15', 'ET15m'],
title=str(traj_ID[0])+' Echo top')
figfname = data_output_path+str(traj_ID[0])+'_VIL.png'
plot_timeseries(
yyyymmddHHMM, [VIL], [figfname], labelx='Time UTC',
labely='VIL [Kg/m2]', labels=['VIL'],
title=str(traj_ID[0])+' VIL')
figfname = data_output_path+str(traj_ID[0])+'_maxH.png'
plot_timeseries(
yyyymmddHHMM, [maxH, maxHm], [figfname], labelx='Time UTC',
labely='Max. Echo Height [Km]', labels=['maxH', 'maxHm'],
title=str(traj_ID[0])+' Height of Max. Reflectivity')
figfname = data_output_path+str(traj_ID[0])+'_POH.png'
plot_timeseries(
yyyymmddHHMM, [POH], [figfname], labelx='Time UTC',
labely='POH [%]', labels=['POH'],
title=str(traj_ID[0])+' Probability of Hail')
# plot position
# get time since start of cell in s
td_vec = yyyymmddHHMM-yyyymmddHHMM[0]
tt_s = np.empty(td_vec.size, dtype=float)
for j, td in enumerate(td_vec):
tt_s[j] = td.total_seconds()
cb_label = (
'Time since '+yyyymmddHHMM[0].strftime('%Y-%m-%d %H:%M') +
' [s]')
figfname = data_output_path+str(traj_ID[0])+'_pos.png'
figfname = plot_pos(
lat, lon, tt_s, [figfname], cb_label=cb_label,
titl=str(traj_ID[0])+' Cell Position')
print('Plotted '+' '.join(figfname))
fname = args.trtbase+'Santis_cell_scores.csv'
write_trt_cell_scores(
cell_ID_max_list, time_flash_density_max_list,
flash_density_max_rank_list, nflashes_max_list, area_flash_max_list,
flash_density_max_list, time_rank_max_list, rank_max_list, fname)
fname = args.trtbase+'Santis_cell_euclid_lightning.csv'
write_trt_cell_lightning(
cell_ID_list, time_list, lon_list, lat_list, area_list,
rank_flash_density_list, nflash_list, flash_density_list, fname)
plot_scatter_comp(
flash_density_list, rank_flash_density_list/10.,
[args.trtbase+'hist_flash_density_rank'],
labelx='flash density [flashes/km2]', labely='rank',
titl='Flash density vs Rank', axis=None, metadata=None, dpi=72)
def _print_end_msg(text):
"""
prints end message
Parameters
----------
text : str
the text to be printed
Returns
-------
Nothing
"""
print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
# ---------------------------------------------------------
# Start main:
# ---------------------------------------------------------
if __name__ == "__main__":
main()
```
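The per-cell summaries stored in the `*_max_list` collections reduce to computing a flash density and indexing every attribute at its maximum. A self-contained sketch with made-up numbers:
```python
import numpy as np

# Per-time-step attributes of a single (invented) TRT cell.
CG = np.array([2., 10., 6.])       # cloud-to-ground flashes
area = np.array([50., 40., 60.])   # cell area [km2]
RANKr = np.array([12, 25, 18])     # cell rank

flash_density = CG / area          # flashes/km2, as in the script above
imax = np.argmax(flash_density)
print(flash_density[imax], CG[imax], area[imax], RANKr[imax])
# 0.25 10.0 40.0 25 -> the values appended to the *_max_list collections
```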
#### File: EGG-INFO/scripts/rewrite_monitoring.py
```python
import datetime
import atexit
import numpy as np
import os
from pyrad.io import read_monitoring_ts, write_monitoring_ts
from pyrad.graph import plot_monitoring_ts
from pyrad.io import generate_field_name_str, get_fieldname_pyart
print(__doc__)
def main():
"""
"""
input_base = (
'/store/msrad/radar/pyrad_products/')
output_base = (
'/store/msrad/radar/pyrad_products/')
rad_vec = ['D']
var_vec = ['PhiDP0', 'RhoHV_rain', 'ZDR_prec', 'ZDR_snow', 'dBZ_bias']
year_vec = [datetime.datetime(2018, 1, 1)]
plot_data = True
print("====== Monitoring rewriting started: %s" %
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
atexit.register(_print_end_msg,
"====== Monitoring rewriting finished: ")
for i, rad in enumerate(rad_vec):
print('Processing Radar '+rad)
for j, var in enumerate(var_vec):
if var == 'dBZ':
basedir = 'rad4alp_gc_PH'+rad
dsdir = 'monitoring_clt_Zh'
mon_type = 'GC_MONITORING'
quantiles = [50., 95., 99.]
elif var == 'dBZv':
basedir = 'rad4alp_gc_PH'+rad
dsdir = 'monitoring_clt_Zv'
mon_type = 'GC_MONITORING'
quantiles = [50., 95., 99.]
elif var == 'RhoHV_rain':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_RhoHV'
mon_type = 'MONITORING'
quantiles = [65., 80., 95.]
elif var == 'PhiDP0':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_PhiDP0'
mon_type = 'MONITORING'
quantiles = [25., 50., 75.]
elif var == 'ZDR_prec':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_ZDR'
mon_type = 'MONITORING'
quantiles = [25., 50., 75.]
elif var == 'ZDR_snow':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_ZDR_snow'
mon_type = 'MONITORING'
quantiles = [25., 50., 75.]
elif var == 'dBZ_bias':
basedir = 'rad4alp_dataquality_PL'+rad
dsdir = 'monitoring_Zh_bias'
mon_type = 'MONITORING'
quantiles = [25., 50., 75.]
input_path = input_base+basedir+'/'+dsdir+'/VOL_TS/'
output_path = output_base+basedir+'/'+dsdir+'/VOL_TS/'
if not os.path.isdir(output_path):
os.makedirs(output_path)
print('- Processing Variable '+var)
for k, year in enumerate(year_vec):
print('-- Processing Year '+year.strftime('%Y'))
fname_input = (
input_path+year.strftime('%Y')+'_'+rad +
'_ts_'+mon_type+'_'+var+'.csv')
fname_output = (
output_path+year.strftime('%Y')+'_'+rad +
'_ts_'+mon_type+'_'+var+'.csv')
figfname = [
output_path+year.strftime('%Y')+'_'+rad +
'_ts_'+mon_type+'_'+var+'.png']
date, np_t_vec, cquant_vec, lquant_vec, hquant_vec = (
read_monitoring_ts(fname_input, sort_by_date=True))
if date is None:
continue
val_vec = np.ma.asarray(
[lquant_vec, cquant_vec, hquant_vec]).T
fname = write_monitoring_ts(
date, np_t_vec, val_vec, quantiles, var,
fname_output, rewrite=True)
print('written file '+fname)
if not plot_data:
continue
titldate = (date[0].strftime('%Y%m%d')+'-' +
date[-1].strftime('%Y%m%d'))
titl = rad+' Monitoring '+titldate
labely = generate_field_name_str(var)
if var == 'dBZ':
if rad == 'A':
ref_value = 49.5
vmin = 44.5
vmax = 54.5
np_min = 100000
elif rad == 'D':
ref_value = 48.5
vmin = 43.5
vmax = 53.5
np_min = 20000
elif rad == 'L':
ref_value = 67.
vmin = 62.
vmax = 72.
np_min = 100000
elif rad == 'P':
ref_value = 69.
vmin = 64.
vmax = 74.
np_min = 100000
elif rad == 'W':
ref_value = 27.5
vmin = 22.5
vmax = 32.5
np_min = 100000
elif var == 'dBZv':
if rad == 'A':
ref_value = 51.5
vmin = 46.5
vmax = 56.5
np_min = 100000
elif rad == 'D':
ref_value = 50.5
vmin = 45.5
vmax = 55.5
np_min = 20000
elif rad == 'L':
ref_value = 69.5
vmin = 64.5
vmax = 74.5
np_min = 100000
elif rad == 'P':
ref_value = 68.5
vmin = 63.5
vmax = 73.5
np_min = 100000
elif rad == 'W':
ref_value = 26.5
vmin = 21.5
vmax = 31.5
np_min = 100000
elif var == 'RhoHV_rain':
ref_value = 0.99
vmin = 0.95
vmax = 1.01
np_min = 5000
elif var == 'PhiDP0':
ref_value = 0.
vmin = -20.
vmax = 20.
np_min = 500000
elif var == 'ZDR_prec':
ref_value = 0.2
vmin = -2.
vmax = 2.
np_min = 5000
elif var == 'ZDR_snow':
ref_value = 0.2
vmin = -2.
vmax = 2.
np_min = 5000
elif var == 'dBZ_bias':
ref_value = 0.
vmin = -30.
vmax = 30.
np_min = 100
fname = plot_monitoring_ts(
date, np_t_vec, cquant_vec, lquant_vec, hquant_vec,
get_fieldname_pyart(var), figfname,
ref_value=ref_value, vmin=vmin, vmax=vmax, np_min=np_min,
labelx='Time UTC', labely=labely, titl=titl)
print('plotted file '+' '.join(fname))
def _print_end_msg(text):
"""
prints end message
Parameters
----------
text : str
the text to be printed
Returns
-------
Nothing
"""
print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
# ---------------------------------------------------------
# Start main:
# ---------------------------------------------------------
if __name__ == "__main__":
main()
```
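The long if/elif chain that configures each monitored variable could equally be written as a lookup table. The sketch below is an alternative formulation, not part of the script; the directory names and quantiles are copied from the branches above.
```python
# (basedir prefix, dataset directory, monitoring type, quantiles)
MON_CFG = {
    'RhoHV_rain': ('rad4alp_dataquality_PL', 'monitoring_RhoHV', 'MONITORING', [65., 80., 95.]),
    'PhiDP0': ('rad4alp_dataquality_PL', 'monitoring_PhiDP0', 'MONITORING', [25., 50., 75.]),
    'ZDR_prec': ('rad4alp_dataquality_PL', 'monitoring_ZDR', 'MONITORING', [25., 50., 75.]),
    'ZDR_snow': ('rad4alp_dataquality_PL', 'monitoring_ZDR_snow', 'MONITORING', [25., 50., 75.]),
    'dBZ_bias': ('rad4alp_dataquality_PL', 'monitoring_Zh_bias', 'MONITORING', [25., 50., 75.]),
}

rad = 'D'
basedir_prefix, dsdir, mon_type, quantiles = MON_CFG['PhiDP0']
basedir = basedir_prefix + rad
```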
#### File: pyrad/graph/plots_aux.py
```python
import numpy as np
import pyart
import matplotlib as mpl
mpl.use('Agg')
# Increase a bit font size
mpl.rcParams.update({'font.size': 16})
mpl.rcParams.update({'font.family': "sans-serif"})
def generate_complex_range_Doppler_title(radar, field, ray, datetime_format=None):
"""
    creates the complex range-Doppler plot title
Parameters
----------
radar : radar
The radar object
field : str
name of the field
    ray : int
        ray index of the ray to plot
    datetime_format : str or None
        The date time format to use
Returns
-------
titl : str
The plot title
"""
begin_time = pyart.graph.common.generate_radar_time_begin(radar)
if datetime_format:
time_str = begin_time.strftime(datetime_format)
else:
time_str = begin_time.isoformat() + 'Z'
l1 = "%s azi%.1f-ele%.1f deg. %s " % (
pyart.graph.common.generate_radar_name(radar),
radar.azimuth['data'][ray], radar.elevation['data'][ray], time_str)
field_name = pyart.graph.common.generate_field_name(radar, field)
return l1 + '\n' + field_name
def generate_angle_Doppler_title(radar, field, ang, ind_rng,
along_azi=True, datetime_format=None):
"""
creates the angle-Doppler plot title
Parameters
----------
radar : radar
The radar object
field : str
name of the field
ang : float
The fixed angle
ind_rng : int
the index of the fixed range
along_azi : bool
If true the plot is performed along azimuth, otherwise it is performed
along elevation
    datetime_format : str or None
The date time format to use
Returns
-------
titl : str
The plot title
"""
begin_time = pyart.graph.common.generate_radar_time_begin(radar)
if datetime_format:
time_str = begin_time.strftime(datetime_format)
else:
time_str = begin_time.isoformat() + 'Z'
if along_azi:
ang_type = 'ele'
else:
ang_type = 'azi'
l1 = "%s %s%.1f deg-rng%.1f m. %s " % (
pyart.graph.common.generate_radar_name(radar),
ang_type, ang, radar.range['data'][ind_rng], time_str)
field_name = pyart.graph.common.generate_field_name(radar, field)
return l1 + '\n' + field_name
def generate_complex_Doppler_title(radar, field, ray, rng,
datetime_format=None):
"""
    creates the complex Doppler plot title
Parameters
----------
radar : radar
The radar object
field : str
name of the field
    ray : int
        ray index of the ray to plot
    rng : int
        index of the range gate to plot
    datetime_format : str or None
        The date time format to use
Returns
-------
titl : str
The plot title
"""
begin_time = pyart.graph.common.generate_radar_time_begin(radar)
if datetime_format:
time_str = begin_time.strftime(datetime_format)
else:
time_str = begin_time.isoformat() + 'Z'
l1 = "%s azi%.1f-ele%.1f deg rng%.1f km. %s " % (
pyart.graph.common.generate_radar_name(radar),
radar.azimuth['data'][ray], radar.elevation['data'][ray],
radar.range['data'][rng]/1000., time_str)
field_name = pyart.graph.common.generate_field_name(radar, field)
return l1 + '\n' + field_name
def generate_fixed_rng_span_title(radar, field, stat, datetime_format=None):
"""
    creates the fixed range span plot title
Parameters
----------
radar : radar
The radar object
field : str
name of the field
stat : str
The statistic computed
    datetime_format : str or None
The date time format to use
Returns
-------
titl : str
The plot title
"""
begin_time = pyart.graph.common.generate_radar_time_begin(radar)
if datetime_format:
time_str = begin_time.strftime(datetime_format)
else:
time_str = begin_time.isoformat() + 'Z'
l1 = "%s %.1f-%.1f m %s. %s " % (
pyart.graph.common.generate_radar_name(radar),
np.min(radar.range['data']), np.max(radar.range['data']), stat,
time_str)
field_name = pyart.graph.common.generate_field_name(radar, field)
return l1 + '\n' + field_name
def generate_fixed_rng_title(radar, field, fixed_rng, datetime_format=None):
"""
creates the fixed range plot title
Parameters
----------
radar : radar
The radar object
field : str
name of the field
fixed_rng : float
The fixed range [m]
    datetime_format : str or None
The date time format to use
Returns
-------
titl : str
The plot title
"""
begin_time = pyart.graph.common.generate_radar_time_begin(radar)
if datetime_format:
time_str = begin_time.strftime(datetime_format)
else:
time_str = begin_time.isoformat() + 'Z'
l1 = "%s %.1f m. %s " % (pyart.graph.common.generate_radar_name(radar),
fixed_rng, time_str)
field_name = pyart.graph.common.generate_field_name(radar, field)
return l1 + '\n' + field_name
def get_colobar_label(field_dict, field_name):
"""
creates the colorbar label using field metadata
Parameters
----------
field_dict : dict
dictionary containing field metadata
field_name : str
name of the field
Returns
-------
label : str
colorbar label
"""
if 'standard_name' in field_dict:
standard_name = field_dict['standard_name']
elif 'long_name' in field_dict:
standard_name = field_dict['long_name']
else:
standard_name = field_name
if 'units' in field_dict:
units = field_dict['units']
else:
units = '?'
return pyart.graph.common.generate_colorbar_label(standard_name, units)
def get_field_name(field_dict, field):
"""
Return a nice field name for a particular field
Parameters
----------
field_dict : dict
dictionary containing field metadata
field : str
name of the field
Returns
-------
field_name : str
the field name
"""
if 'standard_name' in field_dict:
field_name = field_dict['standard_name']
elif 'long_name' in field_dict:
field_name = field_dict['long_name']
else:
field_name = str(field)
field_name = field_name.replace('_', ' ')
field_name = field_name[0].upper() + field_name[1:]
return field_name
def get_norm(field_name):
"""
Computes the normalization of the colormap, and gets the ticks and labels
of the colorbar from the metadata of the field. Returns None if the
required parameters are not present in the metadata
Parameters
----------
field_name : str
name of the field
Returns
-------
    norm : matplotlib BoundaryNorm or None
        the colormap normalization
ticks : list
the list of ticks in the colorbar
labels : list
the list of labels corresponding to each tick
"""
norm = None
ticks = None
ticklabs = None
field_dict = pyart.config.get_metadata(field_name)
cmap = mpl.cm.get_cmap(pyart.config.get_field_colormap(field_name))
if 'boundaries' in field_dict:
norm = mpl.colors.BoundaryNorm(
boundaries=field_dict['boundaries'], ncolors=cmap.N)
if 'ticks' in field_dict:
ticks = field_dict['ticks']
if 'labels' in field_dict:
ticklabs = field_dict['labels']
return norm, ticks, ticklabs
```
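A short usage sketch for the metadata helpers above, assuming pyrad is installed and the module is importable as `pyrad.graph.plots_aux`; the field dictionary is hand-made, not real Py-ART metadata.
```python
from pyrad.graph.plots_aux import get_colobar_label, get_field_name

# The helpers fall back from 'standard_name' to 'long_name' to the raw field
# key when building labels.
field_dict = {'long_name': 'reflectivity', 'units': 'dBZ'}
print(get_field_name(field_dict, 'dBZ'))     # -> 'Reflectivity'
print(get_colobar_label(field_dict, 'dBZ'))  # roughly 'reflectivity (dBZ)'
```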
#### File: pyrad/graph/plots_timeseries.py
```python
from warnings import warn
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
# Increase a bit font size
mpl.rcParams.update({'font.size': 16})
mpl.rcParams.update({'font.family': "sans-serif"})
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import pyart
def plot_timeseries(tvec, data_list, fname_list, labelx='Time [UTC]',
labely='Value', labels=['Sensor'], title='Time Series',
period=0, timeformat=None, colors=None, linestyles=None,
markers=None, ymin=None, ymax=None, dpi=72):
"""
plots a time series
Parameters
----------
tvec : datetime object
time of the time series
data_list : list of float array
values of the time series
fname_list : list of str
list of names of the files where to store the plot
labelx : str
The label of the X axis
labely : str
The label of the Y axis
labels : array of str
The label of the legend
title : str
The figure title
period : float
measurement period in seconds used to compute accumulation. If 0 no
accumulation is computed
timeformat : str
Specifies the tvec and time format on the x axis
colors : array of str
Specifies the colors of each line
linestyles : array of str
Specifies the line style of each line
markers: array of str
Specify the markers to be used for each line
ymin, ymax: float
Lower/Upper limit of y axis
dpi : int
dots per inch
Returns
-------
fname_list : list of str
list of names of the created plots
History
--------
201?.??.?? -fvj- creation
2017.08.21 -jgr- modified margins and grid + minor graphical updates
2018.03.05 -jgr- added x-limit of x axis to avoid unwanted error messages
"""
if period > 0:
for i, data in enumerate(data_list):
data *= (period/3600.)
data_list[i] = np.ma.cumsum(data)
fig, ax = plt.subplots(figsize=[10, 6], dpi=dpi)
lab = None
col = None
lstyle = '--'
marker = 'o'
for i, data in enumerate(data_list):
if labels is not None:
lab = labels[i]
if colors is not None:
col = colors[i]
if linestyles is not None:
lstyle = linestyles[i]
if markers is not None:
marker = markers[i]
ax.plot(tvec, data, label=lab, color=col, linestyle=lstyle,
marker=marker)
ax.set_title(title)
ax.set_xlabel(labelx)
ax.set_ylabel(labely)
ax.set_ylim(bottom=ymin, top=ymax)
ax.set_xlim([tvec[0], tvec[-1]])
# Turn on the grid
ax.grid()
if timeformat is not None:
ax.xaxis.set_major_formatter(mdates.DateFormatter(timeformat))
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
# Make a tight layout
fig.tight_layout()
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
def plot_timeseries_comp(date1, value1, date2, value2, fname_list,
labelx='Time [UTC]', labely='Value',
label1='Sensor 1', label2='Sensor 2',
titl='Time Series Comparison', period1=0, period2=0,
ymin=None, ymax=None, dpi=72):
"""
plots 2 time series in the same graph
Parameters
----------
date1 : datetime object
time of the first time series
value1 : float array
values of the first time series
date2 : datetime object
time of the second time series
value2 : float array
values of the second time series
fname_list : list of str
list of names of the files where to store the plot
labelx : str
The label of the X axis
labely : str
The label of the Y axis
label1, label2 : str
legend label for each time series
titl : str
The figure title
period1, period2 : float
measurement period in seconds used to compute accumulation. If 0 no
accumulation is computed
dpi : int
dots per inch
ymin, ymax : float
The limits of the Y-axis. None will keep the default limit.
Returns
-------
fname_list : list of str
list of names of the created plots
History
--------
201?.??.?? -fvj- created
2017.08.21 -jgr- changed some graphical aspects
"""
if (period1 > 0) and (period2 > 0):
# TODO: document this and check (sometimes artefacts)
value1 *= (period1/3600.)
value1 = np.ma.cumsum(value1)
value2 *= (period2/3600.)
value2 = np.ma.cumsum(value2)
fig, ax = plt.subplots(figsize=[10, 6.5], dpi=dpi)
ax.plot(date1, value1, 'b', label=label1, linestyle='--', marker='o')
ax.plot(date2, value2, 'r', label=label2, linestyle='--', marker='s')
ax.legend(loc='best')
ax.set_xlabel(labelx)
ax.set_ylabel(labely)
ax.set_title(titl)
ax.grid()
ax.set_ylim(bottom=ymin, top=ymax)
ax.set_xlim([date2[0], date2[-1]])
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
# Make a tight layout
fig.tight_layout()
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
def plot_monitoring_ts(date, np_t, cquant, lquant, hquant, field_name,
fname_list, ref_value=None, vmin=None, vmax=None,
np_min=0, labelx='Time [UTC]', labely='Value',
titl='Time Series', dpi=72):
"""
plots a time series of monitoring data
Parameters
----------
date : datetime object
time of the time series
np_t : int array
number of points
cquant, lquant, hquant : float array
values of the central, low and high quantiles
field_name : str
name of the field
fname_list : list of str
list of names of the files where to store the plot
ref_value : float
the reference value
vmin, vmax : float
The limits of the y axis
np_min : int
        minimum number of points to consider the sample plottable
labelx : str
The label of the X axis
labely : str
The label of the Y axis
titl : str
The figure title
dpi : int
dots per inch
Returns
-------
fname_list : list of str
list of names of the created plots
"""
vmin_pyart, vmax_pyart = pyart.config.get_field_limits(field_name)
if vmin is None:
vmin = vmin_pyart
if vmax is None:
vmax = vmax_pyart
# plot only valid data (but keep first and last date)
date2 = np.array(date)
isvalid = np.logical_not(np.ma.getmaskarray(cquant))
if np_min > 0:
has_np = np_t > np_min
isvalid = np.logical_and(isvalid, has_np)
cquant_plt = cquant[isvalid]
lquant_plt = lquant[isvalid]
hquant_plt = hquant[isvalid]
date_plt = date2[isvalid]
if not isvalid[0]:
cquant_plt = np.ma.append(np.ma.masked, cquant_plt)
lquant_plt = np.ma.append(np.ma.masked, lquant_plt)
hquant_plt = np.ma.append(np.ma.masked, hquant_plt)
date_plt = np.ma.append(date2[0], date_plt)
if not isvalid[-1]:
cquant_plt = np.ma.append(cquant_plt, np.ma.masked)
lquant_plt = np.ma.append(lquant_plt, np.ma.masked)
hquant_plt = np.ma.append(hquant_plt, np.ma.masked)
date_plt = np.ma.append(date_plt, date2[-1])
fig = plt.figure(figsize=[15, 13], dpi=dpi)
ax = fig.add_subplot(2, 1, 1)
ax.plot(date_plt, cquant_plt, 'x-')
ax.plot(date_plt, lquant_plt, 'rx-')
ax.plot(date_plt, hquant_plt, 'rx-')
if ref_value is not None:
ax.plot(date_plt, np.zeros(len(date_plt))+ref_value, 'k--')
ax.set_ylabel(labely)
ax.set_title(titl)
ax.set_ylim([vmin, vmax])
# tight x axis
ax.autoscale(enable=True, axis='x', tight=True)
ax.grid(True)
ax = fig.add_subplot(2, 1, 2)
ax.plot(date, np_t, 'x-')
if np_min is not None:
ax.plot(date, np.zeros(len(date))+np_min, 'k--')
ax.set_ylabel('Number of Samples')
ax.set_xlabel(labelx)
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
# tight x axis
ax.autoscale(enable=True, axis='x', tight=True)
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
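# --- Illustrative sketch, not part of the original module -------------------
# plot_monitoring_ts above (and the time-series functions below) drop masked
# samples but re-append a masked value at the first/last date so the x axis
# still spans the whole requested period. A stripped-down version of that
# pattern, as a hypothetical helper for a single value array:
def _keep_valid_with_endpoints(date, values):
    """Return (date_plt, values_plt) with masked interior samples removed."""
    date2 = np.array(date)
    isvalid = np.logical_not(np.ma.getmaskarray(values))
    values_plt = values[isvalid]
    date_plt = date2[isvalid]
    if not isvalid[0]:
        values_plt = np.ma.append(np.ma.masked, values_plt)
        date_plt = np.ma.append(date2[0], date_plt)
    if not isvalid[-1]:
        values_plt = np.ma.append(values_plt, np.ma.masked)
        date_plt = np.ma.append(date_plt, date2[-1])
    return date_plt, values_plt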
def plot_intercomp_scores_ts(date_vec, np_vec, meanbias_vec, medianbias_vec,
quant25bias_vec, quant75bias_vec, modebias_vec,
corr_vec, slope_vec, intercep_vec,
intercep_slope1_vec, fname_list, ref_value=0.,
np_min=0, corr_min=0.,
labelx='Time UTC',
titl='RADAR001-RADAR002 intercomparison',
dpi=72):
"""
plots a time series of radar intercomparison scores
Parameters
----------
date_vec : datetime object
time of the time series
np_vec : int array
number of points
meanbias_vec, medianbias_vec, modebias_vec : float array
mean, median and mode bias
    quant25bias_vec, quant75bias_vec : float array
        25th and 75th percentile of the bias
corr_vec : float array
correlation
slope_vec, intercep_vec : float array
slope and intercep of a linear regression
    intercep_slope1_vec : float array
        the intercept point of a linear regression of slope 1
ref_value : float
the reference value
np_min : int
The minimum number of points to consider the result valid
corr_min : float
The minimum correlation to consider the results valid
labelx : str
The label of the X axis
titl : str
The figure title
Returns
-------
fname_list : list of str
list of names of the created plots
"""
# plot only valid data (but keep first and last date)
date2 = np.array(date_vec)
isvalid = np.logical_not(np.ma.getmaskarray(meanbias_vec))
isvalid_corr = np.logical_not(np.ma.getmaskarray(corr_vec))
if np_min > 0:
has_np = np_vec > np_min
isvalid = np.logical_and(isvalid, has_np)
if corr_min > 0:
has_corr_min = corr_vec > corr_min
isvalid = np.logical_and(isvalid, has_corr_min)
meanbias_plt = meanbias_vec[isvalid]
medianbias_plt = medianbias_vec[isvalid]
quant25bias_plt = quant25bias_vec[isvalid]
quant75bias_plt = quant75bias_vec[isvalid]
modebias_plt = modebias_vec[isvalid]
intercep_plt = intercep_slope1_vec[isvalid]
corr_plt = corr_vec[isvalid_corr]
date_corr = date2[isvalid_corr]
date_plt = date2[isvalid]
if not isvalid[0]:
meanbias_plt = np.ma.append(np.ma.masked, meanbias_plt)
medianbias_plt = np.ma.append(np.ma.masked, medianbias_plt)
quant25bias_plt = np.ma.append(np.ma.masked, quant25bias_plt)
quant75bias_plt = np.ma.append(np.ma.masked, quant75bias_plt)
modebias_plt = np.ma.append(np.ma.masked, modebias_plt)
intercep_plt = np.ma.append(np.ma.masked, intercep_plt)
date_plt = np.ma.append(date2[0], date_plt)
if not isvalid[-1]:
meanbias_plt = np.ma.append(meanbias_plt, np.ma.masked)
medianbias_plt = np.ma.append(medianbias_plt, np.ma.masked)
quant25bias_plt = np.ma.append(quant25bias_plt, np.ma.masked)
quant75bias_plt = np.ma.append(quant75bias_plt, np.ma.masked)
modebias_plt = np.ma.append(modebias_plt, np.ma.masked)
intercep_plt = np.ma.append(intercep_plt, np.ma.masked)
date_plt = np.ma.append(date_plt, date2[-1])
if not isvalid_corr[0]:
corr_plt = np.ma.append(np.ma.masked, corr_plt)
date_corr = np.ma.append(date2[0], date_corr)
if not isvalid_corr[-1]:
corr_plt = np.ma.append(corr_plt, np.ma.masked)
date_corr = np.ma.append(date_corr, date2[-1])
fig = plt.figure(figsize=[10, 20], dpi=dpi)
ax = fig.add_subplot(4, 1, 1)
ax.plot(date_plt, medianbias_plt, 'bx-', label='median')
ax.plot(date_plt, meanbias_plt, 'rx-', label='mean')
ax.plot(date_plt, modebias_plt, 'gx-', label='mode')
ax.plot(date_plt, intercep_plt, 'yx-', label='intercep of slope 1 LR')
if ref_value is not None:
ax.plot(date_plt, np.zeros(len(date_plt))+ref_value, 'k--')
# plt.legend(loc='best')
ax.set_ylabel('bias [dB]')
ax.set_title(titl)
ax.set_ylim([-5., 5.])
# tight x axis
ax.autoscale(enable=True, axis='x', tight=True)
ax.grid(True)
ax = fig.add_subplot(4, 1, 2)
ax.plot(date_plt, medianbias_plt, 'bx-', label='median')
ax.plot(date_plt, quant25bias_plt, 'rx-', label='25-percentile')
ax.plot(date_plt, quant75bias_plt, 'rx-', label='75-percentile')
if ref_value is not None:
ax.plot(date_plt, np.zeros(len(date_plt))+ref_value, 'k--')
# plt.legend(loc='best')
ax.set_ylabel('bias [dB]')
ax.set_ylim([-5., 5.])
# tight x axis
ax.autoscale(enable=True, axis='x', tight=True)
ax.grid(True)
ax = fig.add_subplot(4, 1, 3)
ax.plot(date_corr, corr_plt, 'bx-')
if corr_min > 0:
ax.plot(date_corr, np.zeros(len(date_corr))+corr_min, 'k--')
ax.set_ylabel('correlation')
ax.set_ylim([0., 1.])
# tight x axis
ax.autoscale(enable=True, axis='x', tight=True)
ax.grid(True)
ax = fig.add_subplot(4, 1, 4)
ax.plot(date2, np_vec, 'bx-')
if np_min > 0:
ax.plot(date2, np.zeros(len(date2))+np_min, 'k--')
ax.set_ylabel('Number of Samples')
ax.set_xlabel(labelx)
# tight x axis
ax.autoscale(enable=True, axis='x', tight=True)
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
def plot_ml_ts(dt_ml_arr, ml_top_avg_arr, ml_top_std_arr, thick_avg_arr,
thick_std_arr, nrays_valid_arr, nrays_total_arr, fname_list,
labelx='Time UTC', titl='Melting layer time series', dpi=72):
"""
plots a time series of melting layer data
Parameters
----------
    dt_ml_arr : datetime array
        times of the time series
    ml_top_avg_arr, ml_top_std_arr : float array
        average and standard deviation of the melting layer top height [m MSL]
    thick_avg_arr, thick_std_arr : float array
        average and standard deviation of the melting layer thickness [m]
    nrays_valid_arr, nrays_total_arr : int array
        number of valid rays and total number of rays
    fname_list : list of str
        list of names of the files where to store the plot
    labelx : str
        The label of the X axis
    titl : str
        The figure title
    dpi : int
        dots per inch
Returns
-------
fname_list : list of str
list of names of the created plots
"""
fig = plt.figure(figsize=[10, 15], dpi=dpi)
ax = fig.add_subplot(3, 1, 1)
ax.plot(dt_ml_arr, ml_top_avg_arr, 'bx-', label='avg')
ax.plot(dt_ml_arr, ml_top_avg_arr+ml_top_std_arr, 'rx-', label='avg+std')
ax.plot(dt_ml_arr, ml_top_avg_arr-ml_top_std_arr, 'rx-', label='avg-std')
# plt.legend(loc='best')
ax.set_ylabel('Top height [m MSL]')
ax.set_title(titl)
ax.set_ylim([0., 6000.])
ax.set_xlim([dt_ml_arr[0], dt_ml_arr[-1]])
# tight x axis
ax.autoscale(enable=True, axis='x', tight=True)
ax.grid(True)
ax = fig.add_subplot(3, 1, 2)
ax.plot(dt_ml_arr, thick_avg_arr, 'bx-', label='avg')
ax.plot(dt_ml_arr, thick_avg_arr+thick_std_arr, 'rx-', label='avg+std')
ax.plot(dt_ml_arr, thick_avg_arr-thick_std_arr, 'rx-', label='avg-std')
# plt.legend(loc='best')
ax.set_ylabel('Thickness [m]')
ax.set_ylim([0., 3000.])
ax.set_xlim([dt_ml_arr[0], dt_ml_arr[-1]])
# tight x axis
ax.autoscale(enable=True, axis='x', tight=True)
ax.grid(True)
ax = fig.add_subplot(3, 1, 3)
ax.plot(dt_ml_arr, nrays_valid_arr, 'bx-', label='N valid rays')
ax.plot(dt_ml_arr, nrays_total_arr, 'rx-', label='rays total')
# plt.legend(loc='best')
ax.set_ylabel('Rays')
ax.set_xlabel(labelx)
ax.set_ylim([0, np.max(nrays_total_arr)+5])
ax.set_xlim([dt_ml_arr[0], dt_ml_arr[-1]])
# tight x axis
ax.autoscale(enable=True, axis='x', tight=True)
ax.grid(True)
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
def plot_sun_retrieval_ts(sun_retrieval, data_type, fname_list, labelx='Date',
titl='Sun retrieval Time Series', dpi=72):
"""
    plots a sun retrieval time series
Parameters
----------
sun_retrieval : tuple
tuple containing the retrieved parameters
data_type : str
parameter to be plotted
fname_list : list of str
list of names of the files where to store the plot
labelx : str
the x label
titl : str
the title of the plot
dpi : int
dots per inch
Returns
-------
fname_list : list of str
list of names of the created plots
"""
value_std = None
ref = None
date = sun_retrieval[1]
if data_type == 'nhits_h':
value = sun_retrieval[2]
labely = 'Number of sun hits H channel'
vmin = 0
vmax = np.max(sun_retrieval[2])+1
elif data_type == 'el_width_h':
value = sun_retrieval[3]
labely = 'Elevation beamwidth H channel (Deg)'
vmin = 0.
vmax = 4.
elif data_type == 'az_width_h':
value = sun_retrieval[4]
labely = 'Azimuth beamwidth H channel (Deg)'
vmin = 0.
vmax = 4.
elif data_type == 'el_bias_h':
value = sun_retrieval[5]
ref = np.zeros(len(value))
labely = 'Elevation pointing bias H channel (Deg)'
vmin = -2.
vmax = 2.
elif data_type == 'az_bias_h':
value = sun_retrieval[6]
ref = np.zeros(len(value))
labely = 'Azimuth pointing bias H channel (Deg)'
vmin = -2.
vmax = 2.
elif data_type == 'dBm_sun_est':
value = sun_retrieval[7]
value_std = sun_retrieval[8]
labely = 'Sun Power H channel (dBm)'
vmin = -110.
vmax = -90.
elif data_type == 'rx_bias_h':
value = (10.*np.ma.log10(sun_retrieval[9]) -
10.*np.ma.log10(sun_retrieval[21]))
value_std = sun_retrieval[8]
ref = np.zeros(len(value))
labely = 'Receiver bias H channel (dB)'
vmin = -5.
vmax = 5.
elif data_type == 'sf_h':
value = 10.*np.ma.log10(sun_retrieval[9])
# value_std = sun_retrieval[8]
ref = 10.*np.ma.log10(sun_retrieval[21])
labely = 'Observed solar flux H channel (dB(sfu))'
vmin = 15.
vmax = 30.
elif data_type == 'nhits_v':
value = sun_retrieval[10]
labely = 'Number of sun hits V channel'
vmin = 0
vmax = np.max(sun_retrieval[10])+1
elif data_type == 'el_width_v':
value = sun_retrieval[11]
labely = 'Elevation beamwidth V channel (Deg)'
vmin = 0.
vmax = 4.
elif data_type == 'az_width_v':
value = sun_retrieval[12]
labely = 'Azimuth beamwidth V channel (Deg)'
vmin = 0.
vmax = 4.
elif data_type == 'el_bias_v':
value = sun_retrieval[13]
ref = np.zeros(len(value))
labely = 'Elevation pointing bias V channel (Deg)'
vmin = -2.
vmax = 2.
elif data_type == 'az_bias_v':
value = sun_retrieval[14]
ref = np.zeros(len(value))
labely = 'Azimuth pointing bias V channel (Deg)'
vmin = -2.
vmax = 2.
elif data_type == 'dBmv_sun_est':
value = sun_retrieval[15]
value_std = sun_retrieval[16]
labely = 'Sun Power V channel (dBm)'
vmin = -110.
vmax = -90.
elif data_type == 'rx_bias_v':
value = (10.*np.ma.log10(sun_retrieval[17]) -
10.*np.ma.log10(sun_retrieval[21]))
value_std = sun_retrieval[16]
ref = np.zeros(len(value))
labely = 'Receiver bias V channel (dB)'
vmin = -5.
vmax = 5.
elif data_type == 'sf_v':
value = 10.*np.ma.log10(sun_retrieval[17])
# value_std = sun_retrieval[16]
ref = 10.*np.ma.log10(sun_retrieval[21])
labely = 'Observed solar flux V channel (dB(sfu))'
vmin = 15.
vmax = 30.
elif data_type == 'nhits_zdr':
value = sun_retrieval[18]
labely = 'Number of sun hits ZDR'
vmin = 0
vmax = np.max(sun_retrieval[18])+1
elif data_type == 'ZDR_sun_est':
value = sun_retrieval[19]
value_std = sun_retrieval[20]
ref = np.zeros(len(value))
labely = 'Sun ZDR (dB)'
vmin = -2.
vmax = 2.
mask = np.ma.getmaskarray(value)
if mask.all():
warn('Unable to create figure '+' '.join(fname_list) +
'. No valid data')
return None
# plot only valid data (but keep first and last date)
isvalid = np.logical_not(mask)
date2 = np.array(date)
value_plt = value[isvalid]
date_plt = date2[isvalid]
if not isvalid[0]:
value_plt = np.ma.append(np.ma.masked, value_plt)
date_plt = np.ma.append(date2[0], date_plt)
if not isvalid[-1]:
value_plt = np.ma.append(value_plt, np.ma.masked)
date_plt = np.ma.append(date_plt, date2[-1])
fig, ax = plt.subplots(figsize=[10, 6], dpi=dpi)
ax.plot(date_plt, value_plt, 'x-')
if value_std is not None:
value_std_plt = value_std[isvalid]
if not isvalid[0]:
value_std_plt = np.ma.append(np.ma.masked, value_std_plt)
if not isvalid[-1]:
value_std_plt = np.ma.append(value_std_plt, np.ma.masked)
ax.plot(date_plt, value_plt+value_std_plt, 'rx-')
ax.plot(date_plt, value_plt-value_std_plt, 'rx-')
if ref is not None:
ref_plt = ref[isvalid]
if not isvalid[0]:
ref_plt = np.ma.append(ref[0], ref_plt)
if not isvalid[-1]:
ref_plt = np.ma.append(ref_plt, ref[-1])
ax.plot(date_plt, ref_plt, 'k--')
ax.set_xlabel(labelx)
ax.set_ylabel(labely)
ax.set_title(titl)
ax.set_ylim([vmin, vmax])
ax.set_xlim([date_plt[0], date_plt[-1]])
# tight x axis
ax.autoscale(enable=True, axis='x', tight=True)
ax.grid(True)
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
```
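A minimal usage sketch for `plot_timeseries` defined above; the data and output file name are invented, and the import follows the form used by the scripts earlier in this document.
```python
import datetime
import numpy as np
from pyrad.graph import plot_timeseries

tvec = [datetime.datetime(2018, 4, 1) + datetime.timedelta(hours=h)
        for h in range(6)]
rain = np.ma.array([0.0, 0.4, 1.2, 0.8, 0.1, 0.0])

# Writes a single PNG with one labelled line; period=0 keeps the raw values
# instead of accumulating them.
plot_timeseries(tvec, [rain], ['/tmp/rain_ts.png'],
                labely='Rain rate [mm/h]', labels=['gauge'],
                title='Toy rain-rate series')
```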
#### File: pyrad/graph/plots_vol.py
```python
from warnings import warn
import numpy as np
from netCDF4 import num2date
try:
import cartopy
from cartopy.io.img_tiles import Stamen
_CARTOPY_AVAILABLE = True
except ImportError:
_CARTOPY_AVAILABLE = False
try:
import shapely
_SHAPELY_AVAILABLE = True
except ImportError:
warn('shapely not available')
_SHAPELY_AVAILABLE = False
import matplotlib as mpl
mpl.use('Agg')
# Increase a bit font size
mpl.rcParams.update({'font.size': 16})
mpl.rcParams.update({'font.family': "sans-serif"})
import matplotlib.pyplot as plt
import pyart
from .plots_aux import get_colobar_label, get_norm, generate_fixed_rng_title
from .plots_aux import generate_fixed_rng_span_title
from .plots_aux import generate_complex_range_Doppler_title
from .plots import plot_quantiles, plot_histogram, _plot_time_range, _plot_sunscan
from ..util.radar_utils import compute_quantiles_sweep, find_ang_index
from ..util.radar_utils import compute_histogram_sweep
def plot_ray(radar, field_name, ind_ray, prdcfg, fname_list, titl=None,
vmin=None, vmax=None, save_fig=True):
"""
plots a ray
Parameters
----------
radar : Radar object
object containing the radar data to plot
field_name : str
name of the radar field to plot
ind_ray : int
ray index to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
titl : str
Plot title
vmin, vmax : float
min and max values of the y axis
save_fig : bool
if true save the figure. If false it does not close the plot and
returns the handle to the figure
Returns
-------
fname_list : list of str or
    fig, ax : tuple
        list of names of the saved plots or handles of the figure and axes
"""
rng_km = radar.range['data']/1000.
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
xsize = prdcfg['ppiImageConfig']['xsize']
ysize = prdcfg['ppiImageConfig']['ysize']
fig = plt.figure(figsize=[xsize, ysize], dpi=dpi)
if titl is None:
titl = generate_complex_range_Doppler_title(
radar, field_name, ind_ray)
labely = get_colobar_label(radar.fields[field_name], field_name)
ax = fig.add_subplot(111)
ax.plot(rng_km, radar.fields[field_name]['data'][ind_ray, :], marker='x')
ax.set_title(titl)
ax.set_xlabel('Range (km)')
ax.set_ylabel(labely)
ax.set_ylim(bottom=vmin, top=vmax)
ax.set_xlim([rng_km[0], rng_km[-1]])
# Turn on the grid
ax.grid()
# Make a tight layout
fig.tight_layout()
if save_fig:
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
return (fig, ax)
def plot_ppi(radar, field_name, ind_el, prdcfg, fname_list, plot_type='PPI',
titl=None, vmin=None, vmax=None, step=None, quantiles=None,
save_fig=True):
"""
plots a PPI
Parameters
----------
radar : Radar object
object containing the radar data to plot
field_name : str
name of the radar field to plot
ind_el : int
sweep index to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
plot_type : str
type of plot (PPI, QUANTILES or HISTOGRAM)
titl : str
Plot title
vmin, vmax : float
The minimum and maximum value. If None the scale is going to be
obtained from the Py-ART config file.
step : float
step for histogram plotting
quantiles : float array
quantiles to plot
save_fig : bool
if true save the figure. If false it does not close the plot and
returns the handle to the figure
Returns
-------
fname_list : list of str or
    fig, ax : tuple
        list of names of the saved plots or handles of the figure and axes
"""
if plot_type == 'PPI':
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
norm = None
ticks = None
ticklabs = None
if vmin is None or vmax is None:
norm, ticks, ticklabs = get_norm(field_name)
vmin = None
vmax = None
xsize = prdcfg['ppiImageConfig']['xsize']
ysize = prdcfg['ppiImageConfig']['ysize']
fig = plt.figure(figsize=[xsize, ysize], dpi=dpi)
ax = fig.add_subplot(111, aspect='equal')
display = pyart.graph.RadarDisplay(radar)
display.plot_ppi(
field_name, title=titl, sweep=ind_el, norm=norm, ticks=ticks,
vmin=vmin, vmax=vmax, ticklabs=ticklabs, fig=fig, ax=ax)
display.set_limits(
ylim=[prdcfg['ppiImageConfig']['ymin'],
prdcfg['ppiImageConfig']['ymax']],
xlim=[prdcfg['ppiImageConfig']['xmin'],
prdcfg['ppiImageConfig']['xmax']], ax=ax)
if 'rngRing' in prdcfg['ppiImageConfig']:
if prdcfg['ppiImageConfig']['rngRing'] > 0:
display.plot_range_rings(np.arange(
0., radar.range['data'][-1]/1000.,
prdcfg['ppiImageConfig']['rngRing']), ax=ax)
display.plot_cross_hair(5., ax=ax)
# Turn on the grid
ax.grid()
# Make a tight layout
fig.tight_layout()
if save_fig:
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
return (fig, ax)
if plot_type == 'QUANTILES':
quantiles, values = compute_quantiles_sweep(
radar.fields[field_name]['data'],
radar.sweep_start_ray_index['data'][ind_el],
radar.sweep_end_ray_index['data'][ind_el], quantiles=quantiles)
titl = pyart.graph.common.generate_title(radar, field_name, ind_el)
labely = get_colobar_label(radar.fields[field_name], field_name)
plot_quantiles(quantiles, values, fname_list, labelx='quantile',
labely=labely, titl=titl)
elif plot_type == 'HISTOGRAM':
bins, values = compute_histogram_sweep(
radar.fields[field_name]['data'],
radar.sweep_start_ray_index['data'][ind_el],
radar.sweep_end_ray_index['data'][ind_el], field_name, step=step)
titl = pyart.graph.common.generate_title(radar, field_name, ind_el)
labelx = get_colobar_label(radar.fields[field_name], field_name)
plot_histogram(bins, values, fname_list, labelx=labelx,
labely='Number of Samples', titl=titl)
else:
warn('Unknown plot type '+plot_type)
return fname_list
def plot_ppi_map(radar, field_name, ind_el, prdcfg, fname_list,
save_fig=True):
"""
plots a PPI on a geographic map
Parameters
----------
radar : Radar object
object containing the radar data to plot
field_name : str
name of the radar field to plot
ind_el : int
sweep index to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
save_fig : bool
if true save the figure. If false it does not close the plot and
returns the handle to the figure
Returns
-------
fname_list : list of str or
    fig, ax, display_map : tuple
        list of names of the saved plots or handles of the figure, axes and
        map display
"""
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
norm, ticks, ticklabs = get_norm(field_name)
xsize = prdcfg['ppiMapImageConfig']['xsize']
ysize = prdcfg['ppiMapImageConfig']['ysize']
lonstep = prdcfg['ppiMapImageConfig'].get('lonstep', 0.5)
latstep = prdcfg['ppiMapImageConfig'].get('latstep', 0.5)
min_lon = prdcfg['ppiMapImageConfig'].get('lonmin', 2.5)
max_lon = prdcfg['ppiMapImageConfig'].get('lonmax', 12.5)
min_lat = prdcfg['ppiMapImageConfig'].get('latmin', 43.5)
max_lat = prdcfg['ppiMapImageConfig'].get('latmax', 49.5)
resolution = prdcfg['ppiMapImageConfig'].get('mapres', '110m')
if resolution not in ('110m', '50m', '10m'):
warn('Unknown map resolution: '+resolution)
resolution = '110m'
background_zoom = prdcfg['ppiMapImageConfig'].get('background_zoom', 8)
lon_lines = np.arange(np.floor(min_lon), np.ceil(max_lon)+1, lonstep)
lat_lines = np.arange(np.floor(min_lat), np.ceil(max_lat)+1, latstep)
fig = plt.figure(figsize=[xsize, ysize], dpi=dpi)
ax = fig.add_subplot(111)
display_map = pyart.graph.RadarMapDisplay(radar)
display_map.plot_ppi_map(
field_name, sweep=ind_el, norm=norm, ticks=ticks, ticklabs=ticklabs,
min_lon=min_lon, max_lon=max_lon, min_lat=min_lat, max_lat=max_lat,
resolution=resolution, background_zoom=background_zoom,
lat_lines=lat_lines, lon_lines=lon_lines,
maps_list=prdcfg['ppiMapImageConfig']['maps'], ax=ax, fig=fig,
colorbar_flag=True, alpha=1)
ax = display_map.ax
if 'rngRing' in prdcfg['ppiMapImageConfig']:
if prdcfg['ppiMapImageConfig']['rngRing'] > 0:
rng_rings = np.arange(
0., radar.range['data'][-1]/1000.,
prdcfg['ppiMapImageConfig']['rngRing'])
for rng_ring in rng_rings:
display_map.plot_range_ring(rng_ring, ax=ax)
if save_fig:
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
return (fig, ax, display_map)
def plot_rhi(radar, field_name, ind_az, prdcfg, fname_list, plot_type='RHI',
titl=None, vmin=None, vmax=None, step=None, quantiles=None,
save_fig=True):
"""
plots an RHI
Parameters
----------
radar : Radar object
object containing the radar data to plot
field_name : str
name of the radar field to plot
ind_az : int
sweep index to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
plot_type : str
        type of plot (RHI, QUANTILES or HISTOGRAM)
titl : str
Plot title
vmin, vmax : float
The minimum and maximum value. If None the scale is going to be
obtained from the Py-ART config file.
step : float
step for histogram plotting
quantiles : float array
quantiles to plot
save_fig : bool
if true save the figure. If false it does not close the plot and
returns the handle to the figure
Returns
-------
fname_list : list of str or
    fig, ax : tuple
        list of names of the saved plots or handles of the figure and axes
"""
if plot_type == 'RHI':
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
norm = None
ticks = None
ticklabs = None
if vmin is None or vmax is None:
norm, ticks, ticklabs = get_norm(field_name)
vmin = None
vmax = None
xsize = prdcfg['rhiImageConfig']['xsize']
ysize = prdcfg['rhiImageConfig']['ysize']
fig = plt.figure(figsize=[xsize, ysize], dpi=dpi)
ax = fig.add_subplot(111, aspect='equal')
display = pyart.graph.RadarDisplay(radar)
display.plot_rhi(
field_name, title=titl, sweep=ind_az, norm=norm, ticks=ticks,
ticklabs=ticklabs, vmin=vmin, vmax=vmax,
colorbar_orient='horizontal', reverse_xaxis=False, fig=fig, ax=ax)
display.set_limits(
ylim=[prdcfg['rhiImageConfig']['ymin'],
prdcfg['rhiImageConfig']['ymax']],
xlim=[prdcfg['rhiImageConfig']['xmin'],
prdcfg['rhiImageConfig']['xmax']],
ax=ax)
display.plot_cross_hair(5., ax=ax)
# Turn on the grid
ax.grid()
# Make a tight layout
fig.tight_layout()
if save_fig:
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
return (fig, ax)
if plot_type == 'QUANTILES':
quantiles, values = compute_quantiles_sweep(
radar.fields[field_name]['data'],
radar.sweep_start_ray_index['data'][ind_az],
radar.sweep_end_ray_index['data'][ind_az], quantiles=quantiles)
if titl is None:
titl = pyart.graph.common.generate_title(
radar, field_name, ind_az)
labely = get_colobar_label(radar.fields[field_name], field_name)
plot_quantiles(quantiles, values, fname_list, labelx='quantile',
labely=labely, titl=titl)
elif plot_type == 'HISTOGRAM':
bins, values = compute_histogram_sweep(
radar.fields[field_name]['data'],
radar.sweep_start_ray_index['data'][ind_az],
radar.sweep_end_ray_index['data'][ind_az], field_name, step=step)
if titl is None:
titl = pyart.graph.common.generate_title(
radar, field_name, ind_az)
labelx = get_colobar_label(radar.fields[field_name], field_name)
plot_histogram(bins, values, fname_list, labelx=labelx,
labely='Number of Samples', titl=titl)
else:
warn('Unknown plot type '+plot_type)
return fname_list
def plot_bscope(radar, field_name, ind_sweep, prdcfg, fname_list,
vmin=None, vmax=None, ray_dim='ang', xaxis_rng=True):
"""
plots a B-Scope (angle-range representation)
Parameters
----------
radar : Radar object
object containing the radar data to plot
field_name : str
name of the radar field to plot
ind_sweep : int
sweep index to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
vmin, vmax : float
Min and max values of the colorbar
ray_dim : str
the ray dimension. Can be 'ang' or 'time'
    xaxis_rng : bool
if true the range will be in the x-axis. Otherwise it will be in the
y-axis.
Returns
-------
fname_list : list of str
list of names of the created plots
"""
norm = None
ticks = None
ticklabs = None
if vmin is None or vmax is None:
norm, ticks, ticklabs = get_norm(field_name)
if norm is None: # if norm is set do not override with vmin/vmax
vmin, vmax = pyart.config.get_field_limits(field_name)
radar_aux = radar.extract_sweeps([ind_sweep])
if ray_dim == 'ang':
if radar_aux.scan_type == 'ppi':
ray = np.sort(radar_aux.azimuth['data'])
ind_ray = np.argsort(radar_aux.azimuth['data'])
field = radar_aux.fields[field_name]['data'][ind_ray, :]
ray_label = 'azimuth angle (degrees)'
elif radar_aux.scan_type == 'rhi':
ray = np.sort(radar_aux.elevation['data'])
ind_ray = np.argsort(radar_aux.elevation['data'])
field = radar_aux.fields[field_name]['data'][ind_ray, :]
ray_label = 'elevation angle (degrees)'
else:
field = radar_aux.fields[field_name]['data']
ray = np.array(range(radar_aux.nrays))
ray_label = 'ray number'
else:
ray = np.sort(radar_aux.time['data'])
start_time = ray[0]
ray -= start_time
ind_ray = np.argsort(radar_aux.time['data'])
field = radar_aux.fields[field_name]['data'][ind_ray, :]
sweep_start_time = num2date(
start_time, radar_aux.time['units'], radar_aux.time['calendar'])
ray_label = (
'time [s from ' +
sweep_start_time.strftime('%Y-%m-%d %H:%M:%S')+' UTC]')
# display data
titl = pyart.graph.common.generate_title(radar_aux, field_name, 0)
label = get_colobar_label(radar_aux.fields[field_name], field_name)
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
fig = plt.figure(figsize=[prdcfg['ppiImageConfig']['xsize'],
prdcfg['ppiImageConfig']['ysize']],
dpi=dpi)
ax = fig.add_subplot(111)
if radar_aux.ngates == 1:
ax.plot(ray, field, 'bx', figure=fig)
ax.set_xlabel(ray_label)
ax.set_ylabel(label)
ax.set_title(titl)
else:
cmap = pyart.config.get_field_colormap(field_name)
rng_aux = radar_aux.range['data']/1000.
rng_res = rng_aux[1]-rng_aux[0]
rng_aux = np.append(rng_aux-rng_res/2., rng_aux[-1]+rng_res/2.)
rng_label = 'Range (km)'
ray_res = np.ma.median(ray[1:]-ray[:-1])
ray_aux = np.append(ray-ray_res/2, ray[-1]+ray_res/2)
if xaxis_rng:
cax = ax.pcolormesh(
rng_aux, ray_aux, field, cmap=cmap, vmin=vmin, vmax=vmax,
norm=norm)
ax.set_xlabel(rng_label)
ax.set_ylabel(ray_label)
else:
cax = ax.pcolormesh(
ray_aux, rng_aux, np.ma.transpose(field), cmap=cmap,
vmin=vmin, vmax=vmax, norm=norm)
ax.set_xlabel(ray_label)
ax.set_ylabel(rng_label)
ax.set_title(titl)
cb = fig.colorbar(cax)
if ticks is not None:
cb.set_ticks(ticks)
if ticklabs:
cb.set_ticklabels(ticklabs)
cb.set_label(label)
# Make a tight layout
fig.tight_layout()
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
def plot_time_range(radar, field_name, ind_sweep, prdcfg, fname_list,
vmin=None, vmax=None, ylabel='range (Km)'):
"""
plots a time-range plot
Parameters
----------
radar : Radar object
object containing the radar data to plot
field_name : str
name of the radar field to plot
ind_sweep : int
sweep index to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
vmin, vmax : float
Min and max values of the colorbar
ylabel : str
The y-axis label
Returns
-------
fname_list : list of str
list of names of the created plots
"""
radar_aux = radar.extract_sweeps([ind_sweep])
field = radar_aux.fields[field_name]['data']
# display data
titl = pyart.graph.common.generate_title(radar_aux, field_name, ind_sweep)
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
xsize = prdcfg['ppiImageConfig'].get('xsize', 10)
ysize = prdcfg['ppiImageConfig'].get('ysize', 8)
rng_aux = radar_aux.range['data']
if ylabel == 'range (Km)':
rng_aux /= 1000.
rng_res = rng_aux[1]-rng_aux[0]
rng_aux = np.append(rng_aux-rng_res/2., rng_aux[-1]+rng_res/2.)
time_res = np.mean(radar_aux.time['data'][1:]-radar_aux.time['data'][0:-1])
time_aux = np.append(
radar_aux.time['data'], radar_aux.time['data'][-1]+time_res)
return _plot_time_range(
time_aux, rng_aux, field, field_name, fname_list, titl=titl,
ylabel=ylabel, vmin=vmin, vmax=vmax, figsize=[xsize, ysize], dpi=dpi)
def plot_fixed_rng(radar, field_name, prdcfg, fname_list, azi_res=None,
ele_res=None, ang_tol=1., vmin=None, vmax=None):
"""
plots a fixed range plot
Parameters
----------
radar : radar object
The radar object containing the fixed range data
field_name : str
The name of the field to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
azi_res, ele_res : float
The nominal azimuth and elevation angle resolution [deg]
ang_tol : float
The tolerance between the nominal and the actual radar angle
vmin, vmax : float
Min and Max values of the color scale. If None it is going to be taken
from the Py-ART config files
Returns
-------
fname_list : list of str
list of names of the created plots
"""
# Get radar azimuth angles within limits taking as reference
# the first elevation angle
fixed_rng = radar.range['data'][0]
if radar.scan_type == 'ppi':
ele_vec = np.sort(radar.fixed_angle['data'])
azi_vec = np.sort(
radar.azimuth['data'][radar.sweep_start_ray_index['data'][0]:
radar.sweep_end_ray_index['data'][0]+1])
else:
ele_vec = np.sort(
radar.elevation['data'][radar.sweep_start_ray_index['data'][0]:
radar.sweep_end_ray_index['data'][0]+1])
azi_vec = np.sort(radar.fixed_angle['data'])
# put data in a regular 2D grid
field_2D = np.ma.masked_all((azi_vec.size, ele_vec.size))
sweep_start_inds = radar.sweep_start_ray_index['data']
sweep_end_inds = radar.sweep_end_ray_index['data']
if radar.scan_type == 'ppi':
for j, ele in enumerate(ele_vec):
field_1D = radar.fields[field_name]['data'][
sweep_start_inds[j]:sweep_end_inds[j]+1]
azi_1D = radar.azimuth['data'][
sweep_start_inds[j]:sweep_end_inds[j]+1]
for i, azi in enumerate(azi_vec):
ind = find_ang_index(azi_1D, azi, ang_tol=ang_tol)
if ind is None:
continue
try:
field_2D[i, j] = field_1D[ind]
except ValueError:
field_2D[i, j] = field_1D[ind][0]
else:
for i, azi in enumerate(azi_vec):
field_1D = radar.fields[field_name]['data'][
sweep_start_inds[i]:sweep_end_inds[i]+1]
ele_1D = radar.elevation['data'][
sweep_start_inds[i]:sweep_end_inds[i]+1]
for j, ele in enumerate(ele_vec):
ind = find_ang_index(ele_1D, ele, ang_tol=ang_tol)
if ind is None:
continue
field_2D[i, j] = field_1D[ind]
# get limits of angle bins
if radar.scan_type == 'ppi':
if azi_res is None:
azi_res = np.median(azi_vec[1:]-azi_vec[0:-1])
if radar.ray_angle_res is not None:
azi_res = np.min(
[radar.ray_angle_res['data'][0], azi_res])
azi_vec = np.append(azi_vec-azi_res/2., azi_vec[-1]+azi_res/2.)
if ele_res is None:
ele_res = np.median(ele_vec[1:]-ele_vec[0:-1])
if radar.instrument_parameters is not None:
if 'radar_beam_width_h' in radar.instrument_parameters:
bwidth = radar.instrument_parameters[
'radar_beam_width_h']['data'][0]
ele_res = np.min([bwidth, ele_res])
elif 'radar_beam_width_v' in radar.instrument_parameters:
bwidth = radar.instrument_parameters[
'radar_beam_width_v']['data'][0]
ele_res = np.min([bwidth, ele_res])
ele_vec = np.append(ele_vec-ele_res/2., ele_vec[-1]+ele_res/2.)
else:
if ele_res is None:
ele_res = np.median(ele_vec[1:]-ele_vec[0:-1])
if radar.ray_angle_res is not None:
ele_res = np.min(
[radar.ray_angle_res['data'][0], ele_res])
ele_vec = np.append(ele_vec-ele_res/2., ele_vec[-1]+ele_res/2.)
if azi_res is None:
azi_res = np.median(azi_vec[1:]-azi_vec[0:-1])
if radar.instrument_parameters is not None:
if 'radar_beam_width_h' in radar.instrument_parameters:
bwidth = radar.instrument_parameters[
'radar_beam_width_h']['data'][0]
azi_res = np.min([bwidth, azi_res])
elif 'radar_beam_width_v' in radar.instrument_parameters:
bwidth = radar.instrument_parameters[
'radar_beam_width_v']['data'][0]
azi_res = np.min([bwidth, azi_res])
azi_vec = np.append(azi_vec-azi_res/2., azi_vec[-1]+azi_res/2.)
titl = generate_fixed_rng_title(radar, field_name, fixed_rng)
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
xsize = prdcfg['ppiImageConfig'].get('xsize', 10)
ysize = prdcfg['ppiImageConfig'].get('ysize', 8)
return _plot_time_range(
azi_vec, ele_vec, field_2D, field_name, fname_list, titl=titl,
xlabel='azimuth (deg)', ylabel='elevation (deg)',
figsize=[xsize, ysize], vmin=vmin, vmax=vmax, dpi=dpi)
def plot_fixed_rng_span(radar, field_name, prdcfg, fname_list, azi_res=None,
ele_res=None, ang_tol=1., stat='max'):
"""
    plots a fixed range plot of a statistic computed along range
Parameters
----------
radar : radar object
The radar object containing the fixed range data
field_name : str
The name of the field to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
azi_res, ele_res : float
The nominal azimuth and elevation angle resolution [deg]
    ang_tol : float
        The tolerance between the nominal and the actual radar angle
    stat : str
        The statistic to compute along range. Can be 'max', 'min', 'mean'
        or 'median'
Returns
-------
fname_list : list of str
list of names of the created plots
"""
# Get radar azimuth angles within limits taking as reference
# the first elevation angle
if radar.scan_type == 'ppi':
ele_vec = np.sort(radar.fixed_angle['data'])
azi_vec = np.sort(
radar.azimuth['data'][radar.sweep_start_ray_index['data'][0]:
radar.sweep_end_ray_index['data'][0]+1])
else:
ele_vec = np.sort(
radar.elevation['data'][radar.sweep_start_ray_index['data'][0]:
radar.sweep_end_ray_index['data'][0]+1])
azi_vec = np.sort(radar.fixed_angle['data'])
# put data in a regular 2D grid
field_2D = np.ma.masked_all((azi_vec.size, ele_vec.size))
rng_2D = np.ma.masked_all((azi_vec.size, ele_vec.size))
sweep_start_inds = radar.sweep_start_ray_index['data']
sweep_end_inds = radar.sweep_end_ray_index['data']
if radar.scan_type == 'ppi':
for j, ele in enumerate(ele_vec):
field = radar.fields[field_name]['data'][
sweep_start_inds[j]:sweep_end_inds[j]+1, :]
if stat == 'max':
field_1D = np.ma.max(field, axis=-1)
ind = np.ma.argmax(field, axis=-1)
rng_1D = radar.range['data'][ind]
elif stat == 'min':
field_1D = np.ma.min(field, axis=-1)
ind = np.ma.argmin(field, axis=-1)
rng_1D = radar.range['data'][ind]
elif stat == 'mean':
field_1D = np.ma.mean(field, axis=-1)
mid_rng = radar.range['data'][int(radar.ngates/2)]
rng_1D = np.ma.zeros(np.shape(field_1D))+mid_rng
elif stat == 'median':
field_1D = np.ma.median(field, axis=-1)
mid_rng = radar.range['data'][int(radar.ngates/2)]
rng_1D = np.ma.zeros(np.shape(field_1D))+mid_rng
azi_1D = radar.azimuth['data'][
sweep_start_inds[j]:sweep_end_inds[j]+1]
for i, azi in enumerate(azi_vec):
ind = find_ang_index(azi_1D, azi, ang_tol=ang_tol)
if ind is None:
continue
field_2D[i, j] = field_1D[ind]
rng_2D[i, j] = rng_1D[ind]
else:
for i, azi in enumerate(azi_vec):
field = radar.fields[field_name]['data'][
sweep_start_inds[i]:sweep_end_inds[i]+1, :]
if stat == 'max':
field_1D = np.ma.max(field, axis=-1)
ind = np.ma.argmax(field, axis=-1)
rng_1D = radar.range['data'][ind]
elif stat == 'min':
field_1D = np.ma.min(field, axis=-1)
ind = np.ma.argmin(field, axis=-1)
rng_1D = radar.range['data'][ind]
elif stat == 'mean':
field_1D = np.ma.mean(field, axis=-1)
mid_rng = radar.range['data'][int(radar.ngates/2)]
rng_1D = np.ma.zeros(np.shape(field_1D))+mid_rng
elif stat == 'median':
field_1D = np.ma.median(field, axis=-1)
mid_rng = radar.range['data'][int(radar.ngates/2)]
rng_1D = np.ma.zeros(np.shape(field_1D))+mid_rng
ele_1D = radar.elevation['data'][
sweep_start_inds[i]:sweep_end_inds[i]+1]
for j, ele in enumerate(ele_vec):
ind = find_ang_index(ele_1D, ele, ang_tol=ang_tol)
if ind is None:
continue
field_2D[i, j] = field_1D[ind]
rng_2D[i, j] = rng_1D[ind]
# get limits of angle bins
if radar.scan_type == 'ppi':
if azi_res is None:
azi_res = np.median(azi_vec[1:]-azi_vec[0:-1])
if radar.ray_angle_res is not None:
azi_res = np.min(
[radar.ray_angle_res['data'][0], azi_res])
azi_vec = np.append(azi_vec-azi_res/2., azi_vec[-1]+azi_res/2.)
if ele_res is None:
ele_res = np.median(ele_vec[1:]-ele_vec[0:-1])
if radar.instrument_parameters is not None:
if 'radar_beam_width_h' in radar.instrument_parameters:
bwidth = radar.instrument_parameters[
'radar_beam_width_h']['data'][0]
ele_res = np.min([bwidth, ele_res])
elif 'radar_beam_width_v' in radar.instrument_parameters:
bwidth = radar.instrument_parameters[
'radar_beam_width_v']['data'][0]
ele_res = np.min([bwidth, ele_res])
ele_vec = np.append(ele_vec-ele_res/2., ele_vec[-1]+ele_res/2.)
else:
if ele_res is None:
ele_res = np.median(ele_vec[1:]-ele_vec[0:-1])
if radar.ray_angle_res is not None:
ele_res = np.min(
[radar.ray_angle_res['data'][0], ele_res])
ele_vec = np.append(ele_vec-ele_res/2., ele_vec[-1]+ele_res/2.)
if azi_res is None:
azi_res = np.median(azi_vec[1:]-azi_vec[0:-1])
if radar.instrument_parameters is not None:
if 'radar_beam_width_h' in radar.instrument_parameters:
bwidth = radar.instrument_parameters[
'radar_beam_width_h']['data'][0]
azi_res = np.min([bwidth, azi_res])
elif 'radar_beam_width_v' in radar.instrument_parameters:
bwidth = radar.instrument_parameters[
'radar_beam_width_v']['data'][0]
azi_res = np.min([bwidth, azi_res])
azi_vec = np.append(azi_vec-azi_res/2., azi_vec[-1]+azi_res/2.)
titl = generate_fixed_rng_span_title(radar, field_name, stat)
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
xsize = prdcfg['ppiImageConfig'].get('xsize', 10)
ysize = prdcfg['ppiImageConfig'].get('ysize', 8)
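    # build companion file names for the range-of-the-statistic plot,
    # e.g. 'fixed_rng.png' -> 'fixed_rng_RNG.png'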
fname_rng_list = []
for fname in fname_list:
fname_rng_list.append(
fname.rsplit('.', 1)[0]+'_RNG.'+fname.rsplit('.', 1)[1])
_plot_time_range(
azi_vec, ele_vec, rng_2D, 'radar_range', fname_rng_list, titl=titl,
xlabel='azimuth (deg)', ylabel='elevation (deg)',
figsize=[xsize, ysize], dpi=dpi)
return _plot_time_range(
azi_vec, ele_vec, field_2D, field_name, fname_list, titl=titl,
xlabel='azimuth (deg)', ylabel='elevation (deg)',
figsize=[xsize, ysize], dpi=dpi)
def plot_fixed_rng_sun(radar, field_name, sun_hits, prdcfg, fname_list, azi_res=None,
ele_res=None, ang_tol=1., vmin=None, vmax=None):
"""
    plots a fixed range plot with the sun hits overlaid
Parameters
----------
radar : radar object
The radar object containing the fixed range data
field_name : str
The name of the field to plot
sun_hits: dict
dictionary containing the sun hits data
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
azi_res, ele_res : float
The nominal azimuth and elevation angle resolution [deg]
ang_tol : float
The tolerance between the nominal and the actual radar angle
vmin, vmax : float
Min and Max values of the color scale. If None it is going to be taken
from the Py-ART config files
Returns
-------
fname_list : list of str
list of names of the created plots
"""
# Get radar azimuth angles within limits taking as reference
# the first elevation angle
fixed_rng = radar.range['data'][0]
if radar.scan_type == 'ppi':
ele_vec = np.sort(radar.fixed_angle['data'])
azi_vec = np.sort(
radar.azimuth['data'][radar.sweep_start_ray_index['data'][0]:
radar.sweep_end_ray_index['data'][0]+1])
else:
ele_vec = np.sort(
radar.elevation['data'][radar.sweep_start_ray_index['data'][0]:
radar.sweep_end_ray_index['data'][0]+1])
azi_vec = np.sort(radar.fixed_angle['data'])
# put data in a regular 2D grid
field_2D = np.ma.masked_all((azi_vec.size, ele_vec.size))
sweep_start_inds = radar.sweep_start_ray_index['data']
sweep_end_inds = radar.sweep_end_ray_index['data']
if radar.scan_type == 'ppi':
for j, ele in enumerate(ele_vec):
field_1D = radar.fields[field_name]['data'][
sweep_start_inds[j]:sweep_end_inds[j]+1]
azi_1D = radar.azimuth['data'][
sweep_start_inds[j]:sweep_end_inds[j]+1]
for i, azi in enumerate(azi_vec):
ind = find_ang_index(azi_1D, azi, ang_tol=ang_tol)
if ind is None:
continue
field_2D[i, j] = field_1D[ind][0]
else:
for i, azi in enumerate(azi_vec):
field_1D = radar.fields[field_name]['data'][
sweep_start_inds[i]:sweep_end_inds[i]+1]
ele_1D = radar.elevation['data'][
sweep_start_inds[i]:sweep_end_inds[i]+1]
for j, ele in enumerate(ele_vec):
ind = find_ang_index(ele_1D, ele, ang_tol=ang_tol)
if ind is None:
continue
field_2D[i, j] = field_1D[ind]
# get limits of angle bins
if radar.scan_type == 'ppi':
if azi_res is None:
azi_res = np.median(azi_vec[1:]-azi_vec[0:-1])
if radar.ray_angle_res is not None:
azi_res = np.min(
[radar.ray_angle_res['data'][0], azi_res])
azi_vec = np.append(azi_vec-azi_res/2., azi_vec[-1]+azi_res/2.)
if ele_res is None:
ele_res = np.median(ele_vec[1:]-ele_vec[0:-1])
if radar.instrument_parameters is not None:
if 'radar_beam_width_h' in radar.instrument_parameters:
bwidth = radar.instrument_parameters[
'radar_beam_width_h']['data'][0]
ele_res = np.min([bwidth, ele_res])
elif 'radar_beam_width_v' in radar.instrument_parameters:
bwidth = radar.instrument_parameters[
'radar_beam_width_v']['data'][0]
ele_res = np.min([bwidth, ele_res])
ele_vec = np.append(ele_vec-ele_res/2., ele_vec[-1]+ele_res/2.)
else:
if ele_res is None:
ele_res = np.median(ele_vec[1:]-ele_vec[0:-1])
if radar.ray_angle_res is not None:
ele_res = np.min(
[radar.ray_angle_res['data'][0], ele_res])
ele_vec = np.append(ele_vec-ele_res/2., ele_vec[-1]+ele_res/2.)
if azi_res is None:
azi_res = np.median(azi_vec[1:]-azi_vec[0:-1])
if radar.instrument_parameters is not None:
if 'radar_beam_width_h' in radar.instrument_parameters:
bwidth = radar.instrument_parameters[
'radar_beam_width_h']['data'][0]
azi_res = np.min([bwidth, azi_res])
elif 'radar_beam_width_v' in radar.instrument_parameters:
bwidth = radar.instrument_parameters[
'radar_beam_width_v']['data'][0]
azi_res = np.min([bwidth, azi_res])
azi_vec = np.append(azi_vec-azi_res/2., azi_vec[-1]+azi_res/2.)
titl = generate_fixed_rng_title(radar, field_name, fixed_rng)
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
xsize = prdcfg['ppiImageConfig'].get('xsize', 10)
ysize = prdcfg['ppiImageConfig'].get('ysize', 8)
return _plot_sunscan(
azi_vec, ele_vec, field_2D, sun_hits, field_name, fname_list, titl=titl,
xlabel='azimuth (deg)', ylabel='elevation (deg)',
figsize=[xsize, ysize], vmin=vmin, vmax=vmax, dpi=dpi)
def plot_cappi(radar, field_name, altitude, prdcfg, fname_list,
beamwidth=1., beam_spacing=1., save_fig=True):
"""
plots a Constant Altitude Plan Position Indicator CAPPI
Parameters
----------
radar : Radar object
object containing the radar data to plot
field_name : str
name of the radar field to plot
altitude : float
the altitude [m MSL] to be plotted
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
beamwidth : float
The radar beamwidth
beam_spacing : float
the ray angle resolution
save_fig : bool
        if True save the figure. If False it does not close the plot and
        returns the handle to the figure
    Returns
    -------
    fname_list : list of str or
    fig, ax : tuple
        list of names of the saved plots or handle of the figure and axes
"""
norm, ticks, ticklabs = get_norm(field_name)
xmin = prdcfg['ppiImageConfig']['xmin']
xmax = prdcfg['ppiImageConfig']['xmax']
ymin = prdcfg['ppiImageConfig']['ymin']
ymax = prdcfg['ppiImageConfig']['ymax']
wfunc = prdcfg.get('wfunc', 'NEAREST')
cappi_res = prdcfg.get('res', 500.)
# number of grid points in cappi
ny = int((ymax-ymin)*1000./cappi_res)+1
nx = int((xmax-xmin)*1000./cappi_res)+1
# parameters to determine the gates to use for each grid point
if (radar.instrument_parameters is not None and
'radar_beam_width_h' in radar.instrument_parameters):
beamwidth = radar.instrument_parameters[
'radar_beam_width_h']['data'][0]
if radar.ray_angle_res is not None:
beam_spacing = radar.ray_angle_res['data'][0]
lat = float(radar.latitude['data'])
lon = float(radar.longitude['data'])
alt = 0.
# cartesian mapping
grid = pyart.map.grid_from_radars(
(radar,), gridding_algo='map_to_grid', weighting_function=wfunc,
roi_func='dist_beam', h_factor=1.0, nb=beamwidth, bsp=beam_spacing,
min_radius=cappi_res/2.,
grid_shape=(1, ny, nx),
grid_limits=((altitude, altitude), (ymin*1000., ymax*1000.),
(xmin*1000., xmax*1000.)),
grid_origin=(lat, lon), grid_origin_alt=alt,
fields=[field_name])
# display data
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
fig = plt.figure(figsize=[prdcfg['ppiImageConfig']['xsize'],
prdcfg['ppiImageConfig']['ysize']],
dpi=dpi)
ax = fig.add_subplot(111, aspect='equal')
cmap = pyart.config.get_field_colormap(field_name)
vmin = vmax = None
if norm is None: # if norm is set do not override with vmin/vmax
vmin, vmax = pyart.config.get_field_limits(field_name)
titl = pyart.graph.common.generate_grid_title(grid, field_name, 0)
cax = ax.imshow(
grid.fields[field_name]['data'][0], extent=(xmin, xmax, ymin, ymax),
origin='lower', cmap=cmap, vmin=vmin, vmax=vmax, norm=norm,
interpolation='none')
ax.set_xlabel('East West distance from radar(km)')
ax.set_ylabel('North South distance from radar(km)')
ax.set_title(titl)
# plot the colorbar and set the label.
cb = fig.colorbar(cax)
if ticks is not None:
cb.set_ticks(ticks)
if ticklabs:
cb.set_ticklabels(ticklabs)
label = get_colobar_label(grid.fields[field_name], field_name)
cb.set_label(label)
# Make a tight layout
fig.tight_layout()
if save_fig:
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
return (fig, ax)
def plot_traj(rng_traj, azi_traj, ele_traj, time_traj, prdcfg, fname_list,
rad_alt=None, rad_tstart=None, ax=None, fig=None,
save_fig=True):
"""
plots a trajectory on a Cartesian surface
Parameters
----------
rng_traj, azi_traj, ele_traj : float array
antenna coordinates of the trajectory [m and deg]
time_traj : datetime array
trajectory time
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
rad_alt : float or None
radar altitude [m MSL]
rad_tstart : datetime object or None
start time of the radar scan
    surface_alt : float
        surface altitude [m MSL]. Read from prdcfg['altitude']
    color_ref : str
        What the color code represents. Read from prdcfg. Can be 'None',
        'rel_altitude', 'altitude' or 'time'
fig : Figure
Figure to add the colorbar to. If none a new figure will be created
ax : Axis
Axis to plot on. if fig is None a new axis will be created
save_fig : bool
        if True save the figure. If False it does not close the plot and
        returns the handle to the figure
    Returns
    -------
    fname_list : list of str or
    fig, ax : tuple
        list of names of the saved plots or handle of the figure and axes
"""
color_ref = prdcfg.get('color_ref', 'None')
if 'altitude' not in prdcfg and color_ref == 'rel_altitude':
warn('Unable to plot trajectory relative to surface altitude. ' +
'Unknown surface altitude.')
color_ref = 'None'
if rad_tstart is None and color_ref == 'time':
warn('Unable to plot trajectory relative to radar scan start time. ' +
'Unknown radar scan start time.')
color_ref = 'None'
if rad_alt is None and color_ref in ('rel_altitude', 'altitude'):
warn('Unable to plot trajectory altitude. ' +
'Unknown radar altitude.')
color_ref = 'None'
x, y, z = pyart.core.antenna_to_cartesian(
rng_traj/1000., azi_traj, ele_traj)
if color_ref == 'rel_altitude':
h = z+rad_alt
h_rel = h-prdcfg['altitude']
marker = 'x'
col = h_rel
cmap = 'coolwarm'
norm = plt.Normalize(-2000., 2000.)
cb_label = 'Altitude relative to CAPPI [m]'
plot_cb = True
elif color_ref == 'altitude':
h = z+rad_alt
marker = 'x'
col = h
cmap = 'Greys'
norm = plt.Normalize(h.min(), h.max())
cb_label = 'Altitude [m MSL]'
plot_cb = True
elif color_ref == 'time':
td_vec = time_traj-rad_tstart
tt_s = []
for td in td_vec:
tt_s.append(td.total_seconds())
tt_s = np.asarray(tt_s)
marker = 'x'
col = tt_s
cmap = 'Greys'
norm = plt.Normalize(tt_s.min(), tt_s.max())
cb_label = 'Time from start of radar scan [s]'
plot_cb = True
else:
col = 'k'
marker = 'x'
        cmap = None
        norm = None
plot_cb = False
# display data
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
if fig is None:
fig = plt.figure(figsize=[prdcfg['ppiImageConfig']['xsize'],
prdcfg['ppiImageConfig']['ysize']],
dpi=dpi)
ax = fig.add_subplot(111, aspect='equal')
else:
ax.autoscale(False)
cax = ax.scatter(
x/1000., y/1000., c=col, marker=marker, alpha=0.5, cmap=cmap,
norm=norm)
# plot colorbar
if plot_cb:
cb = fig.colorbar(cax, orientation='horizontal')
cb.set_label(cb_label)
if save_fig:
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
return (fig, ax)
def plot_rhi_contour(radar, field_name, ind_az, prdcfg, fname_list,
contour_values=None, linewidths=1.5, ax=None, fig=None,
save_fig=True):
"""
plots contour data on an RHI
Parameters
----------
radar : Radar object
object containing the radar data to plot
field_name : str
name of the radar field to plot
ind_az : int
sweep index to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
contour_values : float array
list of contours to plot
linewidths : float
width of the contour lines
fig : Figure
Figure to add the colorbar to. If none a new figure will be created
ax : Axis
Axis to plot on. if fig is None a new axis will be created
save_fig : bool
        if True save the figure. If False it does not close the plot and
        returns the handle to the figure
    Returns
    -------
    fname_list : list of str or
    fig, ax : tuple
        list of names of the saved plots or handle of the figure and axes
"""
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
# get contour intervals
if contour_values is None:
field_dict = pyart.config.get_metadata(field_name)
if 'boundaries' in field_dict:
vmin = field_dict['boundaries'][0]
vmax = field_dict['boundaries'][-1]
num = len(field_dict['boundaries'])
else:
vmin, vmax = pyart.config.get_field_limits(field_name)
num = 10
contour_values = np.linspace(vmin, vmax, num=num)
# get data and position
display = pyart.graph.RadarDisplay(radar)
data = display._get_data(field_name, ind_az, None, True, None)
x_edges, y_edges, z_edges = display._get_x_y_z(ind_az, True, True)
delta_x = x_edges[1:, 1:]-x_edges[:-1, :-1]
delta_y = y_edges[1:, 1:]-y_edges[:-1, :-1]
delta_z = z_edges[1:, 1:]-z_edges[:-1, :-1]
x = x_edges[:-1, :-1]+delta_x/2.
y = y_edges[:-1, :-1]+delta_y/2.
z = z_edges[:-1, :-1]+delta_z/2.
R = np.sqrt(x ** 2 + y ** 2) * np.sign(x)
# display data
if fig is None:
xsize = prdcfg['rhiImageConfig']['xsize']
ysize = prdcfg['rhiImageConfig']['ysize']
fig = plt.figure(figsize=[xsize, ysize], dpi=dpi)
ax = fig.add_subplot(111, aspect='equal')
ax.contour(R, z, data, contour_values, colors='k',
linewidths=linewidths)
display._set_title(field_name, ind_az, None, ax)
display._label_axes_rhi((None, None), ax)
display.set_limits(
ylim=[prdcfg['rhiImageConfig']['ymin'],
prdcfg['rhiImageConfig']['ymax']],
xlim=[prdcfg['rhiImageConfig']['xmin'],
prdcfg['rhiImageConfig']['xmax']], ax=ax)
display.plot_cross_hair(5., ax=ax)
# Turn on the grid
ax.grid()
# Make a tight layout
fig.tight_layout()
else:
ax.autoscale(False)
ax.contour(R, z, data, contour_values, colors='k',
linewidths=linewidths)
if save_fig:
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
return (fig, ax)
def plot_ppi_contour(radar, field_name, ind_el, prdcfg, fname_list,
contour_values=None, linewidths=1.5, ax=None, fig=None,
save_fig=True):
"""
plots contour data on a PPI
Parameters
----------
radar : Radar object
object containing the radar data to plot
field_name : str
name of the radar field to plot
ind_el : int
sweep index to plot
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
contour_values : float array
list of contours to plot
linewidths : float
width of the contour lines
fig : Figure
Figure to add the colorbar to. If none a new figure will be created
ax : Axis
Axis to plot on. if fig is None a new axis will be created
save_fig : bool
        if True save the figure. If False it does not close the plot and
        returns the handle to the figure
    Returns
    -------
    fname_list : list of str or
    fig, ax : tuple
        list of names of the saved plots or handle of the figure and axes
"""
dpi = prdcfg['ppiImageConfig'].get('dpi', 72)
# get contour intervals
if contour_values is None:
field_dict = pyart.config.get_metadata(field_name)
if 'boundaries' in field_dict:
vmin = field_dict['boundaries'][0]
vmax = field_dict['boundaries'][-1]
num = len(field_dict['boundaries'])
else:
vmin, vmax = pyart.config.get_field_limits(field_name)
num = 10
contour_values = np.linspace(vmin, vmax, num=num)
# get data and position
display = pyart.graph.RadarDisplay(radar)
data = display._get_data(field_name, ind_el, None, True, None)
x_edges, y_edges = display._get_x_y(ind_el, True, True)
delta_x = x_edges[1:, 1:]-x_edges[:-1, :-1]
delta_y = y_edges[1:, 1:]-y_edges[:-1, :-1]
x = x_edges[:-1, :-1]+delta_x/2.
y = y_edges[:-1, :-1]+delta_y/2.
# display data
if fig is None:
xsize = prdcfg['ppiImageConfig']['xsize']
ysize = prdcfg['ppiImageConfig']['ysize']
fig = plt.figure(figsize=[xsize, ysize], dpi=dpi)
ax = fig.add_subplot(111, aspect='equal')
ax.contour(x, y, data, contour_values, colors='k',
linewidths=linewidths)
display._set_title(field_name, ind_el, None, ax)
display._label_axes_ppi((None, None), ax)
display.set_limits(
ylim=[prdcfg['ppiImageConfig']['ymin'],
prdcfg['ppiImageConfig']['ymax']],
xlim=[prdcfg['ppiImageConfig']['xmin'],
prdcfg['ppiImageConfig']['xmax']], ax=ax)
display.plot_cross_hair(5., ax=ax)
# Turn on the grid
ax.grid()
# Make a tight layout
fig.tight_layout()
else:
ax.autoscale(False)
ax.contour(x, y, data, contour_values, colors='k',
linewidths=linewidths)
if save_fig:
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
return (fig, ax)
def plot_roi_contour(roi_dict, prdcfg, fname_list, plot_center=True,
xlabel='Lon [Deg]', ylabel='Lat [Deg]',
titl='TRT cell position', ax=None,
fig=None, save_fig=True):
"""
plots the contour of a region of interest on a map
Parameters
----------
roi_dict : dict
        dictionary containing the 'lon' and 'lat' points defining the
        contour of the region of interest
prdcfg : dict
dictionary containing the product configuration
fname_list : list of str
list of names of the files where to store the plot
plot_center : bool
        If True a marker at the center of the ROI is plotted
fig : Figure
Figure to add the colorbar to. If none a new figure will be created
ax : Axis
Axis to plot on. if fig is None a new axis will be created
save_fig : bool
        if True save the figure. If False it does not close the plot and
        returns the handle to the figure
    Returns
    -------
    fname_list : list of str or
    fig, ax : tuple
        list of names of the saved plots or handle of the figure and axes
"""
if not _SHAPELY_AVAILABLE or not _CARTOPY_AVAILABLE:
warn('Unable to plot ROI contour: Missing shapely and/or'
' cartopy modules')
return None
dpi = prdcfg['ppiMapImageConfig'].get('dpi', 72)
# create polygon to plot
polygon = shapely.geometry.Polygon(list(zip(
roi_dict['lon'], roi_dict['lat'])))
# display data
if fig is None:
xsize = prdcfg['ppiMapImageConfig']['xsize']
ysize = prdcfg['ppiMapImageConfig']['ysize']
lonstep = prdcfg['ppiMapImageConfig'].get('lonstep', 0.5)
latstep = prdcfg['ppiMapImageConfig'].get('latstep', 0.5)
min_lon = prdcfg['ppiMapImageConfig'].get('lonmin', 2.5)
max_lon = prdcfg['ppiMapImageConfig'].get('lonmax', 12.5)
min_lat = prdcfg['ppiMapImageConfig'].get('latmin', 43.5)
max_lat = prdcfg['ppiMapImageConfig'].get('latmax', 49.5)
resolution = prdcfg['ppiMapImageConfig'].get('mapres', '110m')
if resolution not in ('110m', '50m', '10m'):
warn('Unknown map resolution: '+resolution)
resolution = '110m'
background_zoom = prdcfg['ppiMapImageConfig'].get(
'background_zoom', 8)
lon_lines = np.arange(np.floor(min_lon), np.ceil(max_lon)+1, lonstep)
lat_lines = np.arange(np.floor(min_lat), np.ceil(max_lat)+1, latstep)
limits = [min_lon, max_lon, min_lat, max_lat]
# get background map instance
stamen_terrain = Stamen('terrain-background')
projection = cartopy.crs.PlateCarree()
fig = plt.figure(figsize=[xsize, ysize], dpi=dpi)
# draw background
ax = fig.add_subplot(111, projection=stamen_terrain.crs)
ax.set_extent(limits, crs=projection)
ax.add_image(stamen_terrain, background_zoom)
# add countries
countries = cartopy.feature.NaturalEarthFeature(
category='cultural',
name='admin_0_countries',
scale=resolution,
facecolor='none')
ax.add_feature(countries, edgecolor='black')
# draw grid lines and labels
gl = ax.gridlines(xlocs=lon_lines, ylocs=lat_lines, draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
ax.text(0.5, -0.2, xlabel, va='bottom', ha='center',
rotation='horizontal', rotation_mode='anchor',
transform=ax.transAxes)
ax.text(-0.1, 0.55, ylabel, va='bottom', ha='center',
rotation='vertical', rotation_mode='anchor',
transform=ax.transAxes)
ax.set_title(titl)
else:
ax.autoscale(False)
ax.add_geometries(
[polygon], cartopy.crs.PlateCarree(), facecolor='none',
edgecolor='k')
if 'lon_center' in roi_dict and 'lat_center' in roi_dict and plot_center:
ax.scatter(
[roi_dict['lon_center']], [roi_dict['lat_center']], c='k',
marker='x', transform=cartopy.crs.PlateCarree())
if save_fig:
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
return (fig, ax)
def plot_rhi_profile(data_list, hvec, fname_list, labelx='Value',
labely='Height (m MSL)', labels=['Mean'],
title='RHI profile', colors=None, linestyles=None,
vmin=None, vmax=None, hmin=None, hmax=None, dpi=72):
"""
plots an RHI profile
Parameters
----------
data_list : list of float array
values of the profile
hvec : float array
height points of the profile
fname_list : list of str
list of names of the files where to store the plot
labelx : str
The label of the X axis
labely : str
The label of the Y axis
labels : array of str
The label of the legend
title : str
The figure title
colors : array of str
Specifies the colors of each line
linestyles : array of str
Specifies the line style of each line
vmin, vmax: float
Lower/Upper limit of data values
hmin, hmax: float
Lower/Upper limit of altitude
dpi : int
dots per inch
Returns
-------
fname_list : list of str
list of names of the created plots
"""
fig, ax = plt.subplots(figsize=[10, 6], dpi=dpi)
lab = None
col = None
lstyle = None
for i, data in enumerate(data_list):
if labels is not None:
lab = labels[i]
if colors is not None:
col = colors[i]
if linestyles is not None:
lstyle = linestyles[i]
ax.plot(
data, hvec, label=lab, color=col, linestyle=lstyle, marker='x')
ax.set_title(title)
ax.set_xlabel(labelx)
ax.set_ylabel(labely)
ax.set_xlim(left=vmin, right=vmax)
ax.set_ylim(bottom=hmin, top=hmax)
ax.legend(loc='best')
# Turn on the grid
ax.grid()
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
def plot_along_coord(xval_list, yval_list, fname_list, labelx='coord',
labely='Value', labels=None,
title='Plot along coordinate', colors=None,
linestyles=None, ymin=None, ymax=None, dpi=72):
"""
plots data along a certain radar coordinate
Parameters
----------
xval_list : list of float arrays
the x values, range, azimuth or elevation
yval_list : list of float arrays
the y values. Parameter to plot
fname_list : list of str
list of names of the files where to store the plot
labelx : str
The label of the X axis
labely : str
The label of the Y axis
labels : array of str
The label of the legend
title : str
The figure title
colors : array of str
Specifies the colors of each line
linestyles : array of str
Specifies the line style of each line
ymin, ymax: float
Lower/Upper limit of y axis
dpi : int
dots per inch
Returns
-------
fname_list : list of str
list of names of the created plots
"""
fig, ax = plt.subplots(figsize=[10, 6], dpi=dpi)
lab = None
col = None
lstyle = None
for i, xval in enumerate(xval_list):
yval = yval_list[i]
if labels is not None:
lab = labels[i]
if colors is not None:
col = colors[i]
if linestyles is not None:
lstyle = linestyles[i]
ax.plot(xval, yval, label=lab, color=col, linestyle=lstyle)
ax.set_title(title)
ax.set_xlabel(labelx)
ax.set_ylabel(labely)
ax.set_ylim(bottom=ymin, top=ymax)
ax.legend(loc='best')
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
def plot_field_coverage(xval_list, yval_list, fname_list,
labelx='Azimuth (deg)', labely='Range extension [m]',
labels=None, title='Field coverage', ymin=None,
ymax=None, xmeanval=None, ymeanval=None,
labelmeanval=None, dpi=72):
"""
    plots the field coverage (range extension as a function of azimuth)
Parameters
----------
xval_list : list of float arrays
the x values, azimuth
yval_list : list of float arrays
the y values. Range extension
fname_list : list of str
list of names of the files where to store the plot
labelx : str
The label of the X axis
labely : str
The label of the Y axis
labels : array of str
The label of the legend
title : str
The figure title
ymin, ymax : float
Lower/Upper limit of y axis
xmeanval, ymeanval : float array
the x and y values of a mean along elevation
labelmeanval : str
the label of the mean
dpi : int
dots per inch
Returns
-------
fname_list : list of str
list of names of the created plots
"""
fig, ax = plt.subplots(figsize=[10, 6], dpi=dpi)
lab = None
for i, xval in enumerate(xval_list):
yval = yval_list[i]
if labels is not None:
lab = labels[i]
ax.plot(xval, yval, label=lab, linestyle='None', marker='o',
fillstyle='full')
if xmeanval is not None and ymeanval is not None:
ax.plot(xmeanval, ymeanval, label=labelmeanval, linestyle='-',
color='r', marker='x')
ax.set_title(title)
ax.set_xlabel(labelx)
ax.set_ylabel(labely)
ax.set_ylim(bottom=ymin, top=ymax)
if labels is not None:
ax.legend(loc='best')
for fname in fname_list:
fig.savefig(fname, dpi=dpi)
plt.close(fig)
return fname_list
```
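The profile and coordinate plotting helpers above (`plot_rhi_profile`, `plot_along_coord`) only need plain NumPy arrays, so they can be exercised without a radar object or a full product configuration. A minimal sketch, assuming the import path in the first comment (the synthetic data and output file names are illustrative):
```python
import numpy as np
# assumed import path; adjust to wherever this plotting module lives
from pyrad.graph.plots import plot_rhi_profile, plot_along_coord
# synthetic vertical profile: mean and median reflectivity vs height
hvec = np.arange(0., 10000., 500.)           # height [m MSL]
mean_profile = 30.*np.exp(-hvec/4000.)       # made-up values [dBZ]
median_profile = mean_profile-2.
plot_rhi_profile(
    [mean_profile, median_profile], hvec, ['rhi_profile.png'],
    labelx='Reflectivity (dBZ)', labels=['Mean', 'Median'],
    title='RHI profile', colors=['b', 'r'], linestyles=['-', '--'])
# synthetic data along range for two azimuths
rng = np.arange(0., 50000., 500.)
plot_along_coord(
    [rng, rng], [np.sin(rng/5000.), np.cos(rng/5000.)], ['along_rng.png'],
    labelx='Range (m)', labely='Value', labels=['azi 0 deg', 'azi 90 deg'])
```
Each helper writes every file in `fname_list` and returns that list.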
#### File: pyrad/io/read_data_cosmo.py
```python
from warnings import warn
import numpy as np
from scipy.interpolate import NearestNDInterpolator
from scipy.spatial import cKDTree
import netCDF4
import pyart
from pyart.config import get_metadata, get_field_name
from ..io.io_aux import get_fieldname_cosmo
# from memory_profiler import profile
# import time
def cosmo2radar_data(radar, cosmo_coord, cosmo_data, time_index=0,
slice_xy=True, slice_z=False,
field_names=['temperature'], dtype=np.float32):
"""
get the COSMO value corresponding to each radar gate using nearest
neighbour interpolation
Parameters
----------
radar : Radar
the radar object containing the information on the position of the
radar gates
cosmo_coord : dict
dictionary containing the COSMO coordinates
cosmo_data : dict
dictionary containing the COSMO data
time_index : int
index of the forecasted data
slice_xy : boolean
if true the horizontal plane of the COSMO field is cut to the
dimensions of the radar field
slice_z : boolean
if true the vertical plane of the COSMO field is cut to the dimensions
of the radar field
    field_names : list of str
        names of the COSMO fields to convert (default ['temperature'])
dtype : numpy data type object
the data type of the output data
Returns
-------
cosmo_fields : list of dict
list of dictionary with the COSMO fields and metadata
"""
# debugging
# start_time = time.time()
x_radar, y_radar, z_radar = _put_radar_in_swiss_coord(radar)
(x_cosmo, y_cosmo, z_cosmo, ind_xmin, ind_ymin, ind_zmin, ind_xmax,
ind_ymax, ind_zmax) = _prepare_for_interpolation(
x_radar, y_radar, z_radar, cosmo_coord, slice_xy=slice_xy,
slice_z=slice_z)
cosmo_fields = []
for field in field_names:
if field not in cosmo_data:
warn('COSMO field '+field+' data not available')
else:
values = cosmo_data[field]['data'][
time_index, ind_zmin:ind_zmax+1, ind_ymin:ind_ymax+1,
ind_xmin:ind_xmax+1].flatten()
# find interpolation function
tree_options = {
'compact_nodes': False,
'balanced_tree': False
}
interp_func = NearestNDInterpolator(
(z_cosmo, y_cosmo, x_cosmo), values,
tree_options=tree_options)
del values
# interpolate
data_interp = interp_func((z_radar, y_radar, x_radar))
# put field
field_dict = get_metadata(field)
field_dict['data'] = data_interp.astype(dtype)
cosmo_fields.append({field: field_dict})
del data_interp
if not cosmo_fields:
warn('COSMO data not available')
return None
return cosmo_fields
def cosmo2radar_coord(radar, cosmo_coord, slice_xy=True, slice_z=False,
field_name=None):
"""
Given the radar coordinates find the nearest COSMO model pixel
Parameters
----------
radar : Radar
the radar object containing the information on the position of the
radar gates
cosmo_coord : dict
dictionary containing the COSMO coordinates
slice_xy : boolean
if true the horizontal plane of the COSMO field is cut to the
dimensions of the radar field
slice_z : boolean
if true the vertical plane of the COSMO field is cut to the dimensions
of the radar field
field_name : str
name of the field
Returns
-------
cosmo_ind_field : dict
dictionary containing a field of COSMO indices and metadata
"""
# debugging
# start_time = time.time()
# parse the field parameters
if field_name is None:
field_name = get_field_name('cosmo_index')
x_radar, y_radar, z_radar = _put_radar_in_swiss_coord(radar)
(x_cosmo, y_cosmo, z_cosmo, ind_xmin, ind_ymin, ind_zmin, ind_xmax,
ind_ymax, _) = _prepare_for_interpolation(
x_radar, y_radar, z_radar, cosmo_coord, slice_xy=slice_xy,
slice_z=slice_z)
print('Generating tree')
# default scipy compact_nodes and balanced_tree = True
tree = cKDTree(
np.transpose((z_cosmo, y_cosmo, x_cosmo)), compact_nodes=False,
balanced_tree=False)
print('Tree generated')
_, ind_vec = tree.query(np.transpose(
(z_radar.flatten(), y_radar.flatten(), x_radar.flatten())), k=1)
# put the index in the original cosmo coordinates
nx_cosmo = len(cosmo_coord['x']['data'])
ny_cosmo = len(cosmo_coord['y']['data'])
nx = ind_xmax-ind_xmin+1
ny = ind_ymax-ind_ymin+1
ind_z = (ind_vec/(nx*ny)).astype(int)+ind_zmin
ind_y = ((ind_vec-nx*ny*ind_z)/nx).astype(int)+ind_ymin
ind_x = ((ind_vec-nx*ny*ind_z) % nx).astype(int)+ind_xmin
ind_cosmo = (ind_x+nx_cosmo*ind_y+nx_cosmo*ny_cosmo*ind_z).astype(int)
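    # ind_cosmo is the row-major flat index over the full (z, y, x) COSMO
    # grid, e.g. with nx_cosmo=4, ny_cosmo=3:
    # (ind_x=2, ind_y=1, ind_z=0) -> 2 + 4*1 + 4*3*0 = 6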
cosmo_ind_field = get_metadata(field_name)
cosmo_ind_field['data'] = ind_cosmo.reshape(radar.nrays, radar.ngates)
# debugging
# print(" generating COSMO indices takes %s seconds " %
# (time.time() - start_time))
return cosmo_ind_field
def get_cosmo_fields(cosmo_data, cosmo_ind, time_index=0,
field_names=['temperature']):
"""
Get the COSMO data corresponding to each radar gate
using a precomputed look up table of the nearest neighbour
Parameters
----------
cosmo_data : dict
dictionary containing the COSMO data and metadata
cosmo_ind : dict
dictionary containing a field of COSMO indices and metadata
time_index : int
index of the forecasted data
    field_names : list of str
        names of the COSMO parameters to get (default ['temperature'])
Returns
-------
cosmo_fields : list of dict
dictionary with the COSMO fields and metadata
"""
nrays, ngates = np.shape(cosmo_ind['data'])
cosmo_fields = []
for field in field_names:
if field not in cosmo_data:
warn('COSMO field '+field+' data not available')
else:
values = cosmo_data[field]['data'][time_index, :, :, :].flatten()
# put field
field_dict = get_metadata(field)
field_dict['data'] = values[cosmo_ind['data'].flatten()].reshape(
nrays, ngates).astype(float)
cosmo_fields.append({field: field_dict})
if not cosmo_fields:
warn('COSMO data not available')
return None
return cosmo_fields
# @profile
def read_cosmo_data(fname, field_names=['temperature'], celsius=True):
"""
Reads COSMO data from a netcdf file
Parameters
----------
fname : str
name of the file to read
    field_names : list of str
        names of the variables to read (default ['temperature'])
    celsius : Boolean
        if True and the variable is temperature, converts the data from
        Kelvin to degrees Celsius
Returns
-------
cosmo_data : dictionary
dictionary with the data and metadata
"""
# read the data
ncobj = netCDF4.Dataset(fname)
ncvars = ncobj.variables
# 4.1 Global attribute -> move to metadata dictionary
metadata = dict([(k, getattr(ncobj, k)) for k in ncobj.ncattrs()])
# read data for requested fields
cosmo_data = dict()
found = False
for field in field_names:
cosmo_name = get_fieldname_cosmo(field)
if cosmo_name not in ncvars:
warn(field+' data not present in COSMO file '+fname)
else:
var_data = _ncvar_to_dict(ncvars[cosmo_name], dtype='float16')
# remove dimension ensemble member of cosmo-1e
if var_data['data'].ndim == 5:
var_data['data'] = np.squeeze(var_data['data'], axis=1)
if field == 'temperature' and celsius:
var_data['data'] -= 273.15
var_data['units'] = 'degrees Celsius'
if field == 'vertical_wind_shear':
var_data['data'] *= 1000.
var_data['units'] = 'meters_per_second_per_km'
cosmo_data.update({field: var_data})
found = True
del var_data
if not found:
warn('No field available in COSMO file '+fname)
ncobj.close()
return None
# 4.2 put variables in dictionary
x_1 = _ncvar_to_dict(ncvars['x_1'])
y_1 = _ncvar_to_dict(ncvars['y_1'])
lon_1 = _ncvar_to_dict(ncvars['lon_1'])
lat_1 = _ncvar_to_dict(ncvars['lat_1'])
z_1 = _ncvar_to_dict(ncvars['z_1'])
z_bnds_1 = _ncvar_to_dict(ncvars['z_bnds_1'])
time_data = _ncvar_to_dict(ncvars['time'])
# close object
ncobj.close()
cosmo_data.update({
'metadata': metadata,
'time': time_data,
'x': x_1,
'y': y_1,
'z': z_1,
'z_bnds': z_bnds_1,
'lon': lon_1,
'lat': lat_1
})
return cosmo_data
def read_cosmo_coord(fname, zmin=None):
"""
Reads COSMO coordinates from a netcdf file
Parameters
----------
    fname : str
        name of the file to read
    zmin : float or None
        if provided, vertical levels below this altitude [m] are discarded
Returns
-------
cosmo_coord : dictionary
dictionary with the data and metadata
"""
# read the data
try:
ncobj = netCDF4.Dataset(fname)
ncvars = ncobj.variables
# 4.1 Global attribute -> move to metadata dictionary
metadata = dict([(k, getattr(ncobj, k)) for k in ncobj.ncattrs()])
# 4.2 put variables in dictionary
x_1 = _ncvar_to_dict(ncvars['x_1'])
y_1 = _ncvar_to_dict(ncvars['y_1'])
lon_1 = _ncvar_to_dict(ncvars['lon_1'])
lat_1 = _ncvar_to_dict(ncvars['lat_1'])
z_1 = _ncvar_to_dict(ncvars['z_1'])
z_bnds_1 = _ncvar_to_dict(ncvars['z_bnds_1'])
hfl = _ncvar_to_dict(ncvars['HFL'])
hsurf = _ncvar_to_dict(ncvars['HSURF'])
fr_land = _ncvar_to_dict(ncvars['FR_LAND'])
# close object
ncobj.close()
if zmin is not None:
z_1['data'] = z_1['data'][z_1['data'] >= zmin]
z_bnds_1['data'] = z_bnds_1['data'][z_bnds_1['data'] >= zmin]
cosmo_coord = {
'metadata': metadata,
'x': x_1,
'y': y_1,
'z': z_1,
'z_bnds': z_bnds_1,
'lon': lon_1,
'lat': lat_1,
'hfl': hfl,
'hsurf': hsurf,
'fr_land': fr_land,
}
return cosmo_coord
except EnvironmentError:
warn('Unable to read file '+fname)
return None
def _ncvar_to_dict(ncvar, dtype=np.float32):
""" Convert a NetCDF Dataset variable to a dictionary. """
# copy all attributes
d = dict((k, getattr(ncvar, k)) for k in ncvar.ncattrs())
d.update({'data': ncvar[:]})
if '_FillValue' in d:
d['data'] = np.ma.asarray(d['data'], dtype=dtype)
d['data'] = np.ma.masked_values(d['data'], float(d['_FillValue']))
else:
d['data'] = np.asarray(d['data'], dtype=dtype)
return d
def _prepare_for_interpolation(x_radar, y_radar, z_radar, cosmo_coord,
slice_xy=True, slice_z=False):
"""
prepares the COSMO 3D volume for interpolation:
1. if set slices the cosmo data to the area (or volume)
covered by the radar
2. creates the x, y, z grid for the interpolation
Parameters
----------
x_radar, y_radar, z_radar : arrays
The Swiss coordinates of the radar
cosmo_coord : dict
dictionary containing the COSMO coordinates
slice_xy : boolean
if true the horizontal plane of the COSMO field is cut to the
dimensions of the radar field
slice_z : boolean
if true the vertical plane of the COSMO field is cut to the dimensions
of the radar field
Returns
-------
x_cosmo, y_cosmo, z_cosmo : 1D arrays
        arrays containing the flattened Swiss coordinates of the COSMO data
        in the area of interest
ind_xmin, ind_ymin, ind_zmin, ind_xmax, ind_ymax, ind_zmax : ints
the minimum and maximum indices of each dimension
"""
nx_cosmo = len(cosmo_coord['x']['data'])
ny_cosmo = len(cosmo_coord['y']['data'])
nz_cosmo = len(cosmo_coord['z']['data'])
if slice_xy:
# get the COSMO data within the radar range
xmin = np.min(x_radar)
xmax = np.max(x_radar)
ymin = np.min(y_radar)
ymax = np.max(y_radar)
ind_xmin = np.where(cosmo_coord['x']['data'] < xmin)[0]
if ind_xmin.size == 0:
ind_xmin = 0
else:
ind_xmin = ind_xmin[-1]
ind_xmax = np.where(cosmo_coord['x']['data'] > xmax)[0]
if ind_xmax.size == 0:
ind_xmax = nx_cosmo-1
else:
ind_xmax = ind_xmax[0]
ind_ymin = np.where(cosmo_coord['y']['data'] < ymin)[0]
if ind_ymin.size == 0:
ind_ymin = 0
else:
ind_ymin = ind_ymin[-1]
ind_ymax = np.where(cosmo_coord['y']['data'] > ymax)[0]
if ind_ymax.size == 0:
ind_ymax = ny_cosmo-1
else:
ind_ymax = ind_ymax[0]
else:
ind_xmin = 0
ind_xmax = nx_cosmo-1
ind_ymin = 0
ind_ymax = ny_cosmo-1
if slice_z:
zmin = np.min(z_radar)
zmax = np.max(z_radar)
ind_z, _, _ = np.where(cosmo_coord['hfl']['data'] < zmin)
if ind_z.size == 0:
ind_zmin = 0
else:
ind_zmin = np.min(ind_z)
ind_z, _, _ = np.where(cosmo_coord['hfl']['data'] > zmax)
if ind_z.size == 0:
ind_zmax = nz_cosmo-1
else:
ind_zmax = np.max(ind_z)
else:
ind_zmin = 0
ind_zmax = nz_cosmo-1
nx = ind_xmax-ind_xmin+1
ny = ind_ymax-ind_ymin+1
nz = ind_zmax-ind_zmin+1
x_cosmo = cosmo_coord['x']['data'][ind_xmin:ind_xmax+1]
y_cosmo = cosmo_coord['y']['data'][ind_ymin:ind_ymax+1]
z_cosmo = cosmo_coord['hfl']['data'][
ind_zmin:ind_zmax+1, ind_ymin:ind_ymax+1, ind_xmin:ind_xmax+1]
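    # broadcast the 1D x and y coordinate vectors to the full (nz, ny, nx)
    # grid so that, once flattened, x_cosmo[i], y_cosmo[i] and z_cosmo[i]
    # all refer to the same COSMO grid point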
x_cosmo = (
np.broadcast_to(x_cosmo.reshape(1, 1, nx), (nz, ny, nx))).flatten()
y_cosmo = (
np.broadcast_to(y_cosmo.reshape(1, ny, 1), (nz, ny, nx))).flatten()
z_cosmo = z_cosmo.flatten()
return (x_cosmo, y_cosmo, z_cosmo, ind_xmin, ind_ymin,
ind_zmin, ind_xmax, ind_ymax, ind_zmax)
def _put_radar_in_swiss_coord(radar):
"""
puts the Cartesian grid of the radar coordinates in Swiss coordinates
Parameters
----------
radar : Radar
the radar object containing the information on the position of the
radar gates
Returns
-------
x_radar, y_radar, z_radar : 2D arrays
        arrays containing the Swiss coordinates of the radar gates [m]
"""
x0, y0, _ = pyart.core.wgs84_to_swissCH1903(
radar.longitude['data'][0], radar.latitude['data'][0],
radar.altitude['data'][0], no_altitude_transform=True)
x_radar = radar.gate_x['data']+x0
y_radar = radar.gate_y['data']+y0
z_radar = radar.gate_altitude['data']
return x_radar, y_radar, z_radar
```
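The functions in this module are typically chained: read the static COSMO coordinates once, build a nearest-neighbour lookup of COSMO pixels for the radar geometry with `cosmo2radar_coord`, and then map each forecast onto the radar gates with `get_cosmo_fields` (or do both steps at once with `cosmo2radar_data`). A minimal sketch, assuming the MeteoSwiss Py-ART fork used by pyrad and placeholder file names:
```python
import pyart
from pyrad.io.read_data_cosmo import (
    read_cosmo_coord, read_cosmo_data, cosmo2radar_coord, get_cosmo_fields)
# placeholder file names, not real paths
radar = pyart.io.read_cfradial('radar_volume.nc')
cosmo_coord = read_cosmo_coord('cosmo_coords.nc')
cosmo_data = read_cosmo_data('cosmo_forecast.nc', field_names=['temperature'])
# look-up table: nearest COSMO pixel for every radar gate
cosmo_ind = cosmo2radar_coord(radar, cosmo_coord)
# COSMO temperature mapped onto the radar gates for the first forecast step
fields = get_cosmo_fields(
    cosmo_data, cosmo_ind, time_index=0, field_names=['temperature'])
if fields is not None:
    radar.add_field('temperature', fields[0]['temperature'])
```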
#### File: pyrad/io/read_data_mxpol.py
```python
import time
import imp
import re
import os
import datetime
from copy import deepcopy
import warnings
import numpy as np
import netCDF4
try:
import h5py
_H5PY_AVAILABLE = True
except ImportError:
_H5PY_AVAILABLE = False
class MissingOptionalDependency(Exception):
""" Exception raised when a optional dependency is needed but not found. """
pass
import pyart
# -------------------------- classes - MXPOL ------------------------------ #
class pyrad_MXPOL(pyart.core.Radar):
def __init__(self, filename, field_names=None, max_range=np.Inf,
min_range=10000, pyrad_names=True):
# find information based on filename
all_files = [filename]
fname_basename = os.path.basename(filename)
if 'PPI' in fname_basename:
scan_type = 'ppi'
elif 'RHI' in fname_basename:
scan_type = 'rhi'
strdate = re.findall(r"([0-9]{8}-[0-9]{6})", fname_basename)[0]
date = datetime.datetime.strptime(strdate, '%Y%m%d-%H%M%S')
# if field name is None, take all available fields
if field_names is None:
field_names = ['Zh', 'Zdr', 'Kdp', 'Phidp', 'Rhohv', 'ZhCorr',
'ZdrCorr', 'RVel', 'Rvel', 'Sw', 'SNRh', 'SNRv', 'Psidp']
# convert fieldname if necessary
varnames = []
for fieldname in field_names:
newname = convert_polvar_name('LTE', fieldname)
varnames.append(newname)
# get labels, units etc
long_names = []
standard_names = []
units = []
vmin = []
vmax = []
for varname in varnames:
metadata = generate_polvar_metadata(varname)
standard_names.append(metadata['standard_name'])
long_names.append(metadata['long_name'])
units.append(metadata['units'])
vmin.append(metadata['valid_min'])
vmax.append(metadata['valid_max'])
# initiate empty vectors
N_sweeps = len(all_files)
fields = {}
fixed_angle = {}
fixed_angle['data'] = np.zeros(N_sweeps, )
sweep_start_ray_index = {}
sweep_start_ray_index['data'] = []
sweep_stop_ray_index = {}
sweep_stop_ray_index['data'] = []
for i, k in enumerate(varnames):
fields[k] = {}
fields[k]['data'] = []
fields[k]['long_name'] = long_names[i]
fields[k]['standard_name'] = standard_names[i]
fields[k]['units'] = units[i]
fields[k]['valid_min'] = vmin[i]
fields[k]['valid_max'] = vmax[i]
idx_start = 0
idx_stop = 0
elevations = []
azimuths = []
ranges = []
nyquist = []
# read data and create dictionaries
for i in range(N_sweeps):
metadata, data = readMXPOLRadData(
all_files[i], varnames, max_range)
if scan_type == 'rhi':
fixed_angle['data'][i] = np.round(np.mean(data['azimuth']))
elif scan_type == 'ppi':
fixed_angle['data'][i] = np.round(np.mean(data['elevation']))
[N_az, N_ranges] = data[varnames[0]].shape
idx_stop = idx_start + N_az - 1
sweep_start_ray_index['data'].append(idx_start)
sweep_stop_ray_index['data'].append(idx_stop)
idx_start = idx_stop + 1
elevations.extend(list(data['elevation']))
nyquist.extend([data['nyquist_vel']]*N_az)
azimuths.extend(list(data['azimuth']))
ranges.extend(list(data['range']))
for j, v in enumerate(varnames):
if v in data.keys():
if not(len(fields[v]['data'])):
fields[v]['data'] = data[v]
else:
fields[v]['data'] = row_stack(
fields[v]['data'], data[v])
else:
print('Variable '+v+' was not found in file!')
# mask NaNs
for v in varnames:
if not len(fields[v]['data']):
# Remove variable
fields.pop(v)
else:
fields[v]['data'] = np.ma.masked_equal(
fields[v]['data'], -99900.0)
[a, N_ranges] = fields[varnames[0]]['data'].shape
# create dictionaries according to pyART standard
latitude = {'data': np.asarray([data['latitude']]),
'units': data['lat_units']}
longitude = {'data': np.asarray([data['longitude']]),
'units': data['lon_units']}
altitude = {'data': np.asarray([data['altitude']]),
'units': data['alt_units']}
sweep_number = {'data': np.arange(0, len(all_files))}
sweep_mode = {'data': np.asarray([scan_type]*N_sweeps)}
instrument_parameters = {
'nyquist_velocity': {'data': np.asarray(nyquist)}}
azimuth = {'data': np.asarray(azimuths), 'units': data['azim_units']}
rrange = {'data': np.asarray(ranges),
'units': data['range_units']}
elevation = {'data': np.asarray(elevations),
'units': data['elev_units']}
sweep_start_ray_index['data'] = np.asarray(
sweep_start_ray_index['data'])
sweep_stop_ray_index['data'] = np.asarray(
sweep_stop_ray_index['data'])
time_units = 'seconds since ' + str(date)
time_data = {'data': data['time'], 'units': time_units}
# change keys to match pyART metranet keys
if pyrad_names:
fields_copy = deepcopy(fields)
for keys in fields_copy:
newkey = fields[keys]['standard_name']
fields[newkey] = fields.pop(keys)
# Create PyART instance
pyart.core.Radar.__init__(
self, time_data, rrange, fields, metadata, scan_type, latitude,
longitude, altitude, sweep_number, sweep_mode, fixed_angle,
sweep_start_ray_index, sweep_stop_ray_index, azimuth, elevation,
instrument_parameters=instrument_parameters)
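# Hypothetical usage sketch (the file name is illustrative; it only needs to
# contain 'PPI' or 'RHI' and a YYYYMMDD-HHMMSS timestamp):
#   radar = pyrad_MXPOL('MXPol-polar-20170101-120000-PPI-005_0.nc',
#                       field_names=['Zh', 'Zdr'])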
# -------------------------- classes - IDL --------------------------- #
class pyrad_IDL(pyart.core.Radar):
def __init__(self, filename, field_names=None, max_range=np.Inf,
min_range=10000):
# find information based on filename
all_files = [filename]
fname_basename = os.path.basename(filename)
fname = netCDF4.Dataset(filename)
if 'PPI' in fname_basename:
scan_type = 'ppi'
elif 'RHI' in fname_basename:
scan_type = 'rhi'
strdate = re.findall(r"([0-9]{8}-[0-9]{6})", fname_basename)[0]
date = datetime.datetime.strptime(strdate, '%Y%m%d-%H%M%S')
# if field name is None, take all available fields
if field_names is None:
field_names = list(fname.variables.keys())
# convert fieldname if necessary
varnames = []
for fieldname in field_names:
newname = convert_polvar_name('IDL', fieldname)
varnames.append(newname)
# get labels, units etc
long_names = []
standard_names = []
units = []
vmin = []
vmax = []
for varname in varnames:
metadata = generate_polvar_metadata(varname)
standard_names.append(metadata['standard_name'])
long_names.append(metadata['long_name'])
units.append(metadata['units'])
vmin.append(metadata['valid_min'])
vmax.append(metadata['valid_max'])
# initiate empty vectors
N_sweeps = len(all_files)
fields = {}
fixed_angle = {}
fixed_angle['data'] = np.zeros(N_sweeps, )
sweep_start_ray_index = {}
sweep_start_ray_index['data'] = []
sweep_stop_ray_index = {}
sweep_stop_ray_index['data'] = []
for i, k in enumerate(varnames):
fields[k] = {}
fields[k]['data'] = []
fields[k]['long_name'] = long_names[i]
fields[k]['standard_name'] = standard_names[i]
fields[k]['units'] = units[i]
fields[k]['valid_min'] = vmin[i]
fields[k]['valid_max'] = vmax[i]
idx_start = 0
idx_stop = 0
elevations = []
azimuths = []
nyquist = []
# read data and create dictionaries
for i in range(N_sweeps):
metadata, data = readIDLRadData(
all_files[i], varnames, max_range)
if scan_type == 'rhi':
fixed_angle['data'][i] = np.round(np.mean(data['azimuth']))
elif scan_type == 'ppi':
fixed_angle['data'][i] = data['elevation'][0]
[N_az, N_ranges] = data[varnames[0]].shape
idx_stop = idx_start + N_az - 1
sweep_start_ray_index['data'].append(idx_start)
sweep_stop_ray_index['data'].append(idx_stop)
idx_start = idx_stop + 1
elevations.extend([data['elevation'][0]]*N_az)
nyquist.extend([data['nyquist_vel']]*N_az)
azimuths.extend(list(data['azimuth']))
warnings.warn("Warning, sweep rank could not be found, using first rank")
starttime, endtime = findTimes(1)
interval = ((endtime-starttime)/N_az)
#time_lapse = np.arange(starttime+(0.5*interval), endtime, interval)
# because this is a single sweep
time_lapse = np.around(
np.arange(0.+(0.5*interval), endtime-starttime, interval))
for j, v in enumerate(varnames):
if v in data.keys():
                    if not len(fields[v]['data']):
fields[v]['data'] = data[v]
else:
fields[v]['data'] = row_stack(
fields[v]['data'], data[v])
else:
print('Variable '+v+' was not found in file!')
# mask NaNs
for v in varnames:
fields[v]['data'] = np.ma.masked_equal(
fields[v]['data'], -99900.0)
[a, N_ranges] = fields[varnames[0]]['data'].shape
# create dictionaries according to pyART standard
latitude = {'data': np.asarray([data['latitude']]),
'units': data['lat_units']}
longitude = {'data': np.asarray([data['longitude']]),
'units': data['lon_units']}
altitude = {'data': np.asarray([data['altitude']]),
'units': data['alt_units']}
sweep_number = {'data': np.arange(0, len(all_files))}
sweep_mode = {'data': np.asarray([scan_type]*N_sweeps)}
instrument_parameters = {
'nyquist_velocity': {'data': np.asarray(nyquist)}}
azimuth = {'data': np.asarray(azimuths), 'units': data['azim_units']}
rrange = {'data': np.arange(N_ranges)*data['resolution'],
'units': data['range_units']}
elevation = {'data': np.asarray(elevations),
'units': data['elev_units']}
sweep_start_ray_index['data'] = np.asarray(
sweep_start_ray_index['data'])
sweep_stop_ray_index['data'] = np.asarray(
sweep_stop_ray_index['data'])
time_units = 'seconds since ' + str(date)
time_lapse = np.asarray(time_lapse)
time_data = {'data': time_lapse, 'units': time_units}
# change keys to match pyART metranet keys
fields_copy = deepcopy(fields)
for keys in fields_copy:
newkey = fields[keys]['standard_name']
fields[newkey] = fields.pop(keys)
# Create PyART instance
pyart.core.Radar.__init__(
self, time_data, rrange, fields, metadata, scan_type, latitude,
longitude, altitude, sweep_number, sweep_mode, fixed_angle,
sweep_start_ray_index, sweep_stop_ray_index, azimuth, elevation,
instrument_parameters=instrument_parameters)
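# Hedged usage sketch (illustration only, not part of the original module): building
# a Py-ART radar object from an IDL-processed PPI file. The path below is
# hypothetical; any netCDF produced by the IDL processing chain with 'PPI'/'RHI'
# and a 'YYYYMMDD-HHMMSS' timestamp in its name should work.
#
#   radar = pyrad_IDL('/data/MXPol-polar-20170210-083022-PPI-042_0.nc',
#                     field_names=['Zh', 'Zdr', 'Rhohv'], max_range=50000.)
#   print(radar.scan_type, radar.nrays, radar.ngates)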
# -------------------------- classes - MCH --------------------------- #
class pyrad_MCH(pyart.core.Radar):
def __init__(self, filename, field_names=None, max_range=np.Inf):
# find information based on filename
all_files = [filename]
N_sweeps = len(all_files)
fname_basename = os.path.basename(filename)
# Get name of radar
index_letter = fname_basename[2]
radar_info = generate_radar_table(index_letter)
radar_name = radar_info['radarID']
# Get radar resolution
if fname_basename[1] == 'L':
rres = 500.
else:
rres = 83.3
scan_type = 'ppi'
scandate = datetime.datetime.strptime(
fname_basename[3:12], '%y%j%H%M')
self.scan_date = scandate.timetuple()
# if field name is None, take all available fields
if field_names is None:
field_names = ['Z', 'ZDR', 'ZV', 'V', 'W', 'RHO', 'CLUT', 'PHIDP']
# convert fieldname if necessary
varnames = []
for fieldname in field_names:
newname = convert_polvar_name('MCH', fieldname)
varnames.append(newname)
# get labels, units etc
long_names = []
standard_names = []
units = []
vmin = []
vmax = []
for varname in varnames:
metadata = generate_polvar_metadata(varname)
standard_names.append(metadata['standard_name'])
long_names.append(metadata['long_name'])
units.append(metadata['units'])
vmin.append(metadata['valid_min'])
vmax.append(metadata['valid_max'])
# initiate empty vectors
fields = {}
fixed_angle = {}
fixed_angle['data'] = np.zeros(N_sweeps, )
sweep_start_ray_index = {}
sweep_start_ray_index['data'] = []
sweep_stop_ray_index = {}
sweep_stop_ray_index['data'] = []
for i, k in enumerate(varnames):
fields[k] = {}
            fields[k]['data'] = np.array([])
fields[k]['long_name'] = long_names[i]
fields[k]['standard_name'] = standard_names[i]
fields[k]['units'] = units[i]
fields[k]['valid_min'] = vmin[i]
fields[k]['valid_max'] = vmax[i]
# Initialize
idx_start = 0
idx_stop = 0
elevations = []
azimuths = []
nyquist = []
time_lapse = []
# read and organise data
for i in range(N_sweeps):
data = readCHRadData(
all_files[i], radar_name, varnames, rres, max_range)
fixed_angle['data'][i] = data['elevation']
[N_ranges, N_az] = data[varnames[0]].shape
idx_stop = idx_start + N_az - 1
sweep_start_ray_index['data'].append(idx_start)
sweep_stop_ray_index['data'].append(idx_stop)
idx_start = idx_stop + 1
elevations.extend([data['elevation']]*N_az)
nyquist.extend([data['nyquist_vel']]*N_az)
azimuths.extend(list(data['azimuth']))
# create list of times at the center of each ray
sweep_rank = 1
starttime, endtime = findTimes(sweep_rank)
interval = ((endtime-starttime)/len(list(data['azimuth'])))
time_lapse.extend(np.arange(
starttime+(0.5*interval), endtime, interval))
for j, v in enumerate(varnames):
if fields[v]['data'].size == 0:
fields[v]['data'] = data[v].T
else:
fields[v]['data'] = row_stack(
fields[v]['data'], data[v].T)
# mask nans
for v in varnames:
fields[v]['data'] = np.ma.array(
fields[v]['data'], mask=np.isnan(fields[v]['data']))
sweep_start_ray_index['data'] = np.asarray(
sweep_start_ray_index['data'])
sweep_stop_ray_index['data'] = np.asarray(
sweep_stop_ray_index['data'])
metadata = {}
[a, N_ranges] = fields[varnames[0]]['data'].shape
latitude = {'data': np.array([radar_info['coordinates'][0]]),
'units': "DegreesNorth"}
longitude = {'data': np.array([radar_info['coordinates'][1]]),
'units': "DegreesEast"}
altitude = {'data': np.array([radar_info['altitude']]),
'units': "MetersAboveSeaLevel"}
sweep_number = {'data': np.arange(0, len(all_files))}
sweep_mode = {'data': np.asarray(['ppi']*N_sweeps)}
instrument_parameters = {
'nyquist_velocity': {'data': np.array(nyquist)}}
metadata['Source'] = (
"Operational radar data processed at MeteoSwiss Locarno-Monti")
metadata['Institution'] = (
"MeteoSwiss, MDR, Locarno-Monti, Switzerland")
metadata['History'] = [
"created: %s, " % time.ctime(os.path.getctime(filename)) +
"last modified: %s" % time.ctime(os.path.getmtime(filename))]
metadata['ContactInformation'] = "<EMAIL>"
azimuth = {'data': np.array(azimuths), 'units': "Degrees"}
rrange = {'data': np.arange(N_ranges)*data['resolution'],
'units': "Meters"}
elevation = {'data': np.array(elevations), 'units': "Degrees"}
time_units = 'seconds since '+str(scandate)
time_lapse = np.asarray(time_lapse)
scantime = {'data': time_lapse, 'units': time_units}
# change keys to match pyART metranet keys
fields_copy = deepcopy(fields)
for keys in fields_copy:
newkey = fields[keys]['standard_name']
fields[newkey] = fields.pop(keys)
# Create PyART instance
pyart.core.Radar.__init__(
self, scantime, rrange, fields, metadata, scan_type, latitude,
longitude, altitude, sweep_number, sweep_mode, fixed_angle,
sweep_start_ray_index, sweep_stop_ray_index, azimuth, elevation,
instrument_parameters=instrument_parameters)
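# Hedged usage sketch (illustration only): reading a rad4alp polar sweep into a
# Py-ART radar object. The file name below is hypothetical but follows the
# expected pattern: product letter + resolution letter ('L' low / other high) +
# radar letter + 'yyjjjHHMM' timestamp + '.NNN' sweep number.
#
#   radar = pyrad_MCH('/data/MLA1715208007U.005.h5',
#                     field_names=['Z', 'ZDR', 'RHO'])
#   print(list(radar.fields.keys()))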
# ----------------------- utilities - read --------------------- #
def row_stack(a1, a2):
"""
    Stacks data from subsequent sweeps, padding the narrower array with extra
    columns so that both sweeps end up with the same number of range gates.
Inputs
------
a1: np.array
destination array
a2: np.array
array which is added onto the first array
Returns
-------
out: np.array
stacked destination and additional array, with uniform shape
"""
[N1, M1] = a1.shape
[N2, M2] = a2.shape
    if M1 > M2:
        a2 = np.pad(a2, ((0, 0), (0, M1-M2)), mode='constant',
                    constant_values=-9999999)
    elif M2 > M1:
        a1 = np.pad(a1, ((0, 0), (0, M2-M1)), mode='constant',
                    constant_values=-9999999)
out = np.vstack((a1, a2))
out[out == -9999999] = np.nan
return out
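# Minimal sanity check for row_stack (hedged example; uses synthetic arrays and is
# left commented out so importing this module has no side effects):
#
#   a = np.zeros((2, 3))
#   b = np.ones((4, 5))
#   stacked = row_stack(a, b)
#   print(stacked.shape)   # -> (6, 5); the two padded columns of `a` become NaN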
def findTimes(num_sweep):
"""
Finds the times at the beginning and at the end of each sweep. Information
comes from the elapsed time since the beginning of the volume scan, from
the Rad4Alp: Specifications/ Request for Proposal (RFP) document.
Inputs
------
num_sweep: int
rank of the sweep
Returns
-------
elapsed_times[num_sweep][0]: float
the elapsed time since the beginning of the volume scan at the
beginning of the sweep
elapsed_times[num_sweep][1]: float
the elapsed time since the beginning of the volume scan at the end of
the sweep
"""
elapsed_times = {9: [0, 11.4],
7: [11.4, 22.8],
5: [22.8, 39.2],
3: [39.3, 60.5],
1: [60.5, 84.7],
19: [84.7, 97.2],
17: [97.2, 109.6],
15: [109.6, 121.6],
13: [121.6, 133.1],
11: [133.1, 144.4],
10: [144.4, 155.8],
8: [155.8, 172.2],
6: [172.2, 188.6],
4: [188.6, 204.9],
2: [204.9, 229.4],
20: [229.4, 241.9],
18: [241.9, 254.4],
16: [254.4, 266.6],
14: [266.6, 278.3],
12: [278.3, 289.9]}
return elapsed_times[num_sweep][0], elapsed_times[num_sweep][1]
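# Hedged example (commented out): elapsed-time window, in seconds since the start
# of the volume scan, for sweep number 1, taken from the table above.
#
#   t_start, t_end = findTimes(1)
#   print(t_start, t_end)   # -> 60.5 84.7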
def int2float_radar(data, varname, index_angle):
"""
    Converts radar moments from their stored integer (bit) values to physical
    float values
Inputs
------
data: np.array
moment data as loaded from h5 file
varname: str
name of the moment (i.e. 'ZH')
index_angle: int
        rank of the sweep minus 1 (i.e. a 0-based sweep index)
Returns
-------
output: np.array
moment data converted to float
"""
varname = convert_polvar_name('metranet', varname)
NYQUIST_VEL = get_mymetadata('nyq_vel')
output = np.zeros(data.shape)
if varname in ['ZH', 'ZV', 'Z', 'ZHC']:
output[data != 0] = (data[data != 0]-64)*0.5
output[data == 0] = float('nan')
elif varname == 'VEL':
output[data != 0] = (data[data != 0]-128)/127*NYQUIST_VEL[index_angle]
output[data == 0] = float('nan')
elif varname == 'WID':
output = data/255*NYQUIST_VEL[index_angle]
elif varname in ['ZDR', 'ZDRC']:
output[data != 0] = data[data != 0]*1.0/16.1259842 - 7.9375
output[data == 0] = float('nan')
elif varname == 'RHO':
output[data != 0] = 1.003-10**(-(data[data != 0]-1.0)/100)
output[data == 0] = float('nan')
elif varname == 'PHI':
output[data != 0] = (data[data != 0]-32768)/32767*180
output[data == 0] = float('nan')
elif varname == 'CLUT':
output = data
else:
output = data
warnings.warn(
("Warning, %s was not found and could not be converted")
% (varname))
return output
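# Hedged example (commented out): converting a small block of stored 8-bit
# reflectivity values to dBZ with the mapping used above (value 0 -> NaN,
# otherwise (value - 64) * 0.5). The index_angle argument is unused for 'Z'.
#
#   raw = np.array([[0, 64, 128], [96, 160, 255]], dtype=float)
#   print(int2float_radar(raw, 'Z', 0))
#   # -> [[nan, 0., 32.], [16., 48., 95.5]]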
def readMXPOLRadData(filename, variableList, max_range=np.Inf, min_range=0):
"""
Reads a netcdf containing processed radar data in polar coordinates
Parameters
----------
filename: str
complete path of the file
variableList: list
list of variables to be read
Returns
-------
varPol: dict
dictionary containing the variables, the azimuth and the range
metadata: dict
dictionary containing the metadata of the file
"""
varPol = {}
metadata = {}
ncid = netCDF4.Dataset(filename)
    time_data = ncid.variables['Time'][:]
    time_data -= time_data[0]  # To get time in seconds from beginning of scan
rrange = ncid.variables['Range'][:]
# Get indexes between min_range and max_range
idx2keep = np.where(np.logical_and(
rrange < max_range, rrange > min_range))[0]
rrange = rrange[idx2keep]
# Get variables in polar coordinates
for varname in variableList:
        try:
            varPol[varname] = ncid.variables[varname][:].T
        except KeyError:
            # variable not present in this file; skip it
            pass
varPol['resolution'] = ncid.__dict__['RangeResolution-value']
varPol['range'] = rrange
varPol['range_units'] = ncid.__dict__['RangeResolution-unit']
varPol['azimuth'] = ncid.variables['Azimuth'][:]
try:
varPol['azim_units'] = ncid.__dict__['Azimuth-unit']
except KeyError:
varPol['azim_units'] = ncid.variables['Azimuth'].Units
varPol['elevation'] = ncid.variables['Elevation'][:]
try:
varPol['elev_units'] = ncid.__dict__['Elevation-unit']
except KeyError:
varPol['elev_units'] = ncid.variables['Elevation'].Units
varPol['nyquist_vel'] = ncid.__dict__['NyquistVelocity-value']
varPol['longitude'] = ncid.__dict__['Longitude-value']
varPol['lon_units'] = ncid.__dict__['Longitude-unit']
varPol['latitude'] = ncid.__dict__['Latitude-value']
varPol['lat_units'] = ncid.__dict__['Latitude-unit']
varPol['altitude'] = ncid.__dict__['Altitude-value']
varPol['alt_units'] = ncid.__dict__['Altitude-unit']
varPol['time'] = time_data
metadata['Source'] = ncid.__dict__['Source']
metadata['Institution'] = ncid.__dict__['Institution']
metadata['History'] = ncid.__dict__['History']
metadata['ContactInformation'] = ncid.__dict__['ContactInformation']
# Close netcdf
ncid.close()
return metadata, varPol
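# Hedged usage sketch (hypothetical path; commented out): read a processed MXPOL
# PPI file and inspect the returned polar variables and metadata.
#
#   meta, vpol = readMXPOLRadData(
#       '/data/MXPol-polar-20170210-083022-PPI-042_0.nc',
#       ['Zh', 'Zdr', 'Rhohv'], max_range=50000.)
#   print(meta['Source'], vpol['range'].shape, vpol['azimuth'].shape)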
def readIDLRadData(filename, variableList, max_range=np.Inf, min_range=0):
"""
Reads a netcdf containing IDL processed radar data in polar coordinates
Parameters
----------
filename: str
complete path of the file
variableList: list
list of variables to be read
Returns
-------
varPol: dict
dictionary containing the variables, the azimuth and the range
metadata: dict
dictionary containing the metadata of the file
"""
varPol = {}
metadata = {}
ncid = netCDF4.Dataset(filename)
    time_data = ncid.variables['Time'][:]
    time_data -= time_data[0]  # To get time in seconds from beginning of scan
rrange = ncid.variables['Range'][:]
# Get indexes between min_range and max_range
idx2keep = np.where(np.logical_and(
rrange < max_range, rrange > min_range))[0]
rrange = rrange[idx2keep]
# Get variables in polar coordinates
for varname in variableList:
        try:
            varPol[varname] = ncid.variables[varname][:].T
        except KeyError:
            # variable not present in this file; skip it
            pass
varPol['resolution'] = ncid.__dict__['RangeResolution-value']
varPol['range'] = rrange
varPol['range_units'] = ncid.__dict__['RangeResolution-unit']
# because this data seems to be on -180 to 180
#varPol['azimuth'] = (ncid.variables['Azimuth'][:] + 180)%360
varPol['azimuth'] = ncid.variables['Azimuth'][:]
try:
varPol['azim_units'] = ncid.__dict__['Azimuth-unit']
except KeyError:
varPol['azim_units'] = ncid.variables['Azimuth'].Units
varPol['elevation'] = ncid.variables['Elevation'][:]
try:
varPol['elev_units'] = ncid.__dict__['Elevation-unit']
except KeyError:
varPol['elev_units'] = ncid.variables['Elevation'].Units
varPol['nyquist_vel'] = ncid.__dict__['NyquistVelocity-value']
varPol['longitude'] = ncid.__dict__['Longitude-value']
varPol['lon_units'] = ncid.__dict__['Longitude-unit']
varPol['latitude'] = ncid.__dict__['Latitude-value']
varPol['lat_units'] = ncid.__dict__['Latitude-unit']
varPol['altitude'] = ncid.__dict__['Altitude-value']
varPol['alt_units'] = ncid.__dict__['Altitude-unit']
varPol['time'] = time_data
metadata['Source'] = ncid.__dict__['Source']
metadata['Institution'] = ncid.__dict__['Institution']
metadata['History'] = ncid.__dict__['History']
metadata['ContactInformation'] = ncid.__dict__['ContactInformation']
# Close netcdf
ncid.close()
return metadata, varPol
def readCHRadData(filename, radar_name, variableList, radial_resolution,
max_range=np.Inf, min_range=0):
"""
Reads a HDF5 file containing processed radar data in polar coordinates
Parameters
----------
filename: str
complete path of the file
radar_name: str
name of MCH radar
variableList: list
list of variables to be read
radial_resolution: float
resolution of the radar in metres (i.e. high: 83.3, low: 500.)
max_range: float
        maximum range up to which to read data
    min_range: float
        minimum range from which to read data
Returns
-------
varPol: dict
the projected variables, the azimuth and the range
"""
# check that h5py library is available
if not _H5PY_AVAILABLE:
raise MissingOptionalDependency(
"h5py is required to use readCHRadData but is not installed")
varPol = {}
h5id = h5py.File(filename, 'r')
ELEVATION_ANGLES = get_elevation_metadata(radar_name)
radar_info = generate_radar_table(radar_name)
ANG_RES = radar_info['dbbeam']
NYQUIST_VEL = get_mymetadata('nyq_vel')
# Get dimensions
siz = h5id['moments']['Z'].shape
rng = np.arange(0, siz[1])*radial_resolution
idx2keep = np.where(np.logical_and(
rng < max_range, rng > min_range))[0]
rng = rng[idx2keep]
azimuth = np.arange(0, siz[0])*ANG_RES
index_angle = int(re.findall(r"\.([0-9]{3})\.", filename)[0])-1
elevation = ELEVATION_ANGLES[index_angle]
# Get variables in polar coordinates
for varname in variableList:
varname = convert_polvar_name('MCH', varname)
data = []
data = h5id['moments'][varname][:].T
data = np.asarray(data)
data = data.astype(float)
clut = h5id['moments']['CLUT'][:].T
data[clut >= 100] = float('nan') # Remove clutter
data = data[idx2keep, :]
varPol[varname] = int2float_radar(data, varname, index_angle)
varPol['resolution'] = rng[3]-rng[2]
varPol['range'] = rng
varPol['azimuth'] = azimuth
varPol['elevation'] = elevation
varPol['nyquist_vel'] = NYQUIST_VEL[index_angle]
    # Close HDF5 file
h5id.close()
return varPol
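# Hedged usage sketch (hypothetical file name; commented out): read selected
# moments from a rad4alp HDF5 sweep at low (500 m) radial resolution.
#
#   vpol = readCHRadData('/data/MLA1715208007U.005.h5', 'A',
#                        ['Z', 'ZDR', 'RHO'], 500., max_range=100000.)
#   print(vpol['elevation'], vpol['nyquist_vel'], vpol['range'].shape)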
# ------------------------ utilities - config ------------------------- #
_dirname = os.path.dirname(__file__)
_DEFAULT_CONFIG_FILE = os.path.join(_dirname, 'mxpol_config.py')
def load_myconfig(filename=None):
"""
Load configuration from a config file.
Parameters
----------
filename: str
Filename of the configuration file. If None the default configuration
file is loaded from the directory.
Returns
-------
_DEFAULT_METADATA: dict
Dictionary with metadata
"""
if filename is None:
filename = _DEFAULT_CONFIG_FILE
# private:
global cfile
global _DEFAULT_POLARNAMES
global _DEFAULT_METADATA
global _DEFAULT_RADAR_INFO
cfile = imp.load_source('metadata_config', filename)
_DEFAULT_METADATA = cfile.MY_METADATA
_DEFAULT_POLARNAMES = cfile.MY_POLARNAMES
_DEFAULT_RADAR_INFO = cfile.RADAR_INFO
return _DEFAULT_METADATA
def get_mymetadata(p, filename=None):
"""
Return a dictionary of metadata for a given parameter, p.
An empty dictionary will be returned if no metadata dictionary exists for
parameter p.
Parameters
----------
p: str
parameter name (i.e. Polvar) for which to return metadata
filename: str
Filename of the configuration file. If None the default configuration
file is loaded from the directory.
Returns
-------
_DEFAULT_METADATA[p].copy(): dict
a copy of the parameter of interest from the metadata dictionary
"""
load_myconfig(filename=filename)
if p in _DEFAULT_METADATA:
return _DEFAULT_METADATA[p].copy()
return {}
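# Hedged example (commented out; the available keys are defined in
# mxpol_config.py, e.g. 'nyq_vel' and 'Polvar' used elsewhere in this module):
#
#   nyq = get_mymetadata('nyq_vel')        # per-sweep Nyquist velocities
#   polvar_template = get_mymetadata('Polvar')
#   unknown = get_mymetadata('not_a_key')  # -> {} when the key is missing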
def get_elevation_metadata(radarname, filename=None):
"""
Gets the elevation angles for each sweep from the configuration file
Inputs
------
radarname: str
name of the radar for which to retrieve elevation angles
filename: str
name of the configuration file, if None, the default configuration
file is used
Returns
-------
_DEFAULT_RADAR_INFO['elevations'][radarname]: list
list of elevation angles in degrees
or None if not available
"""
load_myconfig(filename=filename)
if radarname in _DEFAULT_RADAR_INFO['elevations']:
return _DEFAULT_RADAR_INFO['elevations'][radarname]
else:
print(("no elevation angles in configfile for radar %s") % (radarname))
def generate_radar_table(radarname, filename=None):
"""
Generates a table with basic radar info, based on the given (or default)
configfile
Parameters
----------
radarname: str
name of the radar (i.e. 'ALB' or 'A', 'MXPOL' etc)
filename: str
path and name of the configfile, if None, the default configfile is
used
Returns
-------
radar_table: dict
table containing basic radar info
"""
load_myconfig(filename=filename)
if radarname in _DEFAULT_RADAR_INFO['radarID']:
radarname = _DEFAULT_RADAR_INFO['radarID'][radarname]
radar_table = get_mymetadata('Radar_info', filename=filename)
for key in radar_table:
if key in _DEFAULT_RADAR_INFO:
radar_table[key] = _DEFAULT_RADAR_INFO[key][radarname]
else:
radar_table[key] = None
return radar_table
return None
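# Hedged example (commented out; key names follow the 'Radar_info' entry of the
# default config file, as used elsewhere in this module):
#
#   tbl = generate_radar_table('A')
#   print(tbl['radarID'], tbl['coordinates'], tbl['altitude'], tbl['dbbeam'])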
def generate_polvar_metadata(polvar, filename=None):
"""
Generates a dictionary with metadata for a polarimetric variable
Parameters
----------
polvar: str
        polarimetric variable of interest
filename: str
Filename of the configuration file. If None the default configuration
file is loaded from the directory.
Returns
-------
polvar_metadata: dict
        dictionary with metadata for the polarimetric variable of interest
"""
load_myconfig(filename=filename)
polvar = convert_polvar_name('LTE', polvar)
if polvar in _DEFAULT_POLARNAMES:
(standard_name, long_name, units, valid_min, valid_max,
plot_interval) = _DEFAULT_POLARNAMES[polvar]
else:
(standard_name, long_name, units, valid_min, valid_max,
plot_interval) = None, None, None, None, None, None
polvar_metadata = get_mymetadata('Polvar', filename)
polvar_metadata['units'] = units
polvar_metadata['standard_name'] = standard_name
polvar_metadata['short_name'] = convert_polvar_name('MCH', polvar)
polvar_metadata['long_name'] = long_name
polvar_metadata['valid_min'] = valid_min
polvar_metadata['valid_max'] = valid_max
polvar_metadata['plot_interval'] = plot_interval
return polvar_metadata
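# Hedged example (commented out): metadata record for horizontal reflectivity,
# with the values coming from the default configuration file.
#
#   zh_meta = generate_polvar_metadata('Zh')
#   print(zh_meta['standard_name'], zh_meta['units'],
#         zh_meta['valid_min'], zh_meta['valid_max'])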
def convert_polvar_name(convention, polvar):
"""
Finds the correct variable name for a given convention (MXPOL, MCH) and
a given variable name which was spelled with a different case or
according to a different convention. For example, MXPOL convention uses
'Z' for the reflectivity variable, but if a user inserted 'Zh' this
function will convert it to 'Z'.
Parameters
----------
    convention : str, destination convention; one of 'MCH', 'LTE', 'metranet',
        'IDL' or 'pyrad'
    polvar : str, key of the polarimetric variable to be converted
    Returns
    -------
    mykey : str, polarimetric variable key as used within the ProfileLab
        toolbox context
"""
# Generate dictionary for the conversion
metranet_list = [
'ZH', 'ZV', 'ZDR', 'PHI', 'VEL', 'VEL', 'WID', 'RHO', 'CLUT', 'MPH',
'STA1', 'STA2', 'WBN', 'ZHC', 'ZDRC', 'ZDRP', 'Kdpc', 'Rhohvc']
MCH_list = [
'Z', 'ZV', 'ZDR', 'PHIDP', 'V', 'V', 'W', 'RHO', 'CLUT', 'MPH', 'STA1',
'STA2', 'WBN', 'Zhc', 'Zdrc', 'Hydrometeor_type_from_Besic1', 'Kdpc', 'RHOC']
# ZhCorr and ZdrCorr have been changed to Zhc and Zdrc!
LTE_list = [
'Zh', 'Zv', 'Zdr', 'Phidp', 'RVel', 'Rvel', 'Sw', 'Rhohv', 'Clut', 'mph',
'sta1', 'sta2', 'wbn', 'Zhc', 'Zdrc', 'Hydroclass', 'Kdpc', 'Rhohvc']
IDL_list = [
'Zh', 'Zv', 'Zdr', 'Phidp_raw', 'V', 'V', 'W', 'uRhohv', 'CLUT', 'MPH', 'STA1',
'STA2', 'WBN', 'Zhc', 'Zdrc', 'TYPECLUS2', 'Kdpc', 'Rhohvc']
pyrad_list = [
'reflectivity', 'reflectivity_vv', 'differential_reflectivity',
'differential_phase', 'velocity', 'velocity', 'spectrum_width',
'uncorrected_cross_correlation_ratio', 'radar_echo_id', 'MPH',
'STA1', 'STA2', 'WBN', 'corrected_reflectivity',
'corrected_differential_reflectivity', 'radar_echo_classification',
'corrected_specific_differential_phase',
'corrected_cross_correlation_ratio']
convertkeys = {}
convertkeys['MCH'] = {}
convertkeys['LTE'] = {}
convertkeys['metranet'] = {}
convertkeys['IDL'] = {}
convertkeys['pyrad'] = {}
for i, MCH in enumerate(MCH_list):
convertkeys['MCH'][MCH] = [
LTE_list[i], metranet_list[i], IDL_list[i], pyrad_list[i]]
for i, LTE in enumerate(LTE_list):
convertkeys['LTE'][LTE] = [
MCH_list[i], metranet_list[i], IDL_list[i], pyrad_list[i]]
for i, metranet in enumerate(metranet_list):
convertkeys['metranet'][metranet] = [
MCH_list[i], LTE_list[i], IDL_list[i], pyrad_list[i]]
for i, IDL in enumerate(IDL_list):
convertkeys['IDL'][IDL] = [
metranet_list[i], LTE_list[i], MCH_list[i], pyrad_list[i]]
for i, pyrad in enumerate(pyrad_list):
convertkeys['pyrad'][pyrad] = [
metranet_list[i], LTE_list[i], MCH_list[i], IDL_list[i]]
# translate between conventions
mykey = polvar
for key, value in convertkeys[convention].items():
if polvar in value:
mykey = key
break
return mykey
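# Hedged examples derived from the conversion tables above (commented out):
#
#   convert_polvar_name('MCH', 'Zh')      # -> 'Z'
#   convert_polvar_name('LTE', 'ZDR')     # -> 'Zdr'
#   convert_polvar_name('pyrad', 'Z')     # -> 'reflectivity'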
```
#### File: pyrad/proc/process_echoclass.py
```python
from copy import deepcopy
from warnings import warn
import numpy as np
import pyart
from ..io.io_aux import get_datatype_fields, get_fieldname_pyart
def process_echo_id(procstatus, dscfg, radar_list=None):
"""
identifies echoes as 0: No data, 1: Noise, 2: Clutter,
3: Precipitation
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZ':
refl_field = get_fieldname_pyart(datatype)
if datatype == 'dBuZ':
refl_field = get_fieldname_pyart(datatype)
if datatype == 'ZDR':
zdr_field = get_fieldname_pyart(datatype)
if datatype == 'ZDRu':
zdr_field = get_fieldname_pyart(datatype)
if datatype == 'RhoHV':
rhv_field = get_fieldname_pyart(datatype)
if datatype == 'uPhiDP':
phi_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl_field not in radar.fields) or
(zdr_field not in radar.fields) or
(rhv_field not in radar.fields) or
(phi_field not in radar.fields)):
warn('Unable to create radar_echo_id dataset. Missing data')
return None, None
echo_id = np.ma.zeros((radar.nrays, radar.ngates), dtype=np.uint8)+3
# look for clutter
gatefilter = pyart.filters.moment_and_texture_based_gate_filter(
radar, zdr_field=zdr_field, rhv_field=rhv_field, phi_field=phi_field,
refl_field=refl_field, textzdr_field=None, textrhv_field=None,
textphi_field=None, textrefl_field=None, wind_size=7,
max_textphi=20., max_textrhv=0.3, max_textzdr=2.85,
max_textrefl=8., min_rhv=0.6)
is_clutter = gatefilter.gate_excluded == 1
echo_id[is_clutter] = 2
# look for noise
is_noise = radar.fields[refl_field]['data'].data == (
pyart.config.get_fillvalue())
echo_id[is_noise] = 1
id_field = pyart.config.get_metadata('radar_echo_id')
id_field['data'] = echo_id
id_field.update({'_FillValue': 0})
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('radar_echo_id', id_field)
return new_dataset, ind_rad
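# Hedged usage sketch (commented out): the exact datatype-descriptor syntax is
# defined by get_datatype_fields in pyrad; the strings and the radar object below
# are placeholders standing in for a real pyrad configuration.
#
#   dscfg = {'datatype': ['RADAR001:dBZ', 'RADAR001:ZDR',
#                         'RADAR001:RhoHV', 'RADAR001:uPhiDP']}
#   new_dataset, ind_rad = process_echo_id(1, dscfg, radar_list=[radar])
#   # new_dataset['radar_out'] contains a single 'radar_echo_id' field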
def process_birds_id(procstatus, dscfg, radar_list=None):
"""
identifies echoes as 0: No data, 1: Noise, 2: Clutter,
3: Birds
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZ':
refl_field = get_fieldname_pyart(datatype)
if datatype == 'dBuZ':
refl_field = get_fieldname_pyart(datatype)
if datatype == 'ZDR':
zdr_field = get_fieldname_pyart(datatype)
if datatype == 'ZDRu':
zdr_field = get_fieldname_pyart(datatype)
if datatype == 'RhoHV':
rhv_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl_field not in radar.fields) or
(zdr_field not in radar.fields) or
(rhv_field not in radar.fields)):
warn('Unable to create radar_echo_id dataset. Missing data')
return None, None
# user defined parameters
max_zdr = dscfg.get('max_zdr', 3.)
max_rhv = dscfg.get('max_rhv', 0.9)
max_refl = dscfg.get('max_refl', 20.)
rmin = dscfg.get('rmin', 2000.)
rmax = dscfg.get('rmax', 25000.)
elmin = dscfg.get('elmin', 1.5)
elmax = dscfg.get('elmax', 85.)
echo_id = np.zeros((radar.nrays, radar.ngates), dtype=np.uint8)+3
# look for clutter
gatefilter = pyart.filters.birds_gate_filter(
radar, zdr_field=zdr_field, rhv_field=rhv_field,
refl_field=refl_field, max_zdr=max_zdr, max_rhv=max_rhv,
max_refl=max_refl, rmin=rmin, rmax=rmax, elmin=elmin, elmax=elmax)
is_clutter = gatefilter.gate_excluded == 1
echo_id[is_clutter] = 2
# look for noise
is_noise = radar.fields[refl_field]['data'].data == (
pyart.config.get_fillvalue())
echo_id[is_noise] = 1
id_field = pyart.config.get_metadata('radar_echo_id')
id_field['data'] = echo_id
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('radar_echo_id', id_field)
return new_dataset, ind_rad
def process_clt_to_echo_id(procstatus, dscfg, radar_list=None):
"""
Converts clutter exit code from rad4alp into pyrad echo ID
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'CLT':
clt_field = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if clt_field not in radar.fields:
warn('rad4alp clutter exit code not present. Unable to obtain echoID')
return None, None
echo_id = np.zeros((radar.nrays, radar.ngates), dtype=np.uint8)+3
clt = radar.fields[clt_field]['data']
echo_id[clt == 1] = 1
echo_id[clt >= 100] = 2
id_field = pyart.config.get_metadata('radar_echo_id')
id_field['data'] = echo_id
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('radar_echo_id', id_field)
return new_dataset, ind_rad
def process_hydro_mf_to_hydro(procstatus, dscfg, radar_list=None):
"""
Converts the hydrometeor classification from Météo France to
that of MeteoSwiss
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'hydroMF':
field = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if field not in radar.fields:
warn('hydroMF not present. Unable to obtain hydro')
return None, None
hydro = np.zeros((radar.nrays, radar.ngates), dtype=np.uint8)
hydroMF = radar.fields[field]['data']
# BRUIT, ZH_MQT, SOL, INSECTES, OISEAUX, MER_CHAFF, PARASITES,
# ROND_CENTRAL, TYPE_INCONNU, SIMPLE_POLAR are classified as NC
hydro[hydroMF<8] = 1
hydro[hydroMF==30] = 1
hydro[hydroMF==31] = 1
# PRECIP_INDIFFERENCIEE, PLUIE, PRECIP are classified as RN
hydro[hydroMF==8] = 6
hydro[hydroMF==9] = 6
hydro[hydroMF==32] = 6
hydro[hydroMF==10] = 8 # NEIGE_MOUILLEE is WS
hydro[hydroMF==11] = 2 # NEIGE_SECHE is AG
hydro[hydroMF==12] = 3 # GLACE is CR
hydro[hydroMF==13] = 5 # PETITE_GRELE is RP
# MOYENNE_GRELE, GROSSE_GRELE is IH/HDG
hydro[hydroMF==14] = 10
hydro[hydroMF==15] = 10
# Light rain (LR), vertically oriented ice (VI) and melting hail (MH) have
# no equivalent in the Météo France classification
hydro_field = pyart.config.get_metadata('radar_echo_classification')
hydro_field['data'] = hydro
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(
'radar_echo_classification', hydro_field)
return new_dataset, ind_rad
def process_echo_filter(procstatus, dscfg, radar_list=None):
"""
Masks all echo types that are not of the class specified in
keyword echo_type
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
echo_type : int or list of ints
The type of echoes to keep: 1 noise, 2 clutter, 3 precipitation.
Default 3
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
echoid_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'echoID':
echoid_field = get_fieldname_pyart(datatype)
break
if echoid_field is None:
warn('echoID field required to filter data')
return None, None
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if echoid_field not in radar.fields:
warn('Unable to filter data. Missing echo ID field')
return None, None
echo_type = dscfg.get('echo_type', 3)
mask = np.ma.isin(
radar.fields[echoid_field]['data'], echo_type, invert=True)
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'echoID':
continue
field_name = get_fieldname_pyart(datatype)
if field_name not in radar.fields:
warn('Unable to filter '+field_name+' according to echo ID. ' +
'No valid input fields')
continue
radar_field = deepcopy(radar.fields[field_name])
radar_field['data'] = np.ma.masked_where(
mask, radar_field['data'])
if field_name.startswith('corrected_'):
new_field_name = field_name
elif field_name.startswith('uncorrected_'):
new_field_name = field_name.replace(
'uncorrected_', 'corrected_', 1)
else:
new_field_name = 'corrected_'+field_name
new_dataset['radar_out'].add_field(new_field_name, radar_field)
if not new_dataset['radar_out'].fields:
return None, None
return new_dataset, ind_rad
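# Hedged usage sketch (commented out): keep only gates classified as
# precipitation (echo_type=3, the default) and output 'corrected_'-prefixed
# copies of the selected moments. Descriptor strings are placeholders.
#
#   dscfg = {'datatype': ['RADAR001:echoID', 'RADAR001:dBZ', 'RADAR001:ZDR'],
#            'echo_type': 3}
#   new_dataset, ind_rad = process_echo_filter(1, dscfg, radar_list=[radar])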
def process_cdf(procstatus, dscfg, radar_list=None):
"""
Collects the fields necessary to compute the Cumulative Distribution
Function
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
echoid_field = None
hydro_field = None
vis_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'echoID':
echoid_field = get_fieldname_pyart(datatype)
elif datatype == 'hydro':
hydro_field = get_fieldname_pyart(datatype)
elif datatype == 'VIS':
vis_field = get_fieldname_pyart(datatype)
else:
field_name = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if field_name not in radar.fields:
warn('Unable to compute CDF. Missing field')
return None, None
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(field_name, radar.fields[field_name])
if echoid_field is not None:
if echoid_field not in radar.fields:
warn('Missing echo ID field. Clutter can not be filtered')
else:
new_dataset['radar_out'].add_field(
echoid_field, radar.fields[echoid_field])
if hydro_field is not None:
if hydro_field not in radar.fields:
            warn('Missing hydrometeor type field. ' +
                 'Filtering by hydrometeor type is not possible')
else:
new_dataset['radar_out'].add_field(
hydro_field, radar.fields[hydro_field])
if vis_field is not None:
if vis_field not in radar.fields:
warn('Missing visibility field. Blocked gates can not be filtered')
else:
new_dataset['radar_out'].add_field(
vis_field, radar.fields[vis_field])
return new_dataset, ind_rad
def process_filter_snr(procstatus, dscfg, radar_list=None):
"""
filters out low SNR echoes
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
SNRmin : float. Dataset keyword
The minimum SNR to keep the data.
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('SNRh', 'SNRv'):
snr_field = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
if snr_field not in radar.fields:
warn('Unable to filter dataset according to SNR. Missing SNR field')
return None, None
gatefilter = pyart.filters.snr_based_gate_filter(
radar, snr_field=snr_field, min_snr=dscfg['SNRmin'])
is_low_snr = gatefilter.gate_excluded == 1
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('SNRh', 'SNRv'):
continue
field_name = get_fieldname_pyart(datatype)
if field_name not in radar.fields:
warn('Unable to filter '+field_name +
' according to SNR. '+'No valid input fields')
continue
radar_field = deepcopy(radar.fields[field_name])
radar_field['data'] = np.ma.masked_where(
is_low_snr, radar_field['data'])
if field_name.startswith('corrected_'):
new_field_name = field_name
elif field_name.startswith('uncorrected_'):
new_field_name = field_name.replace(
'uncorrected_', 'corrected_', 1)
else:
new_field_name = 'corrected_'+field_name
new_dataset['radar_out'].add_field(new_field_name, radar_field)
if not new_dataset['radar_out'].fields:
return None, None
return new_dataset, ind_rad
def process_filter_vel_diff(procstatus, dscfg, radar_list=None):
"""
filters out range gates that could not be used for Doppler velocity
estimation
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'diffV':
vel_diff_field = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
if vel_diff_field not in radar.fields:
warn('Unable to filter dataset according to valid velocity. ' +
'Missing velocity differences field')
return None, None
mask = np.ma.getmaskarray(radar.fields[vel_diff_field]['data'])
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'diffV':
continue
field_name = get_fieldname_pyart(datatype)
if field_name not in radar.fields:
            warn('Unable to filter '+field_name +
                 ' according to velocity difference. No valid input fields')
continue
radar_field = deepcopy(radar.fields[field_name])
radar_field['data'] = np.ma.masked_where(mask, radar_field['data'])
        if field_name.startswith('corrected_'):
new_field_name = field_name
elif field_name.startswith('uncorrected_'):
new_field_name = field_name.replace(
'uncorrected_', 'corrected_', 1)
else:
new_field_name = 'corrected_'+field_name
new_dataset['radar_out'].add_field(new_field_name, radar_field)
if not new_dataset['radar_out'].fields:
return None, None
return new_dataset, ind_rad
def process_filter_visibility(procstatus, dscfg, radar_list=None):
"""
    filters out range gates with low visibility and corrects the reflectivity
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
VISmin : float. Dataset keyword
The minimum visibility to keep the data.
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'VIS':
vis_field = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
if vis_field not in radar.fields:
warn('Unable to filter dataset according to visibility. ' +
'Missing visibility field')
return None, None
gatefilter = pyart.filters.visibility_based_gate_filter(
radar, vis_field=vis_field, min_vis=dscfg['VISmin'])
is_lowVIS = gatefilter.gate_excluded == 1
for datatypedescr in dscfg['datatype']:
_, _, datatype, _, _ = get_datatype_fields(
datatypedescr)
if datatype == 'VIS':
continue
field_name = get_fieldname_pyart(datatype)
if field_name not in radar.fields:
warn('Unable to filter '+field_name +
' according to visibility. No valid input fields')
continue
radar_aux = deepcopy(radar)
radar_aux.fields[field_name]['data'] = np.ma.masked_where(
is_lowVIS, radar_aux.fields[field_name]['data'])
if datatype in ('dBZ', 'dBZc', 'dBuZ', 'dBZv', 'dBZvc', 'dBuZv'):
radar_field = pyart.correct.correct_visibility(
radar_aux, vis_field=vis_field, field_name=field_name)
else:
radar_field = radar_aux.fields[field_name]
if field_name.startswith('corrected_'):
new_field_name = field_name
elif field_name.startswith('uncorrected_'):
new_field_name = field_name.replace(
'uncorrected_', 'corrected_', 1)
else:
new_field_name = 'corrected_'+field_name
new_dataset['radar_out'].add_field(new_field_name, radar_field)
if not new_dataset['radar_out'].fields:
return None, None
return new_dataset, ind_rad
def process_outlier_filter(procstatus, dscfg, radar_list=None):
"""
    filters out gates which are outliers with respect to the surrounding gates
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
threshold : float. Dataset keyword
            Absolute difference between the value of the examined range gate
            and the median of the surrounding gates above which the gate is
            considered an outlier
        nb : int. Dataset keyword
            The number of neighbours (to one side) to analyse, i.e. nb=2
            corresponds to a 5x5 window (24 neighbouring gates)
nb_min : int. Dataset keyword
Minimum number of neighbouring gates to consider the examined gate
valid
percentile_min, percentile_max : float. Dataset keyword
gates below (above) these percentiles (computed over the sweep) are
considered potential outliers and further examined
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
radarnr, _, datatype, _, _ = get_datatype_fields(
dscfg['datatype'][0])
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
field_name = get_fieldname_pyart(datatype)
if field_name not in radar.fields:
warn('Unable to perform outlier removal. No valid data')
return None, None
threshold = dscfg.get('threshold', 10.)
nb = dscfg.get('nb', 2)
nb_min = dscfg.get('nb_min', 3)
percentile_min = dscfg.get('percentile_min', 5.)
percentile_max = dscfg.get('percentile_max', 95.)
field = radar.fields[field_name]
field_out = deepcopy(field)
for sweep in range(radar.nsweeps):
# find gates suspected to be outliers
sweep_start = radar.sweep_start_ray_index['data'][sweep]
sweep_end = radar.sweep_end_ray_index['data'][sweep]
nrays_sweep = radar.rays_per_sweep['data'][sweep]
data_sweep = field['data'][sweep_start:sweep_end+1, :]
# check if all elements in array are masked
if np.all(np.ma.getmaskarray(data_sweep)):
continue
percent_vals = np.nanpercentile(
data_sweep.filled(fill_value=np.nan),
(percentile_min, percentile_max))
ind_rays, ind_rngs = np.ma.where(
np.ma.logical_or(
data_sweep < percent_vals[0], data_sweep > percent_vals[1]))
for i, ind_ray in enumerate(ind_rays):
ind_rng = ind_rngs[i]
# find neighbours of suspected outlier gate
data_cube = []
for ray_nb in range(-nb, nb+1):
for rng_nb in range(-nb, nb+1):
if ray_nb == 0 and rng_nb == 0:
continue
if ((ind_ray+ray_nb >= 0) and
(ind_ray+ray_nb < nrays_sweep) and
(ind_rng+rng_nb >= 0) and
(ind_rng+rng_nb < radar.ngates)):
if (data_sweep[ind_ray+ray_nb, ind_rng+rng_nb] is not
np.ma.masked):
data_cube.append(
data_sweep[ind_ray+ray_nb, ind_rng+rng_nb])
# remove data far from median of neighbours or with not enough
# valid neighbours
if len(data_cube) < nb_min:
field_out['data'][
sweep_start+ind_ray, ind_rng] = np.ma.masked
elif (abs(np.ma.median(data_cube) -
data_sweep[ind_ray, ind_rng]) > threshold):
field_out['data'][sweep_start+ind_ray, ind_rng] = np.ma.masked
if field_name.startswith('corrected_'):
new_field_name = field_name
elif field_name.startswith('uncorrected_'):
new_field_name = field_name.replace(
'uncorrected_', 'corrected_', 1)
else:
new_field_name = 'corrected_'+field_name
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(new_field_name, field_out)
return new_dataset, ind_rad
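# Hedged usage sketch (commented out): despeckle a field by masking gates that
# deviate from the median of their 5x5 neighbourhood by more than `threshold`
# (in the field's own units). Descriptor string and radar object are placeholders.
#
#   dscfg = {'datatype': ['RADAR001:V'], 'threshold': 10., 'nb': 2,
#            'nb_min': 3, 'percentile_min': 5., 'percentile_max': 95.}
#   new_dataset, ind_rad = process_outlier_filter(1, dscfg, radar_list=[radar])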
def process_hydroclass(procstatus, dscfg, radar_list=None):
"""
Classifies precipitation echoes
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
HYDRO_METHOD : string. Dataset keyword
The hydrometeor classification method. One of the following:
SEMISUPERVISED
RADARCENTROIDS : string. Dataset keyword
            Used with HYDRO_METHOD SEMISUPERVISED. The name of the radar whose
            derived centroids will be used. One of the following: A
            Albis, <NAME>, <NAME>, DX50
compute_entropy : bool. Dataset keyword
If true the entropy is computed and the field hydroclass_entropy
is output
output_distances : bool. Dataset keyword
If true the de-mixing algorithm based on the distances to the
centroids is computed and the field proportions of each
hydrometeor in the radar range gate is output
vectorize : bool. Dataset keyword
If true a vectorized version of the algorithm is used
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
if 'HYDRO_METHOD' not in dscfg:
raise Exception(
"ERROR: Undefined parameter 'HYDRO_METHOD' for dataset '%s'"
% dscfg['dsname'])
if dscfg['HYDRO_METHOD'] == 'SEMISUPERVISED':
temp_field = None
iso0_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZ':
refl_field = 'reflectivity'
if datatype == 'dBZc':
refl_field = 'corrected_reflectivity'
if datatype == 'ZDR':
zdr_field = 'differential_reflectivity'
if datatype == 'ZDRc':
zdr_field = 'corrected_differential_reflectivity'
if datatype == 'RhoHV':
rhv_field = 'cross_correlation_ratio'
if datatype == 'uRhoHV':
rhv_field = 'uncorrected_cross_correlation_ratio'
if datatype == 'RhoHVc':
rhv_field = 'corrected_cross_correlation_ratio'
if datatype == 'KDP':
kdp_field = 'specific_differential_phase'
if datatype == 'KDPc':
kdp_field = 'corrected_specific_differential_phase'
if datatype == 'TEMP':
temp_field = 'temperature'
if datatype == 'H_ISO0':
iso0_field = 'height_over_iso0'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if temp_field is None and iso0_field is None:
warn('iso0 or temperature fields needed to create hydrometeor ' +
'classification field')
return None, None
if temp_field is not None and (temp_field not in radar.fields):
warn('Unable to create hydrometeor classification field. ' +
'Missing temperature field')
return None, None
if iso0_field is not None and (iso0_field not in radar.fields):
warn('Unable to create hydrometeor classification field. ' +
'Missing height over iso0 field')
return None, None
temp_ref = 'temperature'
if iso0_field is not None:
temp_ref = 'height_over_iso0'
if ((refl_field not in radar.fields) or
(zdr_field not in radar.fields) or
(rhv_field not in radar.fields) or
(kdp_field not in radar.fields)):
warn('Unable to create hydrometeor classification field. ' +
'Missing data')
return None, None
mass_centers = np.zeros((9, 5))
if dscfg['RADARCENTROIDS'] == 'A':
# Zh ZDR kdp RhoHV delta_Z
mass_centers[0, :] = [
13.5829, 0.4063, 0.0497, 0.9868, 1330.3] # AG
mass_centers[1, :] = [
02.8453, 0.2457, 0.0000, 0.9798, 0653.8] # CR
mass_centers[2, :] = [
07.6597, 0.2180, 0.0019, 0.9799, -1426.5] # LR
mass_centers[3, :] = [
31.6815, 0.3926, 0.0828, 0.9978, 0535.3] # RP
mass_centers[4, :] = [
39.4703, 1.0734, 0.4919, 0.9876, -1036.3] # RN
mass_centers[5, :] = [
04.8267, -0.5690, 0.0000, 0.9691, 0869.8] # VI
mass_centers[6, :] = [
30.8613, 0.9819, 0.1998, 0.9845, -0066.1] # WS
mass_centers[7, :] = [
52.3969, 2.1094, 2.4675, 0.9730, -1550.2] # MH
mass_centers[8, :] = [
50.6186, -0.0649, 0.0946, 0.9904, 1179.9] # IH/HDG
elif dscfg['RADARCENTROIDS'] == 'L':
# Zh ZDR kdp RhoHV delta_Z
mass_centers[0, :] = [
13.8231, 0.2514, 0.0644, 0.9861, 1380.6] # AG
mass_centers[1, :] = [
03.0239, 0.1971, 0.0000, 0.9661, 1464.1] # CR
mass_centers[2, :] = [
04.9447, 0.1142, 0.0000, 0.9787, -0974.7] # LR
mass_centers[3, :] = [
34.2450, 0.5540, 0.1459, 0.9937, 0945.3] # RP
mass_centers[4, :] = [
40.9432, 1.0110, 0.5141, 0.9928, -0993.5] # RN
mass_centers[5, :] = [
03.5202, -0.3498, 0.0000, 0.9746, 0843.2] # VI
mass_centers[6, :] = [
32.5287, 0.9751, 0.2640, 0.9804, -0055.5] # WS
mass_centers[7, :] = [
52.6547, 2.7054, 2.5101, 0.9765, -1114.6] # MH
mass_centers[8, :] = [
46.4998, 0.1978, 0.6431, 0.9845, 1010.1] # IH/HDG
elif dscfg['RADARCENTROIDS'] == 'D':
# Zh ZDR kdp RhoHV delta_Z
mass_centers[0, :] = [
12.567, 0.18934, 0.041193, 0.97693, 1328.1] # AG
mass_centers[1, :] = [
3.2115, 0.13379, 0.0000, 0.96918, 1406.3] # CR
mass_centers[2, :] = [
10.669, 0.18119, 0.0000, 0.97337, -1171.9] # LR
mass_centers[3, :] = [
34.941, 0.13301, 0.090056, 0.9979, 898.44] # RP
mass_centers[4, :] = [
39.653, 1.1432, 0.35013, 0.98501, -859.38] # RN
mass_centers[5, :] = [
2.8874, -0.46363, 0.0000, 0.95653, 1015.6] # VI
mass_centers[6, :] = [
34.122, 0.87987, 0.2281, 0.98003, -234.37] # WS
mass_centers[7, :] = [
53.134, 2.0888, 2.0055, 0.96927, -1054.7] # MH
mass_centers[8, :] = [
46.715, 0.030477, 0.16994, 0.9969, 976.56] # IH/HDG
elif dscfg['RADARCENTROIDS'] == 'P':
# Zh ZDR kdp RhoHV delta_Z
mass_centers[0, :] = [
13.9882, 0.2470, 0.0690, 0.9939, 1418.1] # AG
mass_centers[1, :] = [
00.9834, 0.4830, 0.0043, 0.9834, 0950.6] # CR
mass_centers[2, :] = [
05.3962, 0.2689, 0.0000, 0.9831, -0479.5] # LR
mass_centers[3, :] = [
35.3411, 0.1502, 0.0940, 0.9974, 0920.9] # RP
mass_centers[4, :] = [
35.0114, 0.9681, 0.1106, 0.9785, -0374.0] # RN
mass_centers[5, :] = [
02.5897, -0.3879, 0.0282, 0.9876, 0985.5] # VI
mass_centers[6, :] = [
32.2914, 0.7789, 0.1443, 0.9075, -0153.5] # WS
mass_centers[7, :] = [
53.2413, 1.8723, 0.3857, 0.9454, -0470.8] # MH
mass_centers[8, :] = [
44.7896, 0.0015, 0.1349, 0.9968, 1116.7] # IH/HDG
elif dscfg['RADARCENTROIDS'] == 'W':
# Zh ZDR kdp RhoHV delta_Z
mass_centers[0, :] = [
16.7650, 0.3754, 0.0442, 0.9866, 1409.0] # AG
mass_centers[1, :] = [
01.4418, 0.3786, 0.0000, 0.9490, 1415.8] # CR
mass_centers[2, :] = [
16.0987, 0.3238, 0.0000, 0.9871, -0818.7] # LR
mass_centers[3, :] = [
36.5465, 0.2041, 0.0731, 0.9952, 0745.4] # RP
mass_centers[4, :] = [
43.4011, 0.6658, 0.3241, 0.9894, -0778.5] # RN
mass_centers[5, :] = [
00.9077, -0.4793, 0.0000, 0.9502, 1488.6] # VI
mass_centers[6, :] = [
36.8091, 0.7266, 0.1284, 0.9924, -0071.1] # WS
mass_centers[7, :] = [
53.8402, 0.8922, 0.5306, 0.9890, -1017.6] # MH
mass_centers[8, :] = [
45.9686, 0.0845, 0.0963, 0.9940, 0867.4] # IH/HDG
elif dscfg['RADARCENTROIDS'] == 'DX50':
# Zh ZDR kdp RhoHV delta_Z
mass_centers[0, :] = [
19.0770, 0.4139, 0.0099, 0.9841, 1061.7] # AG
mass_centers[1, :] = [
03.9877, 0.5040, 0.0000, 0.9642, 0856.6] # CR
mass_centers[2, :] = [
20.7982, 0.3177, 0.0004, 0.9858, -1375.1] # LR
mass_centers[3, :] = [
34.7124, -0.3748, 0.0988, 0.9828, 1224.2] # RP
mass_centers[4, :] = [
33.0134, 0.6614, 0.0819, 0.9802, -1169.8] # RN
mass_centers[5, :] = [
08.2610, -0.4681, 0.0000, 0.9722, 1100.7] # VI
mass_centers[6, :] = [
35.1801, 1.2830, 0.1322, 0.9162, -0159.8] # WS
mass_centers[7, :] = [
52.4539, 2.3714, 1.1120, 0.9382, -1618.5] # MH
mass_centers[8, :] = [
44.2216, -0.3419, 0.0687, 0.9683, 1272.7] # IH/HDG
else:
warn(
' Unknown radar. ' +
'Default centroids will be used in classification.')
mass_centers = None
compute_entropy = dscfg.get('compute_entropy', False)
output_distances = dscfg.get('output_distances', False)
vectorize = dscfg.get('vectorize', False)
fields_dict = pyart.retrieve.hydroclass_semisupervised(
radar, mass_centers=mass_centers,
weights=np.array([1., 1., 1., 0.75, 0.5]), refl_field=refl_field,
zdr_field=zdr_field, rhv_field=rhv_field, kdp_field=kdp_field,
temp_field=temp_field, iso0_field=iso0_field, hydro_field=None,
entropy_field=None, temp_ref=temp_ref,
compute_entropy=compute_entropy,
output_distances=output_distances, vectorize=vectorize)
else:
raise Exception(
"ERROR: Unknown hydrometeor classification method " +
dscfg['HYDRO_METHOD'])
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(
'radar_echo_classification', fields_dict['hydro'])
if compute_entropy:
new_dataset['radar_out'].add_field(
'hydroclass_entropy', fields_dict['entropy'])
if output_distances:
new_dataset['radar_out'].add_field(
'proportion_AG', fields_dict['prop_AG'])
new_dataset['radar_out'].add_field(
'proportion_CR', fields_dict['prop_CR'])
new_dataset['radar_out'].add_field(
'proportion_LR', fields_dict['prop_LR'])
new_dataset['radar_out'].add_field(
'proportion_RP', fields_dict['prop_RP'])
new_dataset['radar_out'].add_field(
'proportion_RN', fields_dict['prop_RN'])
new_dataset['radar_out'].add_field(
'proportion_VI', fields_dict['prop_VI'])
new_dataset['radar_out'].add_field(
'proportion_WS', fields_dict['prop_WS'])
new_dataset['radar_out'].add_field(
'proportion_MH', fields_dict['prop_MH'])
new_dataset['radar_out'].add_field(
'proportion_IH', fields_dict['prop_IH'])
return new_dataset, ind_rad
def process_melting_layer(procstatus, dscfg, radar_list=None):
"""
Detects the melting layer
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
if 'ML_METHOD' not in dscfg:
raise Exception(
"ERROR: Undefined parameter 'ML_METHOD' for dataset '%s'"
% dscfg['dsname'])
if dscfg['ML_METHOD'] == 'GIANGRANDE':
temp_ref = None
temp_field = None
iso0_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZ':
refl_field = 'reflectivity'
if datatype == 'dBZc':
refl_field = 'corrected_reflectivity'
if datatype == 'ZDR':
zdr_field = 'differential_reflectivity'
if datatype == 'ZDRc':
zdr_field = 'corrected_differential_reflectivity'
if datatype == 'RhoHV':
rhv_field = 'cross_correlation_ratio'
if datatype == 'RhoHVc':
rhv_field = 'corrected_cross_correlation_ratio'
if datatype == 'TEMP':
temp_field = 'temperature'
if datatype == 'H_ISO0':
iso0_field = 'height_over_iso0'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
# Check which should be the reference field for temperature
if iso0_field is not None:
if iso0_field not in radar.fields:
warn('Unable to detect melting layer. ' +
'Missing height over iso0 field')
return None, None
temp_ref = 'height_over_iso0'
if temp_field is not None:
if temp_field not in radar.fields:
warn('Unable to detect melting layer. ' +
'Missing temperature field')
return None, None
temp_ref = 'temperature'
iso0_field = 'height_over_iso0'
if temp_ref is None:
iso0_field = 'height_over_iso0'
if ((refl_field not in radar.fields) or
(zdr_field not in radar.fields) or
(rhv_field not in radar.fields)):
warn('Unable to detect melting layer. Missing data')
return None, None
# User defined variables
nVol = dscfg.get('nVol', 3)
maxh = dscfg.get('maxh', 6000.)
hres = dscfg.get('hres', 50.)
rmin = dscfg.get('rmin', 1000.)
elmin = dscfg.get('elmin', 4.)
elmax = dscfg.get('elmax', 10.)
rhomin = dscfg.get('rhomin', 0.75)
rhomax = dscfg.get('rhomax', 0.94)
zhmin = dscfg.get('zhmin', 20.)
hwindow = dscfg.get('hwindow', 500.)
mlzhmin = dscfg.get('mlzhmin', 30.)
mlzhmax = dscfg.get('mlzhmax', 50.)
mlzdrmin = dscfg.get('mlzdrmin', 1.)
mlzdrmax = dscfg.get('mlzdrmax', 5.)
htol = dscfg.get('htol', 500.)
ml_bottom_diff_max = dscfg.get('ml_bottom_diff_max', 1000.)
time_accu_max = dscfg.get('time_accu_max', 1800.)
nml_points_min = dscfg.get('nml_points_min', None)
wlength = dscfg.get('wlength', 20.)
percentile_bottom = dscfg.get('percentile_bottom', 0.3)
percentile_top = dscfg.get('percentile_top', 0.9)
interpol = dscfg.get('interpol', True)
time_nodata_allowed = dscfg.get('time_nodata_allowed', 3600.)
get_iso0 = dscfg.get('get_iso0', True)
if not dscfg['initialized']:
# initialize dataset
ml_obj, ml_dict, iso0_dict, ml_global = (
pyart.retrieve.melting_layer_giangrande(
radar, nVol=nVol, maxh=maxh, hres=hres, rmin=rmin,
elmin=elmin, elmax=elmax, rhomin=rhomin, rhomax=rhomax,
zhmin=zhmin, hwindow=hwindow, mlzhmin=mlzhmin,
mlzhmax=mlzhmax, mlzdrmin=mlzdrmin, mlzdrmax=mlzdrmax,
htol=htol, ml_bottom_diff_max=ml_bottom_diff_max,
time_accu_max=time_accu_max, nml_points_min=nml_points_min,
wlength=wlength, percentile_bottom=percentile_bottom,
percentile_top=percentile_top, interpol=interpol,
time_nodata_allowed=time_nodata_allowed,
refl_field=refl_field, zdr_field=zdr_field,
rhv_field=rhv_field, temp_field=temp_field,
iso0_field=iso0_field, ml_field='melting_layer',
ml_pos_field='melting_layer_height',
temp_ref=temp_ref, get_iso0=get_iso0, ml_global=None))
dscfg['initialized'] = True
else:
# use previous detection
ml_obj, ml_dict, iso0_dict, ml_global = (
pyart.retrieve.melting_layer_giangrande(
radar, nVol=nVol, maxh=maxh, hres=hres, rmin=rmin,
elmin=elmin, elmax=elmax, rhomin=rhomin, rhomax=rhomax,
zhmin=zhmin, hwindow=hwindow, mlzhmin=mlzhmin,
mlzhmax=mlzhmax, mlzdrmin=mlzdrmin, mlzdrmax=mlzdrmax,
htol=htol, ml_bottom_diff_max=ml_bottom_diff_max,
time_accu_max=time_accu_max, nml_points_min=nml_points_min,
wlength=wlength, percentile_bottom=percentile_bottom,
percentile_top=percentile_top, interpol=interpol,
time_nodata_allowed=time_nodata_allowed,
refl_field=refl_field, zdr_field=zdr_field,
rhv_field=rhv_field, temp_field=temp_field,
iso0_field=iso0_field, ml_field='melting_layer',
ml_pos_field='melting_layer_height',
temp_ref=temp_ref, get_iso0=get_iso0,
ml_global=dscfg['global_data']))
# update global stack
dscfg['global_data'] = ml_global
elif dscfg['ML_METHOD'] == 'WOLFENSBERGER':
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZ':
refl_field = 'reflectivity'
if datatype == 'dBZc':
refl_field = 'corrected_reflectivity'
if datatype == 'RhoHV':
rhohv_field = 'cross_correlation_ratio'
if datatype == 'RhoHVc':
rhohv_field = 'corrected_cross_correlation_ratio'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl_field not in radar.fields) or
(rhohv_field not in radar.fields)):
warn('Unable to detect melting layer. Missing data')
return None, None
# User defined parameters
max_range = dscfg.get('max_range', 20000.)
detect_threshold = dscfg.get('detect_threshold', 0.02)
interp_holes = dscfg.get('interp_holes', False)
max_length_holes = dscfg.get('max_length_holes', 250)
check_min_length = dscfg.get('check_min_length', True)
get_iso0 = dscfg.get('get_iso0', True)
ml_obj, ml_dict, iso0_dict, _ = pyart.retrieve.detect_ml(
radar, refl_field=refl_field, rhohv_field=rhohv_field,
ml_field='melting_layer', ml_pos_field='melting_layer_height',
iso0_field='height_over_iso0', max_range=max_range,
detect_threshold=detect_threshold, interp_holes=interp_holes,
max_length_holes=max_length_holes,
check_min_length=check_min_length, get_iso0=get_iso0)
elif dscfg['ML_METHOD'] == 'FROM_HYDROCLASS':
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'hydro':
hydro_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if hydro_field not in radar.fields:
warn('Unable to detect melting layer. Missing data')
return None, None
# User defined parameters
force_continuity = dscfg.get('force_continuity', True)
dist_max = dscfg.get('dist_max', 350.)
get_iso0 = dscfg.get('get_iso0', False)
ml_obj, ml_dict, iso0_dict = pyart.retrieve.melting_layer_hydroclass(
radar, hydro_field=hydro_field, ml_field='melting_layer',
ml_pos_field='melting_layer_height',
iso0_field='height_over_iso0', force_continuity=force_continuity,
dist_max=dist_max, get_iso0=get_iso0)
else:
raise Exception(
"ERROR: Unknown melting layer retrieval method " +
dscfg['ML_METHOD'])
# prepare for exit
if ml_dict is None:
return None, None
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('melting_layer', ml_dict)
if iso0_dict is not None:
new_dataset['radar_out'].add_field('height_over_iso0', iso0_dict)
new_dataset.update({'ml_obj': ml_obj})
return new_dataset, ind_rad
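# --- Illustrative sketch (editor's example, not part of pyrad) --------------
# A minimal dataset configuration for process_melting_layer with the
# GIANGRANDE method. The keys mirror the docstring and the dscfg.get()
# defaults read above; the 'RADAR001:' datatype prefix and the 'dsname'
# value are assumptions made only for this example.
_EXAMPLE_ML_DSCFG = {
    'dsname': 'ML_GIANGRANDE',       # hypothetical dataset name
    'ML_METHOD': 'GIANGRANDE',
    'datatype': ['RADAR001:dBZc', 'RADAR001:ZDRc',
                 'RADAR001:RhoHVc', 'RADAR001:H_ISO0'],
    'initialized': False,            # first volume initializes ml_global
    # optional tuning keywords (values shown are the defaults used above)
    'nVol': 3, 'maxh': 6000., 'hres': 50., 'rmin': 1000.,
    'elmin': 4., 'elmax': 10., 'rhomin': 0.75, 'rhomax': 0.94,
    'get_iso0': True,
}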
def process_zdr_column(procstatus, dscfg, radar_list=None):
"""
Detects ZDR columns
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
temp_field = None
iso0_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'ZDR':
zdr_field = 'differential_reflectivity'
if datatype == 'ZDRc':
zdr_field = 'corrected_differential_reflectivity'
if datatype == 'RhoHV':
rhv_field = 'cross_correlation_ratio'
if datatype == 'RhoHVc':
rhv_field = 'corrected_cross_correlation_ratio'
if datatype == 'TEMP':
temp_field = 'temperature'
if datatype == 'H_ISO0':
iso0_field = 'height_over_iso0'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
# Check which should be the reference field for temperature
if iso0_field is not None and (iso0_field not in radar.fields):
warn('Unable to detect melting layer. ' +
'Missing height over iso0 field')
return None, None
temp_ref = 'height_over_iso0'
if temp_field is not None and (temp_field not in radar.fields):
warn('Unable to detect melting layer. Missing temperature field')
return None, None
temp_ref = 'temperature'
iso0_field = 'height_over_iso0'
if ((zdr_field not in radar.fields) or
(rhv_field not in radar.fields)):
warn('Unable to detect melting layer. Missing data')
return None, None
rhohv_min = dscfg.get('rhohv_min', 0.8)
zdr_min = dscfg.get('zdr_min', 1.)
smooth_window = dscfg.get('smooth_window', 0.)
latlon_tol = dscfg.get('latlon_tol', 0.025) # approx 3x2 km
if smooth_window == 0:
smooth_window_len = 0
else:
smooth_window_len = int(
smooth_window/(radar.range['data'][1]-radar.range['data'][0]))
zdr_dict = deepcopy(radar.fields[zdr_field])
if smooth_window_len > 0:
zdr_dict['data'] = pyart.correct.smooth_masked(
zdr_dict['data'], wind_len=smooth_window_len, min_valid=1,
wind_type='mean')
zdr_dict['data'][
radar.fields[rhv_field]['data'] < rhohv_min] = np.ma.masked
zdr_dict['data'][zdr_dict['data'] < zdr_min] = np.ma.masked
zdr_dict['data'][radar.fields[temp_field]['data'] > 0.] = np.ma.masked
zdr_valid = np.logical_not(np.ma.getmaskarray(zdr_dict['data']))
hlowerleft, hupperright = pyart.retrieve._get_res_vol_sides(radar)
ind_ang_sorted = np.argsort(radar.fixed_angle['data'])
# get number of suspected ZDR columns
lat_cols = np.array([], dtype=int)
lon_cols = np.array([], dtype=int)
zdr_cols = np.array([], dtype=int)
g_lat = radar.gate_latitude['data']
g_lon = radar.gate_longitude['data']
for ind_ray in range(radar.nrays):
# Get bins with negative temperatures
ind_rngs = np.where(
radar.fields[temp_field]['data'][ind_ray, :] < 0.)[0]
if ind_rngs.size == 0:
continue
# Segment negative temperatures and get start of each segment
cons_list = np.split(ind_rngs, np.where(np.diff(ind_rngs) != 1)[0]+1)
for ind_rngs_cell in cons_list:
if not zdr_valid[ind_ray, ind_rngs_cell[0]]:
continue
ind_ray_col = ind_ray
ind_rng_col = ind_rngs_cell[0]
# extract data around point:
ind_rays, ind_rngs = np.where(np.logical_and.reduce((
np.logical_and(
g_lat >= g_lat[ind_ray_col, ind_rng_col]-latlon_tol,
g_lat <= g_lat[ind_ray_col, ind_rng_col]+latlon_tol),
np.logical_and(
g_lon >= g_lon[ind_ray_col, ind_rng_col]-latlon_tol,
g_lon <= g_lon[ind_ray_col, ind_rng_col]+latlon_tol),
zdr_valid)))
# get ZDR column height for each radar sweep
h_low = np.ma.masked_all(radar.nsweeps)
h_high = np.ma.masked_all(radar.nsweeps)
for sweep in range(radar.nsweeps):
ind = np.where(np.logical_and(
ind_rays >= radar.sweep_start_ray_index['data'][sweep],
ind_rays <= radar.sweep_end_ray_index['data'][sweep]))[0]
if ind.size == 0:
continue
h_low[sweep] = np.min(
hlowerleft[ind_rays[ind], ind_rngs[ind]])
h_high[sweep] = np.max(
hupperright[ind_rays[ind], ind_rngs[ind]])
# order data by elevation angle
h_low = h_low[ind_ang_sorted]
h_high = h_high[ind_ang_sorted]
# get the first segment of continuous ZDR valid values
ind_valid = np.where(np.ma.getmaskarray(h_low) == 0)[0]
ind_valid = np.split(
ind_valid, np.where(np.diff(ind_valid) != 1)[0]+1)[0]
# compute ZDR column
zdr_col = h_high[ind_valid[-1]]-h_low[ind_valid[0]]
# put data in output array
lat_cols = np.append(
lat_cols,
radar.gate_latitude['data'][ind_ray_col, ind_rng_col])
lon_cols = np.append(
lon_cols,
radar.gate_longitude['data'][ind_ray_col, ind_rng_col])
zdr_cols = np.append(zdr_cols, zdr_col)
zdr_col_dict = pyart.config.get_metadata(
'differential_reflectivity_column_height')
zdr_col_dict['data'] = zdr_cols/1000.
new_dataset = {
'field_limits': [
np.min(radar.gate_longitude['data']),
np.max(radar.gate_longitude['data']),
np.min(radar.gate_latitude['data']),
np.max(radar.gate_latitude['data'])],
'lat': lat_cols,
'lon': lon_cols,
'fields': {'differential_reflectivity_column_height': zdr_col_dict}}
return new_dataset, ind_rad
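# --- Illustrative sketch (editor's example, not part of pyrad) --------------
# Per-sweep column extraction used above: the lowest/highest resolution
# volume heights are gathered per sweep, ordered by elevation, and the ZDR
# column is the vertical extent of the first contiguous run of valid
# sweeps. The numbers below are made up purely for illustration.
def _example_zdr_column_height():
    import numpy as np
    h_low = np.ma.masked_invalid([1200., 1500., np.nan, 2500.])   # m per sweep
    h_high = np.ma.masked_invalid([1800., 2300., np.nan, 3000.])  # m per sweep
    ind_valid = np.where(np.ma.getmaskarray(h_low) == 0)[0]
    ind_valid = np.split(ind_valid, np.where(np.diff(ind_valid) != 1)[0]+1)[0]
    # first contiguous segment is sweeps 0-1 -> 2300. - 1200. = 1100. m
    return h_high[ind_valid[-1]] - h_low[ind_valid[0]]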
```
#### File: pyrad/proc/process_iq.py
```python
from copy import deepcopy
from warnings import warn
import numpy as np
from netCDF4 import num2date
import pyart
from ..io.io_aux import get_datatype_fields, get_fieldname_pyart
def process_raw_iq(procstatus, dscfg, radar_list=None):
"""
Dummy function that returns the initial input data set
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, _, _, _ = get_datatype_fields(datatypedescr)
break
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
new_dataset = {'radar_out': deepcopy(radar_list[ind_rad])}
return new_dataset, ind_rad
def process_pol_variables_iq(procstatus, dscfg, radar_list=None):
"""
Computes the polarimetric variables from the IQ data
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
subtract_noise : Bool
If True noise will be subtracted from the signal
lag : int
The time lag to use in the estimators
direction : str
The convention used in the Doppler mean field. Can be
negative_away or negative_towards
variables : list of str
list of variables to compute. Default dBZ
phase_offset : float. Dataset keyword
The system differential phase offset to remove
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
noise_h_field = None
noise_v_field = None
signal_h_field = None
signal_v_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'IQhhADU':
signal_h_field = get_fieldname_pyart(datatype)
elif datatype == 'IQvvADU':
signal_v_field = get_fieldname_pyart(datatype)
elif datatype == 'IQNADUh':
noise_h_field = get_fieldname_pyart(datatype)
elif datatype == 'IQNADUv':
noise_v_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
subtract_noise = dscfg.get('subtract_noise', False)
lag = dscfg.get('lag', None)
direction = dscfg.get('direction', 'negative_away')
variables = dscfg.get('variables', ['dBZ'])
phase_offset = dscfg.get('phase_offset', 0.)
fields_list = []
for variable in variables:
fields_list.append(get_fieldname_pyart(variable))
radar = pyart.retrieve.compute_pol_variables_iq(
radar, fields_list, subtract_noise=subtract_noise, lag=lag,
direction=direction, phase_offset=phase_offset,
signal_h_field=signal_h_field, signal_v_field=signal_v_field,
noise_h_field=noise_h_field, noise_v_field=noise_v_field)
# prepare for exit
new_dataset = {'radar_out': radar}
return new_dataset, ind_rad
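# --- Illustrative sketch (editor's example, not part of pyrad) --------------
# Example dataset configuration for process_pol_variables_iq, using the
# keywords documented in the docstring and the defaults read above. The
# 'RADAR001:' datatype prefix and the chosen keyword values are assumptions
# made only for this example.
_EXAMPLE_POL_VARS_IQ_DSCFG = {
    'datatype': ['RADAR001:IQhhADU', 'RADAR001:IQvvADU',
                 'RADAR001:IQNADUh', 'RADAR001:IQNADUv'],
    'subtract_noise': True,           # subtract the noise fields from the signal
    'lag': 1,                         # time lag used by the estimators
    'direction': 'negative_away',     # Doppler sign convention
    'variables': ['dBZ', 'ZDR'],      # products to compute (default is ['dBZ'])
    'phase_offset': 0.,               # system differential phase offset to remove
}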
def process_reflectivity_iq(procstatus, dscfg, radar_list=None):
"""
Computes reflectivity from the IQ data
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
subtract_noise : Bool
If True noise will be subtracted from the signal
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
noise_field = None
signal_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('IQhhADU', 'IQvvADU'):
signal_field = get_fieldname_pyart(datatype)
elif datatype in ('IQNADUh', 'IQNADUv'):
noise_field = get_fieldname_pyart(datatype)
if signal_field is None:
warn('Signal field must be specified')
return None, None
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
if signal_field not in radar.fields:
warn('Unable to obtain reflectivity. Missing field ' +
signal_field)
return None, None
subtract_noise = dscfg.get('subtract_noise', False)
dBZ = pyart.retrieve.compute_reflectivity_iq(
radar, subtract_noise=subtract_noise, signal_field=signal_field,
noise_field=noise_field)
reflectivity_field = 'reflectivity'
if signal_field in ('IQ_vv_ADU',):
reflectivity_field += '_vv'
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
new_dataset['radar_out'].add_field(reflectivity_field, dBZ)
return new_dataset, ind_rad
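# --- Illustrative note (editor's sketch, not part of pyrad) -----------------
# Field naming convention used by this and the following IQ products:
# quantities derived from the vertical channel ('IQ_vv_ADU') get a '_vv'
# suffix, e.g.
_EXAMPLE_IQ_FIELD_SUFFIXES = {
    'IQ_hh_ADU': 'reflectivity',
    'IQ_vv_ADU': 'reflectivity_vv',
}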
def process_st1_iq(procstatus, dscfg, radar_list=None):
"""
Computes the statistical test one-lag fluctuation (ST1) from the
horizontal or vertical IQ data
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
radarnr, _, datatype, _, _ = get_datatype_fields(dscfg['datatype'][0])
signal_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
if signal_field not in radar.fields:
warn('Unable to obtain ST1. Missing fields')
return None, None
st1 = pyart.retrieve.compute_st1_iq(
radar, signal_field=signal_field)
st1_field = 'stat_test_lag1'
if signal_field == 'IQ_vv_ADU':
st1_field += '_vv'
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
new_dataset['radar_out'].add_field(st1_field, st1)
return new_dataset, ind_rad
def process_st2_iq(procstatus, dscfg, radar_list=None):
"""
Computes the statistical test two-lag fluctuation (ST2) from the
horizontal or vertical IQ data
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
radarnr, _, datatype, _, _ = get_datatype_fields(dscfg['datatype'][0])
signal_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
if signal_field not in radar.fields:
warn('Unable to obtain ST2. Missing fields')
return None, None
st2 = pyart.retrieve.compute_st2_iq(
radar, signal_field=signal_field)
st2_field = 'stat_test_lag2'
if signal_field == 'IQ_vv_ADU':
st2_field += '_vv'
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
new_dataset['radar_out'].add_field(st2_field, st2)
return new_dataset, ind_rad
def process_wbn_iq(procstatus, dscfg, radar_list=None):
"""
Computes the wide band noise from the horizontal or vertical IQ data
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
radarnr, _, datatype, _, _ = get_datatype_fields(dscfg['datatype'][0])
signal_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
if signal_field not in radar.fields:
warn('Unable to obtain WBN. Missing fields')
return None, None
wbn = pyart.retrieve.compute_wbn_iq(
radar, signal_field=signal_field)
wbn_field = 'wide_band_noise'
if signal_field == 'IQ_vv_ADU':
wbn_field += '_vv'
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
new_dataset['radar_out'].add_field(wbn_field, wbn)
return new_dataset, ind_rad
def process_differential_reflectivity_iq(procstatus, dscfg, radar_list=None):
"""
Computes differential reflectivity from the horizontal and vertical
IQ data
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
subtract_noise : Bool
If True noise will be subtracted from the signal
lag : int
The time lag to use in the estimators
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
noise_h_field = None
noise_v_field = None
signal_h_field = None
signal_v_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'IQhhADU':
signal_h_field = get_fieldname_pyart(datatype)
elif datatype == 'IQvvADU':
signal_v_field = get_fieldname_pyart(datatype)
elif datatype == 'IQNADUh':
noise_h_field = get_fieldname_pyart(datatype)
elif datatype == 'IQNADUv':
noise_v_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
if (signal_h_field not in radar.fields or
signal_v_field not in radar.fields):
warn('Unable to obtain differential reflectivity. ' +
'Missing fields')
return None, None
subtract_noise = dscfg.get('subtract_noise', False)
lag = dscfg.get('lag', 0)
zdr = pyart.retrieve.compute_differential_reflectivity_iq(
radar, subtract_noise=subtract_noise, lag=lag,
signal_h_field=signal_h_field, signal_v_field=signal_v_field,
noise_h_field=noise_h_field, noise_v_field=noise_v_field)
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
new_dataset['radar_out'].add_field('differential_reflectivity', zdr)
return new_dataset, ind_rad
def process_mean_phase_iq(procstatus, dscfg, radar_list=None):
"""
Computes the mean phase from the horizontal or vertical IQ data
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
radarnr, _, datatype, _, _ = get_datatype_fields(dscfg['datatype'][0])
signal_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
if signal_field not in radar.fields:
warn('Unable to obtain MPH. Missing fields')
return None, None
mph = pyart.retrieve.compute_mean_phase_iq(
radar, signal_field=signal_field)
mean_phase_field = 'mean_phase'
if signal_field == 'IQ_vv_ADU':
mean_phase_field += '_vv'
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
new_dataset['radar_out'].add_field(mean_phase_field, mph)
return new_dataset, ind_rad
def process_differential_phase_iq(procstatus, dscfg, radar_list=None):
"""
Computes the differential phase from the horizontal and vertical IQ data
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
phase_offset : float. Dataset keyword
The system differential phase offset to remove
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
signal_h_field = None
signal_v_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'IQhhADU':
signal_h_field = get_fieldname_pyart(datatype)
elif datatype == 'IQvvADU':
signal_v_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
if (signal_h_field not in radar.fields or
signal_v_field not in radar.fields):
warn('Unable to obtain PhiDP. Missing fields')
return None, None
phase_offset = dscfg.get('phase_offset', 0.)
uphidp = pyart.retrieve.compute_differential_phase_iq(
radar, phase_offset=phase_offset, signal_h_field=signal_h_field,
signal_v_field=signal_v_field)
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
new_dataset['radar_out'].add_field(
'uncorrected_differential_phase', uphidp)
return new_dataset, ind_rad
def process_rhohv_iq(procstatus, dscfg, radar_list=None):
"""
Computes RhoHV from the horizontal and vertical IQ data
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
subtract_noise : Bool
If True noise will be subtracted from the signal
lag : int
Time lag used in the computation
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
noise_h_field = None
noise_v_field = None
signal_h_field = None
signal_v_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'IQhhADU':
signal_h_field = get_fieldname_pyart(datatype)
elif datatype == 'IQvvADU':
signal_v_field = get_fieldname_pyart(datatype)
elif datatype == 'IQNADUh':
noise_h_field = get_fieldname_pyart(datatype)
elif datatype == 'IQNADUv':
noise_v_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
if (signal_h_field not in radar.fields or
signal_v_field not in radar.fields):
warn('Unable to obtain RhoHV. Missing fields')
return None, None
subtract_noise = dscfg.get('subtract_noise', False)
lag = dscfg.get('lag', 0)
rhohv = pyart.retrieve.compute_rhohv_iq(
radar, subtract_noise=subtract_noise, lag=lag,
signal_h_field=signal_h_field, signal_v_field=signal_v_field,
noise_h_field=noise_h_field, noise_v_field=noise_v_field)
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
new_dataset['radar_out'].add_field('cross_correlation_ratio', rhohv)
return new_dataset, ind_rad
def process_Doppler_velocity_iq(procstatus, dscfg, radar_list=None):
"""
Compute the Doppler velocity from the IQ data
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
direction : str
The convention used in the Doppler mean field. Can be
negative_away or negative_towards
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('IQhhADU', 'IQvvADU'):
signal_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
if signal_field not in radar.fields:
warn('Unable to obtain Doppler velocity. ' +
'Missing field '+signal_field)
return None, None
direction = dscfg.get('direction', 'negative_away')
vel = pyart.retrieve.compute_Doppler_velocity_iq(
radar, signal_field=signal_field, direction=direction)
vel_field = 'velocity'
if signal_field == 'IQ_vv_ADU':
vel_field += '_vv'
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
new_dataset['radar_out'].add_field(vel_field, vel)
return new_dataset, ind_rad
def process_Doppler_width_iq(procstatus, dscfg, radar_list=None):
"""
Compute the Doppler spectrum width from the IQ data
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
subtract_noise : Bool
If True noise will be subtracted from the signals
lag : int
Time lag used in the denominator of the computation
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
noise_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('IQhhADU', 'IQvvADU'):
signal_field = get_fieldname_pyart(datatype)
elif datatype in ('IQNADUh', 'IQNADUv'):
noise_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
if signal_field not in radar.fields:
warn('Unable to obtain Doppler spectrum width. ' +
'Missing field '+signal_field)
return None, None
subtract_noise = dscfg.get('subtract_noise', False)
lag = dscfg.get('lag', 1)
width = pyart.retrieve.compute_Doppler_width_iq(
radar, subtract_noise=subtract_noise, signal_field=signal_field,
noise_field=noise_field, lag=lag)
width_field = 'spectrum_width'
if signal_field == 'IQ_vv_ADU':
width_field += '_vv'
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(radar)}
new_dataset['radar_out'].add_field(width_field, width)
return new_dataset, ind_rad
def process_fft(procstatus, dscfg, radar_list=None):
"""
Compute the Doppler spectra from the IQ data with a Fourier transform
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
window : list of str
Parameters of the window used to obtain the spectra. The
parameters are the ones corresponding to function
scipy.signal.windows.get_window. It can also be ['None'].
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
radarnr, _, datatype, _, _ = get_datatype_fields(dscfg['datatype'][0])
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
wind_params = dscfg.get('window', ['None'])
if len(wind_params) == 1:
window = wind_params[0]
if window == 'None':
window = None
else:
try:
window = float(window)
except ValueError:
pass
else:
window = wind_params
for i in range(1, len(window)):
window[i] = float(window[i])
window = tuple(window)
fields_in_list = []
fields_out_list = []
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
if field_name not in radar.fields:
warn(field_name+' not in radar')
continue
if field_name == 'IQ_hh_ADU':
fields_out_list.append('unfiltered_complex_spectra_hh_ADU')
elif field_name == 'IQ_vv_ADU':
fields_out_list.append('unfiltered_complex_spectra_vv_ADU')
elif field_name == 'IQ_noise_power_hh_ADU':
fields_out_list.append('spectral_noise_power_hh_ADU')
elif field_name == 'IQ_noise_power_vv_ADU':
fields_out_list.append('spectral_noise_power_vv_ADU')
else:
warn(field_name+' can not be Fourier transformed')
fields_in_list.append(field_name)
radar_out = pyart.retrieve.compute_spectra(
radar, fields_in_list, fields_out_list, window=window)
# prepare for exit
new_dataset = {'radar_out': radar_out}
return new_dataset, ind_rad
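# --- Illustrative sketch (editor's example, not part of pyrad) --------------
# How the 'window' keyword above is turned into the argument passed to
# pyart.retrieve.compute_spectra (and ultimately interpreted like
# scipy.signal.windows.get_window input). The helper below only replicates
# the parsing logic of process_fft for a few hypothetical configurations.
def _example_fft_window_parsing(wind_params):
    if len(wind_params) == 1:
        window = wind_params[0]
        if window == 'None':
            return None                   # no windowing
        try:
            return float(window)          # e.g. ['0.5'] -> numeric window parameter
        except ValueError:
            return window                 # e.g. ['hann'] -> named window
    window = list(wind_params)
    for i in range(1, len(window)):
        window[i] = float(window[i])      # e.g. ['kaiser', '14'] -> ('kaiser', 14.0)
    return tuple(window)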
```
#### File: pyrad/proc/process_monitoring.py
```python
from copy import deepcopy
from warnings import warn
import numpy as np
import pyart
from ..io.io_aux import get_datatype_fields, get_fieldname_pyart
from ..io.read_data_other import read_selfconsistency
from ..io.read_data_radar import interpol_field
from ..util.radar_utils import get_histogram_bins
from ..util.stat_utils import ratio_bootstrapping
def process_selfconsistency_kdp_phidp(procstatus, dscfg, radar_list=None):
"""
Computes specific differential phase and differential phase in rain using
the selfconsistency between Zdr, Zh and KDP
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of strings. Dataset keyword
The input data types
parametrization : str
The type of parametrization for the self-consistency curves. Can
be 'None', 'Gourley', 'Wolfensberger', 'Louf', 'Gorgucci' or
'Vaccarono'
'None' will use tables from config files. Default 'None'.
rsmooth : float. Dataset keyword
length of the smoothing window [m]. Default 2000.
min_rhohv : float. Dataset keyword
minimum valid RhoHV. Default 0.92
filter_rain : Bool. Dataset keyword
If True the hydrometeor classification is used to filter out gates
that are not rain. Default True
max_phidp : float. Dataset keyword
maximum valid PhiDP [deg]. Default 20.
ml_thickness : float. Dataset keyword
assumed melting layer thickness [m]. Default 700.
fzl : float. Dataset keyword
The default freezing level height. It will be used if no
temperature field name is specified or the temperature field is
not in the radar object. Default 2000.
frequency : float. Dataset keyword
the radar frequency [Hz]. If None that of the key
frequency in attribute instrument_parameters of the radar
object will be used. If the key or the attribute are not present
the selfconsistency will not be computed
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
temp = None
iso0 = None
hydro = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZc':
refl = 'corrected_reflectivity'
if datatype == 'dBZ':
refl = 'reflectivity'
if datatype == 'ZDRc':
zdr = 'corrected_differential_reflectivity'
if datatype == 'ZDR':
zdr = 'differential_reflectivity'
if datatype == 'PhiDPc':
phidp = 'corrected_differential_phase'
if datatype == 'PhiDP':
phidp = 'differential_phase'
if datatype == 'TEMP':
temp = 'temperature'
if datatype == 'H_ISO0':
iso0 = 'height_over_iso0'
if datatype == 'hydro':
hydro = 'radar_echo_classification'
if datatype == 'RhoHV':
rhohv = 'cross_correlation_ratio'
if datatype == 'RhoHVc':
rhohv = 'corrected_cross_correlation_ratio'
if datatype == 'uRhoHV':
rhohv = 'uncorrected_cross_correlation_ratio'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl not in radar.fields) or
(zdr not in radar.fields) or
(phidp not in radar.fields) or
(rhohv not in radar.fields)):
warn('Unable to estimate PhiDP/KDP using selfconsistency. ' +
'Missing data')
return None, None
# determine which freezing level reference
if temp is not None:
if temp in radar.fields:
temp_ref = 'temperature'
else:
warn('COSMO temperature field not available. ' +
'Using fixed freezing level height to determine liquid phase')
temp_ref = 'fixed_fzl'
elif iso0 is not None:
if iso0 in radar.fields:
temp_ref = 'height_over_iso0'
else:
warn('Height over iso0 field not available. ' +
'Using fixed freezing level height to determine liquid phase')
temp_ref = 'fixed_fzl'
else:
warn('Field to obtain the freezing level was not specified. ' +
'Using fixed freezing level height')
temp_ref = 'fixed_fzl'
# determine freezing level height if necessary
fzl = None
if temp_ref == 'fixed_fzl':
if 'fzl' in dscfg:
fzl = dscfg['fzl']
else:
fzl = 2000.
warn('Freezing level height not defined. Using default ' +
str(fzl)+' m')
# get self-consistency parametrization or curves
parametrization = dscfg.get('parametrization', 'None')
if dscfg['initialized'] == 0:
# get frequency band
freq = dscfg.get('frequency', None)
if freq is None:
if (radar.instrument_parameters is not None and
'frequency' in radar.instrument_parameters):
freq = radar.instrument_parameters['frequency']['data'][0]
else:
warn('Unable to estimate PhiDP/KDP using ' +
'self-consistency. Unknown radar frequency')
return None, None
freq_band = pyart.retrieve.get_freq_band(freq)
if parametrization == 'None':
# find unique elevations
el_vec = np.unique(
(10.*np.round(radar.elevation['data'], decimals=1)).astype(int))
zdr_kdpzh_list = list()
el_list = list()
for el in el_vec:
fname = (
dscfg['configpath'] + 'selfconsistency/' +
'selfconsistency_zdr_zhkdp_'+freq_band+'band_temp10_elev' +
'{:03d}'.format(el)+'_mu05.txt')
zdr_kdpzh_table = read_selfconsistency(fname)
if zdr_kdpzh_table is not None:
zdr_kdpzh_list.append(zdr_kdpzh_table)
el_list.append((el/10.).astype(int))
if not el_list:
warn('Unable to retrieve PhiDP and KDP using self-consistency. ' +
'No selfconsistency files for the radar elevations.')
return None, None
zdr_kdpzh_dict = {'zdr_kdpzh': zdr_kdpzh_list,
'elev': el_list,
'freq_band': freq_band}
else:
zdr_kdpzh_dict = {'zdr_kdpzh': None,
'elev': None,
'freq_band': freq_band}
dscfg['global_data'] = zdr_kdpzh_dict
dscfg['initialized'] = 1
if dscfg['initialized'] == 1:
# get user defined values
rsmooth = dscfg.get('rsmooth', 2000.)
min_rhohv = dscfg.get('min_rhohv', 0.92)
filter_rain = dscfg.get('filter_rain', True)
max_phidp = dscfg.get('max_phidp', 20.)
ml_thickness = dscfg.get('ml_thickness', 700.)
kdpsim_field = 'specific_differential_phase'
phidpsim_field = 'differential_phase'
r_res = radar.range['data'][1]-radar.range['data'][0]
smooth_wind_len = int(rsmooth/r_res)
kdpsim, phidpsim = pyart.correct.selfconsistency_kdp_phidp(
radar, dscfg['global_data'], min_rhohv=min_rhohv,
filter_rain=filter_rain, max_phidp=max_phidp,
smooth_wind_len=smooth_wind_len, doc=15, fzl=fzl,
thickness=ml_thickness, parametrization=parametrization,
refl_field=refl, phidp_field=phidp, zdr_field=zdr,
temp_field=temp, iso0_field=iso0, hydro_field=hydro,
rhohv_field=rhohv, kdpsim_field=kdpsim_field,
phidpsim_field=phidpsim_field, temp_ref=temp_ref)
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(kdpsim_field, kdpsim)
new_dataset['radar_out'].add_field(phidpsim_field, phidpsim)
return new_dataset, ind_rad
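# --- Illustrative sketch (editor's example, not part of pyrad) --------------
# How the per-elevation self-consistency table file names are built above:
# elevations are rounded to 0.1 deg, scaled by 10 and zero-padded to three
# digits. The configpath and frequency band below are hypothetical; only the
# naming pattern mirrors the code.
def _example_selfconsistency_fnames(configpath='/some/configpath/',
                                    freq_band='C',
                                    elevations_deg=(1.0, 2.5, 40.0)):
    import numpy as np
    el_vec = np.unique(
        (10.*np.round(np.asarray(elevations_deg), decimals=1)).astype(int))
    # e.g. elevation 2.5 deg -> '..._elev025_mu05.txt'
    return [configpath+'selfconsistency/' +
            'selfconsistency_zdr_zhkdp_'+freq_band+'band_temp10_elev' +
            '{:03d}'.format(el)+'_mu05.txt' for el in el_vec]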
def process_selfconsistency_bias(procstatus, dscfg, radar_list=None):
"""
Estimates the reflectivity bias by means of the selfconsistency
algorithm by Gourley
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
parametrization : str
The type of parametrization for the self-consistency curves. Can
be 'None', 'Gourley', 'Wolfensberger', 'Louf', 'Gorgucci' or
'Vaccarono'
'None' will use tables from config files. Default 'None'.
fzl : float. Dataset keyword
Default freezing level height. Default 2000.
rsmooth : float. Dataset keyword
length of the smoothing window [m]. Default 2000.
min_rhohv : float. Dataset keyword
minimum valid RhoHV. Default 0.92
filter_rain : Bool. Dataset keyword
If True the hydrometeor classification is used to filter out gates
that are not rain. Default True
max_phidp : float. Dataset keyword
maximum valid PhiDP [deg]. Default 20.
ml_thickness : float. Dataset keyword
Melting layer thickness [m]. Default 700.
rcell : float. Dataset keyword
length of continuous precipitation to consider the precipitation
cell a valid phidp segment [m]. Default 15000.
dphidp_min : float. Dataset keyword
minimum phase shift [deg]. Default 2.
dphidp_max : float. Dataset keyword
maximum phase shift [deg]. Default 16.
frequency : float. Dataset keyword
the radar frequency [Hz]. If None that of the key
frequency in attribute instrument_parameters of the radar
object will be used. If the key or the attribute are not present
the selfconsistency will not be computed
check_wet_radome : Bool. Dataset keyword
if True the average reflectivity of the closest gates to the radar
is going to be checked to find out whether there is rain over the
radome. If there is rain no bias will be computed. Default True.
wet_radome_refl : Float. Dataset keyword
Average reflectivity [dBZ] of the gates close to the radar to
consider the radome as wet. Default 25.
wet_radome_rng_min, wet_radome_rng_max : Float. Dataset keyword
Min and max range [m] of the disk around the radar used to compute
the average reflectivity to determine whether the radome is wet.
Default 2000 and 4000.
wet_radome_ngates_min : int
Minimum number of valid gates to consider that the radome is wet.
Default 180
valid_gates_only : Bool
If True the reflectivity bias obtained for each valid ray is going
to be assigned only to gates of the segment used. That will give
more weight to longer segments when computing the total bias.
Default False
keep_points : Bool
If True the ZDR, ZH and KDP of the gates used in the self-
consistency algorithm are going to be stored for further analysis.
Default False
rkdp : float
The length of the window used to compute KDP with the single
window least square method [m]. Default 6000.
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus == 0:
return None, None
keep_points = dscfg.get('keep_points', False)
if procstatus == 2:
if not keep_points or dscfg['initialized'] == 0:
return None, None
return (
{'selfconsistency_points':
dscfg['global_data']['selfconsistency_points']}, None)
temp = None
iso0 = None
hydro = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZc':
refl = 'corrected_reflectivity'
if datatype == 'dBZ':
refl = 'reflectivity'
if datatype == 'ZDRc':
zdr = 'corrected_differential_reflectivity'
if datatype == 'ZDR':
zdr = 'differential_reflectivity'
if datatype == 'PhiDPc':
phidp = 'corrected_differential_phase'
if datatype == 'PhiDP':
phidp = 'differential_phase'
if datatype == 'TEMP':
temp = 'temperature'
if datatype == 'H_ISO0':
iso0 = 'height_over_iso0'
if datatype == 'hydro':
hydro = 'radar_echo_classification'
if datatype == 'RhoHV':
rhohv = 'cross_correlation_ratio'
if datatype == 'RhoHVc':
rhohv = 'corrected_cross_correlation_ratio'
if datatype == 'uRhoHV':
rhohv = 'uncorrected_cross_correlation_ratio'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl not in radar.fields) or
(zdr not in radar.fields) or
(phidp not in radar.fields) or
(rhohv not in radar.fields)):
warn('Unable to estimate reflectivity bias using selfconsistency. ' +
'Missing data')
return None, None
# determine which freezing level reference
if temp is not None:
if temp in radar.fields:
temp_ref = 'temperature'
else:
warn('COSMO temperature field not available. ' +
'Using fixed freezing level height to determine liquid phase')
temp_ref = 'fixed_fzl'
elif iso0 is not None:
if iso0 in radar.fields:
temp_ref = 'height_over_iso0'
else:
warn('Height over iso0 field not available. ' +
'Using fixed freezing level height to determine liquid phase')
temp_ref = 'fixed_fzl'
else:
warn('Field to obtain the freezing level was not specified. ' +
'Using fixed freezing level height')
temp_ref = 'fixed_fzl'
# determine freezing level height if necessary
fzl = None
if temp_ref == 'fixed_fzl':
if 'fzl' in dscfg:
fzl = dscfg['fzl']
else:
fzl = 2000.
warn('Freezing level height not defined. Using default ' +
str(fzl)+' m')
# get self-consistency parametrization or curves
parametrization = dscfg.get('parametrization', 'None')
if dscfg['initialized'] == 0:
# get frequency band
freq = dscfg.get('frequency', None)
if freq is None:
if (radar.instrument_parameters is not None and
'frequency' in radar.instrument_parameters):
freq = radar.instrument_parameters['frequency']['data'][0]
else:
warn('Unable to estimate Zh bias using ' +
'self-consistency. Unknown radar frequency')
return None, None
freq_band = pyart.retrieve.get_freq_band(freq)
if parametrization == 'None':
# find unique elevations
el_vec = np.unique(
(10.*np.round(radar.elevation['data'], decimals=1)).astype(int))
zdr_kdpzh_list = list()
el_list = list()
for el in el_vec:
fname = (
dscfg['configpath'] + 'selfconsistency/' +
'selfconsistency_zdr_zhkdp_'+freq_band+'band_temp10_elev' +
'{:03d}'.format(el)+'_mu05.txt')
zdr_kdpzh_table = read_selfconsistency(fname)
if zdr_kdpzh_table is not None:
zdr_kdpzh_list.append(zdr_kdpzh_table)
el_list.append((el/10.).astype(int))
if not el_list:
warn('Unable to retrieve Zh bias using self-consistency. ' +
'No selfconsistency files for the radar elevations.')
return None, None
zdr_kdpzh_dict = {'zdr_kdpzh': zdr_kdpzh_list,
'elev': el_list,
'freq_band': freq_band}
else:
zdr_kdpzh_dict = {'zdr_kdpzh': None,
'elev': None,
'freq_band': freq_band}
dscfg['global_data'] = {'zdr_kdpzh_dict': zdr_kdpzh_dict}
if keep_points:
dscfg['global_data'].update({'selfconsistency_points': {
'zdr': [],
'kdp': [],
'zh': [],
'timeinfo': dscfg['timeinfo'],
'parametrization': parametrization,
'zdr_kdpzh_dict': zdr_kdpzh_dict
}})
dscfg['initialized'] = 1
if dscfg['initialized'] == 1:
# get user defined values
rsmooth = dscfg.get('rsmooth', 2000.)
min_rhohv = dscfg.get('min_rhohv', 0.92)
filter_rain = dscfg.get('filter_rain', True)
max_phidp = dscfg.get('max_phidp', 20.)
ml_thickness = dscfg.get('ml_thickness', 700.)
rcell = dscfg.get('rcell', 15000.)
dphidp_min = dscfg.get('dphidp_min', 2.)
dphidp_max = dscfg.get('dphidp_max', 16.)
check_wet_radome = dscfg.get('check_wet_radome', True)
wet_radome_refl = dscfg.get('wet_radome_refl', 25.)
wet_radome_rng_min = dscfg.get('wet_radome_rng_min', 2000.)
wet_radome_rng_max = dscfg.get('wet_radome_rng_max', 4000.)
wet_radome_ngates_min = dscfg.get('wet_radome_ngates_min', 180)
valid_gates_only = dscfg.get('valid_gates_only', False)
rkdp = dscfg.get('rkdp', 6000.)
r_res = radar.range['data'][1]-radar.range['data'][0]
smooth_wind_len = int(rsmooth/r_res)
kdp_wind_len = int(rkdp/r_res)
min_rcons = int(rcell/r_res)
wet_radome_len_min = int(wet_radome_rng_min/r_res)
wet_radome_len_max = int(wet_radome_rng_max/r_res)
refl_bias, selfconsistency_dict = pyart.correct.selfconsistency_bias(
radar, dscfg['global_data']['zdr_kdpzh_dict'],
min_rhohv=min_rhohv, filter_rain=filter_rain, max_phidp=max_phidp,
smooth_wind_len=smooth_wind_len, doc=15, fzl=fzl,
thickness=ml_thickness, min_rcons=min_rcons,
dphidp_min=dphidp_min, dphidp_max=dphidp_max,
parametrization=parametrization, refl_field=refl,
phidp_field=phidp, zdr_field=zdr, temp_field=temp,
iso0_field=iso0, hydro_field=hydro, rhohv_field=rhohv,
temp_ref=temp_ref, check_wet_radome=check_wet_radome,
wet_radome_refl=wet_radome_refl,
wet_radome_len_min=wet_radome_len_min,
wet_radome_len_max=wet_radome_len_max,
valid_gates_only=valid_gates_only, keep_points=keep_points,
kdp_wind_len=kdp_wind_len)
if keep_points:
if selfconsistency_dict is not None:
dscfg['global_data']['selfconsistency_points']['zdr'].extend(
selfconsistency_dict['zdr'])
dscfg['global_data']['selfconsistency_points']['zh'].extend(
selfconsistency_dict['zh'])
dscfg['global_data']['selfconsistency_points']['kdp'].extend(
selfconsistency_dict['kdp'])
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('reflectivity_bias', refl_bias)
return new_dataset, ind_rad
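# --- Illustrative sketch (editor's example, not part of pyrad) --------------
# The range-based keywords above (rsmooth, rkdp, rcell, wet_radome_rng_*)
# are converted to gate counts using the radar range resolution. With the
# documented defaults and a hypothetical 500 m gate spacing:
_EXAMPLE_R_RES = 500.                                 # m, assumed gate spacing
_EXAMPLE_GATE_WINDOWS = {
    'smooth_wind_len': int(2000./_EXAMPLE_R_RES),     # 4 gates
    'kdp_wind_len': int(6000./_EXAMPLE_R_RES),        # 12 gates
    'min_rcons': int(15000./_EXAMPLE_R_RES),          # 30 gates
    'wet_radome_len_min': int(2000./_EXAMPLE_R_RES),  # 4 gates
    'wet_radome_len_max': int(4000./_EXAMPLE_R_RES),  # 8 gates
}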
def process_selfconsistency_bias2(procstatus, dscfg, radar_list=None):
"""
Estimates the reflectivity bias by means of the selfconsistency
algorithm by Gourley
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
parametrization : str
The type of parametrization for the self-consistency curves. Can
be 'None', 'Gourley', 'Wolfensberger', 'Louf', 'Gorgucci' or
'Vaccarono'
'None' will use tables from config files. Default 'None'.
fzl : float. Dataset keyword
Default freezing level height. Default 2000.
rsmooth : float. Dataset keyword
length of the smoothing window [m]. Default 2000.
min_rhohv : float. Dataset keyword
minimum valid RhoHV. Default 0.92
filter_rain : Bool. Dataset keyword
If True the hydrometeor classification is used to filter out gates
that are not rain. Default True
max_phidp : float. Dataset keyword
maximum valid PhiDP [deg]. Default 20.
ml_thickness : float. Dataset keyword
Melting layer thickness [m]. Default 700.
rcell : float. Dataset keyword
length of continuous precipitation to consider the precipitation
cell a valid phidp segment [m]. Default 15000.
frequency : float. Dataset keyword
the radar frequency [Hz]. If None that of the key
frequency in attribute instrument_parameters of the radar
object will be used. If the key or the attribute are not present
the selfconsistency will not be computed
check_wet_radome : Bool. Dataset keyword
if True the average reflectivity of the closest gates to the radar
is going to be checked to find out whether there is rain over the
radome. If there is rain no bias will be computed. Default True.
wet_radome_refl : Float. Dataset keyword
Average reflectivity [dBZ] of the gates close to the radar to
consider the radome as wet. Default 25.
wet_radome_rng_min, wet_radome_rng_max : Float. Dataset keyword
Min and max range [m] of the disk around the radar used to compute
the average reflectivity to determine whether the radome is wet.
Default 2000 and 4000.
wet_radome_ngates_min : int
Minimum number of valid gates to consider that the radome is wet.
Default 180
keep_points : Bool
If True the ZDR, ZH and KDP of the gates used in the self-
consistency algorithm are going to be stored for further analysis.
Default False
bias_per_gate : Bool
If True the bias per gate will be computed
bias_type : str
Type of bias computed. Can be 'cumulative' (aggregated over the whole
processing period and returned at post-processing) or 'instant'
(bias of the current volume). Default 'cumulative'
provide_confidence : Bool
If True confidence samples of the bias are obtained by bootstrapping
the ratio between simulated and observed KDP. Default False
nsamples_confidence : int
Number of samples used in the bootstrapping. Default 1000
min_zdr : float. Dataset keyword
Minimum valid ZDR [dB]. Default 0.2
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus == 0:
return None, None
keep_points = dscfg.get('keep_points', False)
bias_type = dscfg.get('bias_type', 'cumulative')
provide_confidence = dscfg.get('provide_confidence', False)
nsamples_confidence = dscfg.get('nsamples_confidence', 1000)
if procstatus == 2:
if dscfg['initialized'] == 0:
return None, None
dataset = None
if bias_type == 'cumulative':
kdp_obs = np.ma.array(
dscfg['global_data']['kdp_data_dict']['kdp_obs'])
kdp_sim = np.ma.array(
dscfg['global_data']['kdp_data_dict']['kdp_sim'])
reflectivity_bias = {
'value': 10.*np.ma.log10(
np.ma.sum(kdp_sim)/np.ma.sum(kdp_obs)),
'npoints': kdp_obs.size,
'timeinfo': dscfg['global_data']['kdp_data_dict']['timeinfo'],
'bias_type': 'cumulative'}
if provide_confidence:
samples = ratio_bootstrapping(
kdp_sim, kdp_obs, nsamples=nsamples_confidence)
reflectivity_bias.update(
{'samples': 10.*np.ma.log10(samples)})
dataset = {'reflectivity_bias': reflectivity_bias}
if keep_points:
if dataset is None:
dataset = {'selfconsistency_points':
dscfg['global_data']['selfconsistency_points']}
else:
dataset.update(
{'selfconsistency_points':
dscfg['global_data']['selfconsistency_points']})
return dataset, None
temp = None
iso0 = None
hydro = None
phidp = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZc':
refl = 'corrected_reflectivity'
if datatype == 'dBZ':
refl = 'reflectivity'
if datatype == 'ZDRc':
zdr = 'corrected_differential_reflectivity'
if datatype == 'ZDR':
zdr = 'differential_reflectivity'
if datatype == 'PhiDPc':
phidp = 'corrected_differential_phase'
if datatype == 'PhiDP':
phidp = 'differential_phase'
if datatype == 'KDPc':
kdp = 'corrected_specific_differential_phase'
if datatype == 'KDP':
kdp = 'specific_differential_phase'
if datatype == 'TEMP':
temp = 'temperature'
if datatype == 'H_ISO0':
iso0 = 'height_over_iso0'
if datatype == 'hydro':
hydro = 'radar_echo_classification'
if datatype == 'RhoHV':
rhohv = 'cross_correlation_ratio'
if datatype == 'RhoHVc':
rhohv = 'corrected_cross_correlation_ratio'
if datatype == 'uRhoHV':
rhohv = 'uncorrected_cross_correlation_ratio'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl not in radar.fields) or
(zdr not in radar.fields) or
(kdp not in radar.fields) or
(rhohv not in radar.fields)):
warn('Unable to estimate reflectivity bias using selfconsistency. ' +
'Missing data')
return None, None
# determine which freezing level reference
if temp is not None:
if temp in radar.fields:
temp_ref = 'temperature'
else:
warn('COSMO temperature field not available. ' +
'Using fixed freezing level height to determine liquid phase')
temp_ref = 'fixed_fzl'
elif iso0 is not None:
if iso0 in radar.fields:
temp_ref = 'height_over_iso0'
else:
warn('Height over iso0 field not available. ' +
'Using fixed freezing level height to determine liquid phase')
temp_ref = 'fixed_fzl'
else:
warn('Field to obtain the freezing level was not specified. ' +
'Using fixed freezing level height')
temp_ref = 'fixed_fzl'
# determine freezing level height if necessary
fzl = None
if temp_ref == 'fixed_fzl':
if 'fzl' in dscfg:
fzl = dscfg['fzl']
else:
fzl = 2000.
warn('Freezing level height not defined. Using default ' +
str(fzl)+' m')
# get self-consistency parametrization or curves
parametrization = dscfg.get('parametrization', 'None')
if dscfg['initialized'] == 0:
# get frequency band
freq = dscfg.get('frequency', None)
if freq is None:
if (radar.instrument_parameters is not None and
'frequency' in radar.instrument_parameters):
freq = radar.instrument_parameters['frequency']['data'][0]
else:
warn('Unable to estimate Zh bias using ' +
'self-consistency. Unknown radar frequency')
return None, None
freq_band = pyart.retrieve.get_freq_band(freq)
if parametrization == 'None':
# find unique elevations
el_vec = np.unique(
(10.*np.round(radar.elevation['data'], decimals=1)).astype(int))
zdr_kdpzh_list = list()
el_list = list()
for el in el_vec:
fname = (
dscfg['configpath'] + 'selfconsistency/' +
'selfconsistency_zdr_zhkdp_'+freq_band+'band_temp10_elev' +
'{:03d}'.format(el)+'_mu05.txt')
zdr_kdpzh_table = read_selfconsistency(fname)
if zdr_kdpzh_table is not None:
zdr_kdpzh_list.append(zdr_kdpzh_table)
el_list.append((el/10.).astype(int))
if not el_list:
warn('Unable to retrieve Zh bias using self-consistency. ' +
'No selfconsistency files for the radar elevations.')
return None, None
zdr_kdpzh_dict = {'zdr_kdpzh': zdr_kdpzh_list,
'elev': el_list,
'freq_band': freq_band}
else:
zdr_kdpzh_dict = {'zdr_kdpzh': None,
'elev': None,
'freq_band': freq_band}
dscfg['global_data'] = {'zdr_kdpzh_dict': zdr_kdpzh_dict}
dscfg['global_data'].update({'kdp_data_dict': {
'kdp_obs': [],
'kdp_sim': [],
'timeinfo': dscfg['timeinfo']
}})
if keep_points:
dscfg['global_data'].update({'selfconsistency_points': {
'zdr': [],
'kdp': [],
'zh': [],
'timeinfo': dscfg['timeinfo'],
'parametrization': parametrization,
'zdr_kdpzh_dict': zdr_kdpzh_dict
}})
dscfg['initialized'] = 1
if dscfg['initialized'] == 1:
# get user defined values
rsmooth = dscfg.get('rsmooth', 2000.)
min_rhohv = dscfg.get('min_rhohv', 0.92)
min_zdr = dscfg.get('min_zdr', 0.2)
filter_rain = dscfg.get('filter_rain', True)
max_phidp = dscfg.get('max_phidp', 20.)
ml_thickness = dscfg.get('ml_thickness', 700.)
rcell = dscfg.get('rcell', 15000.)
check_wet_radome = dscfg.get('check_wet_radome', True)
wet_radome_refl = dscfg.get('wet_radome_refl', 25.)
wet_radome_rng_min = dscfg.get('wet_radome_rng_min', 2000.)
wet_radome_rng_max = dscfg.get('wet_radome_rng_max', 4000.)
wet_radome_ngates_min = dscfg.get('wet_radome_ngates_min', 180)
bias_per_gate = dscfg.get('bias_per_gate', False)
r_res = radar.range['data'][1]-radar.range['data'][0]
smooth_wind_len = int(rsmooth/r_res)
min_rcons = int(rcell/r_res)
wet_radome_len_min = int(wet_radome_rng_min/r_res)
wet_radome_len_max = int(wet_radome_rng_max/r_res)
kdp_data_dict, refl_bias, selfcons_dict = pyart.correct.selfconsistency_bias2(
radar, dscfg['global_data']['zdr_kdpzh_dict'],
min_rhohv=min_rhohv, min_zdr=min_zdr, filter_rain=filter_rain,
max_phidp=max_phidp, smooth_wind_len=smooth_wind_len, doc=15,
fzl=fzl, thickness=ml_thickness, min_rcons=min_rcons,
parametrization=parametrization, refl_field=refl,
phidp_field=phidp, zdr_field=zdr, temp_field=temp,
iso0_field=iso0, hydro_field=hydro, rhohv_field=rhohv,
kdp_field=kdp, temp_ref=temp_ref,
check_wet_radome=check_wet_radome,
wet_radome_refl=wet_radome_refl,
wet_radome_len_min=wet_radome_len_min,
wet_radome_len_max=wet_radome_len_max, keep_points=keep_points,
bias_per_gate=bias_per_gate)
if keep_points:
if selfcons_dict is not None:
dscfg['global_data']['selfconsistency_points']['zdr'].extend(
selfcons_dict['zdr'])
dscfg['global_data']['selfconsistency_points']['zh'].extend(
selfcons_dict['zh'])
dscfg['global_data']['selfconsistency_points']['kdp'].extend(
selfcons_dict['kdp'])
if kdp_data_dict is not None:
dscfg['global_data']['kdp_data_dict']['kdp_sim'].extend(
kdp_data_dict['kdp_sim'])
dscfg['global_data']['kdp_data_dict']['kdp_obs'].extend(
kdp_data_dict['kdp_obs'])
# prepare for exit
dataset = None
if bias_type == 'instant':
reflectivity_bias = {
'value': np.ma.masked,
'npoints': 0,
'timeinfo': dscfg['timeinfo'],
'bias_type': 'instant'}
if kdp_data_dict is not None:
kdp_obs = np.ma.array(kdp_data_dict['kdp_obs'])
kdp_sim = np.ma.array(kdp_data_dict['kdp_sim'])
reflectivity_bias['value'] = 10.*np.ma.log10(
np.ma.sum(kdp_sim)/np.ma.sum(kdp_obs))
reflectivity_bias['npoints'] = kdp_obs.size
if provide_confidence:
samples = ratio_bootstrapping(
kdp_sim, kdp_obs, iter=nsamples_confidence)
reflectivity_bias.update(
{'samples': 10.*np.ma.log10(samples)})
dataset = {'reflectivity_bias': reflectivity_bias}
if bias_per_gate:
if refl_bias is not None:
if dataset is None:
dataset = {'radar_out': deepcopy(radar)}
else:
dataset.update({'radar_out': deepcopy(radar)})
dataset['radar_out'].fields = dict()
dataset['radar_out'].add_field(
'reflectivity_bias', refl_bias)
return dataset, ind_rad
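# Editorial note: the 'instant' reflectivity bias computed above is
# 10*log10(sum(kdp_sim)/sum(kdp_obs)); e.g. a simulated KDP sum of 12.6 deg/km
# against an observed sum of 10.0 deg/km gives 10*log10(1.26) ~ +1.0 dB.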
def process_estimate_phidp0(procstatus, dscfg, radar_list=None):
"""
estimates the system differential phase offset at each ray
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
rmin : float. Dataset keyword
The minimum range where to look for valid data [m]
rmax : float. Dataset keyword
The maximum range where to look for valid data [m]
rcell : float. Dataset keyword
The length of a continuous cell to consider it valid precip [m]
Zmin : float. Dataset keyword
The minimum reflectivity [dBZ]
Zmax : float. Dataset keyword
The maximum reflectivity [dBZ]
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZ':
refl_field = 'reflectivity'
if datatype == 'dBZc':
refl_field = 'corrected_reflectivity'
if datatype == 'PhiDP':
psidp_field = 'differential_phase'
if datatype == 'PhiDPc':
psidp_field = 'corrected_differential_phase'
if datatype == 'uPhiDP':
psidp_field = 'uncorrected_differential_phase'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if (refl_field not in radar.fields) or (psidp_field not in radar.fields):
warn('Unable to estimate PhiDP system offset. Missing data')
return None, None
ind_rmin = np.where(radar.range['data'] > dscfg['rmin'])[0][0]
ind_rmax = np.where(radar.range['data'] < dscfg['rmax'])[0][-1]
r_res = radar.range['data'][1]-radar.range['data'][0]
min_rcons = int(dscfg['rcell']/r_res)
phidp0, first_gates = pyart.correct.det_sys_phase_ray(
radar, ind_rmin=ind_rmin, ind_rmax=ind_rmax, min_rcons=min_rcons,
zmin=dscfg['Zmin'], zmax=dscfg['Zmax'], phidp_field=psidp_field,
refl_field=refl_field)
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('system_differential_phase', phidp0)
new_dataset['radar_out'].add_field(
'first_gate_differential_phase', first_gates)
return new_dataset, ind_rad
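# Editorial note: an illustrative dataset configuration for
# process_estimate_phidp0 (the datatype descriptor strings and numeric values
# below are assumptions, not taken from an actual Pyrad configuration):
#     dscfg = {'datatype': ['RADAR001:dBZc', 'RADAR001:PhiDPc'],
#              'rmin': 1000., 'rmax': 50000., 'rcell': 1000.,
#              'Zmin': 20., 'Zmax': 40.}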
def process_rhohv_rain(procstatus, dscfg, radar_list=None):
"""
    Keeps only suitable data to evaluate the 80th percentile of RhoHV in rain
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
rmin : float. Dataset keyword
minimum range where to look for rain [m]. Default 1000.
rmax : float. Dataset keyword
maximum range where to look for rain [m]. Default 50000.
Zmin : float. Dataset keyword
minimum reflectivity to consider the bin as precipitation [dBZ].
Default 20.
Zmax : float. Dataset keyword
maximum reflectivity to consider the bin as precipitation [dBZ]
Default 40.
ml_thickness : float. Dataset keyword
assumed thickness of the melting layer. Default 700.
fzl : float. Dataset keyword
The default freezing level height. It will be used if no
temperature field name is specified or the temperature field is
not in the radar object. Default 2000.
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
temp_field = None
iso0_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'RhoHV':
rhohv_field = 'cross_correlation_ratio'
if datatype == 'RhoHVc':
rhohv_field = 'corrected_cross_correlation_ratio'
if datatype == 'uRhoHV':
rhohv_field = 'uncorrected_cross_correlation_ratio'
if datatype == 'dBZc':
refl_field = 'corrected_reflectivity'
if datatype == 'dBZ':
refl_field = 'reflectivity'
if datatype == 'TEMP':
temp_field = 'temperature'
if datatype == 'H_ISO0':
iso0_field = 'height_over_iso0'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl_field not in radar.fields) or
(rhohv_field not in radar.fields)):
warn('Unable to estimate RhoHV in rain. Missing data')
return None, None
# determine which freezing level reference
temp_ref = 'temperature'
if temp_field is None and iso0_field is None:
warn('Field to obtain the freezing level was not specified. ' +
'Using fixed freezing level height')
temp_ref = 'fixed_fzl'
elif temp_field is not None:
if temp_field not in radar.fields:
warn('COSMO temperature field not available. ' +
'Using fixed freezing level height')
temp_ref = 'fixed_fzl'
elif iso0_field is not None:
if iso0_field not in radar.fields:
warn('Height over iso0 field not available. ' +
'Using fixed freezing level height')
temp_ref = 'fixed_fzl'
else:
temp_ref = 'height_over_iso0'
# determine freezing level height if necessary
fzl = None
if temp_ref == 'fixed_fzl':
if 'fzl' in dscfg:
fzl = dscfg['fzl']
else:
fzl = 2000.
warn('Freezing level height not defined. Using default ' +
str(fzl)+' m')
# default values
rmin = 1000.
rmax = 50000.
zmin = 20.
zmax = 40.
thickness = 700.
# user defined values
if 'rmin' in dscfg:
rmin = dscfg['rmin']
if 'rmax' in dscfg:
rmax = dscfg['rmax']
if 'Zmin' in dscfg:
zmin = dscfg['Zmin']
if 'Zmax' in dscfg:
zmax = dscfg['Zmax']
if 'ml_thickness' in dscfg:
thickness = dscfg['ml_thickness']
ind_rmin = np.where(radar.range['data'] > rmin)[0][0]
ind_rmax = np.where(radar.range['data'] < rmax)[0][-1]
rhohv_rain = pyart.correct.est_rhohv_rain(
radar, ind_rmin=ind_rmin, ind_rmax=ind_rmax, zmin=zmin,
zmax=zmax, thickness=thickness, doc=15, fzl=fzl,
rhohv_field=rhohv_field, temp_field=temp_field, iso0_field=iso0_field,
refl_field=refl_field, temp_ref=temp_ref)
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(
'cross_correlation_ratio_in_rain', rhohv_rain)
return new_dataset, ind_rad
def process_zdr_precip(procstatus, dscfg, radar_list=None):
"""
Keeps only suitable data to evaluate the differential reflectivity in
moderate rain or precipitation (for vertical scans)
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
ml_filter : boolean. Dataset keyword
indicates if a filter on data in and above the melting layer is
applied. Default True.
rmin : float. Dataset keyword
minimum range where to look for rain [m]. Default 1000.
rmax : float. Dataset keyword
maximum range where to look for rain [m]. Default 50000.
Zmin : float. Dataset keyword
minimum reflectivity to consider the bin as precipitation [dBZ].
Default 20.
Zmax : float. Dataset keyword
maximum reflectivity to consider the bin as precipitation [dBZ]
Default 22.
RhoHVmin : float. Dataset keyword
minimum RhoHV to consider the bin as precipitation
Default 0.97
PhiDPmax : float. Dataset keyword
maximum PhiDP to consider the bin as precipitation [deg]
Default 10.
elmax : float. Dataset keyword
maximum elevation angle where to look for precipitation [deg]
Default None.
ml_thickness : float. Dataset keyword
assumed thickness of the melting layer. Default 700.
fzl : float. Dataset keyword
The default freezing level height. It will be used if no
temperature field name is specified or the temperature field is
not in the radar object. Default 2000.
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
temp_field = None
iso0_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'ZDR':
zdr_field = 'differential_reflectivity'
if datatype == 'ZDRc':
zdr_field = 'corrected_differential_reflectivity'
if datatype == 'PhiDP':
phidp_field = 'differential_phase'
if datatype == 'PhiDPc':
phidp_field = 'corrected_differential_phase'
if datatype == 'RhoHV':
rhohv_field = 'cross_correlation_ratio'
if datatype == 'RhoHVc':
rhohv_field = 'corrected_cross_correlation_ratio'
if datatype == 'uRhoHV':
rhohv_field = 'uncorrected_cross_correlation_ratio'
if datatype == 'dBZc':
refl_field = 'corrected_reflectivity'
if datatype == 'dBZ':
refl_field = 'reflectivity'
if datatype == 'TEMP':
temp_field = 'temperature'
if datatype == 'H_ISO0':
iso0_field = 'height_over_iso0'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl_field not in radar.fields) or
(rhohv_field not in radar.fields) or
(zdr_field not in radar.fields) or
(phidp_field not in radar.fields)):
warn('Unable to estimate ZDR in rain. Missing data')
return None, None
# if data in and above the melting layer has to be filtered determine the
# field to use
fzl = None
ml_filter = True
if 'ml_filter' in dscfg:
ml_filter = dscfg['ml_filter']
if ml_filter:
# determine which freezing level reference
temp_ref = 'temperature'
if temp_field is None and iso0_field is None:
warn('Field to obtain the freezing level was not specified. ' +
'Using fixed freezing level height')
temp_ref = 'fixed_fzl'
elif temp_field is not None:
if temp_field not in radar.fields:
warn('COSMO temperature field not available. ' +
'Using fixed freezing level height')
temp_ref = 'fixed_fzl'
elif iso0_field is not None:
if iso0_field not in radar.fields:
warn('Height over iso0 field not available. ' +
'Using fixed freezing level height')
temp_ref = 'fixed_fzl'
else:
temp_ref = 'height_over_iso0'
# determine freezing level height if necessary
if temp_ref == 'fixed_fzl':
if 'fzl' in dscfg:
fzl = dscfg['fzl']
else:
fzl = 2000.
warn('Freezing level height not defined. Using default ' +
str(fzl)+' m')
else:
temp_ref = None
# default values
rmin = 1000.
rmax = 50000.
zmin = 20.
zmax = 22.
rhohvmin = 0.97
phidpmax = 10.
elmax = None
thickness = 700.
# user defined values
if 'rmin' in dscfg:
rmin = dscfg['rmin']
if 'rmax' in dscfg:
rmax = dscfg['rmax']
if 'Zmin' in dscfg:
zmin = dscfg['Zmin']
if 'Zmax' in dscfg:
zmax = dscfg['Zmax']
if 'RhoHVmin' in dscfg:
rhohvmin = dscfg['RhoHVmin']
if 'PhiDPmax' in dscfg:
phidpmax = dscfg['PhiDPmax']
if 'elmax' in dscfg:
elmax = dscfg['elmax']
if 'ml_thickness' in dscfg:
thickness = dscfg['ml_thickness']
ind_rmin = np.where(radar.range['data'] > rmin)[0][0]
ind_rmax = np.where(radar.range['data'] < rmax)[0][-1]
zdr_precip = pyart.correct.est_zdr_precip(
radar, ind_rmin=ind_rmin, ind_rmax=ind_rmax, zmin=zmin,
zmax=zmax, rhohvmin=rhohvmin, phidpmax=phidpmax, elmax=elmax,
thickness=thickness, doc=15, fzl=fzl, zdr_field=zdr_field,
rhohv_field=rhohv_field, phidp_field=phidp_field,
temp_field=temp_field, iso0_field=iso0_field, refl_field=refl_field,
temp_ref=temp_ref)
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(
'differential_reflectivity_in_precipitation', zdr_precip)
return new_dataset, ind_rad
def process_zdr_snow(procstatus, dscfg, radar_list=None):
"""
Keeps only suitable data to evaluate the differential reflectivity in
snow
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
rmin : float. Dataset keyword
minimum range where to look for rain [m]. Default 1000.
rmax : float. Dataset keyword
maximum range where to look for rain [m]. Default 50000.
Zmin : float. Dataset keyword
minimum reflectivity to consider the bin as snow [dBZ].
Default 0.
Zmax : float. Dataset keyword
maximum reflectivity to consider the bin as snow [dBZ]
Default 30.
SNRmin : float. Dataset keyword
minimum SNR to consider the bin as snow [dB].
Default 10.
SNRmax : float. Dataset keyword
maximum SNR to consider the bin as snow [dB]
Default 50.
RhoHVmin : float. Dataset keyword
minimum RhoHV to consider the bin as snow
Default 0.97
PhiDPmax : float. Dataset keyword
maximum PhiDP to consider the bin as snow [deg]
Default 10.
elmax : float. Dataset keyword
maximum elevation angle where to look for snow [deg]
Default None.
KDPmax : float. Dataset keyword
maximum KDP to consider the bin as snow [deg]
Default None
TEMPmin : float. Dataset keyword
minimum temperature to consider the bin as snow [deg C].
Default None
TEMPmax : float. Dataset keyword
maximum temperature to consider the bin as snow [deg C]
Default None
hydroclass : list of ints. Dataset keyword
list of hydrometeor classes to keep for the analysis
Default [2] (dry snow)
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
temp_field = None
kdp_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'ZDR':
zdr_field = 'differential_reflectivity'
if datatype == 'ZDRc':
zdr_field = 'corrected_differential_reflectivity'
if datatype == 'PhiDP':
phidp_field = 'differential_phase'
if datatype == 'PhiDPc':
phidp_field = 'corrected_differential_phase'
if datatype == 'RhoHV':
rhohv_field = 'cross_correlation_ratio'
if datatype == 'RhoHVc':
rhohv_field = 'corrected_cross_correlation_ratio'
if datatype == 'uRhoHV':
rhohv_field = 'uncorrected_cross_correlation_ratio'
if datatype == 'dBZc':
refl_field = 'corrected_reflectivity'
if datatype == 'dBZ':
refl_field = 'reflectivity'
if datatype == 'TEMP':
temp_field = 'temperature'
        if datatype == 'KDP':
            kdp_field = 'specific_differential_phase'
        if datatype == 'KDPc':
            kdp_field = 'corrected_specific_differential_phase'
if datatype == 'SNRh':
snr_field = 'signal_to_noise_ratio_hh'
if datatype == 'SNRv':
snr_field = 'signal_to_noise_ratio_vv'
if datatype == 'hydro':
hydro_field = 'radar_echo_classification'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl_field not in radar.fields) or
(rhohv_field not in radar.fields) or
(zdr_field not in radar.fields) or
(phidp_field not in radar.fields) or
(hydro_field not in radar.fields)):
warn('Unable to estimate ZDR in snow. Missing data')
return None, None
# User defined values
rmin = dscfg.get('rmin', 1000.)
rmax = dscfg.get('rmax', 50000.)
zmin = dscfg.get('Zmin', 0.)
zmax = dscfg.get('Zmax', 30.)
snrmin = dscfg.get('SNRmin', 10.)
snrmax = dscfg.get('SNRmax', 50.)
rhohvmin = dscfg.get('RhoHVmin', 0.97)
phidpmax = dscfg.get('PhiDPmax', 10.)
elmax = dscfg.get('elmax', None)
kdpmax = dscfg.get('KDPmax', None)
tempmin = dscfg.get('TEMPmin', None)
tempmax = dscfg.get('TEMPmax', None)
hydroclass = dscfg.get('hydroclass', [2])
ind_rmin = np.where(radar.range['data'] > rmin)[0][0]
ind_rmax = np.where(radar.range['data'] < rmax)[0][-1]
zdr_snow = pyart.correct.est_zdr_snow(
radar, ind_rmin=ind_rmin, ind_rmax=ind_rmax, zmin=zmin, zmax=zmax,
snrmin=snrmin, snrmax=snrmax, rhohvmin=rhohvmin,
kept_values=hydroclass, phidpmax=phidpmax, kdpmax=kdpmax,
tempmin=tempmin, tempmax=tempmax, elmax=elmax, zdr_field=zdr_field,
rhohv_field=rhohv_field, phidp_field=phidp_field,
temp_field=temp_field, snr_field=snr_field, hydro_field=hydro_field,
kdp_field=kdp_field, refl_field=refl_field)
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(
'differential_reflectivity_in_snow', zdr_snow)
return new_dataset, ind_rad
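# Editorial note: every keyword of process_zdr_snow falls back to the default
# shown in the corresponding dscfg.get() call above, so an illustrative
# configuration only has to override what differs, e.g. (values assumed):
#     dscfg = {'datatype': [...], 'SNRmin': 15., 'RhoHVmin': 0.98,
#              'hydroclass': [2]}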
def process_monitoring(procstatus, dscfg, radar_list=None):
"""
computes monitoring statistics
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
step : float. Dataset keyword
The width of the histogram bin. Default is None. In that case the
default step in function get_histogram_bins is used
max_rays : int. Dataset keyword
The maximum number of rays per sweep used when computing the
            histogram. If set above 0, the number of rays per sweep will be
            checked and, if above max_rays, the last rays of the sweep will
            be removed
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : Radar
radar object containing histogram data
ind_rad : int
radar index
"""
if procstatus == 0:
return None, None
if procstatus == 1:
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if field_name not in radar.fields:
warn(field_name+' not available.')
return None, None
step = dscfg.get('step', None)
max_rays = dscfg.get('max_rays', 0)
bin_edges = get_histogram_bins(field_name, step=step)
nbins = len(bin_edges)-1
step = bin_edges[1]-bin_edges[0]
bin_centers = bin_edges[:-1]+step/2.
radar_aux = deepcopy(radar)
if max_rays > 0:
# remove excess of rays
ind_above_max = np.where(
radar.rays_per_sweep['data'] > max_rays)[0]
if ind_above_max.size > 0:
radar_aux.rays_per_sweep['data'][ind_above_max] = max_rays
for ind in ind_above_max:
excess_rays = radar.rays_per_sweep['data'][ind]-max_rays
radar_aux.sweep_end_ray_index['data'][ind:] -= (
excess_rays)
if ind < radar.nsweeps-1:
radar_aux.sweep_start_ray_index['data'][ind+1:] = (
radar_aux.sweep_end_ray_index['data'][ind:-1]+1)
radar_aux.nrays = np.sum(radar_aux.rays_per_sweep['data'])
radar_aux.fields[field_name]['data'] = np.ma.masked_all(
(radar_aux.nrays, radar_aux.ngates),
dtype=radar.fields[field_name]['data'].dtype)
radar_aux.azimuth['data'] = np.empty(
radar_aux.nrays, dtype=radar.azimuth['data'].dtype)
radar_aux.elevation['data'] = np.empty(
radar_aux.nrays, dtype=radar.elevation['data'].dtype)
radar_aux.time['data'] = np.empty(
radar_aux.nrays, dtype=radar.time['data'].dtype)
for sweep in range(radar.nsweeps):
ind_start_old = radar.sweep_start_ray_index['data'][sweep]
nrays_sweep = radar_aux.rays_per_sweep['data'][sweep]
ind_start_new = radar_aux.sweep_start_ray_index['data'][
sweep]
ind_end_new = radar_aux.sweep_end_ray_index['data'][sweep]
radar_aux.fields[field_name]['data'][
ind_start_new:ind_end_new+1, :] = (
radar.fields[field_name]['data'][
ind_start_old:ind_start_old+nrays_sweep, :])
radar_aux.azimuth['data'][ind_start_new:ind_end_new+1] = (
radar.azimuth['data'][
ind_start_old:ind_start_old+nrays_sweep])
radar_aux.elevation['data'][
ind_start_new:ind_end_new+1] = (
radar.elevation['data'][
ind_start_old:ind_start_old+nrays_sweep])
radar_aux.time['data'][ind_start_new:ind_end_new+1] = (
radar.time['data'][
ind_start_old:ind_start_old+nrays_sweep])
radar_hist = deepcopy(radar_aux)
radar_hist.fields = dict()
radar_hist.range['data'] = bin_centers
radar_hist.ngates = nbins
field_dict = pyart.config.get_metadata(field_name)
field_dict['data'] = np.ma.zeros((radar_aux.nrays, nbins), dtype=int)
field = deepcopy(radar_aux.fields[field_name]['data'])
# put gates with values off limits to limit
mask = np.ma.getmaskarray(field)
ind = np.where(np.logical_and(mask == False, field < bin_centers[0]))
field[ind] = bin_centers[0]
ind = np.where(np.logical_and(mask == False, field > bin_centers[-1]))
field[ind] = bin_centers[-1]
for ray in range(radar_aux.nrays):
field_dict['data'][ray, :], bin_edges = np.histogram(
field[ray, :].compressed(), bins=bin_edges)
radar_hist.add_field(field_name, field_dict)
start_time = pyart.graph.common.generate_radar_time_begin(radar_hist)
        # keep histogram in memory or add to existing histogram
if dscfg['initialized'] == 0:
dscfg['global_data'] = {'hist_obj': radar_hist,
'timeinfo': start_time}
dscfg['initialized'] = 1
else:
field_interp = interpol_field(
dscfg['global_data']['hist_obj'], radar_hist, field_name,
fill_value=0)
dscfg['global_data']['hist_obj'].fields[field_name]['data'] += (
field_interp['data'].filled(fill_value=0)).astype('int64')
# dscfg['global_data']['timeinfo'] = dscfg['timeinfo']
dataset = dict()
dataset.update({'hist_obj': radar_hist})
dataset.update({'hist_type': 'instant'})
dataset.update({'timeinfo': start_time})
return dataset, ind_rad
if procstatus == 2:
if dscfg['initialized'] == 0:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
dataset = dict()
dataset.update({'hist_obj': dscfg['global_data']['hist_obj']})
dataset.update({'hist_type': 'cumulative'})
dataset.update({'timeinfo': dscfg['global_data']['timeinfo']})
return dataset, ind_rad
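# Editorial note: in process_monitoring the bin centers are derived from the
# edges returned by get_histogram_bins, e.g. bin_edges [0., 0.5, 1.0] gives
# step 0.5 and bin_centers [0.25, 0.75]; unmasked gate values outside the
# edges are clipped to the first or last center before np.histogram is
# applied ray by ray.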
```
#### File: pyrad/prod/process_traj_products.py
```python
from warnings import warn
from ..io.io_aux import get_save_dir, make_filename
from ..io.timeseries import TimeSeries
from ..graph.plots import plot_pos
def generate_traj_product(traj, prdcfg):
"""
Generates trajectory products. Accepted product types:
'TRAJ_MAP': Plots the trajectory on a lat-lon map with the altitude
color coded
        'TRAJ_PLOT': Plots time series of the trajectory with respect to the
            radar elevation, azimuth or range
User defined parameters:
'datatype': str
The type of parameter: 'EL', 'AZ', or 'RANGE'
'TRAJ_TEXT': Writes the trajectory information in a csv file
Parameters
----------
traj : Trajectory object
prdcfg : dictionary of dictionaries
product configuration dictionary of dictionaries
Returns
-------
None
"""
dssavedir = prdcfg['dsname']
if 'dssavename' in prdcfg:
dssavedir = prdcfg['dssavename']
if prdcfg['type'] == 'TRAJ_PLOT':
timeinfo = traj.time_vector[0]
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=timeinfo)
ts = TimeSeries("", traj.time_vector,
timeformat="%Y-%m-%d %H:%M:%S.%f")
if prdcfg['datatype'] == 'EL':
fname = make_filename(
'ts', prdcfg['dstype'], 'TRAJ', prdcfg['imgformat'],
prdcfginfo="EL", timeinfo=timeinfo, timeformat='%Y%m%d%H%M%S',
runinfo=prdcfg['runinfo'])
ts.add_dataseries(
"Elevation", "Elevation", "deg",
traj.radar_list[0].elevation_vec)
ts.plot(savedir + fname[0])
elif prdcfg['datatype'] == 'AZ':
fname = make_filename(
'ts', prdcfg['dstype'], 'TRAJ', prdcfg['imgformat'],
prdcfginfo="AZ", timeinfo=timeinfo, timeformat='%Y%m%d%H%M%S',
runinfo=prdcfg['runinfo'])
ts.add_dataseries(
"Azimuth", "Azimuth", "deg", traj.radar_list[0].azimuth_vec)
ts.plot(savedir + fname[0])
elif prdcfg['datatype'] == 'RANGE':
fname = make_filename(
'ts', prdcfg['dstype'], 'TRAJ', prdcfg['imgformat'],
prdcfginfo="RANGE", timeinfo=timeinfo,
timeformat='%Y%m%d%H%M%S', runinfo=prdcfg['runinfo'])
ts.add_dataseries(
"Range", "Range", "m", traj.radar_list[0].range_vec)
ts.plot(savedir + fname[0])
else:
raise Exception("ERROR: Unknown datatype '%s' (dataset: '%s')" %
(prdcfg['datatype'], prdcfg['dsname']))
return None
if prdcfg['type'] == 'TRAJ_TEXT':
timeinfo = traj.time_vector[0]
savedir = get_save_dir(prdcfg['basepath'], prdcfg['procname'],
dssavedir, prdcfg['prdname'],
timeinfo=timeinfo)
fname = make_filename('ts', prdcfg['dstype'], 'TRAJ', ['csv'],
prdcfginfo=None, timeinfo=timeinfo,
timeformat='%Y%m%d%H%M%S',
runinfo=prdcfg['runinfo'])
description = ["Description:",
"Time series of a plane trajectory in radar "
"coordinates."]
ts = TimeSeries(description, traj.time_vector,
timeformat="%Y-%m-%d %H:%M:%S.%f")
ts.add_dataseries("Elevation", "Elevation", "deg",
traj.radar_list[0].elevation_vec)
ts.add_dataseries("Azimuth", "Azimuth", "deg",
traj.radar_list[0].azimuth_vec)
ts.add_dataseries("Range", "Range", "m", traj.radar_list[0].range_vec)
ts.add_dataseries("Absolute Speed", "Absolute Speed", "m/s",
traj.radar_list[0].v_abs)
ts.add_dataseries("Radial Speed", "Radial Speed", "m/s",
traj.radar_list[0].v_r)
ts.add_dataseries("Elevation Speed", "Elevation Speed", "deg/s",
traj.radar_list[0].v_el)
ts.add_dataseries("Azimuth Speed", "Azimuth Speed", "deg/s",
traj.radar_list[0].v_az)
ts.write(savedir + fname[0])
return None
    if prdcfg['type'] == 'TRAJ_MAP':  # Trajectory on a map
timeinfo = traj.time_vector[0]
savedir = get_save_dir(
prdcfg['basepath'], prdcfg['procname'], dssavedir,
prdcfg['prdname'], timeinfo=timeinfo)
fname = make_filename(
'ts', prdcfg['dstype'], 'TRAJ', prdcfg['imgformat'],
prdcfginfo="MAP", timeinfo=timeinfo, timeformat='%Y%m%d%H%M%S',
runinfo=prdcfg['runinfo'])
title = "Trajectory Starting at %s" % \
traj.time_vector[0].strftime("%Y-%m-%d")
fname_list = fname
for i, fname in enumerate(fname_list):
fname_list[i] = savedir+fname
# Get traj
lat = traj.wgs84_lat_deg
lon = traj.wgs84_lon_deg
alt = traj.wgs84_alt_m
plot_pos(
lat, lon, alt, fname_list, cb_label='Altitude [m]', titl=title,
save_fig=True)
return None
warn(' Unsupported product type: ' + prdcfg['type'])
return None
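# Editorial note: an illustrative product configuration for a 'TRAJ_PLOT'
# product (all paths and values below are assumptions):
#     prdcfg = {'type': 'TRAJ_PLOT', 'datatype': 'EL', 'dsname': 'trajectory',
#               'dstype': 'TRAJ', 'basepath': '/data/pyrad_products/',
#               'procname': 'traj_proc', 'prdname': 'el_plot',
#               'imgformat': ['png'], 'runinfo': 'TEST'}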
```
#### File: pyrad_proc/scripts/main_process_windmill_filt_hist_all.py
```python
import datetime
import argparse
import atexit
import os
import glob
from warnings import warn
import numpy as np
from pyrad.io import read_histogram, get_fieldname_pyart
from pyrad.io import write_histogram
from pyrad.graph import plot_histogram2, get_colobar_label
from pyart.config import get_metadata
print(__doc__)
def main():
"""
"""
# parse the arguments
parser = argparse.ArgumentParser(
description='Entry to Pyrad processing framework')
# keyword arguments
parser.add_argument(
'--database', type=str,
default='/store/msrad/radar/pyrad_products/',
help='base path to the radar data')
parser.add_argument(
'--datadirs', type=str,
default=(
'mals_sha_windmills_point_psr_filtered_WM1_20200304-20200311,'
'mals_sha_windmills_point_psr_filtered_WM1_20200312-20200315,'
'mals_sha_windmills_point_psr_filtered_WM1_20200316-20200320,'
'mals_sha_windmills_point_psr_filtered_WM1_20200321-20200325'),
help='directories containing data')
parser.add_argument(
'--datatypes', type=str,
default='dBuZ,dBuZv,rcs_h,rcs_v,uPhiDPu,RhoHVu,ZDRu,Vu,Wu',
        help='Data types. Comma separated')
parser.add_argument(
'--orientations', type=str,
default='0,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150,160,170,180,190,200,210,220,230,240,250,260,270,280,290,300,310,320,330,340,350',
        help='Orientation with respect to the radar')
parser.add_argument(
'--span', type=float, default=10.,
help='Span')
parser.add_argument(
'--vel_limit', type=float, default=0.,
help='Velocity limit')
args = parser.parse_args()
print("====== PYRAD windmill data processing started: %s" %
datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
atexit.register(_print_end_msg,
"====== PYRAD windmill data processing finished: ")
datadirs = args.datadirs.split(',')
datatypes = args.datatypes.split(',')
orientations = np.asarray(args.orientations.split(','), dtype=float)
speeds = ['speed_GT'+str(args.vel_limit), 'speed_LE'+str(args.vel_limit)]
scan_type = 'ppi'
for ori in orientations:
for speed in speeds:
for datatype in datatypes:
first_read = False
for datadir in datadirs:
# Read data time series files
flist = glob.glob(
args.database+datadir+'/'+datatype+'_TS/TS/' +
datatype+'_span'+str(args.span)+'_ori'+str(ori)+'_' +
speed+'_hist.csv')
if not flist:
continue
                    hist_aux, bin_edges_aux = read_histogram(flist[0])
if not first_read:
hist = hist_aux
bin_edges = bin_edges_aux
first_read = True
continue
hist += hist_aux
if not first_read:
warn('No files for orientation '+str(ori)+' and '+speed)
continue
# Histogram plots
field_name = get_fieldname_pyart(datatype)
field_dict = get_metadata(field_name)
fname = (
args.database+datatype+'_span'+str(args.span) +
'_ori'+str(ori)+'_'+speed+'_hist.png')
titl = (
datatype+' span '+str(args.span)+' ori '+str(ori)+' ' +
speed)
bin_centers = bin_edges[:-1]+((bin_edges[1]-bin_edges[0])/2.)
fname = plot_histogram2(
bin_centers, hist, [fname],
labelx=get_colobar_label(field_dict, field_name), titl=titl)
print('Plotted '+' '.join(fname))
fname = (
args.database+datatype+'_span'+str(args.span) +
'_ori'+str(ori)+'_'+speed+'_hist.csv')
fname = write_histogram(bin_edges, hist, fname)
print('Written '+fname)
def _print_end_msg(text):
"""
prints end message
Parameters
----------
text : str
the text to be printed
Returns
-------
Nothing
"""
print(text + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
# ---------------------------------------------------------
# Start main:
# ---------------------------------------------------------
if __name__ == "__main__":
main()
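# Editorial note: an illustrative invocation of this script (paths and values
# are hypothetical):
#     python main_process_windmill_filt_hist_all.py \
#         --database /store/msrad/radar/pyrad_products/ \
#         --datadirs dir_a,dir_b --datatypes dBuZ,ZDRu --vel_limit 0.5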
```
|
{
"source": "jfigura/digester",
"score": 2
}
|
#### File: digester/tests/test_job.py
```python
from unittest.mock import patch
from digester.job import run
@patch('digester.job.get_recently_played')
@patch('digester.job.send_email')
def test_run(send_email, get_recently_played):
run()
get_recently_played.assert_called()
send_email.assert_called()
```
#### File: digester/tests/test_report.py
```python
from datetime import datetime
from digester.report import make_report
from digester.spotify import SongPlayedRecord
def test_make_report():
date = datetime.fromisoformat("2022-02-09T21:48:00-05:30")
songs = [
SongPlayedRecord('Nirvana', 'Polly', '1', datetime.now()),
SongPlayedRecord('Oasis', 'Whatever', '2', datetime.now()),
]
report = make_report(date, songs)
assert report == '\n'.join([
'You played 2 songs on Wednesday, February 09, 2022.\n',
'Nirvana: Polly',
'Oasis: Whatever',
])
```
#### File: digester/tests/test_spotify.py
```python
import json
import os
from datetime import datetime, timedelta, timezone
from unittest.mock import MagicMock
import pytest
from digester.spotify import (get_recently_played, RecentlyPlayedMode,
SpotifyClient, create_oath_cache)
@pytest.fixture
def spotify_client():
mock = MagicMock(SpotifyClient)
mock.current_user_recently_played.return_value = {
'items': [
_item('<NAME>', 'Time', '1', datetime.now()),
_item('Oasis', 'Supersonic', '2', datetime.now()),
_item('ABC', 'Overtime', '3',
datetime.now() - timedelta(days=1)),
_item('DEF', 'Whatever', '4',
datetime.now() - timedelta(days=2)),
]
}
return mock
@pytest.fixture
def oauth_env():
value = 'dummy-refresh'
os.environ['SPOTIFY_REFRESH_TOKEN'] = value
yield value
del os.environ['SPOTIFY_REFRESH_TOKEN']
def test_get_recently_played_all(spotify_client):
results = get_recently_played(RecentlyPlayedMode.ALL, spotify_client)
_check_ids(results, ['1', '2', '3', '4'])
assert results[0].artist == '<NAME>'
assert results[0].name == 'Time'
assert results[0].track_id == '1'
assert datetime.now(tz=timezone.utc) - results[0].played_at < timedelta(
seconds=1)
def test_get_recently_played_today(spotify_client):
results = get_recently_played(RecentlyPlayedMode.TODAY, spotify_client)
_check_ids(results, ['1', '2'])
def test_get_recently_played_yesterday(spotify_client):
results = get_recently_played(RecentlyPlayedMode.YESTERDAY, spotify_client)
_check_ids(results, ['3'])
def test_create_oath_cache(oauth_env, tmp_path):
cache_path = tmp_path / '.cache'
create_oath_cache(cache_path)
cache = json.loads(cache_path.read_text())
assert cache == {
'refresh_token': oauth_env,
'scope': 'user-read-recently-played',
'expires_at': 0
}
def test_create_oath_cache_missing_env(tmp_path):
cache_path = tmp_path / '.cache'
with pytest.raises(ValueError):
create_oath_cache(cache_path)
def test_create_oath_cache_existing(tmp_path):
cache_path = tmp_path / '.cache'
cache_path.write_text('test')
create_oath_cache(cache_path)
assert cache_path.read_text() == 'test'
def _item(artist, track_name, track_id, played_at):
return {
'track': {
'artists': [{
'name': artist,
}],
'id': track_id,
'name': track_name,
},
'played_at': played_at.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
}
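# The dict built by _item above mirrors the subset of Spotify's
# "recently played" item payload used by the digester: artist name, track id,
# track name and the 'played_at' timestamp.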
def _check_ids(results, expected_ids):
assert [result.track_id for result in results] == expected_ids
```
|
{
"source": "jfilak/autorizator",
"score": 2
}
|
#### File: autorizator/autorizator/casbin_adapters.py
```python
from typing import List, NamedTuple, Set
import casbin.persist # type: ignore
from autorizator.errors import AutorizatorError
from autorizator.data_types import Role, RoleList, ActionList
class RoleActionPolicy(NamedTuple):
"""Policy definition"""
role: Role
# included roles
includes: RoleList
actions: ActionList
RoleActionPolicyList = List[RoleActionPolicy]
class RolePolicyDefinitionError(AutorizatorError):
"""Invalid policy definition"""
pass
class RoleActionPolicyAdapter(casbin.persist.Adapter):
"""The adapter"""
def __init__(self, role_policies: RoleActionPolicyList):
        known_roles: Set[Role] = set()
        for policy in role_policies:
            if policy.role in known_roles:
                raise RolePolicyDefinitionError(f'The role "{policy.role}" defined twice')
            if policy.includes:
                include = next((include for include in policy.includes if include not in known_roles), None)
                if include is not None:
                    raise RolePolicyDefinitionError(
                        f'The role "{include}" included in the role "{policy.role}" does not exist')
            known_roles.add(policy.role)
self._policies = list(role_policies)
def load_policy(self, model):
for policy in self._policies:
if policy.includes is not None:
for include in policy.includes:
model.model['g']['g'].policy.append([policy.role, include])
for action in policy.actions:
model.model['p']['p'].policy.append([policy.role, action])
# pylint: disable=unused-argument
def add_policy(self, sec, ptype, rule):
pass
# pylint: disable=unused-argument
def remove_policy(self, sec, ptype, rule):
pass
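# Editorial usage sketch: the adapter is meant to be handed to a casbin
# enforcer together with an RBAC model (the file name below is hypothetical):
#     adapter = RoleActionPolicyAdapter([
#         RoleActionPolicy(role='viewer', includes=None, actions=['read']),
#         RoleActionPolicy(role='admin', includes=['viewer'], actions=['write']),
#     ])
#     enforcer = casbin.Enforcer('rbac_model.conf', adapter)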
```
#### File: autorizator/autorizator/session_manager.py
```python
from abc import ABC, abstractmethod
from autorizator.data_types import SessionID, Login
from autorizator.errors import AutorizatorError
class SessionManagerError(AutorizatorError):
"""Session Manager errors"""
pass
class SessionManagerSessionError(SessionManagerError):
"""Cannot re-create session"""
def __init__(self, session_id, error_message):
super(SessionManagerSessionError, self).__init__()
self.session_id = session_id
self.error_message = error_message
def __str__(self):
return f'Session "{self.session_id}": {self.error_message}'
class SessionAlreadyExists(SessionManagerSessionError):
    """Cannot re-create an already existing session"""
    def __init__(self, session_id):
        super(SessionAlreadyExists, self).__init__(session_id, 'already exists')
class SessionNotFound(SessionManagerSessionError):
    """When the requested session is not known to the manager"""
    def __init__(self, session_id):
        super(SessionNotFound, self).__init__(session_id, 'not found')
class SessionIsClosed(SessionManagerSessionError):
    """When trying to perform an action on a closed session"""
    def __init__(self, session_id):
        super(SessionIsClosed, self).__init__(session_id, 'already closed')
class AbstractSessionManager(ABC):
"""Session managers deals with sessions"""
@abstractmethod
def open(self, session_id: SessionID, login: Login):
"""Creates a new session.
Raises:
autorizator.session_manager.SessionAlreadyExists: If the give session_id already exists.
"""
pass
@abstractmethod
def close(self, session_id: SessionID):
"""Marks the session closed.
Raises:
autorizator.session_manager.SessionNotFound: If the corresponding session is not know.
autorizator.session_manager.SessionIsClosed: If the corresponding session is already closed.
"""
pass
@abstractmethod
def read_session_login(self, session_id: SessionID) -> Login:
"""Returns the login associated with the give session
Raises:
autorizator.session_manager.SessionNotFound: If the corresponding session is not know.
autorizator.session_manager.SessionIsClosed: If the corresponding session is already closed.
"""
pass
```
#### File: tests/unit/test_autorizator.py
```python
import pytest
from unittest.mock import Mock
from autorizator import Autorizator, RoleActionPolicy
from autorizator.user_storage import UserStorageError
from autorizator.ldap_user_storage import LDAPUserStorage, LDAPUserAuth
import autorizator.mongodb_session_manager
@pytest.fixture
def autorizator(user_service, session_manager, policies):
return Autorizator(policies=policies, user_storage=user_service, session_manager=session_manager)
def assert_viewer_actions(session_id, autorizator, user_config):
for action in user_config.VIEWER_ACTIONS:
assert autorizator.check_user_authorization(session_id, action)
for action in user_config.SUPER_ACTIONS:
assert not autorizator.check_user_authorization(session_id, action)
assert user_config.VIEWER_ACTIONS == autorizator.enumerate_user_actions(session_id)
def test_check_all_roles_of_viewer(user_config, autorizator):
session_id = autorizator.open_session(user_config.VIEWER_LOGIN, user_config.VIEWER_PASSWORD)
assert_viewer_actions(session_id, autorizator, user_config)
autorizator.close_session(session_id)
def test_open_session_with_pin_for_viewer(user_config, autorizator):
session_id = autorizator.open_session_with_pin(user_config.VIEWER_PIN)
assert_viewer_actions(session_id, autorizator, user_config)
autorizator.close_session(session_id)
def test_open_session_with_pin_not_found(user_config, autorizator):
session_id = autorizator.open_session_with_pin('20380119031408')
assert session_id is None
def test_open_session_user_not_found(user_config, autorizator):
session_id = autorizator.open_session('pedro', 'chewing gum')
assert session_id is None
def test_open_session_role_error(user_config, session_manager, policies):
fake_us = Mock()
fake_us.authenticate = Mock()
fake_us.authenticate.return_value = True
fake_us.get_user_role = Mock()
fake_us.get_user_role.side_effect = UserStorageError()
autorizator = Autorizator(policies=policies, user_storage=fake_us, session_manager=session_manager)
session_id = autorizator.open_session('epic', 'success')
assert session_id is None
def test_open_open_close_check_viewer(user_config, autorizator):
session_id = autorizator.open_session(user_config.VIEWER_LOGIN, user_config.VIEWER_PASSWORD)
session_id_2 = autorizator.open_session(user_config.VIEWER_LOGIN, user_config.VIEWER_PASSWORD)
autorizator.close_session(session_id_2)
assert_viewer_actions(session_id, autorizator, user_config)
autorizator.close_session(session_id)
```
|
{
"source": "jfilak/hmtldirwalk",
"score": 3
}
|
#### File: jfilak/hmtldirwalk/htmldirwalk.py
```python
import os
import logging
import requests
from collections import deque
from html.parser import HTMLParser
class LinkHtmlParser(HTMLParser):
def __init__(self):
super(LinkHtmlParser, self).__init__()
self._listing_content = False
self._href = None
self.links = list()
def handle_starttag(self, tag, attrs):
self._href = None
logging.debug('Start {}'.format(tag))
if not self._listing_content:
self._listing_content = tag == 'hr'
if not self._listing_content:
return
if not tag in ('a', 'A'):
return
self._href = next((value for attr, value in attrs if attr == 'href'), None)
def handle_endtag(self, tag):
if tag in ('a', 'A'):
self._href = None
def handle_data(self, data):
if not self._href:
return
logging.debug('Handling {}'.format(data))
if data != 'Parent Directory':
logging.debug('Adding {}'.format(self._href))
self.links.append(self._href)
class HtmlDirWalker(object):
def __init__(self):
self._logger = logging.getLogger()
self._http = requests
self._parser = LinkHtmlParser
def set_logger(self, logger):
self._logger = logger
return self
def set_http_worker(self, worker):
self._http = worker
return self
def set_html_parser(self, parser):
self._parser = parser
return self
def _download(self, url):
self._logger.debug('Downloading {}'.format(url))
r = self._http.get(url)
if r.status_code >= 400:
raise RuntimeError('Cannot get {}: status {}'.format(url, r.status_code))
return r.text
def _parse(self, text):
logging.debug('Parsing ...')
html_parser = self._parser()
html_parser.feed(text)
return html_parser.links
def _directory_contents(self, url):
page = self._download(url)
links = self._parse(page)
dirs = list()
files = list()
for ref in links:
if ref[-1] == '/':
dirs.append(ref)
else:
files.append(ref)
return (dirs, files)
def walk(self, url):
dirs = deque(('', ))
while dirs:
root = dirs.pop()
content = self._directory_contents(os.path.join(url, root))
dirs.extendleft((os.path.join(root, d) for d in content[0]))
yield (root, content[0], content[1])
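# Editorial usage sketch (the URL below is hypothetical): walk an Apache-style
# directory listing and print the files found in each directory.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    for dirpath, dirnames, filenames in HtmlDirWalker().walk('http://example.com/pub/'):
        print(dirpath, filenames)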
```
|
{
"source": "jfilipedias/algorithms",
"score": 4
}
|
#### File: algorithms/sorts/insertion_sort.py
```python
def insertion_sort():
array = [44, 2, 5, 0, 15, 22]
print('Original array:', array, sep=' ')
for index in range(1, len(array)):
key = array[index]
current_index = index - 1
while (current_index >= 0 and array[current_index] > key):
array[current_index + 1] = array[current_index]
current_index -= 1
array[current_index + 1] = key
print('Sorted array:', array, sep=' ')
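# Editorial sketch: the same algorithm as a reusable function that sorts an
# arbitrary list in place and returns it.
def insertion_sort_list(values):
    for index in range(1, len(values)):
        key = values[index]
        current_index = index - 1
        while current_index >= 0 and values[current_index] > key:
            values[current_index + 1] = values[current_index]
            current_index -= 1
        values[current_index + 1] = key
    return values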
if __name__ == "__main__":
insertion_sort()
```
|
{
"source": "jfilipedias/bulk-renamer",
"score": 3
}
|
#### File: bulk-renamer/bulk_renamer/functions.py
```python
from __future__ import annotations
from pathlib import Path
from os import path
from random import randint
from rich import box
from rich.console import Console
from rich.table import Table
from typer import confirm, echo, prompt
console = Console()
def get_cwd_file_paths() -> list[Path]:
"""Return a list of all file paths from the current working directory."""
cwd = Path.cwd()
glob = cwd.glob("*")
files = filter(lambda x: x.is_file(), glob)
return files
def get_value_input(
new_value_message: str, old_value_message: str = ""
) -> tuple[str, str]:
"""Get the args values from the user input."""
old_value = prompt(old_value_message) if old_value_message else None
new_value = prompt(new_value_message) if new_value_message else None
return (new_value, old_value)
def rename_files(old_filenames: list[str], new_filenames: list[str]) -> None:
"""Rename a list of files from current working directory."""
if len(old_filenames) != len(new_filenames):
return
with console.status("Renaming files"):
for i in range(len(old_filenames)):
extension = path.splitext(old_filenames[i])[1]
temp_name = f"tempfile_{randint(1000, 9999)}{extension}"
file_path = Path(old_filenames[i])
file_path = file_path.rename(Path(temp_name))
file_path.rename(Path(new_filenames[i]))
console.print("All files have been renamed.")
def show_changes(old_filenames: list[str], new_filenames: list[str]) -> None:
"""Show a table with the filenames diffs"""
table = Table()
table.box = box.SIMPLE_HEAD
table.add_column("Current Filenames", header_style="bold cyan", style="cyan")
table.add_column("")
table.add_column("New Filenames", header_style="bold green", style="green")
arrows = [name.replace(name, "->") for name in old_filenames]
table.add_row("\n".join(old_filenames), "\n".join(arrows), "\n".join(new_filenames))
console.print(table)
def confirm_changes(old_filenames: list[str], new_filenames: list[str]) -> None:
"""Show the changes and ask the user to confirm the changes."""
show_changes(old_filenames, new_filenames)
if confirm("Are you sure you want to rename these files?", default=True):
rename_files(old_filenames, new_filenames)
else:
echo("Don't worry, no changes have been made.")
```
|
{
"source": "jfilipedias/dot-loops",
"score": 2
}
|
#### File: jfilipedias/dot-loops/__init__.py
```python
bl_info = {
"name" : "Dot Loops",
"author" : "<NAME>",
"description" : "Alternate selection of parallel loops starting from the active edges.",
"blender" : (2, 80, 0),
"version" : (1, 0, 0),
"category" : "Mesh"
}
import bpy
from bpy.types import Operator
from bpy.props import IntProperty
class DL_OT_SelectDotLoops(Operator):
"""Select parallel and interleaved loops starting from the active edge."""
bl_idname = "mesh.dotloop"
bl_label = "Dot Loops"
bl_options = {'REGISTER', 'UNDO'}
    step: IntProperty(
        name='Step',
        description='Step between selected edges.',
        default=1,
        min=1
    )
    selection: IntProperty(
        name='Selection',
        description='Number of selected loops between intervals.',
        default=1,
        min=1
    )
    offset: IntProperty(
        name='Offset',
        description='Offset from the start edge.',
        default=0
    )
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
bpy.ops.mesh.loop_multi_select(ring=True)
bpy.ops.mesh.select_nth(nth=self.step, skip=self.selection, offset=self.offset)
bpy.ops.mesh.loop_multi_select(ring=False)
return {'FINISHED'}
def dotloops_button(self, context):
layout = self.layout
layout.operator(DL_OT_SelectDotLoops.bl_idname)
def register():
bpy.utils.register_class(DL_OT_SelectDotLoops)
bpy.types.VIEW3D_MT_select_edit_mesh.append(dotloops_button)
def unregister():
bpy.utils.unregister_class(DL_OT_SelectDotLoops)
bpy.types.VIEW3D_MT_select_edit_mesh.remove(dotloops_button)
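# Once the add-on is enabled, the operator is exposed as "Dot Loops" in the
# Edit Mode Select menu (appended to VIEW3D_MT_select_edit_mesh above).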
if __name__ == "__main__":
register()
```
|
{
"source": "jfilipedias/InquirerPy",
"score": 3
}
|
#### File: InquirerPy/examples/example_checkbox.py
```python
from InquirerPy import inquirer, prompt
from InquirerPy.separator import Separator
def question1_choice(_):
return [
{"name": "Sydney", "value": "ap-southeast-2", "enabled": True},
{"name": "Singapore", "value": "ap-southeast-1", "enabled": False},
Separator(),
"us-east-1",
"us-west-1",
]
def question2_choice(_):
return [
{"enabled": False, "name": "Apple", "value": "Apple"},
{"enabled": False, "name": "Cherry", "value": "Cherry"},
{"enabled": False, "name": "Orange", "value": "Orange"},
{"enabled": False, "name": "Peach", "value": "Peach"},
{"enabled": False, "name": "Melon", "value": "Melon"},
{"enabled": False, "name": "Strawberry", "value": "Strawberry"},
{"enabled": False, "name": "Grapes", "value": "Grapes"},
]
def classic():
questions = [
{
"type": "checkbox",
"message": "Select regions:",
"choices": question1_choice,
"transformer": lambda result: "%s region%s selected"
% (len(result), "s" if len(result) > 1 else ""),
},
{
"type": "checkbox",
"message": "Pick your favourites:",
"choices": question2_choice,
"validate": lambda result: len(result) >= 1,
"invalid_message": "should be at least 1 selection",
"instruction": "(select at least 1)",
},
]
result = prompt(questions, vi_mode=True)
def alternate():
regions = inquirer.checkbox(
message="Select regions:",
choices=question1_choice,
transformer=lambda result: "%s region%s selected"
% (len(result), "s" if len(result) > 1 else ""),
).execute()
fruits = inquirer.checkbox(
message="Pick your favourites:",
choices=question2_choice,
validate=lambda result: len(result) >= 1,
invalid_message="should be at least 1 selection",
instruction="(select at least 1)",
).execute()
alternate()
# classic()
```
#### File: InquirerPy/examples/example_filepath.py
```python
from pathlib import Path
from InquirerPy import prompt, inquirer
from InquirerPy.validator import PathValidator
def classic():
questions = [
{
"type": "filepath",
"message": "Enter file to upload:",
"name": "location",
"default": str(Path.cwd()),
"validate": PathValidator(is_file=True, message="Input is not a file"),
"only_files": True,
},
{
"type": "filepath",
"message": "Enter path to download:",
"validate": PathValidator(is_dir=True, message="Input is not a directory"),
"name": "destination",
"only_directories": True,
},
]
result = prompt(questions)
def alternate():
src_path = inquirer.filepath(
message="Enter file to upload:",
default=str(Path.cwd()),
validate=PathValidator(is_file=True, message="Input is not a file"),
only_files=True,
).execute()
dest_path = inquirer.filepath(
message="Enter path to download:",
validate=PathValidator(is_dir=True, message="Input is not a directory"),
only_directories=True,
).execute()
alternate()
# classic()
```
#### File: InquirerPy/examples/example_input.py
```python
from InquirerPy import inquirer, prompt
from InquirerPy.validator import NumberValidator
def classic():
"""Classic syntax example."""
questions = [
{"type": "input", "message": "Enter your name:"},
{
"type": "input",
"message": "Which company would you like to apply:",
"completer": {
"Google": None,
"Facebook": None,
"Amazon": None,
"Netflix": None,
"Apple": None,
"Microsoft": None,
},
"multicolumn_complete": True,
},
{
"type": "input",
"message": "What's your salary expectation(k):",
"transformer": lambda result: "%sk" % result,
"filter": lambda result: int(result) * 1000,
"validate": NumberValidator(),
},
]
result = prompt(questions)
def alternate():
"""Alternate syntax example."""
name = inquirer.text(message="Enter your name:").execute()
company = inquirer.text(
message="Which company would you like to apply:",
completer={
"Google": None,
"Facebook": None,
"Amazon": None,
"Netflix": None,
"Apple": None,
"Microsoft": None,
},
multicolumn_complete=True,
).execute()
salary = inquirer.text(
message="What's your salary expectation(k):",
transformer=lambda result: "%sk" % result,
filter=lambda result: int(result) * 1000,
validate=NumberValidator(),
).execute()
# alternate()
classic()
```
#### File: InquirerPy/InquirerPy/base.py
```python
import os
import re
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, NamedTuple, Tuple, Union
from prompt_toolkit.application import Application
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.filters import IsDone
from prompt_toolkit.filters.base import Condition, FilterOrBool
from prompt_toolkit.key_binding.key_bindings import KeyBindings, KeyHandlerCallable
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout.containers import ConditionalContainer, HSplit, Window
from prompt_toolkit.layout.controls import FormattedTextControl
from prompt_toolkit.layout.dimension import Dimension, LayoutDimension
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.styles.style import Style
from prompt_toolkit.validation import ValidationError, Validator
from InquirerPy.enum import INQUIRERPY_KEYBOARD_INTERRUPT
from InquirerPy.exceptions import InvalidArgument, RequiredKeyNotFound
from InquirerPy.separator import Separator
from InquirerPy.utils import InquirerPyStyle, SessionResult, calculate_height, get_style
__all__ = [
"BaseSimplePrompt",
"BaseComplexPrompt",
"BaseListPrompt",
"InquirerPyUIControl",
]
class BaseSimplePrompt(ABC):
"""The base class for simple prompts.
    Inherit this class to create a simple prompt that leverages the
    `prompt_toolkit` PromptSession.
    Note: the PromptSession is not initialised in the constructor; it requires
    a call of `self.session = PromptSession(...)`.
:param message: the question message to display
:type message: Union[str, Callable[[SessionResult], str]]
:param style: the style dictionary to apply
:type style: InquirerPyStyle
    :param vi_mode: use vi keybindings for the prompt
    :type vi_mode: bool
:param qmark: the custom qmark to display infront of the question
:type qmark: str
:param validate: a callable or Validator instance to validate user input
:type validate: Union[Callable[[Any], bool], Validator]
:param invalid_message: message to display when input is invalid
:type invalid_message: str
:param transformer: a callable to transform the result, this is visual effect only
:type transformer: Callable[[Any], Any]
:param filter: a callable to filter the result, updating the user input before returning the result
:type filter: Callable[[Any], Any]
:param session_result: the current session result, this is used by callable message and choices
to generate dynamic values. If using alternate syntax, skip this value.
:type session_result: SessionResult
"""
def __init__(
self,
message: Union[str, Callable[[SessionResult], str]],
style: InquirerPyStyle = None,
vi_mode: bool = False,
qmark: str = "?",
validate: Union[Callable[[Any], bool], Validator] = None,
invalid_message: str = "Invalid input",
transformer: Callable[[Any], Any] = None,
filter: Callable[[Any], Any] = None,
session_result: SessionResult = None,
default: Any = "",
) -> None:
"""Construct the base class for simple prompts."""
self._result = session_result or {}
self._message = message if not isinstance(message, Callable) else message(self._result) # type: ignore
self._default = (
default if not isinstance(default, Callable) else default(self._result)
)
self._style = Style.from_dict(style.dict if style else get_style().dict)
self._qmark = qmark
self._status = {"answered": False, "result": None}
self._kb = KeyBindings()
self._lexer = "class:input"
self._transformer = transformer
self._filter = filter
self._editing_mode = (
EditingMode.VI
if vi_mode or bool(os.getenv("INQUIRERPY_VI_MODE", False))
else EditingMode.EMACS
)
if isinstance(validate, Validator):
self._validator = validate
else:
self._validator = Validator.from_callable(
validate if validate else lambda _: True,
invalid_message,
move_cursor_to_end=True,
)
@self._kb.add("c-c")
def _(event) -> None:
self.status["answered"] = True
self.status["result"] = INQUIRERPY_KEYBOARD_INTERRUPT
event.app.exit(result=INQUIRERPY_KEYBOARD_INTERRUPT)
@property
def status(self) -> Dict[str, Any]:
"""Get status value of the prompt."""
return self._status
@status.setter
def status(self, value) -> None:
"""Set status value of the prompt."""
self._status = value
def register_kb(
self, *keys: Union[Keys, str], filter: FilterOrBool = True
) -> Callable[[KeyHandlerCallable], KeyHandlerCallable]:
"""Decorate keybinding registration function.
        Format all alt-related keybindings.
        Since `prompt_toolkit` does not process alt-related keybindings
        directly, an "alt-ANY" binding has to be expressed as "escape" + "ANY".
        Check the keys arguments for alt-related keys and rewrite them to the
        escape form.
:param keys: keys to bind into the keybindings
:type keys: Union[Keys, str]
:param filter: condition of whether this keybinding should be active
:type filter: FilterOrBool
        :return: a decorator that should be applied to the function that is
            intended to be active when the keys are pressed
:rtype: Callable[[KeyHandlerCallable], KeyHandlerCallable]
"""
alt_pattern = re.compile(r"^alt-(.*)")
def decorator(func: KeyHandlerCallable) -> KeyHandlerCallable:
formatted_keys = []
for key in keys:
match = alt_pattern.match(key)
if match:
formatted_keys.append("escape")
formatted_keys.append(match.group(1))
else:
formatted_keys.append(key)
@self._kb.add(*formatted_keys, filter=filter)
def executable(event) -> None:
func(event)
return executable
return decorator
@abstractmethod
def _get_prompt_message(
self, pre_answer: Tuple[str, str], post_answer: Tuple[str, str]
) -> List[Tuple[str, str]]:
"""Return the formatted text to display in the prompt.
        Leveraging the nature of Dict in Python, we can dynamically update the
        prompt message of the PromptSession.
        This is useful to format/customize the user input for a better visual
        presentation.
:param pre_answer: the information to display before answering the question
:type pre_answer: Tuple[str, str]
:param post_answer: the information to display after answering the question
:type post_answer: Tuple[str, str]
:return: formatted text thats ready to be consumed by PromptSession
:rtype: List[Tuple[str, str]]
"""
display_message = []
if self.status["result"] == INQUIRERPY_KEYBOARD_INTERRUPT:
display_message.append(
("class:skipped", "%s %s " % (self._qmark, self._message))
)
else:
display_message.append(("class:questionmark", self._qmark))
display_message.append(("class:question", " %s" % self._message))
if self.status["answered"]:
display_message.append(
post_answer
if not self._transformer
else (
"class:answer",
" %s" % self._transformer(self.status["result"]),
)
)
else:
display_message.append(pre_answer)
return display_message
@abstractmethod
def execute(self) -> Any:
"""Abstractmethod to enforce a execute function is implemented for eaiser management.
All prompt instance require a execute call to initialised the `PromptSession` or `Application`.
This is being called in the resolver.
"""
pass
class InquirerPyUIControl(FormattedTextControl):
"""A UIControl class intended to be consumed by `prompt_toolkit` window.
Dynamically adapt to user input and update formatted text.
:param choices: list of choices to display as the content
:type choices: Union[Callable[[SessionResult], List[Any]], List[Any]],
:param default: default value, will impact the cursor position
:type default: Any
"""
def __init__(
self,
choices: Union[Callable[[SessionResult], List[Any]], List[Any]],
default: Any = None,
session_result: SessionResult = None,
) -> None:
"""Initialise choices and construct a FormattedTextControl object."""
self._session_result = session_result or {}
self._selected_choice_index: int = 0
self._choice_func = None
self._loading = False
self._raw_choices = []
self._default = (
default
if not isinstance(default, Callable)
else default(self._session_result)
)
if isinstance(choices, Callable):
self._loading = True
self._choices = []
self._choice_func = choices
else:
self._raw_choices = choices
self._choices = self._get_choices(choices, self._default) # type: ignore
self._safety_check()
self._format_choices()
super().__init__(self._get_formatted_choices)
def _retrieve_choices(self) -> None:
"""Retrieve the callable choices and format them.
        Should be called in the `after_render` hook of the `Application`.
"""
self._raw_choices = self._choice_func(self._session_result) # type: ignore
self.choices = self._get_choices(self._raw_choices, self._default)
self._loading = False
self._safety_check()
self._format_choices()
def _get_choices(self, choices: List[Any], default: Any) -> List[Dict[str, Any]]:
"""Process the raw user input choices and format it into dictionary.
:param choices: list of choices to display
:type choices: List[Union[str, Dict[str, Any]]]
        :param default: default value; this affects selected_choice_index
:type default: Any
:return: formatted choices
:rtype: List[Dict[str, Any]]
"""
processed_choices: List[Dict[str, Any]] = []
try:
for index, choice in enumerate(choices, start=0):
if isinstance(choice, dict):
if choice["value"] == default:
self.selected_choice_index = index
processed_choices.append(
{
"name": str(choice["name"]),
"value": choice["value"],
"enabled": False,
}
)
elif isinstance(choice, Separator):
if self.selected_choice_index == index:
self.selected_choice_index = (
self.selected_choice_index + 1
) % len(choices)
processed_choices.append(
{"name": str(choice), "value": choice, "enabled": False}
)
else:
if choice == default:
self.selected_choice_index = index
processed_choices.append(
{"name": str(choice), "value": choice, "enabled": False}
)
except KeyError:
            raise RequiredKeyNotFound(
                "dictionary choice requires a name key and a value key."
            )
return processed_choices
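    # Editor's illustration (not part of the original file): for example,
    # _get_choices(["a", {"name": "B", "value": "b"}], default="b") returns
    #   [{"name": "a", "value": "a", "enabled": False},
    #    {"name": "B", "value": "b", "enabled": False}]
    # and leaves selected_choice_index pointing at the dict choice.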
@property
def selected_choice_index(self) -> int:
"""Get current highlighted index."""
return self._selected_choice_index
@selected_choice_index.setter
def selected_choice_index(self, value) -> None:
"""Set index to highlight."""
self._selected_choice_index = value
@property
def choices(self) -> List[Dict[str, Any]]:
"""Get all processed choices."""
return self._choices
@choices.setter
def choices(self, value) -> None:
"""Set processed choices."""
self._choices = value
def _safety_check(self) -> None:
"""Validate choices, check empty or all Separator."""
if not self.choices:
raise InvalidArgument("choices cannot be empty.")
should_proceed: bool = False
for choice in self.choices:
if not isinstance(choice["value"], Separator):
should_proceed = True
break
if not should_proceed:
raise InvalidArgument(
"choices should contain content other than separator."
)
def _get_formatted_choices(self) -> List[Tuple[str, str]]:
"""Get all choices in formatted text format.
:return: a list of formatted choices
:rtype: List[Tuple[str, str]]
"""
display_choices = []
for index, choice in enumerate(self.choices):
if index == self.selected_choice_index:
display_choices += self._get_hover_text(choice)
else:
display_choices += self._get_normal_text(choice)
display_choices.append(("", "\n"))
if display_choices:
display_choices.pop()
return display_choices
@abstractmethod
def _format_choices(self) -> None:
"""Perform post processing on the choices.
Customise the choices after `self._get_choices` call.
"""
pass
@abstractmethod
def _get_hover_text(self, choice) -> List[Tuple[str, str]]:
"""Generate the formatted text for hovered choice.
:return: list of formatted text
:rtype: List[Tuple[str, str]]
"""
pass
@abstractmethod
def _get_normal_text(self, choice) -> List[Tuple[str, str]]:
"""Generate the formatted text for non-hovered choices.
:return: list of formatted text
        :rtype: List[Tuple[str, str]]
"""
pass
@property
def choice_count(self) -> int:
"""Get the choice count.
:return: total count of choices
:rtype: int
"""
return len(self.choices)
@property
def selection(self) -> Dict[str, Any]:
"""Get current selection value.
:return: a dictionary of name and value for the current pointed choice
:rtype: Dict[str, Any]
"""
return self.choices[self.selected_choice_index]
class FakeDocument(NamedTuple):
"""A fake `prompt_toolkit` document class.
    A workaround to allow a non-buffer type content_control to use the same
    `Validator` class.
"""
text: str
class BaseComplexPrompt(BaseSimplePrompt):
"""A base class to create a complex prompt using `prompt_toolkit` Application.
    This class does not create a `Layout` or an `Application`; it only contains helper
    functions for creating a more complex prompt than the `BaseSimplePrompt`.
Use `BaseListPrompt` to create a complex list prompt.
Reference parameters through `BaseListPrompt` or `FuzzyPrompt`.
"""
def __init__(
self,
message: Union[str, Callable[[SessionResult], str]],
style: InquirerPyStyle = None,
vi_mode: bool = False,
qmark: str = "?",
instruction: str = "",
transformer: Callable[[Any], Any] = None,
filter: Callable[[Any], Any] = None,
validate: Union[Callable[[Any], bool], Validator] = None,
invalid_message: str = "Invalid input",
multiselect: bool = False,
keybindings: Dict[str, List[Dict[str, Union[str, FilterOrBool]]]] = None,
session_result: SessionResult = None,
) -> None:
"""Initialise the Application with Layout and keybindings."""
if not keybindings:
keybindings = {}
super().__init__(
message=message,
style=style,
vi_mode=vi_mode,
qmark=qmark,
transformer=transformer,
filter=filter,
invalid_message=invalid_message,
validate=validate,
session_result=session_result,
)
self._content_control: InquirerPyUIControl
self._instruction = instruction
self._invalid_message = invalid_message
self._multiselect = multiselect
self._rendered = False
self._invalid = False
self._application: Application
@Condition
def is_multiselect() -> bool:
return self._multiselect
@Condition
def is_vim_edit() -> bool:
return self._editing_mode == EditingMode.VI
@Condition
def is_invalid() -> bool:
return self._invalid
@Condition
def is_loading() -> bool:
return self.content_control._loading
self._is_multiselect = is_multiselect
self._is_vim_edit = is_vim_edit
self._is_invalid = is_invalid
self._is_loading = is_loading
self._kb_maps = {
"down": [
{"key": "down"},
{"key": "c-n", "filter": ~self._is_vim_edit},
{"key": "j", "filter": self._is_vim_edit},
],
"up": [
{"key": "up"},
{"key": "c-p", "filter": ~self._is_vim_edit},
{"key": "k", "filter": self._is_vim_edit},
],
"toggle": [
{"key": "space"},
],
"toggle-down": [
{"key": Keys.Tab},
],
"toggle-up": [
{"key": Keys.BackTab},
],
"toggle-all": [
{"key": "alt-r"},
],
"toggle-all-true": [
{"key": "alt-a"},
],
"toggle-all-false": [],
**keybindings,
}
self._kb_func_lookup = {
"down": [{"func": self._handle_down}],
"up": [{"func": self._handle_up}],
"toggle": [{"func": self._toggle_choice}],
"toggle-down": [{"func": self._toggle_choice}, {"func": self._handle_down}],
"toggle-up": [{"func": self._toggle_choice}, {"func": self._handle_up}],
"toggle-all": [{"func": self._toggle_all}],
"toggle-all-true": [{"func": self._toggle_all, "args": [True]}],
"toggle-all-false": [{"func": self._toggle_all, "args": [False]}],
}
self._non_multiselect_action = {"down", "up"}
def keybinding_factory(keys, filter, action):
if not isinstance(keys, list):
keys = [keys]
if action not in self._non_multiselect_action:
filter = filter & self._multiselect
@self._register_kb(*keys, filter=filter)
def _(event):
for method in self._kb_func_lookup[action]:
method["func"](*method.get("args", []))
for key, item in self._kb_maps.items():
for kb in item:
keybinding_factory(kb["key"], kb.get("filter", True), key)
@self._register_kb("enter")
def _(event):
self._handle_enter(event)
def _register_kb(
self, *keys: Union[Keys, str], filter: FilterOrBool = True
) -> Callable[[KeyHandlerCallable], KeyHandlerCallable]:
"""Decorate keybinding registration function.
        Ensure that the invalid state is cleared on the next
        keybinding entered.
"""
def decorator(func: KeyHandlerCallable) -> KeyHandlerCallable:
@self.register_kb(*keys, filter=filter)
def executable(event):
if self._invalid:
self._invalid = False
func(event)
return executable
return decorator
def _after_render(self, _) -> None:
"""Render callable choices.
        A check on `self._rendered` is enforced because this event fires on every
        render and we only want it to run once.
"""
if not self._rendered:
self._rendered = True
if self.content_control._choice_func:
self.content_control._retrieve_choices()
def _get_prompt_message(self) -> List[Tuple[str, str]]:
"""Get the prompt message.
:return: list of formatted text
:rtype: List[Tuple[str, str]]
"""
pre_answer = ("class:instruction", " %s" % self.instruction)
post_answer = ("class:answer", " %s" % self.status["result"])
return super()._get_prompt_message(pre_answer, post_answer)
def execute(self, raise_keyboard_interrupt: bool = True) -> Any:
"""Execute the application and get the result.
        :param raise_keyboard_interrupt: raise a KeyboardInterrupt exception when the user hits 'c-c'
:type raise_keyboard_interrupt: bool
:return: user selected value
:rtype: Any
"""
result = self.application.run()
if result == INQUIRERPY_KEYBOARD_INTERRUPT:
if raise_keyboard_interrupt and not os.getenv(
"INQUIRERPY_NO_RAISE_KBI", False
):
raise KeyboardInterrupt
else:
result = None
if not self._filter:
return result
return self._filter(result)
@property
def instruction(self) -> str:
"""Instruction to display next to question.
:return: instruction text
:rtype: str
"""
return self._instruction
@property
def content_control(self) -> InquirerPyUIControl:
"""Get the content controller object.
Needs to be an instance of InquirerPyUIControl.
"""
if not self._content_control:
raise NotImplementedError
return self._content_control
@content_control.setter
def content_control(self, value: InquirerPyUIControl) -> None:
"""Setter of content_control."""
self._content_control = value
@property
def result_name(self) -> Any:
"""Get the result name of the application.
In multiselect scenario, return result as a list.
"""
if self._multiselect:
return [choice["name"] for choice in self.selected_choices]
else:
return self.content_control.selection["name"]
@property
def result_value(self) -> Any:
"""Get the result value of the application.
In multiselect scenario, return result as a list.
"""
if self._multiselect:
return [choice["value"] for choice in self.selected_choices]
else:
try:
return self.content_control.selection["value"]
except IndexError:
return ""
@property
def selected_choices(self) -> List[Any]:
"""Get all user selected choices.
:return: list of selected/enabled choices
:rtype: List[Any]
"""
def filter_choice(choice):
return not isinstance(choice, Separator) and choice["enabled"]
return list(filter(filter_choice, self.content_control.choices))
@property
def application(self) -> Application:
"""Get application.
Require `self._application` to be defined since this class
doesn't implement `Layout` and `Application`.
"""
if not self._application:
raise NotImplementedError
return self._application
@application.setter
def application(self, value: Application) -> None:
"""Setter for `self._application`."""
self._application = value
@abstractmethod
def _handle_enter(self, event) -> None:
"""Handle event when user input enter key."""
pass
@abstractmethod
def _handle_down(self) -> None:
"""Handle event when user attempting to move down."""
pass
@abstractmethod
def _handle_up(self) -> None:
"""Handle event when user attempting to move down."""
pass
@abstractmethod
def _toggle_choice(self) -> None:
"""Handle event when user attempting to toggle the state of the chocie."""
pass
@abstractmethod
def _toggle_all(self, value: bool) -> None:
"""Handle event when user attempting to alter the state of all choices."""
pass
class BaseListPrompt(BaseComplexPrompt):
"""A base class to create a complex prompt using `prompt_toolkit` Application.
    Consists of two horizontally split windows: one holds the question and the second
    window is responsible for dynamically generating the content.
    Upon entering the answer, the first window's formatted text is updated.
:param message: question to display to the user
:type message: Union[str, Callable[[SessionResult], str]]
:param style: style to apply to the prompt
:type style: InquirerPyStyle
:param vi_mode: use vi kb for the prompt
:type vi_mode: bool
:param qmark: question mark to display
:type qmark: str
:param instruction: instruction to display after the question message
:type instruction: str
:param transformer: a callable to transform the result, this is visual effect only
:type transformer: Callable[[Any], Any]
:param filter: a callable to filter the result, updating the user input before returning the result
:type filter: Callable[[Any], Any]
:param height: preferred height of the choice window
:type height: Union[str, int]
:param max_height: max height choice window should reach
:type max_height: Union[str, int]
:param validate: a callable or Validator instance to validate user selection
:type validate: Union[Callable[[Any], bool], Validator]
:param invalid_message: message to display when input is invalid
:type invalid_message: str
:param multiselect: enable multiselect mode
:type multiselect: bool
:param keybindings: custom keybindings to apply
:type keybindings: Dict[str, List[Dict[str, Union[str, FilterOrBool]]]]
:param show_cursor: display cursor at the end of the prompt
:type show_cursor: bool
"""
def __init__(
self,
message: Union[str, Callable[[SessionResult], str]],
style: InquirerPyStyle = None,
vi_mode: bool = False,
qmark: str = "?",
instruction: str = "",
transformer: Callable[[Any], Any] = None,
filter: Callable[[Any], Any] = None,
height: Union[int, str] = None,
max_height: Union[int, str] = None,
validate: Union[Callable[[Any], bool], Validator] = None,
invalid_message: str = "Invalid input",
multiselect: bool = False,
keybindings: Dict[str, List[Dict[str, Union[str, FilterOrBool]]]] = None,
show_cursor: bool = True,
session_result: SessionResult = None,
) -> None:
"""Initialise the Application with Layout and keybindings."""
super().__init__(
message=message,
style=style,
vi_mode=vi_mode,
qmark=qmark,
transformer=transformer,
filter=filter,
invalid_message=invalid_message,
validate=validate,
multiselect=multiselect,
instruction=instruction,
keybindings=keybindings,
session_result=session_result,
)
self._dimmension_height, self._dimmension_max_height = calculate_height(
height, max_height
)
self.layout = HSplit(
[
Window(
height=LayoutDimension.exact(1),
content=FormattedTextControl(
self._get_prompt_message_with_cursor
if show_cursor
else self._get_prompt_message,
show_cursor=show_cursor,
),
),
ConditionalContainer(
Window(
content=self.content_control,
height=Dimension(
max=self._dimmension_max_height,
preferred=self._dimmension_height,
),
dont_extend_height=True,
),
filter=~IsDone() & ~self._is_loading,
),
ConditionalContainer(
Window(FormattedTextControl([("", "")])),
filter=~IsDone(), # force validation bar to stay bottom
),
ConditionalContainer(
Window(
FormattedTextControl(
[
(
"class:validation-toolbar",
self._invalid_message,
)
]
),
dont_extend_height=True,
),
filter=self._is_invalid & ~IsDone(),
),
]
)
self.application = Application(
layout=Layout(self.layout),
style=self._style,
key_bindings=self._kb,
after_render=self._after_render,
)
def _get_prompt_message_with_cursor(self) -> List[Tuple[str, str]]:
"""Obtain the prompt message to display.
Introduced a new method instead of using the `_get_prompt_message`
due to `expand` and `rawlist` make changes after calling `super()._get_prompt_message()`.
This ensures that cursor is always at the end of the window no matter
when the changes is made to the `_get_prompt_message`.
"""
message = self._get_prompt_message()
message.append(("[SetCursorPosition]", ""))
message.append(("", " ")) # [SetCursorPosition] require char behind it
return message
def _toggle_choice(self) -> None:
"""Toggle the `enabled` status of the choice."""
self.content_control.selection["enabled"] = not self.content_control.selection[
"enabled"
]
def _toggle_all(self, value: bool = None) -> None:
"""Toggle all choice `enabled` status.
        :param value: specify a value to toggle
:type value: bool
"""
for choice in self.content_control.choices:
if isinstance(choice["value"], Separator):
continue
choice["enabled"] = value if value else not choice["enabled"]
def _handle_up(self) -> None:
"""Handle the event when user attempt to move up."""
while True:
self.content_control.selected_choice_index = (
self.content_control.selected_choice_index - 1
) % self.content_control.choice_count
if not isinstance(self.content_control.selection["value"], Separator):
break
def _handle_down(self) -> None:
"""Handle the event when user attempt to move down."""
while True:
self.content_control.selected_choice_index = (
self.content_control.selected_choice_index + 1
) % self.content_control.choice_count
if not isinstance(self.content_control.selection["value"], Separator):
break
def _handle_enter(self, event) -> None:
"""Handle the event when user hit Enter key.
* Set the state to answered for an update to the prompt display.
* Set the result to user selected choice's name for display purpose.
* Let the app exit with the user selected choice's value and return the actual value back to resolver.
In multiselect scenario, if nothing is selected, return the current highlighted choice.
"""
try:
fake_document = FakeDocument(self.result_value)
self._validator.validate(fake_document) # type: ignore
except ValidationError:
self._invalid = True
else:
self.status["answered"] = True
if self._multiselect and not self.selected_choices:
self.status["result"] = [self.content_control.selection["name"]]
event.app.exit(result=[self.content_control.selection["value"]])
else:
self.status["result"] = self.result_name
event.app.exit(result=self.result_value)
```
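The `keybindings` parameter of `BaseComplexPrompt` maps action names (the keys of `_kb_maps` above) to lists of key definitions, each a dict with a `key` and an optional `filter`. Below is a minimal sketch of the expected shape, assuming the action names shown above; the exact set of supported actions depends on the prompt type.
```python
# Hypothetical keybindings override, mirroring the structure of
# BaseComplexPrompt._kb_maps; "alt-" prefixed keys are rewritten by
# register_kb into ("escape", <key>).
custom_keybindings = {
    "down": [
        {"key": "c-j"},  # extra "move down" binding
    ],
    "toggle-all-true": [
        {"key": "alt-a"},  # becomes ("escape", "a")
    ],
}
```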
#### File: InquirerPy/prompts/checkbox.py
```python
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from prompt_toolkit.validation import ValidationError, Validator
from InquirerPy.base import BaseListPrompt, FakeDocument, InquirerPyUIControl
from InquirerPy.enum import (
INQUIRERPY_EMPTY_HEX_SEQUENCE,
INQUIRERPY_FILL_HEX_SEQUENCE,
INQUIRERPY_POINTER_SEQUENCE,
)
from InquirerPy.separator import Separator
from InquirerPy.utils import InquirerPyStyle, SessionResult
class InquirerPyCheckboxControl(InquirerPyUIControl):
"""A UIControl class intended to be used by `prompt_toolkit` window.
    Used to dynamically update the content and indicate the current user selection.
:param choices: a list of choices to display
:type choices: Union[Callable[[SessionResult], List[Any]], List[Any]],
:param default: default value for selection
:type default: Any
:param pointer: the pointer to display, indicating current line, default is unicode ">"
:type pointer: str
    :param enabled_symbol: the symbol to indicate selected choices
    :type enabled_symbol: str
    :param disabled_symbol: the symbol to indicate unselected choices
:type disabled_symbol: str
"""
def __init__(
self,
choices: Union[Callable[[SessionResult], List[Any]], List[Any]],
default: Any = None,
pointer: str = INQUIRERPY_POINTER_SEQUENCE,
enabled_symbol: str = INQUIRERPY_FILL_HEX_SEQUENCE,
disabled_symbol: str = INQUIRERPY_EMPTY_HEX_SEQUENCE,
session_result: Optional[SessionResult] = None,
) -> None:
"""Initialise required attributes and call base class."""
self._pointer = "%s " % pointer
self._enabled_symbol = enabled_symbol
self._disabled_symbol = disabled_symbol
super().__init__(
choices=choices, default=default, session_result=session_result
)
def _format_choices(self) -> None:
for raw_choice, choice in zip(self._raw_choices, self.choices): # type: ignore
if isinstance(raw_choice, dict):
choice["enabled"] = raw_choice.get("enabled", False)
else:
choice["enabled"] = False
def _get_hover_text(self, choice) -> List[Tuple[str, str]]:
display_choices = []
display_choices.append(("class:pointer", self._pointer))
if not isinstance(choice["value"], Separator):
display_choices.append(
(
"class:checkbox",
"%s " % self._enabled_symbol
if choice["enabled"]
else "%s " % self._disabled_symbol,
)
)
display_choices.append(("[SetCursorPosition]", ""))
display_choices.append(("class:pointer", choice["name"]))
return display_choices
def _get_normal_text(self, choice) -> List[Tuple[str, str]]:
display_choices = []
display_choices.append(("", len(self._pointer) * " "))
if not isinstance(choice["value"], Separator):
display_choices.append(
(
"class:checkbox",
"%s " % self._enabled_symbol
if choice["enabled"]
else "%s " % self._disabled_symbol,
)
)
display_choices.append(("", choice["name"]))
else:
display_choices.append(("class:separator", choice["name"]))
return display_choices
class CheckboxPrompt(BaseListPrompt):
"""A wrapper class around `prompt_toolkit` Application to create a checkbox prompt.
:param message: message to display
:type message: Union[str, Callable[[SessionResult], str]]
:param choices: list of choices to display
:type choices: Union[Callable[[SessionResult], List[Any]], List[Any]],
:param default: default value
:type default: Any
:param style: a dictionary of style
:type style: InquirerPyStyle
:param vi_mode: use vi kb for the prompt
:type vi_mode: bool
:param qmark: question qmark to display
:type qmark: str
    :param pointer: the pointer symbol to display
    :type pointer: str
    :param enabled_symbol: symbol indicating an enabled box
    :type enabled_symbol: str
    :param disabled_symbol: symbol indicating a disabled box
:type disabled_symbol: str
:param instruction: instruction to display after the message
:type instruction: str
:param transformer: a callable to transform the result, this is visual effect only
:type transformer: Callable[[Any], Any]
:param filter: a callable to filter the result, updating the user input before returning the result
:type filter: Callable[[Any], Any]
:param height: preferred height of the choice window
:type height: Union[str, int]
:param max_height: max height choice window should reach
:type max_height: Union[str, int]
:param validate: a callable or Validator instance to validate user selection
:type validate: Union[Callable[[Any], bool], Validator]
:param invalid_message: message to display when input is invalid
:type invalid_message: str
:param keybindings: custom keybindings to apply
:type keybindings: Dict[str, List[Dict[str, Any]]]
:param show_cursor: display cursor at the end of the prompt
:type show_cursor: bool
"""
def __init__(
self,
message: Union[str, Callable[[SessionResult], str]],
choices: Union[Callable[[SessionResult], List[Any]], List[Any]],
default: Any = None,
style: InquirerPyStyle = None,
vi_mode: bool = False,
qmark: str = "?",
pointer: str = INQUIRERPY_POINTER_SEQUENCE,
enabled_symbol: str = INQUIRERPY_FILL_HEX_SEQUENCE,
disabled_symbol: str = INQUIRERPY_EMPTY_HEX_SEQUENCE,
instruction: str = "",
transformer: Callable[[Any], Any] = None,
filter: Callable[[Any], Any] = None,
height: Union[int, str] = None,
max_height: Union[int, str] = None,
validate: Union[Callable[[Any], bool], Validator] = None,
invalid_message: str = "Invalid input",
keybindings: Dict[str, List[Dict[str, Any]]] = None,
show_cursor: bool = True,
session_result: SessionResult = None,
) -> None:
"""Initialise the content_control and create Application."""
self.content_control = InquirerPyCheckboxControl(
choices=choices,
default=default,
pointer=pointer,
enabled_symbol=enabled_symbol,
disabled_symbol=disabled_symbol,
session_result=session_result,
)
super().__init__(
message=message,
style=style,
vi_mode=vi_mode,
qmark=qmark,
instruction=instruction,
transformer=transformer,
filter=filter,
height=height,
max_height=max_height,
validate=validate,
invalid_message=invalid_message,
multiselect=True,
keybindings=keybindings,
show_cursor=show_cursor,
session_result=session_result,
)
def _handle_enter(self, event) -> None:
"""Override this method to force empty array result.
When user does not select anything, exit with empty list.
"""
try:
fake_document = FakeDocument(self.result_value)
self._validator.validate(fake_document) # type: ignore
except ValidationError:
self._invalid = True
else:
self.status["answered"] = True
self.status["result"] = self.result_name
event.app.exit(result=self.result_value)
```
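A minimal usage sketch of `CheckboxPrompt`, assuming it runs in an interactive terminal; the dictionary choices use the `enabled` key that `_format_choices` reads to pre-select entries, and `execute()` returns the list of selected values.
```python
from InquirerPy.prompts.checkbox import CheckboxPrompt
from InquirerPy.separator import Separator

prompt = CheckboxPrompt(
    message="Select toppings:",
    choices=[
        {"name": "Cheese", "value": "cheese", "enabled": True},  # pre-selected
        {"name": "Ham", "value": "ham"},
        Separator(),
        "Mushroom",
    ],
    transformer=lambda result: "%s topping(s) selected" % len(result),
)
result = prompt.execute()  # list of the selected values
```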
#### File: InquirerPy/prompts/filepath.py
```python
import os
from pathlib import Path
from typing import Any, Callable, Generator, Union
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.completion.base import ThreadedCompleter
from prompt_toolkit.validation import Validator
from InquirerPy.exceptions import InvalidArgument
from InquirerPy.prompts.input import InputPrompt
from InquirerPy.utils import InquirerPyStyle, SessionResult
class FilePathCompleter(Completer):
"""An auto completion class used for prompt session.
The class structure is defined by prompt_toolkit and is only intended to be used by PromptSession.
    :param only_directories: complete directories only
    :type only_directories: bool
    :param only_files: complete files only
    :type only_files: bool
"""
def __init__(self, only_directories: bool = False, only_files: bool = False):
"""Set base params."""
self._only_directories = only_directories
self._only_files = only_files
def get_completions(
self, document, complete_event
) -> Generator[Completion, None, None]:
"""Return a completion item (valid file path)."""
if document.text == "~":
return
validation = lambda file, doc_text: str(file).startswith(doc_text)
if document.cursor_position == 0:
dirname = Path.cwd()
validation = lambda file, doc_text: True
elif document.text.startswith("~"):
dirname = Path(os.path.dirname("%s%s" % (Path.home(), document.text[1:])))
validation = lambda file, doc_text: str(file).startswith(
"%s%s" % (Path.home(), doc_text[1:])
)
elif document.text.startswith("./"):
dirname = Path(os.path.dirname(document.text))
validation = lambda file, doc_text: str(file).startswith(doc_text[2:])
else:
dirname = Path(os.path.dirname(document.text))
for item in self._get_completion(document, dirname, validation):
yield item
def _get_completion(
self, document, path, validation
) -> Generator[Completion, None, None]:
"""Return filepaths based on user input path."""
if not path.is_dir():
return
for file in path.iterdir():
if self._only_directories and not file.is_dir():
continue
if self._only_files and not file.is_file():
continue
if validation(file, document.text):
file_name = file.name
display_name = file_name
if file.is_dir():
display_name = "%s/" % file_name
yield Completion(
"%s" % file.name,
start_position=-1 * len(os.path.basename(document.text)),
display=display_name,
)
class FilePathPrompt(InputPrompt):
"""A wrapper class around PromptSession.
This class is used for filepath prompt.
:param message: the question to ask
:type message: Union[str, Callable[[SessionResult], str]]
:param style: a dictionary of style to apply
:type style: InquirerPyStyle
:param vi_mode: use vi kb for the prompt
:type vi_mode: bool
:param default: the default result
:type default: Union[str, Callable[[SessionResult], str]]
:param qmark: question qmark to display
:type qmark: str
:param multicolumn_complete: complete in multi column
:type multicolumn_complete: bool
:param validate: a callable or a validation class to validate user input
:type validate: Union[Callable[[str], bool], Validator]
:param invalid_message: the error message to display when input is invalid
:type invalid_message: str
:param only_directories: only complete directories
:type only_directories: bool
:param only_files: only complete files
:type only_files: bool
:param transformer: a callable to transform the result, this is visual effect only
:type transformer: Callable[[str], Any]
:param filter: a callable to filter the result, updating the user input before returning the result
:type filter: Callable[[str], Any]
"""
def __init__(
self,
message: Union[str, Callable[[SessionResult], str]],
style: InquirerPyStyle = None,
vi_mode: bool = False,
default: Union[str, Callable[[SessionResult], str]] = "",
qmark: str = "?",
multicolumn_complete: bool = False,
validate: Union[Callable[[str], bool], Validator] = None,
invalid_message: str = "Invalid input",
only_directories: bool = False,
only_files: bool = False,
transformer: Callable[[str], Any] = None,
filter: Callable[[str], Any] = None,
session_result: SessionResult = None,
**kwargs,
) -> None:
"""Construct a PromptSession based on parameters and apply key_bindings."""
if not isinstance(default, str):
raise InvalidArgument(
"default for filepath type question should be type of str."
)
super().__init__(
message=message,
style=style,
vi_mode=vi_mode,
default=default,
qmark=qmark,
completer=ThreadedCompleter(
FilePathCompleter(
only_directories=only_directories, only_files=only_files
)
),
multicolumn_complete=multicolumn_complete,
validate=validate,
invalid_message=invalid_message,
transformer=transformer,
filter=filter,
session_result=session_result,
**kwargs,
)
```
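A minimal usage sketch of `FilePathPrompt`, assuming an interactive terminal; `only_directories` is forwarded to the `FilePathCompleter` shown above (wrapped in a `ThreadedCompleter`), and the result is the plain string the user typed or completed.
```python
from InquirerPy.prompts.filepath import FilePathPrompt

prompt = FilePathPrompt(
    message="Choose a target directory:",
    only_directories=True,
    validate=lambda path: len(path) > 0,
    invalid_message="Path cannot be empty",
)
target = prompt.execute()
```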
#### File: prompts/fuzzy/fuzzy.py
```python
import asyncio
import math
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from prompt_toolkit.application.application import Application
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.filters.cli import IsDone
from prompt_toolkit.layout.containers import ConditionalContainer, HSplit, Window
from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl
from prompt_toolkit.layout.dimension import Dimension, LayoutDimension
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.layout.processors import AfterInput, BeforeInput
from prompt_toolkit.lexers.base import SimpleLexer
from prompt_toolkit.validation import ValidationError, Validator
from prompt_toolkit.widgets.base import Frame
from InquirerPy.base import BaseComplexPrompt, FakeDocument, InquirerPyUIControl
from InquirerPy.enum import INQUIRERPY_POINTER_SEQUENCE
from InquirerPy.exceptions import InvalidArgument
from InquirerPy.prompts.fuzzy.fzy import fuzzy_match_py_async
from InquirerPy.separator import Separator
from InquirerPy.utils import InquirerPyStyle, SessionResult, calculate_height
class InquirerPyFuzzyControl(InquirerPyUIControl):
"""A UIControl element intended to be used by `prompt_toolkit` Window class.
This UIControl is for listing the available choices based on filtering.
The actual input buffer will be handled by a separate BufferControl.
:param choices: List of choices to display.
:param pointer: The pointer symbol.
:param marker: Marker symbol for the selected choice in the case of multiselect.
    :param current_text: Callable that returns the current buffer text.
:param max_lines: Maximum height.
"""
def __init__(
self,
choices: Union[Callable[[SessionResult], List[Any]], List[Any]],
pointer: str,
marker: str,
current_text: Callable[[], str],
max_lines: int,
session_result: Optional[SessionResult],
) -> None:
self._pointer = pointer
self._marker = marker
self._current_text = current_text
self._max_lines = max_lines if max_lines > 0 else 1
super().__init__(choices=choices, default=None, session_result=session_result)
def _format_choices(self) -> None:
for index, choice in enumerate(self.choices):
if isinstance(choice["value"], Separator):
raise InvalidArgument("fuzzy type prompt does not accept Separator.")
choice["enabled"] = False
choice["index"] = index
choice["indices"] = []
self._filtered_choices = self.choices
self._first_line = 0
self._last_line = min(self._max_lines, self.choice_count)
self._height = self._last_line - self._first_line
def _get_hover_text(self, choice) -> List[Tuple[str, str]]:
"""Get the current highlighted line of text in `FormattedText`.
If in the middle of filtering, loop through the char and color
indices matched char into `class:fuzzy_match`.
:return: List of formatted text.
"""
display_choices = []
display_choices.append(("class:pointer", self._pointer))
display_choices.append(
(
"class:marker",
self._marker if self.choices[choice["index"]]["enabled"] else " ",
)
)
display_choices.append(("[SetCursorPosition]", ""))
if not choice["indices"]:
display_choices.append(("class:pointer", choice["name"]))
else:
indices = set(choice["indices"])
for index, char in enumerate(choice["name"]):
if index in indices:
display_choices.append(("class:fuzzy_match", char))
else:
display_choices.append(("class:pointer", char))
return display_choices
def _get_normal_text(self, choice) -> List[Tuple[str, str]]:
"""Get the line of text in `FormattedText`.
If in the middle of filtering, loop through the char and color
indices matched char into `class:fuzzy_match`.
Calculate spaces of pointer to make the choice equally align.
:return: List of formatted text.
"""
display_choices = []
display_choices.append(("class:pointer", len(self._pointer) * " "))
display_choices.append(
(
"class:marker",
self._marker if self.choices[choice["index"]]["enabled"] else " ",
)
)
if not choice["indices"]:
display_choices.append(("", choice["name"]))
else:
indices = set(choice["indices"])
for index, char in enumerate(choice["name"]):
if index in indices:
display_choices.append(("class:fuzzy_match", char))
else:
display_choices.append(("", char))
return display_choices
def _get_formatted_choices(self) -> List[Tuple[str, str]]:
"""Get all available choices in formatted text format.
        This method is overridden because `self.choices` holds the full choice
        list; `self._filtered_choices` provides the list of choices filtered
        by the current text.
:return: List of formatted choices.
"""
display_choices = []
if self.choice_count == 0:
self._selected_choice_index = 0
return display_choices
if self._selected_choice_index < 0:
self._selected_choice_index = 0
elif self._selected_choice_index >= self.choice_count:
self._selected_choice_index = self.choice_count - 1
if (self._last_line - self._first_line) < min(self.choice_count, self._height):
self._last_line = min(self.choice_count, self._height)
self._first_line = self._last_line - min(self.choice_count, self._height)
if self._selected_choice_index <= self._first_line:
self._first_line = self._selected_choice_index
self._last_line = self._first_line + min(self._height, self.choice_count)
elif self._selected_choice_index >= self._last_line:
self._last_line = self._selected_choice_index + 1
self._first_line = self._last_line - min(self._height, self.choice_count)
if self._last_line > self.choice_count:
self._last_line = self.choice_count
self._first_line = self._last_line - min(self._height, self.choice_count)
if self._first_line < 0:
self._first_line = 0
self._last_line = self._first_line + min(self._height, self.choice_count)
for index in range(self._first_line, self._last_line):
if index == self.selected_choice_index:
display_choices += self._get_hover_text(self._filtered_choices[index])
else:
display_choices += self._get_normal_text(self._filtered_choices[index])
display_choices.append(("", "\n"))
if display_choices:
display_choices.pop()
return display_choices
async def _filter_choices(self, wait_time: float) -> List[Dict[str, Any]]:
"""Call to filter choices using fzy fuzzy match.
:param wait_time: Delay time for this task.
:return: Filtered result.
"""
if not self._current_text():
choices = self.choices
else:
await asyncio.sleep(wait_time)
choices = await fuzzy_match_py_async(self._current_text(), self.choices)
return choices
@property
def selection(self) -> Dict[str, Any]:
"""Override this value since `self.choice` does not indicate the choice displayed.
`self.filtered_choice` is the up to date choice displayed.
:return: A dictionary of name and value for the current pointed choice
"""
return self._filtered_choices[self.selected_choice_index]
@property
def choice_count(self) -> int:
"""int: Filtered choice count."""
return len(self._filtered_choices)
class FuzzyPrompt(BaseComplexPrompt):
"""A filter prompt that allows user to input value.
    Filters the result using fuzzy finding. The fuzzy finding logic
    is contained in the file fzy.py, which is copied from the `vim-clap`
    python provider.
    The Application has mainly 3 layers.
    1. question
    2. input
    3. choices
    The content of the choices content_control is bound to the input buffer's
    on_text_changed event.
    Once Enter is pressed, both the input buffer and the choices window are hidden,
    and the question buffer is updated with the user selection.
    The default keybindings are overridden because j/k cannot be bound, even when
    editing_mode is vim, due to the input buffer.
:param message: message to display to the user
:param choices: list of choices available to select
:param default: default value to insert into buffer
:param pointer: pointer symbol
:param style: style dict to apply
:param vi_mode: use vi kb for the prompt
:param qmark: question mark symbol
:param transformer: transform the result to output, this is only visual effect
:param filter: a callable to filter the result, updating the user input before returning the result
:param instruction: instruction to display after the message
:param multiselect: enable multi selection of the choices
:param prompt: prompt symbol for buffer
:param marker: marker symbol for the selected choice in the case of multiselect
:param border: enable border around the fuzzy prompt
:param info: display info as virtual text after input
:param height: preferred height of the choice window
:param max_height: max height choice window should reach
:param validate: a callable or Validator instance to validate user selection
:param invalid_message: message to display when input is invalid
:param keybindings: custom keybindings to apply
"""
def __init__(
self,
message: Union[str, Callable[[SessionResult], str]],
choices: Union[Callable[[SessionResult], List[Any]], List[Any]],
default: Union[str, Callable[[SessionResult], str]] = "",
pointer: str = INQUIRERPY_POINTER_SEQUENCE,
style: InquirerPyStyle = None,
vi_mode: bool = False,
qmark: str = "?",
transformer: Callable[[Any], Any] = None,
filter: Callable[[Any], Any] = None,
instruction: str = "",
multiselect: bool = False,
prompt: str = INQUIRERPY_POINTER_SEQUENCE,
marker: str = INQUIRERPY_POINTER_SEQUENCE,
border: bool = True,
info: bool = True,
height: Union[str, int] = None,
max_height: Union[str, int] = None,
validate: Union[Callable[[Any], bool], Validator] = None,
invalid_message: str = "Invalid input",
keybindings: Dict[str, List[Dict[str, Any]]] = None,
session_result: SessionResult = None,
) -> None:
if not keybindings:
keybindings = {}
self._prompt = prompt
self._border = border
self._info = info
self._task = None
self._rendered = False
self._content_control: InquirerPyFuzzyControl
keybindings = {
"up": [{"key": "up"}, {"key": "c-p"}],
"down": [{"key": "down"}, {"key": "c-n"}],
**keybindings,
}
super().__init__(
message=message,
style=style,
vi_mode=vi_mode,
qmark=qmark,
transformer=transformer,
filter=filter,
validate=validate,
invalid_message=invalid_message,
multiselect=multiselect,
instruction=instruction,
keybindings=keybindings,
session_result=session_result,
)
self._default = default if not isinstance(default, Callable) else default(self._result) # type: ignore
self._dimmension_height, self._dimmension_max_height = calculate_height(
height, max_height, offset=3 if not self._border else 5
)
self._content_control = InquirerPyFuzzyControl(
choices=choices,
pointer=pointer,
marker=marker,
current_text=self._get_current_text,
max_lines=self._dimmension_max_height
if not self._border
else self._dimmension_max_height - 2,
session_result=session_result,
)
self._buffer = Buffer(on_text_changed=self._on_text_changed)
message_window = Window(
height=LayoutDimension.exact(1),
content=FormattedTextControl(self._get_prompt_message, show_cursor=False),
)
input_window = Window(
height=LayoutDimension.exact(1),
content=BufferControl(
self._buffer,
[
AfterInput(self._generate_after_input),
BeforeInput(self._generate_before_input),
],
lexer=SimpleLexer("class:input"),
),
)
choice_height_dimmension = lambda: Dimension(
max=self._dimmension_max_height
if not self._border
else self._dimmension_max_height - 2,
preferred=self._dimmension_height,
min=self.content_control._height if self.content_control._height > 0 else 1,
)
self.choice_window = Window(
content=self.content_control,
height=choice_height_dimmension,
dont_extend_height=True,
)
main_content_window = HSplit([input_window, self.choice_window])
if self._border:
main_content_window = Frame(main_content_window)
self._layout = Layout(
HSplit(
[
message_window,
ConditionalContainer(
main_content_window, filter=~IsDone() & ~self._is_loading
),
ConditionalContainer(
Window(FormattedTextControl([("", "")])),
filter=~IsDone(), # force validation bar to stay bottom
),
ConditionalContainer(
Window(
FormattedTextControl(
[
(
"class:validation-toolbar",
self._invalid_message,
)
]
),
dont_extend_height=True,
),
filter=self._is_invalid & ~IsDone(),
),
]
)
)
self._layout.focus(input_window)
self._application = Application(
layout=self._layout,
style=self._style,
key_bindings=self._kb,
editing_mode=self._editing_mode,
after_render=self._after_render,
)
def _after_render(self, application) -> None:
"""Render callable choices and set the buffer default text.
        Setting the buffer default text has to happen after the application is rendered,
        because `self._filter_choices` uses the event loop from the `Application`.
        A check on `self._rendered` is enforced because this event fires on every
        render and we only want it to run once.
"""
if not self._rendered:
super()._after_render(application)
if self._default:
default_text = str(self._default)
self._buffer.text = default_text
self._buffer.cursor_position = len(default_text)
def _toggle_all(self, value: bool = None) -> None:
"""Toggle all choice `enabled` status.
:param value: Specify a value to toggle.
"""
for choice in self.content_control.choices:
if isinstance(choice["value"], Separator):
continue
choice["enabled"] = value if value else not choice["enabled"]
def _generate_after_input(self) -> List[Tuple[str, str]]:
"""Virtual text displayed after the user input."""
display_message = []
if self._info:
display_message.append(("", " "))
display_message.append(
(
"class:fuzzy_info",
"%s/%s"
% (
self.content_control.choice_count,
len(self.content_control.choices),
),
)
)
if self._multiselect:
display_message.append(
("class:fuzzy_info", " (%s)" % len(self.selected_choices))
)
return display_message
def _generate_before_input(self) -> List[Tuple[str, str]]:
"""Display prompt symbol as virtual text before user input."""
display_message = []
display_message.append(("class:fuzzy_prompt", "%s " % self._prompt))
return display_message
def _filter_callback(self, task):
"""Redraw `self._application` when the filter task is finished."""
if task.cancelled():
return
self.content_control._filtered_choices = task.result()
self._application.invalidate()
def _calculate_wait_time(self) -> float:
"""Calculate wait time to smoother the application on big data set.
Using digit of the choices lengeth to get wait time.
For digit greater than 6, using formula 2^(digit - 5) * 0.3 to increase the wait_time.
Still experimenting, require improvement.
"""
wait_table = {
2: 0.05,
3: 0.1,
4: 0.2,
5: 0.3,
}
digit = 1
if len(self.content_control.choices) > 0:
digit = int(math.log10(len(self.content_control.choices))) + 1
if digit < 2:
return 0.0
if digit in wait_table:
return wait_table[digit]
return wait_table[5] * (2 ** (digit - 5))
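    # Editor's illustration of the debounce values above (choice counts are
    # examples only, not from the original file):
    #   9 choices         -> 1 digit  -> 0.0s (no debounce)
    #   500 choices       -> 3 digits -> 0.1s
    #   80,000 choices    -> 5 digits -> 0.3s
    #   1,000,000 choices -> 7 digits -> 0.3 * 2 ** (7 - 5) = 1.2s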
def _on_text_changed(self, _) -> None:
"""Handle buffer text change event.
1. Check if there is current task running.
2. Cancel if already has task, increase wait_time
3. Create a filtered_choice task in asyncio event loop
4. Add callback
1. Run a new filter on all choices.
2. Re-calculate current selected_choice_index
if it exceeds the total filtered_choice.
3. Avoid selected_choice_index less than zero,
this fix the issue of cursor lose when:
choice -> empty choice -> choice
Don't need to create or check asyncio event loop, `prompt_toolkit`
application already has a event loop running.
"""
if self._invalid:
self._invalid = False
wait_time = self._calculate_wait_time()
if self._task and not self._task.done():
self._task.cancel()
self._task = asyncio.create_task(
self.content_control._filter_choices(wait_time)
)
self._task.add_done_callback(self._filter_callback)
def _handle_down(self) -> None:
"""Move down."""
self.content_control.selected_choice_index = (
self.content_control.selected_choice_index + 1
) % self.content_control.choice_count
def _handle_up(self) -> None:
"""Move up."""
self.content_control.selected_choice_index = (
self.content_control.selected_choice_index - 1
) % self.content_control.choice_count
def _toggle_choice(self) -> None:
"""Handle tab event, alter the `selected` state of the choice."""
current_selected_index = self.content_control.selection["index"]
self.content_control.choices[current_selected_index][
"enabled"
] = not self.content_control.choices[current_selected_index]["enabled"]
def _handle_enter(self, event) -> None:
"""Handle enter event.
        Validate the result first.
        In the multiselect scenario, if nothing has been toggled, capture the currently
        highlighted choice and return its value in a list.
        Otherwise, return all toggled choices as a list.
        In the normal scenario, return the currently highlighted choice.
        If the current UI contains no choices due to filtering, return None.
"""
try:
fake_document = FakeDocument(self.result_value)
self._validator.validate(fake_document) # type: ignore
if self._multiselect:
self.status["answered"] = True
if not self.selected_choices:
self.status["result"] = [self.content_control.selection["name"]]
event.app.exit(result=[self.content_control.selection["value"]])
else:
self.status["result"] = self.result_name
event.app.exit(result=self.result_value)
else:
self.status["answered"] = True
self.status["result"] = self.content_control.selection["name"]
event.app.exit(result=self.content_control.selection["value"])
except ValidationError:
self._invalid = True
except IndexError:
self.status["answered"] = True
self.status["result"] = None if not self._multiselect else []
event.app.exit(result=None if not self._multiselect else [])
@property
def content_control(self) -> InquirerPyFuzzyControl:
"""InquirerPyFuzzyControl: Override for type-hinting."""
return self._content_control
def _get_current_text(self) -> str:
"""Get current input buffer text."""
return self._buffer.text
```
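A minimal usage sketch of `FuzzyPrompt` with multiselect, assuming an interactive terminal; typing filters the choices through the fzy matcher and `execute()` returns a list of values in multiselect mode.
```python
from InquirerPy.prompts.fuzzy import FuzzyPrompt

prompt = FuzzyPrompt(
    message="Pick packages to upgrade:",
    choices=["numpy", "pandas", "requests", "rich"],
    multiselect=True,
    border=True,
    info=True,
)
selected = prompt.execute()  # list of values in multiselect mode
```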
#### File: InquirerPy/prompts/input.py
```python
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from prompt_toolkit.completion import NestedCompleter
from prompt_toolkit.completion.base import Completer
from prompt_toolkit.filters.base import Condition
from prompt_toolkit.keys import Keys
from prompt_toolkit.lexers import SimpleLexer
from prompt_toolkit.shortcuts.prompt import CompleteStyle, PromptSession
from prompt_toolkit.validation import ValidationError, Validator
from InquirerPy.base import BaseSimplePrompt
from InquirerPy.enum import INQUIRERPY_KEYBOARD_INTERRUPT, INQUIRERPY_POINTER_SEQUENCE
from InquirerPy.exceptions import InvalidArgument
from InquirerPy.utils import InquirerPyStyle, SessionResult
class InputPrompt(BaseSimplePrompt):
"""A wrapper class around PromptSession.
This class is used for input prompt.
:param message: the question to ask
:type message: Union[str, Callable[[SessionResult], str]]
:param style: a dictionary of style to apply
:type style: InquirerPyStyle
:param vi_mode: use vi kb for the prompt
:type vi_mode: bool
:param default: the default result
:type default: Union[str, Callable[[SessionResult], str]]
:param qmark: question qmark to display
:type qmark: str
:param completer: add auto completer to user input
:type completer: Union[Dict[str, str], Completer]
:param multicolumn_complete: complete in multi column
:type multicolumn_complete: bool
:param multiline: enable multiline mode
:type multiline: bool
:param validate: a callable or a validation class to validate user input
:type validate: Union[Callable[[str], bool], Validator]
:param invalid_message: the error message to display when input is invalid
:type invalid_message: str
:param transformer: a callable to transform the result, this is visual effect only
:type transformer: Callable[[str], Any]
:param filter: a callable to filter the result, updating the user input before returning the result
:type filter: Callable[[str], Any]
"""
def __init__(
self,
message: Union[str, Callable[[SessionResult], str]],
style: InquirerPyStyle = None,
vi_mode: bool = False,
default: Union[str, Callable[[SessionResult], str]] = "",
qmark: str = "?",
completer: Union[Dict[str, Optional[str]], Completer] = None,
multicolumn_complete: bool = False,
multiline: bool = False,
validate: Union[Callable[[str], bool], Validator] = None,
invalid_message: str = "Invalid input",
transformer: Callable[[str], Any] = None,
filter: Callable[[str], Any] = None,
session_result: SessionResult = None,
**kwargs,
) -> None:
"""Construct a PromptSession based on parameters and apply key_bindings."""
super().__init__(
message,
style,
vi_mode=vi_mode,
qmark=qmark,
validate=validate,
invalid_message=invalid_message,
transformer=transformer,
filter=filter,
session_result=session_result,
default=default,
)
if not isinstance(self._default, str):
raise InvalidArgument(
"default for input type question should be type of str."
)
self._completer = None
if isinstance(completer, dict):
self._completer = NestedCompleter.from_nested_dict(completer)
elif isinstance(completer, Completer):
self._completer = completer
self._multiline = multiline
self._complete_style = (
CompleteStyle.COLUMN
if not multicolumn_complete
else CompleteStyle.MULTI_COLUMN
)
@Condition
def is_multiline():
return self._multiline
@Condition
def has_completion():
return self._completer is not None
@self._kb.add("c-space", filter=has_completion)
def completion(event):
buff = event.app.current_buffer
if buff.complete_state:
buff.complete_next()
else:
buff.start_completion(select_first=False)
@self._kb.add(Keys.Enter, filter=~is_multiline)
def enter(event):
try:
self._session.validator.validate(self._session.default_buffer)
except ValidationError:
self._session.default_buffer.validate_and_handle()
else:
self.status["answered"] = True
self.status["result"] = self._session.default_buffer.text
self._session.default_buffer.text = ""
event.app.exit(result=self.status["result"])
@self._kb.add(Keys.Escape, Keys.Enter, filter=is_multiline)
def multiline_enter(event):
try:
self._session.validator.validate(self._session.default_buffer)
except ValidationError:
self._session.default_buffer.validate_and_handle()
else:
self.status["answered"] = True
self.status["result"] = self._session.default_buffer.text
self._session.default_buffer.text = ""
event.app.exit(result=self.status["result"])
self._session = PromptSession(
message=self._get_prompt_message,
key_bindings=self._kb,
style=self._style,
completer=self._completer,
validator=self._validator,
validate_while_typing=False,
input=kwargs.pop("input", None),
output=kwargs.pop("output", None),
editing_mode=self._editing_mode,
lexer=SimpleLexer(self._lexer),
is_password=kwargs.pop("is_password", False),
multiline=self._multiline,
complete_style=self._complete_style,
)
def _get_prompt_message(
self,
pre_answer: Optional[Tuple[str, str]] = None,
post_answer: Optional[Tuple[str, str]] = None,
) -> List[Tuple[str, str]]:
"""Dynamically update the prompt message.
        Change the user input to the 'answer' color in the style.
:param pre_answer: the formatted text to display before answering the question
:type pre_answer: Optional[Tuple[str, str]]
:param post_answer: the formatted text to display after answering the question
:type post_answer: Optional[Tuple[str, str]]
:return: the formatted text for PromptSession
:rtype: List[Tuple[str, str]]
"""
if not pre_answer:
if self._multiline:
pre_answer = ("class:instruction", " ESC + Enter to finish input")
else:
pre_answer = ("class:instruction", " ")
if not post_answer:
if self._multiline and self.status["result"]:
lines = self.status["result"].split("\n")
if len(lines) > 1:
number_of_chars = len("".join(lines[1:]))
lines[0] += "...[%s char%s]" % (
number_of_chars,
"s" if number_of_chars > 1 else "",
)
post_answer = ("class:answer", " %s" % lines[0])
else:
post_answer = ("class:answer", " %s" % self.status["result"])
formatted_message = super()._get_prompt_message(pre_answer, post_answer)
if not self.status["answered"] and self._multiline:
formatted_message.append(
("class:questionmark", "\n%s " % INQUIRERPY_POINTER_SEQUENCE)
)
return formatted_message
def execute(self, raise_keyboard_interrupt: bool = True) -> Optional[str]:
"""Display the prompt and return the result.
        :param raise_keyboard_interrupt: raise a KeyboardInterrupt exception when the user hits 'c-c'
:type raise_keyboard_interrupt: bool
:return: user entered value
:rtype: str
"""
result = self._session.prompt(default=self._default)
if result == INQUIRERPY_KEYBOARD_INTERRUPT:
if raise_keyboard_interrupt and not os.getenv(
"INQUIRERPY_NO_RAISE_KBI", False
):
raise KeyboardInterrupt
else:
result = None
if not self._filter:
return result
return self._filter(result)
```
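A minimal usage sketch of `InputPrompt`, assuming an interactive terminal; the dict completer is converted to a `NestedCompleter` and, with `multiline` enabled, input is submitted with Esc+Enter as the pre-answer instruction notes.
```python
from InquirerPy.prompts.input import InputPrompt

prompt = InputPrompt(
    message="Describe the issue:",
    multiline=True,
    completer={"frontend": None, "backend": {"api": None, "db": None}},
    validate=lambda text: bool(text.strip()),
    invalid_message="Description cannot be empty",
)
description = prompt.execute()
```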
#### File: InquirerPy/prompts/list.py
```python
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from prompt_toolkit.validation import Validator
from InquirerPy.base import BaseListPrompt, InquirerPyUIControl
from InquirerPy.enum import INQUIRERPY_POINTER_SEQUENCE
from InquirerPy.separator import Separator
from InquirerPy.utils import InquirerPyStyle, SessionResult
class InquirerPyListControl(InquirerPyUIControl):
"""A UIControl class intended to be consumed by prompt_toolkit window.
    Used to dynamically render the list and update the content based on input.
    Reference the param definitions in `ListPrompt`.
"""
def __init__(
self,
choices: Union[Callable[[SessionResult], List[Any]], List[Any]],
default: Any,
pointer: str,
marker: str,
session_result: Optional[SessionResult],
) -> None:
"""Construct and init a custom FormattedTextControl object."""
self._pointer: str = pointer
self._marker: str = marker
super().__init__(
choices=choices, default=default, session_result=session_result
)
def _format_choices(self) -> None:
pass
def _get_hover_text(self, choice) -> List[Tuple[str, str]]:
display_choices = []
display_choices.append(("class:pointer", self._pointer))
display_choices.append(
(
"class:marker",
self._marker if choice["enabled"] else " ",
)
)
display_choices.append(("[SetCursorPosition]", ""))
display_choices.append(("class:pointer", choice["name"]))
return display_choices
def _get_normal_text(self, choice) -> List[Tuple[str, str]]:
display_choices = []
display_choices.append(("", len(self._pointer) * " "))
display_choices.append(
(
"class:marker",
self._marker if choice["enabled"] else " ",
)
)
if not isinstance(choice["value"], Separator):
display_choices.append(("", choice["name"]))
else:
display_choices.append(("class:separator", choice["name"]))
return display_choices
class ListPrompt(BaseListPrompt):
"""A wrapper class around prompt_toolkit Application to create a list prompt.
:param message: message to display
:type message: Union[str, Callable[[SessionResult], str]]
:param choices: list of choices to display
:type choices: Union[Callable[[SessionResult], List[Any]], List[Any]],
:param default: default value
:type default: Any
:param style: a dictionary of style
:type style: InquirerPyStyle
:param vi_mode: use vi kb for the prompt
:type vi_mode: bool
:param qmark: question qmark to display
:type qmark: str
:param pointer: the pointer qmark of hovered choice
:type pointer: str
:param instruction: instruction to display to user
:type instruction: str
:param transformer: a callable to transform the result, this is visual effect only
:type transformer: Callable[[Any], Any]
:param filter: a callable to filter the result, updating the user input before returning the result
:type filter: Callable[[Any], Any]
:param height: preferred height of the choice window
:type height: Union[str, int]
:param max_height: max height choice window should reach
:type max_height: Union[str, int]
    :param multiselect: enable multiselection
:type multiselect: bool
:param marker: marker symbol to indicate selected choice in multiselect mode
:type marker: str
:param validate: a callable or Validator instance to validate user selection
:type validate: Union[Callable[[Any], bool], Validator]
:param invalid_message: message to display when input is invalid
:type invalid_message: str
:param keybindings: custom keybindings to apply
:type keybindings: Dict[str, List[Dict[str, Any]]]
:param show_cursor: display cursor at the end of the prompt
:type show_cursor: bool
"""
def __init__(
self,
message: Union[str, Callable[[SessionResult], str]],
choices: Union[Callable[[SessionResult], List[Any]], List[Any]],
default: Any = None,
style: InquirerPyStyle = None,
vi_mode: bool = False,
qmark: str = "?",
pointer: str = INQUIRERPY_POINTER_SEQUENCE,
instruction: str = "",
transformer: Callable[[Any], Any] = None,
filter: Callable[[Any], Any] = None,
height: Union[int, str] = None,
max_height: Union[int, str] = None,
multiselect: bool = False,
marker: str = INQUIRERPY_POINTER_SEQUENCE,
validate: Union[Callable[[Any], bool], Validator] = None,
invalid_message: str = "Invalid input",
keybindings: Dict[str, List[Dict[str, Any]]] = None,
show_cursor: bool = True,
session_result: SessionResult = None,
) -> None:
"""Initialise the content_control and create Application."""
self.content_control = InquirerPyListControl(
choices=choices,
default=default,
pointer=pointer,
marker=marker,
session_result=session_result,
)
self._instruction = instruction
super().__init__(
message=message,
style=style,
vi_mode=vi_mode,
qmark=qmark,
instruction=instruction,
transformer=transformer,
filter=filter,
height=height,
max_height=max_height,
validate=validate,
invalid_message=invalid_message,
multiselect=multiselect,
keybindings=keybindings,
show_cursor=show_cursor,
session_result=session_result,
)
```
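The following is a minimal, illustrative usage sketch for the `ListPrompt` defined above (not part of the original file); the message, choices, and default are made up, and `execute()` runs an interactive session in the terminal.
```python
# Hypothetical usage sketch for ListPrompt; choices and default are illustrative.
from InquirerPy.prompts.list import ListPrompt
from InquirerPy.separator import Separator

prompt = ListPrompt(
    message="Select a region:",
    choices=["eu-west-1", "us-east-1", Separator(), {"name": "other", "value": None}],
    default="us-east-1",
)
region = prompt.execute()  # blocks until the user confirms a choice
print(region)
```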
#### File: InquirerPy/prompts/secret.py
```python
from typing import Any, Callable, List, Tuple, Union
from prompt_toolkit.validation import Validator
from InquirerPy.exceptions import InvalidArgument
from InquirerPy.prompts.input import InputPrompt
from InquirerPy.utils import InquirerPyStyle, SessionResult
class SecretPrompt(InputPrompt):
"""A wrapper class around PromptSession to create a secret prompt.
:param message: the message to display in the prompt
:type message: Union[str, Callable[[SessionResult], str]]
:param style: style to apply to the prompt
:type style: InquirerPyStyle
:param default: the default value
:type default: Union[str, Callable[[SessionResult], str]]
    :param qmark: qmark to display in front of the question
:type qmark: str
:param vi_mode: use vi kb for the prompt
:type vi_mode: bool
:param validate: a callable to validate the user input
:type validate: Union[Validator, Callable[[str], bool]]
:param invalid_message: the error message to display when validator failed
:type invalid_message: str
:param transformer: a callable to transform the result, this is visual effect only
:type transformer: Callable[[str], Any]
:param filter: a callable to filter the result, updating the user input before returning the result
:type filter: Callable[[str], Any]
"""
def __init__(
self,
message: Union[str, Callable[[SessionResult], str]],
style: InquirerPyStyle = None,
default: Union[str, Callable[[SessionResult], str]] = "",
qmark: str = "?",
vi_mode: bool = False,
validate: Union[Validator, Callable[[str], bool]] = None,
invalid_message: str = "Invalid input",
transformer: Callable[[str], Any] = None,
filter: Callable[[str], Any] = None,
session_result: SessionResult = None,
**kwargs
) -> None:
"""Construct the prompt session."""
if not isinstance(default, str):
raise InvalidArgument(
"default for secret type question should be type of str."
)
super().__init__(
message=message,
style=style,
vi_mode=vi_mode,
default=default,
qmark=qmark,
validate=validate,
invalid_message=invalid_message,
is_password=True,
transformer=transformer,
filter=filter,
session_result=session_result,
**kwargs
)
def _get_prompt_message(self) -> List[Tuple[str, str]]:
"""Get formatted message to display in prompt.
:return: a list of formatted message
:rtype: List[Tuple[str, str]]
"""
pre_answer = ("class:instruction", " ")
if not self._transformer:
post_answer = (
"class:answer",
""
if not self.status["result"]
else " %s" % "".join(["*" for _ in self.status["result"]]),
)
else:
post_answer = ("class:answer", " %s" % self.status["result"])
return super()._get_prompt_message(pre_answer, post_answer)
```
#### File: InquirerPy/InquirerPy/resolver.py
```python
from typing import Any, Dict, List, Union
from InquirerPy.exceptions import InvalidArgument, RequiredKeyNotFound
from InquirerPy.prompts.checkbox import CheckboxPrompt
from InquirerPy.prompts.confirm import ConfirmPrompt
from InquirerPy.prompts.expand import ExpandPrompt
from InquirerPy.prompts.filepath import FilePathPrompt
from InquirerPy.prompts.fuzzy import FuzzyPrompt
from InquirerPy.prompts.input import InputPrompt
from InquirerPy.prompts.list import ListPrompt
from InquirerPy.prompts.rawlist import RawlistPrompt
from InquirerPy.prompts.secret import SecretPrompt
from InquirerPy.utils import SessionResult, get_style
__all__ = ["prompt"]
question_mapping = {
"confirm": ConfirmPrompt,
"filepath": FilePathPrompt,
"password": SecretPrompt,
"input": InputPrompt,
"list": ListPrompt,
"checkbox": CheckboxPrompt,
"rawlist": RawlistPrompt,
"expand": ExpandPrompt,
"fuzzy": FuzzyPrompt,
}
list_prompts = {"list", "checkbox", "rawlist", "expand", "fuzzy"}
def prompt(
questions: Union[List[Dict[str, Any]], Dict[str, Any]],
style: Dict[str, str] = None,
vi_mode: bool = False,
raise_keyboard_interrupt: bool = True,
keybindings: Dict[str, List[Dict[str, Any]]] = None,
style_override: bool = True,
) -> SessionResult:
"""Resolve user provided list of questions and get result.
if "name" param is not present, use the index as the name.
All param can be controlled via ENV var, if not present, resolver
will attempt to resolve the value from ENV var.
A default style is applied using Atom Onedark color if style is not present.
:param questions: list of questions to ask
        if only one question is needed, providing a single dict is also sufficient
:type questions: Union[List[Dict[str, Any]], Dict[str, Any]]
:param style: the style to apply to the prompt
:type style: Dict[str, str]
:param vi_mode: use vi kb for the prompt
:type vi_mode: bool
:param raise_keyboard_interrupt: raise the kbi exception when user hit c-c
If false, store result as None and continue
:type raise_keyboard_interrupt: bool
:param keybindings: custom keybindings to apply
:type keybindings: Dict[str, List[Dict[str, Any]]]
:param style_override: override all default styles
:type style_override: bool
:return: dictionary of answers
:rtype: SessionResult
"""
result: SessionResult = {}
if not keybindings:
keybindings = {}
if isinstance(questions, dict):
questions = [questions]
if not isinstance(questions, list):
raise InvalidArgument("questions should be type of list.")
question_style = get_style(style, style_override)
for index, original_question in enumerate(questions):
try:
question = original_question.copy()
question_type = question.pop("type")
question_name = question.pop("name", index)
message = question.pop("message")
question_when = question.pop("when", None)
if question_when and not question_when(result):
result[question_name] = None
continue
args = {
"message": message,
"style": question_style,
"vi_mode": vi_mode,
"session_result": result,
}
if question_type in list_prompts:
args["keybindings"] = {**keybindings, **question.pop("keybindings", {})}
result[question_name] = question_mapping[question_type](
**args, **question
).execute(raise_keyboard_interrupt=raise_keyboard_interrupt)
except KeyError:
raise RequiredKeyNotFound
return result
```
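To show how the resolver maps question dicts onto the prompt classes above, here is a small illustrative sketch (the questions themselves are made up); `prompt()` runs interactively and returns a dict keyed by each question's `name` (or its index).
```python
# Illustrative question list for the resolver's prompt() function.
from InquirerPy.resolver import prompt

questions = [
    {"type": "input", "message": "Project name:", "name": "project"},
    {
        "type": "list",
        "message": "License:",
        "choices": ["MIT", "GPL-3.0", "Apache-2.0"],
        "name": "license",
    },
    {"type": "confirm", "message": "Create repository?", "name": "create"},
]
answers = prompt(questions)
# e.g. {"project": "demo", "license": "MIT", "create": True}
```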
#### File: tests/prompts/test_checkbox.py
```python
import unittest
from unittest.mock import ANY, call, patch
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.key_binding.key_bindings import KeyBindings
from prompt_toolkit.styles.style import Style
from InquirerPy.exceptions import InvalidArgument, RequiredKeyNotFound
from InquirerPy.prompts.checkbox import CheckboxPrompt, InquirerPyCheckboxControl
from InquirerPy.separator import Separator
class TestCheckbox(unittest.TestCase):
separator = Separator()
choices = [
"boy",
"girl",
separator,
{"name": "mix", "value": "boy&girl", "enabled": True},
]
def test_checkbox_control(self):
checkbox_control = InquirerPyCheckboxControl(self.choices, "boy&girl")
self.assertEqual(
checkbox_control.choices,
[
{"name": "boy", "value": "boy", "enabled": False},
{"name": "girl", "value": "girl", "enabled": False},
{"name": 15 * "-", "value": self.separator, "enabled": False},
{"name": "mix", "value": "boy&girl", "enabled": True},
],
)
self.assertEqual(checkbox_control.selected_choice_index, 3)
self.assertEqual(
checkbox_control._get_formatted_choices(),
[
("", " "),
("class:checkbox", "⬡ "),
("", "boy"),
("", "\n"),
("", " "),
("class:checkbox", "⬡ "),
("", "girl"),
("", "\n"),
("", " "),
("class:separator", "---------------"),
("", "\n"),
("class:pointer", "❯ "),
("class:checkbox", "⬢ "),
("[SetCursorPosition]", ""),
("class:pointer", "mix"),
],
)
self.assertEqual(checkbox_control.choice_count, 4)
self.assertEqual(
checkbox_control.selection,
{"name": "mix", "value": "boy&girl", "enabled": True},
)
def test_checkbox_control_exceptions(self):
self.assertRaises(
RequiredKeyNotFound,
InquirerPyCheckboxControl,
[
{"what": "apple", "value": "peach"},
"pear",
],
"watermelon",
)
self.assertRaises(InvalidArgument, InquirerPyCheckboxControl, [])
self.assertRaises(
InvalidArgument, InquirerPyCheckboxControl, "", [Separator(), Separator()]
)
def test_checkbox_prompt(self):
prompt = CheckboxPrompt(
message="Select something",
choices=self.choices,
default="boy&girl",
style={},
vi_mode=False,
qmark="!",
pointer="<",
instruction="TAB",
)
self.assertEqual(prompt._editing_mode, EditingMode.EMACS)
self.assertIsInstance(prompt.content_control, InquirerPyCheckboxControl)
self.assertIsInstance(prompt._kb, KeyBindings)
self.assertIsInstance(prompt._style, Style)
self.assertEqual(prompt._message, "Select something")
self.assertEqual(prompt._qmark, "!")
self.assertEqual(prompt.instruction, "TAB")
def test_minimum_args(self):
CheckboxPrompt(message="yes", choices=self.choices)
def test_checkbox_prompt_message(self):
prompt = CheckboxPrompt(
message="Select something",
choices=self.choices,
instruction="TAB",
)
self.assertEqual(
prompt._get_prompt_message(),
[
("class:questionmark", "?"),
("class:question", " Select something"),
("class:instruction", " TAB"),
],
)
def test_checkbox_bindings(self):
prompt = CheckboxPrompt(message="", choices=self.choices)
self.assertEqual(prompt.content_control.selected_choice_index, 0)
prompt._handle_down()
self.assertEqual(prompt.content_control.selected_choice_index, 1)
prompt._handle_down()
self.assertEqual(prompt.content_control.selected_choice_index, 3)
prompt._handle_down()
self.assertEqual(prompt.content_control.selected_choice_index, 0)
prompt._handle_up()
self.assertEqual(prompt.content_control.selected_choice_index, 3)
prompt._handle_up()
self.assertEqual(prompt.content_control.selected_choice_index, 1)
self.assertEqual(prompt.status, {"result": None, "answered": False})
with patch("prompt_toolkit.utils.Event") as mock:
event = mock.return_value
prompt._handle_enter(event)
self.assertEqual(prompt.status, {"result": ["mix"], "answered": True})
prompt._toggle_choice()
self.assertEqual(
prompt.content_control.choices,
[
{"enabled": False, "name": "boy", "value": "boy"},
{"enabled": True, "name": "girl", "value": "girl"},
{"enabled": False, "name": "---------------", "value": ANY},
{"enabled": True, "name": "mix", "value": "boy&girl"},
],
)
prompt._toggle_all()
self.assertEqual(
prompt.content_control.choices,
[
{"enabled": True, "name": "boy", "value": "boy"},
{"enabled": False, "name": "girl", "value": "girl"},
{"enabled": False, "name": "---------------", "value": ANY},
{"enabled": False, "name": "mix", "value": "boy&girl"},
],
)
prompt._toggle_all(True)
self.assertEqual(
prompt.content_control.choices,
[
{"enabled": True, "name": "boy", "value": "boy"},
{"enabled": True, "name": "girl", "value": "girl"},
{"enabled": False, "name": "---------------", "value": ANY},
{"enabled": True, "name": "mix", "value": "boy&girl"},
],
)
def test_validator(self):
prompt = CheckboxPrompt(
message="",
choices=self.choices,
validate=lambda x: len(x) > 2,
invalid_message="hello",
)
with patch("prompt_toolkit.utils.Event") as mock:
self.assertEqual(prompt._invalid, False)
event = mock.return_value
prompt._handle_enter(event)
self.assertEqual(prompt._invalid, True)
self.assertEqual(prompt._invalid_message, "hello")
@patch.object(CheckboxPrompt, "_register_kb")
def test_kb_register(self, mocked_kb):
CheckboxPrompt(
message="",
choices=self.choices,
)
mocked_kb.assert_has_calls([call("down", filter=True)])
mocked_kb.assert_has_calls([call("c-n", filter=ANY)])
mocked_kb.assert_has_calls([call("j", filter=ANY)])
try:
mocked_kb.assert_has_calls([call("alt-r", filter=True)])
self.fail("keybinding failed to apply multiselect filter")
except:
pass
mocked_kb.assert_has_calls([call("alt-a", filter=ANY)])
def test_kb(self):
prompt = CheckboxPrompt(message="", choices=self.choices)
prompt._invalid = True
@prompt._register_kb("b")
def test(_):
pass
test("") # type: ignore
self.assertEqual(prompt._invalid, False)
def test_checkbox_enter_empty(self):
prompt = CheckboxPrompt(message="", choices=["haah", "haha", "what"])
with patch("prompt_toolkit.utils.Event") as mock:
event = mock.return_value
prompt._handle_enter(event)
self.assertEqual(prompt.status["result"], [])
def test_after_render(self):
prompt = CheckboxPrompt(message="", choices=lambda _: [1, 2, 3])
self.assertEqual(prompt.content_control.choices, [])
prompt._after_render("")
self.assertEqual(
prompt.content_control.choices,
[
{"enabled": False, "name": "1", "value": 1},
{"enabled": False, "name": "2", "value": 2},
{"enabled": False, "name": "3", "value": 3},
],
)
```
#### File: InquirerPy/tests/test_utils.py
```python
import os
import unittest
from unittest.mock import PropertyMock, patch
from prompt_toolkit.application.application import Application
from InquirerPy.exceptions import InvalidArgument
from InquirerPy.utils import InquirerPyStyle, calculate_height, color_print, get_style
class TestUtils(unittest.TestCase):
@patch("InquirerPy.utils.shutil.get_terminal_size")
def test_prompt_height(self, mocked_terminal_size):
mocked_terminal_size.return_value = (24, 80)
height, max_height = calculate_height(None, None)
self.assertEqual(height, None)
self.assertEqual(max_height, 46)
height, max_height = calculate_height("50%", None)
self.assertEqual(height, 38)
self.assertEqual(max_height, 78)
calculate_height("50%", "80")
self.assertRaises(InvalidArgument, calculate_height, "adsfa", "40%")
self.assertRaises(InvalidArgument, calculate_height, "50%", "asfasdds")
height, max_height = calculate_height(None, "80%")
self.assertEqual(height, None)
self.assertEqual(max_height, 62)
height, max_height = calculate_height("1%", None)
self.assertEqual(height, 1)
def test_style(self):
style = get_style()
self.assertEqual(
style,
InquirerPyStyle(
{
"questionmark": "#e5c07b",
"answer": "#61afef",
"input": "#98c379",
"question": "",
"instruction": "",
"pointer": "#61afef",
"checkbox": "#98c379",
"separator": "",
"skipped": "#5c6370",
"marker": "#e5c07b",
"validator": "",
"fuzzy_prompt": "#c678dd",
"fuzzy_info": "#56b6c2",
"frame.border": "#4b5263",
"fuzzy_match": "#c678dd",
},
),
)
os.environ["INQUIRERPY_STYLE_QUESTIONMARK"] = "#000000"
os.environ["INQUIRERPY_STYLE_ANSWER"] = "#111111"
os.environ["INQUIRERPY_STYLE_QUESTION"] = "#222222"
os.environ["INQUIRERPY_STYLE_INSTRUCTION"] = "#333333"
os.environ["INQUIRERPY_STYLE_INPUT"] = "#444444"
os.environ["INQUIRERPY_STYLE_POINTER"] = "#555555"
os.environ["INQUIRERPY_STYLE_CHECKBOX"] = "#66666"
os.environ["INQUIRERPY_STYLE_SEPARATOR"] = "#777777"
os.environ["INQUIRERPY_STYLE_SKIPPED"] = "#888888"
os.environ["INQUIRERPY_STYLE_FUZZY_PROMPT"] = "#999999"
os.environ["INQUIRERPY_STYLE_FUZZY_INFO"] = "#aaaaaa"
os.environ["INQUIRERPY_STYLE_MARKER"] = "#bbbbbb"
os.environ["INQUIRERPY_STYLE_FUZZY_BORDER"] = "#cccccc"
os.environ["INQUIRERPY_STYLE_FUZZY_MATCH"] = "#dddddd"
os.environ["INQUIRERPY_STYLE_VALIDATOR"] = "#dddddd"
style = get_style()
self.assertEqual(
style,
InquirerPyStyle(
{
"questionmark": "#000000",
"answer": "#111111",
"input": "#444444",
"question": "#222222",
"instruction": "#333333",
"pointer": "#555555",
"checkbox": "#66666",
"separator": "#777777",
"skipped": "#888888",
"fuzzy_prompt": "#999999",
"fuzzy_info": "#aaaaaa",
"marker": "#bbbbbb",
"validation-toolbar": "#dddddd",
"fuzzy_match": "#dddddd",
"frame.border": "#cccccc",
},
),
)
def test_format_style(self):
style = get_style(
{
"questionmark": "#000000",
"answer": "#111111",
"input": "#444444",
"question": "#222222",
"instruction": "#333333",
"pointer": "#555555",
"checkbox": "#66666",
"separator": "#777777",
"skipped": "#888888",
"fuzzy_prompt": "#999999",
"fuzzy_info": "#aaaaaa",
"marker": "#bbbbbb",
"validator": "#dddddd",
"fuzzy_match": "#dddddd",
"fuzzy_border": "#cccccc",
}
)
self.assertEqual(
style,
InquirerPyStyle(
{
"questionmark": "#000000",
"answer": "#111111",
"input": "#444444",
"question": "#222222",
"instruction": "#333333",
"pointer": "#555555",
"checkbox": "#66666",
"separator": "#777777",
"skipped": "#888888",
"fuzzy_prompt": "#999999",
"fuzzy_info": "#aaaaaa",
"marker": "#bbbbbb",
"validation-toolbar": "#dddddd",
"fuzzy_match": "#dddddd",
"frame.border": "#cccccc",
},
),
)
@patch("InquirerPy.utils.print_formatted_text")
@patch("InquirerPy.utils.run_in_terminal")
@patch.object(Application, "is_running", new_callable=PropertyMock)
def test_color_print(self, mocked_running, mocked_term, mocked_print):
mocked_running.return_value = True
color_print([("class:aa", "haha")], style={"aa": "#ffffff"})
mocked_term.assert_called_once()
mocked_term.reset_mock()
mocked_running.return_value = False
color_print([("class:aa", "haha")], style={"aa": "#ffffff"})
mocked_term.assert_not_called()
mocked_print.assert_called_once()
```
|
{
"source": "jfilliben/compare-ios-lists",
"score": 2
}
|
#### File: jfilliben/compare-ios-lists/compare_access_lists.py
```python
from contextlib import contextmanager
import netmiko
from netmiko import ConnectHandler
from netmiko.ssh_exception import NetMikoTimeoutException, NetMikoAuthenticationException
import getpass
import argparse
#
#use variables for constants
#
def build_router_dict(router_name, ssh_username, ssh_password, global_verbose):
#
# Builds dictionary to be passed to netmiko
#
# detect IOS type or read it from somewhere?
#
routerDict = {
'device_type': 'cisco_ios',
'ip': router_name,
'username': ssh_username,
'password': <PASSWORD>,
'verbose': global_verbose,
# 'global_delay_factor': 3,
}
return routerDict
@contextmanager
def ssh_manager(net_device):
'''
args -> network device mappings
returns -> ssh connection ready to be used
'''
try:
SSHClient = netmiko.ssh_dispatcher(
device_type=net_device["device_type"])
try:
conn = SSHClient(**net_device)
connected = True
except (NetMikoTimeoutException, NetMikoAuthenticationException) as e:
# if routerDict.verbose:
# print("could not connect to {}, due to {}".format(
# net_device["ip"], e))
connected = False
except (NetMikoTimeoutException, NetMikoAuthenticationException) as e:
# if routerDict.verbose:
# print("could not connect to {}, due to {}".format(
# net_device["ip"], e))
connected = False
try:
if connected:
yield conn
else:
yield False
finally:
if connected:
conn.disconnect()
#
def get_access_lists(routerDict, acl_list, global_verbose):
#
    # Returns dictionary of access-lists; each dict item is a list of ACL lines
#
temp_return_lists = {}
return_lists = {}
with ssh_manager(routerDict) as netConnect:
for acl_name in acl_list:
command = "show ip access-list %s" % (acl_name)
try:
output = netConnect.send_command(command)
except Exception as e:
print("Encountered a non setup/teardown error", e)
return {}
if global_verbose: print "%s" % output
if not output:
print "'%s' output empty... too slow?" % (command)
temp_return_lists[acl_name] = output.splitlines()
# Remove "(xx matches)" for proper comparison
for acl_name in temp_return_lists:
return_lists[acl_name] = []
for x in temp_return_lists[acl_name]:
temp_line = x.split('(')[0]
while temp_line.endswith(" "):
temp_line = temp_line[:-1]
return_lists[acl_name].append(temp_line)
return return_lists
def parse_args():
parser = argparse.ArgumentParser(
description = 'verifies access-lists for 3rd parties')
parser.add_argument('--verbose', action='store_true',
help='provide additional output for verification')
parser.add_argument('--username', help='username for SSH connections')
parser.add_argument('--password', help='password for SSH username')
parser.add_argument('--routerfile', help='source file for list of routers',
required = True)
parser.add_argument('--accesslistfile', help='source file for list of access-list to check against routers', required = True)
args = parser.parse_args()
if args.verbose:
global_verbose = True
else:
global_verbose = False
if args.username:
ssh_username = args.username
else:
ssh_username = raw_input("Enter Username> ")
if args.password:
ssh_password = args.password
else:
ssh_password = getpass.getpass("Enter Password> ")
try:
with open(args.routerfile) as f:
router_list = f.read().splitlines()
except:
quit("router file cannot be found")
router_list = [x for x in router_list if x[0] != "#" and x[0] != " "]
try:
with open(args.accesslistfile) as f:
acl_list = f.read().splitlines()
except:
quit("router file cannot be found")
acl_list = [x for x in acl_list if x[0] != "#" and x[0] != " "]
return global_verbose, ssh_username, ssh_password, router_list, acl_list
#
# MAIN
#
# Handle arguments
def main():
# Get arguments / global variables
global_verbose, ssh_username, ssh_password, router_list, acl_list = parse_args()
#
# Get output
outputDict = {}
for router in router_list:
routerDict = build_router_dict(router, ssh_username, ssh_password, global_verbose)
outputDict[router] = get_access_lists(routerDict, acl_list, global_verbose)
#
# Remove blank lines
#for router in outputDict:
# for output in outputDict:
# outputDict[router][output].remove("")
#
# Check for inconsistencies between routers
for acl_name in outputDict[router_list[0]].iterkeys():
for router in outputDict:
missing = list(set(outputDict[router_list[0]][acl_name]) - set(outputDict[router][acl_name]))
extra = list(set(outputDict[router][acl_name]) - set(outputDict[router_list[0]][acl_name]))
print "Router %s: %s" % (router, acl_name)
if not(missing) and not(extra):
print " access-list is correct"
if missing:
missing.sort()
print "Missing Entries:"
for entry in missing:
print entry
if extra:
extra.sort()
print "Extra Entries:"
for entry in extra:
print entry
#
# Check for duplicated accesses between two ACLs in a single router
# for router in outputDict:
# list1 = []
# for x in range(2, len(outputDict[router][0])):
# list1.append(outputDict[router][0][x].split()[3])
# list2 = []
# for x in range(2, len(outputDict[router][1])):
# list2.append(outputDict[router][1][x].split()[3])
# dup_entries = list(set(list1).intersection(list2))
# print "Router %s" % (router)
# if dup_entries:
# dup_entries.sort()
# print "Duplicate Entries:"
# for entry in dup_entries:
# print entry
# else:
# print " No duaclicates"
# __main__
if __name__ == '__main__':
main()
```
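The comparison in `main()` reduces to two set differences against the first router's ACL; the self-contained sketch below (with made-up ACL lines) illustrates that step without any SSH access.
```python
# Stand-alone illustration of the missing/extra computation used in main().
reference_acl = [
    "10 permit ip 10.0.0.0 0.0.0.255 any",
    "20 permit tcp host 10.1.1.1 any eq 443",
    "30 deny ip any any",
]
router_acl = [
    "10 permit ip 10.0.0.0 0.0.0.255 any",
    "30 deny ip any any",
    "40 permit udp any any eq 123",
]

missing = sorted(set(reference_acl) - set(router_acl))  # lines the router lacks
extra = sorted(set(router_acl) - set(reference_acl))    # lines only this router has

print("Missing:", missing)  # ['20 permit tcp host 10.1.1.1 any eq 443']
print("Extra:", extra)      # ['40 permit udp any any eq 123']
```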
|
{
"source": "jfillmore/resht",
"score": 3
}
|
#### File: resht/resht/dbg.py
```python
import inspect
import json
import os
import sys
import time
import traceback
import typing
dark_colors = {
'str': '0;37',
'bool': '1;36',
'int': '0;32',
'float': '1;32',
'NoneType': '0;36',
'object': '0;36',
'instance': '0;36',
'module': '0;36',
'classobj': '0;36',
'builtin_function_or_method': '0;36',
    'ArgSpec': '0;36;40',
'list': ['1;37', '1;33', '0;33', '1;31', '0;31'],
'tuple': ['1;37', '1;33', '0;33', '1;31', '0;31'],
'dict': ['1;37', '1;33', '0;33', '1;31', '0;31'],
'bullet': '1;30',
'seperator': '1;30'
}
light_colors = {
'str': '1;30',
'bool': '0;36',
'int': '1;31',
'float': '1;31',
'NoneType': '0;36',
'object': '0;30',
'instance': '0;30',
'module': '0;30',
'classobj': '0;30',
'builtin_function_or_method': '0;30',
    'ArgSpec': '0;30;40',
'list': ['0;00', '0;34', '0;35', '0;31'],
'tuple': ['0;00', '0;34', '0;35', '0;31'],
'dict': ['0;00', '0;34', '0;35', '0;31'],
'bullet': '1;30',
'seperator': '1;30'
}
class CodeFrame(typing.NamedTuple):
line: int
where: str
depth: int
ours: bool # e.g. a module or something of our own?
class CodeTrace(typing.NamedTuple):
frames: typing.List[CodeFrame]
error: str
error_type: str
@classmethod
def from_exception(cls, ex: Exception):
tb = ex.__traceback__
cwd = os.getcwd()
frames = []
for i, frame in enumerate(traceback.extract_tb(tb)):
code_line = frame.name + ' - ' + frame.filename[len(cwd) + 1:]
frames.append(CodeFrame(
depth=i,
line=frame.line,
ours=frame.filename.startswith(cwd),
where=f'{code_line}:{frame.lineno}',
))
return cls(
error=str(ex),
error_type=type(ex).__name__,
frames=frames,
)
def get_obj_info(obj, include_private=False):
obj_info = {
'type': type(obj).__name__,
'callable': callable(obj),
'value': str(obj),
'repr': repr(obj),
'description': str(getattr(obj, '__doc__', '')).strip()
}
# take a look at what it contains and build up description of what we've got
if obj_info['type'] == 'function':
obj_info['arg_spec'] = inspect.getargspec(obj)
elif not obj_info['type'] in ('str', 'int', 'float', 'bool', 'NoneType', 'ArgSpec'):
for key in dir(obj):
if key.startswith('__') and not include_private:
continue
item = getattr(obj, key)
if inspect.ismethod(item):
if not 'methods' in obj_info:
obj_info['methods'] = {}
obj_info['methods'][key] = {
'description': str(item.__doc__)[0:64].strip(),
'arg_spec': inspect.getargspec(item)
}
elif inspect.ismodule(item):
if not 'modules' in obj_info:
obj_info['modules'] = {}
obj_info['modules'][key] = str(item.__doc__)[0:64].strip()
elif inspect.isclass(item):
if not 'classes' in obj_info:
obj_info['classes'] = {}
obj_info['classes'][key] = str(item.__doc__)[0:64].strip()
else:
if not 'properties' in obj_info:
obj_info['properties'] = {}
obj_info['properties'][key] = obj2str(item, short_form=True)
return obj_info
def print_tb(exception=None):
    # Print a readable traceback for the given exception, or for the current stack.
    if exception:
        cwd = os.getcwd()
        tb = [
            (
                frame.filename[len(cwd) + 1:] if frame.filename.startswith(cwd) else frame.filename,
                frame.lineno,
                frame.name,
                frame.line,
            )
            for frame in traceback.extract_tb(exception.__traceback__)
        ]
    else:
        tb = traceback.extract_stack()
    print('\n'.join([
        "\tTraceback (most recent call on bottom):",
        '\n'.join(['\t\t%s:%i, method "%s"\n\t\t\tLine: %s' % tuple(t) for t in tb])
    ]))
def log(
msg, color='1;34', data=None, data_color='0;32', data_inline=False,
symbol='#', no_color=False,
):
if no_color:
color = None
data_color = None
color_alt = None
else:
color_parts = color.split(';', 1)
if len(color_parts) > 1:
is_light = bool(int(color_parts[0]))
hue = color_parts[1]
else:
is_light = False
hue = color_parts[0]
color_alt = str(int(not is_light)) + ';' + hue
log_str = (shell_color(symbol, color_alt) + ' ') if symbol else ''
log_str += shell_color(msg, color) + ('' if data_inline else '\n')
sys.stderr.write(log_str)
if data is not None:
if isinstance(data, bytes):
data = data.decode('utf-8')
data_str = data if isinstance(data, str) else json.dumps(data, indent=4)
sys.stderr.write(shell_color(data_str, data_color) + '\n')
def shell_color(obj, color=None, skip_color=False):
if color and not skip_color:
return f'\033[{color}m{str(obj)}\033[0;0m'
return str(obj)
def obj2str(
obj, depth=0, color=True, indent_char=' ', indent_size=4, inline=True,
short_form=False, invert_color=False
):
"""
Returns a formatted string, optionally with color coding
"""
palette = light_colors if invert_color else dark_colors
def rdump(obj, depth=0, indent_size=4, inline=False, short_form=False):
if short_form:
return str(obj)[0:80 - (depth * indent_size)]
obj_info = get_obj_info(obj)
# indent ourselves
dump = depth * (indent_size * indent_char)
# see what we've got and recurse as needed
if obj_info['type'] == 'list':
if not len(obj):
dump += shell_color(' []', palette['object'], skip_color=not color) + '\n'
else:
skip_next_indent = True
for i in range(0, len(obj)):
item = obj[i]
item_info = get_obj_info(item)
                    # handle any indentation we may need to do
if skip_next_indent:
skip_next_indent = False
else:
dump += depth * (indent_size * indent_char)
# add in the key, cycling through the available colors based on depth
dump += shell_color(
i,
palette[obj_info['type']][(depth) % (len(palette[obj_info['type']]))],
skip_color=not color,
)
# format it depending on whether we've nested list with any empty items
if item_info['type'] in ('dict', 'tuple', 'list'):
if not len(item):
dump += rdump(item, 0, indent_size, True)
else:
dump += '\n' + rdump(item, depth + 1, indent_size, True)
else:
dump += rdump(item, 1, 1)
elif obj_info['type'] == 'dict':
if not len(obj):
dump += shell_color(' {}', palette['object'], skip_color=not color) + '\n'
else:
skip_next_indent = True
for key in obj:
item = obj[key]
item_info = get_obj_info(item)
                    # handle any indentation we may need to do
if skip_next_indent:
skip_next_indent = False
else:
dump += depth * (indent_size * indent_char)
# add in the key, cycling through the available colors based on depth
dump += shell_color(key, palette[obj_info['type']][(depth) % (len(palette[obj_info['type']]))], skip_color=not color)
# add in a bullet
dump += shell_color(':', palette['bullet'], skip_color=not color)
# format it depending on whether we've nested list with any empty items
if item_info['type'] in ('dict', 'tuple', 'list'):
if not len(item):
dump += rdump(item, 0, indent_size, True)
else:
dump += '\n' + rdump(item, depth + 1, indent_size, True)
if item_info['type'] == 'tuple':
dump += '\n'
else:
dump += rdump(item, 1, 1)
elif obj_info['type'] == 'tuple':
if not len(obj):
dump += shell_color(' ()', palette['object'], skip_color=not color)
else:
dump += shell_color('(', palette['bullet'], skip_color=not color)
dump += ', '.join([str(item)[0:32] for item in obj if item != ()])
dump += shell_color(')', palette['bullet'], skip_color=not color)
elif obj_info['type'] == 'str':
dump += shell_color(obj, palette[obj_info['type']], skip_color=not color)
elif obj_info['type'] == 'bool':
dump += shell_color(obj, palette[obj_info['type']], skip_color=not color)
elif obj_info['type'] == 'NoneType':
dump += shell_color('(none/null)', palette[obj_info['type']], skip_color=not color)
elif obj_info['type'] == 'int':
dump += shell_color(obj, palette[obj_info['type']], skip_color=not color)
elif obj_info['type'] == 'float':
dump += shell_color(obj, palette[obj_info['type']], skip_color=not color)
elif obj_info['type'] == 'object':
dump += shell_color('(object)', palette[obj_info['type']], skip_color=not color)
elif obj_info['type'] == 'instance':
dump += rdump(obj_info, depth)
elif obj_info['type'] == 'module':
dump += rdump(obj_info, depth)
elif obj_info['type'] == 'function':
dump += rdump(obj_info, depth)
elif obj_info['type'] == 'classobj':
dump += rdump(obj_info, depth)
elif obj_info['type'] == 'builtin_function_or_method':
dump += rdump(obj_info, depth)
elif obj_info['type'] == 'ArgSpec':
dump += '\n' + rdump({
'args': obj.args,
'varargs': obj.varargs,
'keywords': obj.keywords,
'defaults': obj.defaults,
}, depth + 1, inline=True)
else:
dump += rdump(obj_info, depth)
if not inline:
dump += '\n'
return dump # hack hack hack!
return rdump(obj, depth, indent_size, inline, short_form)
def pretty_print(
obj, depth=0, color=True, indent_char=' ', indent_size=4,
stream=sys.stdout, invert_color=False
):
"""
    Pretty-prints the contents of the list, tuple, sequence, etc.
"""
output = obj2str(
obj, depth, color, indent_char, indent_size, inline=True,
invert_color=invert_color
)
if not output.endswith("\n"):
output = output + "\n"
stream.write(output)
pp = pretty_print
```
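A short usage sketch for the debug helpers above (not part of the original file); the import path follows the file header, and the payload is made up.
```python
# Illustrative use of the debug helpers; log() writes to stderr, pretty_print() to stdout.
from resht.dbg import log, obj2str, pretty_print

payload = {"user": {"id": 42, "roles": ["admin", "ops"]}, "active": True}

log("request finished", data=payload)   # colored message plus a JSON dump
pretty_print(payload)                   # recursive, color-coded dump
plain = obj2str(payload, color=False)   # the same dump as a plain string
```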
#### File: resht/resht/usage.py
```python
from typing import NamedTuple
class UsageHint(NamedTuple):
overview: str
examples: str
arguments: str
params: str
data_paths: str
shell: str
def help(self):
return ''.join([
self.overview,
self.examples,
self.arguments,
self.params,
self.data_paths,
self.shell,
])
usage_overview = """
USAGE: resht HTTP-VERB PATH [API_PARAMS] [ARGUMENTS]
JSON-oriented REST API client with shell mode for session-based flows.
"""
usage_arguments = """
ARGUMENTS
---------------------------------------------------------------------------
REQUEST ARGS
-B, --basic USER:PASS HTTP basic authentication.
-d, --data DATA Set request body to the string given; cannot be
used with API params or --json
-H, --header HEADER HTTP header (e.g. 'Foo: bar', repeatable)
    -I, --insecure          Do not validate SSL certificates (dangerous!)
-Q, --query QUERY_DATA Extra query data to merge
(e.g. "foo=bar&food=yummy", repeatable).
-f, --form Override default of sending JSON data
-j, --json STRING Merge JSON-encoded string into API params (repeatable)
-t, --timeout SECONDS Set request timeout in seconds (0=unlimited; default=30)
-u, --url URL Base URL for API requests (default: https://localhost/).
MISC ARGS
-h, --help This information.
-s, --shell Start shell mode; run initial API
-v, --verbose Print verbose debugging info to stderr.
OUTPUT ARGS:
-c, --color Color formatted JSON responses (default=True).
-C, --no-color Do not color formatted JSON responses.
-r, --raw Don't format response data; return raw response.
-x, --extract PATH Parse JSON to return specific data; may be repeated.
-X, --exclude PATH Exclude specified path from JSON data; may be repeated.
> FILE Write API response to specified file.
>> FILE Append API response to specified file.
"""
# TODO: foo.bar[]=1 foo.bar[]= for arrays?
usage_params = \
"""
API PARAMS
---------------------------------------------------------------------------
API parameters are defined through a terse dotted notation making nested
objects easy to quickly define. Non-boolean values are assigned using
the delimiter "=" (string) or ":=" (JSON encoded).
Arrays must be created using ":=" or using "-d|--data".
BOOLEANS:
foo {"foo": true}
foo.bar {"foo": {"bar": true}}
^foo {"foo": false}
!foo {"foo": false}
STRINGS:
foo=bar {"foo": "bar"}
foo.bar=3 foo.bard=abc {"foo": {"bar": "3", "bard": "abc"}}
OTHER (RAW JSON):
foo:='{"bar":3}' {"foo": {"bar": 3}}
foo.bar:=3.14 {"foo": {"bar": 3.14}}
"""
usage_data_paths = \
"""
JSON PATHS (-x|--extract, -X|--exclude)
---------------------------------------------------------------------------
The JSON data can be filtered based on index, key matches, ranges, etc.
Arrays:
By Index:
- 'foo/0', 'foo/2', 'foo/-1' (last item)
By Range:
- 'foo/:' or 'foo/*' (all items within the array),
- 'foo/2:', 'foo/:2', 'foo/1:5', 'foo/-2:' (last 2),
- 'foo/:-2' (all but last two),
- 'foo/1:-3' (between first and up until 3rd to last)
Dictionaries:
Regular Expressions:
- 'foo/b..?r' = foo/bar, foo/beer
- 'foo/bar/.*[pP]assw(or)?d' == anything within foo/bar like a password
"""
usage_shell = \
"""
SHELL COMMANDS
---------------------------------------------------------------------------
HTTP_VERB URL [PARAMS] Perform request
cd Change the base URL (e.g. "cd customers/8; cd ../9").
help This information.
quit Adios! (quit shell).
headers [key=val, -key] List, set, or clear headers.
set [PARAMS] List or set configuration options.
sh EXPR Run a BASH shell command.
"""
# TODO:
# !python expr Execute a python expression
# \HTTP_VERB ... (no extra stuff for this one command)
# env -i or such to clear
# FIXME: these suck
usage_examples = \
"""
EXAMPLES:
---------------------------------------------------------------------------
$ resht -u https://example.com/api get foo x=1
{...}
$ resht -u https://example.com/api/
> get site/foo.com -v
> post site -j domain=foo.com
> cd site/foo.com
> get ./
"""
hints = UsageHint(
usage_overview,
usage_examples,
usage_arguments,
usage_params,
usage_data_paths,
usage_shell,
)
```
|
{
"source": "jfillmore/rest-cli",
"score": 3
}
|
#### File: rest-cli/rest_cli/util.py
```python
import re
def get_args(my_args=None, args=None, merge=False):
'''Returns a dict of items in args found in my_args.'''
args = args if isinstance(args, dict) else {}
my_args = my_args if isinstance(my_args, dict) else {}
for arg in args:
value = args[arg]
if arg in my_args or merge:
my_args[arg] = value
return my_args
def pyv(version):
'''Returns whether or not the current interpreter is the version specified or newer.'''
# e.g. >>> sys.version_info
# (2, 6, 4, 'final', 0)
import sys
i = 0
for num in version:
if num > sys.version_info[i]:
return False
i += 1
return True
def pretty_path(path, absolute=False, no_trailing=True):
if no_trailing:
path = path.rstrip('/')
if absolute:
path = '/' + path
regex = re.compile(r'/+')
path = regex.sub('/', path)
return path
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
```
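The helpers above are small enough that a few illustrative calls (expected values shown as comments) cover their behavior; the import path follows the file header.
```python
# Expected behaviour of the small utility helpers.
from rest_cli.util import get_args, is_number, pretty_path

pretty_path("api//v1//users/", absolute=True)                 # -> '/api/v1/users'
is_number("3.14")                                             # -> True
is_number("abc")                                              # -> False
get_args({"verbose": False}, {"verbose": True, "extra": 1})   # -> {'verbose': True}
```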
|
{
"source": "jfilter/foia-bot",
"score": 3
}
|
#### File: source/markov_chains/util.py
```python
import json
def save_json(filename, data):
"""save json file"""
with open(filename, 'w') as outfile:
json.dump(data, outfile)
def read_json(filename):
"""open json file"""
data = None
with open(filename, 'r') as infile:
data = json.load(infile)
return data
```
#### File: foia-bot/source/tweet.py
```python
import time
import datetime
import twitter
from markov_chains import german_text
from config import config_no, config_yes
MAX_TWEET_LENGTH = 280
greeting = ' Sehr geehrte/r Antragsteller/in.'
ending = ' MfG'
num_tweets = 3
class FoiaBot:
def __init__(self, config):
self.api = twitter.Api(consumer_key=config["consumer_key"],
consumer_secret=config["consumer_secret"],
access_token_key=config["access_token"],
access_token_secret=config["access_token_secret"], sleep_on_rate_limit=True)
self.screen_name = config["screen_name"]
self.model = german_text.setup_model(config["model_path"])
self.hour_to_tweet = config["hour_to_tweet"]
def get_favorites(self):
favorites = self.api.GetFavorites(
screen_name=self.screen_name, count=200)
print(favorites)
fav_set = set([f.id for f in favorites])
return fav_set
def get_status_to_work_on(self):
favorites = self.get_favorites()
status_list = self.api.GetMentions(count=200, trim_user=True,
contributor_details=False, include_entities=False)
for status in status_list:
print(status)
if status.id in favorites:
continue
if status.in_reply_to_status_id is not None:
continue
if not status.text.startswith('@' + self.screen_name):
continue
self.post_replies(status)
def post_replies(self, status):
tweets = self.create_tweets()
print(tweets)
success = True
reply_to_status_id = status.id
for tweet in tweets:
response = self.api.PostUpdate(tweet, in_reply_to_status_id=reply_to_status_id, auto_populate_reply_metadata=True,
exclude_reply_user_ids=False, trim_user=True, verify_status_length=False)
if response is None:
success = False
break
else:
reply_to_status_id = response.id
if success:
self.api.CreateFavorite(status=status)
def generate_sentence(self, tweet_text, chars_left, set_limit=False):
max_length = 150
if set_limit:
max_length = chars_left
new_sent = self.model.make_short_sentence(max_length, tries=100)
if new_sent is not None and len(new_sent) < chars_left:
tweet_text += ' ' + new_sent
return tweet_text
# https://stackoverflow.com/questions/7703865/going-from-twitter-date-to-python-datetime-date
def get_date_from_twitter_string(self, created_at):
x = time.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y')
return datetime.datetime.fromtimestamp(time.mktime(x))
def tweet_once_a_day(self):
now = datetime.datetime.now()
print(now.hour)
if now.hour == self.hour_to_tweet:
last_status_list = self.api.GetUserTimeline(screen_name=self.screen_name, count=1,
include_rts=False, trim_user=True, exclude_replies=True)
print(last_status_list)
if last_status_list is None:
return
if len(last_status_list) == 0:
self.post_single_tweet()
if len(last_status_list) == 1:
last_status = last_status_list[0]
created_at_date = self.get_date_from_twitter_string(
last_status.created_at)
time_diff = now - created_at_date
print('time_diff', time_diff)
time_diff_hours = time_diff.seconds / 3600 + time_diff.days * 24
print(time_diff_hours)
if time_diff_hours > 20: # something is broken with the date but whatever
self.post_single_tweet()
def post_single_tweet(self):
tweet_text = self.generate_single_tweet_text()
response = self.api.PostUpdate(tweet_text, verify_status_length=False)
def generate_single_tweet_text(self):
tweet_text = ""
while True:
chars_left = MAX_TWEET_LENGTH - len(tweet_text)
chars_left -= 1 # for the space
if chars_left < 20:
break
if chars_left < 70:
tweet_text = self.generate_sentence(
tweet_text, chars_left, True)
else:
tweet_text = self.generate_sentence(
tweet_text, chars_left)
return tweet_text
def create_tweets(self):
tweets = []
for i in range(num_tweets):
tweet_text = f'{i + 1}/{num_tweets}'
if i == 0:
tweet_text += greeting
while True:
chars_left = MAX_TWEET_LENGTH - \
len(tweet_text) - 1 # because of space
# ensure space for the ending
if i + 1 == num_tweets:
chars_left -= len(ending)
if chars_left < 20:
# at ending
if i + 1 == num_tweets:
tweet_text += ending
break
if chars_left < 70:
tweet_text = self.generate_sentence(
tweet_text, chars_left, True)
else:
tweet_text = self.generate_sentence(
tweet_text, chars_left)
tweets.append(tweet_text)
return tweets
def run(self):
self.get_status_to_work_on()
def main():
print('main called')
no_bot = FoiaBot(config_no)
print('after setting up no bot')
yes_bot = FoiaBot(config_yes)
print('after setting up yes bot')
no_bot.run()
print('after running no bot')
yes_bot.run()
print('after running yes bot')
no_bot.tweet_once_a_day()
yes_bot.tweet_once_a_day()
print('after tweet once a day')
def lambda_handler(event, context):
print('handler called')
main()
print('handler about to finish')
# if __name__ == '__main__':
# main()
```
|
{
"source": "jfilter/geocode-lobbycontrol-parteispendendatenbank",
"score": 3
}
|
#### File: jfilter/geocode-lobbycontrol-parteispendendatenbank/fetch_bundesland_to_ort.py
```python
import csv
from time import sleep
import requests
STATES = [
"Sachsen-Anhalt",
"Niedersachsen",
"Sachsen",
"Bayern",
"Mecklenburg-Vorpommern",
"Hamburg",
"Schleswig-Holstein",
"Rheinland-Pfalz",
"Hessen",
"Baden-Württemberg",
"Thüringen",
"Saarland",
"Bremen",
"Brandenburg",
"Nordrhein-Westfalen",
"Berlin"
]
INPUT_PATH = "raw/original_data.csv"
OUTPUT_PATH = "intermediate/ort_bundesland.csv"
GEOCODE_API = "https://nominatim.openstreetmap.org/search?format=json&country=germany&accept-language=de&q="
def geocode_city(city):
res = requests.get(GEOCODE_API + city)
res_json = res.json()
for result in res_json:
# The display name string contains the Bundesland but the position in
        # the string varies among the data points. Thus, go over each
# possible candidate.
candidates = result["display_name"].split(",")
i = 0
while i < len(candidates):
trimmed = candidates[i].strip()
            if trimmed in STATES:  # exact case-sensitive match
return trimmed
i += 1
return None
# we only need to look up each city once
city_names = set()
# read in data
with open(INPUT_PATH, newline='') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
city_names.add(row[6])
print(city_names)
# main work happens here
data = []
for city in city_names:
sleep(0.1)
state = geocode_city(city)
print(city)
print(state)
data.append({'Ort': city, 'Bundesland': state})
# persist to CSV
with open(OUTPUT_PATH, 'w', newline='') as csvfile:
fieldnames = ['Ort', 'Bundesland']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
writer.writerows(data)
```
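The state lookup depends on scanning the comma-separated `display_name` string returned by Nominatim; the logic can be exercised offline with a made-up response, as in this sketch (shortened `STATES` list, sample string invented).
```python
# Offline illustration of the display_name scan performed in geocode_city().
STATES = ["Sachsen", "Bayern", "Berlin"]  # shortened list for the example

display_name = "Mitte, Berlin, 10117, Deutschland"  # invented sample response

state = None
for candidate in display_name.split(","):
    trimmed = candidate.strip()
    if trimmed in STATES:  # exact, case-sensitive match as in geocode_city()
        state = trimmed
        break

print(state)  # -> 'Berlin'
```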
|
{
"source": "jfilter/german-lemmatizer",
"score": 3
}
|
#### File: german-lemmatizer/german_lemmatizer/lemmatize.py
```python
import tempfile
from pathlib import Path
import docker
from joblib import Parallel, delayed
from tqdm import tqdm
docker_image_tag = "filter/german-lemmatizer:0.5.0"
# https://stackoverflow.com/a/312464/4028896
def to_chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i : i + n]
def escape_text(text):
return text.replace("\n", "\\n")
def unescape_text(text):
return text.replace("\\n", "\n")
def process_chunk(c, i, working_dir, escape, remove_stop):
with tempfile.TemporaryDirectory(
dir=working_dir, suffix=str(i) + "_input"
) as input_folder:
with tempfile.TemporaryDirectory(
dir=working_dir, suffix=str(i) + "_output"
) as output_folder:
client = docker.from_env()
if escape:
c = [escape_text(txt) for txt in c]
Path(input_folder + "/data.txt").write_text("\n".join(c))
# we need absolute path
input_folder = str(Path(input_folder + "/").resolve())
output_folder = str(Path(output_folder + "/").resolve())
commands = ["--line"]
if escape:
commands.append("--escape")
if remove_stop:
commands.append("--remove_stop")
while True:
try:
client.containers.run(
docker_image_tag,
" ".join(commands),
volumes={
input_folder: {"bind": "/input", "mode": "ro"},
output_folder: {"bind": "/output", "mode": "rw"},
},
)
break
except Exception as e:
print("failed, next try! " + str(e))
with open(Path(output_folder + "/data.txt")) as output_file:
lines = output_file.readlines()
lines = [l.strip() for l in lines]
if escape:
lines = [unescape_text(txt) for txt in lines]
return lines
def lemmatize(
texts, chunk_size=10000, working_dir=".", escape=False, n_jobs=1, remove_stop=False
):
# pull image if not present
client = docker.from_env()
images_list = sum([l.tags for l in client.images.list()], [])
if not docker_image_tag in images_list:
client.images.pull(docker_image_tag)
chunks = to_chunks(texts, chunk_size)
if n_jobs > 0:
results = Parallel(n_jobs=n_jobs, backend="multiprocessing")(
delayed(process_chunk)(c, i, working_dir, escape, remove_stop)
for i, c in tqdm(enumerate(chunks), total=(len(texts) // chunk_size) + 1)
)
else:
results = [
process_chunk(c, i, working_dir, escape, remove_stop)
for i, c in tqdm(enumerate(chunks), total=(len(texts) // chunk_size) + 1)
]
for r_chunk in results:
for r in r_chunk:
yield r
```
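A minimal usage sketch for `lemmatize()`; it assumes a running Docker daemon, since the function delegates to the `filter/german-lemmatizer` image and pulls it on first use. The expected output matches the package's own tests below.
```python
# Minimal sketch; requires a running Docker daemon.
from german_lemmatizer import lemmatize

texts = ["Johannes war einer von vielen guten Schülern."]
lemmas = list(lemmatize(texts))
# -> ['Johannes sein einer von vielen gut Schüler.']
```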
#### File: german-lemmatizer/tests/test_lemma.py
```python
import os
import pathlib
import shutil
import pytest
import german_lemmatizer
def test_lemma():
res = german_lemmatizer.lemmatize(["Johannes war einer von vielen guten Schülern."])
assert list(res) == ["Johannes sein einer von vielen gut Schüler."]
def test_lemma_mass():
res = german_lemmatizer.lemmatize(
["Johannes war einer von vielen guten Schülern."] * 1000,
chunk_size=400,
n_jobs=2,
)
assert list(res) == ["Johannes sein einer von vielen gut Schüler."] * 1000
def test_lemma_escape():
res = german_lemmatizer.lemmatize(
[
"Johannes war einer von vielen guten Schülern.",
"""Peter war ein
Idiot.""",
],
escape=True,
)
assert list(res) == [
"Johannes sein einer von vielen gut Schüler.",
"""Peter sein ein
Idiot.""",
]
def test_lemma_stop():
res = german_lemmatizer.lemmatize(
["Johannes war einer von vielen guten Schülern."], remove_stop=True
)
assert list(res) == ["Johannes gut Schüler."]
```
|
{
"source": "jfilter/german-preprocessing",
"score": 3
}
|
#### File: german-preprocessing/german/preprocessing.py
```python
import os
from tqdm import tqdm
import cleantext
from german_lemmatizer import lemmatize
from joblib import Parallel, delayed
def clean(x, **kwargs):
return cleantext.clean(x, lang="de", **kwargs)
def first_clean(x):
return clean(x, lower=False, no_line_breaks=True)
def second_clean(x):
return clean(
x,
fix_unicode=False,
to_ascii=False,
no_urls=True,
no_emails=True,
no_digits=True,
no_punct=True,
)
def preprocess(texts, n_jobs=None, remove_stop=True):
if n_jobs is None:
n_jobs = os.cpu_count()
texts = Parallel(n_jobs=n_jobs)(delayed(first_clean)(row) for row in tqdm(texts))
texts = lemmatize(texts, n_jobs=n_jobs, remove_stop=remove_stop)
texts = Parallel(n_jobs=n_jobs)(delayed(second_clean)(row) for row in tqdm(texts))
return texts
```
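A sketch of the `preprocess()` pipeline above; like the lemmatizer it wraps, it needs a running Docker daemon, and the exact result depends on the underlying models, so the output shown is only approximate. The import path follows the file header.
```python
# Minimal sketch; requires Docker for the lemmatization step.
from german.preprocessing import preprocess

texts = ["Johannes war einer von vielen guten Schülern!"]
cleaned = preprocess(texts, n_jobs=1)
# roughly -> ['johannes gut schüler'] (lowercased, lemmatized, stop words removed)
```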
|
{
"source": "jfilter/hyperhyper",
"score": 3
}
|
#### File: hyperhyper/hyperhyper/bunch.py
```python
import logging
from pathlib import Path
from timeit import default_timer as timer
import dataset
import numpy as np
from gensim.models.keyedvectors import WordEmbeddingsKeyedVectors
from . import evaluation, pair_counts, pmi, svd
from .corpus import Corpus
from .experiment import record, results_from_db
from .utils import (delete_folder, load_arrays, load_matrix, save_arrays,
save_matrix)
logger = logging.getLogger(__name__)
class Bunch:
def __init__(
self, path, corpus=None, force_overwrite=False, text_chunk_size=100000
):
self.db = None
self.path = Path(path)
if force_overwrite and self.path.exists():
delete_folder(self.path)
if not corpus is None and not force_overwrite:
if Path(self.path / "corpus.pkl").is_file():
raise ValueError(
"There is already another corpus file saved. Set `force_overwrite` to True if you want to override it."
)
if corpus is None:
self.corpus = Corpus.load(str(self.path / "corpus.pkl"))
else:
self.path.mkdir(parents=True, exist_ok=True)
self.corpus = corpus
self.corpus.texts_to_file(self.path / "texts", text_chunk_size)
self.corpus.save(str(self.path / "corpus.pkl"))
def get_db(self):
"""
Connecting to a SQLite database.
"""
if self.db is None:
self.db = dataset.connect(f"sqlite:///{self.path}/results.db")
return self.db
def dict_to_path(self, folder, dict):
"""
Return a file path for an embedding based on parameters.
"""
# cast integer floats to ints
for k, v in dict.items():
if type(v) is float:
if v.is_integer():
dict[k] = int(v)
filenames = [f"{k}_{v}".lower() for k, v in dict.items()]
filename = "_".join(sorted(filenames))
if len(filename) == 0:
filename = "default"
filename += ".npz"
full_path = self.path / folder / filename
return full_path
def pair_counts(self, **kwargs):
"""
Count pairs.
"""
pair_path = self.dict_to_path("pair_counts", kwargs)
if pair_path.is_file():
try:
logger.info("retrieved already saved pair count")
return load_matrix(pair_path)
except Exception as e:
logger.info(f"creating pair counts, error while loading files: {e}")
print("create new pair counts")
pair_path.parent.mkdir(parents=True, exist_ok=True)
count_matrix = pair_counts.count_pairs(self.corpus, **kwargs)
save_matrix(pair_path, count_matrix)
return count_matrix
def pmi_matrix(self, cds=0.75, pair_args={}, **kwargs):
"""
Create a PMI matrix.
"""
pmi_path = self.dict_to_path("pmi", {"cds": cds, **pair_args})
if pmi_path.is_file():
try:
logger.info("retrieved already saved pmi")
return load_matrix(pmi_path)
except Exception as e:
logger.info(f"creating new pmi, error while loading files: {e}")
print("create new pmi")
counts = self.pair_counts(**pair_args, **kwargs)
start = timer()
pmi_matrix = pmi.calc_pmi(counts, cds)
end = timer()
logger.info("pmi took " + str(round(end - start, 2)) + " seconds")
pmi_path.parent.mkdir(parents=True, exist_ok=True)
save_matrix(pmi_path, pmi_matrix)
logger.info("matrix saved")
return pmi_matrix
@record
def pmi(
self,
neg=1,
cds=0.75,
pair_args={},
keyed_vectors=False,
evaluate=True,
**kwargs,
):
"""
Gets the PMI matrix.
"""
m = self.pmi_matrix(cds, pair_args, **kwargs)
embd = pmi.PPMIEmbedding(m, neg=neg)
if evaluate:
eval_results = self.eval_sim(embd)
if keyed_vectors:
# because of the large dimensions, the matrix will get huge!
return self.to_keyed_vectors(embd.m.todense(), m.shape[0])
if evaluate:
return embd, eval_results
return embd
def svd_matrix(
self, impl, impl_args={}, dim=500, neg=1, cds=0.75, pair_args={}, **kwargs
):
"""
Do the actual SVD computation.
"""
assert impl in ["scipy", "gensim", "scikit", "sparsesvd"]
svd_path = self.dict_to_path(
"svd",
{
"impl": impl,
**impl_args,
"neg": neg,
"cds": cds,
"dim": dim,
**pair_args,
},
)
logger.debug(f"looking up the file: {svd_path}")
if svd_path.is_file():
try:
logger.info("retrieved already saved svd")
return load_arrays(svd_path)
except Exception as e:
logger.info(f"creating new svd, error while loading files: {e}")
print("creating new svd")
m = self.pmi_matrix(cds, pair_args, **kwargs)
m = pmi.PPMIEmbedding(m, neg=neg, normalize=False)
start = timer()
ut, s = svd.calc_svd(m, dim, impl, impl_args)
end = timer()
logger.info("svd took " + str(round((end - start) / 60, 2)) + " minutes")
svd_path.parent.mkdir(parents=True, exist_ok=True)
save_arrays(svd_path, ut, s)
logger.info("svd arrays saved")
return ut, s
@record
def svd(
self,
dim=500,
eig=0,
neg=1,
cds=0.75,
impl="scipy",
impl_args={},
pair_args={},
keyed_vectors=False,
evaluate=True,
**kwargs,
):
"""
Gets and SVD embedding.
"""
ut, s = self.svd_matrix(
impl=impl,
impl_args=impl_args,
dim=dim,
neg=neg,
cds=cds,
pair_args=pair_args,
**kwargs,
)
embedding = svd.SVDEmbedding(ut, s, eig=eig)
if evaluate:
eval_results = self.eval_sim(embedding)
if keyed_vectors:
embedding = self.to_keyed_vectors(embedding.m, dim)
if evaluate:
return embedding, eval_results
return embedding
def to_keyed_vectors(self, embd_matrix, dim, delete_unknown=True):
"""
Transform to gensim's keyed vectors structure for further usage.
https://github.com/RaRe-Technologies/gensim/blob/develop/gensim/models/keyedvectors.py
"""
vectors = WordEmbeddingsKeyedVectors(vector_size=dim)
tokens = self.corpus.vocab.tokens
if delete_unknown:
# delete last row (for <UNK> token)
embd_matrix = np.delete(embd_matrix, (-1), axis=0)
else:
# the last token is the UNK token so append it
tokens.append("<UNK>")
vectors.add(tokens, embd_matrix)
return vectors
def eval_sim(self, embd, **kwargs):
"""
Evaluate the performance on word similarity datasets.
NB: The corpus has to be initialized with the correct language.
"""
return evaluation.eval_similarity(
embd,
self.corpus.vocab.token2id,
self.corpus.preproc_fun,
lang=self.corpus.lang,
**kwargs,
)
def eval_analogy(self, embd, **kwargs):
"""
Evaluate the performance on word analogies datasets.
NB: The corpus has to be initialized with the correct language.
"""
return evaluation.eval_analogies(
embd,
self.corpus.vocab.token2id,
self.corpus.preproc_fun,
lang=self.corpus.lang,
**kwargs,
)
def results(self, **kwargs):
"""
Retrieve evaluation results from the database.
"""
return results_from_db(self.get_db(), **kwargs)
```
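An end-to-end sketch of how `Bunch` ties the pieces together; the `Corpus.from_file` constructor and the top-level `hyperhyper` exports are assumed from the rest of the package, which is not shown in this excerpt.
```python
# Hypothetical end-to-end sketch; Corpus.from_file and the hy.* exports are assumed.
import hyperhyper as hy

corpus = hy.Corpus.from_file("corpus.txt")      # assumed constructor, one text per line
bunch = hy.Bunch("demo_bunch", corpus)          # creates ./demo_bunch/ with corpus + results.db
vectors, results = bunch.svd(keyed_vectors=True)
vectors.most_similar("year")                    # gensim KeyedVectors API
```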
#### File: hyperhyper/hyperhyper/preprocessing.py
```python
import os
import re
from gensim.parsing.preprocessing import (preprocess_string,
strip_non_alphanum, strip_tags)
from tqdm import tqdm
from .utils import map_pool
try:
import spacy
except:
spacy = None
def simple_preproc(text):
"""
replace digits with 0 and lowercase text
"""
return re.sub(r"\d", "0", text.lower())
def tokenize_string(text):
"""
tokenize based on whitespaces
"""
CUSTOM_FILTERS = [simple_preproc, strip_tags, strip_non_alphanum]
return preprocess_string(text, CUSTOM_FILTERS)
def tokenize_texts(texts):
"""
tokenize multiple texts (list of texts) based on whitespaces
"""
return [tokenize_string(t) for t in texts]
def tokenize_texts_parallel(texts):
"""
tokenize multiple texts based on whitespaces in parrallel
"""
return map_pool(texts, tokenize_string)
def texts_to_sents(texts, model="en_core_web_sm", remove_stop=True, lemmatize=True):
"""
transform list of texts to list of sents (list of tokens) and apply
simple text preprocessing
"""
texts = [strip_tags(t) for t in texts]
results = []
assert spacy is not None, 'please install spacy, i.e., "pip install spacy"'
try:
nlp = spacy.load(model, disable=["ner"])
except Exception as e:
print(e, "\ntrying to download model...")
os.system("python -m spacy download " + model)
nlp = spacy.load(model, disable=["ner"])
for doc in tqdm(nlp.pipe(texts), total=len(texts), desc="texts to sents"):
for s in doc.sents:
results.append(
[
simple_preproc(
strip_non_alphanum(t.lemma_ if lemmatize else t.text)
)
for t in s
if not any((t.is_punct, t.is_space, remove_stop and t.is_stop))
]
)
return results
```
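The whitespace tokenizer above combines lowercasing, digit masking, tag stripping, and removal of non-alphanumeric characters; a quick illustration (expected result shown as a comment):
```python
# Expected behaviour of the simple whitespace tokenizer.
from hyperhyper.preprocessing import tokenize_string

tokenize_string("Hello <b>World</b>, it is 2020!")
# -> ['hello', 'world', 'it', 'is', '0000']
```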
#### File: hyperhyper/hyperhyper/svd.py
```python
import heapq
import logging
import numpy as np
from gensim.models.lsimodel import stochastic_svd
from scipy.sparse import linalg
logger = logging.getLogger(__name__)
try:
from sparsesvd import sparsesvd
except ImportError:
logger.info("no sparsvd")
try:
from sklearn.utils.extmath import randomized_svd
except ImportError:
logger.info("no sklearn")
def calc_svd(matrix, dim, impl, impl_args):
"""
apply truncated SVD with several implementations
truncated SVD:
sparsesvd: https://pypi.org/project/sparsesvd/
scipy: https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.svd.html
randomized truncated SVD:
gensim: https://github.com/RaRe-Technologies/gensim/blob/develop/gensim/models/lsimodel.py
scikit: https://scikit-learn.org/stable/modules/generated/sklearn.utils.extmath.randomized_svd.html
Check out the comparison: https://github.com/jfilter/sparse-svd-benchmark
"""
if impl == "sparsesvd":
# originally used SVD implementation
ut, s, _ = sparsesvd(matrix.m.tocsc(), dim)
# returns in a different format
ut = ut.T
if impl == "scipy":
ut, s, _ = linalg.svds(matrix.m, dim)
# randomized (but fast) truncated SVD
if impl == "gensim":
# better default arguments
args = {"power_iters": 5, "extra_dims": 10, **impl_args}
ut, s = stochastic_svd(matrix.m, dim, matrix.m.shape[0], **args)
if impl == "scikit":
ut, s, _ = randomized_svd(matrix.m, dim, **impl_args)
return ut, s
class SVDEmbedding:
"""
SVD embeddings.
Enables controlling the weighted exponent of the eigenvalue matrix (eig).
"""
def __init__(self, ut, s, normalize=True, eig=0.0):
if eig == 0.0:
self.m = ut
elif eig == 1.0:
self.m = s * ut
else:
self.m = np.power(s, eig) * ut
# not used?
# self.dim = self.m.shape[1]
if normalize:
self.normalize()
def normalize(self):
norm = np.sqrt(np.sum(self.m * self.m, axis=1))
self.m = self.m / norm[:, np.newaxis]
def represent(self, w_idx):
return self.m[w_idx, :]
def similarity(self, w_idx_1, w_idx_2):
"""
Assumes the vectors have been normalized.
"""
return self.represent(w_idx_1).dot(self.represent(w_idx_2))
def most_similar(self, w_idx, n=10):
"""
Assumes the vectors have been normalized.
"""
scores = self.m.dot(self.represent(w_idx))
return heapq.nlargest(n, zip(scores, list(range(len(scores)))))
```
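A small end-to-end sketch of `calc_svd` plus `SVDEmbedding`, using the scipy backend. `calc_svd` only reads the `.m` attribute of the matrix object, so a `SimpleNamespace` stands in for hyperhyper's PMI-matrix wrapper here; the import path is assumed.
```python
from types import SimpleNamespace

import scipy.sparse

from hyperhyper.svd import calc_svd, SVDEmbedding

# Stand-in for the PMI matrix wrapper: calc_svd only touches `.m`.
pmi = SimpleNamespace(m=scipy.sparse.random(100, 100, density=0.2,
                                            format="csr", random_state=0))

ut, s = calc_svd(pmi, dim=10, impl="scipy", impl_args={})
emb = SVDEmbedding(ut, s, eig=0.5)        # rows are L2-normalized by default

print(emb.represent(0).shape)             # (10,)
print(emb.most_similar(0, n=3))           # [(score, row_index), ...], includes row 0 itself
```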
|
{
"source": "jfilter/hyperwords",
"score": 3
}
|
#### File: hyperwords/hyperwords/corpus2svd_params.py
```python
from docopt import docopt
# noinspection PyListCreation
def main():
args = docopt("""
Usage:
corpus2svd.sh [options] <corpus> <output_dir>
Options:
--thr NUM The minimal word count for being in the vocabulary [default: 100]
--win NUM Window size [default: 2]
--pos Positional contexts
--dyn Dynamic context windows
--sub NUM Subsampling threshold [default: 0]
--del Delete out-of-vocabulary and subsampled placeholders
--cds NUM Context distribution smoothing [default: 1.0]
--dim NUM Dimensionality of eigenvectors [default: 500]
--neg NUM Number of negative samples; subtracts its log from PMI [default: 1]
--w+c Use ensemble of word and context vectors
--eig NUM Weighted exponent of the eigenvalue matrix [default: 0.5]
""")
corpus = args['<corpus>']
output_dir = args['<output_dir>']
corpus2pairs_opts = []
corpus2pairs_opts.append('--thr ' + args['--thr'])
corpus2pairs_opts.append('--win ' + args['--win'])
if args['--pos']:
corpus2pairs_opts.append('--pos')
if args['--dyn']:
corpus2pairs_opts.append('--dyn')
corpus2pairs_opts.append('--sub ' + args['--sub'])
if args['--del']:
corpus2pairs_opts.append('--del')
counts2pmi_opts = []
counts2pmi_opts.append('--cds ' + args['--cds'])
pmi2svd_opts = []
pmi2svd_opts.append('--dim ' + args['--dim'])
pmi2svd_opts.append('--neg ' + args['--neg'])
svd2text_opts = []
if args['--w+c']:
svd2text_opts.append('--w+c')
svd2text_opts.append('--eig ' + args['--eig'])
print('@'.join([
corpus,
output_dir,
' '.join(corpus2pairs_opts),
' '.join(counts2pmi_opts),
' '.join(pmi2svd_opts),
' '.join(svd2text_opts)
]))
if __name__ == '__main__':
main()
```
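For reference, a hypothetical invocation and the '@'-separated line the script prints (the wrapper shell script presumably splits on '@' to pass each field to the corresponding pipeline stage):
```python
# python corpus2svd_params.py --win 5 --dim 300 --neg 5 my_corpus.txt out/
# prints:
# my_corpus.txt@out/@--thr 100 --win 5 --sub 0@--cds 1.0@--dim 300 --neg 5@--eig 0.5
```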
#### File: hyperwords/hyperwords/counts2chi.py
```python
from docopt import docopt
from scipy.sparse import dok_matrix, csr_matrix
import numpy as np
import sys
from representations.matrix_serializer import save_matrix, save_vocabulary, load_count_vocabulary
def main():
args = docopt("""
Usage:
counts2chi.py [options] <counts> <output_path>
Options:
--cds NUM Context distribution smoothing [default: 1.0]
""")
counts_path = args['<counts>']
vectors_path = args['<output_path>']
cds = float(args['--cds'])
counts, iw, ic = read_counts_matrix(counts_path)
chi = calc_chi(counts, cds)
save_matrix(vectors_path, chi)
save_vocabulary(vectors_path + '.words.vocab', iw)
save_vocabulary(vectors_path + '.contexts.vocab', ic)
def read_counts_matrix(counts_path):
"""
Reads the counts into a sparse matrix (CSR) from the count-word-context textual format.
"""
words = load_count_vocabulary(counts_path + '.words.vocab')
contexts = load_count_vocabulary(counts_path + '.contexts.vocab')
words = list(words.keys())
contexts = list(contexts.keys())
iw = sorted(words)
ic = sorted(contexts)
wi = dict([(w, i) for i, w in enumerate(iw)])
ci = dict([(c, i) for i, c in enumerate(ic)])
counts = csr_matrix((len(wi), len(ci)), dtype=np.float32)
tmp_counts = dok_matrix((len(wi), len(ci)), dtype=np.float32)
update_threshold = 100000
i = 0
with open(counts_path) as f:
for line in f:
count, word, context = line.strip().split()
if word in wi and context in ci:
tmp_counts[wi[word], ci[context]] = int(count)
i += 1
if i == update_threshold:
counts = counts + tmp_counts.tolil()
tmp_counts = dok_matrix((len(wi), len(ci)), dtype=np.float32)
i = 0
counts = counts + tmp_counts.tocsr()
return counts, iw, ic
def calc_chi(counts, cds):
"""
super stupid test implementation
Counts is a (word, context) matrix holding the count for each combination, not a contingency table!
"""
expected = get_expected(counts, cds)
counts.data = np.square(counts.data - expected.data)
counts.data /= expected.data
return counts
def get_expected(counts, cds):
sum_w = np.array(counts.sum(axis=1))[:, 0] # horizontal, word
sum_c = np.array(counts.sum(axis=0))[0, :] # vertical, context
if cds != 1:
sum_c = sum_c ** cds
# sparse
expected = counts.copy()
expected.data[expected.data != 0] = 1
# real calculation
expected = multiply_by_rows(expected, sum_w)
expected = multiply_by_columns(expected, sum_c)
expected.data /= sum_c.sum() # performed better than sum_w.sum()
# probably better expressed as e(w,c) = p(w) * p(c) * N with p(w) = count(w) / N
return expected
def multiply_by_rows(matrix, row_coefs):
normalizer = dok_matrix((len(row_coefs), len(row_coefs)))
normalizer.setdiag(row_coefs)
return normalizer.tocsr().dot(matrix)
def multiply_by_columns(matrix, col_coefs):
normalizer = dok_matrix((len(col_coefs), len(col_coefs)))
normalizer.setdiag(col_coefs)
return matrix.dot(normalizer.tocsr())
if __name__ == '__main__':
main()
```
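To make the expected-count logic above concrete, here is the same computation done densely on a toy 2x2 count matrix (cds = 1.0, so no context-distribution smoothing); the sparse code above restricts the expected counts to the nonzero cells, which the `np.where` below reproduces.
```python
import numpy as np
from scipy.sparse import csr_matrix

counts = csr_matrix(np.array([[4., 0.],
                              [2., 6.]], dtype=np.float32))
sum_w = np.asarray(counts.sum(axis=1)).ravel()   # word totals:    [4, 8]
sum_c = np.asarray(counts.sum(axis=0)).ravel()   # context totals: [6, 6]
N = sum_c.sum()                                  # 12

# e(w, c) = count(w) * count(c) / N, kept only where the observed count is nonzero
expected = np.outer(sum_w, sum_c) / N            # [[2, 2], [4, 4]]
chi = np.where(counts.toarray() != 0,
               (counts.toarray() - expected) ** 2 / expected, 0.0)
print(chi)                                       # [[2. 0.] [1. 1.]]
```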
#### File: hyperwords/hyperwords/evaluate_multiple.py
```python
from __future__ import print_function
from docopt import docopt
import random
import copy
from collections import defaultdict
from representations.representation_factory import create_representation
import numpy as np
from scipy.stats.stats import spearmanr
import sys
import os.path
def main():
args = docopt("""
Usage:
eval_reliability.py [options] <representation> <file_name> <folders>...
Options:
--words FILE Use FILE with list of words (1 per line) to measure reliability
--ws FILES Testsets for word similarity evaluation, use "," as separator!
--ana FILES Testsets for analogy evaluation, use "," as separator!
--closest N Use N closest neighbors to measure reliability [default: 10]
""")
folders = args["<folders>"]
closest = int(args["--closest"])
word_list = args["--words"]
ws_test_sets = [read_ws_test_set(path) for path in args["--ws"].split(",")]
as_test_sets = [read_as_test_set(path) for path in args["--ana"].split(",")]
as_xi_and_ix = [get_vocab_as(test_set) for test_set in as_test_sets]
# good default parameters for SVD
args["--eig"] = 0
args["--w+c"] = False
# not used
args["--neg"] = 1
representations = []
for file in folders:
if os.path.isfile(file+"/"+args["<file_name>"]+".words.vocab"):
x = copy.deepcopy(args)
x["<representation_path>"] = file+"/"+args["<file_name>"]
representations.append(create_representation(x))
else:
print("Could not find "+file+"/"+args["<file_name>"]+".words.vocab", file=sys.stderr)
# comparison over all subsets
if len(representations) < 2:
raise Exception("Need multiple models for evaluation")
words = words_to_evaluate_file(word_list) if word_list else words_to_evaluate(representations)
evaluated = [" ".join([str(evaluate_ws(r,w)) for r in representations]) for w in ws_test_sets]
for i, test_set in enumerate(as_test_sets):
evaluated.append(" ".join([str(evaluate_as(r,test_set, as_xi_and_ix[i][0], as_xi_and_ix[i][1])) for r in representations]))
evaluated.append(reliability(representations, words, closest))
print("\t".join(evaluated))
def reliability(representations, words, closest):
results = []
for i in range(0,len(representations)):
results.append(0)
for word in words: #list(words)[:5]:#
neighbors = [get_neighbors(representation, word, closest) for representation in representations]
# comparison over all subsets
for i in range(0,len(representations)):
results[i] += jaccard(neighbors[:i] + neighbors[i+1:])
for i in range(0,len(representations)):
results[i] /= len(words)
return " ".join([str(r) for r in results])
def jaccard(sets):
if (len(sets) < 2):
raise Exception("Need multiple sets")
for s in sets:
if len(s) == 0:
return 0
intersection = copy.copy(sets[0])
for s in sets[1:]:
intersection &= s
union = set()
for s in sets:
union |= s
return (1.0 * len(intersection))/len(union)
def words_to_evaluate_file(filename):
words = set()
with open(filename) as f:
for line in f:
words.add(line.strip())
return words
def words_to_evaluate(representations):
words = representations[0].wi.viewkeys()
for r in representations[1:]:
words &= r.wi.viewkeys()
return words
def get_neighbors(representation, word, closest):
if word in representation.wi:
dist = representation.m.dot(representation.m[representation.wi[word]].T)
dist[representation.wi[word]] = -np.Inf
return {representation.iw[x] for x in np.argsort(-dist)[:closest]}
else:
return set()
def evaluate_ws(representation, data):
results = []
for (x, y), sim in data:
results.append((representation.similarity(x, y), float(sim)))
actual, expected = zip(*results)
return spearmanr(actual, expected)[0]
def read_ws_test_set(path):
test = []
with open(path) as f:
for line in f:
x, y, sim = line.strip().lower().split()
test.append(((x, y), sim))
return test
def read_as_test_set(path):
test = []
with open(path) as f:
for line in f:
analogy = line.strip().lower().split()
test.append(analogy)
return test
def evaluate_as(representation, data, xi, ix):
sims = prepare_similarities_as(representation, ix)
correct_mul = 0.0
for a, a_, b, b_ in data:
b_mul = guess(representation, sims, xi, a, a_, b)
if b_mul == b_:
correct_mul += 1
return correct_mul/len(data)
#vocab = ix
def prepare_similarities_as(representation, vocab):
vocab_representation = representation.m[[representation.wi[w] if w in representation.wi else 0 for w in vocab]]
sims = vocab_representation.dot(representation.m.T)
dummy = None
for w in vocab:
if w not in representation.wi:
dummy = representation.represent(w)
break
if dummy is not None:
for i, w in enumerate(vocab):
if w not in representation.wi:
vocab_representation[i] = dummy
if type(sims) is not np.ndarray:
sims = np.array(sims.todense())
else:
sims = (sims+1)/2
return sims
def guess(representation, sims, xi, a, a_, b):
sa = sims[xi[a]]
sa_ = sims[xi[a_]]
sb = sims[xi[b]]
mul_sim = sa_*sb*np.reciprocal(sa+0.01)
if a in representation.wi:
mul_sim[representation.wi[a]] = 0
if a_ in representation.wi:
mul_sim[representation.wi[a_]] = 0
if b in representation.wi:
mul_sim[representation.wi[b]] = 0
return representation.iw[np.nanargmax(mul_sim)]
def get_vocab_as(data):
vocab = set()
for analogy in data:
vocab.update(analogy)
vocab = sorted(vocab)
return dict([(a, i) for i, a in enumerate(vocab)]), vocab
if __name__ == "__main__":
main()
```
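A tiny illustration of the Jaccard overlap that `reliability()` averages over query words: two models' nearest-neighbor sets for the same word, compared exactly as `jaccard()` does for the two-set case.
```python
neighbors_model_a = {"cat", "dog", "fish"}
neighbors_model_b = {"cat", "dog", "bird"}

overlap = neighbors_model_a & neighbors_model_b   # {'cat', 'dog'}
union = neighbors_model_a | neighbors_model_b     # 4 distinct neighbors in total

print(len(overlap) / len(union))                  # 0.5 -> the neighborhoods half agree
```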
|
{
"source": "jfilter/masters-thesis",
"score": 2
}
|
#### File: 04 sec lm/20k_no_NER_gen/train.py
```python
from fastai.text import *
from fastai.datasets import *
from pathlib import Path
import pandas as pd
from fastai.metrics import *
from fastai.train import *
from fastai.imports import nn, torch
from fastai.callbacks import *
import random
import math
import datetime
from sacred import Experiment
from sacred.observers import MongoObserver
import fastai
import news_utils
EX_PA = Path('/mnt/data/group07/johannes/ynacc_proc/replicate/20k')
ex = Experiment(EX_PA.stem)
ex.observers.append(MongoObserver.create(db_name=EX_PA.stem))
@ex.config
def my_config():
bs=64
epochs_start = math.ceil(random.uniform(0, 3))
epochs=math.ceil(random.uniform(5, 30))
drop_mult=random.uniform(0.4, 0.8)
lr=random.uniform(5e-2, 1e-5)
layer_factor=random.uniform(2, 3)
exp_id = datetime.datetime.now().strftime("%Y_%_m_%d_%H_%M_%S_%f")
backwards = False
frozen = 'none'
@ex.automain
def my_main(epochs, lr, drop_mult, exp_id, bs, layer_factor, epochs_start):
ex.info['path'] = EX_PA
data_lm = TextLMDataBunch.load(EX_PA, bs=bs)
learn = language_model_learner(data_lm, pretrained_fnames=['lstm_wt103', 'itos_wt103'], drop_mult=drop_mult)
learn.callbacks += [
news_utils.fastai.SacredLogger(learn, ex),
SaveModelCallback(learn, name=exp_id),
EarlyStoppingCallback(learn, patience=5)
]
learn.fit_one_cycle(epochs_start, lr)
learn.unfreeze()
learn.fit_one_cycle(epochs, [lr / (layer_factor * (3 - x)) for x in range(3)] + [lr])
```
#### File: ynacc/12/multitask.py
```python
import argparse
import datetime
from pathlib import Path
import shutil
import fastai
import pandas as pd
import pymongo
import sacred
import sklearn.metrics
from fastai.basic_train import get_preds
from fastai.callbacks import *
from fastai.datasets import *
from fastai.imports import nn, torch
from fastai.metrics import *
from fastai.text import *
from fastai.text.data import DataBunch
from fastai.train import *
from fastai.vision import *
from sacred import Experiment
from sacred.observers import MongoObserver
from sklearn import metrics
import news_utils.fastai
from types import SimpleNamespace
args = SimpleNamespace(**{'exp': 'only_threads_unlimited_30000', 'best': '2019_ 1_16_20_07_47_497762', 'device': 3})
EX_PA = Path('/mnt/data/group07/johannes/ynacc_proc/proper_threads/exp/cl/' + args.exp)
# torch.cuda.set_device(args.device)
print(fastai.__version__)
best_lm_exp_id = args.best
# all_classes = ['claudience', 'clpersuasive', 'clsentiment', 'clagreement', 'cldisagreement', 'clinformative', 'clmean', 'clcontroversial', 'cltopic']
all_classes = ['claudience', 'clpersuasive', 'clagreement', 'cldisagreement', 'clinformative', 'clmean', 'clcontroversial', 'cltopic']
data_lm = TextLMDataBunch.load(Path('/mnt/data/group07/johannes/ynacc_proc/proper_threads/exp/lm/' + args.exp))
learn_lm = language_model_learner(data_lm).load(
Path('/mnt/data/group07/johannes/ynacc_proc/proper_threads/exp/lm/'+ args.exp + "/models/" + best_lm_exp_id, device="cpu"))
learn_lm.save_encoder('encoder_' + best_lm_exp_id)
shutil.move('/mnt/data/group07/johannes/ynacc_proc/proper_threads/exp/lm/'+ args.exp + "/models/" + 'encoder_' + best_lm_exp_id + '.pth', '/mnt/data/group07/johannes/ynacc_proc/proper_threads/exp/cl/'+ args.exp + "/models/" + 'encoder_' + best_lm_exp_id + '.pth')
learn_lm_vocab = data_lm.train_ds.vocab
del data_lm
del learn_lm
print('saved enconder, best model id:', best_lm_exp_id)
# In[2]:
def setup_data():
UT = Path('~/data/ynacc_proc/proper_threads/data/cls/' + args.exp)
data_clas_train = pd.read_csv(UT/'train.csv')
data_clas_val = pd.read_csv(UT/'val.csv')
# data_clas_train = data_clas_train[[clas, 'text_proc']]
# data_clas_val = data_clas_val[[clas, 'text_proc']]
data_clas_train = data_clas_train.dropna(subset=all_classes)
data_clas_val = data_clas_val.dropna(subset=all_classes)
for clas in all_classes:
data_clas_train[clas] = data_clas_train[clas].astype(int)
data_clas_val[clas] = data_clas_val[clas].astype(int)
data_clas = TextClasDataBunch.from_df(EX_PA, data_clas_train, data_clas_val,
vocab=learn_lm_vocab, bs=50, text_cols=['text_proc'], label_cols=all_classes,tokenizer=Tokenizer(cut_n_from_behind=1398))
return data_clas
# In[3]:
data_clas = setup_data()
# In[4]:
data_clas.one_batch()
# In[5]:
encoder_name = 'encoder_' + best_lm_exp_id
drop_mult = 1
learn = text_classifier_learner(data_clas, drop_mult=drop_mult, embed_prevent_first=6)
learn.load_encoder(encoder_name)
optim_lr = news_utils.fastai.get_optimal_lr(learn, runs=3)
# In[6]:
def accuracy(input:Tensor, targs:Tensor)->Rank0Tensor:
"Compute accuracy with `targs` when `input` is bs * n_classes."
print(input.tolist())
print(targs.tolist())
n = targs.shape[0]
input = input.argmax(dim=-1).view(n,-1)
# targs = targs.view(n,-1)
targs = targs.view(-1).long()
print(targs.tolist())
print(input.tolist())
return (input==targs).float().mean()
# In[7]:
# optim_lr = 0.0042854852039743915
# @ex.config
# def my_config():
exp_id = datetime.datetime.now().strftime("%Y_%_m_%d_%H_%M_%S_%f")
factor = 2.6
wd = 1e-7
moms = (0.8, 0.7)
full_epochs = 20
bs = 50
embed_prevent=6
# lm_model_type='trained_0_embed_prevent'
# @ex.main
# def run_exp(exp_id, drop_mult, lr, moms, wd, factor, full_epochs):
lr = optim_lr
lrs = [lr / (factor ** (4 - x)) for x in range(4)] + [lr]
learn = text_classifier_learner(data_clas, drop_mult=drop_mult, embed_prevent_first=6)
learn.load_encoder(encoder_name)
learn.metrics = [accuracy]
learn.metrics =[fbeta]
# learn.metrics += [news_utils.fastai.F1Macro(),
# news_utils.fastai.F1Weighted(), news_utils.fastai.PrecisionMacro(), news_utils.fastai.RecallMacro()]
learn.callbacks += [
SaveModelCallback(learn, name=exp_id),
# news_utils.fastai.SacredLogger(learn, ex),
]
# In[8]:
# learn.fit_one_cycle(1, np.array(lrs), wd=wd, moms=moms)
# In[9]:
for i in range(1, 4):
epochs = 1
if i in [1, 2]:
learn.freeze_to(-i)
else:
learn.unfreeze()
epochs = full_epochs
learn.fit_one_cycle(epochs, np.array(lrs), wd=wd, moms=moms)
# for _ in range(it):
# ex.run(config_updates={"lr": optim_lr, "drop_mult": drop_mult})
# run_for_class(all_classes[args.cl])
# In[10]:
data_clas.valid_dl
# In[21]:
learn.predict('that is cool')
# In[14]:
b1, b2 = learn.get_preds()
# In[24]:
for i in range(len(all_classes)):
preds = [round(x.item()) for x in b1[:, i]]
targs = [round(x.item()) for x in b2[:, i]]
print(all_classes[i])
print(metrics.f1_score(targs, preds, average="micro"))
print(metrics.f1_score(targs, preds, average="macro"))
print()
# In[ ]:
```
|
{
"source": "jfilter/mw-category-members",
"score": 3
}
|
#### File: mw-category-members/category_members/retrieve.py
```python
import requests
def retrieve(cat_name, mw_instance='https://en.wikipedia.org', types=['page', 'subcat', 'file'], clean_subcat_names=False):
"""Retrieve pages that belong to a given category.
Args:
cat_name: Category name e.g. 'Category:Presidents_of_the_United_States'.
mw_instance: Which MediaWiki instance to use (the URL 'origin'). Defaults to 'https://en.wikipedia.org'.
types: Which types of pages to retrieve. Defaults to `['page', 'subcat', 'file']`.
clean_subcat_names: If `True`, removes the e.g. 'Category:' prefix of the titles. Defaults to `False`.
Returns:
Array of pages where a page is a dictionary of `{'name': 'some name', 'link': 'some absolute link'}`.
"""
cmtype = f'&cmtype={"|".join(types)}'
base_url = f'{mw_instance}/w/api.php?action=query&format=json&list=categorymembers&cmtitle={cat_name}&cmlimit=500{cmtype}'
cont = ''
result = []
while True:
url = f'{base_url}&cmcontinue={cont}'
r = requests.get(url, timeout=30)
r.raise_for_status()
r_json = r.json()
if 'query' in r_json:
for item in r_json['query']['categorymembers']:
title = item['title']
if clean_subcat_names and ':' in title:
# cut away ':' and everything before it
index_sep = title.index(':')
title = title[index_sep + 1:]
# spaces need to be converted in links
link = f'{mw_instance}/wiki/{title.replace(" ", "_")}'
result.append({'name': title, 'link': link})
if 'continue' not in r_json:
break
else:
cont = r_json['continue']['cmcontinue']
return result
```
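A usage sketch for `retrieve`; the import path is assumed from the file layout, so check the package's README for the exact name.
```python
from category_members.retrieve import retrieve

# Only subcategories of a category on English Wikipedia, with the
# 'Category:' prefix stripped from the returned titles.
pages = retrieve('Category:Physics', types=['subcat'], clean_subcat_names=True)
for p in pages[:5]:
    print(p['name'], '->', p['link'])
```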
|
{
"source": "jfilter/names-of-estonia",
"score": 3
}
|
#### File: names-of-estonia/scraper/scrape.py
```python
import requests
import csv
from lxml import html
BASE_URL = "http://www.stat.ee/public/apps/nimed/%s"
data = []
def processName(name):
url = BASE_URL % name
r = requests.get(url)
if r.status_code != 200:
print('error with %s' % name)
else:
success, women, men = parseText(r.text)
if success:
data.append({'name': name.title(), 'women': women, 'men': men})
# print(data)
def parseText(text):
tree = html.fromstring(text)
elements = tree.xpath('.//div[contains(@class, "item")]/meta/@content')
# print(elements)
if len(elements) == 0:
return False, None, None
t = elements[0]
women_number = None
men_number = None
women = t.find('naisel')
if women != -1:
women_end = women - 1
women_start = t.rfind(' ', 0, women_end) + 1
women_string = t[women_start:women_end]
if women_string == 'viiel':
women_number = 4
else:
women_number = int(women_string)
men = t.find('mehel')
if men != -1:
men_end = men - 1
men_start = t.rfind(' ', 0, men_end) + 1
men_string = t[men_start:men_end]
if men_string == 'viiel':
men_number = 4
else:
men_number = int(men_string)
return True, women_number, men_number
with open('uniquefirstnames.csv') as file:
reader = csv.reader(file)
i = 0
for row in reader:
print(i)
i += 1
processName(row[0])
with open('results.csv', 'w') as csvfile:
fieldnames = ['name', 'women', 'men']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
writer.writerows(data)
```
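The slicing logic inside `parseText` pulls the number that precedes 'naisel' ('women') or 'mehel' ('men') out of the description string. A standalone walk-through on a made-up Estonian sentence (the real stat.ee wording may differ):
```python
t = "Eestis on see nimi 123 naisel ja 45 mehel."

women = t.find('naisel')                      # position of 'naisel'
women_end = women - 1                         # skip the space before it
women_start = t.rfind(' ', 0, women_end) + 1  # start of the preceding word
print(int(t[women_start:women_end]))          # 123
```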
|
{
"source": "jfilter/ptf",
"score": 3
}
|
#### File: 01_dynamic_bernoulli/dat/step_3_create_data_stats.py
```python
import glob
import os
import numpy as np
import pickle
# Change this to the name of the folder where your dataset is
dataset_name = 'arxiv_ML'
# Change this to a list of the time slices
time_slices = range(7,16)
time_slices = [1, 2, 3, 4, 5]
#Change this to the number of characters in the file names that should be matched to the timeslice prefix.
# i.e. if you use time_slices = [91, 92, 98, ...]
# use prefix_length = 2
# if you use time_slices = [1998, 1999, 2000, 2001]
# use prefix_length = 4
prefix_length = 2
# Change this to a list of query words you would like the algorithm to print descriptive statistics of (i.e. a trajectory of the learned dynamic embeddings)
query_words = ['deep', 'inference', 'sparse', 'neuron', 'variational']
query_words = ['gewalt', 'deutschland', 'wien']
# No need to modify any code below
#######################################################
dat_stats={}
dat_stats['name'] = dataset_name
dat_stats['T_bins'] = time_slices
dat_stats['prefix'] = prefix_length
dat_stats['query_words'] = query_words
T = len(dat_stats['T_bins'])
def count_words(split):
dat_stats[split] = np.zeros(T)
files = glob.glob(dataset_name + '/'+ split + '/*.npy')
for t, i in enumerate(dat_stats['T_bins']):
dat_files = [f for f in files if int(os.path.basename(f)[:dat_stats['prefix']]) == i]
for fname in dat_files:
dat = np.load(fname)
dat_stats[split][t] += len(dat)
count_words('train')
count_words('test')
count_words('valid')
pickle.dump(dat_stats, open(dataset_name + '/dat_stats.pkl', "wb"))
```
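Reading the statistics back (a sketch; assumes the dump above was written in binary mode and that the default `dataset_name` is kept):
```python
import pickle

with open('arxiv_ML/dat_stats.pkl', 'rb') as f:
    dat_stats = pickle.load(f)

print(dat_stats['T_bins'])    # the configured time slices
print(dat_stats['train'])     # word counts per time slice in the training split
```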
#### File: 03_scrape/scrape/spider.py
```python
from bs4 import BeautifulSoup
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
IGNORED_EXTENSIONS = [
# images
'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',
'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg',
# audio
'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff',
# video
'3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',
'm4a', 'm4v', 'flv',
# office suites
'xls', 'xlsx', 'ppt', 'pptx', 'pps', 'doc', 'docx', 'odt', 'ods', 'odg',
'odp',
# other
'css', 'pdf', 'exe', 'bin', 'rss', 'zip', 'rar',
]
class MySpider(CrawlSpider):
name = 'zeit.de'
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246'
allowed_domains = ['zeit.de']
start_urls = ['https://www.zeit.de']
rules = (
Rule(LinkExtractor(allow=r'(^[^\?]+$)|(^.*\?page.*$)', deny_extensions=IGNORED_EXTENSIONS), callback='parse_item', follow=True),
)
custom_settings = {
'LOG_LEVEL':'INFO',
'BOT_NAME': 'MOZILLA',
'FEED_FORMAT': 'jsonlines', 'FEED_URI': '/root/data_run2.json'}
def parse_item(self, response):
doc = BeautifulSoup(response.text, 'html.parser')
comments = doc.find_all('article', {"class": "comment"})
co = []
for c in comments:
body = c.find('div', {'class': 'comment__body'}).get_text().strip()
date = c.find('a', {'class': 'comment-meta__date'}).get_text().strip()
co.append({'text': body, 'date': date})
for lm in doc.find_all('div', {'class': 'js-comment-loader'}):
yield {'more_url': lm.find('a')['data-url'] }
yield {'url': response.url, 'comments': co}
```
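The spider can be run with Scrapy's standard entry points; below is a minimal programmatic sketch (assuming the module above is saved as `spider.py`), equivalent to `scrapy runspider spider.py` on the command line.
```python
from scrapy.crawler import CrawlerProcess

from spider import MySpider  # hypothetical module name for the file above

process = CrawlerProcess()
process.crawl(MySpider)
process.start()  # blocks until the crawl finishes; output goes to the configured FEED_URI
```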
|
{
"source": "jfilter/text-classification-keras",
"score": 2
}
|
#### File: text-classification-keras/docs/update_docs.py
```python
import shutil
import subprocess
import sys
from texcla import corpus, data, embeddings, experiment
from texcla.models import layers, sentence_model, sequence_encoders, token_model
from texcla.preprocessing import char_tokenizer, sentence_tokenizer, tokenizer, utils, word_tokenizer
from texcla.utils import format, generators, io, sampling
from md_autogen import MarkdownAPIGenerator, to_md_file
def generate_api_docs():
modules = [
token_model,
sentence_model,
sequence_encoders,
layers,
data, corpus, embeddings, experiment, format, generators, io, sampling,
char_tokenizer, sentence_tokenizer, tokenizer, utils, word_tokenizer
]
md_gen = MarkdownAPIGenerator(
"texcla", "https://github.com/jfilter/text-classification-keras/tree/master")
for m in modules:
md_string = md_gen.module2md(m)
to_md_file(md_string, m.__name__, "sources")
def update_index_md():
shutil.copyfile('../README.md', 'sources/index.md')
def copy_templates():
shutil.rmtree('sources', ignore_errors=True)
shutil.copytree('templates', 'sources')
if __name__ == "__main__":
copy_templates()
update_index_md()
generate_api_docs()
if len(sys.argv) > 1 and sys.argv[1] == '--live':
subprocess.run("mkdocs gh-deploy", shell=True, check=True)
else:
subprocess.run(
"mkdocs build && cd site && python3 -m http.server", shell=True, check=True)
```
#### File: text-classification-keras/examples/cnn_imdb.py
```python
import sys
from texcla import corpus, data, experiment
from texcla.models import AlexCNN, AttentionRNN, BasicRNN, StackedRNN, TokenModelFactory, YoonKimCNN
from texcla.preprocessing import FastTextWikiTokenizer
# 1. `python imdb.py setup`: Setup and preprocess the data
# 2. `python imdb.py train`: Load the setup data and train
# truncate text input after 50 tokens (words)
MAX_LEN = 50
def setup():
# limit to 5k pos. and 5k neg. samples (each for train and test)
X_train, X_test, y_train, y_test = corpus.imdb(5000)
# use the special tokenizer used for constructing the embeddings
tokenizer = FastTextWikiTokenizer()
# build vocabulary only on training data
tokenizer = experiment.setup_data(
X_train, y_train, tokenizer, 'imdb_train.bin', max_len=MAX_LEN)
experiment.setup_data(X_test, y_test, tokenizer,
'imdb_test.bin', max_len=MAX_LEN)
def train():
ds_train = data.Dataset.load('imdb_train.bin')
ds_val = data.Dataset.load('imdb_test.bin')
# use the embedding trained on Simple English Wikipedia
factory = TokenModelFactory(
ds_train.num_classes, ds_train.tokenizer.token_index, max_tokens=MAX_LEN, embedding_type='fasttext.wiki.simple', embedding_dims=300)
word_encoder_model = YoonKimCNN()
# word_encoder_model = AttentionRNN()
# word_encoder_model = StackedRNN()
# word_encoder_model = BasicRNN()
# freeze word embeddings
model = factory.build_model(
token_encoder_model=word_encoder_model, trainable_embeddings=False)
# use experiment.train as wrapper for Keras.fit()
experiment.train(x=ds_train.X, y=ds_train.y, validation_data=(ds_val.X, ds_val.y), model=model,
word_encoder_model=word_encoder_model)
if __name__ == '__main__':
assert(len(sys.argv) == 2)
if sys.argv[1] == 'setup':
setup()
if sys.argv[1] == 'train':
train()
```
#### File: text-classification-keras/texcla/corpus.py
```python
import os
import io
import keras
import sklearn
def read_folder(directory):
"""read text files in directory and returns them as array
Args:
directory: where the text files are
Returns:
Array of text
"""
res = []
for filename in os.listdir(directory):
with io.open(os.path.join(directory, filename), encoding="utf-8") as f:
content = f.read()
res.append(content)
return res
def read_pos_neg_data(path, folder, limit):
"""returns array with positive and negative examples"""
training_pos_path = os.path.join(path, folder, 'pos')
training_neg_path = os.path.join(path, folder, 'neg')
X_pos = read_folder(training_pos_path)
X_neg = read_folder(training_neg_path)
if limit is None:
X = X_pos + X_neg
else:
X = X_pos[:limit] + X_neg[:limit]
y = [1] * int(len(X) / 2) + [0] * int(len(X) / 2)
return X, y
def imdb(limit=None, shuffle=True):
"""Downloads (and caches) IMDB Moview Reviews. 25k training data, 25k test data
Args:
limit: get only first N items for each class
Returns:
[X_train, y_train, X_test, y_test]
"""
movie_review_url = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
# download and extract, thus remove the suffix '.tar.gz'
path = keras.utils.get_file(
'aclImdb.tar.gz', movie_review_url, extract=True)[:-7]
X_train, y_train = read_pos_neg_data(path, 'train', limit)
X_test, y_test = read_pos_neg_data(path, 'test', limit)
if shuffle:
X_train, y_train = sklearn.utils.shuffle(X_train, y_train)
X_test, y_test = sklearn.utils.shuffle(X_test, y_test)
return X_train, X_test, y_train, y_test
```
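Usage sketch for the corpus helper; the first call downloads the IMDB archive (roughly 80 MB) through Keras' cache.
```python
from texcla import corpus

# 100 positive + 100 negative reviews per split, shuffled.
X_train, X_test, y_train, y_test = corpus.imdb(limit=100)
print(len(X_train), len(X_test))   # 200 200
print(y_train[:5])                 # mix of 1 (pos) and 0 (neg)
```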
#### File: text-classification-keras/texcla/embeddings.py
```python
from __future__ import absolute_import, unicode_literals
import gzip
import io
import logging
import os
from zipfile import ZipFile
import numpy as np
import six
from keras.utils.data_utils import get_file
logger = logging.getLogger(__name__)
_EMBEDDINGS_CACHE = dict()
# Add more types here as needed.
# – fastText: https://fasttext.cc/docs/en/english-vectors.html
# - glove: https://nlp.stanford.edu/projects/glove/
_EMBEDDING_TYPES = {
# 1 million word vectors trained on Wikipedia 2017, UMBC webbase corpus and statmt.org news dataset (16B tokens).
'fasttext.wn.1M.300d': {
'file': 'wiki-news-300d-1M.vec.zip',
'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-english/wiki-news-300d-1M.vec.zip'
},
# 1 million word vectors trained with subword infomation on Wikipedia 2017, UMBC webbase corpus and statmt.org news dataset (16B tokens).
'fasttext.wn.1M.300d.subword': {
'file': 'wiki-news-300d-1M-subword.vec.zip',
'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-english/wiki-news-300d-1M-subword.vec.zip'
},
# 2 million word vectors trained on Common Crawl (600B tokens).
'fasttext.crawl.2M.300d.subword': {
'file': 'fasttext.wn.1M.300d.subword.vec.zip',
'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-english/crawl-300d-2M.vec.zip'
},
# 42 Billion tokens Common Crawl
'glove.42B.300d': {
'file': 'glove.42B.300d.txt.zip',
'url': 'http://nlp.stanford.edu/data/glove.42B.300d.zip'
},
# 6 Billion tokens from Wikipedia 2014 + Gigaword 5
'glove.6B.50d': {
'file': 'glove.6B.zip',
'url': 'http://nlp.stanford.edu/data/glove.6B.zip',
'extract': False,
'file_in_zip': 'glove.6B.50d.txt'
},
'glove.6B.100d': {
'file': 'glove.6B.zip',
'url': 'http://nlp.stanford.edu/data/glove.6B.zip',
'extract': False,
'file_in_zip': 'glove.6B.100d.txt'
},
'glove.6B.200d': {
'file': 'glove.6B.zip',
'url': 'http://nlp.stanford.edu/data/glove.6B.zip',
'extract': False,
'file_in_zip': 'glove.6B.200d.txt'
},
'glove.6B.300d': {
'file': 'glove.6B.zip',
'url': 'http://nlp.stanford.edu/data/glove.6B.zip',
'extract': False,
'file_in_zip': 'glove.6B.300d.txt'
},
# 840 Billion tokens Common Crawl
'glove.840B.300d': {
'file': 'glove.840B.300d.txt.zip',
'url': 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
},
# 2 Billion tweets, 27 Billion tokens Twitter
'glove.twitter.27B.25d': {
'file': 'glove.twitter.27B.zip',
'url': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',
'extract': False,
'file_in_zip': 'glove.twitter.27B.25d.txt'
},
'glove.twitter.27B.50d': {
'file': 'glove.twitter.27B.zip',
'url': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',
'extract': False,
'file_in_zip': 'glove.twitter.27B.50d.txt'
},
'glove.twitter.27B.100d': {
'file': 'glove.twitter.27B.zip',
'url': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',
'extract': False,
'file_in_zip': 'glove.twitter.27B.100d.txt'
},
'glove.twitter.27B.200d': {
'file': 'glove.twitter.27B.zip',
'url': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',
'extract': False,
'file_in_zip': 'glove.twitter.27B.200d.txt'
},
}
def _build_line(embedding_dims, f, is_gzip=False):
index = {}
for line in f:
# has to be done for gziped files
if is_gzip and six.PY2:
line = line.decode('utf-8')
values = line.split()
assert len(values) >= embedding_dims or len(
values) == 2, 'is the file corrupted?'
# some hack for fasttext vectors where the first line is (num_token, dimensions)
if len(values) <= 2 and embedding_dims > 1:
continue
word = ' '.join(values[:-embedding_dims])
floats = values[-embedding_dims:]
if not isinstance(word, six.text_type):
word = word.decode()
vector = np.asarray(floats, dtype='float32')
index[word] = vector
return index
def _build_embeddings_index(embeddings_path, embedding_dims):
logger.info('Building embeddings index...')
if embeddings_path.endswith('.gz'):
with gzip.open(embeddings_path, 'rt') as f:
index = _build_line(embedding_dims, f, is_gzip=True)
else:
# is ignoring errors a good idea? 🤔
with io.open(embeddings_path, encoding="utf-8", errors='ignore') as f:
index = _build_line(embedding_dims, f)
logger.info('Done')
return index
def build_embedding_weights(word_index, embeddings_index):
"""Builds an embedding matrix for all words in vocab using embeddings_index
"""
logger.info('Loading embeddings for all words in the corpus')
embedding_dim = list(embeddings_index.values())[0].shape[-1]
# setting special tokens such as UNK and PAD to 0
# all other words are also set to 0.
embedding_weights = np.zeros((len(word_index), embedding_dim))
for word, i in word_index.items():
word_vector = embeddings_index.get(word)
if word_vector is not None:
embedding_weights[i] = word_vector
return embedding_weights
def build_fasttext_wiki_embedding_obj(embedding_type):
"""FastText pre-trained word vectors for 294 languages, with 300 dimensions, trained on Wikipedia. It's recommended to use the same tokenizer for your data that was used to construct the embeddings. It's implemented as 'FasttextWikiTokenizer'. More information: https://fasttext.cc/docs/en/pretrained-vectors.html.
Args:
embedding_type: A string in the format `fastext.wiki.$LANG_CODE`. e.g. `fasttext.wiki.de` or `fasttext.wiki.es`
Returns:
Object with the URL and filename used later on for downloading the file.
"""
lang = embedding_type.split('.')[2]
return {
'file': 'wiki.{}.vec'.format(lang),
'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-wiki/wiki.{}.vec'.format(lang),
'extract': False,
}
def build_fasttext_cc_embedding_obj(embedding_type):
"""FastText pre-trained word vectors for 157 languages, with 300 dimensions, trained on Common Crawl and Wikipedia. Released in 2018, it succeesed the 2017 FastText Wikipedia embeddings. It's recommended to use the same tokenizer for your data that was used to construct the embeddings. This information and more can be find on their Website: https://fasttext.cc/docs/en/crawl-vectors.html.
Args:
embedding_type: A string in the format `fastext.cc.$LANG_CODE`. e.g. `fasttext.cc.de` or `fasttext.cc.es`
Returns:
Object with the URL and filename used later on for downloading the file.
"""
lang = embedding_type.split('.')[2]
return {
'file': 'cc.{}.300.vec.gz'.format(lang),
'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.{}.300.vec.gz'.format(lang),
'extract': False
}
def get_embedding_type(embedding_type):
if embedding_type.startswith('fasttext.wiki.'):
return build_fasttext_wiki_embedding_obj(embedding_type)
if embedding_type.startswith('fasttext.cc.'):
return build_fasttext_cc_embedding_obj(embedding_type)
data_obj = _EMBEDDING_TYPES.get(embedding_type)
if data_obj is None:
raise ValueError("Embedding type should be either `fasttext.wiki.$LANG_CODE`, `fasttext.cc.$LANG_CODE` or one of the English embeddings: '{}'".format(
_EMBEDDING_TYPES.keys()))
return data_obj
def get_embeddings_index(embedding_type='glove.42B.300d', embedding_dims=None, embedding_path=None, cache=True):
"""Retrieves embeddings index from embedding name or path. Will automatically download and cache as needed.
Args:
embedding_type: The embedding type to load.
embedding_path: Path to a local embedding to use instead of the embedding type. Ignores `embedding_type` if specified.
Returns:
The embeddings indexed by word.
"""
if embedding_path is not None:
embedding_type = embedding_path # identify embedding by path
embeddings_index = _EMBEDDINGS_CACHE.get(embedding_type)
if embeddings_index is not None:
return embeddings_index
if embedding_path is None:
embedding_type_obj = get_embedding_type(embedding_type)
# some very rough wrangling of zip files with the keras util `get_file`
# a special problem: when multiple files are in one zip file
extract = embedding_type_obj.get('extract', True)
file_path = get_file(
embedding_type_obj['file'], origin=embedding_type_obj['url'], extract=extract, cache_subdir='embeddings', file_hash=embedding_type_obj.get('file_hash',))
if 'file_in_zip' in embedding_type_obj:
zip_folder = file_path.split('.zip')[0]
with ZipFile(file_path, 'r') as zf:
zf.extractall(zip_folder)
file_path = os.path.join(
zip_folder, embedding_type_obj['file_in_zip'])
else:
if extract:
if file_path.endswith('.zip'):
file_path = file_path.split('.zip')[0]
# if file_path.endswith('.gz'):
# file_path = file_path.split('.gz')[0]
else:
file_path = embedding_path
embeddings_index = _build_embeddings_index(file_path, embedding_dims)
if cache:
_EMBEDDINGS_CACHE[embedding_type] = embeddings_index
return embeddings_index
```
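A usage sketch for the two public helpers above; the 50-dimensional GloVe vectors are used here, and `embedding_dims` must match the chosen file.
```python
from texcla import embeddings

index = embeddings.get_embeddings_index('glove.6B.50d', embedding_dims=50)
print(index['the'].shape)          # (50,)

# Map a toy vocabulary (word -> row id) onto an embedding matrix;
# out-of-vocabulary rows (and special tokens such as PAD) stay zero.
word_index = {'<PAD>': 0, 'hello': 1, 'world': 2}
weights = embeddings.build_embedding_weights(word_index, index)
print(weights.shape)               # (3, 50)
```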
#### File: libs/ngrams/ngrams.py
```python
from __future__ import unicode_literals
# https://github.com/keras-team/keras/blob/master/examples/imdb_fasttext.py
# COPYRIGHT
# All contributions by <NAME>:
# Copyright (c) 2015 - 2018, <NAME>.
# All rights reserved.
# All contributions by Google:
# Copyright (c) 2015 - 2018, Google, Inc.
# All rights reserved.
# All contributions by Microsoft:
# Copyright (c) 2017 - 2018, Microsoft, Inc.
# All rights reserved.
# All other contributions:
# Copyright (c) 2015 - 2018, the respective contributors.
# All rights reserved.
# Each contributor holds copyright over their respective contributions.
# The project versioning (Git) records all such contribution source information.
# LICENSE
# The MIT License (MIT)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def create_ngram_set(input_list, ngram_value=2):
"""
Extract a set of n-grams from a list of integers.
>>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=2)
{(4, 9), (4, 1), (1, 4), (9, 4)}
>>> create_ngram_set([1, 4, 9, 4, 1, 4], ngram_value=3)
[(1, 4, 9), (4, 9, 4), (9, 4, 1), (4, 1, 4)]
"""
return set(zip(*[input_list[i:] for i in range(ngram_value)]))
def add_ngram(sequences, token_indice, ngram_range=2):
"""
Augment the input list of list (sequences) by appending n-grams values.
Example: adding bi-gram
>>> sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
>>> token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017}
>>> add_ngram(sequences, token_indice, ngram_range=2)
[[1, 3, 4, 5, 1337, 2017], [1, 3, 7, 9, 2, 1337, 42]]
Example: adding tri-gram
>>> sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]
>>> token_indice = {(1, 3): 1337, (9, 2): 42, (4, 5): 2017, (7, 9, 2): 2018}
>>> add_ngram(sequences, token_indice, ngram_range=3)
[[1, 3, 4, 5, 1337, 2017], [1, 3, 7, 9, 2, 1337, 42, 2018]]
"""
new_sequences = []
for input_list in sequences:
new_list = input_list[:]
for ngram_value in range(2, ngram_range + 1):
for i in range(len(new_list) - ngram_value + 1):
ngram = tuple(new_list[i:i + ngram_value])
if ngram in token_indice:
new_list.append(token_indice[ngram])
new_sequences.append(new_list)
return new_sequences
```
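How the two helpers are typically combined (mirroring the Keras fastText example they come from): collect all bi-grams from the training sequences, assign them ids above the existing vocabulary, then augment the sequences. The import path below is assumed from the file location; adjust it to your layout.
```python
from libs.ngrams.ngrams import add_ngram, create_ngram_set  # assumed path

sequences = [[1, 3, 4, 5], [1, 3, 7, 9, 2]]

ngram_set = set()
for seq in sequences:
    ngram_set.update(create_ngram_set(seq, ngram_value=2))

max_features = 10  # assume token ids 0..9 are already taken by the unigram vocabulary
token_indice = {ng: i + max_features for i, ng in enumerate(ngram_set)}

augmented = add_ngram(sequences, token_indice, ngram_range=2)
print(augmented[0])   # the original tokens followed by their bi-gram ids (>= 10)
```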
#### File: texcla/preprocessing/char_tokenizer.py
```python
try:
import spacy
except ImportError:
pass
from .tokenizer import Tokenizer
from . import utils
class CharTokenizer(Tokenizer):
def __init__(self,
lang='en',
lower=True,
charset=None):
"""Encodes text into `(samples, characters)`
Args:
lang: The spacy language to use. (Default value: 'en')
lower: Lower cases the tokens if True. (Default value: True)
charset: The character set to use. For example `charset = 'abc123'`. If None, all characters will be used.
(Default value: None)
"""
super(CharTokenizer, self).__init__(lang, lower)
self.charset = charset
def token_generator(self, texts, **kwargs):
"""Yields tokens from texts as `(text_idx, character)`
"""
for text_idx, text in enumerate(texts):
if self.lower:
text = text.lower()
for char in text:
yield text_idx, char
class SentenceCharTokenizer(CharTokenizer):
def __init__(self,
lang='en',
lower=True,
charset=None):
"""Encodes text into `(samples, sentences, characters)`
Args:
lang: The spacy language to use. (Default value: 'en')
lower: Lower cases the tokens if True. (Default value: True)
charset: The character set to use. For example `charset = 'abc123'`. If None, all characters will be used.
(Default value: None)
"""
super(SentenceCharTokenizer, self).__init__(lang, lower, charset)
def token_generator(self, texts, **kwargs):
"""Yields tokens from texts as `(text_idx, sent_idx, character)`
Args:
texts: The list of texts.
**kwargs: Supported args include:
n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
batch_size: The number of texts to accumulate into a common working set before processing.
(Default value: 1000)
"""
# Perf optimization. Only process what is necessary.
n_threads, batch_size = utils._parse_spacy_kwargs(**kwargs)
nlp = spacy.load(self.lang)
kwargs = {
'batch_size': batch_size,
'n_threads': n_threads,
'disable': ['ner']
}
# Perf optimization: Lower the entire text instead of individual tokens.
texts_gen = utils._apply_generator(
texts, lambda x: x.lower()) if self.lower else texts
for text_idx, doc in enumerate(nlp.pipe(texts_gen, **kwargs)):
for sent_idx, sent in enumerate(doc.sents):
for word in sent:
for char in word:
yield text_idx, sent_idx, char
```
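A usage sketch for `CharTokenizer.token_generator`, assuming the class is exported from `texcla.preprocessing` like the other tokenizers and that the base `Tokenizer` constructor only stores the `lang`/`lower` flags.
```python
from texcla.preprocessing import CharTokenizer  # assumed export

tok = CharTokenizer(lower=True)
print(list(tok.token_generator(["Hi!", "ok"])))
# [(0, 'h'), (0, 'i'), (0, '!'), (1, 'o'), (1, 'k')]
```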
#### File: texcla/preprocessing/utils.py
```python
from __future__ import absolute_import, unicode_literals
import abc
import logging
from collections import OrderedDict, defaultdict
from copy import deepcopy
from multiprocessing import cpu_count
import numpy as np
import six
from keras.preprocessing.sequence import pad_sequences as keras_pad_sequences
from keras.utils.generic_utils import Progbar
try:
import spacy
except ImportError:
pass
logger = logging.getLogger(__name__)
class _CountTracker(object):
"""Helper class to track counts of various document hierarchies in the corpus.
For example, if the tokenizer can tokenize docs as (docs, paragraph, sentences, words), then this utility
will track number of paragraphs, number of sentences within paragraphs and number of words within sentence.
"""
def __init__(self):
self._prev_indices = None
self._local_counts = None
self.counts = None
def update(self, indices):
"""Updates counts based on indices. The algorithm tracks the index change at i and
update global counts for all indices beyond i with local counts tracked so far.
"""
# Initialize various lists for the first time based on length of indices.
if self._prev_indices is None:
self._prev_indices = indices
# +1 to track token counts in the last index.
self._local_counts = np.full(len(indices) + 1, 1)
self._local_counts[-1] = 0
self.counts = [[] for _ in range(len(self._local_counts))]
has_reset = False
for i in range(len(indices)):
# index value changed. Push all local values beyond i to count and reset those local_counts.
# For example, if document index changed, push counts on sentences and tokens and reset their local_counts
# to indicate that we are tracking those for new document. We need to do this at all document hierarchies.
if indices[i] > self._prev_indices[i]:
self._local_counts[i] += 1
has_reset = True
for j in range(i + 1, len(self.counts)):
self.counts[j].append(self._local_counts[j])
self._local_counts[j] = 1
# If none of the aux indices changed, update token count.
if not has_reset:
self._local_counts[-1] += 1
self._prev_indices = indices[:]
def finalize(self):
"""This will add the very last document to counts. We also get rid of counts[0] since that
represents document level which doesnt come under anything else. We also convert all count
values to numpy arrays so that stats can be computed easily.
"""
for i in range(1, len(self._local_counts)):
self.counts[i].append(self._local_counts[i])
self.counts.pop(0)
for i in range(len(self.counts)):
self.counts[i] = np.array(self.counts[i])
def _apply_generator(texts, apply_fn):
for text in texts:
yield apply_fn(text)
def _append(lst, indices, value):
"""Adds `value` to `lst` list indexed by `indices`. Will create sub lists as required.
"""
for i, idx in enumerate(indices):
# We need to loop because sometimes indices can increment by more than 1 due to missing tokens.
# Example: Sentence with no words after filtering words.
while len(lst) <= idx:
# Update max counts whenever a new sublist is created.
# There is no need to worry about indices beyond `i` since they will end up creating new lists as well.
lst.append([])
lst = lst[idx]
# Add token and update token max count.
lst.append(value)
def _recursive_apply(lst, apply_fn):
if len(lst) > 0 and not isinstance(lst[0], list):
for i in range(len(lst)):
lst[i] = apply_fn(lst[i])
else:
for sub_list in lst:
_recursive_apply(sub_list, apply_fn)
def _to_unicode(text):
if not isinstance(text, six.text_type):
text = text.decode('utf-8')
return text
def _parse_spacy_kwargs(**kwargs):
"""Supported args include:
Args:
n_threads/num_threads: Number of threads to use. Uses num_cpus - 1 by default.
batch_size: The number of texts to accumulate into a common working set before processing.
(Default value: 1000)
"""
n_threads = kwargs.get('n_threads') or kwargs.get('num_threads')
batch_size = kwargs.get('batch_size')
if n_threads is None or n_threads == -1:
n_threads = cpu_count() - 1
if batch_size is None or batch_size == -1:
batch_size = 1000
return n_threads, batch_size
def _pad_token_sequences(sequences, max_tokens,
padding, truncating, value):
# TODO: better variable names (see below)
return keras_pad_sequences(sequences, maxlen=max_tokens, padding=padding, truncating=truncating, value=value)
def _pad_sent_sequences(sequences, max_sentences, max_tokens, padding, truncating, value):
# TODO: better names (see below)
# Infer max lengths if needed.
if max_sentences is None or max_tokens is None:
max_sentences_computed = 0
max_tokens_computed = 0
for sent_seq in sequences:
max_sentences_computed = max(max_sentences_computed, len(sent_seq))
max_tokens_computed = max(max_tokens_computed, np.max(
[len(token_seq) for token_seq in sent_seq]))
# Only use inferred values for None.
if max_sentences is None:
max_sentences = max_sentences_computed
if max_tokens is None:
max_tokens = max_tokens_computed
result = np.ones(shape=(len(sequences), max_sentences, max_tokens)) * value
for idx, sent_seq in enumerate(sequences):
# empty list/array was found
if not len(sent_seq):
continue
if truncating == 'pre':
trunc = sent_seq[-max_sentences:]
elif truncating == 'post':
trunc = sent_seq[:max_sentences]
else:
raise ValueError(
'Truncating type "%s" not understood' % truncating)
# Apply padding.
if padding == 'post':
result[idx, :len(trunc)] = _pad_token_sequences(
trunc, max_tokens, padding, truncating, value)
elif padding == 'pre':
result[idx, -len(trunc):] = _pad_token_sequences(trunc,
max_tokens, padding, truncating, value)
else:
raise ValueError('Padding type "%s" not understood' % padding)
return result
def unicodify(texts):
"""Encodes all text sequences as unicode. This is a python2 hassle.
Args:
texts: The sequence of texts.
Returns:
Unicode encoded sequences.
"""
return [_to_unicode(text) for text in texts]
```
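To illustrate the sentence-level padding helper (a private function, imported here only for demonstration): a ragged (documents, sentences, tokens) structure is padded into a dense array whose sentence and token dimensions are inferred when not given.
```python
from texcla.preprocessing.utils import _pad_sent_sequences

seqs = [
    [[1, 2, 3], [4]],   # doc 0: two sentences
    [[5, 6]],           # doc 1: one sentence
]
out = _pad_sent_sequences(seqs, max_sentences=None, max_tokens=None,
                          padding='post', truncating='post', value=0)

print(out.shape)   # (2, 2, 3)
print(out[1])      # [[5. 6. 0.] [0. 0. 0.]]
```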
|
{
"source": "jfilter/tweets-with-images",
"score": 3
}
|
#### File: tweets-with-images/tweets_with_images/get_images.py
```python
import csv
import pathlib
import re
from io import StringIO
import os
import requests
from lxml import etree
def get_images(tweets, media_path):
pathlib.Path(media_path).mkdir(parents=True, exist_ok=True)
errors = []
tweets_with_images = []
for row in tweets:
text = row['tweet']
# filter out replies (but the more recent tweets don't have it, we have
# to filter out later on when scraping)
if text.startswith('@') or text.startswith('"@'):
continue
m = re.search(r'pic.twitter.com/\w*', text)
if m:
found = m.group(0)
print(found)
try:
r = requests.get('https://' + found)
except Exception as e:
print('request failed: ' + found)
continue
try:
res_text = r.text
parser = etree.HTMLParser()
tree = etree.parse(StringIO(res_text), parser)
# get the images, we have to exclude profile images and emojis
# also only take 'root' tweets
imgs = tree.xpath(
'//*[contains(@class, "js-initial-focus") and not (@data-has-parent-tweet="true")]//img[not(contains(@class, "profile") or contains(@class, "Emoji"))]')
except Exception as e:
print('parsing failed: ' + res_text)
continue
for i in imgs:
# use the HQ / original version
url = i.get('src') + ':large'
print(url)
# take filename from url (but omit the :large)
fn = url.split('/')[-1].split(':')[0]
# try for 3 times and otherwise collect error and go on
tries = 3
while tries > 0:
try:
media_res = requests.get(url)
if not media_res.ok:
raise Exception(media_res.status_code)
with open(os.path.join(media_path, fn), 'wb') as f:
f.write(media_res.content)
row.update(
{'filename': os.path.join(media_path, fn)})
tweets_with_images.append(row)
tries = 0
except Exception as e:
print(str(e))
errors.append(url + ',' + found + ',' + str(e))
tries -= 1
print()
return tweets_with_images
```
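The two string manipulations that drive `get_images`: pulling the `pic.twitter.com` shortlink out of the tweet text, and deriving a local filename from the final image URL. Both are shown on made-up values:
```python
import re

text = 'look at this pic.twitter.com/AbC123xyz so cute'
m = re.search(r'pic.twitter.com/\w*', text)
print(m.group(0))                        # pic.twitter.com/AbC123xyz

url = 'https://pbs.twimg.com/media/XyZ987.jpg:large'
print(url.split('/')[-1].split(':')[0])  # XyZ987.jpg
```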
|
{
"source": "JFincher42/arcade",
"score": 3
}
|
#### File: tests/unit2/test_astar.py
```python
import arcade
SPRITE_IMAGE_SIZE = 128
SPRITE_SCALING = 0.25
SPRITE_SIZE = int(SPRITE_IMAGE_SIZE * SPRITE_SCALING)
def test_astar():
grid_size = SPRITE_SIZE
# Sprite lists
player_list = arcade.SpriteList()
wall_list = arcade.SpriteList(use_spatial_hash=True, spatial_hash_cell_size=128)
enemy_list = arcade.SpriteList()
# Set up the player
player = arcade.Sprite(":resources:images/animated_characters/female_person/femalePerson_idle.png",
SPRITE_SCALING)
player.center_x = SPRITE_SIZE * 1
player.center_y = SPRITE_SIZE * 1
player_list.append(player)
# Set enemies
enemy = arcade.Sprite(":resources:images/animated_characters/zombie/zombie_idle.png", SPRITE_SCALING)
enemy.center_x = SPRITE_SIZE * 5
enemy.center_y = SPRITE_SIZE * 5
enemy_list.append(enemy)
# Calculate the playing field size. We can't generate paths outside of
# this.
playing_field_left_boundary = -SPRITE_SIZE * 2
playing_field_right_boundary = SPRITE_SIZE * 35
playing_field_top_boundary = SPRITE_SIZE * 17
playing_field_bottom_boundary = -SPRITE_SIZE * 2
# This calculates a list of barriers. By calculating it here in the
# init, we are assuming this list does not change. In this example,
# our walls don't move, so that is ok. If we want moving barriers (such as
# moving platforms or enemies) we need to recalculate. This can be a
# time-intensive process depending on the playing field size and grid
# resolution.
# Note: If the enemy sprites are the same size, we only need to calculate
# one of these. We do NOT need a different one for each enemy. The sprite
# is just used for a size calculation.
barrier_list = arcade.AStarBarrierList(enemy,
wall_list,
grid_size,
playing_field_left_boundary,
playing_field_right_boundary,
playing_field_bottom_boundary,
playing_field_top_boundary)
# print()
path = arcade.astar_calculate_path(enemy.position,
player.position,
barrier_list,
diagonal_movement=False)
# barrier_list.recalculate()
# print(f"barrier_list: {barrier_list.barrier_list}")
# print("Path 1", path)
assert path == [(160, 160), (128, 160), (128, 128), (96, 128), (96, 96), (64, 96), (64, 64), (32, 64), (32, 32)]
path = arcade.astar_calculate_path(enemy.position,
player.position,
barrier_list,
diagonal_movement=True)
assert path == [(160, 160), (128, 128), (96, 96), (64, 64), (32, 32)]
# print("Path 2", path)
sprite = arcade.Sprite(":resources:images/tiles/grassCenter.png", SPRITE_SCALING)
sprite.center_x = SPRITE_SIZE * 3
sprite.center_y = SPRITE_SIZE * 1
wall_list.append(sprite)
sprite = arcade.Sprite(":resources:images/tiles/grassCenter.png", SPRITE_SCALING)
sprite.center_x = SPRITE_SIZE * 3
sprite.center_y = SPRITE_SIZE * 2
wall_list.append(sprite)
sprite = arcade.Sprite(":resources:images/tiles/grassCenter.png", SPRITE_SCALING)
sprite.center_x = SPRITE_SIZE * 3
sprite.center_y = SPRITE_SIZE * 3
wall_list.append(sprite)
sprite = arcade.Sprite(":resources:images/tiles/grassCenter.png", SPRITE_SCALING)
sprite.center_x = SPRITE_SIZE * 3
sprite.center_y = SPRITE_SIZE * 4
wall_list.append(sprite)
sprite = arcade.Sprite(":resources:images/tiles/grassCenter.png", SPRITE_SCALING)
sprite.center_x = SPRITE_SIZE * 3
sprite.center_y = SPRITE_SIZE * 5
wall_list.append(sprite)
barrier_list.recalculate()
path = arcade.astar_calculate_path(enemy.position,
player.position,
barrier_list,
diagonal_movement=True)
assert path == [(160, 160), (128, 160), (96, 192), (64, 160), (64, 128), (64, 96), (64, 64), (32, 32)]
```
|
{
"source": "jfindlay/Azure-MachineLearning-DataScience",
"score": 3
}
|
#### File: Recommendations/Python/checkColdRecomFeatureMatch.py
```python
f_prefix = 'PT3'
f_catalog = 'catalog.csv'
f_train = 'train-sorted.csv'
f_seed = 'seed_as_train.csv'
f_recom = 'scores-sar-cold_reversed.tsv'
f_output = 'list_of_recom_no_feature_match.csv'
f_catalog_header = True
f_seed_header = False
f_seed_sep = ','
f_recom_sep = '\t'
f_recom_beginning_comment = True
cold_upper_bound = 2
#========== Parameter for PT dataset =========
# update file names based on f_prefix. Users need to change them
# accordingly based on your own file organization.
f_train = f_prefix + '/' + f_train
f_catalog = f_prefix + '/' + f_catalog
f_seed = f_prefix + '/' + f_seed
f_recom = f_prefix + '/data/' + f_recom
f_output = f_prefix + '/data/' + f_output
#=============================================================================
# The rest should not need to be changed when running on different datasets.
# Read the catalog file
print('Read the catalog file')
fin_catalog = open(f_catalog)
line = fin_catalog.readline()
D_catalog = {}
if f_catalog_header:
# extract feature name
fnames = line.strip().split(',')[2:]
line = fin_catalog.readline()
else:
# use default feature name
f_num = len(line.strip().split(',')) - 2
fnames = ['f_' + str(i) for i in range(f_num)]
while line:
fs = line.strip().split(',')
itemId = fs[0]
if itemId not in D_catalog:
D_catalog[itemId] = {}
# We need to save all feature values for the current item
fs_feature = fs[2:]
fs_feature_mvalue = [v.strip().strip('"').split(';') for v in fs_feature]
for fi in range(len(fs_feature_mvalue)):
if len(fs_feature_mvalue[fi])==1 and len(fs_feature_mvalue[fi][0])==0:
# This is an empty feature value
pass
else:
# We process non-empty feature value only
fi_value_list = fs_feature_mvalue[fi]
D_catalog[itemId][fi] = {}
for fv in fi_value_list:
D_catalog[itemId][fi][fv] = 1
line = fin_catalog.readline()
fin_catalog.close()
# Read the training file
print('Read the training file')
fin_train = open(f_train)
line = fin_train.readline()
D_item_user = {}
while line:
fs = line.strip().split(',')
userId = fs[0]
itemId = fs[1]
if itemId not in D_item_user:
D_item_user[itemId] = {}
D_item_user[itemId][userId] = 1
line = fin_train.readline()
fin_train.close()
# Read the seed file
print('Read the seed file')
fin_seed = open(f_seed)
D_seed = {}
D_item_type = {}
line = fin_seed.readline()
if f_seed_header:
line = fin_seed.readline()
while line:
fs = line.strip().split(f_seed_sep)
userId = fs[0]
itemId = fs[1]
D_seed[userId] = itemId
# Determine the type of the seed item
if itemId not in D_item_type:
itemFreq = 0
if itemId in D_item_user:
itemFreq = len(D_item_user[itemId])
if itemId in D_catalog:
if itemFreq > cold_upper_bound:
itemType = 'W'
elif itemFreq > 0:
itemType = 'C2'
else:
itemType = 'C1'
else:
# M means item missing in the catalog file
itemType = 'M'
D_item_type[itemId] = itemType
line = fin_seed.readline()
fin_seed.close()
# In this function we compute the pairwise similarity of items
# based on their features.
def compareItemFeatures(D_item1, D_item2):
# This function return the number of matched feature values
# for multi-valued feature. If at least one value is matched,
# we will consider it as matched
f1_index = D_item1.keys()
c_count = 0
for fi in f1_index:
if fi in D_item2:
# if both items have this feature
# then we will compare their feature values
for fv in D_item1[fi].keys():
if fv in D_item2[fi]:
c_count += 1
break
return c_count
# Read the recommendation file
print('Read the recommendation file')
# We use D_item_sim to cache item pairwise similarity
D_item_sim = {}
# We use D_item_nomatch to cache all seed items with unmatched items returned
D_item_nomatch = {}
fout = open(f_output, 'w')
fin_recom = open(f_recom)
line = fin_recom.readline()
if f_recom_beginning_comment:
print('Skip the first few lines of comments')
while line[0]=='#':
line = fin_recom.readline()
# Process the valid lines one by one
while line:
fs = line.strip().split(f_recom_sep)
userId = fs[0]
itemId = fs[1]
if userId in D_seed:
seedItemId = D_seed[userId]
seedItemType = D_item_type[seedItemId]
if seedItemType=='C1' or seedItemType=='C2':
# compare item features
if itemId <= seedItemId:
itemA = itemId
itemB = seedItemId
else:
itemA = seedItemId
itemB = itemId
if itemA not in D_item_sim:
D_item_sim[itemA] = {}
if itemB not in D_item_sim[itemA]:
D_itemA_ft = D_catalog[itemA]
D_itemB_ft = D_catalog[itemB]
D_item_sim[itemA][itemB] = compareItemFeatures(D_itemA_ft, D_itemB_ft)
# logical check
simAB = D_item_sim[itemA][itemB]
if simAB==0:
# the case we need to investigate
fout.write('userId,' + userId + '\n')
fout.write('seedItemId,' + seedItemId + '\n')
fout.write('recomItemId,' + itemId + '\n')
D_item_nomatch[seedItemId] = D_item_nomatch.get(seedItemId, 0) + 1
line = fin_recom.readline()
fin_recom.close()
fout.close()
# For all items in the catalog, determine their types, and summarize number of
# items of different types.
for itemId in D_catalog:
if itemId not in D_item_type:
itemFreq = 0
if itemId in D_item_user:
itemFreq = len(D_item_user[itemId])
if itemFreq > cold_upper_bound:
itemType = 'W'
elif itemFreq > 0:
itemType = 'C2'
else:
itemType = 'C1'
D_item_type[itemId] = itemType
all_item_type_list = list(D_item_type.values())
n_item_warm = all_item_type_list.count('W')
n_item_C1 = all_item_type_list.count('C1')
n_item_C2 = all_item_type_list.count('C2')
# Summarize some statistics in the end
n_item_total = len(D_catalog)
n_seed_nomatch = len(D_item_nomatch)
percent_nomatch = float(n_seed_nomatch) / n_item_total
print('the total number of items in catalog is %d'%n_item_total)
print('the total number of seed items which generate recom items with no feature match is %d'%n_seed_nomatch)
print('the percentage of seed items which generate recom items with no feature match is %f'%percent_nomatch)
print('the total number of warm item is %d'%n_item_warm)
print('the percentage of warm item is %f'%(float(n_item_warm)/n_item_total))
print('the total number of C1 item is %d'%n_item_C1)
print('the percentage of C1 item is %f'%(float(n_item_C1)/n_item_total))
print('the total number of C2 item is %d'%n_item_C2)
print('the percentage of C2 item is %f'%(float(n_item_C2)/n_item_total))
```
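As a quick illustration of the matching rule in `compareItemFeatures` above, the self-contained sketch below applies the same logic to two toy feature dictionaries in the `{feature_index: {value: 1}}` shape used by `D_catalog`. The helper name and the item data are made up for the example.
```python
def count_matched_features(item_a, item_b):
    # Mirrors compareItemFeatures: a multi-valued feature counts as matched
    # when both items have the feature and share at least one value for it.
    matched = 0
    for feature, values in item_a.items():
        if feature in item_b and any(v in item_b[feature] for v in values):
            matched += 1
    return matched

item_a = {0: {'rock': 1, 'pop': 1}, 1: {'1990s': 1}}
item_b = {0: {'rock': 1}, 2: {'live': 1}}

# Feature 0 matches on 'rock'; feature 1 is missing from item_b, so the count is 1.
print(count_matched_features(item_a, item_b))  # -> 1
```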
#### File: Spark/Python/ConsumeGBNYCReg.py
```python
taxi_test_file_loc = "wasb://[email protected]/Data/NYCTaxi/JoinedTaxiTripFare.Point1Pct.Test.tsv";
# 2. PATH TO BLOB STORAGE WHICH HAS STORED MODELS WITH WHICH TEST DATA IS TO BE SCORED
modelDir = "wasb:///user/remoteuser/NYCTaxi/Models/"; # The trailing slash is needed;
# 3. PATH TO BLOB STORAGE WHERE SCORED RESULTS WILL BE OUTPUT
scoredResultDir = "wasb:///user/remoteuser/NYCTaxi/ScoredResults/"; # The trailing slash is needed;
# ### Path to specific models to be used for scoring (copy and paste from the bottom of the model training notebook)
BoostedTreeRegressionFileLoc = modelDir + "GradientBoostingTreeRegression_2016-04-0116_26_52.098590";
import datetime
# ## Set spark context and import necessary libraries
import pyspark
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql import Row
from pyspark.sql.functions import UserDefinedFunction
from pyspark.sql.types import *
import atexit
from numpy import array
import numpy as np
import datetime
sc = SparkContext(appName="PythonGBNYCPred")
sqlContext = SQLContext(sc)
atexit.register(lambda: sc.stop())
sc.defaultParallelism
# ## Data ingestion: Read in joined 0.1% taxi trip and fare file (as tsv), format and clean data, and create data-frame
## IMPORT FILE FROM PUBLIC BLOB
taxi_test_file = sc.textFile(taxi_test_file_loc)
## GET SCHEMA OF THE FILE FROM HEADER
taxi_header = taxi_test_file.filter(lambda l: "medallion" in l)
## PARSE FIELDS AND CONVERT DATA TYPE FOR SOME FIELDS
taxi_temp = taxi_test_file.subtract(taxi_header).map(lambda k: k.split("\t")) .map(lambda p: (p[0],p[1],p[2],p[3],p[4],p[5],p[6],int(p[7]),int(p[8]),int(p[9]),int(p[10]),
float(p[11]),float(p[12]),p[13],p[14],p[15],p[16],p[17],p[18],float(p[19]),
float(p[20]),float(p[21]),float(p[22]),float(p[23]),float(p[24]),int(p[25]),int(p[26])))
## GET SCHEMA OF THE FILE FROM HEADER
schema_string = taxi_test_file.first()
fields = [StructField(field_name, StringType(), True) for field_name in schema_string.split('\t')]
fields[7].dataType = IntegerType() #Pickup hour
fields[8].dataType = IntegerType() # Pickup week
fields[9].dataType = IntegerType() # Weekday
fields[10].dataType = IntegerType() # Passenger count
fields[11].dataType = FloatType() # Trip time in secs
fields[12].dataType = FloatType() # Trip distance
fields[19].dataType = FloatType() # Fare amount
fields[20].dataType = FloatType() # Surcharge
fields[21].dataType = FloatType() # Mta_tax
fields[22].dataType = FloatType() # Tip amount
fields[23].dataType = FloatType() # Tolls amount
fields[24].dataType = FloatType() # Total amount
fields[25].dataType = IntegerType() # Tipped or not
fields[26].dataType = IntegerType() # Tip class
taxi_schema = StructType(fields)
## CREATE DATA FRAME
taxi_df_test = sqlContext.createDataFrame(taxi_temp, taxi_schema)
## CREATE A CLEANED DATA-FRAME BY DROPPING SOME UN-NECESSARY COLUMNS & FILTERING FOR UNDESIRED VALUES OR OUTLIERS
taxi_df_test_cleaned = taxi_df_test.drop('medallion').drop('hack_license').drop('store_and_fwd_flag').drop('pickup_datetime') .drop('dropoff_datetime').drop('pickup_longitude').drop('pickup_latitude').drop('dropoff_latitude') .drop('dropoff_longitude').drop('tip_class').drop('total_amount').drop('tolls_amount').drop('mta_tax') .drop('direct_distance').drop('surcharge') .filter("passenger_count > 0 and passenger_count < 8 AND payment_type in ('CSH', 'CRD') AND tip_amount >= 0 AND tip_amount < 30 AND fare_amount >= 1 AND fare_amount < 150 AND trip_distance > 0 AND trip_distance < 100 AND trip_time_in_secs > 30 AND trip_time_in_secs < 7200" )
## CACHE DATA-FRAME IN MEMORY & MATERIALIZE DF IN MEMORY
taxi_df_test_cleaned.cache()
taxi_df_test_cleaned.count()
## REGISTER DATA-FRAME AS A TEMP-TABLE IN SQL-CONTEXT
taxi_df_test_cleaned.registerTempTable("taxi_test")
# ## Feature transformation and data prep for scoring with models
# #### Create traffic time feature, and indexing and one-hot encode categorical features
from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler, VectorIndexer
### CREATE FOUR BUCKETS FOR TRAFFIC TIMES
sqlStatement = """
SELECT *,
CASE
WHEN (pickup_hour <= 6 OR pickup_hour >= 20) THEN "Night"
WHEN (pickup_hour >= 7 AND pickup_hour <= 10) THEN "AMRush"
WHEN (pickup_hour >= 11 AND pickup_hour <= 15) THEN "Afternoon"
WHEN (pickup_hour >= 16 AND pickup_hour <= 19) THEN "PMRush"
END as TrafficTimeBins
FROM taxi_test
"""
taxi_df_test_with_newFeatures = sqlContext.sql(sqlStatement)
## CACHE DATA-FRAME IN MEMORY & MATERIALIZE DF IN MEMORY
taxi_df_test_with_newFeatures.cache()
taxi_df_test_with_newFeatures.count()
## INDEX AND ONE-HOT ENCODING
stringIndexer = StringIndexer(inputCol="vendor_id", outputCol="vendorIndex")
model = stringIndexer.fit(taxi_df_test_with_newFeatures) # Input data-frame is the cleaned one from above
indexed = model.transform(taxi_df_test_with_newFeatures)
encoder = OneHotEncoder(dropLast=False, inputCol="vendorIndex", outputCol="vendorVec")
encoded1 = encoder.transform(indexed)
stringIndexer = StringIndexer(inputCol="rate_code", outputCol="rateIndex")
model = stringIndexer.fit(encoded1)
indexed = model.transform(encoded1)
encoder = OneHotEncoder(dropLast=False, inputCol="rateIndex", outputCol="rateVec")
encoded2 = encoder.transform(indexed)
stringIndexer = StringIndexer(inputCol="payment_type", outputCol="paymentIndex")
model = stringIndexer.fit(encoded2)
indexed = model.transform(encoded2)
encoder = OneHotEncoder(dropLast=False, inputCol="paymentIndex", outputCol="paymentVec")
encoded3 = encoder.transform(indexed)
stringIndexer = StringIndexer(inputCol="TrafficTimeBins", outputCol="TrafficTimeBinsIndex")
model = stringIndexer.fit(encoded3)
indexed = model.transform(encoded3)
encoder = OneHotEncoder(dropLast=False, inputCol="TrafficTimeBinsIndex", outputCol="TrafficTimeBinsVec")
encodedFinal = encoder.transform(indexed)
# #### Creating RDD objects with feature arrays for input into models
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.feature import StandardScaler, StandardScalerModel
from pyspark.mllib.util import MLUtils
from numpy import array
# ONE-HOT ENCODING OF CATEGORICAL TEXT FEATURES FOR INPUT INTO TREE-BASED MODELS
def parseRowIndexingRegression(line):
features = np.array([line.paymentIndex, line.vendorIndex, line.rateIndex, line.TrafficTimeBinsIndex,
line.pickup_hour, line.weekday, line.passenger_count, line.trip_time_in_secs,
line.trip_distance, line.fare_amount])
return features
# FOR REGRESSION CLASSIFICATION TRAINING AND TESTING
indexedTESTreg = encodedFinal.map(parseRowIndexingRegression)
# CACHE RDDS IN MEMORY
indexedTESTreg.cache();
from pyspark.mllib.tree import GradientBoostedTrees, GradientBoostedTreesModel
####################################################################
## REGRESSION: LOAD SAVED MODEL, SCORE AND SAVE RESULTS BACK TO BLOB
savedModel = GradientBoostedTreesModel.load(sc, BoostedTreeRegressionFileLoc)
predictions = savedModel.predict(indexedTESTreg)
# SAVE RESULTS
datestamp = unicode(datetime.datetime.now()).replace(' ','').replace(':','_');
btregressionfilename = "GradientBoostingTreeRegression_" + datestamp + ".txt";
dirfilename = scoredResultDir + btregressionfilename;
predictions.saveAsTextFile(dirfilename)
# ## Cleanup objects from memory, print final time, and print scored output file locations
# #### Unpersist objects cached in memory
taxi_df_test_cleaned.unpersist()
indexedTESTreg.unpersist();
```
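The script above repeats the same StringIndexer then OneHotEncoder chain for four columns. Below is a sketch of a helper that factors out that pattern; it assumes the same Spark 1.x/2.x `pyspark.ml.feature` API used above (in Spark 3, `OneHotEncoder` is an estimator and must be fit first), and the loop in the comment is only illustrative.
```python
from pyspark.ml.feature import OneHotEncoder, StringIndexer

def index_and_encode(df, in_col, prefix):
    """Index a categorical string column, then one-hot encode the index column."""
    indexer = StringIndexer(inputCol=in_col, outputCol=prefix + "Index")
    indexed = indexer.fit(df).transform(df)
    encoder = OneHotEncoder(dropLast=False, inputCol=prefix + "Index", outputCol=prefix + "Vec")
    return encoder.transform(indexed)

# Equivalent to the chained calls above:
# encoded = taxi_df_test_with_newFeatures
# for in_col, prefix in [("vendor_id", "vendor"), ("rate_code", "rate"),
#                        ("payment_type", "payment"), ("TrafficTimeBins", "TrafficTimeBins")]:
#     encoded = index_and_encode(encoded, in_col, prefix)
```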
|
{
"source": "jfindleyderegt/au_trap",
"score": 3
}
|
#### File: jfindleyderegt/au_trap/test_naive.py
```python
import os, glob, cv2
import numpy as np
import pylab
def file_mean(im_file):
#read image
img = cv2.imread(im_file)
(height, width, depth) = img.shape
#flatten to gray, convert to floating
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#find point of interest
int_col = 0
x = 0
for i in range (0,width):
if np.sum(img[:,i:i+1]) > int_col:
x = i
int_col = np.sum(img[:,i:i+1])
int_col = 0
y = 0
for i in range (0,height):
if np.sum(img[i:i+1,:]) > int_col:
y = i
            int_col = np.sum(img[i:i+1,:])
background = 0
roi = 0
intensity = 0
background = np.sum(img[(y-50):(y+50),(x-100):(x+100)])
roi = np.sum(img[(y-50):(y+50),(x-50):(x+50)])
intensity = 2*roi - background
return intensity
path = '/home/faedrus/Documents/au_trap/test/'
listing = os.listdir(path)
listing = sorted (listing)
amount = len(listing) / 10
count = 0
for im_file in listing:
    print(file_mean(path+im_file))
```
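The two scanning loops in `file_mean` pick the column and row with the largest pixel sum. A vectorized sketch of the same idea using NumPy reductions follows; it works on the grayscale image directly and is offered as an alternative, not as the original project's code.
```python
import cv2
import numpy as np

def brightest_point(im_file):
    """Return (x, y) of the column and row with the largest summed intensity."""
    gray = cv2.imread(im_file, cv2.IMREAD_GRAYSCALE).astype(np.float64)
    x = int(np.argmax(gray.sum(axis=0)))  # column sums -> brightest column
    y = int(np.argmax(gray.sum(axis=1)))  # row sums -> brightest row
    return x, y
```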
|
{
"source": "jfine2358/py-linhomy-2015",
"score": 3
}
|
#### File: py/linhomy/product.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__metaclass__ = type
import numpy
from .fibonacci import FIB_WORDS
from .data import P_flag
from .matrices import fib_zeros_array
def word_from_IC(s):
return b''.join(
{'C': b'\x01', 'D': b'\x02'}[c]
for c in s.replace('IC', 'D')
)
def index_from_IC(s):
return FIB_WORDS.index(word_from_IC(s))
def change_product_basis(product_triple, a, b, c):
n_a, n_b, n_c = product_triple.shape
value = numpy.zeros(product_triple.shape, int)
rows = numpy.reshape(product_triple, (n_a * n_b, n_c))
for i in range(n_a):
for j in range(n_b):
# Convolve the columns to get coefficients.
coefficients = [
r * s
                # White space is to slow the reader - pay attention.
for r in a[ :, i]
for s in b[ :, j]
]
join_ic = sum(
c * r
for (c, r) in zip(coefficients, rows)
)
join_cd = numpy.dot(c, join_ic)
value[i, j, : ] = join_cd
return value
# TODO: Check this - it only looks right.
def product_formula(n, m):
value = fib_zeros_array(n, m, n + m)
for line in P_flag[n + m]:
pieces = line.split()
keys, column = pieces[0], tuple(map(int, pieces[1:]))
keys = keys[2:-1].split(',')
i = index_from_IC(keys[0])[1]
j = index_from_IC(keys[1])[1]
if len(keys[0]) == n:
value[i, j, : ] = column
if len(keys[0]) == m:
value[j, i, : ] = column
return value
```
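For readers who prefer tensor notation, the nested loops in `change_product_basis` compute value[i, j, l] as the sum over r, s, m of a[r, i] * b[s, j] * product_triple[r, s, m] * c[l, m]. The sketch below expresses that contraction with `numpy.einsum`; the equivalence follows from reading the loops and is offered as a sketch, not as code from the original project (note the original also allocates an int result array).
```python
import numpy

def change_product_basis_einsum(product_triple, a, b, c):
    # Sum a[r, i] * b[s, j] * product_triple[r, s, m] * c[l, m] over r, s, m
    # for every output entry (i, j, l), matching the explicit loops above.
    return numpy.einsum('ri,sj,rsm,lm->ijl', a, b, product_triple, c)
```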
#### File: py/linhomy/rankmatrices.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__metaclass__ = type
from collections import Counter
import functools
import itertools
import numpy
from .fibonacci import FIB_WORDS
from .matrices import fib_zeros_array
from .matrices import invert_grow_list
from .matrices import C_in_CD
from .matrices import CDR_from_CD
from .matrices import CDR_from_FLAG
from .matrices import D_in_CD
from .matrices import IC_from_CDR
from .product import product_formula
from .product import change_product_basis
from .six import iterbytes
from .tools import grow_list
def identity_rule(word):
yield word
def str_expand_d(items):
return str(' ').join(
str(',').join(
str('').join(
str(c) for c in iterbytes(word)
)
for word in item
)
for item in items
)
def _AAA_from_CDR(rule, deg):
value = fib_zeros_array(deg, deg)
# Because rule(word) = matrix * e_{word}, each rule(word) gives a
# column on the matrix. And so a certain value of j.
# Iterate over the (i, j) values.
for j, j_word in enumerate(FIB_WORDS[deg]):
for i_word in rule(j_word):
i = FIB_WORDS[deg].index(i_word)
value[i, j] += 1 # Assign to the matrix.
return value
def C_in_AAA_factory(CD_from_AAA, AAA_from_CD):
# Based on C_in_CDR.
@grow_list
def C_in_AAA(self):
deg = len(self)
value = fib_zeros_array(deg + 1, deg)
tmp = numpy.dot(C_in_CD[deg], CD_from_AAA[deg])
value = numpy.dot(AAA_from_CD[deg+1], tmp)
return value
return C_in_AAA
def D_in_AAA_factory(CD_from_AAA, AAA_from_CD):
# Based on C_in_AAA_factory.
@grow_list
def D_in_AAA(self):
deg = len(self)
value = fib_zeros_array(deg + 2, deg)
tmp = numpy.dot(D_in_CD[deg], CD_from_AAA[deg])
value = numpy.dot(AAA_from_CD[deg+2], tmp)
return value
return D_in_AAA
class RankMatrices:
def __init__(self, rule=None, matrices=None):
self.rule = rule
        if rule is not None:
            if matrices is not None:
raise ValueError
else:
self.AAA_from_CDR = grow_list(
lambda self_: _AAA_from_CDR(rule, len(self_))
)
else:
            if matrices is None:
raise ValueError
else:
self.AAA_from_CDR = matrices
self.CDR_from_AAA = invert_grow_list(self.AAA_from_CDR)
# Needed for C_in_AAA and D_in_AAA matrices.
self.AAA_from_CD = grow_list(
lambda self_: numpy.dot(
self.AAA_from_CDR[len(self_)],
CDR_from_CD[len(self_)]
)
)
self.CD_from_AAA = invert_grow_list(self.AAA_from_CD)
# Needed for change_product_basis.
self.IC_from_AAA = grow_list(
lambda self_: numpy.dot(
IC_from_CDR[len(self_)],
self.CDR_from_AAA[len(self_)]
)
)
# Needed for change_product_basis.
self.AAA_from_FLAG = grow_list(
lambda self_: numpy.dot(
self.AAA_from_CDR[len(self_)],
CDR_from_FLAG[len(self_)]
)
)
self.C_rule = C_in_AAA_factory(
self.CD_from_AAA,
self.AAA_from_CD
)
self.D_rule = D_in_AAA_factory(
self.CD_from_AAA,
self.AAA_from_CD
)
def doit(self, n, m):
return change_product_basis(
product_formula(n, m),
self.IC_from_AAA[n],
self.IC_from_AAA[m],
self.AAA_from_FLAG[n+m]
)
def product_stats(self, n, m):
matrix = self.doit(n, m)
counter = Counter(matrix.flatten())
return sorted(counter.items())
def print_product_stats(self, max):
for n in range(2, max + 1):
for m in range(1, n):
if 2 * m <= n:
print(n, m, self.product_stats(n - m, m))
def print_C_stats(self, max):
C_rule = self.C_rule
for d in range(max + 1):
counter = Counter(C_rule[d].flatten())
print(d, sorted(counter.items()))
def print_D_stats(self, max):
D_rule = self.D_rule
for d in range(max + 1):
counter = Counter(D_rule[d].flatten())
print(d, sorted(counter.items()))
def print_rule(self, d):
matrix = self.AAA_from_CDR[d]
for j in range(matrix.shape[1]):
src = CD_from_word(FIB_WORDS[d][j])
col = matrix[:,j]
bits = sorted(
CD_from_word(word)
for (coeff, word) in zip(col, FIB_WORDS[d])
if coeff
)
print(d, src, '->', str(' ').join(bits))
identity_matrices = RankMatrices(identity_rule)
def index_from_word(word):
'''Here, indexes is pair of tuples, not tuple of pairs.
'''
# Split into parts.
parts = word.split(b'\x01\x02')
# Turn parts into pair of sequences of ints.
C_count = tuple(item.count(b'\x01') for item in parts)
D_count = tuple(item.count(b'\x02') for item in parts)
index = C_count, D_count
return index
def word_from_index(index):
# Turn int into sequence of parts.
C_count, D_count = index
parts = tuple(
b'\x02' * d + b'\x01' * c
for (c, d) in zip(C_count, D_count)
)
# Join the parts.
return b'\x01\x02'.join(parts)
@grow_list
def EXPAND_D(self):
'''Return tuple of word generated from D * d.
'''
d = len(self)
return tuple(
b'\x02' * (d - i) + b'\x01' * (2 * i)
for i in range(d + 1)
)
def expand_d(ints):
'''Yield results of applying D -> CC rule.
Does not apply to the leading D's.
'''
head = ((b'\x02' * ints[0],),) # The leading D's.
body = tuple(EXPAND_D[d] for d in ints[1:])
# Return iterator over the product.
return itertools.product(*(head + body))
def expand_c(ints):
'''Return tuple of ints.
Move C rightwards. For example, CCCD contributes also to CCDC and
CDCC.
'''
if len(ints) <= 1:
return (ints,)
head = ints[0]
body = ints[1:]
return tuple(sorted(
# Create new tuple with possible decremented tail.
tuple((head - i,) + item)
# Iterate over the possible decrements.
for i in range(head + 1)
# For given decrement, recursively iterate over items.
for item in expand_c((body[0] + i,) + body[1:])
))
def slide_d(index):
'''Return indexes to obtained by d-sliding index.
An index is C_count, D_count.
'''
C_count, D_count = index
# First deal with special case - no change.
if len(C_count) < 2:
return (index,)
# Split the data with have.
(c_1, c_2), C_body = C_count[:2], C_count[2:]
(d_1,), D_body = D_count[:1], D_count[1:]
return tuple(sorted(
(
(c_1 + i, c_2 + i) + C_body,
(d_1 - i,) + D_body
)
for i in range(d_1 + 1)
))
def candidate_rule_1(word):
'''Rough first approximation, tests how things fit together.
'''
index = index_from_word(word)
for C_count, D_count in slide_d(index):
new_Cs = tuple(expand_c(C_count))
new_Ds = tuple(expand_d(D_count))
for mixed in itertools.product(new_Cs, new_Ds):
aaa, bbb = mixed
ccc = zip(
(b'\x01' * c for c in aaa),
bbb
)
# Oops - should be CD, not DC.
yield b'\x02\x01'.join(
ggg + hhh # Oops - transposed.
for (ggg, hhh) in ccc
)
cm_1 = RankMatrices(candidate_rule_1)
def candidate_rule_factory(condition):
def candidate_rule(word):
'''Generic rule.'''
index = index_from_word(word)
for C_count, D_count in slide_d(index):
new_Cs = tuple(expand_c(C_count))
new_Ds = tuple(expand_d(D_count))
for mixed in itertools.product(new_Cs, new_Ds):
aaa, bbb = mixed
ccc = zip(
(b'\x01' * c for c in aaa),
bbb
)
value = b'\x01\x02'.join(
hhh + ggg
for (ggg, hhh) in ccc
)
if condition(word, value):
yield value
return candidate_rule
candidate_rule_2 = candidate_rule_factory(lambda *argv: True)
cm_2 = RankMatrices(candidate_rule_2)
def condition_3(src, value):
src_head = src.split(b'\x01\x02', 1)[0]
val_head = value.split(b'\x01\x02', 1)[0]
d_diff = src_head.count(b'\x02') - val_head.count(b'\x02')
c_count = val_head.count(b'\x01')
return c_count >= d_diff
candidate_rule_3 = candidate_rule_factory(condition_3)
cm_3 = RankMatrices(candidate_rule_3)
def CD_from_word(word):
d = {
1: str('C'),
2: str('D'),
}
return str('').join(d[c] for c in iterbytes(word))
def word_from_CD(s):
d = {
str('C'): b'\x01',
str('D'): b'\x02',
}
return b''.join(d[c] for c in s)
if __name__ == '__main__':
if 0:
tmp = _AAA_from_CDR(identity_rule, 5)
print(tmp)
print(identity_matrices.AAA_from_CDR[5])
print(identity_matrices.stats(5, 3))
import doctest
print(doctest.testmod())
```
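The `index_from_word` / `word_from_index` pair encodes a CD-word (b'\x01' is C, b'\x02' is D) as per-part C and D counts, splitting on the CD separator b'\x01\x02'. A short round-trip check is sketched below, assuming the package is importable as `linhomy` (its directory name in this repo).
```python
from linhomy.rankmatrices import index_from_word, word_from_index

word = b'\x02\x01\x01\x02\x01'   # splits into parts b'\x02\x01' and b'\x01'
C_count, D_count = index_from_word(word)
print(C_count, D_count)          # (1, 1) (1, 0)
assert word_from_index((C_count, D_count)) == word
```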
#### File: py-linhomy-2015/py/work.py
```python
def condition_3(src, value):
    src_head = src.split(b'\x01\x02', 1)[0]
val_head = value.split(b'\x01\x02', 1)[0]
d_diff = src_head.count(b'\x02') - val_head.count(b'\x02')
c_count = val_head.count(b'\x01')
return c_count >= d_diff
```
|
{
"source": "jfine2358/python-kwkey",
"score": 3
}
|
#### File: python-kwkey/kwkey/test_jfine.py
```python
from . import o
from .jfine import jfine_to_key
from .jfine import key_to_jfine
class Dummy_jfine:
'''Create a dummy, to see arguments passed.
>>> d = Dummy_jfine()
# Usual behaviour.
>>> d[1]
((1,), {})
>>> d[o(1)]
((1,), {})
>>> d[1] = 'val'
(('val', 1), {})
>>> d[o(1)] = 'val'
(('val', 1), {})
# Usual behaviour.
>>> d[1, 2]
((1, 2), {})
>>> d[o(1, 2)]
((1, 2), {})
>>> d[1, 2] = 'val'
(('val', 1, 2), {})
>>> d[o(1, 2)] = 'val'
(('val', 1, 2), {})
# Keyword arguments present.
>>> d[o(1, 2, a=3, b=4)]
((1, 2), {'a': 3, 'b': 4})
# As expected, arguments same for delitem and getitem.
>>> del d[o(1, 2, a=3, b=4)]
((1, 2), {'a': 3, 'b': 4})
# Arguments passed to setitem.
>>> d[o(1, 2, a=3, b=4)] = 'val'
(('val', 1, 2), {'a': 3, 'b': 4})
'''
@key_to_jfine
def __getitem__(self, *argv, **kwargs):
print((argv, kwargs))
@key_to_jfine
def __setitem__(self, *argv, **kwargs):
print((argv, kwargs))
@key_to_jfine
def __delitem__(self, *argv, **kwargs):
print((argv, kwargs))
class Dummy_jfine_roundtrip:
'''
Create a roundtrip
>>> rt = Dummy_jfine_roundtrip()
>>> rt[1]
((1,), {})
>>> rt[1, 2]
(((1, 2),), {})
>>> rt[(0, 1, 2)]
(((0, 1, 2),), {})
>>> rt['aaa']
(('aaa',), {})
>>> rt[o(1, 2, a=3, b=4)]
((K(1, 2, a=3, b=4),), {})
>>> rt[o(1, 2, a=3, b=4)] = 'val'
(('val', K(1, 2, a=3, b=4)), {})
'''
@key_to_jfine
@jfine_to_key
def __getitem__(self, *argv, **kwargs):
print((argv, kwargs))
@key_to_jfine
@jfine_to_key
def __setitem__(self, *argv, **kwargs):
print((argv, kwargs))
@key_to_jfine
@jfine_to_key
def __delitem__(self, *argv, **kwargs):
print((argv, kwargs))
```
#### File: python-kwkey/kwkey/tools.py
```python
def isdunder(name):
return len(name) >= 5 and (name[:2] == name[-2:] == '__')
def dunder(name):
if name:
return '__' + name + '__'
else:
raise ValueError(name)
def undunder(name):
if len(name) >= 5:
if name[:2] == name[-2:] == '__':
return name[2:-2]
else:
raise ValueError(name)
def dict_from_class(cls):
exclude = {'__dict__', '__module__', '__weakref__'}
# include = {'__name__', '__mro__'}
# Gotcha: Can subtract sets, but not add sets!
# keys = set(cls.__dict__.keys()) - exclude + include
# TypeError: unsupported operand type(s) for +: 'set' and 'set'
keys = set(cls.__dict__.keys()) - exclude
# keys.update(include)
return dict((k, getattr(cls, k)) for k in keys)
```
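The commented-out gotcha in `dict_from_class` notes that sets support `-` but not `+`; if the `include` names were wanted, the union operator (or `set.update`, which the remaining comment hints at) does the job. A tiny self-contained illustration with made-up key names:
```python
exclude = {'__dict__', '__module__', '__weakref__'}
include = {'__name__', '__mro__'}
keys = {'__dict__', '__doc__', 'dunder', 'undunder'}

# set - set works, set + set raises TypeError; use | (union) instead.
wanted = (keys - exclude) | include
print(sorted(wanted))  # ['__doc__', '__mro__', '__name__', 'dunder', 'undunder']
```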
|
{
"source": "jfinkels/sbclassifier",
"score": 2
}
|
#### File: sbclassifier/sbclassifier/strippers.py
```python
import math
import re
import urllib
# Tell SpamBayes where to cache IP address lookup information.
# Only comes into play if lookup_ip is enabled. The default
# (empty string) disables the file cache. When caching is enabled,
# the cache file is stored using the same database type as the main
# token store (only dbm and zodb supported so far, zodb has problems,
# dbm is untested, hence the default).
X_LOOKUP_IP_CACHE = ''
try:
from spambayes import dnscache
cache = dnscache.cache(cachefile=X_LOOKUP_IP_CACHE)
cache.printStatsAtEnd = False
except (IOError, ImportError):
class cache:
@staticmethod
def lookup(*args):
return []
else:
import atexit
atexit.register(cache.close)
from sbclassifier.iputils import gen_dotted_quad_clues
#: (EXPERIMENTAL) Recognize 'www.python.org' or ftp.python.org as URLs
#: instead of just long words.
FANCY_URL_RECOGNITION = False
#: (EXPERIMENTAL) Note whether url contains non-standard port or user/password
#: elements.
X_PICK_APART_URLS = False
#: (EXPERIMENTAL) Generate IP address tokens from hostnames. Requires PyDNS
#: (http://pydns.sourceforge.net/).
X_LOOKUP_IP = False
# Nuke HTML <style gimmicks.
html_style_start_re = re.compile(rb"""
< \s* style\b [^>]* >
""", re.VERBOSE)
urlsep_re = re.compile(rb"[;?:@&=+,$.]")
fname_sep_re = re.compile(rb'[/\\:]')
url_fancy_re = re.compile(rb"""
    \b                      # the preceding character must not be alphanumeric
(?:
(?:
(https? | ftp) # capture the protocol
:// # skip the boilerplate
)|
(?= ftp\.[^\.\s<>"'\x7f-\xff] )| # allow the protocol to be missing,
# but only if
(?= www\.[^\.\s<>"'\x7f-\xff] ) # the rest of the url starts "www.x"
# or "ftp.x"
)
# Do a reasonable attempt at detecting the end. It may or may not
# be in HTML, may or may not be in quotes, etc. If it's full of %
# escapes, cool -- that's a clue too.
([^\s<>"'\x7f-\xff]+) # capture the guts
""", re.VERBOSE) # '
url_re = re.compile(rb"""
(https? | ftp) # capture the protocol
:// # skip the boilerplate
# Do a reasonable attempt at detecting the end. It may or may not
# be in HTML, may or may not be in quotes, etc. If it's full of %
# escapes, cool -- that's a clue too.
([^\s<>"'\x7f-\xff]+) # capture the guts
""", re.VERBOSE) # '
uuencode_begin_re = re.compile(rb"""
^begin \s+
(\S+) \s+ # capture mode
(\S+) \s* # capture filename
$
""", re.VERBOSE | re.MULTILINE)
uuencode_end_re = re.compile(rb"^end\s*\n", re.MULTILINE)
def log2(n, log=math.log, c=math.log(2)):
return log(n)/c
def crack_filename(fname):
yield "fname:" + fname
components = fname_sep_re.split(fname)
morethan1 = len(components) > 1
for component in components:
if morethan1:
yield "fname comp:" + component
pieces = urlsep_re.split(component)
if len(pieces) > 1:
for piece in pieces:
yield "fname piece:" + piece
def crack_content_xyz(msg):
yield 'content-type:' + msg.get_content_type()
x = msg.get_param('type')
if x is not None:
yield 'content-type/type:' + x.lower()
try:
for x in msg.get_charsets(None):
if x is not None:
yield 'charset:' + x.lower()
except UnicodeEncodeError:
# Bad messages can cause an exception here.
# See [ 1175439 ] UnicodeEncodeError raised for bogus Content-Type
# header
yield 'charset:invalid_unicode'
x = msg.get('content-disposition')
if x is not None:
yield 'content-disposition:' + x.lower()
try:
fname = msg.get_filename()
if fname is not None:
for x in crack_filename(fname):
yield 'filename:' + x
except TypeError:
# bug in email pkg? see the thread beginning at
# http://mail.python.org/pipermail/spambayes/2003-September/008006.html
# and
# http://mail.python.org/pipermail/spambayes-dev/2003-September/001177.html
yield "filename:<bogus>"
if 0: # disabled; see comment before function
x = msg.get('content-transfer-encoding')
if x is not None:
yield 'content-transfer-encoding:' + x.lower()
class Stripper(object):
# The retained portions are catenated together with self.separator.
# CAUTION: This used to be blank. But then I noticed spam putting
# HTML comments embedded in words, like
# FR<!--slkdflskjf-->EE!
# Breaking this into "FR" and "EE!" wasn't a real help <wink>.
separator = b'' # a subclass can override if this isn't appropriate
def __init__(self, find_start, find_end):
# find_start and find_end have signature
# string, int -> match_object
# where the search starts at string[int:int]. If a match isn't found,
# they must return None. The match_object for find_start, if not
# None, is passed to self.tokenize, which returns a (possibly empty)
# list of tokens to generate. Subclasses may override tokenize().
# Text between find_start and find_end is thrown away, except for
# whatever tokenize() produces. A match_object must support method
# span() -> int, int # the slice bounds of what was matched
self.find_start = find_start
self.find_end = find_end
# Efficiency note: This is cheaper than it looks if there aren't any
# special sections. Under the covers, string[0:] is optimized to
# return string (no new object is built), and likewise ' '.join([string])
# is optimized to return string. It would actually slow this code down
# to special-case these "do nothing" special cases at the Python level!
def analyze(self, text):
i = 0
retained = []
pushretained = retained.append
tokens = []
while True:
m = self.find_start(text, i)
if not m:
pushretained(text[i:])
break
start, end = m.span()
pushretained(text[i:start])
tokens.extend(self.tokenize(m))
m = self.find_end(text, end)
if not m:
# No matching end - act as if the open
# tag did not exist.
pushretained(text[start:])
break
dummy, i = m.span()
return self.separator.join(retained), tokens
def tokenize(self, match_object):
# Override this if you want to suck info out of the start pattern.
return []
# Strip out uuencoded sections and produce tokens. The return value
# is (new_text, sequence_of_tokens), where new_text no longer contains
# uuencoded stuff. Note that we're not bothering to decode it! Maybe
# we should. One of my persistent false negatives is a spam containing
# nothing but a uuencoded money.txt; OTOH, uuencode seems to be on
# its way out (that's an old spam).
class UUencodeStripper(Stripper):
def __init__(self):
Stripper.__init__(self, uuencode_begin_re.search,
uuencode_end_re.search)
def tokenize(self, m):
mode, fname = m.groups()
return (['uuencode mode:%s' % mode] +
['uuencode:%s' % x for x in crack_filename(fname)])
class URLStripper(Stripper):
def __init__(self):
# The empty regexp matches anything at once.
if FANCY_URL_RECOGNITION:
search = url_fancy_re.search
else:
search = url_re.search
Stripper.__init__(self, search, re.compile(b'').search)
def tokenize(self, m):
proto, guts = m.groups()
assert guts
if proto is None:
            if guts.lower().startswith(b"www"):
                proto = b"http"
            elif guts.lower().startswith(b"ftp"):
                proto = b"ftp"
            else:
                proto = b"unknown"
tokens = [b"proto:" + proto]
pushclue = tokens.append
if X_PICK_APART_URLS:
url = proto + "://" + guts
escapes = re.findall(r'%..', guts)
# roughly how many %nn escapes are there?
if escapes:
pushclue("url:%%%d" % int(log2(len(escapes))))
# %nn escapes are usually intentional obfuscation. Generate a
# lot of correlated tokens if the URL contains a lot of them.
# The classifier will learn which specific ones are and aren't
# spammy.
tokens.extend(["url:" + escape for escape in escapes])
# now remove any obfuscation and probe around a bit
url = urllib.parse.unquote(url)
scheme, netloc, path, params, query, frag = \
urllib.parse.urlparse(url)
if X_LOOKUP_IP:
ips = cache.lookup(netloc)
if not ips:
pushclue("url-ip:lookup error")
else:
for clue in gen_dotted_quad_clues("url-ip", ips):
pushclue(clue)
# one common technique in bogus "please (re-)authorize yourself"
# scams is to make it appear as if you're visiting a valid
# payment-oriented site like PayPal, CitiBank or eBay, when you
# actually aren't. The company's web server appears as the
# beginning of an often long username element in the URL such as
# http://www.paypal.com%65%43%99%[email protected]/iwantyourccinfo
# generally with an innocuous-looking fragment of text or a
# valid URL as the highlighted link. Usernames should rarely
# appear in URLs (perhaps in a local bookmark you established),
# and never in a URL you receive from an unsolicited email or
# another website.
user_pwd, host_port = urllib.parse.splituser(netloc)
if user_pwd is not None:
pushclue("url:has user")
host, port = urllib.parse.splitport(host_port)
# web servers listening on non-standard ports are suspicious ...
if port is not None and (scheme == "http" and port != '80' or
scheme == "https" and port != '443'):
pushclue("url:non-standard %s port" % scheme)
# ... as are web servers associated with raw ip addresses
if re.match("(\d+\.?){4,4}$", host) is not None:
pushclue("url:ip addr")
# make sure we later tokenize the unobfuscated url bits
proto, guts = url.split("://", 1)
# Lose the trailing punctuation for casual embedding, like:
# The code is at http://mystuff.org/here? Didn't resolve.
# or
# I found it at http://mystuff.org/there/. Thanks!
        guts = guts.rstrip(b'.:?!/')
for piece in guts.split(b'/'):
for chunk in urlsep_re.split(piece):
pushclue(b"url:" + chunk)
return tokens
StyleStripper = Stripper(html_style_start_re.search,
re.compile(br"</style>").search)
CommentStripper = Stripper(re.compile(br"<!--|<\s*comment\s*[^>]*>").search,
re.compile(br"-->|</comment>").search)
# Nuke stuff between <noframes> </noframes> tags.
NoframesStripper = Stripper(re.compile(br"<\s*noframes\s*>").search,
re.compile(br"</noframes\s*>").search)
```
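As a concrete illustration of the `Stripper.analyze` contract (and of the "FR<!--...-->EE!" case mentioned in the comments above), the snippet below runs two of the module-level strippers on small byte strings; it assumes the module above is importable as `sbclassifier.strippers`.
```python
from sbclassifier.strippers import CommentStripper, StyleStripper

# HTML comments are removed and the retained pieces are joined with b'',
# so an obfuscated word such as FR<!--junk-->EE! collapses back into one token.
text, tokens = CommentStripper.analyze(b"FR<!--junk-->EE! visit now")
print(text)    # b'FREE! visit now'
print(tokens)  # [] - the base tokenize() yields nothing

# StyleStripper does the same for <style ...> ... </style> blocks.
text, _ = StyleStripper.analyze(b"a<style>p{color:red}</style>b")
print(text)    # b'ab'
```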
#### File: sbclassifier/tests/test_classifiers.py
```python
from sbclassifier import Classifier
from sbclassifier.classifiers.constants import HAM_CUTOFF
from sbclassifier.classifiers.constants import SPAM_CUTOFF
def test_classifier():
ham_strings = 'dog cat horse sloth koala'.split()
spam_strings = 'shark raptor bear spider cockroach'.split()
classifier = Classifier()
classifier.learn_ham(ham_strings)
classifier.learn_spam(spam_strings)
probability = classifier.spamprob(['shark', 'bear', 'spider'])
assert SPAM_CUTOFF <= probability
probability = classifier.spamprob(['dog', 'sloth', 'koala'])
assert probability <= HAM_CUTOFF
def test_bigrams():
ham_strings = 'dog cat horse sloth koala'.split()
spam_strings = 'shark raptor bear spider cockroach'.split()
classifier = Classifier(use_bigrams=True)
classifier.learn_ham(ham_strings)
classifier.learn_spam(spam_strings)
probability = classifier.spamprob(['shark', 'bear', 'spider'])
assert SPAM_CUTOFF <= probability
probability = classifier.spamprob(['dog', 'sloth', 'koala'])
assert probability <= HAM_CUTOFF
```
#### File: sbclassifier/tests/test_iputils.py
```python
import ipaddress
from sbclassifier.iputils import gen_dotted_quad_clues
def test_gen_dotted_quad_clues():
l = list(gen_dotted_quad_clues('foo', '172.16.58.3'))
assert 'foo:1.0.0.0/8' in l
assert 'foo:192.168.3.11/16' in l
assert 'foo:172.16.58.3/24' in l
assert 'foo:172.16.58.3/32' in l
l = list(gen_dotted_quad_clues('foo', ipaddress.ip_address('172.16.58.3')))
assert 'foo:1.0.0.0/8' in l
assert 'foo:192.168.3.11/16' in l
assert 'foo:172.16.58.3/24' in l
assert 'foo:172.16.58.3/32' in l
```
#### File: sbclassifier/tests/test_storage.py
```python
import glob
import os
import tempfile
import unittest
from sbclassifier.classifiers.storage import CDBClassifier
from sbclassifier.classifiers.storage import ShelveClassifier
from sbclassifier.classifiers.storage import PickleClassifier
#from sbclassifier.classifiers.storage import ZODBClassifier
try:
import cdb
cdb_is_available = True
except ImportError:
cdb_is_available = False
# try:
# import ZODB
# zodb_is_available = True
# except ImportError:
# zodb_is_available = False
class _StorageTestBase(unittest.TestCase):
# Subclass must define a concrete StorageClass.
StorageClass = None
def setUp(self):
self.db_name = tempfile.mktemp("spambayestest")
self.classifier = self.StorageClass(self.db_name)
def tearDown(self):
self.classifier.close()
self.classifier = None
for name in glob.glob(self.db_name + "*"):
if os.path.isfile(name):
os.remove(name)
def testLoadAndStore(self):
# Simple test to verify that putting data in the db, storing and
# then loading gives back the same data.
c = self.classifier
c.learn(["some", "simple", "tokens"], True)
c.learn(["some", "other"], False)
c.learn(["ones"], False)
c.store()
c.close()
del self.classifier
self.classifier = self.StorageClass(self.db_name)
self._checkAllWordCounts((("some", 1, 1),
("simple", 0, 1),
("tokens", 0, 1),
("other", 1, 0),
("ones", 1, 0)), False)
self.assertEqual(self.classifier.nham, 2)
self.assertEqual(self.classifier.nspam, 1)
def testCounts(self):
        # Check that nham and nspam are correctly adjusted.
c = self.classifier
count = 30
for i in range(count):
c.learn(["tony"], True)
self.assertEqual(c.nspam, i + 1)
self.assertEqual(c.nham, 0)
for i in range(count):
c.learn(["tony"], False)
self.assertEqual(c.nham, i + 1)
self.assertEqual(c.nspam, count)
for i in range(count):
c.unlearn(["tony"], True)
self.assertEqual(c.nham, count)
self.assertEqual(c.nspam, count - i - 1)
for i in range(count):
c.unlearn(["tony"], False)
self.assertEqual(c.nham, count - i - 1)
self.assertEqual(c.nspam, 0)
def _checkWordCounts(self, word, expected_ham, expected_spam):
assert word
info = self.classifier._wordinfoget(word)
if info is None:
if expected_ham == expected_spam == 0:
return
self.fail("_CheckWordCounts for '%s' got None!" % word)
if info.hamcount != expected_ham:
self.fail("Hamcount '%s' wrong - got %d, but expected %d"
% (word, info.hamcount, expected_ham))
if info.spamcount != expected_spam:
self.fail("Spamcount '%s' wrong - got %d, but expected %d"
% (word, info.spamcount, expected_spam))
def _checkAllWordCounts(self, counts, do_persist):
for info in counts:
self._checkWordCounts(*info)
if do_persist:
self.classifier.store()
self.classifier.load()
self._checkAllWordCounts(counts, False)
def testHapax(self):
self._dotestHapax(False)
self._dotestHapax(True)
def _dotestHapax(self, do_persist):
c = self.classifier
c.learn(["common", "nearly_hapax", "hapax", ], False)
c.learn(["common", "nearly_hapax"], False)
c.learn(["common"], False)
# All the words should be there.
self._checkAllWordCounts((("common", 3, 0),
("nearly_hapax", 2, 0),
("hapax", 1, 0)),
do_persist)
# Unlearn the complete set.
c.unlearn(["common", "nearly_hapax", "hapax", ], False)
# 'hapax' removed, rest still there
self._checkAllWordCounts((("common", 2, 0),
("nearly_hapax", 1, 0),
("hapax", 0, 0)),
do_persist)
# Re-learn that set, so deleted hapax is reloaded
c.learn(["common", "nearly_hapax", "hapax", ], False)
self._checkAllWordCounts((("common", 3, 0),
("nearly_hapax", 2, 0),
("hapax", 1, 0)),
do_persist)
# Back to where we started - start unlearning all down to zero.
c.unlearn(["common", "nearly_hapax", "hapax", ], False)
# 'hapax' removed, rest still there
self._checkAllWordCounts((("common", 2, 0),
("nearly_hapax", 1, 0),
("hapax", 0, 0)),
do_persist)
# Unlearn the next set.
c.unlearn(["common", "nearly_hapax"], False)
self._checkAllWordCounts((("common", 1, 0),
("nearly_hapax", 0, 0),
("hapax", 0, 0)),
do_persist)
c.unlearn(["common"], False)
self._checkAllWordCounts((("common", 0, 0),
("nearly_hapax", 0, 0),
("hapax", 0, 0)),
do_persist)
def test_bug777026(self):
c = self.classifier
word = "tim"
c.learn([word], False)
c.learn([word], False)
self._checkAllWordCounts([(word, 2, 0)], False)
# Clone word's WordInfo record.
record = self.classifier.wordinfo[word]
newrecord = type(record)()
newrecord.__setstate__(record.__getstate__())
self.assertEqual(newrecord.hamcount, 2)
self.assertEqual(newrecord.spamcount, 0)
# Reduce the hamcount -- this tickled an excruciatingly subtle
# bug in a ShelveClassifier's _wordinfoset, which, at the time
# this test was written, couldn't actually be provoked by the
# way _wordinfoset got called by way of learn() and unlearn()
# methods. The code implicitly relied on that the record passed
# to _wordinfoset was always the same object as was already
# in wordinfo[word].
newrecord.hamcount -= 1
c._wordinfoset(word, newrecord)
# If the bug is present, the ShelveClassifier still believes
# the hamcount is 2.
self._checkAllWordCounts([(word, 1, 0)], False)
c.unlearn([word], False)
self._checkAllWordCounts([(word, 0, 0)], False)
# Test classes for each classifier.
class PickleStorageTestCase(_StorageTestBase):
StorageClass = PickleClassifier
class DBStorageTestCase(_StorageTestBase):
StorageClass = ShelveClassifier
def _fail_open_best(self, *args):
raise Exception("No dbm modules available!")
@unittest.skip('This is unnecessary')
def testNoDBMAvailable(self):
from sbclassifier.storage import open_storage
db_name = tempfile.mktemp("nodbmtest")
ShelveClassifier_load = ShelveClassifier.load
ShelveClassifier.load = self._fail_open_best
print("This test will print out an error, which can be ignored.")
try:
self.assertRaises(Exception, open_storage, (db_name, "dbm"))
finally:
ShelveClassifier.load = ShelveClassifier_load
for name in glob.glob(db_name+"*"):
if os.path.isfile(name):
os.remove(name)
@unittest.skipUnless(cdb_is_available, 'requires cdb')
class CDBStorageTestCase(_StorageTestBase):
StorageClass = CDBClassifier
# @unittest.skipUnless(zodb_is_available, 'requires ZODB')
# class ZODBStorageTestCase(_StorageTestBase):
# StorageClass = ZODBClassifier
```
|
{
"source": "jfinken/CudaSift",
"score": 3
}
|
#### File: CudaSift/scripts/cuda_sift.py
```python
import cudasift
import cv2
import numpy as np
from profiling import TaggedTimer
def main():
sift = cudasift.PyCudaSift(dev_num=0)
timr = TaggedTimer()
filename = "../data/CY_279b46b9_1575825158217_1575825184058.jpg"
# filename = "/home/jfinken/projects/here/sp/jfinken/faiss_gpu/AIC_query2.jpg"
data = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
# for writing out keypoints
img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
height, width = data.shape
print(f"Input image: {width}x{height}")
# data = np.ascontiguousarray(data, dtype=np.float32).ravel()
# data = np.array(data, dtype=np.float32).ravel()
data = img.astype(np.float32).ravel()
timr("np.ascontiguousarray")
NUM_RUNS = 3
# Allocate CUDA memory for the source image: once
sift.allocate_cuda_image(
width, height, cudasift.i_align_up(width, 128), False, None, None
)
timr("allocate_cuda_image")
# Allocate storage for internal results
sift.init_sift_data(max_pts=32768, host=True, dev=True)
sift.allocate_sift_temp_memory(width, height, 5, False)
timr("allocate_sift_temp_memory")
for j in range(NUM_RUNS):
# Convenient and temporally performant optimization:
# Reuse CUDA malloc-ed device memory
# Simply download this input image to the device
sift.download_cuda_image(data)
timr("download_cuda_image")
# Run
sift.extract_sift(
# num_octaves=5, init_blur=1.0, thresh=2.0, lowest_scale=0.0, scale_up=False
num_octaves=5,
init_blur=1.0,
thresh=2.0,
lowest_scale=0.0,
scale_up=False,
)
timr("extract_sift")
print(timr)
# Get descriptors and keypoints
desc, kp = sift.get_features()
desc_np = np.asarray(desc)
kp_np = np.asarray(kp)
timr(
f"get_features done (num_pts={desc_np.shape[0]}, desc_np.shape={desc_np.shape}, kp_np.shape={kp_np.shape})"
)
print(timr)
"""
# Debug: make cv2 keypoints
kps = []
for i in range(kp_np.shape[0]):
# print(f"keypt @ {desc[i].get('xpos')}, {desc[i].get('ypos')}")
kps.append(
cv2.KeyPoint(
x=int(kp_np[i, 0]),
y=int(kp_np[i, 1]),
_size=kp_np[i, 2],
_angle=kp_np[i, 3],
)
)
timr("for-loop over keypoints")
print(timr)
img = cv2.drawKeypoints(
img,
kps,
outImage=np.array([]),
flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
)
timr("cv2.drawKeypoints")
cv2.imwrite(f"woo.jpg", img)
# timr("cv2.imwrite")
"""
if __name__ == "__main__":
main()
```
|
{
"source": "jfinkhaeuser/metalmittwoch",
"score": 3
}
|
#### File: jfinkhaeuser/metalmittwoch/stats.py
```python
def collect_data(path):
"""Collect data from the metalmittwoch path."""
def sanitize(string):
string = string.lower()
string = string.strip("'\" ")
for delimiter in ('|', '(', '['):
pos = string.find(delimiter)
if pos > 0:
string = string[0:pos]
string = string.strip("'\" ")
return string
data = {
'items': []
}
unreadable = 0
import re
regex = re.compile(r'^ +(?P<order>\d+) +(\d+ *[-:])?(?P<band>.*?) *[-:] *(?P<track>.*) *$')
import os, os.path
for dirpath, dirnames, filenames in os.walk(path):
for fname in filenames:
full = os.path.join(dirpath, fname)
lines = file(full).readlines()
for line in lines:
line = line.decode('utf8')
# Consider only those starting with >1 space followed by >1 digit
match = regex.match(line)
if not match:
continue
item = match.groupdict()
# format checks/coercing
try:
item['order'] = int(item['order'])
item['track'] = sanitize(item['track'])
item['band'] = sanitize(item['band'])
except:
unreadable += 1
continue
# Hopefully each item is now uniformly formatted so we can match
# multiple occurrences
data['items'].append(item)
data['unreadable'] = unreadable
return data
def get_extremes(data, key, limit = 10, top = True):
items = {}
# Get items and their counts
for item in data['items']:
if items.has_key(item[key]):
items[item[key]] += 1
else:
items[item[key]] = 1
# Sort the counts
order = items.values()
order.sort()
if top:
order.reverse()
top = []
while len(top) < limit:
# Find an item with the first count, then add it to top and remove it
# from items.
count = order.pop(0)
for item, c in items.items():
if count == c:
top.append((item, count))
del items[item]
break
return top
def print_top_bands(data, limit = 10):
# Get top bands
top = get_extremes(data, 'band', limit, True)
# Print
print "Top Bands"
print "---------"
for index in range(0, len(top)):
item = top[index]
print "#%02d: %s (%d times)" % (index + 1, item[0].title(), item[1])
print
def print_top_tracks(data, limit = 10):
# Get top tracks
tmp = get_extremes(data, 'track', limit, True)
# Get band for each top track
top = []
for item in tmp:
for entry in data['items']:
if entry['track'] == item[0]:
top.append((entry['band'], item[0], item[1]))
break
# Print
print "Top Tracks"
print "----------"
for index in range(0, len(top)):
item = top[index]
print "#%02d: %s - %s (%d times)" % (index + 1, item[0].title(), item[1].title(), item[2])
print
if __name__ == '__main__':
import os, os.path
basedir = os.path.join(os.path.abspath(os.getcwd()), 'metalmittwoch')
if not os.path.isdir(basedir):
import sys
sys.stderr.write('Path "%s" not found or is not a directory.\n' % basedir)
sys.exit(-1)
data = collect_data(basedir)
print_top_bands(data)
print_top_tracks(data)
```
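The heavy lifting in `collect_data` is the playlist-line regex. The sketch below shows what it extracts from one made-up playlist line; the exact line format is assumed from the pattern itself, since no sample data files are included here.
```python
import re

regex = re.compile(r'^ +(?P<order>\d+) +(\d+ *[-:])?(?P<band>.*?) *[-:] *(?P<track>.*) *$')

line = '  07 Iron Maiden - The Trooper'   # hypothetical playlist entry
match = regex.match(line)
print(match.groupdict())
# {'order': '07', 'band': 'Iron Maiden', 'track': 'The Trooper'}
```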
|
{
"source": "jfinney10/spack-configs",
"score": 2
}
|
#### File: packages/spack-dummy-package/package.py
```python
from spack import *
class SpackDummyPackage(MakefilePackage):
"""An mpi hello world that is packaged for spack."""
homepage = "http://www.anl.gov"
#git = "<EMAIL>:ecp-test/spack-dummy-package.git"
git = "https://github.com/frankwillmore/spack-dummy-package.git"
version('master', branch='master')
depends_on('mpi')
def edit(self, spec, prefix):
env['PREFIX'] = prefix
def install(self, spec, prefix):
# print("Got prefix = " + prefix)
# configure("--prefix={0}", format(prefix))
make()
make('install')
make('check')
```
|
{
"source": "jfirminger/monkey",
"score": 2
}
|
#### File: monkey/service/server.py
```python
from monkey.service.application import PredictionServer
import argparse
import logging
import sys, os
import multiprocessing as mp
logger = logging.getLogger(__name__)
def run(application):
"""
Start server
"""
try:
process = mp.Process(target=application)
process.daemon = True
process.start()
process.join()
except Exception as e:
print(e)
print("Could not start prediction service")
def prediction_server(model_name, options, wsgi_flag):
"""
Returns callable prediction server
"""
return PredictionServer(model_name, options).run(wsgi_flag)
def main():
LOG_FORMAT = (
"%(asctime)s - %(name)s:%(funcName)s:%(lineno)s - %(levelname)s: %(message)s"
)
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
sys.path.append(os.getcwd())
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", type=str, help="Name of the user model.")
parser.add_argument("--workers", type=int, default=1, help="Number of workers.")
parser.add_argument("--port", type=int, default=5000, help="Port to expose")
parser.add_argument('--wsgi', dest='wsgi_flag', action='store_true')
parser.add_argument('--no-wsgi', dest='wsgi_flag', action='store_false')
parser.set_defaults(wsgi_flag=True)
args = parser.parse_args()
options = {
"bind": "{}:{}".format("0.0.0.0", args.port),
"loglevel": "info",
"timeout": 5000,
"reload": "true",
"workers": args.workers
}
application = prediction_server(args.model_name, options, args.wsgi_flag)
run(application)
if __name__ == "__main__":
main()
```
#### File: jfirminger/monkey/setup.py
```python
from setuptools import find_packages, setup
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
tests_require = [
'pytest<=5.3.0',
'scikit-learn<=0.21.3',
]
setup(
name = "monkey",
version = "0.0.1dev",
author = "<EMAIL>l.io",
author_email = "<EMAIL>",
description = ("wrapping and deploying python models"),
license = "Apache License 2.0",
packages = find_packages(),
long_description = read('README.md'),
install_requires = requirements,
    tests_require=tests_require,
entry_points = {
"console_scripts": [
"monkey = monkey.service.server:main"
]
}
)
```
|
{
"source": "jfischer/data-workspaces-python",
"score": 2
}
|
#### File: dataworkspaces/commands/pull.py
```python
from typing import Optional, List, cast
import click
from dataworkspaces.commands.push import build_resource_list
from dataworkspaces.errors import ConfigurationError, InternalError
from dataworkspaces.workspace import Workspace, SyncedWorkspaceMixin, CentralWorkspaceMixin
def _pull_and_clone_resources(workspace, only, skip):
resource_list_names = build_resource_list(workspace, only, skip)
clone_set = frozenset(workspace.get_names_for_resources_that_need_to_be_cloned())
pull_resources = [
workspace.get_resource(rn) for rn in resource_list_names if rn not in clone_set
]
if len(pull_resources) > 0:
click.echo("Updating resources: %s" % ", ".join([r.name for r in pull_resources]))
workspace.pull_resources(pull_resources)
else:
click.echo("No resources to update.")
clone_name_list = [rn for rn in resource_list_names if rn in clone_set]
if len(clone_name_list) > 0:
click.echo("Cloning new resources: %s" % ", ".join(clone_name_list))
for rn in clone_name_list:
workspace.clone_resource(rn)
return len(pull_resources) + len(clone_name_list)
def pull_command(
workspace: Workspace,
only: Optional[List[str]] = None,
skip: Optional[List[str]] = None,
only_workspace: bool = False,
) -> int:
if isinstance(workspace, SyncedWorkspaceMixin):
# first, sync the workspace
click.echo("Syncing workspace")
mixin = workspace.pull_workspace()
workspace = cast(Workspace, mixin)
if not only_workspace:
rcount = _pull_and_clone_resources(workspace, only, skip)
else:
rcount = 0
elif isinstance(workspace, CentralWorkspaceMixin):
if only_workspace:
raise ConfigurationError(
"--only-workspace not valid for central workspace %s" % workspace.name
)
rcount = _pull_and_clone_resources(workspace, only, skip)
else:
raise InternalError(
"Workspace %s is neither a SyncedWorkspaceMixin nor a CentralWorkspaceMixin"
% workspace.name
)
workspace.save("Pull command")
return rcount
```
#### File: dataworkspaces/commands/restore.py
```python
from typing import Optional, List, cast
import click
from dataworkspaces.errors import ConfigurationError, UserAbort, InternalError, ApiParamError
from dataworkspaces.workspace import (
Workspace,
SnapshotWorkspaceMixin,
SnapshotResourceMixin,
ResourceRoles,
)
def restore_command(
workspace: Workspace,
tag_or_hash: str,
only: Optional[List[str]] = None,
leave: Optional[List[str]] = None,
strict: bool = False,
) -> int:
"""Run the restore and return the number of resources affected.
"""
if not isinstance(workspace, SnapshotWorkspaceMixin):
raise ConfigurationError("Workspace %s does not support snapshots" % workspace.name)
mixin = cast(SnapshotWorkspaceMixin, workspace)
# First, find the history entry
md = mixin.get_snapshot_by_tag_or_hash(tag_or_hash)
# process the lists of resources
current_names = set(workspace.get_resource_names())
# get the non-null resources in snapshot
snapshot_names = set(
[rn for rn in md.restore_hashes.keys() if md.restore_hashes[rn] is not None]
)
all_names = current_names.union(snapshot_names)
if (only is not None) and (leave is not None):
raise ApiParamError("Cannot specify both only and leave for restore command.")
elif only is not None:
# For only, we will be a little stricter, as the user is explicitly
# specifying the resources.
restore_set = set(only)
strict = True
elif leave is not None:
restore_set = all_names.difference(leave)
else:
restore_set = all_names
# We need to remove result resources from the restore set, as we
# do not restore them to their prior state.
result_resources = {
rname
for rname in restore_set
if workspace.get_resource_role(rname) == ResourceRoles.RESULTS
}
result_resources_in_restore_set = result_resources.intersection(restore_set)
if len(result_resources_in_restore_set) > 0:
if strict:
raise ConfigurationError(
"Restore set contains result resources, which cannot be restored. The following are result resources: %s"
% ", ".join(result_resources_in_restore_set)
)
else:
click.echo(
"Skipping the restore of the following result resources, which are left in their latest state: %s"
% ", ".join(result_resources_in_restore_set)
)
restore_set = restore_set.difference(result_resources)
# error checking
invalid = restore_set.difference(all_names)
if len(invalid) > 0:
raise ConfigurationError("Resource name(s) not found: %s" % ", ".join(sorted(invalid)))
removed_names = restore_set.difference(current_names)
if len(removed_names) > 0:
if strict:
raise ConfigurationError(
"Resources have been removed from workspace or have no restore hash and strict mode is enabled."
+ " Removed resources: %s" % ", ".join(sorted(removed_names))
)
else:
click.echo(
"Skipping restore of resources that have been removed from workspace or have no restore hash: %s"
% ", ".join(sorted(removed_names)),
err=True,
)
restore_set = restore_set.difference(removed_names)
added_names = restore_set.difference(snapshot_names)
if len(added_names) > 0:
if strict:
raise ConfigurationError(
"Resources have been added to workspace since restore, and strict mode enabled."
+ " Added resources: %s" % ", ".join(sorted(added_names))
)
else:
click.echo(
"Resources have been added to workspace since restore, will leave them as-is: %s"
% ", ".join(sorted(added_names)),
err=True,
)
restore_set = restore_set.difference(added_names)
# get ordered list of names and resources as well as restore hashes
restore_name_list = [rn for rn in workspace.get_resource_names() if rn in restore_set]
if len(restore_name_list) == 0:
click.echo("No resources to restore.")
return 0
restore_resource_list = [workspace.get_resource(rn) for rn in restore_name_list]
for r in restore_resource_list:
if not isinstance(r, SnapshotResourceMixin):
raise InternalError(
"Resource %s was in snapshot, but is not a SnapshotResourceMixin" % r.name
)
restore_hashes = {rn: md.restore_hashes[rn] for rn in restore_set}
tagstr = " (%s)" % ",".join(md.tags) if len(md.tags) > 0 else ""
click.echo("Restoring snapshot %s%s" % (md.hashval, tagstr))
def fmt_rlist(rnames):
if len(rnames) > 0:
return ", ".join(rnames)
else:
return "None"
click.echo(" Resources to restore: %s" % fmt_rlist(restore_name_list))
names_to_leave = sorted(current_names.difference(restore_set))
click.echo(" Resources to leave: %s" % fmt_rlist(names_to_leave))
if not workspace.batch:
# Unless in batch mode, we always want to ask for confirmation
resp = input("Should I perform this restore? [Y/n]")
if resp.lower() != "y" and resp != "":
raise UserAbort()
# do the work!
mixin.restore(
md.hashval, restore_hashes, cast(List[SnapshotResourceMixin], restore_resource_list)
)
workspace.save("Restore to %s" % md.hashval)
return len(restore_name_list)
```
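The `only` / `leave` handling above reduces to set algebra over resource names. The self-contained sketch below replays the first stage of that logic (before result resources and removed or added names are filtered out) on made-up names, to show which resources each option selects.
```python
current_names = {'code', 'raw-data', 'results'}
snapshot_names = {'code', 'raw-data', 'old-data'}
all_names = current_names | snapshot_names

def initial_restore_set(only=None, leave=None):
    # Mirrors restore_command: 'only' wins outright, 'leave' subtracts,
    # and the default restores everything known to either side.
    if only is not None and leave is not None:
        raise ValueError("cannot combine only and leave")
    if only is not None:
        return set(only)
    if leave is not None:
        return all_names - set(leave)
    return set(all_names)

print(sorted(initial_restore_set()))                   # ['code', 'old-data', 'raw-data', 'results']
print(sorted(initial_restore_set(only=['raw-data'])))  # ['raw-data']
print(sorted(initial_restore_set(leave=['results'])))  # ['code', 'old-data', 'raw-data']
```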
#### File: dataworkspaces/commands/run.py
```python
from typing import List
from dataworkspaces.workspace import Workspace
from dataworkspaces.errors import InternalError
# import dataworkspaces.commands.actions as actions
# from dataworkspaces.errors import ConfigurationError
# from dataworkspaces.utils.lineage_utils import get_current_lineage_dir
# EXECUTABLES_TO_EXCLUDE_FROM_STEP_NAME = ['python', 'python3', 'python2']
# class RemovePreviousLineage(actions.Action):
# def __init__(self, ns, verbose, lineage_file):
# super().__init__(ns, verbose)
# self.lineage_file = lineage_file
# def run(self):
# if exists(self.lineage_file):
# os.remove(self.lineage_file)
# def __str__(self):
# return "Remove old lineage file '%s', if present" % self.lineage_file
# class RunCommand(actions.Action):
# @actions.provides_to_ns('start_ts', datetime.datetime)
# @actions.provides_to_ns('end_ts', datetime.datetime)
# def __init__(self, ns, verbose, command_and_args, cwd):
# super().__init__(ns, verbose)
# self.command_and_args = command_and_args
# self.cwd = cwd
# def run(self):
# self.ns.start_ts = datetime.datetime.now()
# if self.verbose:
# click.echo(" ".join(self.command_and_args + "[run in %s]"%self.cwd))
# cp = subprocess.run(self.command_and_args, cwd=self.cwd, encoding='utf-8')
# cp.check_returncode()
# self.ns.end_ts = datetime.datetime.now()
# def __str__(self):
# return "Run %s from directory %s" % (self.command_and_args, self.cwd)
# class WriteLineage(actions.Action):
# @actions.requires_from_ns('start_ts', datetime.datetime)
# @actions.requires_from_ns('end_ts', datetime.datetime)
# def __init__(self, ns, verbose, lineage_file, lineage_data):
# super().__init__(ns, verbose)
# self.lineage_file = lineage_file
# self.lineage_data = lineage_data
# def run(self):
# self.lineage_data['timestamp'] = self.ns.start_ts.isoformat()
# self.lineage_data['elapsed_time_seconds'] = \
# round((self.ns.end_ts-self.ns.start_ts).total_seconds(), 1)
# parent_dir = dirname(self.lineage_file)
# if not isdir(parent_dir):
# os.mkdir(parent_dir)
# with open(self.lineage_file, 'w') as f:
# json.dump(self.lineage_data, f, indent=2)
# def __str__(self):
# return "Write lineage data to %s" % self.lineage_file
# def remove_extension(fname):
# try:
# return fname[:fname.rindex('.')]
# except ValueError:
# return fname
def run_command(workspace: Workspace, step_name: str, cwd: str, command: str, args: List[str]):
raise InternalError("Run command not yet supported")
# XXX Need to port and finish implementing
# ns = actions.Namespace()
# plan = [ ]
# # find the command executable
# if isabs(command):
# if not exists(command):
# raise ConfigurationError("Command executable '%s' does not exist" % command)
# if not os.access(command, os.X_OK):
# raise ConfigurationError("Command '%s' is not executable" % command)
# command_path = command
# else:
# command_path = shutil.which(command)
# if command_path is None:
# raise ConfigurationError("Could not find command '%s'" % command)
# command_and_args = [command_path] + list(args)
# cwd = expanduser(cwd)
# # figure out the step name
# if step_name is None:
# if basename(command_path) in EXECUTABLES_TO_EXCLUDE_FROM_STEP_NAME:
# step_name = remove_extension(basename(args[0]))
# else:
# step_name = remove_extension(basename(command_path))
# lineage_file = join(get_current_lineage_dir(workspace_dir), '%s.json' % step_name)
# lineage_data = {
# 'step_name':step_name,
# 'command_path':command_path,
# 'args': args,
# 'cwd': cwd,
# }
# plan.append(RemovePreviousLineage(ns, verbose, lineage_file))
# plan.append(RunCommand(ns, verbose, command_and_args, cwd))
# plan.append(WriteLineage(ns, verbose, lineage_file, lineage_data))
# try:
# actions.run_plan(plan, "Run command with lineage", "run command with lineage", batch=batch, verbose=verbose)
# except:
# # if we get an error, we need to wipe out the lineage file
# if exists(lineage_file):
# os.remove(lineage_file)
# raise
```
#### File: dataworkspaces/commands/snapshot.py
```python
from typing import Optional, cast
import click
from dataworkspaces.utils.hash_utils import is_a_git_hash, is_a_shortened_git_hash
from dataworkspaces.errors import ConfigurationError, UserAbort
from dataworkspaces.workspace import Workspace, SnapshotMetadata, SnapshotWorkspaceMixin
_CONF_MESSAGE = (
"A snapshot with this hash already exists. Do you want to update "
+ "the message from '%s' to '%s'?"
)
def merge_snapshot_metadata(
old: SnapshotMetadata, new: SnapshotMetadata, batch: bool
) -> SnapshotMetadata:
"""Merge two snapshot metadatas for when someone creates
a snapshot without making changes. They might have
added more tags or changed the message.
"""
assert old.hashval == new.hashval
tags = old.tags + [tag for tag in new.tags if tag not in old.tags]
if (
old.message != new.message
and (new.message is not None)
and (new.message != "")
and (batch is False)
and click.confirm(_CONF_MESSAGE % (old.message, new.message))
): # type:ignore
message = new.message
else:
message = old.message
if old.tags == new.tags and old.message == new.message:
raise ConfigurationError("No differences from previous snapshot, doing nothing.")
click.echo("Snapshot %s already exists, updating metadata..." % old.hashval)
return SnapshotMetadata(
hashval=old.hashval,
tags=tags,
message=message,
hostname=new.hostname,
timestamp=old.timestamp,
relative_destination_path=new.relative_destination_path,
# The restore hash may have changed, even if the content did not.
# E.g., in the git subdirectory, the restore hash reflects the hash of the overall
# repo rather than just the subdirectory.
restore_hashes=new.restore_hashes,
metrics=new.metrics,
updated_timestamp=new.timestamp,
)
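# Illustrative behavior (a sketch, not from the original source): if a user
# re-takes a snapshot whose content hash already exists but adds a tag 'v2',
# the merge above yields tags old.tags + ['v2'], keeps the original
# 'timestamp', and records the re-snapshot time in 'updated_timestamp'.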
def snapshot_command(workspace: Workspace, tag: Optional[str] = None, message: str = "") -> str:
if (tag is not None) and (is_a_git_hash(tag) or is_a_shortened_git_hash(tag)):
raise ConfigurationError(
"Tag '%s' looks like a git hash. Please pick something else." % tag
)
if not isinstance(workspace, SnapshotWorkspaceMixin):
raise ConfigurationError("Workspace %s does not support snapshots." % workspace.name)
mixin = cast(SnapshotWorkspaceMixin, workspace)
# Remove existing tag if present
if tag is not None:
try:
existing_tag_md = mixin.get_snapshot_by_tag(tag) # type: Optional[SnapshotMetadata]
except ConfigurationError:
existing_tag_md = None
if existing_tag_md is not None:
msg = "Tag '%s' already exists for snapshot %s taken %s" % (
tag,
existing_tag_md.hashval,
existing_tag_md.timestamp,
)
if workspace.batch:
raise ConfigurationError(msg)
elif not click.confirm(msg + ". Remove this tag so we can add it to the new snapshot?"):
raise UserAbort()
else:
mixin.remove_tag_from_snapshot(existing_tag_md.hashval, tag)
(md, manifest) = mixin.snapshot(tag, message)
try:
old_md = mixin.get_snapshot_metadata(md.hashval) # type: Optional[SnapshotMetadata]
except:
old_md = None
if old_md is not None:
md = merge_snapshot_metadata(old_md, md, workspace.batch)
mixin.save_snapshot_metadata_and_manifest(md, manifest)
workspace.save("Completed snapshot %s" % md.hashval)
if tag:
click.echo(
"Have successfully taken snapshot of workspace, tagged with '%s', hash is %s."
% (tag, md.hashval)
)
else:
click.echo("Have successfully taken snapshot of workspace, hash is %s." % md.hashval)
return md.hashval
```
#### File: dataworkspaces/commands/status.py
```python
import click
from typing import Optional, cast, Dict, List, Any
assert Dict and List and Any # for pyflakes
from collections import Counter
from dataworkspaces.workspace import RESOURCE_ROLE_CHOICES, Workspace, SnapshotWorkspaceMixin
from dataworkspaces.utils.print_utils import print_columns, ColSpec
METRIC_NAME_WIDTH = 18
METRIC_VAL_WIDTH = 12
NUM_METRICS = 2
def print_snapshot_history(
workspace: SnapshotWorkspaceMixin, reverse: bool = True, max_count: Optional[int] = None
):
history = workspace.list_snapshots(reverse, max_count)
# find the most common metrics
mcounter = Counter() # type: Counter
for md in history:
if md.metrics is not None:
mcounter.update(md.metrics.keys())
metric_names = [m for (m, cnt) in mcounter.most_common(NUM_METRICS)]
spec = {
"Hash": ColSpec(width=8),
"Tags": ColSpec(width=20),
"Created": ColSpec(width=19),
"Message": ColSpec(width=30),
}
hashes = [] # type: List[str]
tags = [] # type: List[str]
created = [] # type: List[str]
metrics = {n: [] for n in metric_names} # type: Dict[str,List[Any]]
messages = [] # type: List[str]
returned = 0
for md in history:
hashes.append(md.hashval[0:7])
tags.append(", ".join(md.tags))
created.append(md.timestamp[0:-7])
messages.append(md.message)
for m in metric_names:
metrics[m].append(md.metrics[m] if md.metrics is not None and m in md.metrics else None)
returned += 1
columns = {"Hash": hashes, "Tags": tags, "Created": created}
for m in metric_names:
columns[m] = metrics[m]
spec[m] = ColSpec(width=25, truncate=True)
columns["Message"] = messages
click.echo("\n")
print_columns(columns, null_value="", spec=spec, paginate=False, title="History of snapshots")
if max_count is not None and returned == max_count:
click.echo("Showing first %d snapshots" % max_count)
else:
click.echo("%d snapshots total" % returned)
def print_resource_status(workspace: Workspace):
names_by_role = {role: [] for role in RESOURCE_ROLE_CHOICES} # type:Dict[str,List[str]]
resource_names = []
roles = []
types = []
params = []
missing_roles = []
# we are going to order resources by role
for rname in workspace.get_resource_names():
role = workspace.get_resource_role(rname)
names_by_role[role].append(rname)
for role in RESOURCE_ROLE_CHOICES:
if len(names_by_role[role]) > 0:
for rname in names_by_role[role]:
resource_names.append(rname)
roles.append(role)
types.append(workspace.get_resource_type(rname))
params.append(
",\n".join(
[
"%s=%s" % (pname, pval)
for (pname, pval) in workspace._get_resource_params(rname).items()
if pname not in ("resource_type", "name", "role")
]
)
)
else:
missing_roles.append(role)
print_columns(
{"Resource": resource_names, "Role": roles, "Type": types, "Parameters": params},
# spec={'Parameters':ColSpec(width=40)},
null_value="",
title="Resources for workspace: %s" % workspace.name,
paginate=False,
)
if len(missing_roles) > 0:
click.echo("No resources for the following roles: %s." % ", ".join(missing_roles))
def status_command(workspace: Workspace, history: bool, limit: Optional[int] = None):
print("Status for workspace: %s" % workspace.name)
print_resource_status(workspace)
if history:
if not isinstance(workspace, SnapshotWorkspaceMixin):
click.echo(
"Workspace %s cannot perform snapshots, ignoring --history option" % workspace.name,
err=True,
)
else:
print_snapshot_history(
cast(SnapshotWorkspaceMixin, workspace), reverse=True, max_count=limit
)
```
#### File: dataworkspaces/kits/wrapper_utils.py
```python
import datetime
from typing import Optional, Union, cast, Dict
from os.path import exists
from dataworkspaces.workspace import Workspace, ResourceRoles, ResourceRef
from dataworkspaces.utils.lineage_utils import LineageError, infer_step_name
from dataworkspaces.kits.jupyter import get_step_name_for_notebook
from dataworkspaces.lineage import ResultsLineage
from dataworkspaces.resources.api_resource import API_RESOURCE_TYPE, ApiResource
from dataworkspaces.errors import ConfigurationError
import numpy as np # type: ignore
try:
import pandas # type: ignore
except ImportError:
pandas = None
try:
import tensorflow # type: ignore
except ImportError:
tensorflow = None # type: ignore
class NotSupportedError(ConfigurationError):
"""Thrown when a wrapper encounters an unsupported configuration.
"""
pass
def _infer_step_name() -> str:
"""Come up with a step name by looking at whether this is a notebook
and then the command line arguments.
"""
try:
notebook_name = get_step_name_for_notebook()
if notebook_name is not None:
return notebook_name
except:
pass # not a notebook
return infer_step_name()
def _metric_scalar_to_json(v):
if isinstance(v, int) or isinstance(v, str):
return v
elif isinstance(v, np.int64) or isinstance(v, np.int32):
return int(v)
elif isinstance(v, np.float64) or isinstance(v, np.float32):
return float(v)
elif isinstance(v, datetime.datetime):
return v.isoformat()
else:
return v
def _metric_obj_to_json(v):
if isinstance(v, dict):
return {k: _metric_obj_to_json(vi) for (k, vi) in v.items()}
elif isinstance(v, list) or isinstance(v, tuple):
return [_metric_obj_to_json(vi) for vi in v]
else:
return _metric_scalar_to_json(v)
def _add_to_hash(array_data, hash_state):
if isinstance(array_data, np.ndarray):
hash_state.update(array_data.data)
elif (pandas is not None) and isinstance(array_data, pandas.DataFrame):
for c in array_data.columns:
hash_state.update(array_data[c].to_numpy(copy=False).data)
elif (pandas is not None) and isinstance(array_data, pandas.Series):
hash_state.update(array_data.to_numpy(copy=False).data)
elif isinstance(array_data, tuple) or isinstance(array_data, list):
# Tensorflow frequently puts the parts of a dataset in a tuple.
# For example: (features, labels)
for element in array_data:
_add_to_hash(element, hash_state)
elif isinstance(array_data, dict):
# Tensorflow uses a dict (specifically OrderedDict) to store
# the columns of a CSV.
for column in array_data.values():
_add_to_hash(column, hash_state)
elif (tensorflow is not None) and isinstance(array_data, tensorflow.data.Dataset): # type: ignore
# We need to iterate through the dataset, to force an eager evaluation
for t in array_data:
_add_to_hash(t, hash_state)
elif (tensorflow is not None) and isinstance(array_data, tensorflow.Tensor): # type: ignore
if hasattr(array_data, "numpy"):
_add_to_hash(array_data.numpy(), hash_state)
else:
raise Exception(
"Tensor type %s is not in eager mode, cannot convert to numpy, value was: %s"
% (type(array_data), repr(array_data))
)
elif (
isinstance(array_data, np.uint8)
or isinstance(array_data, np.int8)
or isinstance(array_data, np.int32)
or isinstance(array_data, np.int64)
):
hash_state.update(bytes(int(array_data)))
else:
raise Exception(
"Unable to hash data type %s, data was: %s" % (type(array_data), array_data)
)
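# Example of assumed usage (a sketch): this helper feeds array-like data into a
# hashlib state so that identical content produces identical digests.
#   import hashlib
#   state = hashlib.sha1()
#   _add_to_hash(np.arange(10), state)              # ndarray branch
#   _add_to_hash([np.ones(3), np.zeros(3)], state)  # list-of-arrays branch
#   digest = state.hexdigest()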
def _find_resource(
workspace: Workspace, role: str, name_or_ref: Optional[Union[str, ResourceRef]] = None
) -> ResourceRef:
resource_names = [n for n in workspace.get_resource_names()]
if isinstance(name_or_ref, str):
if (
(not name_or_ref.startswith("./"))
and (not name_or_ref.startswith("/"))
and (name_or_ref in resource_names)
):
return ResourceRef(name_or_ref)
elif exists(name_or_ref):
return workspace.map_local_path_to_resource(
name_or_ref, expecting_a_code_resource=False
)
else:
raise LineageError(
"Could not find a resource for '"
+ name_or_ref
+ "' with role '"
+ role
+ "' in your workspace. Please create a resource"
+ " using the 'dws add' command or correct the name. "
+ "Currently defined resources are: "
+ ", ".join(
["%s (role %s)" % (n, workspace.get_resource_role(n)) for n in resource_names]
)
+ "."
)
elif isinstance(name_or_ref, ResourceRef):
workspace.validate_resource_name(name_or_ref.name, name_or_ref.subpath)
return name_or_ref
else:
# no resource specified. If we have exactly one for that role,
# we will use it
resource_for_role = None
for rname in workspace.get_resource_names():
if workspace.get_resource_role(rname) == role:
if resource_for_role is None:
resource_for_role = ResourceRef(rname, subpath=None)
else:
raise LineageError(
"There is more than one resource for role "
+ role
+ " in your workspace. Please specify the resource you want"
+ " in model wrapping function or use a wrapped data set"
)
if resource_for_role is not None:
return resource_for_role
else:
raise LineageError(
"Could not find a "
+ role
+ " resource in your workspace. Please create a resource"
+ " using the dws add command."
)
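# Resolution rules above, summarized with an assumed example: with exactly one
# RESULTS-role resource named 'results' in the workspace,
#   _find_resource(ws, ResourceRoles.RESULTS, 'results')  -> ResourceRef('results')
#   _find_resource(ws, ResourceRoles.RESULTS, None)       -> ResourceRef('results')
# while a path-like string that exists on disk is resolved through
# workspace.map_local_path_to_resource() instead.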
class _DwsModelState:
def __init__(
self,
workspace: Workspace,
input_resource: Optional[Union[str, ResourceRef]] = None,
results_resource: Optional[Union[str, ResourceRef]] = None,
):
self.workspace = workspace
self.results_ref = _find_resource(workspace, ResourceRoles.RESULTS, results_resource)
self.default_input_resource = input_resource
self.api_resource_cache = {} # type: Dict[str,ApiResource]
self.lineage = ResultsLineage(
_infer_step_name(), datetime.datetime.now(), {}, [], [], self.results_ref, workspace
)
def find_input_resources_and_return_if_api(
self, data, target_data=None
) -> Optional[ApiResource]:
if hasattr(data, "resource"):
ref = data.resource
else:
ref = _find_resource(
self.workspace, ResourceRoles.SOURCE_DATA_SET, self.default_input_resource
)
self.lineage.add_input_ref(ref)
data_resource_type = self.workspace.get_resource_type(ref.name)
if target_data is not None and hasattr(target_data, "resource"):
            target_ref = target_data.resource
if target_ref != ref: # only can happen if resource is specified on data
if (
data_resource_type == API_RESOURCE_TYPE
or self.workspace.get_resource_type(target_ref.name) == API_RESOURCE_TYPE
):
raise NotSupportedError(
"Currently, we do not support API Resources where the feature and target data are from different resources (%s and %s)."
% (ref, target_ref)
)
self.lineage.add_input_ref(target_ref)
if data_resource_type == API_RESOURCE_TYPE:
if ref.name not in self.api_resource_cache:
self.api_resource_cache[ref.name] = cast(
ApiResource, self.workspace.get_resource(ref.name)
)
return self.api_resource_cache[ref.name]
else:
return None
def write_metrics_and_complete(self, metrics):
metrics = _metric_obj_to_json(metrics)
if self.workspace.verbose:
print("dws>> Metrics: %s" % repr(metrics))
self.lineage.write_results(metrics)
self.lineage.complete()
def reset_lineage(self):
"""If you are rerunning a step, call this to reset the start and execution
times as well as the in_progress marker in the lineage.
"""
self.lineage.step.execution_time_seconds = None
self.lineage.step.start_time = datetime.datetime.now()
self.lineage.in_progress = True
```
#### File: dataworkspaces/third_party/rclone.py
```python
import logging
import subprocess
import tempfile
from dataworkspaces.errors import ConfigurationError
class RCloneException(ConfigurationError):
pass
class RClone:
"""
Wrapper class for rclone.
"""
def __init__(self, cfgfile=None, cfgstring=None):
self.log = logging.getLogger("RClone")
self._ensure_rclone_exists()
self.cfgstring = ''
self.cfgfile = None
if cfgstring:
self.cfgstring = cfgstring.replace("\\n", "\n")
elif cfgfile:
            self.cfgfile = cfgfile
else:
# find the default config file used by the rclone installation
ret = self._execute(['rclone', 'config', 'file'])
self.log.debug(ret)
if ret['code'] == 0:
# rclone config file output looks like:
#
# Configuration file is stored at:
# filename
# so we skip until the '\n'
self.cfgfile = ret['out'].splitlines()[1].decode('utf_8')
else:
print(ret)
raise ConfigurationError("RClone requires either a configuration file or a configuration string")
assert(self.cfgstring or self.cfgfile), 'Either a config string is given or a filename is given'
def _ensure_rclone_exists(self):
ret = self._execute(['rclone', 'version'])
if ret['code'] == -20:
raise ConfigurationError("rclone executable not found")
def _execute(self, command_with_args):
"""
Execute the given `command_with_args` using Popen
Args:
- command_with_args (list) : An array with the command to execute,
and its arguments. Each argument is given
as a new element in the list.
"""
self.log.debug("Invoking : %s", " ".join(command_with_args))
try:
with subprocess.Popen(
command_with_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
(out, err) = proc.communicate()
#out = proc.stdout.read()
#err = proc.stderr.read()
self.log.debug(out)
if err:
self.log.warning(err.decode("utf-8").replace("\\n", "\n"))
return {
"code": proc.returncode,
"out": out,
"error": err
}
except FileNotFoundError as not_found_e:
self.log.error("Executable not found. %s", not_found_e)
return {
"code": -20,
"error": not_found_e
}
except Exception as generic_e:
self.log.exception("Error running command. Reason: %s", generic_e)
return {
"code": -30,
"error": generic_e
}
def run_cmd(self, command, extra_args=[]):
"""
Execute rclone command
Args:
- command (string): the rclone command to execute.
- extra_args (list): extra arguments to be passed to the rclone command
"""
if self.cfgfile: # pass the file name to rclone
command_with_args = ["rclone", command, "--config", self.cfgfile]
command_with_args += extra_args
command_result = self._execute(command_with_args)
return command_result
# config is given in a string
# save the configuration in a temporary file
# and invoke rclone with the temporary file
with tempfile.NamedTemporaryFile(mode='wt', delete=True) as cfg_file:
# cfg_file is automatically cleaned up by python
self.log.debug("rclone config: ~%s~", self.cfgstring)
cfg_file.write(self.cfgstring)
cfg_file.flush()
command_with_args = ["rclone", command, "--config", cfg_file.name]
command_with_args += extra_args
command_result = self._execute(command_with_args)
cfg_file.close()
return command_result
def copy(self, source, dest, flags=[]):
"""
Executes: rclone copy source:path dest:path [flags]
Args:
- source (string): A string "source:path"
- dest (string): A string "dest:path"
- flags (list): Extra flags as per `rclone copy --help` flags.
"""
return self.run_cmd(command="copy", extra_args=[source] + [dest] + flags)
def sync(self, source, dest, flags=[]):
"""
Executes: rclone sync source:path dest:path [flags]
Args:
- source (string): A string "source:path"
- dest (string): A string "dest:path"
- flags (list): Extra flags as per `rclone sync --help` flags.
"""
return self.run_cmd(command="sync", extra_args=[source] + [dest] + flags)
def listremotes(self, flags=[]):
"""
Executes: rclone listremotes [flags]
Args:
- flags (list): Extra flags as per `rclone listremotes --help` flags.
"""
ret = self.run_cmd(command="listremotes", extra_args=flags)
if ret['code'] == 0:
list_remotes = map((lambda s: s[0:-1].decode('utf_8')), ret['out'].split(b'\n'))
#print(list_remotes)
return list(list_remotes)[0:-1]
else:
raise RCloneException('listremotes returns %d %s' % (ret['code'], ret['error']))
def ls(self, dest, flags=[]):
"""
Executes: rclone ls remote:path [flags]
Args:
- dest (string): A string "remote:path" representing the location to list.
"""
return self.run_cmd(command="ls", extra_args=[dest] + flags)
def lsjson(self, dest, flags=[]):
"""
Executes: rclone lsjson remote:path [flags]
Args:
- dest (string): A string "remote:path" representing the location to list.
"""
return self.run_cmd(command="lsjson", extra_args=[dest] + flags)
def delete(self, dest, flags=[]):
"""
Executes: rclone delete remote:path
Args:
- dest (string): A string "remote:path" representing the location to delete.
"""
return self.run_cmd(command="delete", extra_args=[dest] + flags)
def check(self, src, dest, flags=[]):
"""
Executes: rclone check src dest
"""
ret = self.run_cmd(command="check", extra_args=[src, dest] + flags)
if ret['code'] == 0:
return (0, ret['out'] )
else:
raise RCloneException('rclone.check returns error %d (%s)' % (ret['code'], ret['error']))
def with_config(cfgstring):
"""
Configure a new RClone instance.
"""
inst = RClone(cfgstring=cfgstring)
return inst
def test():
rc = with_config("""[local]
type = local
nounc = true""")
result = rc.listremotes()
print("result = ", result)
print("With default cfg file")
rc = RClone()
result = rc.listremotes()
print("result = ", result)
if __name__ == "__main__":
test()
```
#### File: data-workspaces-python/tests/test_local_files_resource.py
```python
import unittest
import sys
import os
import os.path
from os.path import join, exists
import shutil
import subprocess
import filecmp
import json
from utils_for_tests import BaseCase, SimpleCase, TEMPDIR, WS_DIR, WS_ORIGIN, OTHER_WS
from dataworkspaces.api import get_filesystem_for_resource
LOCAL_RESOURCE=join(WS_DIR, 'local-data')
LOCAL_RESOURCE_NAME='local-data'
DATA=join(LOCAL_RESOURCE, 'data.txt')
class TestLocalFiles(BaseCase):
def test_local_files_resource(self):
# create a primary ws, the origin, and the second ws
self._setup_initial_repo(create_resources=None)
self._clone_second_repo()
os.makedirs(LOCAL_RESOURCE)
with open(DATA, 'w') as f:
f.write("testing\n")
self._run_dws(['add', 'local-files', '--role', 'source-data', LOCAL_RESOURCE])
self._run_dws(['snapshot', 'S1'], cwd=WS_DIR)
# push and pull
self._run_dws(['push'], cwd=WS_DIR)
self._run_dws(['pull'], cwd=OTHER_WS)
def test_local_path_override(self):
# create a primary ws, the origin, and the second ws
self._setup_initial_repo(create_resources=None)
os.makedirs(LOCAL_RESOURCE)
with open(DATA, 'w') as f:
f.write("testing\n")
# make a copy of the data resource
LOCAL_RESOURCE_COPY=join(TEMPDIR, 'data-copy')
shutil.copytree(LOCAL_RESOURCE, LOCAL_RESOURCE_COPY)
self._run_dws(['add', 'local-files', '--role', 'source-data', LOCAL_RESOURCE])
self._run_dws(['snapshot', 'S1'], cwd=WS_DIR)
self._run_dws(['push'], cwd=WS_DIR)
shutil.rmtree(WS_DIR) # remove the original to simulate a remote clone
self._run_dws_with_input(['clone', WS_ORIGIN, 'workspace2'],
dws_input='localhost\n%s\n'%LOCAL_RESOURCE_COPY,
cwd=TEMPDIR)
class TestFileSystemAPIs(SimpleCase):
def test_filesystem_apis(self):
"""test open() and ls()"""
self._setup_initial_repo()
os.makedirs(LOCAL_RESOURCE)
with open(DATA, 'w') as f:
f.write("testing\n")
SUBDIR=join(LOCAL_RESOURCE, 'subdir')
os.makedirs(SUBDIR)
DATA2=join(SUBDIR, 'data2.txt')
with open(DATA2, 'w') as f:
f.write("testing2\n")
DATA3=join(SUBDIR, 'data3.txt')
with open(DATA3, 'w') as f:
f.write("testing3\n")
self._run_dws(['add', 'local-files', '--role', 'source-data', LOCAL_RESOURCE])
fs = get_filesystem_for_resource(LOCAL_RESOURCE_NAME, WS_DIR)
file_list = fs.ls('')
file_list.sort() # response is not deterministic
print(f"ls('') => {repr(file_list)}")
self.assertEqual(['data.txt', 'subdir'], file_list)
self.assertEqual(['data.txt'], fs.ls('data.txt'))
self.assertTrue(fs.isfile('data.txt'))
self.assertFalse(fs.isdir('data.txt'))
self.assertFalse(fs.isfile('subdir'))
self.assertTrue(fs.isdir('subdir'))
file_list2 = fs.ls('subdir')
file_list2.sort()
print(f"ls('subdir') => {repr(file_list2)}")
self.assertEqual(['subdir/data2.txt', 'subdir/data3.txt'], file_list2)
self.assertTrue(fs.isfile('subdir/data2.txt'))
self.assertFalse(fs.isdir('subdir/data2.txt'))
if __name__ == '__main__':
unittest.main()
```
#### File: data-workspaces-python/tests/test_tensorflow.py
```python
import unittest
import sys
import os.path
from os.path import exists, join
import json
import functools
import inspect
from utils_for_tests import SimpleCase, WS_DIR
try:
import tensorflow
TF_INSTALLED=True
if tensorflow.__version__.startswith('2.'):
TF_VERSION=2
else:
TF_VERSION=1
except ImportError:
TF_INSTALLED=False
try:
import numpy
NUMPY_INSTALLED=True
except ImportError:
NUMPY_INSTALLED=False
try:
import pandas
PANDAS_INSTALLED=True
except ImportError:
PANDAS_INSTALLED=False
from dataworkspaces.kits.wrapper_utils import NotSupportedError
def generator_from_arrays(x, y):
assert len(x)==len(y)
# keras expects the same number of dimensions, so, we reshape to add one more
old_shape = x[0].shape
new_shape = (1, old_shape[0], old_shape[1])
for i in range(len(y)):
yield(x[i].reshape(new_shape), y[i].reshape((1,1)))
class TestTensorflowKit(SimpleCase):
def setUp(self):
super().setUp()
if TF_INSTALLED:
import tensorflow as tf
self.sequential = tf.keras.Sequential
def tearDown(self):
super().tearDown()
if TF_INSTALLED:
import tensorflow as tf
tf.keras.Sequential = self.sequential
def _take_snapshot(self):
self._run_dws(['snapshot', 'S1'], cwd=WS_DIR)
@unittest.skipUnless(TF_INSTALLED, "SKIP: Tensorflow not available")
def test_wrapper_for_numpy(self):
"""This test follows the basic classification tutorial.
"""
import tensorflow as tf
import tensorflow.keras as keras
self._setup_initial_repo(git_resources='results', api_resources='fashion-mnist-data')
from dataworkspaces.kits.tensorflow import add_lineage_to_keras_model_class, CheckpointConfig
keras.Sequential = add_lineage_to_keras_model_class(keras.Sequential,
input_resource='fashion-mnist-data',
verbose=True,
workspace_dir=WS_DIR,
checkpoint_config=CheckpointConfig('fashion',
monitor='loss',
save_best_only=True))
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print("test accuracy: %s" % test_acc)
results_file = join(WS_DIR, 'results/results.json')
self.assertTrue(exists(results_file), "missing file %s" % results_file)
with open(results_file, 'r') as f:
data = json.load(f)
self.assertAlmostEqual(test_acc, data['metrics']['accuracy' if TF_VERSION==2 else 'acc'])
self.assertAlmostEqual(test_loss, data['metrics']['loss'])
self._take_snapshot()
@unittest.skipUnless(TF_INSTALLED, "SKIP: Tensorflow not available")
@unittest.skipUnless(NUMPY_INSTALLED, "SKIP: numpy not installed")
@unittest.skipUnless(PANDAS_INSTALLED, 'SKIP: pandas not available')
def test_wrapper_for_dataset(self):
"""This follows the csv tutorial (titanic data set)
"""
import tensorflow as tf
import pandas as pd
import numpy as np
self._setup_initial_repo(git_resources='results', api_resources='titanic-data')
TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv"
train_file_path = tf.keras.utils.get_file("train.csv", TRAIN_DATA_URL)
test_file_path = tf.keras.utils.get_file("eval.csv", TEST_DATA_URL)
LABEL_COLUMN = 'survived'
LABELS = [0, 1]
def get_dataset(file_path, **kwargs):
dataset = tf.data.experimental.make_csv_dataset(
file_path,
batch_size=5, # Artificially small to make examples easier to show.
label_name=LABEL_COLUMN,
na_value="?",
num_epochs=1,
ignore_errors=True,
**kwargs)
return dataset
raw_train_data = get_dataset(train_file_path)
raw_test_data = get_dataset(test_file_path)
SELECT_COLUMNS = ['survived', 'age', 'n_siblings_spouses', 'parch', 'fare']
DEFAULTS = [0, 0.0, 0.0, 0.0, 0.0]
temp_dataset = get_dataset(train_file_path,
select_columns=SELECT_COLUMNS,
column_defaults = DEFAULTS)
def pack(features, label):
return tf.stack(list(features.values()), axis=-1), label
packed_dataset = temp_dataset.map(pack)
class PackNumericFeatures(object):
def __init__(self, names):
self.names = names
def __call__(self, features, labels):
                numeric_features = [features.pop(name) for name in self.names]
                numeric_features = [tf.cast(feat, tf.float32) for feat in numeric_features]
numeric_features = tf.stack(numeric_features, axis=-1)
features['numeric'] = numeric_features
#print('features type: %s, labels type: %s' % (type(features), type(labels)))
return features, labels
NUMERIC_FEATURES = ['age','n_siblings_spouses','parch', 'fare']
packed_train_data = raw_train_data.map(PackNumericFeatures(NUMERIC_FEATURES))
packed_test_data = raw_test_data.map(
PackNumericFeatures(NUMERIC_FEATURES))
desc = pd.read_csv(train_file_path)[NUMERIC_FEATURES].describe()
MEAN = np.array(desc.T['mean'])
STD = np.array(desc.T['std'])
def normalize_numeric_data(data, mean, std):
# Center the data
return (data-mean)/std
normalizer = functools.partial(normalize_numeric_data, mean=MEAN, std=STD)
numeric_column = tf.feature_column.numeric_column('numeric', normalizer_fn=normalizer, shape=[len(NUMERIC_FEATURES)])
numeric_columns = [numeric_column]
numeric_layer = tf.keras.layers.DenseFeatures(numeric_columns)
CATEGORIES = {
'sex': ['male', 'female'],
'class' : ['First', 'Second', 'Third'],
'deck' : ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'],
'embark_town' : ['Cherbourg', 'Southhampton', 'Queenstown'],
'alone' : ['y', 'n']
}
categorical_columns = []
for feature, vocab in CATEGORIES.items():
cat_col = tf.feature_column.categorical_column_with_vocabulary_list(
key=feature, vocabulary_list=vocab)
categorical_columns.append(tf.feature_column.indicator_column(cat_col))
categorical_layer = tf.keras.layers.DenseFeatures(categorical_columns)
preprocessing_layer = tf.keras.layers.DenseFeatures(categorical_columns+numeric_columns)
from dataworkspaces.kits.tensorflow import add_lineage_to_keras_model_class, CheckpointConfig
tf.keras.Sequential = add_lineage_to_keras_model_class(tf.keras.Sequential, input_resource='titanic-data',
workspace_dir=WS_DIR,
checkpoint_config=CheckpointConfig('fashion',
monitor='loss',
save_best_only=True),
verbose=True)
model = tf.keras.Sequential([
preprocessing_layer,
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(
loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
train_data = packed_train_data.shuffle(500)
test_data = packed_test_data
if TF_VERSION==1:
with self.assertRaises(NotSupportedError):
model.fit(train_data, epochs=20)
return # stop early, not supported in 1.x
else:
model.fit(train_data, epochs=20)
test_loss, test_accuracy = model.evaluate(test_data)
print('\n\nTest Loss {}, Test Accuracy {}'.format(test_loss, test_accuracy))
self.assertAlmostEqual(test_accuracy, 0.88, delta=0.2)
self.assertAlmostEqual(test_loss, 0.31, delta=0.3)
predictions = model.predict(test_data)
results_file = join(WS_DIR, 'results/results.json')
self.assertTrue(exists(results_file), "missing file %s" % results_file)
with open(results_file, 'r') as f:
data = json.load(f)
self.assertAlmostEqual(test_accuracy, data['metrics']['accuracy' if TF_VERSION==2 else 'acc'])
self.assertAlmostEqual(test_loss, data['metrics']['loss'])
self._take_snapshot()
@unittest.skipUnless(TF_INSTALLED, "SKIP: Tensorflow not available")
def test_wrapper_for_generators(self):
"""This test follows the basic classification tutorial, modified for using
the fit_generator() and eval_generator() methods.
"""
import tensorflow as tf
import tensorflow.keras as keras
self._setup_initial_repo(git_resources='results', api_resources='fashion-mnist-data')
from dataworkspaces.kits.tensorflow import add_lineage_to_keras_model_class, CheckpointConfig
keras.Sequential = add_lineage_to_keras_model_class(keras.Sequential,
input_resource='fashion-mnist-data',
verbose=True,
workspace_dir=WS_DIR,
checkpoint_config=CheckpointConfig('fashion',
monitor='loss',
save_best_only=True))
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
g = generator_from_arrays(train_images, train_labels)
self.assertTrue(inspect.isgenerator(g))
model.fit(g, epochs=5, steps_per_epoch=2)
g2 = generator_from_arrays(test_images, test_labels)
test_loss, test_acc = model.evaluate(g2, steps=len(test_labels), verbose=2)
print("test accuracy: %s" % test_acc)
results_file = join(WS_DIR, 'results/results.json')
self.assertTrue(exists(results_file), "missing file %s" % results_file)
with open(results_file, 'r') as f:
data = json.load(f)
self.assertAlmostEqual(test_acc, data['metrics']['accuracy' if TF_VERSION==2 else 'acc'])
self.assertAlmostEqual(test_loss, data['metrics']['loss'])
self._take_snapshot()
@unittest.skipUnless(TF_INSTALLED, "SKIP: Tensorflow not available")
def test_wrapper_for_keras_sequence(self):
"""This test follows the basic classification tutorial, modified for using
the fit_generator() and eval_generator() methods.
"""
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.utils as kerasutils
class KSequence(kerasutils.Sequence):
def __init__(self, x, y):
assert len(x)==len(y)
self.x = x
self.y = y
old_shape = x[0].shape
self.new_shape = (1, old_shape[0], old_shape[1])
def __iter__(self):
return generator_from_arrays(self.x, self.y)
def __getitem__(self, idx):
return (self.x[idx].reshape(self.new_shape), self.y[idx].reshape((1,1)))
def __len__(self):
return len(self.y)
self._setup_initial_repo(git_resources='results', api_resources='fashion-mnist-data')
from dataworkspaces.kits.tensorflow import add_lineage_to_keras_model_class, CheckpointConfig
keras.Sequential = add_lineage_to_keras_model_class(keras.Sequential,
input_resource='fashion-mnist-data',
verbose=True,
workspace_dir=WS_DIR,
checkpoint_config=CheckpointConfig('fashion',
monitor='loss',
save_best_only=True))
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
g = KSequence(train_images, train_labels)
model.fit(g, epochs=5, steps_per_epoch=2)
g2 = KSequence(test_images, test_labels)
test_loss, test_acc = model.evaluate(g2, steps=len(test_labels), verbose=2)
print("test accuracy: %s" % test_acc)
results_file = join(WS_DIR, 'results/results.json')
self.assertTrue(exists(results_file), "missing file %s" % results_file)
with open(results_file, 'r') as f:
data = json.load(f)
self.assertAlmostEqual(test_acc, data['metrics']['accuracy' if TF_VERSION==2 else 'acc'])
self.assertAlmostEqual(test_loss, data['metrics']['loss'])
self._take_snapshot()
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jfischer/killproc",
"score": 2
}
|
#### File: killproc/killproc/killproc.py
```python
import sys
import os
import subprocess
from signal import SIGKILL, SIGTERM, NSIG
from optparse import OptionParser
if sys.platform == 'linux2':
psargs = '-ef'
pid_field = 1
cmd_field = 7
else:
psargs = '-Ax'
pid_field = 0
cmd_field = 4
def get_matching_processes(process_name, this_program=sys.argv[0]):
subproc = subprocess.Popen(["/bin/ps", psargs],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
result = []
for line in subproc.stdout:
if (line.find(process_name) != -1):
if line.find(this_program) != -1:
continue
fields = line.split()
result.append((int(fields[pid_field]), ' '.join(fields[cmd_field:])))
(pid, exit_status) = os.waitpid(subproc.pid, 0)
return result
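# Illustrative result (values are made up): get_matching_processes("nginx")
# scans `ps -ef` (or `ps -Ax` on non-Linux platforms) and might return
# [(1234, '/usr/sbin/nginx -g daemon off;')], one (pid, command) tuple per
# matching line, skipping the line for this script itself.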
def get_signal_name(signal_no):
signal_names = {
SIGKILL: "SIGKILL",
SIGTERM: "SIGTERM"
}
if signal_names.has_key(signal_no):
return signal_names[signal_no]
else:
return str(signal_no)
def kill_procs_interactive(process_name, signal_no):
matches = get_matching_processes(process_name)
signame = get_signal_name(signal_no)
if len(matches)>0:
cnt = 0
for (pid, cmd) in matches:
print "%s" % cmd
data = raw_input("Kill process %d? [y]" % pid)
if len(data)==0 or data[0]=="Y" or data[0]=="y":
print "Sending signal %s to process %d" % (signame, pid)
os.kill(pid, signal_no)
cnt = cnt + 1
print "Sent signal %s to %d processes" % (signame, cnt)
return 0
else:
print "No matches for pattern '%s'" % process_name
return 1
def kill_procs_noninteractive(process_name, signal_no):
matches = get_matching_processes(process_name)
if len(matches)>0:
cnt = 0
for (pid, cmd) in matches:
print "[%d] %s" % (pid, cmd)
os.kill(pid, signal_no)
cnt = cnt + 1
print "Sent signal %s to %d processes" % (get_signal_name(signal_no), cnt)
return 0
else:
print "No matches for pattern '%s'" % process_name
return 1
def main(argv=sys.argv[1:]):
usage = "usage: %prog [options] process_name"
parser = OptionParser(usage=usage)
parser.add_option("-k", "--with-extreme-prejudice", action="store_true", dest="use_sig_kill",
default=False, help="If specified, use SIGKILL (default is SIGTERM)")
parser.add_option("-s", "--signal", action="store", type="int", dest="signal_no",
default=None, help="Use the specified signal. Defaults to %d (SIGTERM)" % SIGTERM)
parser.add_option("-n", "--non-interactive", action="store_true", dest="non_interactive",
default=False, help="If specified, don't ask user for confirmation")
(options, args) = parser.parse_args(args=argv)
if len(args) == 0:
parser.print_help()
return 1
if len(args) > 1:
parser.error("Expecting exactly one argument: process_name")
process_name = args[0]
if options.use_sig_kill and options.signal_no!=None and options.signal_no!=SIGKILL:
parser.error("Please specify only one of --with-extreme-prejudice and --signal")
elif options.use_sig_kill:
signal_no = SIGKILL
elif options.signal_no!=None:
if (options.signal_no >= NSIG) or (options.signal_no < 0):
parser.error("Invalid signal number %d, signals are from 0 to %d" % (options.signal_no, NSIG-1))
signal_no = options.signal_no
else:
signal_no = SIGTERM
if options.non_interactive:
return kill_procs_noninteractive(process_name, signal_no)
else:
return kill_procs_interactive(process_name, signal_no)
if __name__ == "__main__":
sys.exit(main())
```
|
{
"source": "jfischer/neo4j-db-utils",
"score": 3
}
|
#### File: neo4j-db-utils/neo4j_db_utils/import_defs.py
```python
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import string
import sys
from six import add_metaclass
PYTHON_BASE_VERSION=sys.version_info[0]
##########################################################################
# Types for the Graph Components (nodes and relationships)
##########################################################################
@add_metaclass(ABCMeta)
class Node(object):
"""Subclass from this to represent your nodes.
"""
@abstractmethod
def get_node_type(self):
pass
@abstractmethod
def get_node_id(self):
"""Return the id for this node. It should be unique
within the node type. It does not necessarily have to
be unique across all node types."""
pass
@abstractmethod
def reduce(self, other):
"""Combine two nodes with the same id"""
pass
@abstractmethod
def to_csv_row(self):
"""Convert the node to a single row in the final csv. This should
return a list of values, one per each column."""
pass
@add_metaclass(ABCMeta)
class Relationship(object):
"""Subclass for your relationships. You will want to use __slots__
when possible to reduce the memory usage of each instance. If you
do not have any properties on the relationship, just use the
SimpleRelationship subclass.
"""
@abstractmethod
def get_rel_id(self):
"""Return the unique id for this relationship.
"""
pass
@abstractmethod
def get_source_node_type(self):
pass
@abstractmethod
def get_dest_node_type(self):
        pass
@abstractmethod
def reduce(self, other):
"""Combine two relationships with the same id.
"""
pass
@abstractmethod
def to_csv_row(self):
"""Convert the relationship to a single row in the final csv. This should
return a list of values, one per each column"""
pass
# One way to uniquely define relationships
# We assume that there is only one edge of a given type
# between any two nodes.
RelId = namedtuple('RelId',
['source_type', 'source_id', 'rel_type', 'dest_type', 'dest_id'])
class SimpleRelationship(Relationship, RelId):
"""This is a concrete class you can use for relationships that don't have
any properties, just a type. In that case, the "id" of the relationship
provides the full specification.
"""
def get_rel_id(self):
return self
def reduce(self, other):
assert self==other
return self
def to_csv_row(self):
return [self.source_id, self.dest_id, self.rel_type]
def get_source_node_type(self):
return self.source_type
def get_dest_node_type(self):
return self.dest_type
def get_rel_type(self):
return self.rel_type
@staticmethod
def get_header_row(rel_type, from_type, to_type):
"""Can use this in MapReduceTemplate.get_rel_header_row()
if all your relationships have no properties.
"""
return [':START_ID(%s)' % from_type,
':END_ID(%s)' % to_type,
':TYPE']
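# --- Illustrative sketch (not part of the original module) ---
# A minimal concrete Node subclass showing how the abstract API above is meant
# to be filled in; ExamplePersonNode, the 'Person' node type, and its fields
# are assumptions made for illustration only.
class ExamplePersonNode(Node):
    def __init__(self, person_id, name):
        self.person_id = person_id
        self.name = name
    def get_node_type(self):
        return 'Person'
    def get_node_id(self):
        return self.person_id
    def reduce(self, other):
        # when two rows share an id, keep the longer (more complete) name
        if other.name and len(other.name) > len(self.name or ''):
            self.name = other.name
        return self
    def to_csv_row(self):
        return [self.person_id, self.name]
# A property-less relationship needs no subclass at all:
#   rel = SimpleRelationship(source_type='Person', source_id='p1',
#                            rel_type='KNOWS', dest_type='Person', dest_id='p2')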
@add_metaclass(ABCMeta)
class MapReduceTemplate(object):
"""This abstract class contains methods which provide the necessary
implementation needed for the map-reduce algorithm.
"""
@abstractmethod
def map_input(self, input):
"""Take a single input and returns (node_list, relationship_list) tuple
"""
pass
@abstractmethod
def get_node_header_row(self, node_type):
"""For the specified node type, return the header row to be used by
        the node csv file. See
https://neo4j.com/docs/operations-manual/current/tools/import/file-header-format/
"""
pass
@abstractmethod
def get_rel_header_row(self, rel_type, source_type, dest_type):
"""For the specific relationship type, return the header row to be used
by the relationship csv file.
"""
pass
##########################################################################
# General utilities for data cleansing
##########################################################################
NONPRINTABLE=set('\n\r\x0b\x0c').union(set([chr(i) for i in range(128)]).difference(string.printable))
if PYTHON_BASE_VERSION==3:
XLATE={ord(character):chr(183) for character in NONPRINTABLE}
# tab converted to space
XLATE[ord('\t')] = ' '
def cleanup_text(text):
"""Remove problematic characters for the CSV import"""
return text.translate(XLATE) if text is not None else None
XLATE_IDS={ord(character):chr(183) for character in NONPRINTABLE}
XLATE_IDS[ord(' ')]=''
XLATE_IDS[ord('\t')]=''
def cleanup_id(text):
"""Remove problematic characters for the CSV import. Neo4j seems to ignore spaces in ids,
so we remove them completely."""
return text.translate(XLATE_IDS) if text is not None else None
else: # The translate() method behaves a little differently in python 2
DELETECHARS=''.join(NONPRINTABLE)+'\t'
def cleanup_text(text):
"""Remove problematic characters for the CSV import"""
return text.translate(None, DELETECHARS) if text is not None else None
IDDELETECHARS=DELETECHARS+' '
def cleanup_id(text):
"""Remove problematic characters for the CSV import. Neo4j seems to ignore spaces in ids,
so we remove them completely."""
        return text.translate(None, IDDELETECHARS) if text is not None else None
```
|
{
"source": "jfisher993/api-ai-weather-test",
"score": 3
}
|
#### File: jfisher993/api-ai-weather-test/app.py
```python
import urllib.request
import json
import os
#print(json.dumps(data, indent=4))
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
res = processRequest(req)
res = json.dumps(res, indent=4)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
if req.get("result").get("action") != "searchQVC":
return {}
res = makeWebhookResult(req)
return res
def get_jsonparsed_data(url):
response = urllib.request.urlopen(url)
data = response.read()
return json.loads(data.decode('utf-8'))
def makeWebhookResult(req):
result = req.get("result")
parameters = result.get("parameters")
countryCode = parameters.get("countryCode")
productNumber = parameters.get("productNumber")
url = 'https://api.qvc.com/api/sales/presentation/v3/' + countryCode + '/products/' + productNumber + '?response-depth=full'
data = get_jsonparsed_data(url)
speech = data.get('productNumber') + "\n" + "Brand Name: " + data.get('brandName') + "\n" + data.get('shortDescription')
return {
"speech": speech,
"displayText": speech,
# "data": {"slack": speech},
# "data": {"facebook": speech},
"source": "apiai-weather-webhook-test"
}
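# Example request body this webhook expects (a sketch; field values are made up):
#   {
#     "result": {
#       "action": "searchQVC",
#       "parameters": {"countryCode": "us", "productNumber": "A12345"}
#     }
#   }
# processRequest() returns an empty dict for any other "action" value.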
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
app.run(debug=False, port=port, host='0.0.0.0')
```
|
{
"source": "jfishe/vimwiki_docx",
"score": 3
}
|
#### File: panvimwiki/filter/delete_taskwiki_heading.py
```python
import re
import panflute as pf
def prepare(doc):
"""Pre-filter—do nothing."""
pass
def action(elem, doc):
"""Remove taskwiki heading."""
regex = re.compile(r"\|.*$", re.MULTILINE)
subst = ""
if isinstance(elem, pf.Header):
result = re.sub(regex, subst, elem.identifier, 0)
elem.content = pf.convert_text(result)[0].content
elem.identifier = result
return elem
# return None -> element unchanged
# return [] -> delete element
return None
def finalize(doc):
"""Post-filter—do nothing."""
pass
def main(doc=None):
"""Remove taskwiki heading.
Pandoc filter using panflute
"""
return pf.run_filter(action, prepare=prepare, finalize=finalize, doc=doc)
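# Assumed invocation (a sketch): panflute filters read the pandoc JSON AST on
# stdin, so a typical pipeline would look something like
#   pandoc diary.wiki -f vimwiki -t docx \
#     --filter panvimwiki/filter/delete_taskwiki_heading.py -o diary.docx
# which is intended to strip the taskwiki portion of each heading (the text
# from '|' onward).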
if __name__ == "__main__":
main()
```
#### File: tests/func/test_vimwiki_week.py
```python
import datetime
from pathlib import Path
import pytest
from panvimwiki.vimwiki_week import concatenate_diary
@pytest.mark.freeze_time("2017-04-27")
@pytest.mark.parametrize(
"start_date, end_date, expected",
[
pytest.param(None, None, (2, 2521), id="No start or end date"),
pytest.param(
"2017-04-24", "2017-04-26", (2, 2521), id="Specify start and end dates"
),
],
)
def test_concatenate_diary(start_date, end_date, expected):
"""Test concatenate_diary with no dates provided.
Given no start or end dates, concatenate_diary will find 2 diary files,
return diary path for concatenated size of 2334.
Parameters
----------
None
Returns
-------
None
"""
assert datetime.date.today() == datetime.date(2017, 4, 27)
diary_path: Path = Path(__file__).parents[0] / "vimwiki/diary"
assert len(list(diary_path.glob("*.wiki"))) == expected[0]
diaryout: Path = concatenate_diary(
diary_path, start_date=start_date, end_date=end_date
)
assert diaryout.stat().st_size == expected[1]
```
|
{
"source": "J-Fit/JFit",
"score": 2
}
|
#### File: JFit/source/flux_Honda.py
```python
class flux_Honda:
def __init__(self, exp_site='juno'):
import os
import numpy as np
curPath = os.path.dirname(os.path.realpath(__file__))
# all direction
self.all_diret_solmin = np.loadtxt(curPath + '/data/' + exp_site +
'-ally-01-01-solmin.d',
skiprows=2)
from scipy import interpolate
self.f_flux_all_direct = {}
self.f_flux_all_direct[14] = interpolate.InterpolatedUnivariateSpline(
self.all_diret_solmin[:, 0], self.all_diret_solmin[:, 1])
self.f_flux_all_direct[-14] = interpolate.InterpolatedUnivariateSpline(
self.all_diret_solmin[:, 0], self.all_diret_solmin[:, 2])
self.f_flux_all_direct[12] = interpolate.InterpolatedUnivariateSpline(
self.all_diret_solmin[:, 0], self.all_diret_solmin[:, 3])
self.f_flux_all_direct[-12] = interpolate.InterpolatedUnivariateSpline(
self.all_diret_solmin[:, 0], self.all_diret_solmin[:, 4])
self.particle_list = {12, -12, 14, -14}
# cos\theta_z
import pandas as pd
filename = curPath + '/data/' + exp_site + '-ally-20-01-solmin.d'
keys_data = ['Enu(GeV)', 'NuMu', 'NuMubar', 'NuE', 'NuEbar']
N_data_pts = 101
self.data_coz_E_solmin = [
pd.read_table(filename,
skiprows=2 + (N_data_pts + 2) * i,
delim_whitespace=True,
names=keys_data,
nrows=N_data_pts) for i in range(20)
]
energy_x = self.data_coz_E_solmin[0][keys_data[0]]
cosz_y = []
phi_z = {key: [] for key in keys_data}
for i in range(20):
cosz_y.append(0.95 - 0.1 * i)
for key in keys_data:
phi_z[key].append(self.data_coz_E_solmin[i][key])
# outer most boundary conditions
cosz_y[0] = 1.0
cosz_y[-1] = -1.0
self.f_flux_ecz = {}
self.f_flux_ecz[14] = interpolate.interp2d(x=energy_x.values,
y=cosz_y,
z=phi_z['NuMu'])
self.f_flux_ecz[-14] = interpolate.interp2d(x=energy_x.values,
y=cosz_y,
z=phi_z['NuMubar'])
self.f_flux_ecz[12] = interpolate.interp2d(x=energy_x.values,
y=cosz_y,
z=phi_z['NuE'])
self.f_flux_ecz[-12] = interpolate.interp2d(x=energy_x.values,
y=cosz_y,
z=phi_z['NuEbar'])
def get_flux(self, Enu, cosz, flavor_ID=14):
import numpy as np
if flavor_ID in self.particle_list:
return self.f_flux_ecz[flavor_ID](Enu, cosz)
else:
print("WRONG PDGID! Need one of:",self.particle_list)
return np.zeros_like(Enu)
def get_flux_all_direct(self, Enu, flavor_ID=12):
import numpy as np
if flavor_ID in self.particle_list:
return self.f_flux_all_direct[flavor_ID](Enu)
else:
print("WRONG PDGID! Need one of:",self.particle_list)
return np.zeros_like(Enu)
def get_flavor_ratio(self, Enu, flavor_a=12, flavor_b=14):
'''
Enu: neutrino energy in GeV.
Flavor: PDGID
        '''
        import numpy as np
        if {flavor_a, flavor_b}.issubset(self.particle_list):
return self.f_flux_all_direct[flavor_a](
Enu) / self.f_flux_all_direct[flavor_b](Enu)
else:
print("WRONG PDGID!")
return np.zeros_like(Enu)
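# Example usage (a sketch; assumes the Honda flux tables exist under data/):
#   flux = flux_Honda(exp_site='juno')
#   phi_numu = flux.get_flux(Enu=1.0, cosz=0.5, flavor_ID=14)       # zenith-dependent
#   phi_numubar = flux.get_flux_all_direct(Enu=1.0, flavor_ID=-14)  # direction-averaged
#   ratio = flux.get_flavor_ratio(Enu=1.0, flavor_a=12, flavor_b=14)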
def get_parser():
import argparse
parser = argparse.ArgumentParser(description="For JUNO NMO analysis.")
parser.add_argument("--JUNOFlux",
action="store_true",
default=False,
help="Show atmospheric Flux at JUNO.")
parser.add_argument("--INO",
action="store_true",
default=False,
help="Show atmospheric Flux at INO.")
parser.add_argument("--JUNOINO",
action="store_true",
default=False,
help="Show atmospheric Flux at diff of INO and JUNO.")
return parser
def ShowJUNOFlux():
my_juno_flux = flux_Honda()
Enu = np.linspace(1, 20, 100)
phi_mu = my_juno_flux.get_flux_all_direct(Enu, flavor_ID=14)
phi_mu_bar = my_juno_flux.get_flux_all_direct(Enu, flavor_ID=-14)
phi_e = my_juno_flux.get_flux_all_direct(Enu, flavor_ID=12)
phi_e_bar = my_juno_flux.get_flux_all_direct(Enu, flavor_ID=-12)
plt.plot(Enu, phi_mu / phi_mu_bar, label=r'$\nu_{\mu}$/$\bar{\nu}_{\mu}$')
plt.plot(Enu, phi_e / phi_e_bar, label=r'$\nu_{e}$/$\bar{\nu}_{e}$')
plt.plot(
Enu, (phi_mu + phi_mu_bar) / (phi_e + phi_e_bar),
label=r'($\nu_{\mu}$+$\bar{\nu}_{\mu}$)/($\nu_{e}$+$\bar{\nu}_{e}$)')
# plt.plot(Enu,
# my_juno_flux.get_flux_all_direct(Enu, flavor_ID=14),
# label=r'$\nu_{\mu}$')
# plt.plot(Enu,
# my_juno_flux.get_flux_all_direct(Enu, flavor_ID=-14),
# label=r'$\bar{\nu}_{\mu}$')
# plt.plot(Enu, my_juno_flux.get_flux_all_direct(Enu, flavor_ID=12), label=r'$\nu_{e}$')
# plt.plot(Enu,
# my_juno_flux.get_flux_all_direct(Enu, flavor_ID=-12),
# label=r'$\bar{\nu}_{e}$')
# plt.yscale('log')
plt.xscale('log')
plt.xlabel('Neutrino Energy [GeV]')
plt.ylabel(r'$(m^2\cdot sec\cdot sr\cdot GeV)^{-1}$')
plt.ylabel(r'Flavour ratio')
plt.legend()
plt.show()
def ShowJUNO_INOFLux():
my_juno_flux = flux_Honda()
my_ino_flux = flux_Honda(exp_site='ino')
Enu = np.linspace(1, 20, 100)
# plt.plot(Enu, my_juno_flux.get_flavor_ratio(Enu=Enu, flavor_a=12, flavor_b=14))
plt.plot(Enu,
my_juno_flux.get_flux_all_direct(Enu, flavor_ID=14) /
my_ino_flux.get_flux_all_direct(Enu, flavor_ID=14),
label=r'$\nu_{\mu}$')
plt.plot(Enu,
my_juno_flux.get_flux_all_direct(Enu, flavor_ID=-14) /
my_ino_flux.get_flux_all_direct(Enu, flavor_ID=-14),
label=r'$\bar{\nu}_{\mu}$')
plt.plot(Enu,
my_juno_flux.get_flux_all_direct(Enu, flavor_ID=12) /
my_ino_flux.get_flux_all_direct(Enu, flavor_ID=12),
label=r'$\nu_{e}$')
plt.plot(Enu,
my_juno_flux.get_flux_all_direct(Enu, flavor_ID=-12) /
my_ino_flux.get_flux_all_direct(Enu, flavor_ID=-12),
label=r'$\bar{\nu}_{e}$')
# plt.yscale('log')
# plt.xscale('log')
plt.xlabel('Neutrino Energy [GeV]')
# plt.ylabel(r'$(m^2\cdot sec\cdot sr\cdot GeV)^{-1}$')
plt.ylabel(r'JUNO/INO(YB)')
plt.legend()
plt.show()
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('../Style/Paper.mplstyle')
parser = get_parser()
args = parser.parse_args()
if args.JUNOFlux:
ShowJUNOFlux()
if args.JUNOINO:
ShowJUNO_INOFLux()
```
|
{
"source": "jfitz/code-stat",
"score": 2
}
|
#### File: jfitz/code-stat/assembly_examiner.py
```python
import string
import math
from codestat_token import Token
from codestat_tokenizer import Tokenizer
from token_builders import (
InvalidTokenBuilder,
NullTokenBuilder,
WhitespaceTokenBuilder,
NewlineTokenBuilder,
EscapedStringTokenBuilder,
PrefixedStringTokenBuilder,
IntegerTokenBuilder,
IntegerExponentTokenBuilder,
PrefixedIntegerTokenBuilder,
SuffixedIntegerTokenBuilder,
RealTokenBuilder,
IdentifierTokenBuilder,
CaseInsensitiveListTokenBuilder,
CaseSensitiveListTokenBuilder,
LeadToEndOfLineTokenBuilder,
SingleCharacterTokenBuilder
)
from assembly_token_builders import (
LabelTokenBuilder,
AssemblyCommentTokenBuilder,
MultilineCommentTokenBuilder,
HashQuoteCharTokenBuilder
)
from examiner import Examiner
class AssemblyExaminer(Examiner):
@staticmethod
def __escape_z__():
InvalidTokenBuilder.__escape_z__()
WhitespaceTokenBuilder.__escape_z__()
NewlineTokenBuilder.__escape_z__()
EscapedStringTokenBuilder.__escape_z__()
PrefixedStringTokenBuilder.__escape_z__()
IntegerTokenBuilder.__escape_z__()
IntegerExponentTokenBuilder.__escape_z__()
PrefixedIntegerTokenBuilder.__escape_z__()
SuffixedIntegerTokenBuilder.__escape_z__()
RealTokenBuilder.__escape_z__()
IdentifierTokenBuilder.__escape_z__()
CaseInsensitiveListTokenBuilder.__escape_z__()
CaseSensitiveListTokenBuilder.__escape_z__()
LeadToEndOfLineTokenBuilder.__escape_z__()
SingleCharacterTokenBuilder.__escape_z__()
LabelTokenBuilder.__escape_z__()
AssemblyCommentTokenBuilder.__escape_z__()
MultilineCommentTokenBuilder.__escape_z__()
HashQuoteCharTokenBuilder.__escape_z__()
return 'Escape ?Z'
def __init__(self, code, tab_size, processor):
super().__init__()
self.newlines_important = 'always'
operand_types = []
whitespace_tb = WhitespaceTokenBuilder()
newline_tb = NewlineTokenBuilder()
comment_tb = LeadToEndOfLineTokenBuilder(';', True, 'comment')
if processor in ['pdp-8']:
comment_tb = LeadToEndOfLineTokenBuilder('/', True, 'comment')
comment_2_tb = NullTokenBuilder()
if processor in ['1802']:
comment_2_tb = LeadToEndOfLineTokenBuilder('..', True, 'comment')
line_comment_star_tb = AssemblyCommentTokenBuilder('*')
line_comment_hash_tb = NullTokenBuilder()
if processor in ['68000']:
line_comment_hash_tb = AssemblyCommentTokenBuilder('#')
stmt_separator_tb = NullTokenBuilder()
if processor in ['pdp-8']:
stmt_separator_tb = SingleCharacterTokenBuilder(';', 'statement separator', False)
integer_tb = IntegerTokenBuilder("'")
integer_exponent_tb = IntegerExponentTokenBuilder("'")
integer_1_tb = NullTokenBuilder()
integer_2_tb = NullTokenBuilder()
prefixed_integer_tb = PrefixedIntegerTokenBuilder('#', True, '0123456789')
if processor in ['pdp-11']:
integer_1_tb = SuffixedIntegerTokenBuilder('$', True, '0123456789')
if processor in ['z80']:
integer_1_tb = SuffixedIntegerTokenBuilder('O', True, '0123456789')
integer_2_tb = SuffixedIntegerTokenBuilder('D', True, '0123456789')
hex_integer_1_tb = PrefixedIntegerTokenBuilder('&', True, '0123456789abcdefABCDEF')
hex_integer_2_tb = SuffixedIntegerTokenBuilder('h', False, '0123456789abcdefABCDEF')
hex_integer_3_tb = PrefixedIntegerTokenBuilder('$', True, '0123456789abcdefABCDEF')
hex_integer_4_tb = PrefixedIntegerTokenBuilder('#$', True, '0123456789abcdefABCDEF')
hash_quote_value_tb = NullTokenBuilder()
if processor in ['pdp-11']:
hash_quote_value_tb = HashQuoteCharTokenBuilder()
operand_types.append('number')
leads = '_.$@#'
extras = '_.$@#'
identifier_tb = IdentifierTokenBuilder(leads, extras)
operand_types.append('identifier')
label_tb = LabelTokenBuilder(leads, extras, ':')
quotes = ['"', "'", "’"]
string_tb = EscapedStringTokenBuilder(quotes, 0)
operand_types.append('string')
known_operators = [
'+', '-', '*', '/', '&', '|', '=', '??', '#', '@', "'", '!'
]
self.unary_operators = [
'+', '-', '??', '#', '@', "'"
]
self.postfix_operators = ['+']
groupers = ['(', ')', ',', '[', ']', '<', '>', ':']
group_starts = ['(', '[', ',', '<']
group_ends = [')', ']', '>']
group_mids = [',', ':']
groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)
preprocessors = [
'if', 'ifne', 'ifeq',
'else', 'endif', 'endc',
'error'
]
preprocessors_68000 = [
'MACRO', 'ENDM'
]
preprocessors_8080 = [
'MACRO', 'ENDM'
]
preprocessors_8086 = [
'ELSE', 'ELSEIF', 'ELSEIF2', 'ENDM', 'EXITM',
'FOR', 'FORC',
'GOTO',
'IF', 'IF2', 'IFB', 'IFNB', 'IFDEF', 'IFNDEF',
'IFDIF', 'IFDIF[[I]]', 'IFE', 'IFIDN', 'IFIDN[[I]]',
'LOCAL',
'MACRO',
'PURGE',
'.BREAK', '.CONTINUE',
'.ELSE', '.ELSEIF', '.ENDIF',
'.ERR', '.ERR2', '.ERRB', '.ERRDEF',
'.ERRDIF', '.ERRDIF[[I]]]', '.ERRE', '.ERRIDN', '.ERRIDN[[I]]',
'.ERRNB', '.ERRNDEF', '.ERRNZ', '.EXIT',
'.IF',
'.REPEAT', '.UNTIL', '.UNTILCXZ',
'.WHILE'
]
if processor in ['68000']:
preprocessors += preprocessors_68000
if processor in ['8080']:
preprocessors += preprocessors_8080
if processor in ['8086']:
preprocessors += preprocessors_8086
preprocessor_tb = CaseInsensitiveListTokenBuilder(preprocessors, 'preprocessor', False)
directives = [
'DB', 'DW', 'DS',
'EJECT', 'END', 'EQU', 'EXTRN',
'INCLUDE',
'NAME',
'ORG',
'PAGE',
'SECTION', 'SEGMENT', 'START', 'SUBTITLE',
'TEXT'
]
directives_6502 = [
'DFB', 'DFW'
]
directives_6800 = [
'CPU',
'NAM'
]
directives_68000 = [
'=',
'EVEN',
'ODD'
]
directives_8080 = [
'ASEG',
'CPU',
'LOCAL',
'TITLE',
'.8080', '.8086', '.6800', '.6502', ".386",
]
directives_z80 = [
'DEFB', 'DEFS', 'DEFW'
]
directives_8086 = [
'=',
'ABSOLUTE', 'ALIAS', 'ALIGN', 'AS', 'ASSUME', 'AT',
'BITS', 'BYTE',
'COMM', 'COMMON', 'CPU', 'CSEG',
'DEFAULT', 'DSEG', 'DWORD',
'ECHO', 'ENDP', 'ENDS', 'EVEN', 'EXTERNDEF',
'FWORD', 'FORMAT',
'GLOBAL', 'GROUP',
'INCLUDELIB', 'INS86', 'INVOKE',
'LABEL',
'MMWORD',
'OPTION',
'POPCONTEXT', 'PROC', 'PROTO', 'PUBLIC', 'PUSHCONTEXT',
'SEGMENT',
'QWORD',
'REAL4', 'REAL8', 'REAL10', 'RECORD',
'STRUCT',
'TEXTEQU', 'TBYTE', 'TYPEDEF',
'WORD',
'SBYTE', 'SDWORD', 'SWORD',
'SECT', 'SECTION', 'SEGMENT', 'STATIC',
'UNION', 'USE16', 'USE32', 'USE64',
'VIRTUAL',
'XMMWORD', 'YMMWORD',
'.386', '.386P', '.387', '.486', '.486P', '.586', '.586P',
'.686', '.686P', '.K3D',
'.ALLOCSTACK', '.ALPHA',
'.CODE', '.CONST', '.CREF',
'.DATA', '.DATA?', '.DOSSEG',
'.ENDW', '.ENDPROLOG',
'.FARDATA', '.FARDATA?', '.FPO',
'.LIST', '.LISTALL', '.LISTIF', '.LISTMACRO', '.LISTMACROALL',
'.MODEL', '.MMX',
'.NOCREF', '.NOLIST', '.NOLISTIF', '.NOLISTMACRO',
'.PUSHFRAME', '.PUSHREG',
'.RADIX',
'.SAFESEH', '.SALL', '.SAVEREG', '.SAVEXMM128', '.STACK', '.STARTUP',
'.SEQ', '.SETFRAME',
'.TFCOND',
'.XLIST', '.XMM',
]
directives_80386 = [
'ALIGN',
'BITS',
'GLOBAL',
'PROC',
'SECTION',
'RESB', 'RESD',
'.386',
'.CODE',
'.DATA',
'.MODEL',
'.TEXT',
'%INCLUDE',
]
directives_pdp8 = [
'='
]
directives_pdp11 = [
'=',
'BYTE',
'WORD',
'.odd', '.even', '.blkb', '.blkw', '.byte', '.word',
'.ascii', '.asciz', '.end', '.hex', '.radix',
'.ident', '.if', '.ift', '.endc', '.psect', '.mcall',
'.macro', '.endm', '.restore', '.print', '.error',
'.list', '.nlist'
]
if processor in ['6502']:
directives += directives_6502
if processor in ['6800']:
directives += directives_6800
if processor in ['68000']:
directives += directives_68000
if processor in ['8080']:
directives += directives_8080
if processor in ['z80']:
directives += directives_z80
if processor in ['8086']:
directives += directives_8086
if processor in ['80386']:
directives += directives_80386
if processor in ['pdp-8']:
directives += directives_pdp8
if processor in ['pdp-11']:
directives += directives_pdp11
directive_tb = CaseInsensitiveListTokenBuilder(directives, 'directive', False)
title_directive_tb = LeadToEndOfLineTokenBuilder('TITLE', False, 'directive')
title_directive_2_tb = LeadToEndOfLineTokenBuilder('.TITLE', False, 'directive')
subtitle_directive_tb = LeadToEndOfLineTokenBuilder('SUBTTL', False, 'directive')
subtitle_directive_2_tb = LeadToEndOfLineTokenBuilder('.SUBTTL', False, 'directive')
subtitle_directive_3_tb = LeadToEndOfLineTokenBuilder('.SBTTL', False, 'directive')
include_directive_tb = LeadToEndOfLineTokenBuilder('INCLUDE', False, 'directive')
include_directive_2_tb = LeadToEndOfLineTokenBuilder('.INCLUDE', False, 'directive')
multiline_comment_tb = MultilineCommentTokenBuilder()
opcodes_1802 = [
'IDL', 'LDN', 'INC', 'DEC', 'BR', 'BO', 'BZ', 'BDF', 'BPZ', 'BGE',
'B1', 'B2', 'B3', 'B4', 'SKP', 'NBR', 'BNO', 'BNZ', 'BNF', 'BM', 'BL',
'BN1', 'BN2', 'BN3', 'BN4', 'LDA', 'STR', 'IRX', 'OUT', 'INP',
'RET', 'DIS', 'LDXA', 'STXD', 'ADC', 'SDB', 'SHRC', 'RSHR', 'SMB',
'SAV', 'MARK', 'REQ', 'SEQ', 'ADCI', 'SDBI', 'SHLC', 'RSHL', 'SMBI',
'GLO', 'GHI', 'PLO', 'PHI', 'LBO', 'LBZ', 'LBDF', 'NOP', 'LSNO',
'LSNZ', 'LSNF', 'LSKP', 'NLBR', 'LBNQ', 'LBNZ', 'LBNF', 'LSIE', 'LSQ',
'LSZ', 'LSDF', 'SEP', 'SEX', 'LDX', 'OR', 'AND', 'XOR', 'ADD', 'SD',
'SHR', 'SM', 'LDI', 'ORI', 'ANI', 'XRI', 'ADI', 'SDI', 'SHL', 'SMI'
]
registers_1802 = []
opcodes_6502 = [
'ADC', 'AND', 'ASL', 'AST',
'BCC', 'BCS', 'BEQ', 'BIT', 'BMI', 'BNE', 'BPL', 'BRK', 'BVC', 'BVS',
'CLC', 'CLD', 'CLI', 'CLV', 'CMP', 'CPR', 'CPX', 'CPY',
'DEC', 'DEX', 'DEY',
'EOR',
'INC', 'INX', 'INY',
'JMP', 'JSR',
'LDA', 'LDX', 'LDY', 'LSR',
'NOP',
'ORA',
'PHA', 'PHP', 'PLA', 'PLP',
'ROL', 'ROR', 'RTI', 'RTS',
'SBC', 'SEC', 'SED', 'SEI', 'STA', 'STX', 'STY',
'TAX', 'TAY', 'TSX', 'TXA', 'TXS', 'TYA'
]
registers_6502 = ['A', 'X', 'Y', 'P', 'S']
opcodes_6800 = [
'ABA', 'ADC', 'ADCA', 'ADCB', 'ADD', 'AND', 'ASL', 'ASR',
'BCC', 'BCS', 'BEQ', 'BGE', 'BGT', 'BHI', 'BIT', 'BLE', 'BLS', 'BLT', 'BMI', 'BNE', 'BPL', 'BRA', 'BSR', 'BVC', 'BVS',
'CBA', 'CLC', 'CLI', 'CLR', 'CLRA', 'CLRB', 'CLV', 'CMP', 'COM', 'CPX',
'DAA', 'DEC', 'DES', 'DEX',
'EOR', 'EORA', 'EORB',
'INC', 'INS', 'INX',
'JMP', 'JSR',
'LDA', 'LDAA', 'LDAB', 'LDS', 'LDX', 'LSR',
'NEG', 'NOP',
'ORA',
'PSH', 'PUL',
'ROL', 'ROR', 'RTI', 'RTS',
'SBA', 'SBC', 'SEC', 'SEI', 'SEV', 'STA', 'STAA', 'STAB', 'STS', 'STX', 'SUB', 'SWI',
'TAB', 'TAP', 'TBA', 'TPA', 'TST', 'TSX', 'TXS',
'WAI'
]
registers_6800 = ['A', 'B', 'IX', 'PC', 'SP']
opcodes_68000 = [
'AND', 'ANDI', 'EOR', 'EORI', 'NOT', 'OR', 'ORI', 'CLR',
'BCHG', 'BCLR', 'BSET', 'BTST', 'EXT', 'EXTB',
'MOVE', 'MOVEA', 'MOVEM', 'MOVEP', 'MOVEQ',
'CMP', 'CMPA', 'CMPI', 'CMPM', 'CMP2',
'LEA', 'PEA', 'TAS', 'CHK',
'ADD', 'ADDA', 'ADDI', 'ADDQ', 'ADDX',
'SUB', 'SUBA', 'SUBI', 'SUBQ', 'SUBX',
'MULS', 'MULU', 'DIVS', 'DIVU', 'NEG', 'NEGX',
'ASL', 'ASR', 'LSL', 'LSR', 'ROL', 'ROR', 'ROXL', 'ROXR',
'DBCC', 'SWAP', 'TST',
'ANDB', 'ANDIB', 'EORB', 'EORIB', 'NOTB', 'ORB', 'ORIB', 'CLRB',
'BCHGB', 'BCLRB', 'BSETB', 'BTSTB', 'EXTB', 'EXTBB',
'MOVEB', 'MOVEAB', 'MOVEMB', 'MOVEPB', 'MOVEQB',
'CMPB', 'CMPAB', 'CMPIB', 'CMPMB', 'CMP2B',
'LEAB', 'PEAB', 'TASB', 'CHKB',
'ADDB', 'ADDAB', 'ADDIB', 'ADDQB', 'ADDXB',
'SUBB', 'SUBAB', 'SUBIB', 'SUBQB', 'SUBXB',
'MULSB', 'MULUB', 'DIVSB', 'DIVUB', 'NEGB', 'NEGXB',
'ASLB', 'ASRB', 'LSLB', 'LSRB', 'ROLB', 'RORB', 'ROXLB', 'ROXRB',
'DBCCB', 'SWAPB', 'TSTB',
'ANDW', 'ANDIW', 'EORW', 'EORIW', 'NOTW', 'ORW', 'ORIW', 'CLRW',
'BCHGW', 'BCLRW', 'BSETW', 'BTSTW', 'EXTW', 'EXTBW',
'MOVEW', 'MOVEAW', 'MOVEMW', 'MOVEPW', 'MOVEQW',
'CMPW', 'CMPAW', 'CMPIW', 'CMPMW', 'CMP2W',
'LEAW', 'PEAW', 'TASW', 'CHKW',
'ADDW', 'ADDAW', 'ADDIW', 'ADDQW', 'ADDXW',
'SUBW', 'SUBAW', 'SUBIW', 'SUBQW', 'SUBXW',
'MULSW', 'MULUW', 'DIVSW', 'DIVUW', 'NEGW', 'NEGXW',
'ASLW', 'ASRW', 'LSLW', 'LSRW', 'ROLW', 'RORW', 'ROXLW', 'ROXRW',
'DBCCW', 'SWAPW', 'TSTW',
'ANDL', 'ANDIL', 'EORL', 'EORIL', 'NOTL', 'ORL', 'ORIL', 'CLRL',
'BCHGL', 'BCLRL', 'BSETL', 'BTSTL', 'EXTL', 'EXTBL',
'MOVEL', 'MOVEAL', 'MOVEML', 'MOVEPL', 'MOVEQL',
'CMPL', 'CMPAL', 'CMPIL', 'CMPML', 'CMP2L',
'LEAL', 'PEAL', 'TASL', 'CHKL',
'ADDL', 'ADDAL', 'ADDIL', 'ADDQL', 'ADDXL',
'SUBL', 'SUBAL', 'SUBIL', 'SUBQL', 'SUBXL',
'MULSL', 'MULUL', 'DIVSL', 'DIVUL', 'NEGL', 'NEGXL',
'ASLL', 'ASRL', 'LSLL', 'LSRL', 'ROLL', 'RORL', 'ROXLL', 'ROXRL',
'DBCCL', 'SWAPL', 'TSTL',
'ABCD', 'NBCD', 'PACK', 'SBCD', 'UNPK',
'BSR', 'BRA', 'BT', 'BF',
'BEQ', 'BNE', 'BLS', 'BLT', 'BLE', 'BGT', 'BGE',
'BCC', 'BCS', 'BPL', 'BMI', 'BHI', 'BVC', 'BVS',
'BSRS', 'BRAS', 'BEQS', 'BNES', 'BLSS', 'BLTS', 'BLES', 'BGTS', 'BGES',
'BCCS', 'BCSS', 'BPLS', 'BMIS', 'BHIS', 'BVCS', 'BVSS',
'DBSR', 'DBRA', 'DBT', 'DBF',
'DBEQ', 'DBNE', 'DBLS', 'DBLT', 'DBLE', 'DBGT', 'DBGE',
'DBCC', 'DBCS', 'DBPL', 'DBMI', 'DBHI', 'DBVC', 'DBVS',
'JSR', 'JMP',
'TRAP', 'HALT', 'STOP',
'RTD', 'RTE', 'RTR', 'RTS',
'TRAP', 'HALT', 'STOP', 'NOP', 'MOVE16', 'EXG',
'BFCHG', 'BFCLR', 'BFEXTS', 'BFEXTU', 'BFFFO', 'BFINS', 'BFSET', 'BFTST',
'FNOP', 'FABS', 'FACOS', 'FASIN', 'FATAN', 'FCOS', 'FCOSH', 'FETOX',
'FETOXM1', 'FGETMAN', 'FINT', 'FINTRZ', 'FLOGN', 'FLOGNP1', 'FLOG10',
'FLOG2', 'FNEG', 'FSIN', 'FSINH', 'FSQRT', 'FTAN', 'FTANH',
'FTENTOX', 'FTWOTOX', 'FTST',
'DSB', 'DSW', 'DSL', 'DCB', 'DCW', 'DCL',
'AND.B', 'ANDI.B', 'EOR.B', 'EORI.B', 'NOT.B', 'OR.B', 'ORI.B', 'CLR.B',
'BCHG.B', 'BCLR.B', 'BSET.B', 'BTST.B', 'EXT.B', 'EXTB.B',
'MOVE.B', 'MOVEA.B', 'MOVEM.B', 'MOVEP.B', 'MOVEQ.B',
'CMP.B', 'CMPA.B', 'CMPI.B', 'CMPM.B', 'CMP2.B',
'LEA.B', 'PEA.B', 'TAS.B', 'CHK.B',
'ADD.B', 'ADDA.B', 'ADDI.B', 'ADDQ.B', 'ADDX.B',
'SUB.B', 'SUBA.B', 'SUBI.B', 'SUBQ.B', 'SUBX.B',
'MULS.B', 'MULU.B', 'DIVS.B', 'DIVU.B', 'NEG.B', 'NEGX.B',
'ASL.B', 'ASR.B', 'LSL.B', 'LSR.B', 'ROL.B', 'ROR.B', 'ROXL.B', 'ROXR.B',
'DBCC.B', 'SWAP.B', 'TST.B',
'AND.W', 'ANDI.W', 'EOR.W', 'EORI.W', 'NOT.W', 'OR.W', 'ORI.W', 'CLR.W',
'BCHG.W', 'BCLR.W', 'BSET.W', 'BTST.W', 'EXT.W', 'EXTB.W',
'MOVE.W', 'MOVEA.W', 'MOVEM.W', 'MOVEP.W', 'MOVEQ.W',
'CMP.W', 'CMPA.W', 'CMPI.W', 'CMPM.W', 'CMP2.W',
'LEA.W', 'PEA.W', 'TAS.W', 'CHK.W',
'ADD.W', 'ADDA.W', 'ADDI.W', 'ADDQ.W', 'ADDX.W',
'SUB.W', 'SUBA.W', 'SUBI.W', 'SUBQ.W', 'SUBX.W',
'MULS.W', 'MULU.W', 'DIVS.W', 'DIVU.W', 'NEG.W', 'NEGX.W',
'ASL.W', 'ASR.W', 'LSL.W', 'LSR.W', 'ROL.W', 'ROR.W', 'ROXL.W', 'ROXR.W',
'DBCC.W', 'SWAP.W', 'TST.W',
'AND.L', 'ANDI.L', 'EOR.L', 'EORI.L', 'NOT.L', 'OR.L', 'ORI.L', 'CLR.L',
'BCHG.L', 'BCLR.L', 'BSET.L', 'BTST.L', 'EXT.L', 'EXTB.L',
'MOVE.L', 'MOVEA.L', 'MOVEM.L', 'MOVEP.L', 'MOVEQ.L',
'CMP.L', 'CMPA.L', 'CMPI.L', 'CMPM.L', 'CMP2.L',
'LEA.L', 'PEA.L', 'TAS.L', 'CHK.L',
'ADD.L', 'ADDA.L', 'ADDI.L', 'ADDQ.L', 'ADDX.L',
'SUB.L', 'SUBA.L', 'SUBI.L', 'SUBQ.L', 'SUBX.L',
'MULS.L', 'MULU.L', 'DIVS.L', 'DIVU.L', 'NEG.L', 'NEGX.L',
'ASL.L', 'ASR.L', 'LSL.L', 'LSR.L', 'ROL.L', 'ROR.L', 'ROXL.L', 'ROXR.L',
'DBCC.L', 'SWAP.L', 'TST.L',
'BSR.S', 'BRA.S', 'BT.S', 'BF.S',
'BEQ.S', 'BNE.S', 'BLS.S', 'BLT.S', 'BLE.S', 'BGT.S', 'BGE.S',
'BCC.S', 'BCS.S', 'BPL.S', 'BMI.S', 'BHI.S', 'BVC.S', 'BVS.S',
'DS.B', 'DS.W', 'DS.L', 'DC.B', 'DC.W', 'DC.L'
]
registers_68000 = [
'D0', 'D1', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7',
'A0', 'A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7',
'FP0', 'FP1', 'FP2', 'FP3', 'FP4', 'FP5', 'FP6', 'FP7',
'PC', 'SR'
]
opcodes_8080 = [
'ACI', 'ADC', 'ADD', 'ADI', 'ANA', 'ANI',
'CALL', 'CC', 'CM', 'CMA', 'CMC', 'CMP', 'CNC', 'CNZ', 'CP', 'CPE', 'CPI',
'CPO', 'CZ',
'DAA', 'DAD', 'DCR', 'DCX', 'DI',
'EI',
'HLT',
'IN', 'INR', 'INX',
'JC', 'JM', 'JMP', 'JNC', 'JNZ', 'JP', 'JPE', 'JPO', 'JZ',
'LDAX', 'LHLD', 'LXI',
'MOV', 'MVI',
'NOP',
'ORA', 'ORI', 'OUT',
'PCHL', 'POP', 'PUSH',
'RAL', 'RAR', 'RC', 'RIM', 'RLC', 'RET', 'RM', 'RNC', 'RNZ', 'RP', 'RPE',
'RPO', 'RRC', 'RST', 'RZ',
'SBB', 'SBI', 'SHLD', 'SIM', 'SPHL', 'STA', 'STC', 'STAX', 'SUB', 'SUI',
'XCHG', 'XRA', 'XRI', 'XTHL',
]
registers_8080 = [
'A', 'B', 'C', 'D', 'E', 'H', 'L', 'M', 'PSW', 'F'
]
opcodes_z80 = [
'ADC', 'ADD', 'AND',
'BIT',
'CALL', 'CCF', 'CP', 'CPD', 'CPDR', 'CPI', 'CPIR', 'CPL',
'DAA', 'DEC', 'DI', 'DJNZ',
'EI', 'EX', 'EXX',
'HALT',
'IM', 'IN', 'INC', 'IND', 'INDR', 'INI', 'INIR',
'JP', 'JR',
'LD', 'LDD', 'LDDR', 'LDI', 'LDIR',
'NEG', 'NOP',
'OR', 'OTDR', 'OTIR', 'OUT', 'OUTD', 'OUTI',
'POP', 'PUSH',
'RES', 'RET', 'RETI', 'RETN', 'RL', 'RLA', 'RLC', 'RLCA', 'RLD',
'RR', 'RRA', 'RRC', 'RRCA', 'RRD', 'RST',
'SBC', 'SCF', 'SET', 'SLA', 'SRA', 'SRL', 'SUB',
'XOR'
]
registers_z80 = [
'A', 'B', 'C', 'D', 'E', 'H', 'L', 'F', 'AF', 'BC', 'DE', 'HL',
"A'", "B'", "C'", "D'", "E'", "H'", "L'", "AF'", "F'", "BC'", "DE'", "HL'",
'IX', 'IY', 'PSW', 'M'
]
opcodes_8086 = [
'AAA', 'AAD', 'AAM', 'AAS', 'ADC', 'ADD', 'AND',
'CALL', 'CBW', 'CLC', 'CLD', 'CLI', 'CMC', 'CMP', 'CMPS', 'CMPSB', 'CMPW', 'CMPXCHG', 'CWD',
'DAA', 'DAS', 'DEC', 'DIV',
'ESC',
'FWAIT',
'F2XM1', 'FABS', 'FADD', 'FADDP', 'FBLD', 'FBSTP', 'FCHS', 'FCLEX', 'FCOM', 'FCOMP',
'FCOMPP', 'FCOS', 'FDECSTP', 'FDISI', 'FDIV', 'FDIVP', 'FDIVR', 'FDIVRP',
'FENI', 'FFREE', 'FIADD', 'FICOM', 'FICOMP', 'FIDIV', 'FIDIVR', 'FILD',
'FIMUL', 'FINCSTP', 'FINIT', 'FIST', 'FISTP', 'FISUB', 'FISUBR', 'FLD', 'FLD1',
'FLDCW', 'FLDENV', 'FLDL2E', 'FLDL2T', 'FLDLG2', 'FLDLN2', 'FLDPI',
'FLDZ', 'FMUL', 'FMULP', 'FNCLEX', 'FNDISI', 'FNENI', 'FNINIT', 'FNOP', 'FNSAVE',
'FNSTCW', 'FNSTENV', 'FNSTSW', 'FPATAN', 'FPREM', 'FPREM1', 'FPTAN', 'FRNDINT',
'FRSTOR', 'FSAVE', 'FSCALE', 'FSETPM', 'FSIN', 'FSINCOS', 'FSQRT', 'FST', 'FSTCW',
'FSTENV', 'FSTP', 'FSTSW', 'FSUB', 'FSUBP', 'FSUBRP', 'FTST', 'FUCOM', 'FUCOMP',
'FUCOMPP', 'FXAM', 'FXCH', 'FXTRACT', 'FYL2X', 'FYL2XP1',
'HLT',
'IDIV', 'IMUL', 'IN', 'INC', 'INT', 'INTO', 'INVD', 'IRET', 'IRETD',
'JA', 'JAE', 'JB', 'JBE', 'JC', 'JCXZ', 'JE', 'JECXZ', 'JG', 'JGE', 'JL', 'JLE', 'JMP', 'JNA', 'JNAE', 'JNB', 'JNBE', 'JNC', 'JNE', 'JNG', 'JNGE', 'JNL', 'JNLE', 'JNO', 'JNP', 'JNS', 'JO', 'JP', 'JPE', 'JPO', 'JNZ', 'JS', 'JZ',
'LAHF', 'LAR', 'LDS', 'LEA', 'LES', 'LOCK', 'LODS', 'LODSB', 'LODSW', 'LOOP', 'LOOPE', 'LOOPNE', 'LOOPNZ', 'LOOPZ',
'MOV', 'MOVS', 'MOVSB', 'MOVSW', 'MUL',
'NEG', 'NOP', 'NOT',
'OR', 'OUT',
'POP', 'POPF', 'POPFD', 'PUSH', 'PUSHF', 'PUSHFD',
'RCL', 'RCR', 'REP', 'REPE', 'REPNE', 'REPNZ', 'REPZ', 'RET', 'RETF', 'ROL', 'ROR',
'SAHF', 'SAL', 'SAR', 'SBB', 'SCAS', 'SCASB', 'SCASW', 'SHL', 'SHR', 'STC', 'STD', 'STI', 'STOS', 'STOSB', 'STOSW', 'SUB',
'TEST',
'WAIT', 'WBINVD',
'XCHG', 'XLAT', 'XLATB', 'XOR',
]
registers_8086 = [
'AL', 'AH', 'BL', 'BH', 'CL', 'CH', 'DL', 'DH',
'AX', 'BX', 'CX', 'DX', 'CS', 'DS', 'SS', 'ES',
'IP', 'SI', 'DI', 'BP', 'SP', 'FLAGS'
]
opcodes_80186 = [
'BOUND',
'ENTER',
'INS',
'LEAVE',
'OUTS',
'POPA', 'POPAD', 'PUSHA', 'PUSHAD'
]
opcodes_80286 = [
'ARPL',
'CLTS',
'LGDT', 'LIDT', 'LLDT', 'LMSW', 'LSL', 'LSS',
'SGDT', 'SIDT', 'SLDT', 'SMSW', 'STR',
'VERR', 'VERW'
]
registers_80286 = [
'TR'
]
opcodes_80386 = [
'BSF', 'BSR', 'BT', 'BTC', 'BTR', 'BTS',
'CDQ', 'CWDE',
'LFS', 'LGS', 'LSS',
'MOVSX', 'MOVZX',
'SETAE', 'SETB', 'SETC', 'SETNAE', 'SETNB', 'SETNE', 'SETNZ', 'SETG', 'SETGE', 'SETL', 'SETLE', 'SETNC', 'SETNG', 'SETNGE', 'SETNL', 'SETNLE', 'SETNO', 'SETNP', 'SETNS', 'SETE', 'SETO', 'SETP', 'SETPE', 'SETPO', 'SETS', 'SETZ',
'SHLD', 'SHRD'
]
registers_80386 = [
'EAX', 'EBX', 'ECX', 'EDX', 'ESI', 'EDI', 'EBP', 'ESP',
'FS', 'GS', 'EFLAGS'
]
opcodes_80486 = [
'BSWAP',
'INVLPG'
]
opcodes_pdp8 = [
'AND', 'TAD', 'ISZ', 'DCA', 'JMS', 'JMP',
'CDF', 'CIF', 'RDF', 'RIF', 'RIB', 'RMF',
'CLA', 'CLL', 'CMA', 'CML', 'IAC', 'RAR', 'RAL', 'RTR', 'RTL', 'BSW',
'SMA', 'SZA', 'SNL', 'SPA', 'SNA', 'SZL', 'OSR', 'HLT', 'MQA', 'MQL',
'SEL', 'LCD', 'XDR', 'STR', 'SER', 'SDN', 'INTR', 'INIT',
'DILC', 'DICD', 'DISD', 'DILX', 'DILY', 'DIXY', 'DILE', 'DIRE',
'RCSF', 'RCRA', 'RCRB', 'RCNO', 'RCRC', 'RCNI', 'RCSD', 'RCSE',
'RCRD', 'RCSI', 'RCTF',
'RPE', 'RSF', 'RRB', 'RFC', 'PCE', 'PSF', 'PCF', 'PPC', 'PLS',
'KCF', 'KSF', 'KCC', 'KRS', 'KIE', 'KRB', 'TFL', 'TSF', 'TCF',
'TPC', 'TSK', 'TLS'
]
opcodes_pdp11 = [
'CLR', 'CLRB', 'COM', 'COMB', 'INC', 'INCB', 'DEC', 'DECB', 'NEG', 'NEGB',
'NOP', 'TST', 'TSTB', 'TSTSET', 'WRTLCK', 'ASR', 'ASRB', 'ASL', 'ASLB',
'ROR', 'RORB', 'ROL', 'ROLB', 'SWAB', 'ADC', 'ADCB', 'SBC', 'SBCB', 'SXT',
'MOV', 'MOVB', 'ADD', 'SUB', 'CMP', 'CMPB', 'ASH', 'ASHC',
'MUL', 'DIV', 'BIT', 'BITB', 'BIC', 'BICB', 'BIS', 'BISB',
'XOR', 'CLR', 'CLRB', 'BR', 'BNE', 'BPL', 'BEQ', 'BMI', 'BVC',
'BVS', 'BCC', 'BCS', 'BGE', 'BLT', 'BGT', 'BLE', 'SOB', 'BHI',
'BLOS', 'BHIS', 'BLO',
'JMP', 'JSR', 'RTS', 'MARK', 'EMT', 'TRAP', 'BPT', 'IOT', 'CSM',
'RTI', 'RTT', 'HALT', 'WAIT', 'RESET',
'MTPD', 'MTPI', 'MFPD', 'MTPS', 'MFPS', 'MFPT',
'CLC', 'CLV', 'CLZ', 'CLN', 'CCC', 'SEC', 'SEV', 'SEZ', 'SEN', 'SCC',
'FADD', 'FSUB', 'FMUL', 'FDIV',
'DIV', 'MUL'
]
registers_pdp11 = [
'r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7'
]
opcodes = []
registers = []
if processor in ['1802']:
opcodes += opcodes_1802
registers += registers_1802
if processor in ['6502']:
opcodes += opcodes_6502
registers += registers_6502
if processor in ['6800']:
opcodes += opcodes_6800
registers += registers_6800
if processor in ['68000']:
opcodes += opcodes_68000
registers += registers_68000
if processor in ['8080']:
opcodes += opcodes_8080
registers += registers_8080
if processor in ['z80']:
opcodes += opcodes_z80
registers += registers_z80
if processor in ['8086', '80186', '80286', '80386', '80486']:
opcodes += opcodes_8086
registers += registers_8086
if processor in ['80286', '80386', '80486']:
opcodes += opcodes_80186
opcodes += opcodes_80286
registers += registers_80286
if processor in ['80386', '80486']:
opcodes += opcodes_80386
registers += registers_80386
if processor in ['80486']:
opcodes += opcodes_80486
if processor in ['pdp-8']:
opcodes += opcodes_pdp8
# registers += registers_pdp8
if processor in ['pdp-11']:
opcodes += opcodes_pdp11
registers += registers_pdp11
opcode_tb = CaseInsensitiveListTokenBuilder(opcodes, 'keyword', False)
register_tb = CaseInsensitiveListTokenBuilder(registers, 'register', True)
values = ['*', '$', '.']
values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
operand_types.append('value')
invalid_token_builder = InvalidTokenBuilder()
tokenbuilders = [
newline_tb,
whitespace_tb,
stmt_separator_tb,
integer_tb,
integer_exponent_tb,
integer_1_tb,
integer_2_tb,
prefixed_integer_tb,
hex_integer_1_tb,
hex_integer_2_tb,
hex_integer_3_tb,
hex_integer_4_tb,
hash_quote_value_tb,
values_tb,
groupers_tb,
register_tb,
opcode_tb,
directive_tb,
title_directive_tb,
title_directive_2_tb,
subtitle_directive_tb,
subtitle_directive_2_tb,
subtitle_directive_3_tb,
include_directive_tb,
include_directive_2_tb,
multiline_comment_tb,
preprocessor_tb,
identifier_tb,
label_tb,
string_tb,
comment_tb,
comment_2_tb,
line_comment_star_tb,
line_comment_hash_tb,
known_operator_tb,
self.unknown_operator_tb,
invalid_token_builder
]
opcode_tokenbuilders = [
opcode_tb,
directive_tb,
title_directive_tb,
subtitle_directive_tb,
include_directive_tb,
preprocessor_tb,
invalid_token_builder
]
args_tokenbuilders = [
integer_tb,
integer_exponent_tb,
hex_integer_1_tb,
hex_integer_2_tb,
hex_integer_3_tb,
hex_integer_4_tb,
values_tb,
groupers_tb,
known_operator_tb,
register_tb,
identifier_tb,
label_tb,
string_tb,
comment_tb,
line_comment_star_tb,
line_comment_hash_tb,
self.unknown_operator_tb,
invalid_token_builder
]
tokenizer = Tokenizer(tokenbuilders)
opcode_tokenizer = Tokenizer(opcode_tokenbuilders)
args_tokenizer = Tokenizer(args_tokenbuilders)
# tokenize as free-format
tokens_free = tokenizer.tokenize(code)
tokens_free = Examiner.combine_adjacent_identical_tokens(tokens_free, 'invalid operator')
tokens_free = Examiner.combine_adjacent_identical_tokens(tokens_free, 'invalid')
tokens_free = Examiner.combine_identifier_colon(tokens_free, ['newline'], [], [])
tokens_free = Tokenizer.combine_number_and_adjacent_identifier(tokens_free)
tokens_free = Examiner.convert_values_to_operators(tokens_free, known_operators)
self.tokens = tokens_free
self.convert_asm_identifiers_to_labels()
self.convert_asm_keywords_to_operators()
self.convert_asm_keywords_to_identifiers()
self.calc_statistics()
statistics_free = self.statistics
self.statistics = {}
self.calc_confidences(operand_types, group_starts, group_mids, group_ends, None)
self.calc_line_length_confidence(code, self.max_expected_line)
confidences_free = self.confidences
self.confidences = {}
errors_free = self.errors
self.errors = []
if processor in ['pdp-8', 'pdp-11']:
# do not try space-format; it is never used for these processors
tokens_space = []
statistics_space = {}
confidences_space = {}
errors_space = []
else:
# tokenize as space-format
opcode_extras = '.&=,()+-*/'
label_leads = '.&$@#'
label_mids = '.&$#@_'
label_ends = ':'
comment_leads = '*;'
line_comment_leads = ''
use_line_id = False
tokens_space, indents = Tokenizer.tokenize_asm_code(code, tab_size, opcode_tokenizer, opcode_extras, args_tokenizer, label_leads, label_mids, label_ends, comment_leads, line_comment_leads, use_line_id)
tokens_space = Examiner.combine_adjacent_identical_tokens(tokens_space, 'invalid operator')
tokens_space = Examiner.combine_adjacent_identical_tokens(tokens_space, 'invalid')
tokens_space = Examiner.combine_identifier_colon(tokens_space, ['newline'], [], [])
tokens_space = Tokenizer.combine_number_and_adjacent_identifier(tokens_space)
tokens_space = Examiner.convert_values_to_operators(tokens_space, known_operators)
self.tokens = tokens_space
self.convert_asm_identifiers_to_labels()
self.calc_statistics()
statistics_space = self.statistics
self.statistics = {}
self.calc_confidences(operand_types, group_starts, group_mids, group_ends, indents)
self.calc_line_length_confidence(code, self.max_expected_line)
confidences_space = self.confidences
self.confidences = {}
errors_space = self.errors
self.errors = []
# compute confidence for free-format and spaced-format
confidence_free = 1.0
if len(confidences_free) == 0:
confidence_free = 0.0
else:
for key in confidences_free:
factor = confidences_free[key]
confidence_free *= factor
confidence_space = 1.0
if len(confidences_space) == 0:
confidence_space = 0.0
else:
for key in confidences_space:
factor = confidences_space[key]
confidence_space *= factor
# select the better of free-format and spaced-format
if confidence_space > confidence_free:
self.tokens = tokens_space
self.statistics = statistics_space
self.confidences = confidences_space
self.errors = errors_space
else:
self.tokens = tokens_free
self.statistics = statistics_free
self.confidences = confidences_free
self.errors = errors_free
# combine a number followed by an identifier into a single identifier
@staticmethod
def combine_number_and_adjacent_identifier(tokens):
new_list = []
new_token = None
for token in tokens:
if token.group == 'identifier' and \
new_token is not None and new_token.group == 'number':
new_token = Token(new_token.text + token.text, 'identifier', True)
else:
if new_token is not None:
new_list.append(new_token)
new_token = token
if new_token is not None:
new_list.append(new_token)
return new_list
```
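The examiner above is constructed from raw source text, a tab size, and a processor name, and it records its results in `tokens`, `statistics`, and `confidences`. A minimal usage sketch, assuming only what the constructor and the attribute assignments above show (the 8080 snippet itself is invented):

```python
# Hypothetical driver for AssemblyExaminer; the sample source is invented.
from assembly_examiner import AssemblyExaminer

code = (
    "        ORG  0100H\n"
    "START:  MVI  A, 01H   ; load the accumulator\n"
    "        HLT\n"
)

examiner = AssemblyExaminer(code, tab_size=8, processor='8080')
print(examiner.confidences)  # per-check confidence factors for the chosen format
print(examiner.statistics)   # token-group counts gathered by calc_statistics()
```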
#### File: jfitz/code-stat/assembly_token_builders.py
```python
from string import punctuation
from codestat_token import Token
from token_builders import TokenBuilder
# token reader for comment
class AssemblyCommentTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self, legals):
self.legals = legals
self.text = ''
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'comment', False)]
def accept(self, candidate, c):
if c in ['\n', '\r']:
return False
if candidate == '':
return c in self.legals
return True
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
if len(line_printable_tokens) > 0:
return 0
if self.text[0] in self.legals:
return len(self.text)
return 0
# token reader for label
class LabelTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self, lead_extras, extras, suffixes):
self.lead_extras = lead_extras
self.suffixes = suffixes
self.extras = extras
self.text = None
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'label', True)]
def accept(self, candidate, c):
if len(candidate) == 0:
return c.isalpha() or c in self.lead_extras
if len(candidate) > 1 and candidate[-1] in self.suffixes:
return False
return c.isalpha() or c.isdigit() or c in self.extras or c in self.suffixes
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
if len(line_printable_tokens) > 0:
return 0
if self.text[-1] in self.suffixes:
return len(self.text)
return 0
# token reader for multiline comment (COMMENT delimiter ... delimiter)
class MultilineCommentTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self):
self.prefix = 'comment'
self.text = ''
def attempt(self, text, start):
self.text = None
len_text = len(text)
n1 = len(self.prefix)
n2 = n1 + start
t3 = text[start:n2].lower()
if t3 != self.prefix:
return
# end of prefix
index = start + len(self.prefix)
if index >= len_text:
return
# at least one space
n_spaces = 0
while index < len_text and text[index] in [' ', '\t']:
index += 1
n_spaces += 1
# if no spaces, return
if n_spaces == 0:
return
# at least one nonspace
delimiter = ''
while index < len_text and not text[index].isspace():
delimiter += text[index]
index += 1
# if no nonspace characters, return
if len(delimiter) == 0:
return
# find delimiter text after index
e = text.find(delimiter, index)
# if not found, return
if e == -1:
return
# extract all text as comment token
end = e + len(delimiter)
self.text = text[start:end]
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'comment', True)]
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
return len(self.text)
# token reader for identifier
class IbmAsmIdentifierTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self, lead_extras, extras):
self.lead_extras = lead_extras
self.extras = extras
self.text = None
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'identifier', True)]
def accept(self, candidate, c):
if len(candidate) == 0:
return c.isalpha() or c in self.lead_extras
return c.isalpha() or c.isdigit() or c in self.extras
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
# if at least one alpha character
for i in self.text:
if i.isalpha():
return len(self.text)
return 0
# token reader for #'c character value (PDP-11 style)
class HashQuoteCharTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self):
self.text = ''
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'value', True)]
def accept(self, candidate, c):
if candidate == '':
return c == '#'
if candidate == '#':
return c in punctuation
return len(candidate) == 2
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
if len(self.text) != 3:
return 0
return len(self.text)
```
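These builders all follow the same contract: `accept()` is called one character at a time while a candidate token grows, and `get_score()` decides whether the finished text should win against competing builders. A hand-driven sketch for `LabelTokenBuilder` follows; in the real pipeline the Tokenizer performs this loop and sets the builder's text, so assigning `builder.text` here is only for illustration.

```python
# Feed characters to LabelTokenBuilder until it rejects one.
from assembly_token_builders import LabelTokenBuilder

builder = LabelTokenBuilder('_.$@#', '_.$@#', ':')

candidate = ''
for ch in 'START: MVI A, 1':
    if not builder.accept(candidate, ch):
        break
    candidate += ch

builder.text = candidate
print(candidate)              # -> 'START:'
print(builder.get_score([]))  # -> 6: ends with ':' and nothing printable precedes it
```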
#### File: jfitz/code-stat/coffeescript_examiner.py
```python
import string
import math
from codestat_tokenizer import Tokenizer
from token_builders import (
InvalidTokenBuilder,
WhitespaceTokenBuilder,
NewlineTokenBuilder,
EscapedStringTokenBuilder,
IntegerTokenBuilder,
SuffixedIntegerTokenBuilder,
IntegerExponentTokenBuilder,
RealTokenBuilder,
RealExponentTokenBuilder,
IdentifierTokenBuilder,
CaseInsensitiveListTokenBuilder,
CaseSensitiveListTokenBuilder,
SingleCharacterTokenBuilder,
PrefixedIntegerTokenBuilder,
RegexTokenBuilder,
LeadToEndOfLineTokenBuilder
)
from examiner import Examiner
class CoffeeScriptExaminer(Examiner):
@staticmethod
def __escape_z__():
InvalidTokenBuilder.__escape_z__()
WhitespaceTokenBuilder.__escape_z__()
NewlineTokenBuilder.__escape_z__()
EscapedStringTokenBuilder.__escape_z__()
IntegerTokenBuilder.__escape_z__()
SuffixedIntegerTokenBuilder.__escape_z__()
IntegerExponentTokenBuilder.__escape_z__()
RealTokenBuilder.__escape_z__()
RealExponentTokenBuilder.__escape_z__()
IdentifierTokenBuilder.__escape_z__()
CaseInsensitiveListTokenBuilder.__escape_z__()
CaseSensitiveListTokenBuilder.__escape_z__()
SingleCharacterTokenBuilder.__escape_z__()
PrefixedIntegerTokenBuilder.__escape_z__()
RegexTokenBuilder.__escape_z__()
LeadToEndOfLineTokenBuilder.__escape_z__()
return 'Escape ?Z'
def __init__(self, code):
super().__init__()
operand_types = []
whitespace_tb = WhitespaceTokenBuilder()
newline_tb = NewlineTokenBuilder()
integer_tb = IntegerTokenBuilder(None)
integer_exponent_tb = IntegerExponentTokenBuilder(None)
big_integer_tb = SuffixedIntegerTokenBuilder(['n', 'N'], False, '_')
real_tb = RealTokenBuilder(False, False, None)
real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', None)
hex_constant_tb = PrefixedIntegerTokenBuilder('0X', False, '0123456789ABCDEFabcdef')
octal_constant_tb = PrefixedIntegerTokenBuilder('0O', False, '01234567')
binary_constant_tb = PrefixedIntegerTokenBuilder('0B', False, '01')
operand_types.append('number')
leads = '_'
extras = '_'
identifier_tb = IdentifierTokenBuilder(leads, extras)
dollar_sign_tb = SingleCharacterTokenBuilder('$', 'identifier', False)
operand_types.append('identifier')
quotes = ['"', "'", "’"]
string_tb = EscapedStringTokenBuilder(quotes, 0)
template_string_tb = EscapedStringTokenBuilder(['`'], 10)
operand_types.append('string')
comment_tb = LeadToEndOfLineTokenBuilder('#', False, 'comment')
terminators_tb = SingleCharacterTokenBuilder(';', 'statement terminator', False)
known_operators = [
'+', '-', '*', '/', '%',
'=', '==', '!=', '===', '!==', '>', '>=', '<', '<=',
'+=', '-=', '*=', '/=', '%=', '**=', '&=', '|=', '^=', '<<=', '>>=',
'!', '&', '|', '~', '<<', '>>', '>>>', '>>>=',
'^', '**',
'->', '=>',
'.', ':', '...',
'++', '--', '&&', '||',
'?', '?=', '?.',
'in', 'of',
'is', 'isnt',
'and', 'or', 'not',
'@', '//', '%%', '::',
'new', 'delete'
]
known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)
self.unary_operators = [
'+', '-',
'!', '~',
'++', '--', ':',
'not',
'->', '=>',
'.', '@',
'new', 'delete'
]
self.postfix_operators = [
'++', '--', ':', '...'
]
groupers = ['(', ')', ',', '[', ']', '{', '}']
# group_starts = ['(', '[', ',', '{']
group_mids = [',']
group_ends = [')', ']', '}']
groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
regex_tb = RegexTokenBuilder()
operand_types.append('regex')
keywords = [
'for', 'while', 'loop', 'by',
'break', 'continue',
'if', 'then', 'else', 'unless',
'switch', 'when', 'default',
'return',
'do',
'throw', 'try', 'catch', 'finally',
'class', 'extends', 'typeof', 'instanceof',
'await', 'defer', 'yield',
'export', 'import', 'package', 'let',
'case',
'debugger',
'function', 'var', 'with',
'private', 'protected', 'public', 'native', 'static', 'const',
'implements', 'interface', 'enum'
]
keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)
values = [
'true', 'yes', 'on', 'false', 'no', 'off',
'super', 'this', 'arguments',
'null', 'undefined', 'Infinity', 'NaN'
]
values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
operand_types.append('value')
invalid_token_builder = InvalidTokenBuilder()
tokenbuilders = [
whitespace_tb,
newline_tb,
terminators_tb,
integer_tb,
integer_exponent_tb,
big_integer_tb,
real_tb,
real_exponent_tb,
hex_constant_tb,
octal_constant_tb,
binary_constant_tb,
keyword_tb,
values_tb,
known_operator_tb,
groupers_tb,
regex_tb,
identifier_tb,
dollar_sign_tb,
string_tb,
template_string_tb,
comment_tb,
self.unknown_operator_tb,
invalid_token_builder
]
tokenizer = Tokenizer(tokenbuilders)
tokens = tokenizer.tokenize(code)
tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid operator')
self.tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
self.calc_statistics()
tokens = self.source_tokens()
tokens = Examiner.join_all_lines(tokens)
self.calc_token_confidence()
self.calc_token_2_confidence()
num_operators = self.count_my_tokens(['operator', 'invalid operator'])
if num_operators > 0:
self.calc_operator_confidence(num_operators)
allow_pairs = []
self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
# self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)
self.calc_group_confidence(tokens, group_mids)
operand_types_2 = ['number', 'string', 'symbol']
self.calc_operand_n_confidence(tokens, operand_types_2, 2)
self.calc_operand_n_confidence(tokens, operand_types, 4)
self.calc_keyword_confidence()
self.calc_paired_blockers_confidence(['{'], ['}'])
self.calc_line_length_confidence(code, self.max_expected_line)
```
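A quick smoke test for the CoffeeScript examiner, assuming only the constructor signature and the `tokens` attribute assigned above (the snippet is invented):

```python
# Hypothetical smoke test for CoffeeScriptExaminer.
from coffeescript_examiner import CoffeeScriptExaminer

snippet = 'square = (x) -> x * x\nalert square 4 if square?\n'
examiner = CoffeeScriptExaminer(snippet)
for token in examiner.tokens:
    print(token.group, repr(token.text))
```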
#### File: jfitz/code-stat/csharp_examiner.py
```python
import string
import math
from codestat_token import Token
from codestat_tokenizer import Tokenizer
from token_builders import (
InvalidTokenBuilder,
WhitespaceTokenBuilder,
NewlineTokenBuilder,
EscapedStringTokenBuilder,
PrefixedStringTokenBuilder,
IntegerTokenBuilder,
IntegerExponentTokenBuilder,
RealTokenBuilder,
RealExponentTokenBuilder,
IdentifierTokenBuilder,
CaseInsensitiveListTokenBuilder,
CaseSensitiveListTokenBuilder,
SingleCharacterTokenBuilder,
LeadToEndOfLineTokenBuilder
)
from cx_token_builders import (
SlashSlashCommentTokenBuilder,
SlashStarCommentTokenBuilder
)
from examiner import Examiner
class CsharpExaminer(Examiner):
@staticmethod
def __escape_z__():
InvalidTokenBuilder.__escape_z__()
WhitespaceTokenBuilder.__escape_z__()
NewlineTokenBuilder.__escape_z__()
EscapedStringTokenBuilder.__escape_z__()
PrefixedStringTokenBuilder.__escape_z__()
IntegerTokenBuilder.__escape_z__()
IntegerExponentTokenBuilder.__escape_z__()
RealTokenBuilder.__escape_z__()
RealExponentTokenBuilder.__escape_z__()
IdentifierTokenBuilder.__escape_z__()
CaseInsensitiveListTokenBuilder.__escape_z__()
CaseSensitiveListTokenBuilder.__escape_z__()
SingleCharacterTokenBuilder.__escape_z__()
LeadToEndOfLineTokenBuilder.__escape_z__()
SlashSlashCommentTokenBuilder.__escape_z__()
SlashStarCommentTokenBuilder.__escape_z__()
return 'Escape ?Z'
def __init__(self, code):
super().__init__()
operand_types = []
whitespace_tb = WhitespaceTokenBuilder()
newline_tb = NewlineTokenBuilder()
integer_tb = IntegerTokenBuilder(None)
integer_exponent_tb = IntegerExponentTokenBuilder(None)
real_tb = RealTokenBuilder(False, False, None)
real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', None)
operand_types.append('number')
leads = '_'
extras = '_'
identifier_tb = IdentifierTokenBuilder(leads, extras)
operand_types.append('identifier')
quotes = ['"', "'", "’"]
string_tb = EscapedStringTokenBuilder(quotes, 10)
prefixed_string_tb = PrefixedStringTokenBuilder('@', False, ['"'])
operand_types.append('string')
slash_slash_comment_tb = SlashSlashCommentTokenBuilder()
slash_star_comment_tb = SlashStarCommentTokenBuilder()
directives = [
'#if', '#else', '#elif', '#endif',
'#define', '#undef',
'#line', '#pragma'
]
preprocessor_tb = CaseSensitiveListTokenBuilder(directives, 'preprocessor', False)
c_warning_tb = LeadToEndOfLineTokenBuilder('#warning', True, 'preprocessor')
c_error_tb = LeadToEndOfLineTokenBuilder('#error', True, 'preprocessor')
c_region_tb = LeadToEndOfLineTokenBuilder('#region', True, 'preprocessor')
c_endregion_tb = LeadToEndOfLineTokenBuilder('#endregion', True, 'preprocessor')
terminators_tb = SingleCharacterTokenBuilder(';', 'statement terminator', False)
known_operators = [
'+', '-', '*', '/', '%',
'=', '==', '!=', '>', '>=', '<', '<=',
'+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=',
'!', '&', '|', '~', '<<', '>>',
'^',
'.',
'++', '--', '->', '&&', '||',
'?', '??', '?.', '?[',
'=>',
'as', 'is', 'await', 'sizeof',
'typeof', 'new'
]
self.unary_operators = [
'+', '-',
'!', '~',
'++', '--',
'new', 'sizeof', 'typeof'
]
self.postfix_operators = [
'++', '--'
]
known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)
groupers = ['(', ')', ',', '[', ']', '{', '}', ':']
group_starts = ['(', '[', ',', '{']
group_ends = [')', ']', '}']
group_mids = [',', ':']
groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
keywords = [
'abstract', 'break',
'case', 'catch', 'checked', 'class', 'const',
'continue', 'default', 'delegate', 'do',
'else', 'enum', 'event', 'explicit', 'extern',
'finally', 'fixed', 'for', 'foreach', 'goto',
'if', 'implicit', 'in', 'interface', 'internal',
'lock', 'namespace', 'operator',
'out', 'override', 'params', 'partial', 'private', 'protected', 'public',
'readonly', 'ref', 'return', 'sealed',
'stackalloc', 'static', 'struct', 'switch',
'throw', 'try',
'unchecked', 'unsafe', 'using', 'using static',
'virtual', 'volatile', 'while'
]
keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)
types = [
'bool', 'byte', 'char', 'decimal', 'double', 'float', 'int', 'long', 'object',
'sbyte', 'short', 'string', 'uint', 'ulong', 'ushort', 'void'
]
types_tb = CaseSensitiveListTokenBuilder(types, 'type', True)
operand_types.append('type')
values = [
'base', 'false', 'null', 'this', 'true'
]
values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
operand_types.append('value')
invalid_token_builder = InvalidTokenBuilder()
tokenbuilders = [
newline_tb,
whitespace_tb,
terminators_tb,
integer_tb,
integer_exponent_tb,
real_tb,
real_exponent_tb,
keyword_tb,
types_tb,
values_tb,
known_operator_tb,
groupers_tb,
identifier_tb,
string_tb,
prefixed_string_tb,
slash_slash_comment_tb,
slash_star_comment_tb,
preprocessor_tb,
c_error_tb,
c_warning_tb,
c_region_tb,
c_endregion_tb,
self.unknown_operator_tb,
invalid_token_builder
]
tokenizer = Tokenizer(tokenbuilders)
tokens = tokenizer.tokenize(code)
tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid operator')
tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
tokens = Examiner.combine_identifier_colon(tokens, ['statement terminator', 'newline'], ['{'], ['whitespace', 'comment'])
self.tokens = tokens
self.convert_identifiers_to_labels()
number_suffixes = ['f', 'F', 'd', 'D', 'm', 'M']
self.tokens = self.combine_tokens_and_adjacent_types(tokens, 'number', 'identifier', number_suffixes)
self.calc_statistics()
tokens = self.source_tokens()
tokens = Examiner.join_all_lines(tokens)
self.calc_token_confidence()
self.calc_token_2_confidence()
num_operators = self.count_my_tokens(['operator', 'invalid operator'])
if num_operators > 0:
self.calc_operator_confidence(num_operators)
allow_pairs = []
self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)
self.calc_group_confidence(tokens, group_mids)
operand_types_2 = ['number', 'string', 'symbol']
self.calc_operand_n_confidence(tokens, operand_types_2, 2)
self.calc_operand_n_confidence(tokens, operand_types, 4)
self.calc_keyword_confidence()
self.calc_preprocessor_confidence()
self.calc_paired_blockers_confidence(['{'], ['}'])
self.calc_line_length_confidence(code, self.max_expected_line)
```
#### File: jfitz/code-stat/cx_token_builders.py
```python
from codestat_token import Token
from token_builders import TokenBuilder
# token reader for // comment
class SlashSlashCommentTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self):
self.text = ''
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'comment', False)]
def accept(self, candidate, c):
if c in ['\n', '\r']:
return False
if candidate.startswith('//'):
return True
if candidate == '/':
return c == '/'
if candidate == '':
return c == '/'
return False
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
if self.text.startswith('//'):
return len(self.text)
return 0
# token reader for /// comment
class TripleSlashCommentTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self):
self.text = ''
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'comment', False)]
def accept(self, candidate, c):
if c in ['\n', '\r']:
return False
if candidate.startswith('///'):
return True
if candidate == '':
return c == '/'
if candidate == '/':
return c == '/'
if candidate == '//':
return c == '/'
return False
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
if self.text.startswith('///'):
return len(self.text)
return 0
# token reader for /* */ comment
class SlashStarCommentTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self):
self.text = ''
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'comment', False)]
def accept(self, candidate, c):
if len(candidate) == 0:
return c == '/'
if len(candidate) == 1:
return c == '*'
return not candidate.endswith('*/')
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
if self.text.startswith('/*') and self.text.endswith('*/'):
return len(self.text)
return 0
# token reader for <name> class identifier
class ClassTypeTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self):
self.text = ''
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'type', True)]
def accept(self, candidate, c):
if len(candidate) == 0:
return c == '<'
level = 0
for ch in candidate:
if ch == '<':
level += 1
if ch == '>' and level > 0:
level -= 1
if level > 0:
return c.isalpha() or c.isdigit() or c in "</\\ ,_.:*>'"
return False
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
level = 0
for ch in self.text:
if ch == '<':
level += 1
if ch == '>':
level -= 1
if level != 0:
return 0
if self.text[0] == '<' and self.text[-1] == '>':
return len(self.text)
return 0
```
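`ClassTypeTokenBuilder.get_score()` only rewards text whose angle brackets balance and that is wrapped in `<` ... `>`. A small sketch of that check (the texts are made up, and `text` is set directly instead of going through the tokenizer):

```python
# Hypothetical check of ClassTypeTokenBuilder's balanced-bracket scoring.
from cx_token_builders import ClassTypeTokenBuilder

balanced = ClassTypeTokenBuilder()
balanced.text = '<List<string>>'
print(balanced.get_score([]))    # -> 14: brackets balance and the text is wrapped in <...>

unbalanced = ClassTypeTokenBuilder()
unbalanced.text = '<Dictionary<int, string>'
print(unbalanced.get_score([]))  # -> 0: one '<' is never closed
```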
#### File: jfitz/code-stat/dbase_token_builders.py
```python
import re
from codestat_token import Token
from token_builders import (
TokenBuilder,
IdentifierTokenBuilder,
BlockTokenBuilder
)
# token reader for deleted record function
class DbaseSpecialFunctionTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
return 'Escape ?Z'
def __init__(self, chars, previous):
self.chars = chars
self.previous = previous
self.text = None
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'function', True)]
def accept(self, candidate, c):
return len(candidate) == 0 and c in self.chars
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
if len(line_printable_tokens) == 0:
return 0
if line_printable_tokens[-1].text.lower() not in self.previous:
return 0
return len(self.text)
# token reader for filename
class DbaseFilenameTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
return 'Escape ?Z'
def __init__(self):
self.text = None
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'filename', True)]
def accept(self, candidate, c):
return c.isalpha() or c.isdigit() or c in '.-'
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
# a file name can have at most one dot (for the extension)
num_dots = self.text.count('.')
if num_dots > 1:
return 0
# must follow DO, SET ... TO, USE, INDEX, LOAD, or CALL
if len(line_printable_tokens) == 0:
return 0
# file names always follow these keywords
predecessors = ['do', 'use', 'index', 'to', 'load', 'call']
if line_printable_tokens[-1].text.lower() not in predecessors:
return 0
# TO is a special case; the line must start with SET or COPY (not STORE)
if line_printable_tokens[-1].text.lower() == 'to' and \
line_printable_tokens[0].text.lower() not in ['set', 'copy']:
return 0
# some keywords look like file names but are not
if self.text.lower() in ['screen', 'print', 'file']:
return 0
return len(self.text)
# token reader for LIKE wildcards
class WildCardIdentifierTokenBuilder(IdentifierTokenBuilder):
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
if len(line_printable_tokens) == 0:
return 0
if line_printable_tokens[-1].text.lower() != 'like':
return 0
return len(self.text)
# token reader for text literal (string)
class BracketedStringTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self):
self.text = ''
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'string', True)]
def accept(self, candidate, c):
# newline breaks a string
if c in ['\n', '\r']:
return False
if len(candidate) == 0:
return c == '['
if len(candidate) == 1:
return True
return candidate[-1] != ']'
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
if len(self.text) < 2:
return 0
if self.text[-1] != ']':
return 0
return len(self.text)
# token reader for a keyword with the rest of the line as a comment
class KeywordCommentTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self, legals, case_sensitive):
if case_sensitive:
self.legals = legals
else:
self.legals = list(map(str.lower, legals))
self.abbrevs = {}
for legal in self.legals:
for i in range(len(legal)):
self.abbrevs[legal[:i+1]] = 1
self.case_sensitive = case_sensitive
self.token1 = None
self.token2 = ''
def attempt(self, text, start):
self.text = None
self.token1 = None
self.token2 = ''
best_candidate = None
candidate = ''
i = start
while i < len(text):
c = text[i]
# match a keyword
if not self.accept(candidate, c):
break
candidate += c
i += 1
if self.case_sensitive:
if candidate in self.legals:
best_candidate = candidate
else:
if candidate.lower() in self.legals:
best_candidate = candidate
if best_candidate is not None:
self.token1 = best_candidate
# absorb all characters until newline (or end of text)
while i < len(text):
c = text[i]
if c in ['\n', '\r']:
break
self.token2 += c
i += 1
if self.token1 is not None:
self.text = self.token1 + self.token2
def get_tokens(self):
if self.token1 is None:
return None
if self.token2 is None:
token1 = Token(self.token1, 'keyword', False)
tokens = [token1]
else:
token1 = Token(self.token1, 'keyword', False)
token2 = Token(self.token2, 'comment', False)
tokens = [token1, token2]
return tokens
def accept(self, candidate, c):
if c in ['\n', '\r']:
return False
token = candidate + c
if self.case_sensitive:
return token in self.abbrevs
return token.lower() in self.abbrevs
def get_score(self, line_printable_tokens):
if self.token1 is None:
return 0
score = 0
if self.case_sensitive:
if self.token1 in self.legals:
score = len(self.text)
else:
if self.token1.lower() in self.legals:
score = len(self.text)
return score
# token reader for 'DO CASE' with the rest of the line as a comment
class KeywordComment2TokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self, legals, case_sensitive):
if case_sensitive:
self.legals = legals
else:
self.legals = list(map(str.lower, legals))
self.case_sensitive = case_sensitive
if case_sensitive:
self.regex = re.compile('(DO)( +)(CASE)(.*)')
else:
self.regex = re.compile('(DO)( +)(CASE)(.*)', re.IGNORECASE)
self.text = None
def get_tokens(self):
if self.text is None:
return None
m = re.match(self.regex, self.text)
if m is not None:
g = m.groups()
if len(g) != 4:
return None
token1 = Token(g[0], 'keyword', False)
token2 = Token(g[1], 'whitespace', False)
token3 = Token(g[2], 'keyword', False)
token4 = Token(g[3], 'comment', False)
tokens = [token1, token2, token3, token4]
return tokens
def accept(self, candidate, c):
if c in ['\n', '\r']:
return False
if self.case_sensitive:
if re.match(self.regex, candidate) is not None:
return True
else:
if re.match(self.regex, candidate.lower()) is not None:
return True
if len(candidate) == 0:
return c.lower() == 'd'
if len(candidate) == 1:
return c.lower() == 'o'
if candidate[-1].lower() == 'o':
return c.lower() == ' '
if candidate[-1].lower() == ' ':
return c.lower() in [' ', 'c']
if candidate[-1].lower() == 'c':
return c.lower() == 'a'
if candidate[-1].lower() == 'a':
return c.lower() == 's'
if candidate[-1].lower() == 's':
return c.lower() == 'e'
return False
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
score = 0
if self.case_sensitive:
if re.match('do +case.+', self.text) is not None:
score = len(self.text)
else:
if re.match('do +case.+', self.text.lower()) is not None:
score = len(self.text)
return score
# token reader for TEXT ... ENDTEXT block
class TextBlockTokenBuilder(BlockTokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self, prefix, suffix):
super().__init__(prefix, suffix, 'string')
def get_tokens(self):
if self.text is None:
return None
# split the text into 'TEXT', content, and 'ENDTEXT' tokens
len_start = len(self.prefix)
len_end = len(self.suffix)
starter_token = Token(self.text[:len_start], 'keyword', False)
ender_token = Token(self.text[-len_end:], 'keyword', False)
content = Token(self.text[len_start:-len_end], 'string', True)
return [
starter_token,
content,
ender_token
]
```
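`KeywordComment2TokenBuilder.get_tokens()` splits a dBase `DO CASE` line into keyword, whitespace, keyword, and trailing-comment tokens via its regular expression. A sketch of that split (the legals list and the input line are assumptions, and `text` is set directly for illustration rather than via the tokenizer):

```python
# Hypothetical illustration of the DO CASE splitter.
from dbase_token_builders import KeywordComment2TokenBuilder

builder = KeywordComment2TokenBuilder(['do case'], case_sensitive=False)
builder.text = 'DO  CASE of account type'
for token in builder.get_tokens():
    print(token.group.ljust(10), repr(token.text))
# keyword    'DO'
# whitespace '  '
# keyword    'CASE'
# comment    ' of account type'
```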
#### File: jfitz/code-stat/d_token_builders.py
```python
from codestat_token import Token
from token_builders import TokenBuilder
# token reader for hexadecimal real with exponent
class HexRealExponentTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self):
self.prefix = '0x'
self.letter = 'p'
self.extra_char = '_'
self.text = None
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'number', True)]
def accept(self, candidate, c):
result = False
hex_digits = 'abcdefABCDEF'
if len(candidate) < len(self.prefix):
result = c.lower() == self.prefix[len(candidate)]
if c.isdigit():
result = True
if c in hex_digits:
result = self.letter not in candidate.lower()
if self.extra_char is not None and c == self.extra_char:
result = len(candidate) > 0 and (candidate[-1].isdigit() or c in hex_digits)
if c.lower() == self.letter\
and len(candidate) > 0 and\
self.letter not in candidate.lower():
result = True
if c in ['+', '-'] and\
len(candidate) > 0 and\
candidate[-1].lower() == self.letter:
result = True
return result
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
# must have prefix, digit, 'P', and digit
if len(self.text) <= len(self.prefix) + 3:
return 0
if not self.letter in self.text.lower():
return 0
if self.text[-1].lower() == self.letter:
return 0
return len(self.text)
```
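`HexRealExponentTokenBuilder` targets D-style hexadecimal reals with a binary exponent, such as `0xABp4`. A character-feed sketch follows; the literal is made up, and in the real pipeline the Tokenizer drives the builder itself, so the loop and the direct `text` assignment are only illustrative.

```python
# Feed a hexadecimal real literal to HexRealExponentTokenBuilder.
from d_token_builders import HexRealExponentTokenBuilder

builder = HexRealExponentTokenBuilder()

candidate = ''
for ch in '0xABp4;':
    if not builder.accept(candidate, ch):
        break
    candidate += ch

builder.text = candidate
print(candidate)              # -> '0xABp4'
print(builder.get_score([]))  # -> 6: prefix, hex digits, 'p', and exponent digits are all present
```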
#### File: jfitz/code-stat/intercal_token_builders.py
```python
from codestat_token import Token
from token_builders import TokenBuilder
# token reader for parenthesized label
class ParensLabelTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self):
self.text = None
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'identifier', True)]
def accept(self, candidate, c):
if len(candidate) == 0:
return c == '('
if candidate[-1] == ')':
return False
return c.isdigit() or c == ')'
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
# must have at least three chars
if len(self.text) < 3:
return 0
if self.text[0] != '(':
return 0
if self.text[-1] != ')':
return 0
return len(self.text)
```
#### File: jfitz/code-stat/jcl_token_builders.py
```python
from codestat_token import Token
from token_builders import TokenBuilder
# token reader for JCL
class JCLTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self):
self.text = ''
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'jcl', False)]
def accept(self, candidate, c):
if c in ['\n', '\r']:
return False
if candidate == '':
return c == '/'
if candidate == '/':
return c == '/'
return candidate.startswith('//')
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
# print('TEXT ' + self.text)
if len(line_printable_tokens) > 0:
return 0
if not self.text.startswith('//'):
return 0
return len(self.text)
```
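`JCLTokenBuilder` recognizes whole lines that begin with `//` and scores them only when nothing printable precedes them on the line. A brief sketch (the JCL statement is just an example, and `text` is set directly for illustration):

```python
# Hypothetical illustration of JCLTokenBuilder scoring.
from codestat_token import Token
from jcl_token_builders import JCLTokenBuilder

builder = JCLTokenBuilder()
builder.text = '//STEP1 EXEC PGM=IEFBR14'
print(builder.get_score([]))                                 # -> 24: a line that starts with '//'
print(builder.get_score([Token('X', 'identifier', False)]))  # -> 0: something already precedes it
```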
#### File: jfitz/code-stat/lua_token_builders.py
```python
from codestat_token import Token
from token_builders import TokenBuilder
# token reader for --[[ ]] comment
class LuaBlockCommentTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self):
self.text = ''
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'comment', False)]
def accept(self, candidate, c):
if len(candidate) == 0:
return c == '-'
if len(candidate) == 1:
return c == '-'
if len(candidate) == 2:
return c == '['
if len(candidate) == 3:
return c == '['
return not candidate.endswith(']]')
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
if not self.text.endswith(']]'):
return 0
score = len(self.text)
return score
# token reader for double-bracket [[ ]] string
class DoubleBracketStringTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self):
self.opener = '[['
self.closer = ']]'
self.text = ''
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'string', False)]
def accept(self, candidate, c):
if len(candidate) < 2:
return c == '['
return not candidate.endswith(']]')
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
if len(self.text) < 4:
return 0
if not self.text.startswith('[['):
return 0
if not self.text.endswith(']]'):
return 0
return len(self.text)
```
#### File: jfitz/code-stat/python_token_builders.py
```python
from codestat_token import Token
from token_builders import TokenBuilder
# token reader for raw triple quote string
class RawTripleQuoteCommentTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self):
self.prefix = 'r'
self.text = ''
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'string', True)]
def accept(self, candidate, c):
if len(candidate) > 6:
return candidate[-3:] != candidate[1:4]
if len(candidate) > 3:
return candidate[1:4] in ['"""', "'''"]
if len(candidate) > 1:
return c == candidate[1]
if len(candidate) == 1:
return c in '"\''
return c == 'r'
```
#### File: jfitz/code-stat/sql_examiner.py
```python
import string
from codestat_exception import CodeStatException
from codestat_tokenizer import Tokenizer
from token_builders import (
InvalidTokenBuilder,
WhitespaceTokenBuilder,
NewlineTokenBuilder,
StuffedQuoteStringTokenBuilder,
IdentifierTokenBuilder,
IntegerTokenBuilder,
IntegerExponentTokenBuilder,
RealTokenBuilder,
RealExponentTokenBuilder,
CaseInsensitiveListTokenBuilder,
CaseSensitiveListTokenBuilder,
SingleCharacterTokenBuilder,
PrefixedIntegerTokenBuilder,
LeadToEndOfLineTokenBuilder,
NullTokenBuilder
)
from sql_token_builders import SqlBracketedIdentifierTokenBuilder
from examiner import Examiner
class SqlExaminer(Examiner):
@staticmethod
def __escape_z__():
InvalidTokenBuilder.__escape_z__()
WhitespaceTokenBuilder.__escape_z__()
NewlineTokenBuilder.__escape_z__()
StuffedQuoteStringTokenBuilder.__escape_z__()
IdentifierTokenBuilder.__escape_z__()
IntegerTokenBuilder.__escape_z__()
IntegerExponentTokenBuilder.__escape_z__()
RealTokenBuilder.__escape_z__()
RealExponentTokenBuilder.__escape_z__()
CaseInsensitiveListTokenBuilder.__escape_z__()
CaseSensitiveListTokenBuilder.__escape_z__()
SingleCharacterTokenBuilder.__escape_z__()
PrefixedIntegerTokenBuilder.__escape_z__()
LeadToEndOfLineTokenBuilder.__escape_z__()
NullTokenBuilder.__escape_z__()
SqlBracketedIdentifierTokenBuilder.__escape_z__()
return 'Escape ?Z'
def __init__(self, code, extension):
super().__init__()
operand_types = []
whitespace_tb = WhitespaceTokenBuilder()
newline_tb = NewlineTokenBuilder()
integer_tb = IntegerTokenBuilder(None)
integer_exponent_tb = IntegerExponentTokenBuilder(None)
real_tb = RealTokenBuilder(True, True, None)
real_exponent_tb = RealExponentTokenBuilder(True, True, 'E', None)
operand_types.append('number')
quotes = ["'", '"']
string_tb = StuffedQuoteStringTokenBuilder(quotes, False)
operand_types.append('string')
leads = '_'
extras = '_'
identifier_tb = IdentifierTokenBuilder(leads, extras)
bracketed_identifier_tb = NullTokenBuilder()
if extension in ['microsoft', 't-sql']:
bracketed_identifier_tb = SqlBracketedIdentifierTokenBuilder()
operand_types.append('identifier')
terminators_tb = SingleCharacterTokenBuilder(';', 'statement terminator', False)
comment_tb = LeadToEndOfLineTokenBuilder('--', True, 'comment')
known_operators = [
'=', '>', '>=', '<', '<=', '<>', '!=',
'AND', 'OR', 'NOT',
'IN', 'EXISTS', 'LIKE', 'BETWEEN', 'ANY', 'ALL',
'.'
]
known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)
self.unary_operators = [
'NOT', 'EXISTS', 'ANY', 'ALL'
]
groupers = ['(', ')', ',']
group_starts = ['(', ',']
group_mids = [',']
group_ends = [')']
groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
keywords = [
'ACOS', 'ASIN', 'ATAN',
'ABSOLUTE', 'ACTION', 'ADD', 'ALL', 'ALLOCATE', 'ALTER', 'ARE',
'ABS', 'ARRAY_AGG', 'AVG',
'AS', 'ASC', 'ASSERTION', 'AT', 'AUTHORIZATION',
'AFTER', 'ARRAY', 'ASENSITIVE', 'ASYMMETRIC', 'ATOMIC',
'ARRAY_MAX_CARDINALITY',
'BEFORE', 'BEGIN', 'BETWEEN', 'BIT_LENGTH', 'BOTH', 'BY',
'BEGIN_FRAME', 'BEGIN_PARTITION',
'BINARY', 'BOOLEAN', 'BREADTH',
'CALL', 'CASCADE', 'CASCADED', 'CASE', 'CAST', 'CATALOG',
'CALLED',
'CHAR_LENGTH', 'CHARACTER_LENGTH',
'CHECK', 'COALESCE', 'COLLATE', 'COLLATION',
'COLUMN', 'COMMIT', 'CONDITION', 'CONNECT',
'CONNECTION', 'CONSTRAINT', 'CONSTRAINTS', 'CONTAINS', 'CONTINUE',
'CONVERT', 'CORRESPONDING', 'COUNT', 'CREATE', 'CROSS',
'CURRENT', 'CURRENT_DATE', 'CURRENT_PATH', 'CURRENT_TIME', 'CURRENT_TIMESTAMP',
'CURRENT_USER', 'CURSOR',
'CLOSE', 'CONSTRUCTOR', 'CUBE',
'CURRENT_DEFAULT_TRANSFORM_GROUP', 'CURRENT_ROLE',
'CURRENT_TRANSFORM_GROUP_FOR_TYPE', 'CYCLE',
'CARDINALITY', 'CEIL', 'CEILING', 'CONVERT', 'CORR', 'COVAR_POP', 'COVAR_SAMPLE',
'CUME_DIST', 'CURRENT_CATALOG', 'CURRENT_SCHEMA',
'CLASSIFIER', 'COS', 'COSH',
'DAY', 'DEALLOCATE', 'DEC', 'DECLARE', 'DEFAULT',
'DECFLOAT', 'DEFINE',
'DEFERRABLE', 'DEFERRED', 'DELETE', 'DEPTH', 'DESC', 'DESCRIBE',
'DENSE_RANK',
'DESCRIPTOR', 'DETERMINISTIC', 'DIAGNOSTICS', 'DISCONNECT', 'DISTINCT',
'DO', 'DOMAIN', 'DROP',
'DYNAMIC',
'ELSE', 'END', 'ESCAPE', 'EXCEPT', 'EXCEPTION',
'ELEMENT',
'EXEC', 'EXECUTE', 'EXISTS', 'EXIT', 'EXTERNAL', 'EXTRACT',
'EACH', 'ELSEIF', 'EQUALS',
'END_EXEC', 'EVERY',
'EXP',
'EMPTY', 'EQUALS',
'FETCH', 'FIRST', 'FOR', 'FOREIGN', 'FOUND',
'FROM', 'FULL', 'FUNCTION', 'FUSION',
'FILTER', 'FREE',
'FIRST_VALUE', 'FRAME_ROW',
'GENERAL', 'GET', 'GLOBAL', 'GO', 'GOTO', 'GRANT', 'GROUP',
'GROUPING',
'GROUPS',
'HANDLER', 'HAVING', 'HOUR',
'HOLD',
'IDENTITY', 'IF', 'IMMEDIATE', 'IN', 'INDICATOR', 'INITIALLY', 'INNER',
'INOUT', 'INPUT', 'INSENSITIVE', 'INSERT', 'INT', 'INTERSECT',
'INITIAL',
'INTERVAL', 'INTO', 'IS', 'ISOLATION',
'INTERSECTION',
'ITERATE',
'JOIN',
      'JSON_ARRAY', 'JSON_ARRAYAGG', 'JSON_EXISTS', 'JSON_OBJECT',
'JSON_OBJECTAGG', 'JSON_QUERY', 'JSON_TABLE', 'JSON_TABLE_PRIMITIVE',
'JSON_VALUE',
'KEY',
'LANGUAGE', 'LAST', 'LEADING', 'LEFT', 'LEVEL', 'LIKE', 'LOCAL',
'LARGE', 'LATERAL', 'LEAVE', 'LOCALTIME', 'LOCALTIMESTAMP', 'LOCATOR', 'LOOP',
'LAG', 'LISTAGG', 'LOG', 'LOG10',
'LIKE_REGEX', 'LN',
'LOWER',
'LAST_VALUE', 'LEAD',
'MATCH', 'MAX', 'MIN', 'MINUTE', 'MODULE', 'MONTH',
'MAP', 'METHOD', 'MODIFIES',
'MATCH_NUMBER', 'MATCH_RECOGNIZE', 'MATCHES',
'MEMBER', 'MERGE', 'MULTISET',
'MOD',
'NAMES', 'NATIONAL', 'NATURAL', 'NEXT', 'NO', 'NOT',
'NULLIF', 'NUMERIC',
'NTH_VALUE', 'NTILE',
'NEW',
'NORMALIZE',
'OCTET_LENGTH', 'OF', 'ONLY', 'OPEN', 'OPTION', 'ORDER',
'OUTPUT', 'OVERLAPS',
'OBJECT', 'OLD', 'ORDINALITY', 'OUT', 'OUTER',
'OCTET_LENGTH', 'OFFSET',
'OMIT',
'OCCURRENCES_REGEX', 'ONE', 'OVER',
'OVERLAY',
'PAD', 'PARAMETER', 'PARTIAL', 'PRECISION', 'PREPARE', 'PRESERVE',
'PRIMARY', 'PRIOR', 'PRIVILEGES', 'PROCEDURE', 'PUBLIC',
'PATTERN', 'PER', 'PTF',
'PARTITION',
'PERCENT_RANK', 'PERCENTILE_CONT', 'PERCENTILE_DISC', 'POSITION',
'PERCENT', 'PERIOD', 'PORTION', 'PRECEDES',
'POSITION_REGEX', 'POWER',
'RANGE',
'READ', 'REFERENCES', 'RELATIVE', 'RESTRICT',
'RETURN', 'RETURNS', 'REVOKE', 'RIGHT', 'ROLLBACK', 'ROLLUP',
'READS', 'ROWS',
'RECURSIVE', 'REF', 'REFERENCING', 'RELEASE', 'REPEAT', 'REGIONAL',
'RESULT', 'ROW',
'RANK', 'REGR_AVGX', 'REGR_AVGY', 'REGR_COUNT', 'REGR_INTERCEPT', 'REGR_R2',
'REGR_SLOPE', 'REGR_SXX', 'REGR_SXY', 'REGR_SYY', 'ROW_NUMBER',
'RUNNING',
'SCHEMA', 'SCROLL', 'SECOND', 'SECTION', 'SELECT', 'SESSION',
'SESSION_USER', 'SET', 'SIZE', 'SOME', 'SPACE',
'SPECIFIC', 'SQL', 'SQLCODE', 'SQLERROR',
'SQLEXCEPTION', 'SQLSTATE', 'SQLWARNING', 'SUBSTRING', 'SUM',
'SQRT', 'STDDEV_POP', 'STDDEV_SAMP', 'SUBSTRING_REGEX', 'SUM',
'SEEK', 'SHOW', 'SIN', 'SINH', 'SUBSET',
'SUBMULTISET',
'SYSTEM_USER',
'SAVEPOINT', 'SCOPE', 'SEARCH', 'SENSITIVE', 'SETS', 'SIGNAL', 'SIMILAR',
'SPECIFICTYPE', 'START', 'STATE', 'STATIC', 'SYMMETRIC', 'SYSTEM',
'TABLE', 'TEMPORARY', 'THEN', 'TIME', 'TIMESTAMP', 'TIMEZONE_HOUR',
      'TABLESAMPLE',
      'TAN', 'TANH',
'TIMEZONE_MINUTE', 'TO', 'TRAILING', 'TRANSACTION', 'TRANSLATE',
'TRANSLATION', 'TRIM',
'TRANSLATE', 'TRANSLATE_REGEX', 'TRUNCATE',
'TREAT', 'TRIGGER',
'TRIM_ARRAY',
'UNDO', 'UNION', 'UNIQUE', 'UNKNOWN', 'UPDATE', 'UPPER', 'USAGE',
'USER', 'USING',
'UNDER', 'UNNEST', 'UNTIL',
'UESCAPE', 'UPPER',
'VALUE', 'VALUES', 'VARYING', 'VIEW',
'VAR_POP', 'VAR_SAMP',
      'VALUE_OF', 'VERSIONING',
'WHEN', 'WHENEVER', 'WHERE', 'WITH', 'WORK', 'WRITE',
      'WHILE', 'WINDOW', 'WITHIN', 'WITHOUT',
      'WIDTH_BUCKET',
'YEAR',
'ZONE'
]
keywords_tsql = [
'INSTEAD', 'CASE', 'UPDLOCK', 'DATEADD', 'GETDATE',
'TEXTIMAGE_ON', 'CLUSTERED',
'GENERATED', 'DECLARE', 'SET',
'BEGIN', 'END', 'BREAK', 'CONTINUE', 'GOTO', 'ELSE', 'RETURN',
'WAITFOR', 'BULK', 'TRY', 'CATCH'
]
keywords_plsql = [
'%TYPE', 'BEFORE', 'DECODE', 'DESCRIBE', 'DUAL', 'INTERSECT', 'MINUS',
'SYSDATE', 'USER'
]
if extension in ['microsoft', 't-sql']:
keywords += keywords_tsql
if extension in ['oracle', 'pl-sql']:
keywords += keywords_plsql
keyword_tb = CaseInsensitiveListTokenBuilder(keywords, 'keyword', False)
values = ['TRUE', 'FALSE', 'NULL', 'OFF', 'ON', 'NONE']
values_tsql = [
'ALLOW_ROW_LOCKS', 'ALLOW_PAGE_LOCKS', 'ALWAYS', 'IGNORE_DUP_KEY',
'FILLFACTOR', 'HISTORY_TABLE', 'PAD_INDEX',
'STATISTICS_NORECOMPUTE', 'SUSER_SNAME', 'SYSTEM_VERSIONING',
'SYSTEM_TIME'
]
if extension in ['microsoft', 't-sql']:
values += values_tsql
values_tb = CaseInsensitiveListTokenBuilder(values, 'value', True)
operand_types.append('value')
types = [
'BIGINT', 'BIT', 'BLOB',
'CHAR', 'CHARACTER', 'CLOB',
'DATE', 'DECIMAL', 'DOUBLE',
'FLOAT',
'INTEGER',
'NCHAR', 'NCLOB',
'REAL',
'SMALLINT',
'VARCHAR'
]
types_tsql = [
'nvarchar', 'bigint', 'datetime', 'datetime2', 'geography'
]
if extension in ['microsoft', 't-sql']:
types += types_tsql
type_tb = CaseInsensitiveListTokenBuilder(types, 'type', True)
operand_types.append('value')
invalid_token_builder = InvalidTokenBuilder()
tokenbuilders = [
newline_tb,
whitespace_tb,
integer_tb,
integer_exponent_tb,
real_tb,
real_exponent_tb,
string_tb,
known_operator_tb,
terminators_tb,
groupers_tb,
keyword_tb,
values_tb,
identifier_tb,
type_tb,
bracketed_identifier_tb,
comment_tb,
self.unknown_operator_tb,
invalid_token_builder
]
tokenizer = Tokenizer(tokenbuilders)
tokens = tokenizer.tokenize(code)
tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid operator')
tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
tokens = Examiner.combine_identifier_colon(tokens, ['statement terminator', 'newline'], [], ['whitespace', 'comment'])
self.tokens = tokens
self.convert_identifiers_to_labels()
self.calc_statistics()
tokens = self.source_tokens()
tokens = Examiner.join_all_lines(tokens)
self.calc_token_confidence()
self.calc_token_2_confidence()
num_operators = self.count_my_tokens(['operator', 'invalid operator'])
if num_operators > 0:
self.calc_operator_confidence(num_operators)
allow_pairs = []
self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)
self.calc_group_confidence(tokens, group_mids)
# operand_types_2 = ['number', 'string', 'symbol']
# self.calc_operand_n_confidence(tokens, operand_types_2, 2)
# self.calc_operand_n_confidence(tokens, operand_types, 4)
self.calc_keyword_confidence()
self.calc_line_length_confidence(code, self.max_expected_line)
```
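A minimal usage sketch for the examiner above; only the constructor signature and the `tokens` attribute set in `__init__` are relied on, since the `Examiner` base class is not reproduced here:
```python
# Hypothetical usage of SqlExaminer; the SQL snippet is illustrative.
code = "SELECT name, COUNT(*) FROM users WHERE active = 1 GROUP BY name;"
examiner = SqlExaminer(code, '')   # extension '' -> plain ANSI SQL keyword set
for token in examiner.tokens:      # tokens are computed during __init__
    print(token)
```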
#### File: jfitz/code-stat/swift_token_builders.py
```python
from codestat_token import Token
from token_builders import (
TokenBuilder,
PrefixedIdentifierTokenBuilder
)
# token reader for Swift shorthand argument names ($0, $1, ...)
class SwiftArgumentTokenBuilder(TokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self):
self.text = None
def get_tokens(self):
if self.text is None:
return None
return [Token(self.text, 'identifier', True)]
def accept(self, candidate, c):
result = False
if len(candidate) == 0:
result = c == '$'
if len(candidate) > 0:
result = c in '0123456789'
return result
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
if len(self.text) < 2:
return 0
return len(self.text)
class SwiftSymbolTokenBuilder(PrefixedIdentifierTokenBuilder):
@staticmethod
def __escape_z__():
Token.__escape_z__()
return 'Escape ?Z'
def __init__(self, prefix, group, is_operand):
super().__init__(prefix, group, is_operand)
def get_score(self, line_printable_tokens):
if self.text is None:
return 0
if len(self.text) < 2:
return 0
types = ['identifier', 'number']
if len(line_printable_tokens) > 0 and line_printable_tokens[-1].group in types:
return 0
if not self.text.startswith(self.prefix):
return 0
return len(self.text)
```
#### File: test/bin/run_confidence_test.py
```python
import argparse
import requests
import json
import sys
# build set of confidence factors, one group for each language
def make_confidences(code, params, languages):
confidences = {}
target = 'localhost:5000'
for language in languages:
# send request, get response
params2 = params.copy()
params2.append('language=' + language)
paramstext = '&'.join(params2)
url = "http://" + target + "/" + "confidence" + "?" + paramstext
resp = requests.post(url, data=code)
content = resp.content
if len(content) > 0:
confidence = json.loads(content)
confidences[language] = confidence
return confidences
# build set of confidence errors, one group for each language
def make_confidence_errors(code, params, languages):
confidence_errors = {}
target = 'localhost:5000'
for language in languages:
# send request, get response
params2 = params.copy()
params2.append('errors')
params2.append('language=' + language)
paramstext = '&'.join(params2)
url = "http://" + target + "/" + "confidence" + "?" + paramstext
resp = requests.post(url, data=code)
content = resp.content
if len(content) > 0:
confidence = json.loads(content)
confidence_errors[language] = confidence
return confidence_errors
# compute confidence (product of all elements)
def calc_confidence(confidence):
value = 1.0
for name in confidence:
value *= confidence[name]
return value
# count tokens in statistics
def count_tokens(statistic, groups):
count = 0
for group in statistic:
if group in groups:
count += statistic[group]
return count
# parse command line
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input')
parser.add_argument('--tabsize')
parser.add_argument('--wide', action='store_true')
parser.add_argument('--comment')
parser.add_argument('--block-comment-limit')
parser.add_argument('--languages')
args = parser.parse_args()
# read code (input)
filename = args.input.strip()
try:
with open(filename, 'rb') as f:
contents = f.read()
except UnicodeDecodeError:
print('"Cannot read text"')
sys.exit()
try:
code = contents.decode('ASCII')
sys.stderr.write('ENCODE ASCII\n')
except UnicodeDecodeError:
# try:
# code = contents.decode('Latin-1')
# sys.stderr.write('ENCODE LATIN-1\n')
# except UnicodeDecodeError:
try:
code = contents.decode('UTF-8')
sys.stderr.write('ENCODE UTF-8\n')
except UnicodeDecodeError:
try:
code = contents.decode('UTF-16')
sys.stderr.write('ENCODE UTF-16\n')
except UnicodeDecodeError:
print('"Cannot encode text as ASCII, UTF-8, or UTF-16"')
sys.exit()
sys.stderr.write(code)
```
#### File: test/bin/run_detect_test.py
```python
import argparse
import requests
import json
import sys
# build set of confidence factors, one group for each language
def make_confidences(contents, params, languages):
confidences = {}
target = 'localhost:5000'
for language in languages:
# send request, get response
params2 = params.copy()
params2.append('language=' + language)
paramstext = '&'.join(params2)
url = "http://" + target + "/" + "confidence" + "?" + paramstext
resp = requests.post(url, data=contents)
content = resp.content
if content is not None and len(content) > 0:
try:
confidence = json.loads(content)
confidences[language] = confidence
except json.decoder.JSONDecodeError:
pass
return confidences
# build set of confidence errors, one group for each language
def make_confidence_errors(contents, params, languages):
confidence_errors = {}
target = 'localhost:5000'
for language in languages:
# send request, get response
params2 = params.copy()
params2.append('errors')
params2.append('language=' + language)
paramstext = '&'.join(params2)
url = "http://" + target + "/" + "confidence" + "?" + paramstext
resp = requests.post(url, data=contents)
content = resp.content
if len(content) > 0:
confidence = json.loads(content)
confidence_errors[language] = confidence
return confidence_errors
# compute confidence (product of all elements)
def calc_confidence(confidence):
value = 1.0
for name in confidence:
value *= confidence[name]
return value
# count tokens in statistics
def count_tokens(statistic, groups):
count = 0
for group in statistic:
if group in groups:
count += statistic[group]
return count
# identify language with highest confidence
# break ties
def identify_language(contents, params, tiebreak_keywords, tiebreak_tokens, tiebreak_simple, languages):
tiebreak_override = True
confidences = make_confidences(contents, params, languages)
# debug
# errors = make_confidence_errors(contents, params, languages)
# get confidence values
retval = {}
highest_confidence = 0
for language in confidences:
confidence = calc_confidence(confidences[language])
# debug 3
# sys.stderr.write('CONF: ' + language + ' ' + str(confidence) + '\n')
# sys.stderr.write('CONF: ' + language + ' ' + str(confidences[language]) + '\n')
# sys.stderr.write('CONF: ' + language + ' ' + str(errors[language]) + '\n')
retval[language] = confidence
# find the highest value
if confidence > highest_confidence:
highest_confidence = confidence
# sys.stderr.write('HIGH CONF: ' + str(highest_confidence) + '\n')
if tiebreak_keywords:
# sys.stderr.write('TIEBREAK KEYWORDS\n')
# count how many have the greatest confidence
high_languages = []
for language in confidences:
confidence = calc_confidence(confidences[language])
if confidence == highest_confidence:
high_languages.append(language)
# debug
# sys.stderr.write(' ' + language + '\n')
# if a tie among multiple examiners
if len(high_languages) > 1:
statistics = {}
highest_keyword_count = 0
for language in high_languages:
# todo: get statistics, save in dictionary
params2 = params.copy()
params2.append('language=' + language)
paramstext = '&'.join(params2)
url = "http://" + target + "/" + "statistics" + "?" + paramstext
resp = requests.post(url, data=contents)
content = resp.content
statistic = json.loads(content)
statistics[language] = statistic
groups = [
'keyword', 'type', 'function', 'register', 'directive', 'preprocessor'
]
keyword_count = count_tokens(statistic, groups)
if keyword_count > highest_keyword_count:
highest_keyword_count = keyword_count
if highest_keyword_count > 0:
# assign confidence to number of keywords and types (more is better)
for language in high_languages:
count = count_tokens(statistics[language], groups)
keyword_count_confidence = count / highest_keyword_count
confidences[language]['keyword_count'] = keyword_count_confidence
# debug
# sys.stderr.write(' ADJ: ' + language + ' ' + str(keyword_count_confidence) + '\n')
# recalculate confidence with new factor
for language in high_languages:
confidence = calc_confidence(confidences[language])
retval[language] = confidence
# debug
# sys.stderr.write('HIGH CONF: ' + str(highest_confidence) + '\n')
if tiebreak_simple:
# sys.stderr.write('TIEBREAK SIMPLE\n')
# count how many have the greatest confidence
high_languages = []
for language in confidences:
confidence = calc_confidence(confidences[language])
if confidence == highest_confidence:
high_languages.append(language)
# sys.stderr.write(' ' + language + '\n')
# if a tie among multiple examiners
if len(high_languages) > 1:
url = "http://" + target + "/" + "simple"
# request languages
resp = requests.get(url)
content = resp.content
simpler_languages = json.loads(content)
for language in high_languages:
b = language
a = simpler_languages[language]
while a is not None:
if a in high_languages:
# when there is a simpler language in the high names list
# decrease confidence for this language
confidences[b]['simplest'] = 0.99
# sys.stderr.write(' ADJ: ' + b + ' ' + str(keyword_count_confidence + '\n'))
b = a
a = simpler_languages[a]
# recalculate confidence with new factor
for language in high_languages:
confidence = calc_confidence(confidences[language])
retval[language] = confidence
if tiebreak_override:
# count how many have the greatest confidence
high_languages = []
for language in confidences:
confidence = calc_confidence(confidences[language])
if confidence == highest_confidence:
high_languages.append(language)
# if a tie among multiple examiners
if len(high_languages) > 1:
url = "http://" + target + "/" + "override"
# request languages
resp = requests.get(url)
content = resp.content
override_language = json.loads(content)
for language in high_languages:
if language in override_language:
loser = override_language[language]
if loser in high_languages:
print('in high languages\n')
# decrease confidence for loser language
confidences[loser]['overridden'] = 0.99
# sys.stderr.write(' ADJ OVER: ' + loser + ' ' + str(keyword_count_confidence + '\n'))
# recalculate confidence with new factor
for language in high_languages:
confidence = calc_confidence(confidences[language])
retval[language] = confidence
return retval, confidences
# parse command line
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input')
parser.add_argument('--tabsize')
parser.add_argument('--wide', action='store_true')
parser.add_argument('--comment')
parser.add_argument('--block-comment-limit')
parser.add_argument('--languages')
args = parser.parse_args()
# read code (input)
filename = args.input.strip()
with open(filename, 'rb') as f:
contents = f.read()
params = []
tabsize = args.tabsize
if tabsize:
tabsize = int(tabsize)
params.append('tabsize=' + str(tabsize))
wide = args.wide
if wide:
params.append('wide')
tiebreak_keywords = True
tiebreak_tokens = False
tiebreak_simple = True
comment = args.comment
if comment:
params.append('comment=' + comment)
block_comment_limit = args.block_comment_limit
if block_comment_limit:
block_comment_limit = int(block_comment_limit)
params.append('block_comment_limit=' + str(block_comment_limit))
target = 'localhost:5000'
url = "http://" + target + "/" + "languages"
# request languages
resp = requests.get(url)
content = resp.content
codes_and_names = json.loads(content)
# get list of languages
languages = args.languages
if languages is not None:
languages = languages.split(',')
else:
# if languages not specified, get list from web service
languages = codes_and_names.keys()
# get set of confidence factors
detected_languages, __ = identify_language(contents, params, tiebreak_keywords, tiebreak_tokens, tiebreak_simple, languages)
# print result
mydict = {}
for key in detected_languages:
new_key = codes_and_names[key]
mydict[new_key] = detected_languages[key]
json_text = json.dumps(mydict)
sys.stdout.write(json_text)
```
|
{
"source": "jfitzinger/docs",
"score": 2
}
|
#### File: extensions/shortcodes/extension.py
```python
import os
import pkgutil
import sys
import bbcode
from grow import extensions
from grow.documents import document, static_document
from grow.extensions import hooks
SHORTCODES_DIR = 'shortcodes'
class ShortcodesPreRenderHook(hooks.PreRenderHook):
"""Handle the post-render hook."""
def should_trigger(self, previous_result, doc, original_body, *_args,
**_kwargs):
"""Only trigger for non-empty documents"""
# Do not run for empty documents
content = previous_result if previous_result else original_body
if content is None:
return False
# Check that it's not a StaticDocument
if isinstance(doc, static_document.StaticDocument):
return False
        # TODO: This is extremely ugly but without it Grow will cache
# the result of rendered shortcodes. At least try to only bust
# needed caches
doc.pod.podcache.reset()
return True
def trigger(self, previous_result, doc, raw_content, *_args, **_kwargs):
content = previous_result if previous_result else raw_content
return self.extension.parser.format(content, doc=doc)
class ShortcodesExtension(extensions.BaseExtension):
"""Shortcodes Extension."""
def __init__(self, pod, config):
super(ShortcodesExtension, self).__init__(pod, config)
self.parser = bbcode.Parser(
newline='\n',
install_defaults=False,
escape_html=False,
replace_cosmetic=False,
replace_links=False)
self.shortcodes = []
self.load_shortcodes()
def load_shortcodes(self):
"""Verifies the pod has a shortcode module and loads all shortcodes"""
shortcodes_path = '{}/{}'.format(self.pod.root, SHORTCODES_DIR)
if os.path.exists(shortcodes_path + '/__init__.py'):
for importer, package_name, _ in pkgutil.iter_modules(
[shortcodes_path]):
full_package_name = '{}.{}'.format(shortcodes_path,
package_name)
if full_package_name not in sys.modules:
module = importer.find_module(package_name).load_module(
package_name)
self.register_shortcode(module)
else:
self.pod.logger.warning(
'There is no shortcode package in this pod')
def register_shortcode(self, module):
"""Checks if a loaded module exports a shortcode class and if so instantiates
one and tries to register it with the BBCode parser"""
if module.shortcode:
shortcode = module.shortcode(self.pod)
self.shortcodes.append(shortcode)
shortcode.register(self.parser)
@property
def available_hooks(self):
"""Returns the available hook classes."""
return [
ShortcodesPreRenderHook,
]
```
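For reference, `load_shortcodes` expects a `shortcodes/` package in the pod whose modules each export a module-level `shortcode` class. A hypothetical minimal module (the tag name and formatter are illustrative, and `add_simple_formatter` is assumed from the bbcode package used above):
```python
# shortcodes/hello.py -- hypothetical example compatible with register_shortcode()
class HelloShortcode:
    def __init__(self, pod):
        self.pod = pod

    def register(self, parser):
        # renders [hello]World[/hello] as "Hello, World!"
        parser.add_simple_formatter('hello', 'Hello, %(value)s!')

# module-level attribute checked by register_shortcode()
shortcode = HelloShortcode
```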
|
{
"source": "jfitz/mpk",
"score": 3
}
|
#### File: mpk/src/mpk.py
```python
import argparse
from datetime import date, datetime
import fileinput
import re
from MpkError import (
MpkDirectiveError,
MpkParseError,
MpkTaskError,
MpkTokenError
)
from Task import Task
def remove_comments(line):
# remove comments
if '#' in line:
line, _ = line.split('#', maxsplit=1)
return line.rstrip()
def is_directive(word):
return re.match(r'\.[A-Za-z0-9-_]*$', word) is not None
def is_date(word):
return re.match(r'\d\d\d\d-\d\d-\d\d$', word) is not None
def is_ident(word):
return re.match(r'[A-Za-z][A-Za-z0-9-_]*$', word) is not None
def is_duration(word):
return re.match(r'\d+[dw]$', word) is not None
def split_to_lists(words, known_dow_keywords, known_ref_keywords):
directives = []
dow_keywords = []
ref_keywords = []
idents = []
durations = []
dates = []
for word in words:
handled = False
if is_directive(word) and not handled:
directives.append(word)
handled = True
if word in known_dow_keywords:
dow_keywords.append(word)
handled = True
if word in known_ref_keywords:
ref_keywords.append(word)
handled = True
if is_date(word) and not handled:
d = datetime.strptime(word, "%Y-%m-%d").date()
dates.append(d)
handled = True
if is_duration(word) and not handled:
durations.append(word)
handled = True
if is_ident(word) and not handled:
idents.append(word)
handled = True
if not handled:
raise MpkTokenError('Unknown token ' + word)
return {
'directives': directives,
'dow_keywords': dow_keywords,
'ref_keywords': ref_keywords,
'identifiers': idents,
'durations': durations,
'dates': dates
}
def calculate_level(line, levels, level_tids, known_tids):
level = len(line) - len(line.lstrip())
if level > levels[-1]:
levels.append(level)
level_tids[level] = known_tids[-1]
if level < levels[-1]:
if level not in level_tids:
raise MpkParseError('Unexpected indentation',
fileinput.filelineno(), line)
while levels[-1] > level:
del level_tids[levels[-1]]
del levels[-1]
return level
def build_task(tokens, known_tids, tasks, project_first_date, dates, level, parent_tid, nonwork_dows, nonwork_dates):
idents = tokens['identifiers']
durations = tokens['durations']
ref_keywords = tokens['ref_keywords']
task = Task(idents, durations, known_tids, tasks, project_first_date, dates, level, parent_tid, nonwork_dows, nonwork_dates, ref_keywords)
tid = task.tid
tasks[tid] = task
known_tids.append(tid)
def process_directive(tokens, known_dow_keywords, nonwork_dows, nonwork_dates):
directives = tokens['directives']
dow_keywords = tokens['dow_keywords']
dates = tokens['dates']
if len(directives) != 1:
raise MpkDirectiveError('No single directive')
directive = directives[0]
handled = False
if directive == '.no-work':
for keyword in dow_keywords:
dow = known_dow_keywords[keyword]
nonwork_dows.append(dow)
for d in dates:
nonwork_dates.append(d)
handled = True
if not handled:
raise MpkDirectiveError('Unknown directive ' + directive)
def read_tasks():
project_first_date = date.today()
known_tids = []
tasks = {}
level_tids = { 0: None }
levels = [0]
known_dow_keywords = {
'monday': 0,
'tuesday': 1,
'wednesday': 2,
'thursday': 3,
'friday':4,
'saturday': 5,
'sunday': 6
}
known_ref_keywords = [ '->' ]
nonwork_dows = []
nonwork_dates = []
for line in fileinput.input([]):
line = remove_comments(line)
if len(line) > 0:
level = calculate_level(line, levels, level_tids, known_tids)
parent_tid = level_tids[level]
# build a task
try:
words = line.split()
# divide into lists for ident, duration, dates
tokens = split_to_lists(words, known_dow_keywords, known_ref_keywords)
directives = tokens['directives']
dow_keywords = tokens['dow_keywords']
ref_keywords = tokens['ref_keywords']
idents = tokens['identifiers']
durations = tokens['durations']
dates = tokens['dates']
if len(directives) > 0:
process_directive(tokens, known_dow_keywords, nonwork_dows, nonwork_dates)
else:
if len(idents) > 0 or len(durations) > 0:
build_task(tokens, known_tids, tasks, project_first_date, dates, level, parent_tid, nonwork_dows, nonwork_dates)
else:
if len(dates) == 1:
project_first_date = dates[0]
else:
raise MpkParseError('Unknown line', fileinput.filelineno(), line)
except (MpkTokenError, MpkTaskError) as error:
raise MpkParseError(
'Cannot build task: ' + error.message, fileinput.filelineno(), line)
return known_tids, tasks
def print_list(tids, tasks):
print('task\tduration\tpredecessors\tresources')
for tid in tids:
task = tasks[tid]
print(task.format_list())
def print_schedule(tids, tasks):
project_first_date = None
project_last_date = None
if len(tasks) > 0:
tid = tids[0]
project_first_date = tasks[tid].first_day
project_last_date = tasks[tid].last_day
for tid in tids:
task = tasks[tid]
if task.first_day < project_first_date:
project_first_date = task.first_day
if task.last_day > project_last_date:
project_last_date = task.last_day
print('Project first day: ' + str(project_first_date))
print('Project last day: ' + str(project_last_date))
print()
print('task\tstart\tend')
for tid in tids:
task = tasks[tid]
print(task.format_schedule())
parser = argparse.ArgumentParser()
parser.add_argument('--list', help='List items', action='store_true')
parser.add_argument('--schedule', help='Calculate schedule',
action='store_true')
args = parser.parse_args()
# read input and parse tasks and durations
try:
tids, tasks = read_tasks()
except MpkParseError as error:
print(error.line)
print('Error: ' + error.message + ' in line: ' + str(error.lineno))
print('Stopped.')
quit()
if args.list:
print_list(tids, tasks)
if args.schedule:
print_schedule(tids, tasks)
```
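A hypothetical end-to-end check of the script above; the input format is inferred from `read_tasks` and `split_to_lists` (a bare date sets the project start, `.no-work` marks non-working weekdays, each task line is an identifier plus a duration such as `3d` or `1w`, and `->` chains to the previous task at the same level). Assumes it is run from the `src/` directory:
```python
import subprocess
import sys
import textwrap

plan = textwrap.dedent("""\
    2021-03-01
    .no-work saturday sunday
    design 3d
    build 1w design      # starts after 'design'
    test 2d ->           # '->' = after the previous task at this level
""")

# mpk.py reads its plan from stdin (fileinput with an empty file list)
result = subprocess.run([sys.executable, 'mpk.py', '--schedule'],
                        input=plan, capture_output=True, text=True)
print(result.stdout)
```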
#### File: mpk/src/Task.py
```python
from datetime import (date, timedelta)
from MpkError import (
  MpkDurationError,
  MpkParseError,
  MpkRefError,  # assumed to be defined in MpkError; raised below for a bad '->' reference
  MpkScheduleError,
  MpkTaskError,
  MpkTokenError
)
def split_idents(idents, known_idents):
new_idents = []
old_idents = []
for ident in idents:
if ident in known_idents:
old_idents.append(ident)
else:
new_idents.append(ident)
return new_idents, old_idents
def decode_duration(word):
if len(word) == 0:
raise MpkDurationError('Empty duration')
duration = timedelta(days = 0)
if word[-1] == 'd':
daycount = int(word[:-1])
duration = timedelta(days = daycount)
unit = 'd'
if word[-1] == 'w':
weekcount = int(word[:-1])
duration = timedelta(days = weekcount * 7)
unit = 'w'
return duration, unit
def is_nonworkday(d, nonwork_dows, nonwork_dates):
dow = d.weekday()
return dow in nonwork_dows or d in nonwork_dates
def is_w_nonworkday(d, nonwork_dows):
dow = d.weekday()
return dow in nonwork_dows
def find_next_work_day(walk, nonwork_dows, nonwork_dates, limit):
one_day = timedelta(days = 1)
count = 0
while is_nonworkday(walk, nonwork_dows, nonwork_dates):
walk = walk + one_day
count += 1
# don't skip more than the limit
if count > limit:
raise MpkScheduleError(
'More than ' + str(limit) + ' non-workdays')
return walk
def calc_work_days(first_day, duration, nonwork_dows, nonwork_dates, limit):
day_count = duration.days
one_day = timedelta(days = 1)
last_day = first_day - one_day
walk = first_day
work_days = []
for _ in range(0, day_count):
walk = find_next_work_day(walk, nonwork_dows, nonwork_dates, limit)
work_days.append(walk)
walk = walk + one_day
if len(work_days) > 0:
last_day = work_days[-1]
return last_day, work_days
def calc_w_work_days(first_day, duration, nonwork_dows, limit):
day_count = duration.days
one_day = timedelta(days = 1)
last_day = first_day + duration
walk = first_day
work_days = []
for _ in range(0, day_count):
if not is_w_nonworkday(walk, nonwork_dows):
work_days.append(walk)
walk = walk + one_day
if len(work_days) > 0:
last_day = work_days[-1]
return last_day, work_days
class Task:
def __init__(self, idents, durations, known_tids, tasks, project_first_day_date, dates, level, parent_tid, nonwork_dows, nonwork_dates, ref_keywords):
new_idents, old_idents = split_idents(idents, known_tids)
# validation
if len(new_idents) != 1:
raise MpkTaskError('No single new identifier')
if len(durations) > 1:
raise MpkTaskError('More than one duration')
if len(dates) > 1:
raise MpkTaskError('More than one task date')
# assign values
self.tid = new_idents[0]
self.predecessors = old_idents
self.dates = dates
self.parent_tid = parent_tid
if parent_tid is not None:
self.predecessors.append(parent_tid)
self.duration = None
self.unit = None
self.level = level
# add '->' reference (most recent task at same level)
if '->' in ref_keywords:
i = len(known_tids) -1
found = False
while i > -1 and not found:
possible_tid = known_tids[i]
possible_task = tasks[possible_tid]
if possible_task.level == level:
self.predecessors.append(possible_tid)
found = True
i -= 1
if not found:
raise MpkRefError("Cannot find previous task for '->'")
# must start no earlier than project start date
possible_first_day = project_first_day_date
# must start no earlier than predecessor end date + 1
one_day = timedelta(days = 1)
nonwork_day_limit = 14
for tid in self.predecessors:
task = tasks[tid]
task_possible_first_day = task.last_day + one_day
if task_possible_first_day > possible_first_day:
possible_first_day = task_possible_first_day
for d in dates:
if d > possible_first_day:
possible_first_day = d
self.first_day = find_next_work_day(possible_first_day,
nonwork_dows, nonwork_dates,
nonwork_day_limit)
# decode task duration and compute work days and last work day
self.work_days = []
self.last_day = self.first_day - one_day
if len(durations) == 1:
try:
self.duration, self.unit = decode_duration(durations[0])
if self.unit == 'd':
self.last_day, self.work_days = calc_work_days(self.first_day, self.duration, nonwork_dows, nonwork_dates, nonwork_day_limit)
if self.unit == 'w':
self.last_day, self.work_days = calc_w_work_days(self.first_day, self.duration, nonwork_dows, nonwork_day_limit)
except (MpkDurationError, MpkScheduleError) as error:
raise MpkTaskError(error.message)
while parent_tid is not None:
parent_task = tasks[parent_tid]
parent_task.update_last_day(self.last_day)
parent_tid = parent_task.parent_tid
def update_last_day(self, last_day):
if last_day > self.last_day:
self.last_day = last_day
def format_list(self):
s = self.tid
if self.parent_tid is not None:
s += ' P[' + str(self.parent_tid) + ']'
s += ' (' + str(self.level) + ')'
if self.duration is not None:
if self.unit == 'd':
days = self.duration.days
s += '\t' + str(days) + 'd'
if self.unit == 'w':
weeks = int(self.duration.days / 7)
s += '\t' + str(weeks) + 'w'
preds = self.predecessors.copy()
for d in self.dates:
preds.append(str(d))
predecessors = ', '.join(preds)
s += '\t' + '[' + predecessors + ']'
if len(self.work_days) > 0:
strings = []
for work_day in self.work_days:
strings.append(str(work_day))
s += '\t' + '[' + ', '.join(strings) + ']'
return s
def format_schedule(self):
s = ' ' * self.level + self.tid
s += '\t' + str(self.first_day)
s += '\t' + str(self.last_day)
return s
```
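A small worked example of `calc_work_days` above, assuming it is imported from this module (run from the `src/` directory); weekday numbers follow Python's `date.weekday()`, matching the `saturday`/`sunday` mapping in `mpk.py`:
```python
# Hypothetical check: a 3-day task starting on a Friday with weekends excluded.
from datetime import date, timedelta
from Task import calc_work_days

first = date(2021, 3, 5)                 # a Friday
duration = timedelta(days=3)
last, days = calc_work_days(first, duration, nonwork_dows=[5, 6],
                            nonwork_dates=[], limit=14)
print(days)   # Friday Mar 5, then Monday Mar 8 and Tuesday Mar 9 (weekend skipped)
print(last)   # 2021-03-09
```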
|
{
"source": "jfizzle/nest-tensorflow-1",
"score": 3
}
|
#### File: nest-tensorflow-1/wwn/access_token.py
```python
import urllib
import urllib2
import json
nest_auth_url = 'https://home.nest.com/login/oauth2'
nest_access_token_url = 'https://api.home.nest.com/oauth2/access_token'
product_id = ''
product_secret = ''
def get_access_token(authorization_code):
"""Paste get_access_token(authorization_code) snippet below this line"""
return
def authorization_url():
query = urllib.urlencode({
'client_id': product_id,
'state': 'STATE'
})
return "{0}?{1}".format(nest_auth_url, query)
```
|
{
"source": "jfizzy/ResistanceDB",
"score": 2
}
|
#### File: ResistanceDB/leo/leo.pyw
```python
import sys
import os
from PyQt5 import QtWidgets, QtGui
from PyQt5.QtWidgets import QApplication, QMainWindow
from gui.leo_gui import Ui_MainWindow
class ApplicationWindow(QtWidgets.QMainWindow):
ICON = os.path.join(os.path.abspath(os.path.dirname(__file__)), "gui", "leo-icon.png")
def __init__(self, parent):
super(ApplicationWindow, self).__init__()
self.parent = parent
self.main_window = Ui_MainWindow(self)
self.main_window.setupUi(self)
#self._shutdown = False
self.setWindowIcon(QtGui.QIcon(self.ICON))
def shut_er_down(self):
QtWidgets.QMainWindow.close(self)
def closeEvent(self, event):
self.main_window.shutdown()
def main():
app = QApplication(sys.argv)
window = ApplicationWindow(app)
window.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
```
#### File: mia/mia_backend/ez_logger.py
```python
import time
import datetime
import logging
import logging.handlers
class EZLogger:
def __init__(self, logger_name, log_filename, filesize, backupCount, filemode):
self._log_name = logger_name
self._log_filename = log_filename
self._filesize = filesize
self._backupCount = backupCount
self._logger = logging.getLogger(self._log_name)
self._logger.setLevel(logging.DEBUG)
handler = logging.handlers.RotatingFileHandler(\
self._log_filename, maxBytes=self._filesize, backupCount=self._backupCount)
self._logger.addHandler(handler)
def debug(self, msg, *args, **kwargs):
""" logs a debug messaege to the log file """
self._logger.debug(self.set_timestamp("DEBUG: {}".format(msg)), *args, **kwargs)
def info(self, msg, *args, **kwargs):
""" logs an info message to the log file """
self._logger.info(self.set_timestamp("INFO: {}".format(msg)), *args, **kwargs)
def warning(self, msg, *args, **kwargs):
""" logs a warning message to the log file """
self._logger.warning(self.set_timestamp("WARNING: {}".format(msg)), *args, **kwargs)
def error(self, msg, *args, **kwargs):
""" logs an error message to the log file """
self._logger.error(self.set_timestamp("ERROR: {}".format(msg)), *args, **kwargs)
def critical(self, msg, *args, **kwargs):
""" logs a critical message to the log file """
self._logger.critical(self.set_timestamp("CRITICAL: {}".format(msg)), *args, **kwargs)
def set_timestamp(self, msg):
""" prepends a timestamp to the message """
stamp = time.time()
        formatted = datetime.datetime.fromtimestamp(stamp).strftime('%Y-%m-%d %H:%M:%S - ')
return "{}{}".format(formatted, msg)
```
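A minimal usage sketch for `EZLogger` as defined above (the file name and rotation size are illustrative, not taken from the source):
```python
log = EZLogger(logger_name="demo", log_filename="demo.log",
               filesize=1024 * 1024, backupCount=3, filemode='w')
log.info("transfer started")
log.error("could not open {}".format("sample.raw"))
```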
#### File: mia/mia_backend/fileutil.py
```python
import os
import re
def get_files_by_ext(src, dst, ext, end_ext):
""" gets a list of files from a directory by extension 'ext'.
Returns a list of tuples in the format
(src/filename.ext, dst/filename.end_ext)
If end_ext is none, filename is kept the same
"""
good_files = []
ext_re = ext
if not ext_re.startswith('.'):
ext_re = '.' + ext_re
directory_files = [f for f in os.walk(src)]
for directory_tuple in directory_files:
for file in directory_tuple[2]:
if file.endswith(ext_re):
# if the file has the extension, add to good_file list which is a tuple of
# (current directory, destination directory)
if end_ext:
f_renamed = file.replace(ext, end_ext)
else:
f_renamed = file
good_files.append((os.path.join(directory_tuple[0], file),
os.path.join(dst, f_renamed)))
return good_files
```
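A hypothetical call illustrating the `(src_path, dst_path)` tuples described in the docstring above (directories and extensions are made up):
```python
pairs = get_files_by_ext('C:/instrument/raw', 'C:/converted', '.raw', '.mzXML')
for src_path, dst_path in pairs:
    print(src_path, '->', dst_path)
# e.g. C:/instrument/raw/run1/sample.raw -> C:/converted/sample.mzXML
```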
#### File: mia/mia_backend/mia_manager.py
```python
from enum import Enum
import threading
import os
import queue
from mia_backend.ez_logger import EZLogger
from mia_backend.file_mover import FileMover, FileMoverException
#from mia_backend.fileparser import FileParser, FileParserException
from mia_backend.config import Config
class Instruction(Enum):
""" defines an enum for mia instruction set """
START = 1
UPDATE_CONFIG = 2
UPDATE_INTERVAL = 3
QUIT = 4
SHUTDOWN = 5
class MiaManager():
"""
    Main backend driver class for the Mia application -
    interfaces with gui and backend components
"""
LOG_FILE = os.path.join(os.path.abspath(os.path.dirname(__file__)), "mia_log.log")
CONFIG = os.path.join(os.path.abspath(os.path.dirname(__file__)), ".miaconfig")
def __init__(self, parent):
print(self.LOG_FILE)
print(self.CONFIG)
self._parent = parent
self._logger = EZLogger(logger_name="MiaLogger",
log_filename=self.LOG_FILE,
filesize=10*1024*1024, backupCount=5,
filemode='w')
self._config = Config(self._logger)
self._config.read_config(self.CONFIG)
#if self._config.
self._parent.update_status("Initialized!")
self._config_lock = threading.Lock()
self._work_queue = queue.Queue()
self._running = False
self._worker_thread = threading.Thread(None, self.run, "worker_thread", {}, {})
self._worker_thread.start()
self._transfer_thread = None
self._max_threads = 5
def get_worker(self):
""" get the worker thread """
return self._worker_thread
def get_config(self):
""" get configuration settings """
return self._config
def start(self, config, callback):
""" starts mia
callback passed by caller, mia will call this callback on successfull execution
"""
if not self._running and (config is None or self.check_config(config)):
try:
# file mover will throw exception if directories are not found
# file_mover = FileMover(config.SRC_DIRS, config.INTERIM,
# config.FILE_EXT, None, self._logger)
# file_mover.move_files()
if config:
self._config_lock.acquire()
self._config.cpy_config(config)
self._config.write_config(self.CONFIG)
self._config_lock.release()
self._parent.update_status("Config valid. Starting Mia!")
print(str(self._config))
self._work_queue.put(Instruction.START)
if callback:
callback()
except FileMoverException as ex:
self._parent.update_status(ex)
else:
self._parent.update_status("Mia is already running.")
def transfer(self):
""" handles file transfer and file parsing
Creates a FileMover object to handle file moving, if there are files left to move
and we have not received the quit signal, then grab a file and perform transfer.
If we have not receieved quit signal at the end of the transfer, reset the interval to
do work again.
"""
self._config_lock.acquire()
file_mover = FileMover(self._config.SRC_DIRS,
self._config.INTERIM,
self._config.DST_DIR,
self._config.FILE_EXT,
self._config.CONVERTER,
self._config.CONVERTER_FLAGS,
self._logger,
self._config.DATABASE)
#number of threads is max_threads if threaded, otherwise 1 (synchronous)
num_threads = self._max_threads if self._config.THREADED else 1
self._config_lock.release()
threads = []
#file_movers = file_mover.FileMover(src, self._config.DST_DIR, self._config.FILE_EXT, None
#self._transfer_thread = threading.Timer(0, self.do_transfer, {}, {})
# while we are still running and have files to move
while self._running and file_mover.files_left():
if self._config.THREADED:
for i in range(0, num_threads):
if file_mover.files_left() and self._running:
threads.append(threading.Thread(None, file_mover.process_next_file, "thread_{}".format(i), {None}, {}))
threads[i].start()
for i in range(0, len(threads)):
if threads[i]:
threads[i].join()
threads.clear()
else:
print("Non-threaded")
file_mover.process_next_file(lambda x : self._parent.update_status_bar())#"Processing: {}".format(x.get_full_file_src())))
print("Exitting...")
#(lambda x : self._parent.update_status("Processing file {}".format(x.get_full_file_src())),))
#file_mover.process_next_file(lambda x : self._parent.update_status("Processing file {}".format(x.get_full_file_src())))
# if still running at end of file, reset interval to do another move
if self._running:
self._parent.update_status("Finished parsing raw files. Waiting {} minutes to check again.".format(self._config.INTERVAL))
self._parent.update_status_bar("Running... waiting to transfer files.")
self._transfer_thread = threading.Timer(self._config.INTERVAL * 60, self.transfer, {}, {})
self._transfer_thread.start()
def run(self):
""" handles the actual running of mia """
instruction = self._work_queue.get(block=True, timeout=None)
while instruction != Instruction.SHUTDOWN:
self.parse_instruction(instruction)
print("parsed instruction {}".format(instruction))
instruction = self._work_queue.get(block=True, timeout=None)
self._running = False
print("Received shutdown signal")
def stop(self, callback):
""" GUI is requesting mia shut down """
self._work_queue.put(Instruction.QUIT, block=True, timeout=None)
self._parent.update_status("Quit signal received, waiting for mia to finish processes...")
self._parent.mia_stopping()
if self._transfer_thread and self._transfer_thread.is_alive():
self._transfer_thread.join()
self._parent.update_status("Mia has stopped transfers.")
if callback:
callback()
def shutdown(self):
""" shuts down all worker and timer threads, informs parent when threads have joined """
self._work_queue.put(Instruction.QUIT, block=True, timeout=None)
self._work_queue.put(Instruction.SHUTDOWN, block=True, timeout=None)
self._parent.update_status("Shutdown signal received, waiting for mia to finish processes...")
self._parent.mia_stopping()
if self._transfer_thread and self._transfer_thread.is_alive():
print("Waiting on transfer thread?")
print(self._transfer_thread)
self._transfer_thread.join()
print("Transfer joined")
self._worker_thread.join()
self._parent.update_status("Mia has shut down.")
def parse_instruction(self, instruct):
"""
"""
if instruct == Instruction.START:
print("start!")
            # immediately start transfer, wait time of 0 seconds
self._running = True
self._transfer_thread = threading.Timer(0, self.transfer, {}, {})
self._transfer_thread.start()
elif instruct == Instruction.UPDATE_CONFIG:
print("update config!")
elif instruct == Instruction.UPDATE_INTERVAL:
print("update interval!")
elif instruct == Instruction.QUIT:
self._running = False
if self._transfer_thread:
self._transfer_thread.cancel()
print("quit!")
def check_config(self, config):
""" checks that mia has received the default minimum valid arguments for config """
valid = True
if not config.DATABASE or config.DATABASE == '':
self._parent.update_status(\
"Error detected in config: No Database Selected")
valid = False
if not config.SRC_DIRS:
self._parent.update_status(\
"Error detected in config: No Source Directories")
valid = False
if not config.DST_DIR or config.DST_DIR == '':
self._parent.update_status(\
"Error detected in config: No Destination Directory")
valid = False
if not config.CONVERTER or config.CONVERTER == '':
self._parent.update_status(\
"Error detected in config: ReAdW.exe path not set")
valid = False
if not config.INTERIM or config.INTERIM == '':
self._parent.update_status(\
"Error detected in config: No Interim Directory"
)
valid = False
return valid
```
#### File: ResistanceDB/pablo/state-1.py
```python
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
app = dash.Dash(__name__)
app.layout = html.Div([
dcc.Input(id='input-1', type="text", value='Montréal'),
dcc.Input(id='input-2', type="text", value='Canada'),
html.Div(id='output')
])
@app.callback(Output('output', 'children'),
[Input('input-1', 'value'),
Input('input-2', 'value')])
def update_output(input1, input2):
return u'Input 1 is "{}" and Input 2 is "{}"'.format(input1, input2)
if __name__ == '__main__':
app.run_server(debug=True)
```
|
{
"source": "jfizzy/UC_JSON_4_WRLD",
"score": 3
}
|
#### File: UC_JSON_4_WRLD/src/wrldification_tool.py
```python
import json #need this for pretty much everything
from pprint import pprint
import os
from fnmatch import fnmatch
import pathlib
from collections import OrderedDict
from enum import Enum
print("Starting conversion...")
def lookup(dic, key, *keys):
if keys:
return lookup(dic.get(key, {}), *keys)
return dic.get(key)
def wtype_switch(wtype):
return { #this is a start, may want to make this better
'Mechanical': 'room',
'Stairway': 'stairs',
'Elevator': 'elevator',
'Hallway': 'hallway',
'Door': 'door',
'Escalator': 'escalator',
'Janitor': 'room',
'Washroom': 'bathroom',
'room': 'room',
'stairs': 'stairs',
'elevator': 'elevator',
'hallway': 'hallway',
'door': 'door',
'escalator': 'escalator',
'bathroom': 'bathroom',
'wall': 'wall',
'pathway': 'pathway'
}.get(wtype,'room')
def removekey(d, key):
r = dict(d)
del r[key]
return r
def alter_props(properties_dict):
properties = {}
id = None
wtype = None
name = None
for value in properties_dict:
properties[value] = properties_dict[value]
try:
id = str(properties['OBJECTID'])
except KeyError:
id = properties['id'] # allows partially processed files
try:
wtype = properties['RM_USE_DESC']
wtype = wtype_switch(wtype)
except KeyError:
wtype = properties['type'] # allows partially processed files
try:
nameparts = str(properties['BLD_FLR_RM_ID']).split("_")
name = nameparts[0]+ " " + nameparts[2]
except KeyError:
try:
if isinstance(properties['name'], str) and "Room: " in properties['name']:
name = properties['name'].replace('Room: ', '')
else:
name = properties['name']
if isinstance(wtype, str):
if wtype == 'bathroom' and name != 'Men\'s Bathroom' and name != 'Women\'s Bathroom':
name = 'Bathroom'
elif wtype == 'elevator':
name = 'Elevator'
elif wtype == 'escalator':
name = 'Escalator'
elif wtype == 'stairs':
name = 'Stairs'
elif wtype == 'hallway':
name = None
except KeyError:
pass
properties = {}
properties.update({'id': id, 'type': wtype, 'name': name}) # add the new ones
return properties
def order_levels(level_list):
sorted = []
for level in level_list:
if sorted:
this = LEVELS.get(level['id'].split('Level')[1])
for i in range(len(sorted)):
that = LEVELS.get(sorted[i]['id'].split('Level')[1])
#print(str(level['id']) + ' vs ' + str(sorted[i]['id']))
if this < that:
#print(str(level['id']) + ' < ' + str(sorted[i]['id']))
#print('placed at index '+str(i))
sorted.insert(i, level)
break
elif this > that:
#print(str(level['id']) + ' > ' + str(sorted[i]['id']))
if i == len(sorted)-1:
#print('placed at end')
sorted.append(level)
break
else:
continue
else:
pprint('found the same level twice!!!!')
else:
sorted = [level]
#print('-----------------------')
for i in range(len(sorted)):
sorted[i]['z_order'] = i
return sorted
def fix_level_names(level_list):
for level in level_list:
if 'Level' in level['name']:
level['name'] = level['name'].split('Level')[1]
if not ' ' in level['readable_name']:
if 'Level' in level['readable_name']:
l = level['readable_name'].split('Level')[1]
level['readable_name'] = 'Level '+l
return level_list
root = '.'
geo_pattern = "*.geojson"
json_pattern = '*main.json'
LEVELS = {'B3': 0, 'B2':1, 'B1':2, 'M1':3, 'G1':4, 'G1A':5, '01':6, '01A':7,
'02':8, '02A':9, '03':10, '03A':11, '04':12, '04A':13, '05':14,
'05A':15, '06':16, '06A':17, '07':18, '08':19, '09':20, '10':21,
'11':22, '12':23, 'P1':24, 'P2':25}
for path, subdirs, files in os.walk(root):
for name in files:
if fnmatch(name, geo_pattern) and not name.startswith('Path-'):
with open(os.path.join(path, name), 'r') as f:
data = json.load(f)
feature_list = data['features']
for feature_dict in feature_list:
properties_dict = feature_dict['properties']
properties_dict = alter_props(properties_dict)
feature_dict['properties'] = properties_dict # apply changes
if feature_list[len(feature_list)-1]['properties'].get('type') == 'building_outline':
feature_list.insert(0, feature_list.pop(len(feature_list)-1))
with open(os.path.join(path, name), 'w') as f:
json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))
pprint('Wrote cleaned file: '+path+'/'+name)
elif fnmatch(name, json_pattern) and not name.startswith('Path-'):
with open(os.path.join(path, name), 'r') as f:
data = json.load(f)
level_list = data.get('levels')
sorted = order_levels(level_list) # orders the levels and applies the correct z order
#get the entrance level
entrance_index = 1
for level in sorted:
if level['id'] == 'Level01':
entrance_index = level['z_order']
data.update({'entrance_level': entrance_index})
fixed_names = fix_level_names(sorted)
data['levels'] = fixed_names
with open(os.path.join(path, name), 'w') as f:
json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))
pprint('Wrote cleaned main file: '+path+'/'+name)
if __name__ == "__main__":
pass
```
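A small worked example of `order_levels` above; note the module body runs the directory walk at import time, so this sketch assumes the helper is exercised from within the module itself (for example under the `__main__` guard):
```python
# Level ids follow the 'Level<code>' pattern used by the LEVELS lookup table.
levels = [{'id': 'Level02'}, {'id': 'LevelB1'}, {'id': 'Level01'}]
for lvl in order_levels(levels):
    print(lvl['id'], lvl['z_order'])
# LevelB1 0, Level01 1, Level02 2
```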
|
{
"source": "jfjeld15/HackED-2021-Bussifiers",
"score": 3
}
|
#### File: jfjeld15/HackED-2021-Bussifiers/resortsnow.py
```python
import tkinter
from tkinter.constants import NW
""" LABELS:
wpop: Whistler yearly snowfall (https://www.onthesnow.com/british-columbia/whistler-blackcomb/historical-snowfall)
SPpop: Sun Peaks (https://www.onthesnow.com/british-columbia/sun-peaks/historical-snowfall)
SSpop: Silverstar (https://www.onthesnow.com/british-columbia/silver-star/ski-resort)
Revpop: Revelstoke (https://www.onthesnow.com/british-columbia/revelstoke-mountain/historical-snowfall)
kickpop: Kicking Horse (https://www.onthesnow.com/british-columbia/kicking-horse/historical-snowfall)
louisepop: Lake Louise (https://www.onthesnow.com/alberta/lake-louise/historical-snowfall)
SVpop: Sunshine Village (https://www.onthesnow.com/alberta/sunshine-village/historical-snowfall)
fernpop: Fernie (https://www.onthesnow.com/british-columbia/fernie-alpine/historical-snowfall)
RMpop: Red Mountain (https://www.onthesnow.com/british-columbia/red-resort/historical-snowfall)
BWpop: Big White (https://www.onthesnow.com/british-columbia/big-white/historical-snowfall)
"""
def genLabels(root, yearS):
if yearS.get() == 2011:
wpop,SPpop,SSpop,Revpop,kickpop,louisepop,SVpop,fernpop,RMpop,BWpop = "711","414","406","485","376","467","531","754","353","1379"
elif yearS.get() == 2012:
wpop,SPpop,SSpop,Revpop,kickpop,louisepop,SVpop,fernpop,RMpop,BWpop = "696","437","366","561","561","757","688","787","290","424"
elif yearS.get() == 2013:
wpop,SPpop,SSpop,Revpop,kickpop,louisepop,SVpop,fernpop,RMpop,BWpop = "556","386","376","549","498","465","429","544","391","508"
elif yearS.get() == 2014:
wpop,SPpop,SSpop,Revpop,kickpop,louisepop,SVpop,fernpop,RMpop,BWpop = "1105","545","643","724","518","338","574","688","620","927"
elif yearS.get() == 2015:
wpop,SPpop,SSpop,Revpop,kickpop,louisepop,SVpop,fernpop,RMpop,BWpop = "1283","932","630","638","569","544","759","960","335","810"
elif yearS.get() == 2016:
wpop,SPpop,SSpop,Revpop,kickpop,louisepop,SVpop,fernpop,RMpop,BWpop = "803","467","574","615","617","737","762","919","841","861"
elif yearS.get() == 2017:
wpop,SPpop,SSpop,Revpop,kickpop,louisepop,SVpop,fernpop,RMpop,BWpop = "950","277","432","470","480","505","442","701","348","579"
elif yearS.get() == 2018:
wpop,SPpop,SSpop,Revpop,kickpop,louisepop,SVpop,fernpop,RMpop,BWpop = "749","414","386","531","447","536","686","643","297","630"
elif yearS.get() == 2019:
wpop,SPpop,SSpop,Revpop,kickpop,louisepop,SVpop,fernpop,RMpop,BWpop = "714","178","203","551","384","445","754","523","312","404"
whistler = tkinter.Label(root, text="Whistler total snowfall in {}:\n{} cm".format(yearS.get(),wpop), width=28, bg="gray")
whistler.place(x=833, y=66, anchor=NW)
sunP = tkinter.Label(root, text="Sun Peaks total snowfall in {}:\n{} cm".format(yearS.get(),SPpop), width=28, bg="gray")
sunP.place(x=833, y=116, anchor=NW)
silv = tkinter.Label(root, text="SilverStar total snowfall in {}:\n{} cm".format(yearS.get(),SSpop), width=28, bg="gray")
silv.place(x=833, y=166, anchor=NW)
revy = tkinter.Label(root, text="Revelstoke total snowfall in {}:\n{} cm".format(yearS.get(),Revpop), width=28, bg="gray")
revy.place(x=833, y=216, anchor=NW)
kicky = tkinter.Label(root, text="Kicking Horse total snowfall in {}:\n{} cm".format(yearS.get(),kickpop), width=28, bg="gray")
kicky.place(x=833, y=266, anchor=NW)
louise = tkinter.Label(root, text="Lake Louise total snowfall in {}:\n{} cm".format(yearS.get(),louisepop), width=28, bg="gray")
louise.place(x=833, y=316, anchor=NW)
SV = tkinter.Label(root, text="Sunshine Village total snowfall in {}:\n{} cm".format(yearS.get(),SVpop), width=28, bg="gray")
SV.place(x=833, y=366, anchor=NW)
fernie = tkinter.Label(root, text="Fernie total snowfall in {}:\n{} cm".format(yearS.get(),fernpop), width=28, bg="gray")
fernie.place(x=833, y=416, anchor=NW)
RM = tkinter.Label(root, text="Red Mountain total snowfall in {}:\n{} cm".format(yearS.get(),RMpop), width=28, bg="gray")
RM.place(x=833, y=466, anchor=NW)
BW = tkinter.Label(root, text="Big White total snowfall in {}:\n{} cm".format(yearS.get(),BWpop), width=28, bg="gray")
BW.place(x=833, y=516, anchor=NW)
```
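A hypothetical driver for `genLabels` above; `yearS` must be a tkinter variable whose `.get()` returns one of the years 2011-2019, and the window size is a guess that leaves room for the labels placed at x=833:
```python
import tkinter
from resortsnow import genLabels

root = tkinter.Tk()
root.geometry("1100x600")
yearS = tkinter.IntVar(value=2015)
genLabels(root, yearS)
root.mainloop()
```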
|
{
"source": "jfjlaros/arduino-serial-mux",
"score": 3
}
|
#### File: examples/demo/demo.py
```python
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, FileType
from time import sleep
from typing import BinaryIO
from serial import Serial
from struct import pack
def demo(handle: BinaryIO, device: str) -> None:
"""Run a simple ping loop.
:arg handle: Output file.
:arg device: Device name.
"""
interface = Serial(device)
while True:
for i in range(256):
            interface.write(pack('B', i))
        sleep(1)
        handle.write(interface.read(interface.in_waiting).decode())
        sleep(1)
def main() -> None:
"""Main entry point."""
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument(
'device', metavar='DEVICE', type=str, help='device')
parser.add_argument(
'-o', dest='handle', metavar='OUTPUT', type=FileType('w'),
default='-', help='output file')
try:
args = parser.parse_args()
except IOError as error:
parser.error(error)
demo(args.handle, args.device)
if __name__ == '__main__':
main()
```
#### File: arduino-serial-mux/serial_mux/cli.py
```python
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, FileType
from threading import Thread
from typing import BinaryIO
from . import doc_split, usage, version
from .serial_mux import SerialMux
def serial_mux(
handle: BinaryIO, log_handle: BinaryIO,
device: str, baudrate: int, wait: int) -> None:
"""Serial multiplexer service.
:arg handle: Output handle.
:arg log_handle: Log handle.
:arg device: Serial device name.
:arg baudrate: Baud rate.
:arg wait: Time in seconds before communication starts.
"""
mux = SerialMux(device, baudrate, wait, log_handle)
threads = [Thread(target=mux.update, daemon=True)]
threads[0].start()
handle.write('Detected {} virtual serial ports.\n'.format(
len(mux.devices)))
for i, device in enumerate(mux.devices):
handle.write(
' Virtual serial device {}: {}\n'.format(i + 1, device.name))
threads.append(Thread(target=device.update, daemon=True))
threads[-1].start()
handle.write('\nPress Ctrl+C to exit.\n')
for thread in threads:
thread.join()
def _arg_parser() -> object:
"""Command line argument parsing."""
parser = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
description=usage[0], epilog=usage[1])
parser.add_argument(
'-o', dest='handle', metavar='OUTPUT', type=FileType('w'),
default='-', help='output file')
parser.add_argument(
'-l', dest='log_handle', metavar='LOG', type=FileType('w'),
default=None, help='log file')
parser.add_argument(
'-b', dest='baudrate', type=int, default=9600, help='baud rate')
parser.add_argument(
'-w', dest='wait', type=int, default=2,
help='time before communication starts')
parser.add_argument(
'-v', action='version', version=version(parser.prog))
parser.add_argument('device', metavar='DEVICE', type=str, help='device')
return parser
def main() -> None:
"""Main entry point."""
parser = _arg_parser()
try:
args = parser.parse_args()
except IOError as error:
parser.error(error)
try:
serial_mux(**{k: v for k, v in vars(args).items()
if k not in ('func', 'subcommand')})
except (IOError, ValueError) as error:
parser.error(error)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
```
#### File: arduino-serial-mux/serial_mux/__init__.py
```python
from pkg_resources import DistributionNotFound, get_distribution
from .serial_mux import SerialMux
from .vserial import VSerial
def _get_metadata(name: str) -> str:
try:
pkg = get_distribution('arduino_serial_mux')
except DistributionNotFound:
pkg = get_distribution('serial_mux')
for line in pkg.get_metadata_lines(pkg.PKG_INFO):
if line.startswith('{}: '.format(name)):
return line.split(': ')[1]
return ''
_copyright_notice = 'Copyright (c) {} <{}>'.format(
_get_metadata('Author'), _get_metadata('Author-email'))
usage = [_get_metadata('Summary'), _copyright_notice]
def doc_split(func: callable) -> str:
return func.__doc__.split('\n\n')[0]
def version(name: str) -> str:
return '{} version {}\n\n{}\nHomepage: {}'.format(
_get_metadata('Name'), _get_metadata('Version'), _copyright_notice,
_get_metadata('Home-page'))
```
#### File: arduino-serial-mux/serial_mux/serial_mux.py
```python
from serial import serial_for_url
from threading import Lock
from time import sleep, time
from typing import BinaryIO
from .vserial import VSerial
_protocol = b'serialMux'
_version = (2, 0, 0)
_escape = b'\xff'
_control_port = 0xfe
_commands = {
'protocol': b'\x00',
'version': b'\x01',
'get_ports': b'\x02',
'enable': b'\x03',
'disable': b'\x04',
'reset': b'\x05'}
class SerialMux():
"""Serial multiplexer."""
def __init__(
self: object, device: str, baudrate: int=9600, wait: int=2,
log: BinaryIO=None) -> None:
"""
:arg device: Device name.
:arg baudrate: Baud rate.
:arg wait: Time in seconds before communication starts.
:arg log: Open writeable handle to a log file.
"""
self._log = log
self._mutex = Lock()
self._port_rx = _control_port
self._port_tx = _control_port
self._serial = serial_for_url(device, baudrate=baudrate)
sleep(wait)
_assert_protocol(self._cmd('protocol', len(_protocol)))
_assert_version(self._cmd('version', len(_version)))
number_of_ports = ord(self._cmd('get_ports', 1))
self.devices = []
for i in range(number_of_ports):
self.devices.append(VSerial(self, i))
self._cmd('enable', 1)
def send(self: object, port: int, data: bytes) -> None:
"""Send data from a virtual serial device to the serial device.
:arg port: Virtual serial port.
:arg data: Data.
"""
with self._mutex:
self._write(port, data)
def update(self: object) -> None:
"""Receive serial data and send it to the corresponding virtual
serial device."""
while True:
self._update()
def _cmd(self: object, cmd: str, size: int) -> bytes:
"""Send a control comand.
:arg cmd: Command.
:returns: Resonse of control command.
"""
self._write(_control_port, _commands[cmd])
data = self.read(size)
if self._port_rx != _control_port or not data:
raise IOError('invalid control command response')
return data
def _update(self: object) -> None:
"""Receive serial data and send it to the corresponding virtual
serial device."""
data = self.read(1)
if self._port_rx != _control_port:
self.devices[self._port_rx].receive(data)
def read(self: object, size: int) -> bytes:
"""Read from the serial device.
        :arg size: Number of bytes to read.
        :returns: Data read from the serial device.
"""
data = b''.join([self._read() for _ in range(size)])
self._msg(self._port_rx, data, '-->')
return data
def _msg(self: object, port: str, data: bytes, way: str) -> None:
"""Log a message.
:arg port: Virtual serial port.
:arg data: Data.
:arg way: Data transfer direction.
"""
if self._log:
self._log.write('{:012.2f} {} {:02x} : {} ({})\n'.format(
time(), way, port, _hex(data), len(data)))
self._log.flush()
def _read(self: object) -> bytes:
"""Read from the serial device.
:returns: Data.
"""
while (byte := self._serial.read()) == _escape:
port = self._serial.read()
if port == _escape:
return _escape
else:
self._port_rx = ord(port)
return byte
def _write(self: object, port: int, data: bytes) -> None:
"""Write to the serial device.
:arg port: Virtual serial port.
:arg data: Data.
"""
if port != self._port_tx:
self._port_tx = port
self._serial.write(_escape + bytes([self._port_tx]))
for byte in [bytes([b]) for b in data]:
if byte == _escape:
self._serial.write(_escape)
self._serial.write(byte)
self._msg(self._port_tx, data, '<--')
def _assert_protocol(protocol: str) -> None:
if protocol != _protocol:
raise IOError('invalid protocol header')
def _assert_version(version: tuple) -> None:
if version[0] != _version[0] or version[1] > _version[1]:
raise IOError(
'version mismatch (device: {}, client: {})'.format(
'.'.join(map(str, version)),
'.'.join(map(str, _version))))
def _hex(data: bytes) -> str:
return ' '.join(list(map(lambda x: '{:02x}'.format(x), data)))
```
#### File: arduino-serial-mux/serial_mux/vserial.py
```python
from os import getpid, openpty, read, ttyname, write
from tty import setcbreak
class VSerial():
"""Virtual serial device."""
def __init__(self: object, mux: object, port: int) -> None:
"""
:arg mux: Serial multiplexer.
:arg port: Virtual serial port.
"""
self._mux = mux
self._port = port
self._master, self._slave = openpty()
setcbreak(self._master)
self.name = ttyname(self._slave)
def receive(self: object, data: bytes) -> None:
"""Receive serial data.
:arg data: Data.
"""
write(self._master, data)
def update(self: object) -> None:
"""Send serial data."""
while True:
self._update()
def _update(self: object) -> None:
"""Send serial data."""
self._mux.send(self._port, read(self._master, 32))
```
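The CLI above drives `SerialMux` for you; used as a plain library, the flow is roughly the sketch below. This is a hedged example, not part of the package: the device path `/dev/ttyACM0`, the baud rate and the use of pyserial's `Serial` to open a virtual port are illustrative assumptions, and a device running the serialMux firmware must be attached.
```python
from threading import Thread

from serial import Serial
from serial_mux import SerialMux

mux = SerialMux('/dev/ttyACM0', baudrate=9600)

# Shuttle data from the physical device to the virtual ports in the background.
Thread(target=mux.update, daemon=True).start()
for device in mux.devices:
    print('virtual port:', device.name)
    Thread(target=device.update, daemon=True).start()

# Any serial-aware program can now open one of the reported pty names.
port = Serial(mux.devices[0].name)
port.write(b'hello')
```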
#### File: arduino-serial-mux/tests/test_serial_mux.py
```python
from serial_mux import SerialMux
from serial_mux.serial_mux import (
_assert_protocol, _assert_version, _commands, _protocol,
_version)
from _serial import serial_for_url
from serial_mux import serial_mux
serial_mux.serial_for_url = serial_for_url
def test_assert_protocol_pass() -> None:
_assert_protocol(_protocol)
def test_assert_protocol_fail() -> None:
try:
_assert_protocol('')
except IOError as error:
assert str(error) == 'invalid protocol header'
else:
assert False
def test_assert_version_pass() -> None:
_assert_version(_version)
def test_assert_version_fail() -> None:
try:
_assert_version((0, 0, 0))
except IOError as error:
assert str(error).startswith('version mismatch')
else:
assert False
def test_init() -> None:
assert SerialMux('', wait=0)
def test_cmd_fail_1() -> None:
mux = SerialMux('', wait=0)
try:
mux._cmd('protocol', 3)
except IOError as error:
assert str(error) == 'invalid control command response'
else:
assert False
def test_cmd_fail_2() -> None:
mux = SerialMux('', wait=0)
mux._serial.prepare(b'\xff\x00\x00\x00\x00')
try:
mux._cmd('protocol', 3)
except IOError as error:
assert str(error) == 'invalid control command response'
else:
assert False
def test_cmd() -> None:
mux = SerialMux('', wait=0)
for k, v in _commands.items():
mux._serial.prepare(b'\x00')
assert mux._cmd(k, 1) == b'\x00'
assert mux._serial.inspect(1) == v
def test_read() -> None:
mux = SerialMux('', wait=0)
mux._serial.prepare(b'\xff\x00\xff\xff')
assert mux._read() == b'\xff'
def test_write() -> None:
mux = SerialMux('', wait=0)
mux._write(0, b'\xff')
assert mux._serial.inspect(4) == b'\xff\x00\xff\xff'
def test_send() -> None:
mux = SerialMux('', wait=0)
mux.send(0, b'\xff')
assert mux._serial.inspect(4) == b'\xff\x00\xff\xff'
def test_devices() -> None:
mux = SerialMux('', wait=0)
assert len(mux.devices) == 2
def test_send_device1() -> None:
mux = SerialMux('', wait=0)
handle = open(mux.devices[0].name, 'wb')
handle.write(b'\x01\x02\x03')
handle.flush()
mux.devices[0]._update()
assert mux._serial.inspect(5) == b'\xff\x00\x01\x02\x03'
def test_send_device2() -> None:
mux = SerialMux('', wait=0)
handle = open(mux.devices[1].name, 'wb')
handle.write(b'\x01\x02\x03')
handle.flush()
mux.devices[1]._update()
assert mux._serial.inspect(5) == b'\xff\x01\x01\x02\x03'
def test_receive_direct_device1() -> None:
mux = SerialMux('', wait=0)
handle = open(mux.devices[0].name, 'rb')
mux.devices[0].receive(b'\xff')
assert handle.read(1) == b'\xff'
def test_receive_device1() -> None:
mux = SerialMux('', wait=0)
handle = open(mux.devices[0].name, 'rb')
mux._serial.prepare(b'\xff\x00\xff\xff')
mux._update()
assert handle.read(1) == b'\xff'
def test_receive_direct_device2() -> None:
mux = SerialMux('', wait=0)
handle = open(mux.devices[1].name, 'rb')
mux.devices[1].receive(b'\xff')
assert handle.read(1) == b'\xff'
def test_receive_device2() -> None:
mux = SerialMux('', wait=0)
handle = open(mux.devices[1].name, 'rb')
mux._serial.prepare(b'\xff\x01\xff\xff')
mux._update()
assert handle.read(1) == b'\xff'
```
|
{
"source": "jfjlaros/arduino-simple-rpc",
"score": 3
}
|
#### File: arduino-simple-rpc/tests/test_io.py
```python
from io import BytesIO
from typing import Any
from simple_rpc.io import (
_read_basic, _read_bytes_until, _write_basic, cast, read, write)
def _test_invariance_basic(
f_read: callable, f_write: callable, endianness: str, basic_type: str,
data: bytes, value: Any) -> None:
stream = BytesIO(data)
value_ = f_read(stream, endianness, basic_type)
assert value_ == value
stream = BytesIO()
f_write(stream, endianness, basic_type, value)
assert stream.getvalue() == data
def _test_invariance(
f_read: callable, f_write: callable, endianness: str, size_t: str,
obj_def: Any, data: bytes, obj: Any) -> None:
stream = BytesIO(data)
obj_ = f_read(stream, endianness, size_t, obj_def)
assert obj_ == obj
stream = BytesIO()
f_write(stream, endianness, size_t, obj_def, obj)
assert stream.getvalue() == data
def test_cast() -> None:
assert cast('?') == bool
assert cast('c') == bytes
assert cast('s') == bytes
assert cast('f') == float
assert cast('d') == float
assert cast('h') == int
assert cast('i') == int
def test_read_bytes_until() -> None:
stream = BytesIO(b'abcdef\0abc')
assert _read_bytes_until(stream, b'\0') == b'abcdef'
def test_basic_string() -> None:
_test_invariance_basic(
_read_basic, _write_basic, '<', 's', b'abcdef\0', b'abcdef')
def test_basic_int_le() -> None:
_test_invariance_basic(
_read_basic, _write_basic, '<', 'i', b'\2\0\0\0', 2)
def test_basic_int_be() -> None:
_test_invariance_basic(
_read_basic, _write_basic, '>', 'i', b'\0\0\0\2', 2)
def test_list_char() -> None:
_test_invariance(
read, write, '<', 'h', ['c'], b'\3\0a\0c', [b'a', b'\0', b'c'])
def test_list_nibble() -> None:
_test_invariance(
read, write, '<', 'h', ['h'], b'\3\0\1\0\2\0\3\0', [1, 2, 3])
def test_list_list() -> None:
_test_invariance(
read, write, '<', 'h', [['b']], b'\2\0\2\0\0\1\2\0\2\3',
[[0, 1], [2, 3]])
def test_object_char_int() -> None:
_test_invariance(
read, write, '<', 'h', ('c', 'i'), b'a\3\0\0\0', (b'a', 3))
def test_object_nibble_string_char() -> None:
_test_invariance(
read, write, '<', 'h', ('h', 's', 'c'), b'\2\0abcdef\0x',
(2, b'abcdef', b'x'))
def test_object_object() -> None:
_test_invariance(
read, write, '<', 'h', ((('c', ), ), ('c', ), ), b'ab',
(((b'a', ), ), (b'b', )))
def test_list_tuple() -> None:
_test_invariance(
read, write, '<', 'h', ['c', 'c', 'c'], b'\2\0abcabc',
[b'a', b'b', b'c', b'a', b'b', b'c'])
def test_list_object() -> None:
_test_invariance(
read, write, '<', 'h', [('c', 'c', 'c')], b'\2\0abcabc',
[(b'a', b'b', b'c'), (b'a', b'b', b'c')])
def test_list_object_tuple() -> None:
_test_invariance(
read, write, '<', 'h', [('c', 'c'), 'c'], b'\2\0abcabc',
[(b'a', b'b'), b'c', (b'a', b'b'), b'c'])
```
#### File: arduino-simple-rpc/tests/test_protocol.py
```python
from simple_rpc.protocol import (
_add_doc, _parse_signature, _strip_split, _type_name, _parse_type,
parse_line)
def test_parse_type_none() -> None:
assert _parse_type(b'') == ''
def test_parse_type_basic() -> None:
assert _parse_type(b'i') == 'i'
def test_parse_type_tuple() -> None:
try:
_parse_type(b'ic')
except ValueError as error:
assert str(error) == 'top level type can not be tuple'
else:
assert False
def test_parse_type_list_basic() -> None:
assert _parse_type(b'[i]') == ['i']
def test_parse_type_object_basic() -> None:
assert _parse_type(b'(i)') == ('i', )
def test_parse_type_list_tuple() -> None:
assert _parse_type(b'[ic]') == ['i', 'c']
def test_parse_type_list_object() -> None:
assert _parse_type(b'[(ic)]') == [('i', 'c')]
def test_parse_type_list_list() -> None:
assert _parse_type(b'[[i]]') == [['i']]
def test_parse_type_object_tuple() -> None:
assert _parse_type(b'(ic)') == ('i', 'c')
def test_parse_type_object_list() -> None:
assert _parse_type(b'([i])') == (['i'], )
def test_parse_type_object_object() -> None:
assert _parse_type(b'((ic))') == (('i', 'c'), )
def test_parse_type_complex() -> None:
assert _parse_type(b'(((cc)c)i([c]))') == (
(('c', 'c'), 'c'), 'i', (['c'], ), )
def test_type_name_none() -> None:
assert _type_name(None) == ''
def test_type_name_basic() -> None:
assert _type_name('c') == 'bytes'
assert _type_name('i') == 'int'
assert _type_name('?') == 'bool'
assert _type_name('f') == 'float'
def test_type_name_tuple_basic() -> None:
assert _type_name(['i', 'c']) == '[int, bytes]'
assert _type_name(['?', 'f']) == '[bool, float]'
def test_type_name_list_basic() -> None:
assert _type_name([['i']]) == '[[int]]'
def test_type_name_object_basic() -> None:
assert _type_name([('i',)]) == '[(int)]'
def test_type_name_complex() -> None:
assert (_type_name([(('c', 'c'), 'c'), 'i', (['c'], )]) ==
'[((bytes, bytes), bytes), int, ([bytes])]')
def test_parse_signature_basic() -> None:
assert _parse_signature(1, b': c f') == {
'doc': '',
'index': 1,
'name': 'method1',
'parameters': [{
'doc': '', 'fmt': 'c', 'name': 'arg0', 'typename': 'bytes'}, {
'doc': '', 'fmt': 'f', 'name': 'arg1', 'typename': 'float'}],
'return': {'doc': '', 'fmt': '', 'typename': ''}}
def test_parse_signature_complex() -> None:
assert _parse_signature(2, b'(ff): [c] (cf)') == {
'doc': '',
'index': 2,
'name': 'method2',
'parameters': [{
'doc': '', 'fmt': ['c'], 'name': 'arg0',
'typename': '[bytes]'}, {
'doc': '', 'fmt': ('c', 'f'), 'name': 'arg1',
'typename': '(bytes, float)'}],
'return': {'doc': '', 'fmt': ('f', 'f'),
'typename': '(float, float)'}}
def test_split_strip() -> None:
assert _strip_split(' p1 : Param 1. ', ':') == ['p1', 'Param 1.']
assert _strip_split('p1:Param 1.', ':') == ['p1', 'Param 1.']
def test_add_doc_basic() -> None:
method = _parse_signature(1, b'i: c f')
_add_doc(method, b'name: Test. @p1: Char. @p2: Float. @return: Int.')
assert method['name'] == 'name'
assert method['doc'] == 'Test.'
assert method['parameters'][0]['name'] == 'p1'
assert method['parameters'][0]['doc'] == 'Char.'
assert method['parameters'][1]['name'] == 'p2'
assert method['parameters'][1]['doc'] == 'Float.'
assert method['return']['doc'] == 'Int.'
def test_add_doc_missing_name() -> None:
method = _parse_signature(1, b': c f')
_add_doc(method, b'@p1: Char. @p2: Float.')
assert method['name'] == 'method1'
assert method['doc'] == ''
assert method['parameters'][0]['name'] == 'arg0'
def test_add_doc_missing_parameter() -> None:
method = _parse_signature(1, b': c f')
_add_doc(method, b'name: Test. @p1: Char')
assert method['name'] == 'name'
assert method['parameters'][0]['name'] == 'p1'
assert method['parameters'][1]['name'] == 'arg1'
def test_parse_line() -> None:
method = parse_line(
1, b'i: c f;name: Test. @p1: Char. @p2: Float. @return: Int.')
assert method['index'] == 1
assert method['name'] == 'name'
```
|
{
"source": "jfjlaros/barcode",
"score": 3
}
|
#### File: barcode/barcode/barcode.py
```python
import Levenshtein
_nucleotides = ['A', 'C', 'G', 'T']
def _all_words(bucket, word, length, result):
"""Generate all possible words of a certain length over a specified
alphabet.
:arg list bucket: An alphabet.
:arg str word: A word over the alphabet {bucket}.
    :arg int length: Length of the barcodes.
:arg list result: Constructed words.
"""
if length:
for i in bucket:
_all_words(bucket, word + i, length - 1, result)
else:
result.append(word)
def _filter_stretch(barcode, stretches):
"""Test whether {barcode} contains none of the stretches in {stretches}.
:arg str barcode: A barcode.
    :arg list stretches: List of forbidden mononucleotide stretches.
:returns bool: True if the barcode is clean, False otherwise.
"""
for i in stretches:
if i in barcode:
return False
return True
def _filter_distance(barcodes, candidate, min_dist, distance):
"""Test whether {candidate} can be added to {barcodes} based on the minimum
distance between {candidate} and all barcodes in {barcodes}.
:arg list barcodes: List of barcodes.
:arg str candidate: Candidate barcode.
:arg int min_dist: Minimum distance between the barcodes.
:arg function distance: Distance function.
:returns bool: True if the barcode is clean, False otherwise.
"""
for i in barcodes:
if distance(i, candidate) < min_dist:
return False
return True
def all_barcodes(length):
"""Generate all possible barcodes of a certain length.
    :arg int length: Length of the barcodes.
:returns list: List of barcodes.
"""
result = []
_all_words(_nucleotides, '', length, result)
return result
def filter_stretches(barcodes, max_stretch):
"""Filter a list of barcodes for mononucleotide stretches.
:arg list barcodes: List of barcodes.
:arg int max_stretch: Maximum mononucleotide stretch length.
:returns list: List of barcodes filtered for mononucleotide stretches.
"""
stretches = list(map(lambda x: (max_stretch + 1) * x, _nucleotides))
result = []
for i in barcodes:
if _filter_stretch(i, stretches):
result.append(i)
return result
def filter_distance(barcodes, min_dist, distance=Levenshtein.distance):
"""Filter a list of barcodes for distance to other barcodes.
:arg list barcodes: List of barcodes.
:arg int min_dist: Minimum distance between the barcodes.
:arg function distance: Distance function.
:returns list: List of barcodes filtered for distance to other
barcodes.
"""
result = []
for i in barcodes:
if _filter_distance(result, i, min_dist, distance):
result.append(i)
return result
```
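The three public functions are intended to be chained: generate all words, drop the ones with long mononucleotide runs, then greedily keep a mutually distant subset. A minimal sketch, assuming the module is importable as `barcode.barcode` and that `python-Levenshtein` is installed; the parameter values are arbitrary examples.
```python
from barcode.barcode import all_barcodes, filter_distance, filter_stretches

candidates = all_barcodes(4)               # all 4**4 = 256 nucleotide words
no_runs = filter_stretches(candidates, 2)  # drop words containing AAA, CCC, GGG or TTT
selected = filter_distance(no_runs, 3)     # greedy pick, pairwise Levenshtein distance >= 3

print(len(candidates), len(no_runs), len(selected))
```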
|
{
"source": "jfjlaros/bin-parser",
"score": 3
}
|
#### File: bin-parser/tests/test_functions.py
```python
from bin_parser import functions
class TestParser(object):
"""Test the bin_parser.functions module."""
def setup(self):
self._brf = functions.BinReadFunctions()
self._bwf = functions.BinWriteFunctions()
def _idem(self, func, data, **kwargs):
"""General idempotency test.
This test decodes `data`, encodes the result and checks whether this
equals `data`.
:arg function func: Function to be tested.
:arg any data: Data for `func`.
:arg dict **kwargs: Arguments for `func`.
"""
assert getattr(self._bwf, func)(
getattr(self._brf, func)(data, **kwargs), **kwargs) == data
def test_raw_single(self):
assert self._brf.raw(b'\x03') == '03'
def test_raw_multi(self):
assert self._brf.raw(b'\x00\x01\x02') == '00 01 02'
def test_raw_idem(self):
self._idem('raw', b'\x00\x01\x02')
def test_bit(self):
assert self._brf.bit(b'\x03') == '00000011'
def test_bit_idem(self):
self._idem('bit', b'\x03')
def test_le_s_short(self):
assert self._brf.struct(b'\x01\x00', fmt='<h') == 1
def test_be_s_short(self):
assert self._brf.struct(b'\x00\x01', fmt='>h') == 1
def test_le_s_short_idem(self):
self._idem('struct', b'\x01\x02', fmt='<h')
def test_be_s_float_idem(self):
self._idem('struct', b'\x01\x02\x03\x04', fmt='>f')
def test_labels(self):
assert self._brf.struct(
b'\x01\x02', fmt='BB', labels=['a', 'b']) == {'a': 1, 'b': 2}
def test_labels_idem(self):
self._idem('struct', b'\x01\x02', fmt='BB', labels=['a', 'b'])
def test_annotation(self):
assert self._brf.struct(
b'\x01\x02', fmt='BB', annotation={1: 'x'}) == ['x', 2]
def test_annotation_idem(self):
self._idem('struct', b'\x01\x02', fmt='BB', annotation={1: 'a'})
def test_labels_annotation(self):
assert self._brf.struct(
b'\x01\x02', fmt='BB', labels=['a', 'b'],
annotation={1: 'x'}) == {'a': 'x', 'b': 2}
def test_labels_annotation_idem(self):
self._idem(
'struct', b'\x01\x02', fmt='BB', labels=['a', 'b'],
annotation={1: 'x'})
def test_flags(self):
assert self._brf.flags(b'\x03', {}) == {
'flag_01': True, 'flag_02': True}
def test_flags_false(self):
assert self._bwf.flags(
{'flag_01': True, 'flag_02': False}, {}) == b'\x01'
def test_flags_idem(self):
self._idem('flags', b'\x03', annotation={})
def test_flags_annotation(self):
assert self._brf.flags(b'\x03', {2: 'a'}) == {
'flag_01': True, 'a': True}
def test_flags_annotation_false(self):
assert self._brf.flags(b'\x01', {2: 'a'}) == {
'flag_01': True, 'a': False}
def test_flags_annotation_idem(self):
self._idem('flags', b'\x03', annotation={2: 'a'})
```
|
{
"source": "jfjlaros/fake-open",
"score": 2
}
|
#### File: fake-open/fake_open/__init__.py
```python
from .fake_open import FakeOpen, md5_check, make_fake_file
__version_info__ = ('0', '0', '2')
__version__ = '.'.join(__version_info__)
__author__ = 'LUMC, <NAME>'
__contact__ = '<EMAIL>'
__homepage__ = 'https://git.lumc.nl/j.f.j.laros/fake-open'
usage = __doc__.split('\n\n\n')
def doc_split(func):
return func.__doc__.split('\n\n')[0]
def version(name):
return '{} version {}\n\nAuthor : {} <{}>\nHomepage : {}'.format(
name, __version__, __author__, __contact__, __homepage__)
```
|
{
"source": "jfjlaros/fastools",
"score": 3
}
|
#### File: fastools/fastools/peeker.py
```python
from io import BytesIO, TextIOWrapper
from os import SEEK_END
class Peeker(object):
def __init__(self, handle):
self._buf = BytesIO()
self._handle = handle
self.name = handle.name
def _append_to_buf(self, data):
position = self._buf.tell()
self._buf.seek(0, SEEK_END)
self._buf.write(data)
self._buf.seek(position)
def peek(self, size):
data = self._handle.read(size)
self._append_to_buf(data)
return data
def read(self, size=None):
if size is None:
return self._buf.read() + self._handle.read()
data = self._buf.read(size)
if len(data) < size:
data += self._handle.read(size - len(data))
return data
def readline(self):
line = self._buf.readline()
        if not line.endswith(b'\n'):
            line += self._handle.readline()
return line
class Pkr(TextIOWrapper):
def __init__(self, *args, **kwargs):
TextIOWrapper.__init__(self, *args, **kwargs)
self._buf = BytesIO()
def _append_to_buf(self, data):
position = self._buf.tell()
self._buf.seek(0, SEEK_END)
self._buf.write(data)
self._buf.seek(position)
def peek(self, size):
data = self.buffer.read(size)
self._append_to_buf(data)
return data
def read(self, size=None):
if size is None:
return self._buf.read() + self.buffer.read()
data = self._buf.read(size)
if len(data) < size:
data += self.buffer.read(size - len(data))
return data
def readline(self):
line = self._buf.readline()
        if not line.endswith(b'\n'):
line += self.buffer.readline()
return line
```
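A `Peeker` lets a caller inspect the first bytes of a stream (for instance to guess a file format) while the downstream consumer still sees the complete stream. A small sketch, assuming the class is importable as `fastools.peeker.Peeker`; the handle must be opened in binary mode and the file name is just an example.
```python
from fastools.peeker import Peeker

with open('data/sanitised.fa', 'rb') as raw:
    stream = Peeker(raw)
    if stream.peek(1) == b'>':
        print('looks like FASTA')
    # The peeked byte is replayed first, so nothing is lost.
    print(stream.read(20))
```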
#### File: fastools/tests/test_cli.py
```python
from io import StringIO
from Bio import SeqIO
from fastools import cli as fastools
from shared import md5_check
class TestCLI(object):
def setup(self):
self._sanitised_fa = open('data/sanitised.fa')
self._sanitised_fq = open('data/sanitised.fq')
self._length_fa = open('data/length.fa')
self._output = StringIO()
self._null = open('/dev/null', 'w')
def _md5_check(self, md5sum):
return md5_check(self._output.getvalue(), md5sum)
def test_add(self):
fastools.add(self._sanitised_fq, self._output, 'ACGT', 31)
assert self._md5_check('a9931faafa17c50b7f9737dac07ca95d')
def test_aln(self):
assert fastools.aln(
[self._sanitised_fa, open('data/sanitised_ed.fa')])[0][2] == 2
def test_cat(self):
assert next(fastools.cat(self._sanitised_fa)).startswith('TGAGCGGAAC')
def test_collapse(self):
fastools.collapse(self._sanitised_fa, self._output, 2)
assert self._md5_check('d2b7778038d8b61beeca74f0a3306e35')
def test_csv2fa2_1(self):
fastools.csv2fa2(
open('data/primers.csv'), [self._output, self._null], True)
assert self._md5_check('58b4f8a3832fdbbe7c91a8547c39c472')
def test_csv2fa2_2(self):
fastools.csv2fa2(
open('data/primers.csv'), [self._null, self._output], True)
assert self._md5_check('33259a9357b75e7baca2909bfc254e61')
def test_descr(self):
assert next(fastools.descr(self._sanitised_fa)) == 'sanitised'
def test_dna2rna(self):
fastools.dna2rna(self._sanitised_fa, self._output)
assert self._md5_check('df76f76e77d5d5f785837c91a73f1a72')
def test_edit(self):
fastools.edit(self._sanitised_fa, open('data/edits.fa'), self._output)
assert self._md5_check('b865c2069b8900df35d7733abd8c39e0')
def test_fa2fq(self):
fastools.fa2fq(self._sanitised_fa, self._output, 30)
assert self._md5_check('75d8abea91a1ec4d24c0e10235a28a7f')
def test_fa2gb(self):
fastools.fa2gb(self._sanitised_fa, self._output, 'NM_000000.0')
assert self._md5_check('a58ca021a538f76737fe641209451f09')
def test_famotif2bed(self):
fastools.famotif2bed(self._sanitised_fa, self._output, 'AC')
assert self._md5_check('d2b0dec731be890350bca49357d753f4')
def test_fq2fa(self):
fastools.fq2fa(self._sanitised_fq, self._output)
assert self._md5_check('33d5334b5e210f681f5a23b9c03806be')
def test_gb2fa(self):
fastools.gb2fa(open('data/sanitised.gb'), self._output)
assert self._md5_check('3c44cba3e9269ca729a6cd6292e4c05e')
def test_gen(self):
fastools.gen(10, self._output, 'name', 'description')
self._output.seek(0)
record = next(SeqIO.parse(self._output, 'fasta'))
assert (
len(record.seq) == 10 and record.description == 'name description')
def test_get(self):
pass # FIXME.
def test_length(self):
assert fastools.length(self._length_fa) == [1, 2, 3]
def test_lenfilt_1(self):
fastools.lenfilt(
self._length_fa, [self._output, self._null], 2)
assert self._md5_check('df04208746b395c26bee2589798ed084')
def test_lenfilt_2(self):
fastools.lenfilt(
self._length_fa, [self._null, self._output], 2)
assert self._md5_check('329a7a950ee7f48e65f784900158d0f8')
def test_list_enzymes(self):
assert 'EcoRI' in fastools.list_enzymes()
def test_maln(self):
pass # FIXME.
def test_mangle(self):
fastools.mangle(self._sanitised_fa, self._output)
assert self._md5_check('b427c19cf161cf23256cd76a044977d0')
def test_merge(self):
fastools.merge(
[self._sanitised_fa, open('data/sanitised.fa')], self._output, 3)
assert self._md5_check('075b6720e3bd94f777fbb2b7ffa25ada')
def test_raw2fa(self):
fastools.raw2fa(
StringIO('ACGT'), self._output, 'name', 'description')
assert self._md5_check('6361fecba38154e9f3563d13c521154d')
def test_restrict(self):
assert fastools.restrict(
self._sanitised_fa, ['BssMI', 'AgsI']) == [11, 16, 74]
def test_reverse(self):
fastools.reverse(self._sanitised_fa, self._output)
assert self._md5_check('89a3e1c61aecfff9f15a6702661d7170')
def test_rna2dna(self):
fastools.rna2dna(open('data/sanitised_rna.fa'), self._output)
assert self._md5_check('33d5334b5e210f681f5a23b9c03806be')
def test_rselect(self):
fastools.rselect(self._sanitised_fa, self._output, 'sanitised', 5, 10)
assert self._md5_check('8a3a5bfd1de1c054e6274aa6cfcf93b0')
def test_s2i(self):
fastools.s2i(self._sanitised_fq, self._output)
assert self._md5_check('aa58abde3d3042a1eee9ac5f4064f29b')
def test_sanitise(self):
fastools.sanitise(open('data/unsanitised.fa'), self._output)
assert self._md5_check('95c6eb5c9ae6949bbc4b4153cf976de4')
def test_select(self):
fastools.select(self._sanitised_fa, self._output, 5, 10)
assert self._md5_check('8a3a5bfd1de1c054e6274aa6cfcf93b0')
def test_splitseq_1(self):
fastools.splitseq(
self._length_fa, [self._output, self._null], 'C')
assert self._md5_check('df04208746b395c26bee2589798ed084')
def test_splitseq_2(self):
fastools.splitseq(
self._length_fa, [self._null, self._output], 'C')
assert self._md5_check('329a7a950ee7f48e65f784900158d0f8')
def test_tagcount(self):
assert fastools.tagcount(self._length_fa, 'ACG', 1) == 2
```
#### File: fastools/tests/test_split_fasta.py
```python
from fastools import split_fasta
from shared import FakeOpen, md5_check
class TestSplitFasta(object):
def setup(self):
fake_open = FakeOpen()
self._handles = fake_open.handles
split_fasta.open = fake_open.open
self._input = open('data/split_fasta.fa')
self._library = open('data/library.csv')
self._output = fake_open.open('stdout')
def _md5_check(self, name, md5sum):
return md5_check(self._handles[name].getvalue(), md5sum)
def test_split(self):
split_fasta.split_fasta(self._input, self._library, self._output)
assert len(self._handles) == 7
assert self._md5_check('stdout', 'e1c685ef32bc0e5eff44b4471d428c62')
#assert self._md5_check(
# 'one_counted.txt', '32989fa6c7577611c81d18479712589d')
assert self._md5_check(
'Unrecognised.txt', '7950997464325678f3f8c1f87d6511ec')
```
|
{
"source": "jfjlaros/gene-disease-pubmed",
"score": 2
}
|
#### File: gene-disease-pubmed/gene_disease_pubmed/gene_disease_pubmed.py
```python
import argparse
import collections
import json
import re
import sys
import restkit
from Bio import Entrez
from . import ProtectedFileType, usage, version
def _get_key(record, keys):
"""
:arg object record:
:arg tuple keys:
:returns object:
"""
if record.has_key(keys[0]):
if len(keys) == 1:
return record[keys[0]]
return _get_key(record[keys[0]], keys[1:])
return None
def hgnc_genes(query='status:Approved'):
"""
:arg str query:
:returns object:
"""
hgnc_resource = 'http://rest.genenames.org/search/'
resource = restkit.Resource(hgnc_resource)
response = resource.get(query, headers={'Accept': 'application/json'})
return json.load(response.body_stream())
def abstract_walker(search_results, batch_size=1000):
"""
:arg object search_results:
:arg int batch_size:
:returns object:
"""
for start in range(0, int(search_results['Count']), batch_size):
fetch_handle = Entrez.efetch(
db='pubmed', rettype='medline', retmode='xml', retstart=start,
retmax=batch_size, webenv=search_results['WebEnv'],
query_key=search_results['QueryKey'])
records = Entrez.read(fetch_handle)['PubmedArticle']
fetch_handle.close()
for record in records:
body = _get_key(record, ('MedlineCitation', 'Article'))
if body:
title = _get_key(body, ('ArticleTitle', ))
text = _get_key(body, ('Abstract', 'AbstractText'))
pmid = '0'
for i in _get_key(record, ('PubmedData', 'ArticleIdList')):
if i.attributes['IdType'] == 'pubmed':
pmid = str(i)
if text:
yield title, text[0], pmid
else:
yield title, '', pmid
def search_abstracts(search_terms, reldate):
"""
:arg list search_terms:
:arg int reldate:
:returns object:
"""
query = '"{}"'.format('" OR "'.join(search_terms))
return Entrez.read(Entrez.esearch(
db='pubmed', term=query, reldate=reldate, datetype='pdat',
usehistory='y'))
def gene_disease_pubmed(
input_handle, output_handle, log_handle, email, reldate=730,
progress_indicator=100):
"""
:arg stream input_handle:
:arg stream output_handle:
:arg stream log_handle:
:arg str email:
:arg int reldate:
:arg int progress_indicator:
"""
Entrez.email = email
trim_pattern = '[\W_]+'
log_handle.write('Retrieving gene list ... ')
log_handle.flush()
genes = set(map(lambda x: x['symbol'], hgnc_genes()['response']['docs']))
log_handle.write('found {}.\n'.format(len(genes)))
log_handle.write('Searching abstracts ... ')
log_handle.flush()
search_terms = map(lambda x: x.strip(), input_handle.readlines())
search_results = search_abstracts(search_terms, reldate)
log_handle.write('found {}.\n'.format(search_results['Count']))
walker = abstract_walker(search_results)
log_handle.write('Starting analysis ({} abstracts per dot) '.format(
progress_indicator))
log_handle.flush()
indicator = 0
hits = collections.defaultdict(lambda: [0, []])
trim_prefix = re.compile('^{}'.format(trim_pattern))
trim_suffix = re.compile('{}$'.format(trim_pattern))
for abstract in walker:
if not indicator % progress_indicator:
log_handle.write('.')
log_handle.flush()
indicator += 1
title = abstract[0].split()
text = abstract[1].split()
pmid = abstract[2]
total_text = set(map(lambda x: trim_prefix.sub(
'', trim_suffix.sub('', x)), set(title + text)))
for gene in genes & total_text:
hits[gene][0] += 1
hits[gene][1].append(pmid)
for hit in hits:
output_handle.write('{}\t{}\t{}\n'.format(
hit, hits[hit][0], ' '.join(hits[hit][1])))
log_handle.write(' done.\n')
def main():
"""Main entry point."""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=usage[0], epilog=usage[1])
parser.add_argument('-v', action='version', version=version(parser.prog))
parser.add_argument(
'input_handle', metavar='INPUT', type=argparse.FileType('r'),
help='file containing a list of queries')
parser.add_argument(
'output_handle', metavar='OUTPUT', type=ProtectedFileType('w'),
help='output file')
parser.add_argument(
'email', metavar='EMAIL', type=str, help='email address (%(type)s)')
parser.add_argument(
'-o', dest='log_handle', default=sys.stdout,
type=argparse.FileType('w'), help='log file (default=<stdout>)')
parser.add_argument(
'-d', dest='reldate', type=int, default=730,
help='history window in days (%(type)s default=%(default)s)')
try:
arguments = parser.parse_args()
except IOError, error:
parser.error(error)
try:
gene_disease_pubmed(
arguments.input_handle, arguments.output_handle,
arguments.log_handle, arguments.email, arguments.reldate)
except ValueError, error:
parser.error(error)
if __name__ == '__main__':
main()
```
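Besides the command line interface, the worker function can be called directly. A rough sketch (Python 2, to match the module), assuming the function is importable from the installed package as shown; the file names and e-mail address are placeholders, and both rest.genenames.org and NCBI Entrez must be reachable.
```python
import sys

from gene_disease_pubmed.gene_disease_pubmed import gene_disease_pubmed

input_handle = open('queries.txt')         # one search term per line
output_handle = open('gene_hits.tsv', 'w')
gene_disease_pubmed(
    input_handle, output_handle, sys.stdout, 'user@example.com', reldate=365)
output_handle.close()
```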
|
{
"source": "jfjlaros/hamilton",
"score": 4
}
|
#### File: hamilton/hamilton/hamilton.py
```python
from sys import stdout
import math
class Hamilton(object):
"""Find a Hamiltonian path or cycle in a graph that is induced by a
rectangular board and a list of moves.
"""
def __init__(
self, moves, x_size, y_size, x_start, y_start, closed=False,
max_retries=0):
"""
:arg list moves: Definition of allowed moves.
:arg int x_size: Height of the board.
:arg int y_size: Width of the board.
        :arg int x_start: x-coordinate of the starting position.
        :arg int y_start: y-coordinate of the starting position.
        :arg bool closed: Find a closed path.
        :arg int max_retries: Stop after a given number of retries (0=disabled).
"""
self._moves = moves
self._x_size = x_size
self._y_size = y_size
self._max_retries = max_retries
self._max_depth = self._x_size * self._y_size
self._decimals = int(math.log(self._max_depth, 10) + 1)
self.reset(x_start, y_start, closed)
def __str__(self):
return '\n'.join(
map(lambda r: ''.join(
map(lambda e: ' {{:{}d}}'.format(self._decimals).format(e), r)),
self.board)) + '\n'
def _push(self, moves):
self._stack.append([len(moves), moves])
def _pop(self):
return self._stack.pop()[1]
def _current(self):
return self._stack[-1][1][self._stack[-1][0]]
def _next(self):
if not self._stack[-1][0]:
return ()
self._stack[-1][0] -= 1
return self._current()
def _valid_moves(self, x, y):
"""Determine all valid moves, given a position.
:arg int x: x-coordinate.
:arg int y: y-coordinate.
:returns list: List of valid moves.
"""
moves = []
for move in self._moves:
_x = x + move[0]
_y = y + move[1]
if (
_x >= 0 and _x < self._x_size and
_y >= 0 and _y < self._y_size and self.board[_x][_y] < 1):
moves.append((_x, _y))
return moves
def _update(self, moves, amount):
"""Update accessibility of a list of moves.
:arg list moves: List of moves.
:arg int amount: Increase or decrease accessibility (1 or -1).
"""
for move in moves:
self.board[move[0]][move[1]] += amount
def _prioritise(self, moves):
"""Prioritise a list of moves based on accessibility.
:arg list moves: List of moves.
:returns list: List of moves sorted by accessibility.
"""
if not moves:
return []
weights = map(lambda x: self.board[x[0]][x[1]], moves)
return list(zip(*sorted(zip(weights, moves))))[1]
def _solve_recursive(self, x, y, depth=1):
"""
:arg int x: x-coordinate.
:arg int y: y-coordinate.
:arg int depth: Move number.
:returns bool: True for success, False for failure.
"""
# Making any field inaccessible would be a better check.
if self._closed and not self._valid_moves(
self._x_start, self._y_start):
return False
self.board[x][y] = depth
if depth == self._max_depth:
return True
moves = self._valid_moves(x, y)
self._update(moves, 1)
for move in self._prioritise(moves)[::-1]:
if self._solve_recursive(move[0], move[1], depth + 1):
return True
self.retries += 1
self.board[x][y] = -len(moves)
self._update(moves, -1)
return False
def reset(self, x, y, closed=False):
"""Initialise the board and set the parameters for the path finding.
:arg int x: x-coordinate.
:arg int y: y-coordinate.
:arg bool closed: Find a closed path.
"""
self.board = [[0] * self._y_size for _ in range(self._x_size)]
for i in range(self._x_size):
for j in range(self._y_size):
self.board[i][j] = -len(self._valid_moves(i, j))
self._x_start = x
self._y_start = y
self._closed = closed
self._stack = []
self.retries = 0
def solve_recursive(self):
"""Find a Hamiltonian path or cycle.
:returns bool: True for success, False for failure.
"""
return self._solve_recursive(self._x_start, self._y_start)
def _stop_condition(self, move):
if not move:
return False
if self._closed:
if not self._valid_moves(self._x_start, self._y_start):
return False
priorities = list(map(
lambda x: len(self._valid_moves(x[0], x[1])), self._stack[-1][1]))
if len(priorities) > 1:
# If one of the squares is unreachable, there is no point in
# exploring other possibilities.
if priorities[-1] == 0:
return False
if self._closed:
# If we see two squares that are accessible from one
# position, either one of those squares is an end point.
if priorities[-2] < 2:
#stdout.write('\n{}'.format(self))
return False
else:
                # If we see three squares that are accessible from one
                # position, two of those squares must be end points. Since
                # we can only have one end point, we can stop here.
if len(priorities) > 2 and priorities[-3] < 2:
return False
return True
def solve(self):
"""Find a Hamiltonian path or cycle.
:returns bool: True for success, False for failure.
"""
depth = 1
self.board[self._x_start][self._y_start] = depth
moves = self._prioritise(
self._valid_moves(self._x_start, self._y_start))
self._update(moves, 1)
self._push(moves)
while True:
#stdout.write('{}'.format(depth))
move = self._next()
if self._stop_condition(move):
depth += 1
self.board[move[0]][move[1]] = depth
if depth == self._max_depth:
return True
moves = self._prioritise(self._valid_moves(move[0], move[1]))
self._update(moves, 1)
self._push(moves)
else:
if self._max_retries and self.retries >= self._max_retries:
return False
self.retries += 1
moves = self._pop()
if not self._stack:
return False
undo = self._current()
self.board[undo[0]][undo[1]] = -len(moves)
self._update(moves, -1)
depth -= 1
```
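As an illustration, a knight's tour on a small board: the move list, board size and starting square are example inputs, and the import path assumes the module is reachable as `hamilton.hamilton`.
```python
from hamilton.hamilton import Hamilton

knight_moves = [
    (1, 2), (2, 1), (2, -1), (1, -2), (-1, -2), (-2, -1), (-2, 1), (-1, 2)]

solver = Hamilton(knight_moves, 6, 6, 0, 0)
if solver.solve():
    print(solver)                     # board with the move number in every cell
    print('retries:', solver.retries)
```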
|
{
"source": "jfjlaros/jit-open",
"score": 3
}
|
#### File: jit-open/tests/test_library.py
```python
from fake_open import FakeOpen
from jit_open import jit_open, Handle, Queue
class TestLibrary(object):
def setup(self):
fake_open = FakeOpen()
self._handles = fake_open.handles
self._open = fake_open.open
self._queue = Queue()
def test_unused(self):
handle = Handle('test.txt', self._queue, f_open=self._open)
assert 'test.txt' not in self._handles
def test_used_1(self):
handle = Handle('test.txt', self._queue, f_open=self._open)
handle.write('line 1\n')
handle.close()
assert self._handles['test.txt'].getvalue() == 'line 1\n'
def test_used_2(self):
handle = Handle('test.txt', self._queue, f_open=self._open)
handle.write('line 1\n')
handle.write('line 2\n')
handle.close()
assert self._handles['test.txt'].getvalue() == 'line 1\nline 2\n'
```
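Outside the tests the same pattern applies to real files: a `Handle` registers itself with a `Queue`, but nothing appears on disk for handles that are never written to. A minimal sketch based only on the API exercised above; the file name is an example.
```python
from jit_open import Handle, Queue

queue = Queue()
report = Handle('report.txt', queue)  # no file on disk yet
report.write('some result\n')
report.close()                        # report.txt now exists and holds the line
```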
|
{
"source": "jfjlaros/max7301",
"score": 3
}
|
#### File: max7301/host/host.py
```python
import serial
import struct
import sys
import time
OUTPUT = 0x01
INPUT = 0x02
INPUT_PULLUP = 0x03
LOW = 0x00
HIGH = 0x01
class MAX7301(object):
"""Simple serial interface for the MAX7301."""
def __init__(self):
"""Initialise the interface."""
self._connection = serial.Serial('/dev/ttyACM0')
self._commands = [
'enable', 'disable', 'pinmode', 'get_pinmode', 'enable_td',
'disable_td', 'conf_td', 'dig_read', 'dig_write', 'dig_read_range',
'dig_write_range', 'read', 'write', 'check_int']
self._byte_commands = [
'get_pinmode', 'dig_read', 'dig_read_range', 'read', 'check_int']
def cmd(self, command, *args):
"""Send a command to the serial connection.
:arg str command: MAX7301 command.
:arg list *args: Parameters for {command}.
        :returns any: For commands that return data, one byte read from the
            serial connection; None otherwise.
"""
self._connection.write(struct.pack(
'BBB', self._commands.index(command),
*(args + (0,) * (2 - len(args)))))
if command in self._byte_commands:
return struct.unpack('B', self._connection.read(1))[0]
return None
def pulse(self, pin, up, down):
"""Send a short pulse to a pin.
:arg int pin: Pin number.
:arg float up: Pin high duration.
:arg float down: Pin low duration.
"""
self.cmd('dig_write', pin, HIGH)
time.sleep(up)
self.cmd('dig_write', pin, LOW)
time.sleep(down)
def dump(self):
"""Dump all registers of the MAX7301 to standard output."""
for address in range(0x60):
data = self.cmd('read', address)
sys.stdout.write(
'0x{:02x}: 0x{:02x} 0b{:08b}\n'.format(address, data, data))
def main():
max7301 = MAX7301()
max7301.cmd('enable')
max7301.cmd('pinmode', 12, INPUT_PULLUP)
max7301.cmd('pinmode', 22, OUTPUT)
max7301.cmd('pinmode', 31, OUTPUT)
max7301.cmd('pinmode', 24, INPUT_PULLUP)
max7301.cmd('conf_td', 24, True)
max7301.cmd('enable_td')
max7301.cmd('dig_write', 22, LOW)
while True:
if not max7301.cmd('dig_read', 12):
max7301.pulse(22, 0.01, 0.05)
if max7301.cmd('check_int'):
for _ in range(3):
max7301.pulse(22, 0.01, 0.05)
max7301.cmd('enable_td')
time.sleep(1.0)
if __name__ == '__main__':
main()
```
|
{
"source": "jfjlaros/memoise",
"score": 4
}
|
#### File: memoise/examples/benchmark.py
```python
import time
from memoise import Cache
def fib(n):
if n < 2:
return n
return fib(n - 1) + fib(n - 2)
@Cache()
def fib_c(n):
if n < 2:
return n
return fib_c(n - 1) + fib_c(n - 2)
def benchmark(n):
start = time.time()
result = fib(n)
print "fib({}) took {} seconds with result {}".format(
n, time.time() - start, result)
start = time.time()
result = fib_c(n)
print "fib_c({}) took {} seconds with result {}".format(
n, time.time() - start, result)
def main():
benchmark(33)
if __name__ == '__main__':
main()
```
#### File: memoise/memoise/memoise.py
```python
import hashlib
import pylibmc
def _get_params(func, *args, **kwargs):
"""Turn an argument list into a dictionary.
:arg function func: A function.
:arg list *args: Positional arguments of `func`.
:arg dict **kwargs: Keyword arguments of `func`.
:returns dict: Dictionary representation of `args` and `kwargs`.
"""
    # Apply the defaults first, restricted to real argument names, so that
    # explicitly passed positional and keyword arguments take precedence.
    arg_names = func.func_code.co_varnames[:func.func_code.co_argcount]
    params = {}
    if func.func_defaults:
        params.update(dict(zip(
            arg_names[-len(func.func_defaults):], func.func_defaults)))
    params.update(dict(zip(arg_names[:len(args)], args)))
    params.update(kwargs)
return params
class Cache(object):
"""Memoisation decorator.
"""
host = '127.0.0.1'
port = '11211'
def __init__(self, timeout=86400, ignore=[], key=''):
"""Constructor.
:arg int timeout: Timeout for used entries.
:arg list ignore: List of parameter positions and keywords to ignore.
:arg str key: Prefix for generating the key.
"""
self.cache = pylibmc.Client(['{}:{}'.format(self.host, self.port)])
self.timeout = timeout
self.ignore = ignore
self.key = key
def __call__(self, func):
"""Entry point.
:arg function func: A function.
"""
def wrapper(*args, **kwargs):
"""Wrapper function that does cache administration.
"""
params = _get_params(func, *args, **kwargs)
key_data = [self.key, func.__module__, func.func_name]
for param, value in sorted(params.items()):
key_data += [type(value).__name__, param]
if param not in self.ignore:
key_data.append(value)
key = hashlib.md5('__'.join(map(str, key_data))).hexdigest()
result = self.cache.get(key)
if not result:
result = func(*args, **kwargs)
self.cache.add(key, result, time=self.timeout)
return result
return wrapper
def flush(self):
self.cache.flush_all()
```
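The benchmark above only uses the default settings; the constructor options can be combined as in the sketch below (Python 2 syntax, to match the module). A memcached server on 127.0.0.1:11211 is assumed and the decorated function is a made-up example.
```python
from memoise import Cache

@Cache(timeout=3600, ignore=['verbose'], key='demo')
def lookup(record_id, verbose=False):
    # `verbose` is excluded from the cache key, so quiet and verbose calls
    # with the same `record_id` share one cache entry for an hour.
    if verbose:
        print 'computing', record_id
    return record_id * 2
```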
|