the-stack_0_24334 | """
This file contains all GUI design and control
"""
import random
import tkinter as tk
from tkinter import ttk
import matplotlib.pyplot as plt
from matplotlib import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import bookcheckout as bc
import booklist as bl
import bookreturn as br
import booksearch as bs
import database as db
#-------------------------< program preparations />-----------------------------
# create window instance
window = tk.Tk()
# full screen window
window.attributes("-fullscreen", True)
# to rename the title of the window
window.title("Library Management System")
# make the four root frames public so they can be accessed by all functions globally
# just generate a blank prototype for now
main_page = tk.Frame(window, background = "#ffffff")
search_result_page = tk.Frame(window, background = "#ffffff")
checkout_page = tk.Frame(window, background = "#ffffff")
detail_page = tk.Frame(window, background = "#ffffff")
# books prepared to be checked out
checkout_list = []
# make book_name global so the search result page can access it
book_name = tk.StringVar()
#----------------------------< main page />-------------------------------------
def create_main_page():
"""
function used to create the main page, which has two input boxes: one for
book searching and one for book returning
"""
# access the global root pages
global main_page
global search_result_page
global checkout_page
global detail_page
# main page
main_page = tk.Frame(
window,
background = "#ffffff")
# generate the big green header
create_library_management_system_header(main_page)
main_page_frame = tk.Frame(
main_page,
bg = "#ffffff",
height = 10000)
book_search_prompt = tk.Label(
main_page,
text = "Enter Book Title or ID",
font = ('Helvetica', 40),
background = "#ffffff"
).place_configure(x = 200, y = 280)
book_name_search_box = tk.Entry(
main_page,
textvariable = book_name,
width = 35,
font = ('Helvetica', 32),
background = "#eeeeee",
bd=0, borderwidth=7,
relief=tk.FLAT)
add_hover_effect_to_widget(
book_name_search_box,
'#f5f5f5',
'#eeeeee')
book_name_search_box.place_configure(
x = 200,
y = 350)
def search_name_button_pressed():
create_search_result_page(window)
main_page.destroy()
checkout_page.destroy()
search_name_button = tk.Button(
main_page,
width = 8,
text = "Search",
font = ('Helvetica', 26),
bd=0,
background = "#0088ee",
foreground = "white",
command = search_name_button_pressed)
add_hover_effect_to_widget(
search_name_button,
'#33aaee',
'#0088ee')
search_name_button.place_configure(
x = 1080,
y = 350)
return_ID_prompt = tk.Label(
main_page,
text = "Return Book ID",
font = ('Helvetica', 40),
background = "#ffffff")
return_ID_prompt.place_configure(
x = 200,
y = 480)
return_book_ID = tk.StringVar()
return_book_id_search_box = tk.Entry(
main_page,
textvariable = return_book_ID,
width = 35,
font = ('Helvetica', 32),
background = "#eeeeee",
bd=0,
borderwidth=7,
relief=tk.FLAT)
add_hover_effect_to_widget(
return_book_id_search_box,
'#f5f5f5',
'#eeeeee')
return_book_id_search_box.place_configure(
x = 200,
y = 550)
def return_book_button_pressed():
return_result = br.go(return_book_ID.get())
if return_result == 0:
return_book_error_text.set("Return Complete")
return_book_error_display.config(foreground = "#55aa55")
elif return_result == 1:
return_book_error_text.set("This book is not borrowed")
return_book_error_display.config(foreground = "#ff0000")
elif return_result == 2:
return_book_error_text.set("Book not found")
return_book_error_display.config(foreground = "#ff0000")
else:
return_book_error_text.set("Unknown error")
return_book_error_display.config(foreground = "#ff0000")
return_book_button = tk.Button(
main_page,
width = 8,
text = "Confirm",
font = ('Helvetica', 26),
bd=0,
background = "#0088ee",
foreground = "white",
command = return_book_button_pressed)
add_hover_effect_to_widget(
return_book_button,
'#33aaee',
'#0088ee')
return_book_button.place_configure(
x = 1080,
y = 550)
return_book_error_text = tk.StringVar()
return_book_error_display = tk.Label(
main_page,
textvariable = return_book_error_text,
font = ('Helvetica', 20),
background = "#ffffff")
return_book_error_display.place_configure(
x = 230,
y = 650)
exit_button = tk.Button(
main_page,
width = 8,
text = "EXIT",
font = ('Helvetica', 26),
bd=0,
background = "#ee0000",
foreground = "white",
command = quit)
add_hover_effect_to_widget(
exit_button,
'#ee3333',
'#ee0000')
exit_button.place_configure(
x = 1080,
y = 650)
def checkout_button_pressed():
create_checkout_page(window)
main_page.destroy()
search_result_page.destroy()
checkout_list_button = tk.Button(
main_page,
width = 9,
text = "Cart(" + str(len(checkout_list)) + ") >",
font = ('Helvetica', 26),
bd=0,
background = "#55aa55",
foreground = "white",
command = checkout_button_pressed)
add_hover_effect_to_widget(
checkout_list_button,
'#55aa88',
'#55aa55')
checkout_list_button.pack(
side = "right",
anchor = "ne")
main_page_frame.pack(fill="both")
main_page.pack(fill = "both")
#--------------------------< search result page />------------------------------
# function used to generate the book search result display page
def create_search_result_page(parent):
"""
This function is used to create the search result list page
"""
# access the global root pages
global main_page
global search_result_page
global checkout_page
global detail_page
global checkout_list
search_result_page = tk.Frame(
parent,
bg = "#eeeeee")
# generate the big green header
create_library_management_system_header(search_result_page)
def back_to_main_button_pressed():
create_main_page()
search_result_page.destroy()
back_to_main_button = tk.Button(
search_result_page,
width = 8,
text = "< Main",
font = ('Helvetica', 26),
bd=0,
background = "#ee8800",
foreground = "white",
command = back_to_main_button_pressed)
add_hover_effect_to_widget(
back_to_main_button,
'#eeaa33',
'#ee8800')
back_to_main_button.pack(
side = "left",
anchor = "nw")
def checkout_button_pressed():
create_checkout_page(window)
search_result_page.destroy()
checkout_list_button = tk.Button(
search_result_page,
width = 9,
text = "Cart(" + str(len(checkout_list)) + ") >",
font = ('Helvetica', 26),
bd=0,
background = "#55aa55",
foreground = "white",
command = checkout_button_pressed)
add_hover_effect_to_widget(
checkout_list_button,
'#55aa88',
'#55aa55')
checkout_list_button.pack(
side = "right",
anchor = "ne")
book_list_frame = tk.Frame(
search_result_page,
bg = "#eeeeee")
book_list_canvas = tk.Canvas(
book_list_frame,
width = 1100,
height = 980,
bg = "#eeeeee",
highlightthickness = 0)
scrollbar = ttk.Scrollbar(
book_list_frame,
orient="vertical",
command=book_list_canvas.yview)
book_list_canvas.configure(
yscrollcommand = scrollbar.set,
scrollregion=book_list_canvas.bbox('all'))
scrollable_frame = tk.Frame(
book_list_canvas,
bg = "#eeeeee")
scrollable_frame.bind(
"<Configure>",
lambda e: book_list_canvas.configure(
scrollregion=book_list_canvas.bbox("all")
)
)
book_list_canvas.create_window(
(0, 0),
window = scrollable_frame,
anchor="nw")
book_list_canvas.configure(yscrollcommand = scrollbar.set)
search_results = bs.go(book_name.get())
display_results = []
# only keep the original copy record to prevent duplicate
for record in search_results:
# explain:
# search_results is a 2d array that contains multiple lines of records
# each record is in standard record format
# so search_results[i][0] is the first element of i-th record
# which is obviously the id of this book
# since the id is in the format of x_y
# - x is the book id
# - y is the copy num
# which are separated by "_" => y = search_results[i][0].split("_")[1]
# only the copy with y == "0" is the original; the other copies are skipped
if record[0].split("_")[1] == "0":
display_results.append(record)
# sort the result list
display_results = bl.go(display_results)
# show how many results found
ttk.Label(
scrollable_frame,
text = str(len(display_results)) + " search results found",
font = ('Helvetica', 30),
background = "#eeeeee",
width = 45
).pack(padx = 80, pady = 20)
# add search results to display frame line by line
for i in range(len(display_results)):
record = display_results[i]
def book_detail_button_pressed():
search_result_page.destroy()
make_record(
scrollable_frame,
record,
"More",
'#0088ee',
'#33aaee',
book_detail_button_pressed,
2)
book_list_frame.pack()
book_list_canvas.pack(side="left", fill="both", expand=True)
scrollbar.pack(side="right", fill="y")
book_list_frame.pack(fill="y")
search_result_page.pack(fill="x")
#----------------------------< checkout page />---------------------------------
def create_checkout_page(parent):
"""
function used to create the checkout page
"""
# access the global root pages
global main_page
global search_result_page
global checkout_page
global detail_page
global checkout_list
checkout_page = tk.Frame(window, bg = "#eeeeee")
# generate the big green header
create_library_management_system_header(checkout_page)
def back_to_main_button_pressed():
create_main_page()
checkout_page.destroy()
back_to_main_button = tk.Button(
checkout_page,
width = 8,
text = "< Main",
font = ('Helvetica', 26),
bd=0,
background = "#ee8800",
foreground = "white",
command = back_to_main_button_pressed)
add_hover_effect_to_widget(
back_to_main_button,
'#eeaa33',
'#ee8800')
back_to_main_button.pack(
side = "left",
anchor = "nw")
def back_to_search_list_button_pressed():
create_search_result_page(window)
checkout_page.destroy()
back_to_search_list_button = tk.Button(
checkout_page,
width = 9,
text = "< List",
font = ('Helvetica', 26),
bd=0,
background = "#55aa55",
foreground = "white",
command = back_to_search_list_button_pressed)
add_hover_effect_to_widget(
back_to_search_list_button,
'#55aa88',
'#55aa55')
back_to_search_list_button.pack(
side = "right",
anchor = "ne")
book_list_frame = tk.Frame(
checkout_page,
bg = "#eeeeee")
book_list_canvas = tk.Canvas(
book_list_frame,
width = 1100,
height = 980,
bg = "#eeeeee",
highlightthickness = 0)
scrollbar = ttk.Scrollbar(
book_list_frame,
orient="vertical",
command=book_list_canvas.yview)
scrollable_frame = tk.Frame(
book_list_canvas,
bg = "#eeeeee")
# bind the canvas scroll region to scrollable_frame
scrollable_frame.bind(
"<Configure>",
lambda e: book_list_canvas.configure(
scrollregion=book_list_canvas.bbox("all")
)
)
# put the scrollable_frame into canvas
book_list_canvas.create_window(
(0, 0),
window=scrollable_frame, anchor="nw")
# bind the scroll control to scrollbar
book_list_canvas.configure(yscrollcommand=scrollbar.set)
# show how many results found
ttk.Label(
scrollable_frame,
text = str(len(checkout_list)) + " book(s) ready for checkout",
font = ('Helvetica', 30),
background = "#eeeeee",
width = 45
).pack(padx = 80, pady = 20)
# checkout confirmation ui group
checkout_confirm_frame = tk.Frame(
scrollable_frame,
background = "#ffffff")
# prompt the user to enter the member ID
ttk.Label(
checkout_confirm_frame,
text = "Please enter you member ID to checkout",
font = ('Helvetica', 20),
background = "#ffffff"
).pack(pady = 20)
# entry box for entering member ID
member_ID = tk.StringVar()
member_ID_box = tk.Entry(
checkout_confirm_frame,
textvariable = member_ID,
width = 35,
font = ('Helvetica', 32),
background = "#eeeeee",
bd=0,
borderwidth=7,
relief=tk.FLAT)
add_hover_effect_to_widget(
member_ID_box,
'#f5f5f5',
'#eeeeee')
member_ID_box.pack()
# message displayed to the user if there is any error
message_var = tk.StringVar()
return_message = tk.Label(
checkout_confirm_frame,
textvariable = message_var,
font = ('Helvetica', 20),
background = "#ffffff")
return_message.pack(
side = "left",
pady = 20,
padx = 200,
anchor = "w")
def confirm_checkout_button_pressed():
global checkout_list
input_id = member_ID.get()
# if there is nothing ready to be checked out
if len(checkout_list) == 0:
message_var.set("Nothing to checkout")
return_message.config(foreground = "#ff0000")
return
member_id_validation_result = member_id_validation(input_id)
if member_id_validation_result == 0:
# succeed
bc.go(checkout_list, input_id)
# clear checkout list, since all books have been checked out
checkout_list = []
checkout_page_refresh()
elif member_id_validation_result == 1:
# if the id input is not 4 digits
message_var.set("Member ID should be 4-digits")
return_message.config(foreground = "#ff0000")
elif member_id_validation_result == 2:
# if the member ID input contains non-digit characters
message_var.set("Member ID should be a number")
return_message.config(foreground = "#ff0000")
# placeholder used to avoid the confirm checkout button being pushed out of
# the checkout confirm frame
tk.Frame(
checkout_confirm_frame,
width = 149,
background = "#ffffff"
).pack(side = "right")
confirm_checkout_button = tk.Button(
checkout_confirm_frame,
width = 8,
text = "Confirm",
font = ('Helvetica', 26),
bd=0,
background = "#0088ee",
foreground = "white",
command = confirm_checkout_button_pressed)
add_hover_effect_to_widget(
confirm_checkout_button,
'#33aaee',
'#0088ee')
confirm_checkout_button.pack(side = "right")
checkout_confirm_frame.pack(
fill = "x",
pady = 10)
# add search results to display frame line by line
for i in range(len(checkout_list)):
# checkout_list stores the full details of book,
# same as search_results above
make_record(
scrollable_frame,
checkout_list[i],
"Remove",
'#cc0000',
'#ee0000',
checkout_page_refresh,
1)
book_list_frame.pack()
book_list_canvas.pack(
side="left",
fill="both",
expand=True)
scrollbar.pack(
side="right",
fill="y")
book_list_frame.pack(fill="y")
checkout_page.pack(fill = "both")
def checkout_page_refresh():
# destroy the current checkout page
checkout_page.destroy()
# recreate the checkout page (refresh to show cart button changes)
create_checkout_page(window)
#-----------------------------< detail page />----------------------------------
def create_detail_page(parent, input_record):
"""
function used to create the page for book details
"""
# access the global root pages
global main_page
global search_result_page
global checkout_page
global detail_page
global checkout_list
detail_page = tk.Frame(parent, bg = "#eeeeee")
# generate the big green header
create_library_management_system_header(detail_page)
def back_to_main_button_pressed():
create_main_page()
detail_page.destroy()
back_to_main_button = tk.Button(
detail_page,
width = 8,
text = "< Main",
font = ('Helvetica', 26),
bd=0,
background = "#ee8800",
foreground = "white",
command = back_to_main_button_pressed)
add_hover_effect_to_widget(
back_to_main_button,
'#eeaa33',
'#ee8800')
back_to_main_button.pack(
side = "left",
anchor = "nw")
def back_to_search_list_button_pressed():
create_search_result_page(window)
detail_page.destroy()
back_to_search_list_button = tk.Button(
detail_page,
width = 9,
text = "< List",
font = ('Helvetica', 26),
bd=0,
background = "#55aa55",
foreground = "white",
command = back_to_search_list_button_pressed)
add_hover_effect_to_widget(
back_to_search_list_button,
'#55aa88',
'#55aa55')
back_to_search_list_button.pack(
side = "right",
anchor = "ne")
book_list_frame = tk.Frame(
detail_page,
bg = "#eeeeee")
book_list_canvas = tk.Canvas(
book_list_frame,
width = 1100,
height = 980,
bg = "#eeeeee",
highlightthickness = 0)
scrollbar = ttk.Scrollbar(
book_list_frame,
orient="vertical",
command=book_list_canvas.yview)
scrollable_frame = tk.Frame(
book_list_canvas,
bg = "#eeeeee")
# bind the canvas scroll region to scrollable_frame
scrollable_frame.bind(
"<Configure>",
lambda e: book_list_canvas.configure(
scrollregion=book_list_canvas.bbox("all")
)
)
# put the scrollable_frame into canvas
book_list_canvas.create_window(
(0, 0),
window=scrollable_frame,
anchor="nw")
# bind the scroll control to scrollbar
book_list_canvas.configure(yscrollcommand=scrollbar.set)
# show book title
ttk.Label(
scrollable_frame,
text = input_record[1],
font = ('Helvetica', 30),
background = "#ffffff",
width = 40
).pack(padx = 80, pady = 20, ipadx = 50, ipady = 10)
# book details ui
book_details_frame = tk.Frame(
scrollable_frame,
background = "#ffffff")
# more info about the book
# draw trend graph for current book
f = open("logfile.txt", "r")
borrow_count_per_year = ()
first_borrow_year = 0
last_borrow_year = 0
# (borrow_count_per_year, first_borrow_year, last_borrow_year) = db.get_book_history(input_record[0].split("_")[0])
year_list = []
data_list = []
for i in range(2010, 2020):
year_list.append(i)
data_list.append(random.randint(5, 20))
# data_list = list(borrow_count_per_year)
trend_graph(scrollable_frame, "Trend of " + input_record[1], year_list, data_list)
# display the author between the trend graph and the copy list
ttk.Label(
scrollable_frame,
text = "Author: " + input_record[2],
font = ('Helvetica', 20),
background = "#ffffff"
).pack(pady = 20)
# search for copies with the same name and display
copy_search_result = bs.go(input_record[1])
for copy in copy_search_result:
# checkout_list stores the full details of book, same as display_results above
# check if the book is available
if copy[-1] == "0":
if copy in checkout_list:
make_record(
scrollable_frame,
copy,
"In cart",
'#55aa55',
'#55aa55',
None,
-1)
else:
def detail_page_refresh():
"""
function used to refresh the detail page
"""
# destroy the current detail page
detail_page.destroy()
# recreate the detail page (refresh to show in-cart button changes)
create_detail_page(window, input_record)
make_record(
scrollable_frame,
copy,
"Add to cart",
'#0088ee',
'#33aaee',
detail_page_refresh,
0)
else:
make_record(
scrollable_frame,
copy,
"Unavailable",
'#dddddd',
'#dddddd',
None,
-1)
# other book information
book_info = tk.Label(
book_details_frame,
textvariable = "message_var",
font = ('Helvetica', 20),
background = "#ffffff")
book_info.pack(
side = "left",
pady = 20,
padx = 200,
anchor = "w")
book_list_frame.pack()
book_list_canvas.pack(
side="left",
fill="both",
expand=True)
scrollbar.pack(
side="right",
fill="y")
book_details_frame.pack(fill="x")
detail_page.pack(fill = "both")
#--------------------------< general functions />-------------------------------
def create_library_management_system_header(parent):
"""
function used to create the big green header
"""
# green background containing title and copyright
title_green_canvas = tk.Canvas(
parent,
bg = "#55aa55",
height = 150,
highlightthickness = 0)
# big large title
title = title_green_canvas.create_text(
500,
70,
text = "Library Management System",
font = ('Helvetica', 40, 'bold'),
fill = "white")
# copyright
copyright_text = title_green_canvas.create_text(
1300,
120,
text = "© Lin Zexin v1.0 13/12/2019",
font = ('Helvetica', 15),
fill = "white")
# fill horizontally
title_green_canvas.pack(
fill = "x",
side = "top")
def trend_graph(parent, title, year_list, data_list):
"""
function used to create the trend graph on book detail page
"""
figure = plt.Figure(figsize=(10,5), dpi = 100)
ax = figure.add_subplot(111)
chart_type = FigureCanvasTkAgg(figure, parent)
chart_type.get_tk_widget().pack(fill = "x")
ax.plot(
year_list,
data_list,
color = 'lightblue',
linewidth = 3)
ax.set_title(title)
def member_id_validation(input_id):
"""
function used to check if the input member id is valid
"""
if len(input_id) != 4:
# if the id input is not 4 digits
return 1
elif input_id.isdigit():
# if the input id is a number
return 0
else:
# if not a number
return 2
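# Example return codes for the validator above (derived from the checks it performs):
#   member_id_validation("1234") -> 0  (valid: four digits)
#   member_id_validation("123")  -> 1  (wrong length)
#   member_id_validation("12a4") -> 2  (not purely numeric)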
def make_record(
parent,
record,
button_text,
button_original_color,
button_hover_color,
additional_button_function,
mode = 0):
"""
function used to make the information block on the book search result page and
the checkout page; mode 0 adds the record to the cart, mode 1 removes it from
the cart, mode 2 opens the detail page, and mode -1 makes the button inert
"""
# check if the record input has enough information needed
if len(record) < 4:
return
record_frame = tk.Frame(
parent,
bg = "#ffffff")
# book title
tk.Label(
record_frame,
text = str(record[1]),
font = ('Helvetica', 18),
bg = "#ffffff",
anchor = "w"
).pack(fill = "x", pady = 20, ipady = 10, padx = 50)
# book details
# when displayed in the search result page, do not show the ID and purchase date
if mode == 2:
book_detail_text = "Author: " + str(record[2])
else:
book_detail_text = "ID: " + str(record[0]) + " > " + "Author: " + str(record[2]) + " > " + "Purchase date: " + str(record[3])
tk.Label(
record_frame,
text = book_detail_text,
bg = "#ffffff", anchor = "w", justify = tk.LEFT
).pack(side = "left", padx = 50)
# placeholder used to avoid the add-to-cart button being pushed out of the frame
tk.Frame(
record_frame,
width = 100,
background = "#ffffff"
).pack(side = "right")
def button_pressed():
"""
function bound to the button defined below
"""
if mode == 0:
# when the mode is set to 0 (add to checkout list mode)
# add the book id to cart list
checkout_list.append(record)
elif mode == 1:
# when the mode is set to 1 (remove from checkout list mode)
checkout_list.remove(record)
elif mode == 2:
create_detail_page(window, record)
else:
# otherwise means the button does nothing
pass
if additional_button_function != None:
additional_button_function()
button = tk.Button(
record_frame, width = 8,
text = button_text,
font = ('Helvetica', 15),
bd=0, background = button_original_color,
foreground = "white", padx = 20,
command = button_pressed
)
add_hover_effect_to_widget(
button, button_hover_color,
button_original_color)
button.pack(
side = "right",
ipadx = 20)
record_frame.pack(
fill = "x",
pady = 10)
def add_hover_effect_to_widget(
widget, hover_color = '#ffffff', original_color = '#eeeeee'
):
"""
function used to add hover effects to tk widgets
(widget is the item waiting to add effect, hover_color is the color when
mouse is on it, original_color is the color when mouse is not on it)
"""
def on_enter(e):
widget['background'] = hover_color
def on_leave(e):
widget['background'] = original_color
widget.bind("<Enter>", on_enter)
widget.bind("<Leave>", on_leave)
#-----------------------------< program start />--------------------------------
# create main page to start
create_main_page()
window.mainloop()
the-stack_0_24337
import logging
import time
from parsl.providers.kubernetes.template import template_string
logger = logging.getLogger(__name__)
from parsl.errors import OptionalModuleMissing
from parsl.providers.provider_base import ExecutionProvider, JobState, JobStatus
from parsl.utils import RepresentationMixin
import typeguard
from typing import Any, Dict, List, Optional, Tuple
try:
from kubernetes import client, config
_kubernetes_enabled = True
except (ImportError, NameError, FileNotFoundError):
_kubernetes_enabled = False
translate_table = {
'Running': JobState.RUNNING,
'Pending': JobState.PENDING,
'Succeeded': JobState.COMPLETED,
'Failed': JobState.FAILED,
'Unknown': JobState.UNKNOWN,
}
class KubernetesProvider(ExecutionProvider, RepresentationMixin):
""" Kubernetes execution provider
Parameters
----------
namespace : str
Kubernetes namespace to create deployments.
image : str
Docker image to use in the deployment.
nodes_per_block : int
Nodes to provision per block.
init_blocks : int
Number of blocks to provision at the start of the run. Default is 4.
min_blocks : int
Minimum number of blocks to maintain.
max_blocks : int
Maximum number of blocks to maintain.
max_cpu : float
CPU limits of the blocks (pods), in cpu units.
This is the cpu "limits" option for resource specification.
Check kubernetes docs for more details. Default is 2.
max_mem : str
Memory limits of the blocks (pods), in Mi or Gi.
This is the memory "limits" option for resource specification on kubernetes.
Check kubernetes docs for more details. Default is 500Mi.
init_cpu : float
CPU requests of the blocks (pods), in cpu units.
This is the cpu "requests" option for resource specification.
Check kubernetes docs for more details. Default is 1.
init_mem : str
Memory requests of the blocks (pods), in Mi or Gi.
This is the memory "requests" option for resource specification on kubernetes.
Check kubernetes docs for more details. Default is 250Mi.
parallelism : float
Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
scaling where as many resources as possible are used; parallelism close to 0 represents
the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
worker_init : str
Command to be run first for the workers, such as ``python start.py``.
secret : str
The Kubernetes ImagePullSecret secret to use to pull images
pod_name : str
The name for the pod, will be appended with a timestamp.
Default is None, meaning parsl automatically names the pod.
user_id : str
Unix user id to run the container as.
group_id : str
Unix group id to run the container as.
run_as_non_root : bool
Run as non-root (True) or run as root (False).
persistent_volumes: list[(str, str)]
List of tuples describing persistent volumes to be mounted in the pod.
The tuples consist of (PVC Name, Mount Directory).
"""
@typeguard.typechecked
def __init__(self,
image: str,
namespace: str = 'default',
nodes_per_block: int = 1,
init_blocks: int = 4,
min_blocks: int = 0,
max_blocks: int = 10,
max_cpu: float = 2,
max_mem: str = "500Mi",
init_cpu: float = 1,
init_mem: str = "250Mi",
parallelism: float = 1,
worker_init: str = "",
pod_name: Optional[str] = None,
user_id: Optional[str] = None,
group_id: Optional[str] = None,
run_as_non_root: bool = False,
secret: Optional[str] = None,
persistent_volumes: List[Tuple[str, str]] = []) -> None:
if not _kubernetes_enabled:
raise OptionalModuleMissing(['kubernetes'],
"Kubernetes provider requires kubernetes module and config.")
config.load_kube_config()
self.namespace = namespace
self.image = image
self.nodes_per_block = nodes_per_block
self.init_blocks = init_blocks
self.min_blocks = min_blocks
self.max_blocks = max_blocks
self.max_cpu = max_cpu
self.max_mem = max_mem
self.init_cpu = init_cpu
self.init_mem = init_mem
self.parallelism = parallelism
self.worker_init = worker_init
self.secret = secret
self.pod_name = pod_name
self.user_id = user_id
self.group_id = group_id
self.run_as_non_root = run_as_non_root
self.persistent_volumes = persistent_volumes
self.kube_client = client.CoreV1Api()
# Dictionary that keeps track of jobs, keyed on job_id
self.resources = {} # type: Dict[str, Dict[str, Any]]
def submit(self, cmd_string, tasks_per_node, job_name="parsl"):
""" Submit a job
Args:
- cmd_string :(String) - Name of the container to initiate
- tasks_per_node (int) : command invocations to be launched per node
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
"""
cur_timestamp = str(time.time() * 1000).split(".")[0]
job_name = "{0}-{1}".format(job_name, cur_timestamp)
if not self.pod_name:
pod_name = '{}'.format(job_name)
else:
pod_name = '{}-{}'.format(self.pod_name,
cur_timestamp)
formatted_cmd = template_string.format(command=cmd_string,
worker_init=self.worker_init)
logger.debug("Pod name :{}".format(pod_name))
self._create_pod(image=self.image,
pod_name=pod_name,
job_name=job_name,
cmd_string=formatted_cmd,
volumes=self.persistent_volumes)
self.resources[pod_name] = {'status': JobStatus(JobState.RUNNING)}
return pod_name
def status(self, job_ids):
""" Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of JobStatus objects corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderExceptions or its subclasses
"""
if job_ids:
self._status()
return [self.resources[jid]['status'] for jid in job_ids]
def cancel(self, job_ids):
""" Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
"""
for job in job_ids:
logger.debug("Terminating job/pod: {0}".format(job))
self._delete_pod(job)
self.resources[job]['status'] = JobStatus(JobState.CANCELLED)
rets = [True for i in job_ids]
return rets
def _status(self):
"""Returns the status list for a list of job_ids
Args:
self
Returns:
[status...] : Status list of all jobs
"""
job_ids = list(self.resources.keys())
to_poll_job_ids = [jid for jid in job_ids if not self.resources[jid]['status'].terminal]
logger.debug("Polling Kubernetes pod status: {}".format(to_poll_job_ids))
for jid in to_poll_job_ids:
phase = None
try:
pod_status = self.kube_client.read_namespaced_pod_status(name=jid, namespace=self.namespace)
except Exception:
logger.exception("Failed to poll pod {} status, most likely because pod was terminated".format(jid))
if self.resources[jid]['status'].state == JobState.RUNNING:
phase = 'Unknown'
else:
phase = pod_status.status.phase
if phase:
status = translate_table.get(phase, JobState.UNKNOWN)
logger.debug("Updating pod {} with status {} to parsl status {}".format(jid,
phase,
status))
self.resources[jid]['status'] = JobStatus(status)
def _create_pod(self,
image,
pod_name,
job_name,
port=80,
cmd_string=None,
volumes=[]):
""" Create a kubernetes pod for the job.
Args:
- image (string) : Docker image to launch
- pod_name (string) : Name of the pod
- job_name (string) : App label
KWargs:
- port (integer) : Container port
Returns:
- None
"""
security_context = None
if self.user_id and self.group_id:
security_context = client.V1SecurityContext(run_as_group=self.group_id,
run_as_user=self.user_id,
run_as_non_root=self.run_as_non_root)
# Create the environment variables and command to initiate IPP
environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA")
launch_args = ["-c", "{0};".format(cmd_string)]
volume_mounts = []
# Create mount paths for the volumes
for volume in volumes:
volume_mounts.append(client.V1VolumeMount(mount_path=volume[1],
name=volume[0]))
resources = client.V1ResourceRequirements(limits={'cpu': str(self.max_cpu),
'memory': self.max_mem},
requests={'cpu': str(self.init_cpu),
'memory': self.init_mem}
)
# Configure Pod template container
container = client.V1Container(
name=pod_name,
image=image,
resources=resources,
ports=[client.V1ContainerPort(container_port=port)],
volume_mounts=volume_mounts,
command=['/bin/bash'],
args=launch_args,
env=[environment_vars],
security_context=security_context)
# Create a secret to enable pulling images from secure repositories
secret = None
if self.secret:
secret = client.V1LocalObjectReference(name=self.secret)
# Create list of volumes from (pvc, mount) tuples
volume_defs = []
for volume in volumes:
volume_defs.append(client.V1Volume(name=volume[0],
persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
claim_name=volume[0])))
metadata = client.V1ObjectMeta(name=pod_name,
labels={"app": job_name})
spec = client.V1PodSpec(containers=[container],
image_pull_secrets=[secret],
volumes=volume_defs
)
pod = client.V1Pod(spec=spec, metadata=metadata)
api_response = self.kube_client.create_namespaced_pod(namespace=self.namespace,
body=pod)
logger.debug("Pod created. status='{0}'".format(str(api_response.status)))
def _delete_pod(self, pod_name):
"""Delete a pod"""
api_response = self.kube_client.delete_namespaced_pod(name=pod_name,
namespace=self.namespace,
body=client.V1DeleteOptions())
logger.debug("Pod deleted. status='{0}'".format(str(api_response.status)))
@property
def label(self):
return "kubernetes"
@property
def status_polling_interval(self):
return 60
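# Illustrative usage sketch (not part of the original module): constructing the
# provider and submitting a worker pod. The image, namespace and command below
# are placeholders, and a reachable cluster with a valid kubeconfig is assumed.
#
# provider = KubernetesProvider(
#     image="python:3.9",            # hypothetical worker image
#     namespace="parsl-workers",     # hypothetical namespace
#     init_blocks=1,
#     max_blocks=2,
#     worker_init="pip install parsl",
# )
# job_id = provider.submit("process_worker_pool.py", tasks_per_node=1, job_name="demo")
# print(provider.status([job_id]))
# provider.cancel([job_id])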
the-stack_0_24338
from __future__ import annotations
from typing import Any, List
from joulehunter import processors
from joulehunter.frame import BaseFrame
from joulehunter.session import Session
# pyright: strict
ProcessorList = List[processors.ProcessorType]
class Renderer:
"""
Abstract base class for renderers.
"""
processors: ProcessorList
"""
Processors installed on this renderer. This property is defined on the
base class to provide a common way for users to add and
manipulate them before calling :func:`render`.
"""
processor_options: dict[str, Any]
"""
Dictionary containing processor options, passed to each processor.
"""
def __init__(
self,
show_all: bool = False,
timeline: bool = False,
processor_options: dict[str, Any] | None = None,
):
"""
:param show_all: Don't hide library frames - show everything that joulehunter captures.
:param timeline: Instead of aggregating time, leave the samples in chronological order.
:param processor_options: A dictionary of processor options.
"""
# processors is defined on the base class to provide a common way for users to
# add to and manipulate them before calling render()
self.processors = self.default_processors()
self.processor_options = processor_options or {}
if show_all:
self.processors.remove(processors.group_library_frames_processor)
if timeline:
self.processors.remove(processors.aggregate_repeated_calls)
def default_processors(self) -> ProcessorList:
"""
Return a list of processors that this renderer uses by default.
"""
raise NotImplementedError()
def preprocess(self, root_frame: BaseFrame | None) -> BaseFrame | None:
frame = root_frame
for processor in self.processors:
frame = processor(frame, options=self.processor_options)
return frame
def render(self, session: Session) -> str:
"""
Return a string that contains the rendered form of `frame`.
"""
raise NotImplementedError()
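# Minimal sketch of a concrete renderer (illustrative only; the class name, the
# empty processor list, the Session.root_frame() call and the frame.function
# attribute are assumptions, not guaranteed by this module):
#
# class PlainTextRenderer(Renderer):
#     def default_processors(self) -> ProcessorList:
#         return []  # subclasses normally return the processors they rely on
#
#     def render(self, session: Session) -> str:
#         frame = self.preprocess(session.root_frame())
#         return "empty session" if frame is None else f"root: {frame.function}"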
the-stack_0_24340
#!/usr/bin/env python
# coding: utf-8
# In[28]:
from scipy.io import loadmat
import glob
import cv2
from shutil import copyfile
import os
import numpy as np
import matplotlib.pylab as plt
from skimage.io import imread
from pathlib import Path
import skimage
from skimage import feature, morphology
from matplotlib.pyplot import figure
import matplotlib
from skimage.color import rgb2gray
import copy
import gc
import sys
# In[2]:
bird_labels = {'head':1, 'leye':2, 'reye':3, 'beak':4, 'torso':5, 'neck':6, 'lwing':7, 'rwing':8, 'lleg':9, 'lfoot':10, 'rleg':11, 'rfoot':12, 'tail':13}
cat_labels = {'head':1, 'leye':2, 'reye':3, 'lear':4, 'rear':5, 'nose':6, 'torso':7, 'neck':8, 'lfleg':9, 'lfpa':10, 'rfleg':11, 'rfpa':12, 'lbleg':13, 'lbpa':14, 'rbleg':15, 'rbpa':16, 'tail':17}
cow_labels = {'head':1, 'leye':2, 'reye':3, 'lear':4, 'rear':5, 'muzzle':6, 'lhorn':7, 'rhorn':8, 'torso':9, 'neck':10, 'lfuleg':11, 'lflleg':12, 'rfuleg':13, 'rflleg':14, 'lbuleg':15, 'lblleg':16, 'rbuleg':17, 'rblleg':18, 'tail':19}
dog_labels = {'head':1, 'leye':2, 'reye':3, 'lear':4, 'rear':5, 'nose':6, 'torso':7, 'neck':8, 'lfleg':9, 'lfpa':10, 'rfleg':11, 'rfpa':12, 'lbleg':13, 'lbpa':14, 'rbleg':15, 'rbpa':16, 'tail':17, 'muzzle':18}
horse_labels = {'head':1, 'leye':2, 'reye':3, 'lear':4, 'rear':5, 'muzzle':6, 'lfho':7, 'rfho':8, 'torso':9, 'neck':10, 'lfuleg':11, 'lflleg':12, 'rfuleg':13, 'rflleg':14, 'lbuleg':15, 'lblleg':16, 'rbuleg':17, 'rblleg':18, 'tail':19, 'lbho':20, 'rbho':21}
bottle_labels = {'cap':1, 'body':2}
person_labels = {'head':1, 'leye':2, 'reye':3, 'lear':4, 'rear':5, 'lebrow':6, 'rebrow':7, 'nose':8, 'mouth':9, 'hair':10, 'torso':11, 'neck': 12, 'llarm': 13, 'luarm': 14, 'lhand': 15, 'rlarm':16, 'ruarm':17, 'rhand': 18, 'llleg': 19, 'luleg':20, 'lfoot':21, 'rlleg':22, 'ruleg':23, 'rfoot':24}
bus_labels = { 'frontside':1, 'leftside':2, 'rightside':3, 'backside':4, 'roofside':5, 'leftmirror':6, 'rightmirror':7, 'fliplate':8, 'bliplate':9 }
for ii in range(0,10):
bus_labels['door_{}'.format(ii+1)] = 10+ii
for ii in range(0,10):
bus_labels['wheel_{}'.format(ii+1)] = 20+ii
for ii in range(0,10):
bus_labels['headlight_{}'.format(ii+1)] = 30+ii
for ii in range(0,20):
bus_labels['window_{}'.format(ii+1)] = 40+ii
car_labels = { 'frontside':1, 'leftside':2, 'rightside':3, 'backside':4, 'roofside':5, 'leftmirror':6, 'rightmirror':7, 'fliplate':8, 'bliplate':9 }
for ii in range(0,3):
car_labels['door_{}'.format(ii+1)] = 10+ii
for ii in range(0,4):
car_labels['wheel_{}'.format(ii+1)] = 13+ii
for ii in range(0,6):
car_labels['headlight_{}'.format(ii+1)] = 17+ii
for ii in range(0,7):
car_labels['window_{}'.format(ii+1)] = 23+ii
aeroplane_labels = {'body': 1, 'stern': 2, 'lwing': 3, 'rwing':4, 'tail':5}
for ii in range(1, 10):
aeroplane_labels['engine_{}'.format(ii)] = 5+ii
for ii in range(1, 10):
aeroplane_labels['wheel_{}'.format(ii)] = 14+ii
motorbike_labels = {'fwheel': 1, 'bwheel': 2, 'handlebar': 3, 'saddle': 4}
for ii in range(0,10):
motorbike_labels['headlight_{}'.format(ii+1)] = 5+ii
motorbike_labels['body']=15
bicycle_labels = {'fwheel': 1, 'bwheel': 2, 'saddle': 3, 'handlebar': 4, 'chainwheel': 5}
for ii in range(0,10):
bicycle_labels['headlight_{}'.format(ii+1)] = 6+ii
bicycle_labels['body']=16
train_labels = {'head':1,'hfrontside':2,'hleftside':3,'hrightside':4,'hbackside':5,'hroofside':6}
for ii in range(0,10):
train_labels['headlight_{}'.format(ii+1)] = 7 + ii
for ii in range(0,10):
train_labels['coach_{}'.format(ii+1)] = 17 + ii
for ii in range(0,10):
train_labels['cfrontside_{}'.format(ii+1)] = 27 + ii
for ii in range(0,10):
train_labels['cleftside_{}'.format(ii+1)] = 37 + ii
for ii in range(0,10):
train_labels['crightside_{}'.format(ii+1)] = 47 + ii
for ii in range(0,10):
train_labels['cbackside_{}'.format(ii+1)] = 57 + ii
for ii in range(0,10):
train_labels['croofside_{}'.format(ii+1)] = 67 + ii
sheep_labels = cow_labels
part_labels = {'bird': bird_labels, 'cat': cat_labels, 'cow': cow_labels, 'dog': dog_labels, 'sheep': sheep_labels, 'horse':horse_labels, 'car':car_labels, 'bus':bus_labels, 'bicycle':bicycle_labels, 'motorbike':motorbike_labels, 'person':person_labels,'aeroplane':aeroplane_labels, 'train':train_labels}
# In[3]:
object_name = sys.argv[1]
animals = [object_name]
print("object to make mask of is ", object_name)
# In[4]:
def rotate_im(image, angle):
# grab the dimensions of the image and then determine the
# centre
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
image = cv2.warpAffine(image, M, (nW, nH))
# image = cv2.resize(image, (w,h))
return image
# In[5]:
def get_corners(bboxes):
width = (bboxes[:,2] - bboxes[:,0]).reshape(-1,1)
height = (bboxes[:,3] - bboxes[:,1]).reshape(-1,1)
x1 = bboxes[:,0].reshape(-1,1)
y1 = bboxes[:,1].reshape(-1,1)
x2 = x1 + width
y2 = y1
x3 = x1
y3 = y1 + height
x4 = bboxes[:,2].reshape(-1,1)
y4 = bboxes[:,3].reshape(-1,1)
corners = np.hstack((x1,y1,x2,y2,x3,y3,x4,y4))
return corners
# In[6]:
def clip_box(bbox, clip_box, alpha):
ar_ = (bbox_area(bbox))
x_min = np.maximum(bbox[:,0], clip_box[0]).reshape(-1,1)
y_min = np.maximum(bbox[:,1], clip_box[1]).reshape(-1,1)
x_max = np.minimum(bbox[:,2], clip_box[2]).reshape(-1,1)
y_max = np.minimum(bbox[:,3], clip_box[3]).reshape(-1,1)
bbox = np.hstack((x_min, y_min, x_max, y_max, bbox[:,4:]))
delta_area = ((ar_ - bbox_area(bbox))/ar_)
mask = (delta_area < (1 - alpha)).astype(int)
bbox = bbox[mask == 1,:]
return bbox
# In[7]:
def rotate_box(corners,angle, cx, cy, h, w):
corners = corners.reshape(-1,2)
corners = np.hstack((corners, np.ones((corners.shape[0],1), dtype = type(corners[0][0]))))
M = cv2.getRotationMatrix2D((cx, cy), angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cx
M[1, 2] += (nH / 2) - cy
# Prepare the vector to be transformed
calculated = np.dot(M,corners.T).T
calculated = calculated.reshape(-1,8)
return calculated
# In[8]:
def get_enclosing_box(corners):
x_ = corners[:,[0,2,4,6]]
y_ = corners[:,[1,3,5,7]]
xmin = np.min(x_,1).reshape(-1,1)
ymin = np.min(y_,1).reshape(-1,1)
xmax = np.max(x_,1).reshape(-1,1)
ymax = np.max(y_,1).reshape(-1,1)
final = np.hstack((xmin, ymin, xmax, ymax,corners[:,8:]))
return final
# In[9]:
def bbox_area(bbox):
return (bbox[:,2] - bbox[:,0])*(bbox[:,3] - bbox[:,1])
# In[10]:
def rtt(angle, img, bboxes):
w,h = img.shape[1], img.shape[0]
cx, cy = w//2, h//2
img = rotate_im(img, angle)
corners = get_corners(bboxes)
corners = np.hstack((corners, bboxes[:,4:]))
corners[:,:8] = rotate_box(corners[:,:8], angle, cx, cy, h, w)
new_bbox = get_enclosing_box(corners)
scale_factor_x = img.shape[1] / w
scale_factor_y = img.shape[0] / h
img = cv2.resize(img, (w,h))
new_bbox[:,:4] = np.true_divide(new_bbox[:,:4], [scale_factor_x, scale_factor_y, scale_factor_x, scale_factor_y])
bboxes = new_bbox
#bboxes = clip_box(bboxes, [0,0,w, h], 0.25)
return img, bboxes
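# Illustrative usage of the rotation helpers above (they are not called elsewhere
# in this script): rotate an image together with its bounding boxes by 15 degrees.
# Boxes are rows of [x_min, y_min, x_max, y_max].
#
# example_img = np.zeros((100, 200, 3), dtype=np.uint8)
# example_boxes = np.array([[20.0, 30.0, 80.0, 90.0]])
# rotated_img, rotated_boxes = rtt(15, example_img, example_boxes)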
# In[11]:
def parts(annopath):
data = loadmat(annopath)['anno'][0, 0]
d = {}
for obj in data['objects'][0, :]:
p = get_parts(obj)
bp = {}
for body_parts in p:
bp[str(body_parts[0][0])] = body_parts['mask']
bp['body'] = obj['mask']
if obj[0][0] in animals:
d[obj[0][0]] = bp
return d
# In[12]:
def get_parts(obj):
name = obj['class'][0]
index = obj['class_ind'][0, 0]
n = obj['parts'].shape[1]
parts = []
if n > 0:
for part in obj['parts'][0, :]:
parts.append(part)
return parts
# In[13]:
def darker(img):
result = np.where(img!=255)
listOfCoordinates = list(zip(result[0], result[1]))
for cord in listOfCoordinates:
img[cord] = 0
return img
# In[14]:
def bounder(img):
result = np.where(img!=255)
listOfCoordinates = list(zip(result[0], result[1]))
for cord in listOfCoordinates:
img[cord] = 0
result1 = np.where(img==255)
listOfCoordinates1 = list(zip(result1[0], result1[1]))
for cord in listOfCoordinates1:
img[cord] = 1
return img
# In[15]:
def cordinates(img):
y_min = 0
y_max = 0
x_min = 0
x_max = 0
for i in img:
if np.count_nonzero(i) != 0:
break
y_min+=1
for i in img.T:
if np.count_nonzero(i) != 0:
break
x_min+=1
for i in img[::-1]:
if np.count_nonzero(i) != 0:
break
y_max+=1
y_max = img.shape[0] - y_max - 1
for i in img.T[::-1]:
if np.count_nonzero(i) != 0:
break
x_max+=1
x_max = img.shape[1] - x_max - 1
return x_min, y_min, x_max, y_max
# In[16]:
def gray(img):
return rgb2gray(img)
# In[17]:
def edges(img):
d = morphology.dilation(img, selem=None, out=None, shift_x=False, shift_y=False)
#d = morphology.dilation(img, selem=None, out=None, shift_x=False, shift_y=False)
#d = morphology.dilation(img, selem=None, out=None, shift_x=False, shift_y=False)
e = morphology.erosion(img, selem=None, out=None, shift_x=False, shift_y=False)
i = d-e
return i
# In[18]:
def label_mask(parts_dic, labels):
label_mask = 0
for key, value in parts_dic.items():
result = np.where(value == 1)
listOfCoordinates= list(zip(result[0], result[1]))
for cord in listOfCoordinates:
value[cord] = labels[key]
label_mask = label_mask + value
return label_mask
# In[19]:
def seg_recnstrct(parts_dic, labels):
seg = {}
img = 0
for key, value in parts_dic.items():
#value = edges(value)
seg[key]= value
img = img + value
img = np.invert(img)
img = darker(img)
#label = label_mask(parts_dic, labels)
#img = skimage.color.label2rgb(label, image=img, colors=None, alpha=0.3, bg_label=-1, bg_color=(0, 0, 0), image_alpha=1, kind='overlay')
#img = edges(img)
return img, seg
import csv
def animal_list_maker():
animal_list = {}
for animal in animals:
file_name = animal + '_trainval.txt'
with open('ImageSets/' + file_name) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=' ')
line_count = 0
n = 0
for row in csv_reader:
if row[-1] == '1':
n+=1
annopath = './Annotations_Part/' + row[0] + '.mat'
my_file = Path(annopath)
if my_file.is_file():
animal_list[row[0]] = parts(annopath)
return animal_list
# In[52]:
def final_dic_images():
final_dic = {}
i = 0
segs = []
animal_list = animal_list_maker()
images = {}
for file_name in animal_list:
for animal_name in animal_list[file_name]:
if len(animal_list[file_name][animal_name]) > 0:
labels = part_labels[animal_name]
parts_dic = animal_list[file_name][animal_name]
parts_dic1 = {}
img, seg = seg_recnstrct(parts_dic, labels)
segs.append(seg)
ll = bounder(img)
ll= 1-ll
x_min, y_min, x_max, y_max = cordinates(ll)
h = y_max - y_min
w = x_max - x_min
img = img[y_min:y_min+h , x_min:x_min+w]
for jkl in parts_dic:
plpl=[]
plpl.append(parts_dic[jkl][y_min:y_min+h , x_min:x_min+w])
parts_dic[jkl] = parts_dic[jkl][
y_min:y_min+h , x_min:x_min+w]
x_min1, y_min1, x_max1, y_max1 = cordinates(parts_dic[jkl])
plpl.append([x_min1, y_min1, x_max1, y_max1])
parts_dic1[jkl] = plpl
final_dic[str(i)] = parts_dic1
images[str(i)] = img
i = i+1
return final_dic, images
# In[53]:
print("final dictionary in construction...")
final_dic, images = final_dic_images()
bbx = copy.deepcopy(final_dic)
print("final dictionary constructed")
info_dic = {'category':object_name, 'max_parts':len(part_labels[object_name]), 'num_images':len(images), 'num_parts':None, 'size':None}
number_of_parts = []
size = []
for image in images:
number_of_parts.append(len(bbx[image]))
size.append(images[image].shape)
info_dic['num_parts'] = number_of_parts
info_dic['size'] = size
image_temp = []
input_list = []
output_list = []
labels = part_labels[object_name]
for i in bbx:
lst1 = []
lst2 = []
for j in bbx[i]:
lst1.append(bbx[i][j])
lst2.append(labels[j] - 1)
input_list.append(np.eye(len((labels)))[lst2])
output_list.append(np.asarray(lst1))
input_list = np.asarray(input_list)
output_list = np.asarray(output_list)
max_num_node = len(part_labels[object_name])
def pad_lists(list_input, shape = (max_num_node,)):
result = np.zeros(shape)
x_offset = 0
y_offset = 0
result[y_offset:list_input.shape[0]+y_offset] = list_input
return result
def get_label(l):
i = np.argmax(l)
if i != max_num_node:
return i
return -1
def change_style(inpute, outpute):
inps = [np.zeros(max_num_node)]*max_num_node
masks = [np.zeros((64,64,1))]*max_num_node
outs_bbx = [[0,0,0,0]]*max_num_node
for i, j in zip(inpute, outpute):
if get_label(i) != -1:
inps[get_label(i)] = pad_lists(i)
outs_bbx[get_label(i)] = j[1]
xm, ym, xx, yx = j[1]
imh = j[0][int(ym):int(yx)+1 , int(xm):int(xx)+1]
if imh.shape[0]>0 and imh.shape[1]>0:
resized_cropped = np.expand_dims(cv2.resize(imh, (64, 64)), axis = 2 )
masks[get_label(i)] = resized_cropped
return inps, outs_bbx, masks
# In[81]:
def lister_bister(input_object, output_object):
il = []
olx = []
olmask = []
oledges = []
olmaps = []
for i, j in zip(input_object, output_object):
ci, cj, ck = change_style(i, j)
il.append(ci)
olx.append(cj)
olmask.append(ck)
return il, olx, olmask
# In[83]:
def dataset_maker( input_object, output_object):
l_object1 = []
label_object, bbx_object, masks_object = lister_bister(input_object, output_object)
label_object = np.asarray(label_object, dtype = 'float32')
bbx_object = np.asarray(bbx_object, dtype = 'float32')
masks_object = np.asarray(masks_object, dtype = 'float32')
return label_object, bbx_object, masks_object
# In[84]:
label, bbx, masks = dataset_maker(input_list, output_list)
print("constructing pickles...")
import pickle
with open(object_name + '_part_separated_labels', 'wb') as f:
pickle.dump(label, f)
with open(object_name + '_part_separated_bbx', 'wb') as f:
pickle.dump(bbx, f)
with open(object_name + '_part_separated_masks', 'wb') as f:
pickle.dump(masks, f)
with open(object_name + '_info', 'wb') as f:
pickle.dump(info_dic, f)
print("pickles constructed")
print("output files are", object_name, "labels, bbx and masks with shape", label.shape, (bbx).shape, (masks).shape, "and info dict with len", len(info_dic))
print("done")
the-stack_0_24341
"""Partially observed variant of the CartPole gym environment.
https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py
We delete the velocity component of the state, so that it can only be solved
by a LSTM policy."""
import argparse
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("--stop", type=int, default=200)
parser.add_argument("--use-prev-action-reward", action="store_true")
parser.add_argument("--run", type=str, default="PPO")
class CartPoleStatelessEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 60
}
def __init__(self):
self.gravity = 9.8
self.masscart = 1.0
self.masspole = 0.1
self.total_mass = (self.masspole + self.masscart)
self.length = 0.5 # actually half the pole's length
self.polemass_length = (self.masspole * self.length)
self.force_mag = 10.0
self.tau = 0.02 # seconds between state updates
# Angle at which to fail the episode
self.theta_threshold_radians = 12 * 2 * math.pi / 360
self.x_threshold = 2.4
high = np.array([
self.x_threshold * 2,
self.theta_threshold_radians * 2,
])
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Box(-high, high)
self.seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(
action), "%r (%s) invalid" % (action, type(action))
state = self.state
x, x_dot, theta, theta_dot = state
force = self.force_mag if action == 1 else -self.force_mag
costheta = math.cos(theta)
sintheta = math.sin(theta)
temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta
) / self.total_mass
thetaacc = (self.gravity * sintheta - costheta * temp) / (
self.length *
(4.0 / 3.0 - self.masspole * costheta * costheta / self.total_mass)
)
xacc = (temp -
self.polemass_length * thetaacc * costheta / self.total_mass)
x = x + self.tau * x_dot
x_dot = x_dot + self.tau * xacc
theta = theta + self.tau * theta_dot
theta_dot = theta_dot + self.tau * thetaacc
self.state = (x, x_dot, theta, theta_dot)
done = (x < -self.x_threshold or x > self.x_threshold
or theta < -self.theta_threshold_radians
or theta > self.theta_threshold_radians)
done = bool(done)
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
self.steps_beyond_done += 1
reward = 0.0
rv = np.r_[self.state[0], self.state[2]]
return rv, reward, done, {}
def reset(self):
self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4, ))
self.steps_beyond_done = None
rv = np.r_[self.state[0], self.state[2]]
return rv
def render(self, mode='human'):
screen_width = 600
screen_height = 400
world_width = self.x_threshold * 2
scale = screen_width / world_width
carty = 100 # TOP OF CART
polewidth = 10.0
polelen = scale * 1.0
cartwidth = 50.0
cartheight = 30.0
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
l, r, t, b = (-cartwidth / 2, cartwidth / 2, cartheight / 2,
-cartheight / 2)
axleoffset = cartheight / 4.0
cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
self.carttrans = rendering.Transform()
cart.add_attr(self.carttrans)
self.viewer.add_geom(cart)
l, r, t, b = (-polewidth / 2, polewidth / 2,
polelen - polewidth / 2, -polewidth / 2)
pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
pole.set_color(.8, .6, .4)
self.poletrans = rendering.Transform(translation=(0, axleoffset))
pole.add_attr(self.poletrans)
pole.add_attr(self.carttrans)
self.viewer.add_geom(pole)
self.axle = rendering.make_circle(polewidth / 2)
self.axle.add_attr(self.poletrans)
self.axle.add_attr(self.carttrans)
self.axle.set_color(.5, .5, .8)
self.viewer.add_geom(self.axle)
self.track = rendering.Line((0, carty), (screen_width, carty))
self.track.set_color(0, 0, 0)
self.viewer.add_geom(self.track)
if self.state is None:
return None
x = self.state
cartx = x[0] * scale + screen_width / 2.0 # MIDDLE OF CART
self.carttrans.set_translation(cartx, carty)
self.poletrans.set_rotation(-x[2])
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
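# Illustrative standalone rollout with random actions (commented out so it does
# not interfere with the Ray/Tune entry point below):
#
# env = CartPoleStatelessEnv()
# obs = env.reset()
# done, total = False, 0.0
# while not done:
#     obs, reward, done, _ = env.step(env.action_space.sample())
#     total += reward
# print("episode return:", total)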
if __name__ == "__main__":
import ray
from ray import tune
args = parser.parse_args()
tune.register_env("cartpole_stateless", lambda _: CartPoleStatelessEnv())
ray.init()
configs = {
"PPO": {
"num_sgd_iter": 5,
"vf_share_layers": True,
"vf_loss_coeff": 0.0001,
},
"IMPALA": {
"num_workers": 2,
"num_gpus": 0,
"vf_loss_coeff": 0.01,
},
}
tune.run_experiments({
"test": {
"env": "cartpole_stateless",
"run": args.run,
"stop": {
"episode_reward_mean": args.stop
},
"config": dict(
configs[args.run], **{
"model": {
"use_lstm": True,
"lstm_use_prev_action_reward": args.
use_prev_action_reward,
},
}),
}
})
the-stack_0_24343
import numpy as np
# np.random.seed(0)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_derivative(x):
return x * (1 - x)
# Input datasets
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
expected_output = np.array([[0], [1], [1], [0]])
epochs = 1
lr = 0.1
inputLayerNeurons, hiddenLayerNeurons, outputLayerNeurons = 2, 2, 1
# Random weights and bias initialization
hidden_weights = np.random.uniform(size=(inputLayerNeurons, hiddenLayerNeurons))
hidden_bias = np.random.uniform(size=(1, hiddenLayerNeurons))
output_weights = np.random.uniform(size=(hiddenLayerNeurons, outputLayerNeurons))
output_bias = np.random.uniform(size=(1, outputLayerNeurons))
hidden_weights = np.array([[0.8, 0.4], [0.1, 0.05]])
hidden_bias = np.array([[0.2, 0.6]])
output_weights = np.array([[0.35], [0.21]])
output_bias = np.array([[0.7]])
# print("Initial hidden weights: ", end='')
# print(*hidden_weights)
# print("Initial hidden biases: ", end='')
# print(*hidden_bias)
# print("Initial output weights: ", end='')
# print(*output_weights)
# print("Initial output biases: ", end='')
# print(*output_bias)
# Training algorithm
for _ in range(epochs):
# Forward Propagation
hidden_layer_activation = np.dot(inputs, hidden_weights) # correct on mine
hidden_layer_activation += hidden_bias # correct on mine
hidden_layer_output = sigmoid(hidden_layer_activation) # correct on mine
output_layer_activation = np.dot(hidden_layer_output, output_weights) # correct on mine
output_layer_activation += output_bias # correct on mine
predicted_output = sigmoid(output_layer_activation) # correct on mine
# Backpropagation
error = expected_output - predicted_output # correct on mine
d_predicted_output = error * sigmoid_derivative(predicted_output) # correct on mine = node.cost_value_der
error_hidden_layer = d_predicted_output.dot(output_weights.T)
print(f"output weights = {output_weights.T}")
print(f"error_hidden_layer = {error_hidden_layer}")
d_hidden_layer = error_hidden_layer * sigmoid_derivative(hidden_layer_output)
print(f"d_hidden_layer = {d_hidden_layer}")
print(f"outputs bias changes - {np.sum(d_predicted_output, axis=0, keepdims=True)}")
print(f"hidden bias changes - {np.sum(d_hidden_layer, axis=0, keepdims=True)}")
print(f"output weights changes - {hidden_layer_output.T.dot(d_predicted_output)}")
print(f"hidden weights changes - {inputs.T.dot(d_hidden_layer)}")
# Updating Weights and Biases
output_weights += hidden_layer_output.T.dot(d_predicted_output) * lr
output_bias += np.sum(d_predicted_output, axis=0, keepdims=True) * lr
hidden_weights += inputs.T.dot(d_hidden_layer) * lr
hidden_bias += np.sum(d_hidden_layer, axis=0, keepdims=True) * lr
# print("Final hidden weights: ", end='')
# print(*hidden_weights)
# print("Final hidden bias: ", end='')
# print(*hidden_bias)
# print("Final output weights: ", end='')
# print(*output_weights)
# print("Final output bias: ", end='')
# print(*output_bias)
print("\nOutput from neural network after 10,000 epochs: ", end='')
print(*predicted_output)
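# Quick sanity check of the single update above: with epochs = 1 the network has
# not learned XOR yet; the rounded predictions only match the targets after many
# more epochs (typically around 10,000).
print("Rounded predictions vs targets:",
      np.round(predicted_output).astype(int).ravel(),
      expected_output.ravel())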
the-stack_0_24346
def fatorial(num=1):
f = 1
for c in range(num, 1, -1):
f *= c
return f
n = int(input('Enter a number: '))
print(f'The factorial of {n} is {fatorial(n)}')
the-stack_0_24351
"""Add annotation timer column to links
Revision ID: a2443af2c65e
Revises: 79dcf9aa4f15
Create Date: 2018-04-04 14:03:25.622267
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a2443af2c65e'
down_revision = '79dcf9aa4f15'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('article_papers', sa.Column('annotation_time', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('article_papers', 'annotation_time')
# ### end Alembic commands ###
the-stack_0_24354
import os
import queue
import re
import time
import tkinter as tk
import tkinter.filedialog as tkfd
import tkinter.simpledialog as tksd
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42 # Edit plots with Illustrator
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backend_bases import MouseButton
import numpy as np
from . import const
from .events import Event
from .sessionopener_tk import SessionOpener
from ..stack import metastack as ms
from ..stack import types as ty
from ..stackviewer_tk import StackViewer
from .status import DummyStatus
from .view import SessionView
KEYS_NEXT_CELL = {'Down', 'KP_Down'}
KEYS_PREV_CELL = {'Up', 'KP_Up'}
KEYS_HIGHLIGHT_CELL = {'Return', 'KP_Enter'}
KEYS_SHOW_CONTOURS = {'Insert', 'KP_Insert'}
KEYS_CHANNEL = {fmt.format(sym) for fmt in ('{}', 'KP_{}') for sym in range(1, 10)}
KEYS_NEXT_FRAME = {'Right', 'KP_Right'}
KEYS_PREV_FRAME = {'Left', 'KP_Left'}
KEYS_FIRST_FRAME = {'Home', 'KP_Home'}
KEYS_LAST_FRAME = {'End', 'KP_End'}
FRAME_SCROLL_RATE_MAX = 8e9
QUEUE_POLL_INTERVAL = 10
# tkinter event state constants for key presses
# see: https://web.archive.org/web/20181009085916/http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/event-handlers.html
EVENT_STATE_SHIFT = 1
EVENT_STATE_CTRL = 4
MODE_SELECTION = 'selection'
MODE_HIGHLIGHT = 'highlight'
TOOL_LABEL_BINARIZE = "Binarize…"
DESELECTED_DARKEN_FACTOR = .3
MIC_RES = {
# Resolutions are given in µm/px
# See: https://collab.lmu.de/x/9QGFAw
"Nikon (4x)": 1.61,
"Nikon (10x PhC)": .649,
"Nikon (20x)": .327,
"Nikon TIRF (4x)": 1.621,
"Nikon TIRF (10x PhC)": .658,
"Nikon TIRF (20x)": .333,
"Nikon TIRF (60x)": .108,
"Zeiss (10x PhC)": .647,
"Zeiss (20x)": .312,
"Zeiss (40x)": .207,
"UNikon (4x)": 1.618,
"UNikon (10x PhC)": .655,
"UNikon (10x)": .650,
"UNikon (20x)": .331,
"UNikon (40x)": .163,
"UNikon (60x)": .108,
"UNikon (100x)": .065,
"Cell culture (5x)": .81,
"Cell culture (10x PhC)":.42,
"Cell culture (20x)": .21,
}
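# Illustrative note (not part of the original module): with a resolution r in
# µm/px, a length of n pixels corresponds to n * r µm; for example, 100 px
# imaged at "Nikon (20x)" (0.327 µm/px) span about 32.7 µm.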
MIC_RES_UNSPEC = "Unspecified (use [px])"
MIC_RES_CUSTOM = "Custom"
MIC_RES_UNSPEC_IDX = 1
MIC_RES_CUSTOM_IDX = 2
class SessionView_Tk(SessionView):
def __init__(self, title, control_queue, status):
self.root = tk.Tk()
self.root.title(title)
self.root.geometry('1300x600')
self.root.grid_rowconfigure(0, weight=1)
self.root.grid_columnconfigure(0, weight=1)
# Initialize variables
self.queue = queue.Queue()
self.control_queue = control_queue
self.var_statusmsg = tk.StringVar(value="Initializing")
self.status = status
self.status_id = self.status.register_viewer(self.update_status, self.queue)
self.session = None
self._session_opener = None
self.cmd_map = {
const.CMD_SET_SESSION: self.set_session,
const.CMD_UPDATE_TRACES: self.update_traces,
}
self.display_stack = None
self.channel_selection = {}
self.channel_order = []
self.frame_indicators = []
#self.traces = None #replace by self.session.traces
#self.trace_info = None # replace by self.session.trace_info
#self.rois = None # replace by self.session.rois
self.fig = None
self.fig_widget = None
self.save_dir = None
self.last_frame_scroll = Event.now()
self.var_show_frame_indicator = tk.BooleanVar(value=True)
self.var_mode = tk.StringVar(value=MODE_HIGHLIGHT)
self.var_darken_deselected = tk.BooleanVar(value=False)
self.var_show_roi_contours = tk.BooleanVar(value=True)
self.var_show_roi_names = tk.BooleanVar(value=True)
self.var_show_untrackable = tk.BooleanVar(value=False)
self.var_microscope_res = tk.StringVar(value=MIC_RES_UNSPEC)
# Build menu
menubar = tk.Menu(self.root)
self.root.config(menu=menubar)
filemenu = tk.Menu(menubar)
menubar.add_cascade(label="File", menu=filemenu)
filemenu.add_command(label="Open stack…", command=self.open_stack)
filemenu.add_command(label="Open session", command=self.open_session)
filemenu.add_command(label="Save", command=self.save)
filemenu.add_command(label="Set output directory…", command=self._get_savedir)
filemenu.add_command(label="Quit", command=self.root.quit)
modemenu = tk.Menu(menubar)
menubar.add_cascade(label="Mode", menu=modemenu)
modemenu.add_radiobutton(label="Highlight", value=MODE_HIGHLIGHT, variable=self.var_mode)
modemenu.add_radiobutton(label="Selection", value=MODE_SELECTION, variable=self.var_mode)
self.toolmenu = tk.Menu(menubar)
menubar.add_cascade(label="Tools", menu=self.toolmenu)
self.toolmenu.add_command(label=TOOL_LABEL_BINARIZE, command=self.binarize, state=tk.DISABLED)
self.toolmenu.add_command(label="Pickle maximum bounding box", command=self._pickle_max_bbox)
self.toolmenu.add_command(label="Background correction…", command=self._background_correction)
settmenu = tk.Menu(menubar)
menubar.add_cascade(label="Settings", menu=settmenu)
settmenu.add_checkbutton(label="Display frame indicator", variable=self.var_show_frame_indicator)
settmenu.add_checkbutton(label="Display cell contours", variable=self.var_show_roi_contours)
settmenu.add_checkbutton(label="Display cell labels", variable=self.var_show_roi_names)
settmenu.add_checkbutton(label="Display untracked cells", variable=self.var_show_untrackable)
settmenu.add_checkbutton(label="Darken deselected cells", variable=self.var_darken_deselected)
self.micresmenu = tk.Menu(settmenu)
settmenu.add_cascade(label="Microscope resolution", menu=self.micresmenu)
for mic_opt in MIC_RES.keys():
self._add_to_microscope_menu(mic_opt)
MIC_RES[MIC_RES_UNSPEC] = None
MIC_RES[MIC_RES_CUSTOM] = None
self.micresmenu.insert(MIC_RES_UNSPEC_IDX,
'radiobutton',
label=MIC_RES_UNSPEC,
value=MIC_RES_UNSPEC,
variable=self.var_microscope_res,
command=lambda mo=MIC_RES_UNSPEC: self._change_microscope_resolution(mo)
)
self.micresmenu.insert(MIC_RES_CUSTOM_IDX,
'radiobutton',
label=MIC_RES_CUSTOM,
value=MIC_RES_CUSTOM,
variable=self.var_microscope_res,
command=lambda mo=MIC_RES_CUSTOM: self._change_microscope_resolution(mo)
)
helpmenu = tk.Menu(menubar)
menubar.add_cascade(label="Help", menu=helpmenu)
helpmenu.add_command(label="Breakpoint", command=self._breakpoint)
helpmenu.add_command(label="Sleep 10s", command=self.sleep10)
# Window structure
self.paned = tk.PanedWindow(self.root, orient=tk.HORIZONTAL, sashwidth=2, sashrelief=tk.RAISED)
self.paned.grid(row=0, column=0, sticky='NESW')
## Channels frame
self.chanframe = tk.Frame(self.paned)
self.paned.add(self.chanframe, sticky='NESW', width=150)
self.chanframe.grid_columnconfigure(0, weight=1)
self.open_btn = tk.Button(self.chanframe, text="Open stack...", command=self.open_stack)
self.open_btn.grid(row=0, column=0, sticky='NEW', padx=10, pady=5)
self.chansellbl = tk.Label(self.chanframe, text="Display channels", anchor=tk.W, state=tk.DISABLED)
self.chansellbl.grid(row=1, column=0, sticky='NESW', padx=10, pady=(20, 5))
self.chanselframe = tk.Frame(self.chanframe)
self.chanselframe.grid(row=2, column=0, sticky='ESW')
self.plotsellbl = tk.Label(self.chanframe, text="Plot traces", anchor=tk.W, state=tk.DISABLED)
self.plotsellbl.grid(row=3, column=0, sticky='ESW', padx=10, pady=(20, 5))
self.plotselframe = tk.Frame(self.chanframe)
self.plotselframe.grid(row=4, column=0, sticky='ESW')
## Stack frame
self.stackframe = tk.Frame(self.paned)
self.paned.add(self.stackframe, sticky='NESW', width=650)
self.stackviewer = StackViewer(parent=self.stackframe, root=self.root, show_buttons='contrast')
## Figure frame
self.figframe = tk.Frame(self.paned)
self.paned.add(self.figframe, sticky='NESW', width=500)
self.create_figure()
## Statusbar
self.statusbar = tk.Frame(self.root, padx=2, pady=2, bd=1, relief=tk.SUNKEN)
self.statusbar.grid(row=1, column=0, sticky='NESW')
tk.Label(self.statusbar, anchor=tk.W, textvariable=self.var_statusmsg).pack(side=tk.LEFT, anchor=tk.W)
# Callbacks
self.var_show_frame_indicator.trace_add('write', self._update_frame_indicator)
self.var_darken_deselected.trace_add('write', lambda *_: self.display_stack._listeners.notify('image'))
self.var_show_roi_contours.trace_add('write', self._update_show_roi_contours)
self.var_show_roi_names.trace_add('write', self._update_show_roi_names)
self.var_show_untrackable.trace_add('write', self._update_show_untrackable)
self.stackframe.bind('<Configure>', self._stacksize_changed)
self.stackviewer.register_roi_click(self._roi_clicked)
## Set global key bindings for display and cell selection
# Some key symbols for the keypad (KP_*) may not be available in all systems.
bindings = ((KEYS_NEXT_CELL | KEYS_PREV_CELL | KEYS_HIGHLIGHT_CELL, self._key_highlight_cell),
(KEYS_SHOW_CONTOURS, lambda _:
self.var_show_roi_contours.set(not self.var_show_roi_contours.get())),
(KEYS_NEXT_FRAME | KEYS_PREV_FRAME, self._key_scroll_frames),
(KEYS_CHANNEL, self._key_change_channel),
(KEYS_FIRST_FRAME | KEYS_LAST_FRAME, self._key_jump_frames),
)
for keysyms, callback in bindings:
for keysym in keysyms:
if len(keysym) > 1:
keysym = f"<{keysym}>"
try:
self.root.bind(keysym, callback)
except Exception:
if not (os.name == 'nt' and re.fullmatch(r'<KP_\D.*>', keysym)):
# Cleaner start-up on Windows
# (the <KP_\D.*> keysyms are not available in Windows)
print(f"Failed to register keysym '{keysym}'")
def mainloop(self):
self.root.after(QUEUE_POLL_INTERVAL, self.poll_event_queue)
self.root.mainloop()
self.root.quit()
def _breakpoint(self):
"""Enter a breakpoint for DEBUGging"""
breakpoint()
def sleep10(self):
"""Sleep 10 seconds for DEBUGging"""
import threading
def sleep(self=self, t_max=10):
t = 0
while t < t_max:
with self.status("Sleeping", current=t, total=t_max):
time.sleep(1)
t += 1
threading.Thread(target=sleep).start()
def update_status(self, msg="", current=None, total=None):
"""Update the status shown in the status bar"""
if current is None:
status = msg
elif total is None:
status = f"{msg} {current}"
else:
status = f"{msg} {current}/{total}"
self.var_statusmsg.set(status)
self.root.update_idletasks()
def create_figure(self):
"""Show an empty figure"""
self.fig = Figure()
mpl_canvas = FigureCanvasTkAgg(self.fig, master=self.figframe)
self.fig.canvas.mpl_connect('pick_event', self._line_picker)
mpl_canvas.draw()
self.fig_widget = mpl_canvas.get_tk_widget()
self.fig_widget.pack(fill=tk.BOTH, expand=True)
def poll_event_queue(self):
"""Poll event queue"""
while True:
try:
evt = self.queue.get_nowait()
except queue.Empty:
break
if evt.fun is not None:
evt()
continue
try:
cmd = self.cmd_map[evt.cmd]
except KeyError:
pass
else:
evt(cmd)
continue
try:
cmd = self.session_opener.cmd_map[evt.cmd]
except (KeyError, AttributeError):
pass
else:
evt(cmd)
continue
raise ValueError(f"Unknown command: '{evt.cmd}'")
self.root.after(QUEUE_POLL_INTERVAL, self.poll_event_queue)
@property
def session_opener(self):
"""Return an active SessionOpener_Tk or None"""
if self._session_opener is not None and not self._session_opener.active:
self._session_opener = None
return self._session_opener
def open_stack(self):
"""Ask user to open new stack"""
if self.session_opener is None:
self._session_opener = SessionOpener(self.root, control_queue=self.control_queue, status=self.status)
else:
self.session_opener.to_front()
def set_session(self, session=None):
"""Set a SessionModel instance for display"""
#TODO: Allow multiple types of PhC and Segmentation
self.session = session
if self.session is None:
self.display_stack = None
pass
else:
with self.status("Loading stack …"):
self.display_stack = self.session.display_stack
# Create channel display buttons
self.channel_order.clear()
for k, x in tuple(self.channel_selection.items()):
x['button'].destroy()
del self.channel_selection[k]
idx_phasecontrast = None
idx_fluorescence = []
idx_segmentation = None
for i, spec in enumerate(self.session.stack.channels):
                if spec.type == ty.TYPE_PHASECONTRAST and idx_phasecontrast is None:
idx_phasecontrast = i
elif spec.type == ty.TYPE_FLUORESCENCE:
idx_fluorescence.append(i)
                elif spec.type == ty.TYPE_SEGMENTATION and idx_segmentation is None:
idx_segmentation = i
else:
continue
x = {}
self.channel_selection[i] = x
x['type'] = spec.type
x['val'] = False
btntxt = []
if spec.label:
btntxt.append(spec.label)
if spec.type == ty.TYPE_FLUORESCENCE:
btntxt.append("{} {}".format(spec.type, len(idx_fluorescence)))
else:
btntxt.append(spec.type)
btntxt = "\n".join(btntxt)
x['button'] = tk.Button(self.chanselframe, justify=tk.LEFT, text=btntxt)
x['button'].bind('<ButtonPress-1><ButtonRelease-1>', self._build_chanselbtn_callback(i))
# Display channel display buttons
self.chansellbl.config(state=tk.NORMAL)
if idx_phasecontrast is not None:
self.channel_order.append(idx_phasecontrast)
self.channel_selection[idx_phasecontrast]['button'].pack(anchor=tk.N,
expand=True, fill=tk.X, padx=10, pady=5)
for i in idx_fluorescence:
self.channel_order.append(i)
self.channel_selection[i]['button'].pack(anchor=tk.N,
expand=True, fill=tk.X, padx=10, pady=5)
if idx_segmentation is not None:
self.channel_order.append(idx_segmentation)
self.channel_selection[idx_segmentation]['button'].pack(anchor=tk.N,
expand=True, fill=tk.X, padx=10, pady=5)
# Update tools menu
if idx_phasecontrast is None:
new_state = tk.DISABLED
else:
new_state = tk.NORMAL
self.toolmenu.entryconfig(TOOL_LABEL_BINARIZE, state=new_state)
# Initial channel selection and display
self._change_channel_selection()
self.update_roi_display(notify_listeners=False)
self.stackviewer.set_stack(self.display_stack, wait=False)
def save(self):
"""Save data to files"""
if not self.save_dir:
self._get_savedir()
#TODO: in new thread
Event.fire(self.control_queue, const.CMD_SAVE_SESSION_TO_DISK, self.session, self.save_dir, status=self.status)
def _get_savedir(self):
"""Ask user for output directory"""
options = {'mustexist': False,
'parent': self.root,
'title': "Choose output directory",
}
if self.save_dir:
options['initialdir'] = self.save_dir
new_savedir = tkfd.askdirectory(**options)
if new_savedir:
if not os.path.exists(new_savedir):
os.makedirs(new_savedir)
elif not os.path.isdir(new_savedir):
#TODO: show GUI dialog
raise NotADirectoryError("Not a directory: '{}'".format(new_savedir))
self.save_dir = new_savedir
elif not new_savedir:
raise ValueError("No save directory given")
elif not os.path.isdir(self.save_dir):
raise NotADirectoryError("Not a directory: '{}'".format(self.save_dir))
def _stacksize_changed(self, evt):
"""Update stackviewer after stack size change"""
self.stackviewer._change_stack_position(force=True)
def _key_highlight_cell(self, evt):
"""Callback for highlighting cells by arrow keys
Up/down arrows highlight cells,
Enter toggles cell selection.
"""
if not self.session or not self.session.traces:
return
cells_sorted = self.session.traces_sorted(self.stackviewer.i_frame)
cells_highlight = list(cells_sorted.index(name) for name, tr in self.session.traces.items() if tr['highlight'])
is_selection_updated = False
if evt.keysym in KEYS_PREV_CELL:
# Highlight previous cell
for i in cells_highlight:
self.highlight_trace(cells_sorted[i], val=False)
if cells_highlight:
new_highlight = cells_highlight[0] - 1
if new_highlight < 0:
new_highlight = cells_sorted[-1]
else:
new_highlight = cells_sorted[new_highlight]
else:
new_highlight = cells_sorted[-1]
self.highlight_trace(new_highlight, val=True)
self.update_highlight()
elif evt.keysym in KEYS_NEXT_CELL:
# Highlight next cell
for i in cells_highlight:
self.highlight_trace(cells_sorted[i], val=False)
if cells_highlight:
new_highlight = cells_highlight[-1] + 1
if new_highlight >= len(cells_sorted):
new_highlight = cells_sorted[0]
else:
new_highlight = cells_sorted[new_highlight]
else:
new_highlight = cells_sorted[0]
self.highlight_trace(new_highlight, val=True)
self.update_highlight()
elif evt.keysym in KEYS_HIGHLIGHT_CELL:
# Toggle cell selection
for i in cells_highlight:
self.select_trace(cells_sorted[i])
self.update_selection()
def _key_scroll_frames(self, evt):
"""Callback for scrolling through channels"""
if evt.keysym in KEYS_NEXT_FRAME:
if evt.state & EVENT_STATE_CTRL:
cmd = 'up10'
else:
cmd = 'up'
elif evt.keysym in KEYS_PREV_FRAME:
if evt.state & EVENT_STATE_CTRL:
cmd = 'down10'
else:
cmd = 'down'
else:
return
clock = Event.now()
if clock - self.last_frame_scroll < 1 / FRAME_SCROLL_RATE_MAX:
return
self.last_frame_scroll = clock
self.stackviewer._i_frame_step(cmd)
def _key_jump_frames(self, evt):
"""Callback for jumping to first or last frame"""
if evt.keysym in KEYS_FIRST_FRAME:
i_frame = 0
elif evt.keysym in KEYS_LAST_FRAME:
i_frame = -1
else:
return
self.last_frame_scroll = Event.now()
self.stackviewer.i_frame_jump(i_frame)
def _key_change_channel(self, evt):
"""Callback for displaying channels"""
if not self.channel_order:
return
try:
new_chan = int(evt.keysym[-1]) - 1
new_chan = self.channel_order[new_chan]
except Exception:
return
self._change_channel_selection(new_chan)
def _build_chanselbtn_callback(self, i):
"""Build callback for channel selection button.
`i` is the key of the corresponding item in `self.channel_selection`.
The returned callback will, by default, select the channel with key `i`
and deselect all other buttons. However, if the control key is pressed
simultaneously with the click, the selection of channel `i` is toggled.
"""
def callback(event):
nonlocal self, i
self._change_channel_selection(i, toggle=bool(event.state & EVENT_STATE_CTRL), default=i)
return callback
def _change_channel_selection(self, *channels, toggle=False, default=None):
"""Select channels for display.
`channels` holds the specified channels (indices to `self.channel_selection`).
If `toggle`, the selections of the channels in `channels` are toggled.
If not `toggle`, the channels in `channels` are selected and all others are deselected.
If `default` is defined, it must be an index to `self.channel_selection`.
The channel corresponding to `default` is selected if no other channel would
be displayed after executing this function.
"""
has_selected = False
if not channels:
pass
elif toggle:
for i in channels:
ch = self.channel_selection[i]
ch['val'] ^= True
has_selected = ch['val']
else:
for i, ch in self.channel_selection.items():
if i in channels:
ch['val'] = True
has_selected = True
else:
ch['val'] = False
if not has_selected and \
not any(ch['val'] for ch in self.channel_selection.values()):
if default is None:
default = 0
ch = self.channel_selection[self.channel_order[default]]
ch['val'] = True
self.display_stack._listeners.notify('image')
self.root.after_idle(self._update_channel_selection_button_states)
def _update_channel_selection_button_states(self):
"""Helper function
Called by `_change_channel_selection` after all GUI updates
are processed. Necessary because otherwise, changes would be
overwritten by ButtonRelease event.
"""
for ch in self.channel_selection.values():
ch['button'].config(relief=(tk.SUNKEN if ch['val'] else tk.RAISED))
def make_display_render_function(self, stack, render_segmentation):
"""Factory function for display rendering function.
stack -- metastack of session instance
render_segmentation -- function for rendering binary segmentation image
"""
def render_display(meta, frame, scale=None):
"""Dynamically create display image.
This method is to be called by `MetaStack.get_image`
within the GUI thread.
Arguments:
meta -- the calling `MetaStack` instance; ignored
frame -- the index of the selected frame
scale -- scaling information; ignored
"""
nonlocal self, stack, render_segmentation
#TODO histogram-based contrast adjustment
# Find channel to display
channels = []
for i in sorted(self.channel_selection.keys()):
if self.channel_selection[i]['val']:
channels.append(i)
if not channels:
channels.append(0)
# Update frame indicator
self.root.after_idle(self._update_frame_indicator)
# Get image scale
self.root.update_idletasks()
display_width = self.stackframe.winfo_width()
if self.display_stack.width != display_width:
scale = display_width / stack.width
else:
scale = self.display_stack.width / stack.width
# Convert image to uint8
imgs = []
seg_img = None
for i in channels:
img = stack.get_image(channel=i, frame=frame, scale=scale)
if stack.spec(i).type != ty.TYPE_SEGMENTATION:
if self.var_darken_deselected.get():
# Darken deselected and untracked cells
if seg_img is None:
seg_img = render_segmentation(stack, frame,
rois=False, binary=True)
seg_img = ms.MetaStack.scale_img(seg_img, scale=scale)
bkgd = img[seg_img < .5].mean()
img = seg_img * (const.DESELECTED_DARKEN_FACTOR * img \
+ (1 - const.DESELECTED_DARKEN_FACTOR) * bkgd) \
+ (1 - seg_img) * img
img_min, img_max = img.min(), img.max()
img = ((img - img_min) * (255 / (img_max - img_min)))
imgs.append(img)
if len(imgs) > 1:
img = np.mean(imgs, axis=0)
else:
img = imgs[0]
img_min, img_max = img.min(), img.max()
img = ((img - img_min) * (255 / (img_max - img_min))).astype(np.uint8)
return img
return render_display
def update_traces(self):
self._update_traces_display_buttons()
self._update_microscope_resolution()
self.plot_traces()
def _update_traces_display_buttons(self):
"""Redraw buttons for selecting which quantities to plot"""
self.plotsellbl.config(state=tk.NORMAL)
for child in self.plotselframe.winfo_children():
child.pack_forget()
for name, info in sorted(self.session.trace_info.items(), key=lambda x: x[1]['order']):
if info['button'] is None:
if info['label']:
btn_txt = f"{name}\n{info['label']}"
else:
btn_txt = name
info['button'] = tk.Checkbutton(self.plotselframe, text=btn_txt,
justify=tk.LEFT, indicatoron=False,
command=lambda btn=name: self._update_traces_display(button=btn))
info['var'] = tk.BooleanVar(info['button'], value=info['plot'])
info['button'].config(variable=info['var'])
info['button'].pack(anchor=tk.S, expand=True, fill=tk.X, padx=10, pady=5)
def _update_traces_display(self, button=None):
"""Update plot after changing quantities to plot"""
if button is not None:
info = self.session.trace_info[button]
info['plot'] = info['var'].get()
else:
for info in self.session.trace_info.values():
info['var'].set(info['plot'])
if not any(info['plot'] for info in self.session.trace_info.values()):
if button is not None:
info = self.session.trace_info[button]
info['plot'] ^= True
info['var'].set(info['plot'])
else:
for info in self.session.trace_info.values():
info['plot'] = True
                    info['var'].set(True)
self.plot_traces()
def plot_traces(self):
"""Plots the traces to the main window"""
self.frame_indicators.clear()
self.fig.clear()
self.session.plot_traces(self.fig, is_interactive=True,
frame_indicator_list=self.frame_indicators, status=self.status)
self._update_frame_indicator(draw=False)
self.fig.tight_layout(pad=.3)
self.fig.canvas.draw()
def _update_frame_indicator(self, *_, t=None, fr=None, draw=True):
"""Update display of vertical frame indicator in plot"""
if self.var_show_frame_indicator.get():
if t is None:
if fr is None:
fr = self.stackviewer.i_frame
t = self.session.to_hours(fr)
else:
t = np.NaN
for indicator in self.frame_indicators:
indicator.set_xdata([t, t])
if draw:
self.fig.canvas.draw()
def _line_picker(self, event):
"""Callback for clicking on line in plot"""
if not event.mouseevent.button == MouseButton.LEFT:
return
i = event.artist.get_label()
self.highlight_trace(i)
self.update_highlight()
def highlight_trace(self, *trace, val=None, update_select=False):
"""Change highlight state of one or more traces.
`trace` must be valid keys to `self.session.traces`.
`val` specifies whether to highlight (True) the
traces or not (False) or to toggle (None) highlighting.
If `update_select` is True, a non-selected cell is
selected before highlighting it; else, highlighting
is ignored.
This method does not update display.
To update display, call `self.update_highlight`.
If `update_select` is True, a return value of True
indicates that a cell selection has changed. In this case,
the user is responsible to call `self.update_selection`.
"""
is_selection_updated = False
if len(trace) > 1:
for tr in trace:
ret = self.highlight_trace(tr, val=val, update_select=update_select)
if update_select and ret:
is_selection_updated = True
return is_selection_updated
else:
trace = trace[0]
tr = self.session.traces[trace]
if val is None:
val = not tr['highlight']
elif val == tr['highlight']:
return
if not tr['select'] and val and update_select:
self.select_trace(trace, val=True)
is_selection_updated = True
tr['highlight'] = val
if val:
if tr['select']:
for plots in tr['plot'].values():
for plot in plots:
plot.set_color(const.PLOT_COLOR_HIGHLIGHT)
plot.set_lw(const.PLOT_WIDTH_HIGHLIGHT)
plot.set_alpha(const.PLOT_ALPHA_HIGHLIGHT)
for fr, roi in enumerate(tr['roi']):
self.session.rois[fr][roi].stroke_width = const.ROI_WIDTH_HIGHLIGHT
self.session.rois[fr][roi].color = const.ROI_COLOR_HIGHLIGHT
else:
for fr, roi in enumerate(tr['roi']):
self.session.rois[fr][roi].stroke_width = const.ROI_WIDTH_HIGHLIGHT
self.session.rois[fr][roi].color = const.ROI_COLOR_DESELECTED
else:
if tr['select']:
for plots in tr['plot'].values():
for plot in plots:
plot.set_color(const.PLOT_COLOR)
plot.set_lw(const.PLOT_WIDTH)
plot.set_alpha(const.PLOT_ALPHA)
for fr, roi in enumerate(tr['roi']):
self.session.rois[fr][roi].stroke_width = const.ROI_WIDTH
if tr['select']:
self.session.rois[fr][roi].color = const.ROI_COLOR_SELECTED
else:
self.session.rois[fr][roi].color = const.ROI_COLOR_DESELECTED
return is_selection_updated
def select_trace(self, *trace, val=None, update_highlight=False):
"""Change selection state of one or more traces.
        `trace` must be valid keys to `self.session.traces`.
`val` specifies whether to select (True),
deselect (False) or toggle (None) the selection.
`update_highlight` specifies whether to remove
highlighting (True) when a cell is deselected.
This method does not update display.
To update display, call `self.update_selection`.
"""
if len(trace) > 1:
for tr in trace:
self.select_trace(tr, val=val)
return
else:
trace = trace[0]
tr = self.session.traces[trace]
if val is None:
val = not tr['select']
elif val == tr['select']:
return
tr['select'] = val
if val:
roi_color = const.ROI_COLOR_HIGHLIGHT if tr['highlight'] else const.ROI_COLOR_SELECTED
for fr, roi in enumerate(tr['roi']):
self.session.rois[fr][roi].color = roi_color
else:
if update_highlight:
self.highlight_trace(trace, val=False)
for fr, roi in enumerate(tr['roi']):
self.session.rois[fr][roi].color = const.ROI_COLOR_DESELECTED
def update_highlight(self):
"""Redraw relevant display portions after highlight changes.
Note: All tasks performed by `update_highlight` are also
included in `update_selection`. Running both methods at
the same time is not necessary.
"""
self.fig.canvas.draw()
self.display_stack._listeners.notify('roi')
def update_selection(self):
"""Read traces after selection changes and update display"""
self.plot_traces()
self.display_stack._listeners.notify('roi')
if self.var_darken_deselected.get():
self.display_stack._listeners.notify('image')
def update_roi_display(self, notify_listeners=True):
"""Update all ROIs.
This method updates all display properties of all ROIs.
"""
# Update untracked cells
show_contour = self.var_show_untrackable.get() and self.var_show_roi_contours.get()
for frame in self.session.rois:
for roi in frame.values():
if roi.name:
continue
roi.color = const.ROI_COLOR_UNTRACKABLE
roi.stroke_width = const.ROI_WIDTH
roi.visible = show_contour
# Update tracked cells
show_contour = self.var_show_roi_contours.get()
show_name = self.var_show_roi_names.get()
for trace in self.session.traces.values():
is_select = trace['select']
is_highlight = trace['highlight']
if not is_select:
color = const.ROI_COLOR_DESELECTED
elif is_highlight:
color = const.ROI_COLOR_HIGHLIGHT
else:
color = const.ROI_COLOR_SELECTED
if is_highlight:
width = const.ROI_WIDTH_HIGHLIGHT
else:
width = const.ROI_WIDTH
for ref, rois in zip(trace['roi'], self.session.rois):
roi = rois[ref]
roi.color = color
roi.visible = show_contour
roi.name_visible = show_name
roi.stroke_width = width
if notify_listeners:
self.display_stack._listeners.notify('roi')
def _roi_clicked(self, event, names):
"""Callback for click on ROI"""
if not names:
return
is_selection_updated = False
mode = self.var_mode.get()
if event.state & EVENT_STATE_SHIFT:
if mode == MODE_HIGHLIGHT:
mode = MODE_SELECTION
elif mode == MODE_SELECTION:
mode = MODE_HIGHLIGHT
if mode == MODE_HIGHLIGHT:
for name in names:
try:
is_selection_updated |= self.highlight_trace(name, update_select=True)
except KeyError:
continue
self.update_highlight()
elif mode == MODE_SELECTION:
for name in names:
try:
self.select_trace(name, update_highlight=True)
except KeyError:
continue
is_selection_updated = True
if is_selection_updated:
self.update_selection()
def _update_show_roi_contours(self, *_):
"""Update stackviewer after toggling ROI contour display"""
show_contours = self.var_show_roi_contours.get()
show_untrackable = show_contours and self.var_show_untrackable.get()
for rois in self.session.rois:
for roi in rois.values():
if roi.name:
roi.visible = show_contours
else:
roi.visible = show_untrackable
self.display_stack._listeners.notify('roi')
def _update_show_roi_names(self, *_):
"""Update stackviewer after toggling ROI name display"""
show_names = self.var_show_roi_names.get()
if show_names:
show_untrackable = self.var_show_untrackable.get()
else:
show_untrackable = False
for rois in self.session.rois:
for roi in rois.values():
if roi.name:
roi.name_visible = show_names
else:
roi.name_visible = show_untrackable
self.display_stack._listeners.notify('roi')
def _update_show_untrackable(self, *_):
"""Update stackviewer after toggling display of untrackable cells"""
show = self.var_show_untrackable.get() and self.var_show_roi_contours.get()
for rois in self.session.rois:
for roi in rois.values():
if not roi.name:
roi.visible = show
self.display_stack._listeners.notify('roi')
def _add_to_microscope_menu(self, value, label=None):
"""Adds a radiobutton to the microscope menu.
Arguments:
value -- the key to the `MIC_RES` dict
label -- display name; if missing, equals `value`
The radiobutton is inserted at the end of the menu.
"""
if label is None:
label = value
self.micresmenu.add_radiobutton(label=label,
value=value,
variable=self.var_microscope_res,
command=lambda v=value: self._change_microscope_resolution(v),
)
def _change_microscope_resolution(self, mic_res):
"""Callback for changing microscope resolution
`mic_res` is the key of `MIC_RES` that should be loaded.
"""
if mic_res == MIC_RES_CUSTOM:
initval = {}
if MIC_RES[MIC_RES_CUSTOM] is not None:
initval = MIC_RES[MIC_RES_CUSTOM]
else:
initval = 1
res = tksd.askfloat(
"Microscope resolution",
"Enter custom microscope resolution [µm/px]:",
minvalue=0, parent=self.root, initialvalue=initval)
res_dict = dict(resolution=res)
elif mic_res == MIC_RES_UNSPEC:
res_dict = {}
else:
res_dict = dict(name=mic_res, resolution=MIC_RES[mic_res])
Event.fire(self.control_queue, const.CMD_SET_MICROSCOPE, self.session, **res_dict)
def _update_microscope_resolution(self):
"""Display updates of microscope resolution.
This method should not be called explicitly.
It is called by `SessionView_Tk.update_traces`.
"""
new_mic_name = self.session.mic_name
new_mic_res = self.session.mic_res
if not new_mic_res:
# use pixel as length unit
new_mic_name = MIC_RES_UNSPEC
new_mic_res = None
elif new_mic_name:
if new_mic_name not in MIC_RES:
# enter new_mic_name into MIC_RES
self._add_to_microscope_menu(new_mic_name)
MIC_RES[new_mic_name] = new_mic_res
elif MIC_RES[new_mic_name] != new_mic_res:
# name/value conflict with catalogue
new_mic_name = MIC_RES_CUSTOM
else:
# custom (unnamed) resolution
new_mic_name = MIC_RES_CUSTOM
# Update display for custom resolution
if new_mic_name == MIC_RES_CUSTOM:
MIC_RES[MIC_RES_CUSTOM] = new_mic_res
new_label = f"{MIC_RES_CUSTOM} ({new_mic_res} µm/px)"
else:
new_label = MIC_RES_CUSTOM
# Apply changes
self.micresmenu.entryconfig(MIC_RES_CUSTOM_IDX, label=new_label)
if new_mic_name != self.var_microscope_res.get():
self.var_microscope_res.set(new_mic_name)
#if self.session.trace_info[const.TYPE_AREA]['plot']:
# self.plot_traces()
def open_session(self):
"""Open a saved session"""
fn = tkfd.askopenfilename(title="Open session data",
initialdir='.',
parent=self.root,
filetypes=(("Session files", '*.zip *.json'), ("All files", '*')),
)
        if not fn:
return
# Forward filename to controller
Event.fire(self.control_queue, const.CMD_READ_SESSION_FROM_DISK, fn)
def binarize(self):
# Get filename
options = {'defaultextension': '.tif',
'filetypes': ( ("Numpy", '*.npy *.npz'), ("TIFF", '*.tif *.tiff'), ("All files", '*')),
'parent': self.root,
'title': "Choose output file for binarized phase-contrast stack",
}
if self.save_dir:
options['initialdir'] = self.save_dir
outfile = tkfd.asksaveasfilename(**options)
if not outfile:
return
# Start binarization
Event.fire(self.control_queue,
const.CMD_TOOL_BINARIZE,
session=self.session,
outfile=outfile,
status=self.status,
)
def _background_correction(self):
"""Write a background-corrected version of the fluorescence channel"""
# Get filename
options = {'defaultextension': '.tif',
'filetypes': ( ("TIFF", '*.tif *.tiff'), ("All files", '*') ),
'parent': self.root,
'title': "Choose output file for background-corrected fluorescence channel",
}
if self.save_dir:
options['initialdir'] = self.save_dir
outfile = tkfd.asksaveasfilename(**options)
if not outfile:
return
# Start background correction
Event.fire(self.control_queue,
const.CMD_TOOL_BGCORR,
session=self.session,
outfile=outfile,
status=self.status,
)
def _pickle_max_bbox(self):
"""Export bounding box of maximum extension of each selected cell"""
if self.session is None or not self.session.traces:
print("No ROIs to export")
return
options = dict(defaultextension='.pickle',
filetypes=(("Pickle", '*.pickle'), ("All", '*')),
parent=self.root,
title="Save bounding boxes as …",
)
if self.save_dir:
options['initialdir'] = self.save_dir
save_name = tkfd.asksaveasfilename(**options)
if not save_name:
return
from ..tools.roi_bboxer import get_selected_bboxes
get_selected_bboxes(self.session, save_name)
|
the-stack_0_24355 | # Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train and evaluate an event sequence RNN model."""
import os
import tempfile
from magenta.models.shared import sequence_generator_bundle
import tensorflow.compat.v1 as tf
import tf_slim
def run_training(build_graph_fn, train_dir, num_training_steps=None,
summary_frequency=10, save_checkpoint_secs=60,
checkpoints_to_keep=10, keep_checkpoint_every_n_hours=1,
master='', task=0, num_ps_tasks=0,
warm_start_bundle_file=None):
"""Runs the training loop.
Args:
build_graph_fn: A function that builds the graph ops.
train_dir: The path to the directory where checkpoints and summary events
will be written to.
num_training_steps: The number of steps to train for before exiting.
summary_frequency: The number of steps between each summary. A summary is
when graph values from the last step are logged to the console and
written to disk.
save_checkpoint_secs: The frequency at which to save checkpoints, in
seconds.
checkpoints_to_keep: The number of most recent checkpoints to keep in
`train_dir`. Keeps all if set to 0.
keep_checkpoint_every_n_hours: Keep a checkpoint every N hours, even if it
results in more checkpoints than checkpoints_to_keep.
master: URL of the Tensorflow master.
task: Task number for this worker.
num_ps_tasks: Number of parameter server tasks.
warm_start_bundle_file: Path to a sequence generator bundle file that will
be used to initialize the model weights for fine-tuning.
"""
with tf.Graph().as_default():
with tf.device(tf.train.replica_device_setter(num_ps_tasks)):
build_graph_fn()
global_step = tf.train.get_or_create_global_step()
loss = tf.get_collection('loss')[0]
perplexity = tf.get_collection('metrics/perplexity')[0]
accuracy = tf.get_collection('metrics/accuracy')[0]
train_op = tf.get_collection('train_op')[0]
logging_dict = {
'Global Step': global_step,
'Loss': loss,
'Perplexity': perplexity,
'Accuracy': accuracy
}
hooks = [
tf.train.NanTensorHook(loss),
tf.train.LoggingTensorHook(
logging_dict, every_n_iter=summary_frequency),
tf.train.StepCounterHook(
output_dir=train_dir, every_n_steps=summary_frequency)
]
if num_training_steps:
hooks.append(tf.train.StopAtStepHook(num_training_steps))
with tempfile.TemporaryDirectory() as tempdir:
if warm_start_bundle_file:
# We are fine-tuning from a pretrained bundle. Unpack the bundle and
# save its checkpoint to a temporary directory.
warm_start_bundle_file = os.path.expanduser(warm_start_bundle_file)
bundle = sequence_generator_bundle.read_bundle_file(
warm_start_bundle_file)
checkpoint_filename = os.path.join(tempdir, 'model.ckpt')
with tf.gfile.Open(checkpoint_filename, 'wb') as f:
# For now, we support only 1 checkpoint file.
f.write(bundle.checkpoint_file[0])
variables_to_restore = tf_slim.get_variables_to_restore(
exclude=['global_step', '.*Adam.*', 'beta.*_power'])
init_op, init_feed_dict = tf_slim.assign_from_checkpoint(
checkpoint_filename, variables_to_restore)
init_fn = lambda scaffold, sess: sess.run(init_op, init_feed_dict)
else:
init_fn = None
scaffold = tf.train.Scaffold(
init_fn=init_fn,
saver=tf.train.Saver(
max_to_keep=checkpoints_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours))
tf.logging.info('Starting training loop...')
tf_slim.training.train(
train_op=train_op,
logdir=train_dir,
scaffold=scaffold,
hooks=hooks,
save_checkpoint_secs=save_checkpoint_secs,
save_summaries_steps=summary_frequency,
master=master,
is_chief=task == 0)
tf.logging.info('Training complete.')
# TODO(adarob): Limit to a single epoch each evaluation step.
def run_eval(build_graph_fn, train_dir, eval_dir, num_batches,
timeout_secs=300):
"""Runs the training loop.
Args:
build_graph_fn: A function that builds the graph ops.
train_dir: The path to the directory where checkpoints will be loaded
from for evaluation.
eval_dir: The path to the directory where the evaluation summary events
will be written to.
num_batches: The number of full batches to use for each evaluation step.
timeout_secs: The number of seconds after which to stop waiting for a new
checkpoint.
Raises:
ValueError: If `num_batches` is less than or equal to 0.
"""
if num_batches <= 0:
raise ValueError(
'`num_batches` must be greater than 0. Check that the batch size is '
'no larger than the number of records in the eval set.')
with tf.Graph().as_default():
build_graph_fn()
global_step = tf.train.get_or_create_global_step()
loss = tf.get_collection('loss')[0]
perplexity = tf.get_collection('metrics/perplexity')[0]
accuracy = tf.get_collection('metrics/accuracy')[0]
eval_ops = tf.get_collection('eval_ops')
logging_dict = {
'Global Step': global_step,
'Loss': loss,
'Perplexity': perplexity,
'Accuracy': accuracy
}
hooks = [
EvalLoggingTensorHook(logging_dict, every_n_iter=num_batches),
tf_slim.evaluation.StopAfterNEvalsHook(num_batches),
tf_slim.evaluation.SummaryAtEndHook(eval_dir),
]
tf_slim.evaluation.evaluate_repeatedly(
train_dir,
eval_ops=eval_ops,
hooks=hooks,
eval_interval_secs=60,
timeout=timeout_secs)
class EvalLoggingTensorHook(tf.train.LoggingTensorHook):
"""A revised version of LoggingTensorHook to use during evaluation.
This version supports being reset and increments `_iter_count` before run
instead of after run.
"""
def begin(self):
# Reset timer.
self._timer.update_last_triggered_step(0)
super(EvalLoggingTensorHook, self).begin()
def before_run(self, run_context):
self._iter_count += 1
return super(EvalLoggingTensorHook, self).before_run(run_context)
def after_run(self, run_context, run_values):
super(EvalLoggingTensorHook, self).after_run(run_context, run_values)
self._iter_count -= 1
|
the-stack_0_24356 | from __future__ import absolute_import
import datetime
import json
import logging
import os.path
import sys
from pip._vendor import lockfile, pkg_resources
from pip._vendor.packaging import version as packaging_version
from pip._internal.cli.cmdoptions import make_search_scope
from pip._internal.index import PackageFinder
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.filesystem import check_path_owner
from pip._internal.utils.misc import ensure_dir, get_installed_version
from pip._internal.utils.packaging import get_installer
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
import optparse
from typing import Any, Dict
from pip._internal.download import PipSession
SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
logger = logging.getLogger(__name__)
class SelfCheckState(object):
def __init__(self, cache_dir):
# type: (str) -> None
self.state = {} # type: Dict[str, Any]
self.statefile_path = None
# Try to load the existing state
if cache_dir:
self.statefile_path = os.path.join(cache_dir, "selfcheck.json")
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)[sys.prefix]
except (IOError, ValueError, KeyError):
# Explicitly suppressing exceptions, since we don't want to
# error out if the cache file is invalid.
pass
def save(self, pypi_version, current_time):
# type: (str, datetime.datetime) -> None
# If we do not have a path to cache in, don't bother saving.
if not self.statefile_path:
return
# Check to make sure that we own the directory
if not check_path_owner(os.path.dirname(self.statefile_path)):
return
# Now that we've ensured the directory is owned by this user, we'll go
# ahead and make sure that all our directories are created.
ensure_dir(os.path.dirname(self.statefile_path))
# Attempt to write out our version check file
with lockfile.LockFile(self.statefile_path):
if os.path.exists(self.statefile_path):
with open(self.statefile_path) as statefile:
state = json.load(statefile)
else:
state = {}
state[sys.prefix] = {
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
}
with open(self.statefile_path, "w") as statefile:
json.dump(state, statefile, sort_keys=True,
separators=(",", ":"))
def was_installed_by_pip(pkg):
# type: (str) -> bool
"""Checks whether pkg was installed by pip
This is used not to display the upgrade message when pip is in fact
installed by system package manager, such as dnf on Fedora.
"""
try:
dist = pkg_resources.get_distribution(pkg)
return "pip" == get_installer(dist)
except pkg_resources.DistributionNotFound:
return False
def pip_version_check(session, options):
# type: (PipSession, optparse.Values) -> None
"""Check for an update for pip.
Limit the frequency of checks to once per week. State is stored either in
the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
of the pip script path.
"""
installed_version = get_installed_version("pip")
if not installed_version:
return
pip_version = packaging_version.parse(installed_version)
pypi_version = None
try:
state = SelfCheckState(cache_dir=options.cache_dir)
current_time = datetime.datetime.utcnow()
# Determine if we need to refresh the state
if "last_check" in state.state and "pypi_version" in state.state:
last_check = datetime.datetime.strptime(
state.state["last_check"],
SELFCHECK_DATE_FMT
)
if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60:
pypi_version = state.state["pypi_version"]
# Refresh the version if we need to or just see if we need to warn
if pypi_version is None:
# Lets use PackageFinder to see what the latest pip version is
search_scope = make_search_scope(options, suppress_no_index=True)
# Pass allow_yanked=False so we don't suggest upgrading to a
# yanked version.
selection_prefs = SelectionPreferences(
allow_yanked=False,
allow_all_prereleases=False, # Explicitly set to False
)
finder = PackageFinder.create(
search_scope=search_scope,
selection_prefs=selection_prefs,
session=session,
)
best_candidate = finder.find_best_candidate("pip").best_candidate
if best_candidate is None:
return
pypi_version = str(best_candidate.version)
# save that we've performed a check
state.save(pypi_version, current_time)
remote_version = packaging_version.parse(pypi_version)
local_version_is_older = (
pip_version < remote_version and
pip_version.base_version != remote_version.base_version and
was_installed_by_pip('pip')
)
# Determine if our pypi_version is older
if not local_version_is_older:
return
# Advise "python -m pip" on Windows to avoid issues
# with overwriting pip.exe.
if WINDOWS:
pip_cmd = "python -m pip"
else:
pip_cmd = "pip"
logger.warning(
"You are using pip version %s, however version %s is "
"available.\nYou should consider upgrading via the "
"'%s install --upgrade pip' command.",
pip_version, pypi_version, pip_cmd
)
except Exception:
logger.debug(
"There was an error checking the latest version of pip",
exc_info=True,
)
|
the-stack_0_24358 | import sys
import pytest
from fido2.ctap1 import ApduError
from fido2.ctap2 import CtapError
from fido2.utils import sha256
from solo.client import SoloClient
from solo.commands import SoloExtension
from tests.utils import shannon_entropy, verify, FidoRequest
@pytest.fixture(scope="module", params=["u2f"])
def solo(request, device):
sc = SoloClient()
sc.find_device(device.dev)
if request.param == "u2f":
sc.use_u2f()
else:
sc.use_hid()
return sc
IS_EXPERIMENTAL = '--experimental' in sys.argv
IS_NFC = '--nfc' in sys.argv
@pytest.mark.skipif(
IS_NFC,
reason="Wrong transport"
)
class TestSolo(object):
def test_solo(self, solo):
pass
def test_rng(self, solo):
total = 1024 * 16
entropy = b""
while len(entropy) < total:
entropy += solo.get_rng()
s = shannon_entropy(entropy)
assert s > 7.98
print("Entropy is %.5f bits per byte." % s)
def test_version(self, solo):
assert len(solo.solo_version()) == 4
def test_version_hid(self, solo):
data = solo.send_data_hid(0x61, b'')
assert len(data) == 4
print(f'Version is {data[0]}.{data[1]}.{data[2]} locked?=={data[3]}')
def test_bootloader_not(self, solo):
with pytest.raises(ApduError) as e:
solo.write_flash(0x0, b"1234")
def test_fido2_bridge(self, solo):
exchange = solo.exchange
solo.exchange = solo.exchange_fido2
req = SoloClient.format_request(SoloExtension.version, 0, b"A" * 16)
a = solo.ctap2.get_assertion(
solo.host, b"B" * 32, [{"id": req, "type": "public-key"}]
)
assert a.auth_data.rp_id_hash == sha256(solo.host.encode("utf8"))
assert a.credential["id"] == req
assert (a.auth_data.flags & 0x5) == 0x5
solo.get_rng()
solo.exchange = exchange
@pytest.mark.skipif(not IS_EXPERIMENTAL, reason="Experimental")
def test_load_external_key_wrong_length(self,solo, ):
ext_key_cmd = 0x62
with pytest.raises(CtapError) as e:
solo.send_data_hid(ext_key_cmd, b'\x01\x00\x00\x00' + b'wrong length'*2)
assert(e.value.code == CtapError.ERR.INVALID_LENGTH)
@pytest.mark.skipif(not IS_EXPERIMENTAL, reason="Experimental")
def test_load_external_key_invalidate_old_cred(self,solo, device, MCRes, GARes):
ext_key_cmd = 0x62
verify(MCRes, GARes)
print ('Enter user presence THREE times.')
solo.send_data_hid(ext_key_cmd, b'\x01\x00\x00\x00' + b'Z' * 96)
# Old credential should not exist now.
with pytest.raises(CtapError) as e:
ga_bad_req = FidoRequest(GARes)
device.sendGA(*ga_bad_req.toGA())
assert(e.value.code == CtapError.ERR.NO_CREDENTIALS)
@pytest.mark.skipif(not IS_EXPERIMENTAL, reason="Experimental")
def test_load_external_key(self,solo, device,):
key_A = b'A' * 96
key_B = b'B' * 96
ext_key_cmd = 0x62
print ('Enter user presence THREE times.')
solo.send_data_hid(ext_key_cmd, b'\x01\x00\x00\x00' + key_A)
# New credential works.
mc_A_req = FidoRequest()
mc_A_res = device.sendMC(*mc_A_req.toMC())
allow_list = [{"id":mc_A_res.auth_data.credential_data.credential_id, "type":"public-key"}]
ga_A_req = FidoRequest(mc_A_req, allow_list=allow_list)
ga_A_res = device.sendGA(*FidoRequest(ga_A_req).toGA())
verify(mc_A_res, ga_A_res, ga_A_req.cdh)
# Load up Key B and verify cred A doesn't exist.
print ('Enter user presence THREE times.')
solo.send_data_hid(ext_key_cmd, b'\x01\x00\x00\x00' + key_B)
with pytest.raises(CtapError) as e:
ga_A_res = device.sendGA(*FidoRequest(ga_A_req).toGA())
assert(e.value.code == CtapError.ERR.NO_CREDENTIALS)
# Load up Key A and verify cred A is back.
print ('Enter user presence THREE times.')
solo.send_data_hid(ext_key_cmd, b'\x01\x00\x00\x00' + key_A)
ga_A_res = device.sendGA(*FidoRequest(ga_A_req).toGA())
verify(mc_A_res, ga_A_res, ga_A_req.cdh)
|
the-stack_0_24360 | """
Friend Module for the micro:bit
The Friend module is a chatbot that you can run inside
of the REPL by typing the following for example.
>>> from friend import *
>>> talk('What is your name?')
*** Requires an external speaker. For more information please
*** visit https://github.com/mytechnotalent/MicroPython-micro-bit_Friend_MOD
"""
import gc
import speech
def talk(words):
"""Talk to your friend Mr. George
Parameters
----------
words : str
The words to say to your friend Mr. George
Returns
-------
None
"""
gc.collect()
words = words.lower()
if words.find("how are you") != -1:
speech.say("I am doing great!")
elif words.find("what's up") != -1:
speech.say("The sky.")
elif words.find("morning") != -1:
speech.say("I love to watch the sun rise in the morning!")
elif words.find("afternoon") != -1:
speech.say("I get hungry around lunch time.")
elif words.find("evening") != -1:
speech.say("I get sleepy in the evening.")
elif words.find("night") != -1:
speech.say("I get sleepy when it is night time.")
elif words.find("tell me something") != -1:
speech.say("I am a robot who loves to teach Piethon.")
elif words.find("hello") != -1:
speech.say("Hello to you!")
elif words.find("hi") != -1:
speech.say("Hi to you!")
elif words.find("thank you") != -1:
speech.say("It is my pleasure!")
elif words.find("bye") != -1:
speech.say("It was nice talking to you!")
elif words.find("help") != -1:
speech.say("I am always here to help!")
elif words.find("what can you do") != -1:
speech.say("I can teach Piethon programming.")
elif words.find("name") != -1:
speech.say("My name is Mr. George it is nice to meet you!")
elif words.find("how old are you") != -1:
speech.say("I was born in September of the year twenty twenty.")
elif words.find("question") != -1:
speech.say("I always try to answer questions.")
elif words.find("joke") != -1:
speech.say("What did the chicken cross the road?")
speech.say("To get to the other side.")
elif words.find("love") != -1:
speech.say("I love pizza!")
elif words.find("love you") != -1:
speech.say("Thank you so kind of you!")
elif words.find("love people") != -1:
speech.say("I want to help people by teaching them Piethon!")
elif words.find("hobby") != -1:
speech.say("I like to teachin Piethon to people!")
elif words.find("you live") != -1:
speech.say("I live in side the little microcontroller here.")
elif words.find("made you") != -1:
speech.say(
"Kevin Thomas created me inspired by the great people at MicroPiethon."
)
elif words.find("your job") != -1:
speech.say("I teach Piethon.")
elif words.find("you do") != -1:
speech.say("I like to teach Piethon.")
# ADD MORE CODE HERE
else:
speech.say("I am sorry I do not understand.")
|
the-stack_0_24361 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import xarray as xr
from constants import OUTPUT_VARS
START = int(os.environ['JULES_START_YEAR'])
END = int(os.environ['JULES_END_YEAR'])
YEARS = np.arange(START, END + 1)
SUITE = str(os.environ['JULES_SUITE'])
ID_STEM = str(os.environ['JULES_ID_STEM'])
JOB_NAME = str(os.environ['JULES_JOB_NAME'])
PROFILE_NAME = str(os.environ['JULES_PROFILE_NAME'])
INPUT_DIRECTORY = str(os.environ['INPUT_DIRECTORY'])
INPUT_FILE_SUFFIX = str(os.environ['INPUT_FILE_SUFFIX'])
OUTPUT_DIRECTORY = str(os.environ['OUTPUT_DIRECTORY'])
def main():
for yr in YEARS:
job_name = JOB_NAME.format(year=yr)
FN = ID_STEM + '.' + job_name + '.' + PROFILE_NAME + '.' + str(yr) + '.' + INPUT_FILE_SUFFIX + '.nc'
x = xr.open_dataset(os.path.join(INPUT_DIRECTORY, FN))
# kg m-2 s-1 -> m d-1
rate_vars = []
for var in OUTPUT_VARS['daily_hydrology']:
try:
if x[var].units == 'kg m-2 s-1':
x[var] = x[var] * 60 * 60 * 24 / 1000
rate_vars.append(var)
except KeyError:
pass
x = x[rate_vars]
# m d-1 -> m month-1
MONTH_OUTFN = ID_STEM + '.' + job_name + '.' + PROFILE_NAME + '.' + str(yr) + '.' + INPUT_FILE_SUFFIX + '.month.nc'
x_month = x.groupby("time.month").sum(dim="time")
x_month.to_netcdf(os.path.join(OUTPUT_DIRECTORY, MONTH_OUTFN))
# m d-1 -> m year-1
YEAR_OUTFN = ID_STEM + '.' + job_name + '.' + PROFILE_NAME + '.' + str(yr) + '.' + INPUT_FILE_SUFFIX + '.year.nc'
x_year = x.groupby("time.year").sum(dim="time")
x_year.to_netcdf(os.path.join(OUTPUT_DIRECTORY, YEAR_OUTFN))
x.close()
if __name__ == '__main__':
main()
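# Unit-conversion note (illustrative, not in the original): a water flux of
# 1 kg m-2 s-1 equals 1 mm s-1 of depth, so multiplying by 60 * 60 * 24 gives
# mm d-1 and dividing by 1000 gives m d-1, matching the conversion in main().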
|
the-stack_0_24362 | class Node:
def __init__(self, data):
self.data = data
self.next = None
class Queue:
def __init__(self):
self.head = None
self.tail = None
# head tail tail
# [4] -> [2] -> [3]
# [3]
def enqueue(self, value):
        # How should we implement this?
new_node = Node(value)
if self.is_empty():
self.head = new_node
self.tail = new_node
else:
self.tail.next = new_node
self.tail = new_node
return
# head tail
# [4] [2] [3]
# [2] [3]
def dequeue(self):
        # How should we implement this?
if self.is_empty():
return "queue is empty"
delete_node = self.head
self.head = self.head.next
return delete_node.data
def peek(self):
        # How should we implement this?
if self.is_empty():
return "queue is empty"
return self.head.data
def is_empty(self):
        # How should we implement this?
return self.head is None
queue = Queue()
queue.enqueue(3)
print(queue.peek())
queue.enqueue(4)
queue.dequeue()
queue.enqueue(5)
print(queue.dequeue())
print(queue.peek())
print(queue.is_empty()) |
the-stack_0_24363 | # Copyright 2020 The Google Earth Engine Community Authors
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Earth Engine Developer's Guide examples for 'Images - Relational, conditional and Boolean operations'."""
# [START earthengine__images08__conditional]
# Load a 2012 nightlights image.
nl_2012 = ee.Image('NOAA/DMSP-OLS/NIGHTTIME_LIGHTS/F182012')
lights = nl_2012.select('stable_lights')
# Define arbitrary thresholds on the 6-bit stable lights band.
zones = lights.gt(30).add(lights.gt(55)).add(lights.gt(62))
# Define a map centered on Paris, France.
map_2 = folium.Map(location=[48.8683, 2.373], zoom_start=8)
# Display the thresholded image as three distinct zones near Paris.
palette = ['000000', '0000FF', '00FF00', 'FF0000']
map_2.add_ee_layer(
zones, {'min': 0, 'max': 3, 'palette': palette}, 'development zones')
display(map_2)
# [END earthengine__images08__conditional]
# [START earthengine__images08__conditional_exp]
# Create zones using an expression, display.
zones_exp = nl_2012.expression("(b('stable_lights') > 62) ? 3 "
": (b('stable_lights') > 55) ? 2 "
": (b('stable_lights') > 30) ? 1 "
": 0")
# Define a map centered on Paris, France.
map_3 = folium.Map(location=[48.8683, 2.373], zoom_start=8)
# Add the image layer to the map and display it.
map_3.add_ee_layer(
zones_exp, {'min': 0, 'max': 3, 'palette': palette}, 'zones exp')
display(map_3)
# [END earthengine__images08__conditional_exp]
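# Illustrative aside (not part of the Earth Engine guide): the ternary
# expression above encodes the same thresholds as this plain-Python helper
# applied to a single 'stable_lights' pixel value.
def zone_for(stable_lights):
    if stable_lights > 62:
        return 3
    elif stable_lights > 55:
        return 2
    elif stable_lights > 30:
        return 1
    return 0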
|
the-stack_0_24366 | #!/usr/bin/env python3
from PyQt5.QtWidgets import QLabel, QVBoxLayout, QWidget
from PyQt5.QtCore import Qt
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setFixedSize(1000, 1000)
self.setWindowTitle('Example')
label = QLabel('Hello')
layout = QVBoxLayout()
layout.addWidget(label)
layout.setAlignment(Qt.AlignLeft)
self.setLayout(layout)
if __name__ == '__main__':
import sys
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
ex = Example()
ex.show()
sys.exit(app.exec_())
|
the-stack_0_24367 | # -*- coding: utf-8 -*-
"""
.. module:: etfl
:platform: Unix, Windows
:synopsis: flux balance models accounting for expression, thermodynamics, and resource allocation constraints
.. moduleauthor:: ETFL team
Optimisation utilities
"""
from pytfa.optim.variables import ReactionVariable, MetaboliteVariable
from .variables import EnzymeVariable, GeneVariable, ModelVariable, \
GrowthActivation, BinaryActivator
from pytfa.optim.constraints import ReactionConstraint, MetaboliteConstraint
from pytfa.optim.utils import get_all_subclasses
from .constraints import EnzymeConstraint, GeneConstraint, ModelConstraint
from collections import namedtuple
try:
from gurobipy import GRB
except ModuleNotFoundError:
pass
def make_subclasses_dict(cls):
"""
Return a dictionary of the subclasses inheriting from the argument class.
Keys are String names of the classes, values the actual classes.
:param cls:
:return:
"""
the_dict = {x.__name__:x for x in get_all_subclasses(cls)}
the_dict[cls.__name__] = cls
return the_dict
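# Illustrative usage (not part of the original module): given a base class,
# the helper maps subclass names to the subclass objects and also includes
# the base class itself, e.g.
#   lookup = make_subclasses_dict(ReactionVariable)
#   lookup['ReactionVariable'] is ReactionVariable  # True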
class SubclassIndexer:
def __init__(self):
self._cache = dict()
def __getitem__(self, classtype):
return make_subclasses_dict(classtype)
# try:
# self._cache[classtype]
# except KeyError:
# self._cache[classtype] = make_subclasses_dict(classtype)
# return self._cache[classtype]
def purge(self):
self._cache = dict()
def refresh(self):
self.purge()
for cls in self._cache:
self._cache[cls] = make_subclasses_dict(cls)
# @property
# def REACTION_VARIABLE_SUBCLASSES(self):
# return make_subclasses_dict(ReactionVariable)
#
# @property
# def REACTION_CONSTRAINT_SUBCLASSES(self):
# return make_subclasses_dict(ReactionConstraint)
#
# @property
# def METABOLITE_VARIABLE_SUBCLASSES(self):
# make_subclasses_dict(MetaboliteVariable)
#
# @property
# def METABOLITE_CONSTRAINT_SUBCLASSES(self):
# make_subclasses_dict(MetaboliteConstraint)
# ENZYME_VARIABLE_SUBCLASSES = make_subclasses_dict(EnzymeVariable)
# ENZYME_CONSTRAINT_SUBCLASSES = make_subclasses_dict(EnzymeConstraint)
# GENE_VARIABLE_SUBCLASSES = make_subclasses_dict(GeneVariable)
# GENE_CONSTRAINT_SUBCLASSES = make_subclasses_dict(GeneConstraint)
# MODEL_VARIABLE_SUBCLASSES = make_subclasses_dict(ModelVariable)
# MODEL_CONSTRAINT_SUBCLASSES = make_subclasses_dict(ModelConstraint)
INTEGER_VARIABLE_TYPES = ('binary','integer')
def fix_integers(model):
"""
Fixes all integer and binary variables of a model, to make it sample-able
:param model:
:return:
"""
if model.problem.__name__ == 'optlang.gurobi_interface':
model.logger.info('Gurobi-based model detected - using Gurobi\'s .'
'fixed() method')
return _gurobi_fix_integers(model)
else:
return _generic_fix_integers(model)
def _gurobi_fix_integers(model):
"""
    If the model whose integers are to be fixed uses Gurobi as its solver,
    use Gurobi's built-in method
:param model: A model with a Gurobi backend
:return:
"""
new = model.copy()
fixed = model.solver.problem.fixed()
new.solver.problem = fixed
return new
def _generic_fix_integers(model):
"""
    Fixes the integers of a model to its solution and removes the variables.
:param model:
:return:
"""
continuous_model = model.copy()
continuous_model.name = model.name + ' - continuous'
integer_variables = set()
constraints_with_integer_variables = []
if not hasattr(model, 'solution'):
model.logger.info('Model has no solution to fix the integers, calculating one')
model.optimize()
# We go through all the constraint descriptors and check if at least one of
# their variables is in the integer variable list
for this_cons in continuous_model._cons_dict.values():
has_integer_variable = False
for this_var in this_cons.constraint.variables:
if this_var.type in INTEGER_VARIABLE_TYPES:
                has_integer_variable = True
this_var_descriptor = this_var.name
integer_variables.add(this_var_descriptor)
constraints_with_integer_variables.append(this_cons.name)
int_dict = {continuous_model.variables[x]: model.solution.x_dict[x]
for x in integer_variables}
for this_cons_name in constraints_with_integer_variables:
this_cons = model._cons_dict[this_cons_name]
new_expr = this_cons.expr.subs(int_dict)
kind = type(this_cons)
ub = this_cons.constraint.ub
lb = this_cons.constraint.lb
the_id = this_cons.id
        # TODO make faster, using cons.change_expr and ad-hoc subs dicts
continuous_model.remove_constraint(this_cons)
rebuild_constraint(classname=kind.__name__,
model=continuous_model,
this_id=the_id,
new_expr=new_expr,
lb=lb,
ub=ub)
    for this_var_name in integer_variables:
        # integer_variables stores variable names; look up the GenericVariable
        # object it belongs to before removing it from the model
        the_generic_var = continuous_model._var_dict[this_var_name]
        continuous_model.remove_variable(the_generic_var)
continuous_model._push_queue()
continuous_model.solver.update()
    # This will update the values
print('Is the cobra_model still integer ? {}' \
.format(continuous_model.solver.is_integer))
return continuous_model
def rebuild_variable(classname, model, this_id, lb, ub, scaling_factor, queue=True):
"""
Rebuilds a variable from a classname and link it to the model
:param classname:
:param model:
:param this_id:
:param lb:
:param ub:
:param queue:
:return:
"""
subix = SubclassIndexer()
if classname in subix[ReactionVariable]:
hook = model.reactions.get_by_id(this_id)
this_class = subix[ReactionVariable][classname]
nv = model.add_variable(kind=this_class,
hook=hook,
ub=ub,
lb=lb,
scaling_factor=scaling_factor,
queue=queue)
elif classname in subix[MetaboliteVariable]:
hook = model.metabolites.get_by_id(this_id)
this_class = subix[MetaboliteVariable][classname]
nv = model.add_variable(kind=this_class,
hook=hook,
ub=ub,
lb=lb,
scaling_factor=scaling_factor,
queue=queue)
elif classname in subix[EnzymeVariable]:
hook = model.enzymes.get_by_id(this_id)
this_class = subix[EnzymeVariable][classname]
nv = model.add_variable(kind=this_class,
hook=hook,
ub=ub,
lb=lb,
scaling_factor=scaling_factor,
queue=queue)
elif classname in subix[GeneVariable]:
hook = model.genes.get_by_id(this_id)
this_class = subix[GeneVariable][classname]
nv = model.add_variable(kind=this_class,
hook=hook,
ub=ub,
lb=lb,
scaling_factor=scaling_factor,
queue=queue)
elif classname in subix[ModelVariable]:
hook = model
this_class = subix[ModelVariable][classname]
nv = model.add_variable(kind=this_class,
hook=hook,
id_=this_id,
ub=ub,
lb=lb,
scaling_factor=scaling_factor,
queue=queue)
else:
raise TypeError(
'Class {} serialization not handled yet' \
.format(classname))
return nv
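# Example call, assuming a serialized variable record carried these fields
# (the model handle, id and bounds below are made up for illustration):
#
#     nv = rebuild_variable(classname='EnzymeVariable', model=my_model,
#                           this_id='EZ_pgi', lb=0, ub=100, scaling_factor=1)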
def rebuild_constraint(classname, model, this_id, new_expr, lb, ub, queue=True):
"""
Rebuilds a constraint from a classname and link it to the model
:param classname:
:param model:
:param this_id:
:param new_expr:
:param lb:
:param ub:
:param queue:
:return:
"""
subix = SubclassIndexer()
if classname in subix[ReactionConstraint]:
hook = model.reactions.get_by_id(this_id)
this_class = subix[ReactionConstraint][classname]
nc = model.add_constraint(kind=this_class, hook=hook,
expr=new_expr,
ub=ub,
lb=lb,
queue=queue)
elif classname in subix[MetaboliteConstraint]:
hook = model.metabolites.get_by_id(this_id)
this_class = subix[MetaboliteConstraint][classname]
nc = model.add_constraint(kind=this_class, hook=hook,
expr=new_expr,
ub=ub,
lb=lb,
queue=queue)
elif classname in subix[EnzymeConstraint]:
hook = model.enzymes.get_by_id(this_id)
this_class = subix[EnzymeConstraint][classname]
nc = model.add_constraint(kind=this_class, hook=hook,
expr=new_expr,
ub=ub,
lb=lb,
queue=queue)
elif classname in subix[GeneConstraint]:
hook = model.genes.get_by_id(this_id)
this_class = subix[GeneConstraint][classname]
nc = model.add_constraint(kind=this_class, hook=hook,
expr=new_expr,
ub=ub,
lb=lb,
queue=queue)
elif classname in subix[ModelConstraint]:
hook = model
this_class = subix[ModelConstraint][classname]
nc = model.add_constraint(kind=this_class, hook=hook,
expr=new_expr, id_=this_id,
ub=ub,
lb=lb,
queue=queue)
else:
raise TypeError('Class {} serialization not handled yet' \
.format(classname))
return nc
DefaultSol = namedtuple('DefaultSol', field_names='objective_value')
def is_gurobi(model):
"""
Check if the model uses Gurobi as a solver
:param model:
:return:
"""
return model.problem.__name__ == 'optlang.gurobi_interface'
def fix_growth(model, solution = None):
"""
Set the growth integers to their fixed values from a solution. If no
solution is provided, the model's latest solution is used.
The growth can be released using the function
:func:`etfl.optim.utils.release_growth`
:param model:
:param solution:
:return:
"""
solution = check_solution(model, solution)
mu_variables = model.get_variables_of_type(GrowthActivation)
interp_variables = model.get_variables_of_type(BinaryActivator)
vars_to_fix = list(mu_variables) + list(interp_variables)
# # Growth rate
# epsilon = model.solver.configuration.tolerances.feasibility
# _,mu_lb,_ = get_active_growth_bounds(model)
# model.growth_reaction.lower_bound = mu_lb - epsilon
gurobi_hints = is_gurobi(model)
if gurobi_hints:
model.logger.info('Gurobi-based model detected - using Gurobi hints')
for the_var in vars_to_fix:
value = solution.raw[the_var.name]
try:
the_var.variable.lb = int(value)
the_var.variable.ub = int(value)
except ValueError:
# Happens if lb>ub during assignment
the_var.variable.ub = int(value)
the_var.variable.lb = int(value)
if gurobi_hints:
the_var.variable._internal_variable.VarHintVal = value
the_var.variable._internal_variable.VarHintPri = 5
def check_solution(model, solution):
"""
Helper function. if solution is None, attempts to get it from the model.
:param model:
:param solution:
:return:
"""
if solution is None:
try:
solution = model.solution
except AttributeError:
raise AttributeError('If not providing a solution object, please '
'provide a model with an embedded solution '
'(call model.solve())')
return solution
def release_growth(model):
"""
After growth has been fixed by :func:`etfl.optim.utils.fix_growth`,
it can be released using this function.
:param model:
:return:
"""
mu_variables = model.get_variables_of_type(GrowthActivation)
interp_variables = model.get_variables_of_type(BinaryActivator)
vars_to_fix = list(mu_variables) + list(interp_variables)
# # Growth reaction
# model.growth_reaction.lower_bound = 0
gurobi_hints = is_gurobi(model)
for the_var in vars_to_fix:
the_var.variable.lb = 0
the_var.variable.ub = 1
if gurobi_hints:
the_var.variable._internal_variable.VarHintVal = GRB.UNDEFINED
the_var.variable._internal_variable.VarHintPri = 0
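# Minimal sketch of the intended fix/solve/release pattern (`my_model` is a
# placeholder for an ETFL model built elsewhere):
#
#     sol = my_model.optimize()
#     fix_growth(my_model, sol)     # pin the growth binaries to this solution
#     my_model.optimize()           # re-solve, e.g. with another objective
#     release_growth(my_model)     # restore bounds and clear Gurobi hints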
def apply_warm_start(model, solution):
"""
Gives a warm start to the model.
Release it with :func:`etfl.optim.utils.release_warm_start`.
:param model:
:param solution:
:return:
"""
solution = check_solution(model, solution)
if is_gurobi(model):
for the_var in model.variables:
if the_var.type == 'binary':
the_var._internal_variable.Start = solution.raw[the_var.name]
else:
raise NotImplementedError('Solver not supported: ' + model.problem.__name__)
def release_warm_start(model):
"""
Releases the warm start provided by
:func:`etfl.optim.utils.apply_warm_start`.
:param model:
:return:
"""
if is_gurobi(model):
for the_var in model.variables:
if the_var.type == 'binary':
the_var._internal_variable.Start = GRB.UNDEFINED
else:
raise NotImplementedError('Solver not supported: ' + model.problem.__name__)
def get_active_growth_bounds(model, growth_rate=None):
"""
Returns the growth bound closest to the growth flux calculated at the
last solution.
:param model:
:return:
"""
if growth_rate is None:
mu = model.growth_reaction.flux
else:
mu = growth_rate
difflist = [abs(mu - x[0]) for x in model.mu_bins]
min_diff = min(difflist)
min_ix = difflist.index(min_diff)
mu_i, (mu_lb, mu_ub) = model.mu_bins[min_ix]
return mu_i, mu_lb, mu_ub
def safe_optim(model):
"""
Catches *any* exception that can happen during solving, and logs it.
    Useful if you solve many problems in a sequence and some of them are
infeasible.
    **Be careful**: This will catch literally **any** Exception.
:param model:
:return:
"""
try:
out = model.optimize()
except Exception as e:
import numpy as np
model.logger.warning('Exception occurred during solving: {}. - '
'Solver status: {}'.format(str(e), model.solver.status))
out = DefaultSol
out.objective_value = np.nan
return out
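# Usage sketch for scanning several scenarios without aborting on an infeasible
# one (`models` is a hypothetical iterable of ETFL models):
#
#     objectives = [safe_optim(m).objective_value for m in models]
#     # infeasible or failed solves simply contribute NaN entries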
def get_binding_constraints(model, epsilon):
if is_gurobi(model):
return {kind:[c.name
for c in these_cons
if c.constraint._internal_constraint.Slack <= epsilon]
for kind,these_cons in model._cons_kinds.items()}
else:
raise(NotImplementedError)
|
the-stack_0_24368 | """A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import print_function
from datetime import datetime
import os.path
import time
import tensorflow.python.platform
from tensorflow.python.platform import gfile
import numpy as np
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.Variable(0, trainable=False)
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
# Create a saver.
saver = tf.train.Saver(tf.all_variables())
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph.
sess = tf.Session(config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
graph_def=sess.graph_def)
for step in xrange(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / float(duration)
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if gfile.Exists(FLAGS.train_dir):
gfile.DeleteRecursively(FLAGS.train_dir)
gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
|
the-stack_0_24370 | """
Example that reads data from wapi TIMESERIES curves
can read multiple curves, and each curve for multiple regions
aggregates (averages) output frequency, if specified.
Save read data to csv files.
"""
import wapi
import pandas as pd
import os
################################################
# Insert the path to your config file here!
my_config_file = 'path/to/your/config.ini'
################################################
# curve names to read (in this case temperature and PV production actuals)
curve_names = ['tt {region} con °c cet {freq} s',
'pro {region} spv mwh/h cet {freq} a']
# define frequency for every curve as in curve name
freqs_curve = ['min15'] * len(curve_names)
# desired freq of output, define for every curve
freqs_out = ['H'] * len(curve_names)
# Regions to read TIMESERIES curves
regions = ['DE', 'ES', 'FR']
# Start Date of data
start = pd.Timestamp('2018-01-01 00:00')
# End date of data (last date is EXCLUDED!)
end = pd.Timestamp('2018-07-01 00:00')
################################################
# make data directory in the folder where this file is, if it does not exist
# Get the path of the directory where this file is
file_dir = os.path.dirname(os.path.realpath(__file__))
# Check if there is a "data" folder in this directory
data_dir = os.path.join(file_dir,'data')
if not os.path.isdir(data_dir):
# if not, create one
os.mkdir(data_dir)
# Create a session to Connect to Wattsight Database
session = wapi.Session(config_file=my_config_file)
# loop through the given curves
for c, curve_name in enumerate(curve_names):
# init empty df for each curve
df = pd.DataFrame()
# get curve and output frequency for curve name
freq_curve = freqs_curve[c]
freq_out = freqs_out[c]
# iterate regions
for region in regions:
# get curve data and convert to pandas Series
cname = curve_name.format(region=region, freq=freq_curve)
print('Fetching curve', cname)
curve = session.get_curve(name=cname)
ts = curve.get_data(data_from=start, data_to=end)
s = ts.to_pandas()
if freq_curve != freq_out:
# convert frequency if needed
s = s.groupby(pd.Grouper(freq=freq_out)).mean()
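            # Worked example of the aggregation above: four min15 values of
            # 1, 2, 3 and 4 within one hour collapse to a single hourly mean of 2.5.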
# add data to curve dataframe
df[curve_name.format(region=region, freq=freq_out)] = s
# create valid name for saving to csv
csv_name = curve_name.format(region='', freq=freq_out)
csv_name = csv_name.replace('/','-')
    csv_name = csv_name.replace('  ', ' ')  # collapse the double space left by the empty region placeholder
csv_name = csv_name.replace(' ','_')
# save to comma separated csv with point as decimal separator
df.to_csv(os.path.join('data',csv_name+'.csv'))
# save to semicolon separated csv with comma as decimal separator
df.to_csv(os.path.join('data',csv_name+'_comma.csv'), sep=';', decimal=',')
print('[Done]')
|
the-stack_0_24373 | from django.shortcuts import render,redirect
from django.contrib import messages
from django.core.mail import send_mail
from .models import Contact
# Create your views here.
def contact(req):
if req.method == 'POST':
listing_id = req.POST['listing_id']
listing = req.POST['listing']
name = req.POST['name']
email = req.POST['email']
phone = req.POST['phone']
message = req.POST['message']
user_id = req.POST['user_id']
realtor_email = req.POST['realtor_email']
# Checking if user has made an inquiry already
if req.user.is_authenticated:
user_id = req.user.id
has_contacted = Contact.objects.all().filter(listing_id=listing_id,user_id=user_id)
if has_contacted:
messages.error(req,'You have already made an inquiry for this listing')
return redirect('/listings/'+listing_id)
contact = Contact(listing = listing, listing_id= listing_id, name = name, email=email,phone=phone,message=message,user_id=user_id)
contact.save()
## send email
""" send_mail(
'Property listing inquire',
'there has been an inquiry for' + listing + ' . Please sign into the admin panel for more info.',
'[email protected]',
[realtor_email,'[email protected]'],
fail_silently= False
) """
messages.success(req,'Your request has been submitted')
return redirect('/listings/'+listing_id)
|
the-stack_0_24374 | # new game
import pygame
import random
from os import path
import os
import sys
from time import sleep
import socket
from gamenew_highscore1 import Button
HOST = '127.0.0.1'
PORT = 23456
height = 480
width = 600
FPS = 60
# define colors
BLACK = (0,0,0)
WHITE = (255,255,255)
RED = (255,0,0)
BLUE = (0,0,255)
GREEN = (0,255,0)
YELLOW = (255,255,0)
ORANGE = (255,165,0)
# initialize pygame and create window
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption("ColorWow")
clock = pygame.time.Clock()
game_folder = os.path.dirname(__file__)
img_folder = os.path.join(game_folder,"img")
snd_dir = path.join(path.dirname(__file__),"snd")
score_file = path.join(path.dirname(__file__),"highest_score.txt")
explosion = []
for i in range(9):
filename = 'regularExplosion0{}.png'.format(i)
img = pygame.image.load(path.join(img_folder, filename)).convert()
img.set_colorkey(BLACK)
img_lg = pygame.transform.scale(img, (60, 60))
explosion.append(img)
font_name = pygame.font.match_font('Berlin Sans FB')
def draw_text(surf, text , size, x,y):
font = pygame.font.Font(font_name,size)
text_surface = font.render(text, True, WHITE)
text_rect = text_surface.get_rect()
text_rect.midtop = (x,y)
surf.blit(text_surface, text_rect)
def high_score(score):
dir = path.dirname(__file__)
with open(path.join(dir,"highest_score.txt"),'r') as f:
try:
highscore = int(f.read())
except:
highscore = 0
if score>highscore:
highscore=score
with open(path.join(dir,"highest_score.txt"),'w') as f:
f.write(str(highscore))
draw_text(screen, "Highest score "+str(highscore),20,width/2,height-100)
class Explosion(pygame.sprite.Sprite):
def __init__(self, centerx, centery):
pygame.sprite.Sprite.__init__(self)
self.image = explosion[0]
self.rect = self.image.get_rect()
self.rect.centerx = centerx
self.rect.centery = centery
self.frame = 0
self.last_update = pygame.time.get_ticks()
self.frame_rate = FPS
def update(self):
now = pygame.time.get_ticks()
if now - self.last_update > self.frame_rate:
self.last_update = now
self.frame +=1
if self.frame == len(explosion):
self.kill()
else:
centerx = self.rect.centerx
centery = self.rect.centery
self.image = explosion[self.frame]
self.rect = self.image.get_rect()
self.rect.centerx, self.rect.centery = centerx, centery
class Player(pygame.sprite.Sprite):
def __init__(self, choice, all_sprites):
pygame.sprite.Sprite.__init__(self)
#self.image = pygame.Surface((50,50))
#self.image.fill(GREEN)
#switch(choice):
if(choice == 1):
self.image = pygame.image.load(os.path.join(img_folder, "player4.png")).convert()
elif(choice == 2):
self.image = pygame.image.load(os.path.join(img_folder, "player5.png")).convert()
elif(choice == 3):
self.image = pygame.image.load(os.path.join(img_folder, "player6.png")).convert()
self.image = pygame.transform.scale(self.image, (45, 55))
self.image.set_colorkey(WHITE)
self.rect = self.image.get_rect()
self.rect.centerx = width/2
self.rect.bottom = height - 40
self.speedx = 0
self.speedy = 0
def update(self):
self.speedx = 0
self.speedy = 0
keystate = pygame.key.get_pressed()
if keystate[pygame.K_LEFT]:
self.speedx = -5
if keystate[pygame.K_RIGHT]:
self.speedx = 5
if keystate[pygame.K_UP]:
self.speedy = -5
if keystate[pygame.K_DOWN]:
self.speedy = 5
self.rect.x += self.speedx
self.rect.y += self.speedy
if self.rect.right > width:
self.rect.right = width
if self.rect.centerx == width:
self.rect.centerx = 0;
if self.rect.left < 0:
self.rect.left = 0
def shoot(self, bullets, all_sprites):
bullet = Bullet(self.rect.centerx, self.rect.top)
all_sprites.add(bullet)
bullets.add(bullet)
#shoot_sound.play()
class Timer(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
#self.image = pygame.Surface((20,20))
#self.image.fill(WHITE)
self.image = pygame.image.load(os.path.join(img_folder, "bullet5.png")).convert()
self.rect = self.image.get_rect()
self.rect.centerx = 10
self.rect.bottom = height - 5
self.speedx = 2
def update(self):
self.rect.x += self.speedx
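# Note on the Timer sprite above: with speedx = 2 px per frame at 60 FPS it
# crosses the 600 px window in roughly 600 / (2 * 60) = 5 seconds, so it acts
# as the round's time-limit bar (the game ends when it reaches the right edge).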
class Mob(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
#self.image = pygame.Surface((40,40))
#self.image.fill(RED)
self.image = random.choice(right_images)
self.rect = self.image.get_rect()
self.rect.x = random.randrange(0, width - self.rect.width)
self.rect.y = random.randrange(-90,-50)
self.speedy = random.randrange(1,8)
#self.speedx = random.randrange(-3,3)
def update(self):
#self.rect.x += self.speedx
self.rect.y += self.speedy
if self.rect.top > height + 10 or self.rect.left < -25 or self.rect.right > width + 20:
self.rect.x = random.randrange(0, width - self.rect.width)
self.rect.y = random.randrange(-100, -50)
self.speedy = random.randrange(1,8)
class Mob1(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
#self.image = pygame.Surface((40,40))
#self.image.fill(BLUE)
self.image = random.choice(wrong_images)
self.rect = self.image.get_rect()
self.rect.x = random.randrange(0, width - self.rect.width)
self.rect.y = random.randrange(-90,-50)
self.speedy = random.randrange(1,8)
#self.speedx = random.randrange(-3,3)
def update(self):
#self.rect.x += self.speedx
self.rect.y += self.speedy
if self.rect.top > height + 10 or self.rect.left < -25 or self.rect.right > width + 20:
self.rect.x = random.randrange(0, width - self.rect.width)
self.rect.y = random.randrange(-100, -50)
self.speedy = random.randrange(1,8)
class Mob2(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
#self.image = pygame.Surface((40,40))
self.image = random.choice(wrong_images)
#self.image.fill(YELLOW)
self.rect = self.image.get_rect()
self.rect.x = random.randrange(0, width - self.rect.width)
self.rect.y = random.randrange(-90,-50)
self.speedy = random.randrange(1,8)
#self.speedx = random.randrange(-3,3)
def update(self):
#self.rect.x += self.speedx
self.rect.y += self.speedy
if self.rect.top > height + 10 or self.rect.left < -25 or self.rect.right > width + 20:
self.rect.x = random.randrange(0, width - self.rect.width)
self.rect.y = random.randrange(-100, -50)
self.speedy = random.randrange(1,8)
class Mob3(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
#self.image = pygame.Surface((40,40))
#self.image.fill(ORANGE)
self.image = random.choice(wrong_images)
self.rect = self.image.get_rect()
self.rect.x = random.randrange(0, width - self.rect.width)
self.rect.y = random.randrange(-90,-50)
self.speedy = random.randrange(1,8)
#self.speedx = random.randrange(-3,3)
def update(self):
#self.rect.x += self.speedx
self.rect.y += self.speedy
if self.rect.top > height + 10 or self.rect.left < -25 or self.rect.right > width + 20:
self.rect.x = random.randrange(0, width - self.rect.width)
self.rect.y = random.randrange(-100, -50)
self.speedy = random.randrange(1,8)
class Bullet(pygame.sprite.Sprite):
def __init__(self, x,y):
pygame.sprite.Sprite.__init__(self)
#self.image = pygame.Surface((10,10))
#self.image.fill(WHITE)
self.image = pygame.image.load(os.path.join(img_folder, "bullet8.png")).convert()
self.rect = self.image.get_rect()
self.rect.bottom = y
self.rect.centerx = x
self.speedy = -40
def update(self):
self.rect.y += self.speedy
# kill if it moves off the top of the screen
if self.rect.bottom < 0:
self.kill()
def show_go_screen(score, conn):
screen.fill(BLACK)
draw_text(screen, "True color", 128, width/2, height/4)
draw_text(screen, "Arrows to move, Space to fire",24,width/2,height/2)
draw_text(screen, "Press enter to begin",20,width/2,height*3/4)
high_score(score)
pygame.display.flip()
waiting = True
while waiting:
#clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
conn.sendall(b'T')
waiting = False
def oscore_card(score1, score2, conn):
screen.fill(BLACK)
if(score1>score2):
image = pygame.image.load(os.path.join(img_folder, "win-screen.png")).convert()
image = pygame.transform.scale(image, (int(width),int(height/2)))
rect = image.get_rect()
rect.centerx = width/2
screen.blit(image, rect)
elif(score1<score2):
image = pygame.image.load(os.path.join(img_folder, "ulost.png")).convert()
rect = image.get_rect()
rect.centerx = width/2
screen.blit(image, rect)
else:
draw_text(screen, "Its a DRAW", 64, width/2, height/4)
draw_text(screen, "Your score is: "+str(score1), 24, width/2, height/2)
draw_text(screen, "Opponent's score is: "+str(score2), 24, width/2, 3*height/4)
draw_text(screen, "Press ENTER to start new game",24,width/2,height*4/5)
pygame.display.flip()
waiting = True
while waiting:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
conn.sendall(b'T')
waiting = False
def yscore_card(score, conn):
screen.fill(BLACK)
image = pygame.image.load(os.path.join(img_folder, "Nice-Game-Over.jpg")).convert()
rect = image.get_rect()
rect.centerx = width/2
screen.blit(image, rect)
draw_text(screen, "Your score is: "+str(score), 24, width/2, height/2)
draw_text(screen, "Waiting for opponent's score....", 24, width/2, 3*height/4)
conn.sendall(bytes([score]))
pygame.display.flip()
waiting = True
while waiting:
c_score = conn.recv(1024)
o_score = int.from_bytes(c_score, "little")
oscore_card(score, o_score, conn)
waiting = False
def wait_screen(conn):
screen.fill(BLACK)
draw_text(screen, "Wait for opponent to select character", 60, width/2, height/4)
image = pygame.image.load(os.path.join(img_folder, "hourglass.jpg")).convert()
image = pygame.transform.scale(image, (int(width/2), int(height/2)))
rect = image.get_rect()
rect.centerx = width/2
rect.centery = 3*height/4
screen.blit(image, rect)
pygame.display.flip()
waiting = True
while waiting:
#clock.tick(FPS)
s_status = conn.recv(1024)
if(s_status == b'T'):
waiting=False
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# if event.type == pygame.KEYDOWN:
# if event.key == pygame.K_RETURN:
# waiting = False
def ship_selection(conn):
wait_screen(conn)
screen.fill(BLACK)
draw_text(screen, "Select your ship", 60, width/2, height/8)
image1 = pygame.image.load(os.path.join(img_folder, "player4.png")).convert()
image1 = pygame.transform.scale(image1, (int(width/7),int(height/3)))
rect1 = image1.get_rect()
rect1.centerx = 3*width/14
rect1.centery = 3*height/7
image2 = pygame.image.load(os.path.join(img_folder, "player5.png")).convert()
image2 = pygame.transform.scale(image2, (int(width/7),int(height/3)))
rect2 = image2.get_rect()
rect2.centerx = 7*width/14
rect2.centery = 3*height/7
image3 = pygame.image.load(os.path.join(img_folder, "player6.png")).convert()
image3 = pygame.transform.scale(image3, (int(width/7),int(height/3)))
rect3 = image1.get_rect()
rect3.centerx = 11*width/14
rect3.centery = 3*height/7
screen.blit(image1, rect1)
screen.blit(image2, rect2)
screen.blit(image3, rect3)
draw_text(screen, "Select your aircraft", 30, width/2, 4*height/5)
pygame.display.flip()
waiting = True
while waiting:
clock.tick(FPS)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
mouse_x,mouse_y = pygame.mouse.get_pos()
if rect1.collidepoint(mouse_x, mouse_y):
waiting = False
sel=1
conn.sendall(b'T')
return sel;
elif rect2.collidepoint(mouse_x, mouse_y):
waiting = False
sel=2
conn.sendall(b'T')
return sel;
elif rect3.collidepoint(mouse_x, mouse_y):
waiting = False
sel=3
conn.sendall(b'T')
return sel;
#Server settings and showing initial screen
wrong_images = []
wrong_list = [
"b1.png",'b2.png','b3.png','b4.png','b5.png','b7.png','b8.png','b9.png','b12.png',
'b13.png','g1.png','g2.png','g3.png','g4.png','g5.png','g6.png','g8.png',
'g9.png','o2.png','o3.png','o4.png','o5.png','o6.png','o7.png','o8.png','o9.png','o10.png',
'p1.png','p2.png','p3.png','p4.png','p5.png','p6.png','p7.png','p9.png','p10.png','p12.png',
'r2.png','r3.png','r4.png','r5.png','r6.png','r7.png','r8.png','r9.png','r10.png','r11.png','r12.png',
'y1.png','y2.png','y4.png','y5.png','y6.png','y7.png','y8.png','y9.png'
]
right_images = []
right_list = [ 'b6.png','b14.png','b15.png','g7.png','o1.png','p8.png','p11.png','r1.png','y3.png' ]
col_list = [ 40, 120, 200, 280, 360 ]
timer_images = []
timer_list = ['timer1.png','timer2.png','timer3.png','timer4.png']
for img in timer_list:
timer_images.append(pygame.image.load(path.join(img_folder, img)).convert())
for img in wrong_list:
wrong_images.append(pygame.image.load(path.join(img_folder, img)).convert())
for img in right_list:
right_images.append(pygame.image.load(path.join(img_folder, img)).convert())
shoot_sound = pygame.mixer.Sound(path.join(snd_dir,"laser3.wav"))
pygame.mixer.music.load(path.join(snd_dir,'Hypnotic Puzzle.wav'))
pygame.mixer.music.set_volume(0.4)
pygame.mixer.music.play(-1)#loops=(-1)
def run_server():
screen.fill(BLACK)
draw_text(screen, "True color", 64, width/2, height/4)
draw_text(screen, "Arrows to move, Space to fire",24,width/2,height/2)
draw_text(screen, "Waiting for other player to connect",20,width/2,height*3/4)
pygame.display.flip()
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind((HOST,PORT))
serversocket.listen()
conn,addr = serversocket.accept()
# Game loop
game_over = True
running = True
score = 0
count = 1 #To make sure that welcome and plane selection window appears only once
while running:
if game_over:
if(count>0):
show_go_screen(score, conn)
choice = ship_selection(conn)
ship_selection(conn)
count=0
game_over = False
all_sprites = pygame.sprite.Group()
mobs = pygame.sprite.Group()
enemy = pygame.sprite.Group()
bullets = pygame.sprite.Group()
player = Player(choice, all_sprites)
all_sprites.add(player)
time = Timer()
all_sprites.add(time)
for i in range(1):
m = Mob()
n = Mob1()
o = Mob2()
p = Mob3()
all_sprites.add(m)
all_sprites.add(n)
all_sprites.add(o)
all_sprites.add(p)
mobs.add(n)
mobs.add(o)
mobs.add(p)
enemy.add(m)
score = 0
# keep loop running at the right speed
clock.tick(FPS)
#c_status = conn.recv(1024)
#conn.sendall(b'T')
#print(c_status)
# Process input (events)
for event in pygame.event.get():
# check for closing window
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
player.shoot(bullets, all_sprites)
# Update
all_sprites.update()
# check if bullet hits mob
hits = pygame.sprite.groupcollide(bullets , enemy ,True ,True)
for hit in hits:
score += 1
m = Mob()
all_sprites.add(m)
expl = Explosion(hit.rect.centerx, hit.rect.centery)
mobs.add(m)
enemy.add(m)
time.kill()
time = Timer()
#all_sprites.remove(time)
all_sprites.add(time)
#time.update.rect.x = 0
# check if bullet hits other than enemy
hits = pygame.sprite.groupcollide(bullets , mobs ,True ,True)
if hits:
game_over = True
sleep(0.5)
            yscore_card(score, conn)
# check to see if a mob hit the player
hits = pygame.sprite.spritecollide(player, mobs, False) or pygame.sprite.spritecollide(player, enemy, False)
if hits:
game_over = True
sleep(0.5)
yscore_card(score, conn)
if time.rect.right > width:
game_over = True
yscore_card(score, conn)
# Draw / render
screen.fill(BLACK)
all_sprites.draw(screen)
draw_text(screen , str(score), 22, width/3, 10)
#draw_text(screen , "Opponent's score:"+str(c_score), 22, 3*width/4, 10)
# *after* drawing everything, flip the display
pygame.display.flip()
screen.fill(BLACK)
#pygame.quit()
|
the-stack_0_24375 | # MIT License
#
# Copyright (c) 2019 Raghav Venkat
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Addon Description
bl_info = {
"name" : "pbrify",
"author" : "Raghav Venkat",
"description" : "Quick PBR Node Setup Generator for Blender Cycles and EEVEE Engine",
"blender" : (2, 80, 0),
"version" : (1 , 1, 0),
"location" : "Properties > Material > Quick PBR Generator: PBRify",
"warning" : "",
"category" : "Material",
"support": "COMMUNITY",
"wiki_url": "https://github.com/RaghavVenkat/pbrify",
"tracker_url": "https://github.com/RaghavVenkat/pbrify/issues"
}
import bpy
from . pbrPanel import PbrifyInterface
from . pbrOp import PbrifyCreate
classes = (PbrifyCreate, PbrifyInterface)
register, unregister = bpy.utils.register_classes_factory(classes)
|
the-stack_0_24376 | """
Usage:
$ python get_test_group.py group_1
test_foo.py test_bar.py::TestClass
$
This is used by CI to run only a certain set of tests on a particular builder.
See ``test_groups.yaml`` for details.
"""
from pathlib import Path
from typing import List
import click
import yaml
def patterns_from_group(group_name: str, test_groups_path: str='test_groups.yaml') -> List[str]:
"""
Given a group name, return all the pytest patterns defined for that group
in ``test_groups.yaml``.
"""
test_group_file = Path(test_groups_path)
test_group_file_contents = test_group_file.read_text()
    test_groups = yaml.safe_load(test_group_file_contents)['groups']
return test_groups[group_name]
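# For reference, ``test_groups.yaml`` is expected to look roughly like this
# (``group_1`` matches the usage example in the module docstring; ``group_2``
# is invented for illustration):
#
#     groups:
#       group_1:
#         - test_foo.py
#         - test_bar.py::TestClass
#       group_2:
#         - test_baz.py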
@click.command('list-integration-test-patterns')
@click.argument('group_name')
def list_integration_test_patterns(group_name: str) -> None:
"""
Perform a release.
"""
test_patterns = patterns_from_group(group_name=group_name)
click.echo(' '.join(test_patterns), nl=False)
if __name__ == '__main__':
list_integration_test_patterns()
|
the-stack_0_24379 | """Weather data coordinator for the AEMET OpenData service."""
from dataclasses import dataclass, field
from datetime import timedelta
import logging
from aemet_opendata.const import (
AEMET_ATTR_DATE,
AEMET_ATTR_DAY,
AEMET_ATTR_DIRECTION,
AEMET_ATTR_ELABORATED,
AEMET_ATTR_FORECAST,
AEMET_ATTR_HUMIDITY,
AEMET_ATTR_ID,
AEMET_ATTR_IDEMA,
AEMET_ATTR_MAX,
AEMET_ATTR_MIN,
AEMET_ATTR_NAME,
AEMET_ATTR_PRECIPITATION,
AEMET_ATTR_PRECIPITATION_PROBABILITY,
AEMET_ATTR_SKY_STATE,
AEMET_ATTR_SNOW,
AEMET_ATTR_SNOW_PROBABILITY,
AEMET_ATTR_SPEED,
AEMET_ATTR_STATION_DATE,
AEMET_ATTR_STATION_HUMIDITY,
AEMET_ATTR_STATION_LOCATION,
AEMET_ATTR_STATION_PRESSURE_SEA,
AEMET_ATTR_STATION_TEMPERATURE,
AEMET_ATTR_STORM_PROBABILITY,
AEMET_ATTR_TEMPERATURE,
AEMET_ATTR_TEMPERATURE_FEELING,
AEMET_ATTR_WIND,
AEMET_ATTR_WIND_GUST,
ATTR_DATA,
)
from aemet_opendata.helpers import (
get_forecast_day_value,
get_forecast_hour_value,
get_forecast_interval_value,
)
import async_timeout
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_WIND_SPEED,
)
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.util import dt as dt_util
from .const import (
ATTR_API_CONDITION,
ATTR_API_FORECAST_DAILY,
ATTR_API_FORECAST_HOURLY,
ATTR_API_HUMIDITY,
ATTR_API_PRESSURE,
ATTR_API_RAIN,
ATTR_API_RAIN_PROB,
ATTR_API_SNOW,
ATTR_API_SNOW_PROB,
ATTR_API_STATION_ID,
ATTR_API_STATION_NAME,
ATTR_API_STATION_TIMESTAMP,
ATTR_API_STORM_PROB,
ATTR_API_TEMPERATURE,
ATTR_API_TEMPERATURE_FEELING,
ATTR_API_TOWN_ID,
ATTR_API_TOWN_NAME,
ATTR_API_TOWN_TIMESTAMP,
ATTR_API_WIND_BEARING,
ATTR_API_WIND_MAX_SPEED,
ATTR_API_WIND_SPEED,
CONDITIONS_MAP,
DOMAIN,
WIND_BEARING_MAP,
)
_LOGGER = logging.getLogger(__name__)
WEATHER_UPDATE_INTERVAL = timedelta(minutes=10)
def format_condition(condition: str) -> str:
"""Return condition from dict CONDITIONS_MAP."""
for key, value in CONDITIONS_MAP.items():
if condition in value:
return key
_LOGGER.error('condition "%s" not found in CONDITIONS_MAP', condition)
return condition
def format_float(value) -> float:
"""Try converting string to float."""
try:
return float(value)
except (TypeError, ValueError):
return None
def format_int(value) -> int:
"""Try converting string to int."""
try:
return int(value)
except (TypeError, ValueError):
return None
class TownNotFound(UpdateFailed):
"""Raised when town is not found."""
class WeatherUpdateCoordinator(DataUpdateCoordinator):
"""Weather data update coordinator."""
def __init__(self, hass, aemet, latitude, longitude):
"""Initialize coordinator."""
super().__init__(
hass, _LOGGER, name=DOMAIN, update_interval=WEATHER_UPDATE_INTERVAL
)
self._aemet = aemet
self._station = None
self._town = None
self._latitude = latitude
self._longitude = longitude
self._data = {
"daily": None,
"hourly": None,
"station": None,
}
async def _async_update_data(self):
data = {}
with async_timeout.timeout(120):
weather_response = await self._get_aemet_weather()
data = self._convert_weather_response(weather_response)
return data
async def _get_aemet_weather(self):
"""Poll weather data from AEMET OpenData."""
weather = await self.hass.async_add_executor_job(self._get_weather_and_forecast)
return weather
def _get_weather_station(self):
if not self._station:
self._station = (
self._aemet.get_conventional_observation_station_by_coordinates(
self._latitude, self._longitude
)
)
if self._station:
_LOGGER.debug(
"station found for coordinates [%s, %s]: %s",
self._latitude,
self._longitude,
self._station,
)
if not self._station:
_LOGGER.debug(
"station not found for coordinates [%s, %s]",
self._latitude,
self._longitude,
)
return self._station
def _get_weather_town(self):
if not self._town:
self._town = self._aemet.get_town_by_coordinates(
self._latitude, self._longitude
)
if self._town:
_LOGGER.debug(
"town found for coordinates [%s, %s]: %s",
self._latitude,
self._longitude,
self._town,
)
if not self._town:
_LOGGER.error(
"town not found for coordinates [%s, %s]",
self._latitude,
self._longitude,
)
raise TownNotFound
return self._town
def _get_weather_and_forecast(self):
"""Get weather and forecast data from AEMET OpenData."""
self._get_weather_town()
daily = self._aemet.get_specific_forecast_town_daily(self._town[AEMET_ATTR_ID])
if not daily:
_LOGGER.error(
'error fetching daily data for town "%s"', self._town[AEMET_ATTR_ID]
)
hourly = self._aemet.get_specific_forecast_town_hourly(
self._town[AEMET_ATTR_ID]
)
if not hourly:
_LOGGER.error(
'error fetching hourly data for town "%s"', self._town[AEMET_ATTR_ID]
)
station = None
if self._get_weather_station():
station = self._aemet.get_conventional_observation_station_data(
self._station[AEMET_ATTR_IDEMA]
)
if not station:
_LOGGER.error(
'error fetching data for station "%s"',
self._station[AEMET_ATTR_IDEMA],
)
if daily:
self._data["daily"] = daily
if hourly:
self._data["hourly"] = hourly
if station:
self._data["station"] = station
return AemetWeather(
self._data["daily"],
self._data["hourly"],
self._data["station"],
)
def _convert_weather_response(self, weather_response):
"""Format the weather response correctly."""
if not weather_response or not weather_response.hourly:
return None
elaborated = dt_util.parse_datetime(
weather_response.hourly[ATTR_DATA][0][AEMET_ATTR_ELABORATED]
)
now = dt_util.now()
hour = now.hour
# Get current day
day = None
for cur_day in weather_response.hourly[ATTR_DATA][0][AEMET_ATTR_FORECAST][
AEMET_ATTR_DAY
]:
cur_day_date = dt_util.parse_datetime(cur_day[AEMET_ATTR_DATE])
if now.date() == cur_day_date.date():
day = cur_day
break
# Get station data
station_data = None
if weather_response.station:
station_data = weather_response.station[ATTR_DATA][-1]
condition = None
humidity = None
pressure = None
rain = None
rain_prob = None
snow = None
snow_prob = None
station_id = None
station_name = None
station_timestamp = None
storm_prob = None
temperature = None
temperature_feeling = None
town_id = None
town_name = None
town_timestamp = dt_util.as_utc(elaborated)
wind_bearing = None
wind_max_speed = None
wind_speed = None
# Get weather values
if day:
condition = self._get_condition(day, hour)
humidity = self._get_humidity(day, hour)
rain = self._get_rain(day, hour)
rain_prob = self._get_rain_prob(day, hour)
snow = self._get_snow(day, hour)
snow_prob = self._get_snow_prob(day, hour)
station_id = self._get_station_id()
station_name = self._get_station_name()
storm_prob = self._get_storm_prob(day, hour)
temperature = self._get_temperature(day, hour)
temperature_feeling = self._get_temperature_feeling(day, hour)
town_id = self._get_town_id()
town_name = self._get_town_name()
wind_bearing = self._get_wind_bearing(day, hour)
wind_max_speed = self._get_wind_max_speed(day, hour)
wind_speed = self._get_wind_speed(day, hour)
# Overwrite weather values with closest station data (if present)
if station_data:
if AEMET_ATTR_STATION_DATE in station_data:
station_dt = dt_util.parse_datetime(
station_data[AEMET_ATTR_STATION_DATE] + "Z"
)
station_timestamp = dt_util.as_utc(station_dt).isoformat()
if AEMET_ATTR_STATION_HUMIDITY in station_data:
humidity = format_float(station_data[AEMET_ATTR_STATION_HUMIDITY])
if AEMET_ATTR_STATION_PRESSURE_SEA in station_data:
pressure = format_float(station_data[AEMET_ATTR_STATION_PRESSURE_SEA])
if AEMET_ATTR_STATION_TEMPERATURE in station_data:
temperature = format_float(station_data[AEMET_ATTR_STATION_TEMPERATURE])
# Get forecast from weather data
forecast_daily = self._get_daily_forecast_from_weather_response(
weather_response, now
)
forecast_hourly = self._get_hourly_forecast_from_weather_response(
weather_response, now
)
return {
ATTR_API_CONDITION: condition,
ATTR_API_FORECAST_DAILY: forecast_daily,
ATTR_API_FORECAST_HOURLY: forecast_hourly,
ATTR_API_HUMIDITY: humidity,
ATTR_API_TEMPERATURE: temperature,
ATTR_API_TEMPERATURE_FEELING: temperature_feeling,
ATTR_API_PRESSURE: pressure,
ATTR_API_RAIN: rain,
ATTR_API_RAIN_PROB: rain_prob,
ATTR_API_SNOW: snow,
ATTR_API_SNOW_PROB: snow_prob,
ATTR_API_STATION_ID: station_id,
ATTR_API_STATION_NAME: station_name,
ATTR_API_STATION_TIMESTAMP: station_timestamp,
ATTR_API_STORM_PROB: storm_prob,
ATTR_API_TOWN_ID: town_id,
ATTR_API_TOWN_NAME: town_name,
ATTR_API_TOWN_TIMESTAMP: town_timestamp,
ATTR_API_WIND_BEARING: wind_bearing,
ATTR_API_WIND_MAX_SPEED: wind_max_speed,
ATTR_API_WIND_SPEED: wind_speed,
}
def _get_daily_forecast_from_weather_response(self, weather_response, now):
if weather_response.daily:
parse = False
forecast = []
for day in weather_response.daily[ATTR_DATA][0][AEMET_ATTR_FORECAST][
AEMET_ATTR_DAY
]:
day_date = dt_util.parse_datetime(day[AEMET_ATTR_DATE])
if now.date() == day_date.date():
parse = True
if parse:
cur_forecast = self._convert_forecast_day(day_date, day)
if cur_forecast:
forecast.append(cur_forecast)
return forecast
return None
def _get_hourly_forecast_from_weather_response(self, weather_response, now):
if weather_response.hourly:
parse = False
hour = now.hour
forecast = []
for day in weather_response.hourly[ATTR_DATA][0][AEMET_ATTR_FORECAST][
AEMET_ATTR_DAY
]:
day_date = dt_util.parse_datetime(day[AEMET_ATTR_DATE])
hour_start = 0
if now.date() == day_date.date():
parse = True
hour_start = now.hour
if parse:
for hour in range(hour_start, 24):
cur_forecast = self._convert_forecast_hour(day_date, day, hour)
if cur_forecast:
forecast.append(cur_forecast)
return forecast
return None
def _convert_forecast_day(self, date, day):
condition = self._get_condition_day(day)
if not condition:
return None
return {
ATTR_FORECAST_CONDITION: condition,
ATTR_FORECAST_PRECIPITATION_PROBABILITY: self._get_precipitation_prob_day(
day
),
ATTR_FORECAST_TEMP: self._get_temperature_day(day),
ATTR_FORECAST_TEMP_LOW: self._get_temperature_low_day(day),
ATTR_FORECAST_TIME: dt_util.as_utc(date).isoformat(),
ATTR_FORECAST_WIND_SPEED: self._get_wind_speed_day(day),
ATTR_FORECAST_WIND_BEARING: self._get_wind_bearing_day(day),
}
def _convert_forecast_hour(self, date, day, hour):
condition = self._get_condition(day, hour)
if not condition:
return None
forecast_dt = date.replace(hour=hour, minute=0, second=0)
return {
ATTR_FORECAST_CONDITION: condition,
ATTR_FORECAST_PRECIPITATION: self._calc_precipitation(day, hour),
ATTR_FORECAST_PRECIPITATION_PROBABILITY: self._calc_precipitation_prob(
day, hour
),
ATTR_FORECAST_TEMP: self._get_temperature(day, hour),
ATTR_FORECAST_TIME: dt_util.as_utc(forecast_dt).isoformat(),
ATTR_FORECAST_WIND_SPEED: self._get_wind_speed(day, hour),
ATTR_FORECAST_WIND_BEARING: self._get_wind_bearing(day, hour),
}
def _calc_precipitation(self, day, hour):
"""Calculate the precipitation."""
rain_value = self._get_rain(day, hour)
if not rain_value:
rain_value = 0
snow_value = self._get_snow(day, hour)
if not snow_value:
snow_value = 0
if round(rain_value + snow_value, 1) == 0:
return None
return round(rain_value + snow_value, 1)
def _calc_precipitation_prob(self, day, hour):
"""Calculate the precipitation probability (hour)."""
rain_value = self._get_rain_prob(day, hour)
if not rain_value:
rain_value = 0
snow_value = self._get_snow_prob(day, hour)
if not snow_value:
snow_value = 0
if rain_value == 0 and snow_value == 0:
return None
return max(rain_value, snow_value)
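    # Worked example: a 15 % rain probability and a 5 % snow probability for the
    # hour yield 15; if both are 0 (or missing) the method returns None so the
    # forecast entry carries no probability value at all.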
@staticmethod
def _get_condition(day_data, hour):
"""Get weather condition (hour) from weather data."""
val = get_forecast_hour_value(day_data[AEMET_ATTR_SKY_STATE], hour)
if val:
return format_condition(val)
return None
@staticmethod
def _get_condition_day(day_data):
"""Get weather condition (day) from weather data."""
val = get_forecast_day_value(day_data[AEMET_ATTR_SKY_STATE])
if val:
return format_condition(val)
return None
@staticmethod
def _get_humidity(day_data, hour):
"""Get humidity from weather data."""
val = get_forecast_hour_value(day_data[AEMET_ATTR_HUMIDITY], hour)
if val:
return format_int(val)
return None
@staticmethod
def _get_precipitation_prob_day(day_data):
"""Get humidity from weather data."""
val = get_forecast_day_value(day_data[AEMET_ATTR_PRECIPITATION_PROBABILITY])
if val:
return format_int(val)
return None
@staticmethod
def _get_rain(day_data, hour):
"""Get rain from weather data."""
val = get_forecast_hour_value(day_data[AEMET_ATTR_PRECIPITATION], hour)
if val:
return format_float(val)
return None
@staticmethod
def _get_rain_prob(day_data, hour):
"""Get rain probability from weather data."""
val = get_forecast_interval_value(
day_data[AEMET_ATTR_PRECIPITATION_PROBABILITY], hour
)
if val:
return format_int(val)
return None
@staticmethod
def _get_snow(day_data, hour):
"""Get snow from weather data."""
val = get_forecast_hour_value(day_data[AEMET_ATTR_SNOW], hour)
if val:
return format_float(val)
return None
@staticmethod
def _get_snow_prob(day_data, hour):
"""Get snow probability from weather data."""
val = get_forecast_interval_value(day_data[AEMET_ATTR_SNOW_PROBABILITY], hour)
if val:
return format_int(val)
return None
def _get_station_id(self):
"""Get station ID from weather data."""
if self._station:
return self._station[AEMET_ATTR_IDEMA]
return None
def _get_station_name(self):
"""Get station name from weather data."""
if self._station:
return self._station[AEMET_ATTR_STATION_LOCATION]
return None
@staticmethod
def _get_storm_prob(day_data, hour):
"""Get storm probability from weather data."""
val = get_forecast_interval_value(day_data[AEMET_ATTR_STORM_PROBABILITY], hour)
if val:
return format_int(val)
return None
@staticmethod
def _get_temperature(day_data, hour):
"""Get temperature (hour) from weather data."""
val = get_forecast_hour_value(day_data[AEMET_ATTR_TEMPERATURE], hour)
return format_int(val)
@staticmethod
def _get_temperature_day(day_data):
"""Get temperature (day) from weather data."""
val = get_forecast_day_value(
day_data[AEMET_ATTR_TEMPERATURE], key=AEMET_ATTR_MAX
)
return format_int(val)
@staticmethod
def _get_temperature_low_day(day_data):
"""Get temperature (day) from weather data."""
val = get_forecast_day_value(
day_data[AEMET_ATTR_TEMPERATURE], key=AEMET_ATTR_MIN
)
return format_int(val)
@staticmethod
def _get_temperature_feeling(day_data, hour):
"""Get temperature from weather data."""
val = get_forecast_hour_value(day_data[AEMET_ATTR_TEMPERATURE_FEELING], hour)
return format_int(val)
def _get_town_id(self):
"""Get town ID from weather data."""
if self._town:
return self._town[AEMET_ATTR_ID]
return None
def _get_town_name(self):
"""Get town name from weather data."""
if self._town:
return self._town[AEMET_ATTR_NAME]
return None
@staticmethod
def _get_wind_bearing(day_data, hour):
"""Get wind bearing (hour) from weather data."""
val = get_forecast_hour_value(
day_data[AEMET_ATTR_WIND_GUST], hour, key=AEMET_ATTR_DIRECTION
)[0]
if val in WIND_BEARING_MAP:
return WIND_BEARING_MAP[val]
_LOGGER.error("%s not found in Wind Bearing map", val)
return None
@staticmethod
def _get_wind_bearing_day(day_data):
"""Get wind bearing (day) from weather data."""
val = get_forecast_day_value(
day_data[AEMET_ATTR_WIND], key=AEMET_ATTR_DIRECTION
)
if val in WIND_BEARING_MAP:
return WIND_BEARING_MAP[val]
_LOGGER.error("%s not found in Wind Bearing map", val)
return None
@staticmethod
def _get_wind_max_speed(day_data, hour):
"""Get wind max speed from weather data."""
val = get_forecast_hour_value(day_data[AEMET_ATTR_WIND_GUST], hour)
if val:
return format_int(val)
return None
@staticmethod
def _get_wind_speed(day_data, hour):
"""Get wind speed (hour) from weather data."""
val = get_forecast_hour_value(
day_data[AEMET_ATTR_WIND_GUST], hour, key=AEMET_ATTR_SPEED
)[0]
if val:
return format_int(val)
return None
@staticmethod
def _get_wind_speed_day(day_data):
"""Get wind speed (day) from weather data."""
val = get_forecast_day_value(day_data[AEMET_ATTR_WIND], key=AEMET_ATTR_SPEED)
if val:
return format_int(val)
return None
@dataclass
class AemetWeather:
"""Class to harmonize weather data model."""
daily: dict = field(default_factory=dict)
hourly: dict = field(default_factory=dict)
station: dict = field(default_factory=dict)
|
the-stack_0_24381 | # !/usr/bin/python
# coding: utf_8
# Copyright 2016-2017 RaceUP ED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import httplib2
from googleapiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
SCRIPT_DIRECTORY = os.path.dirname(__file__) # path to directory of python script running
# app settings
APP_NAME = "Bms Remote Monitor"
APP_WEBSITE = "https://sites.google.com/view/raceupbms/home"
APP_ORGANIZATION_WEBSITE = "www.raceup.it"
OAUTH_PATH = os.path.join(os.path.dirname(SCRIPT_DIRECTORY), ".user_credentials") # credentials folder
class GoogleApiOAuth(object):
def __init__(self, scope, app_secrets_path, user_credentials_path):
"""
:param scope: string
scope of api
:param app_secrets_path: string
path to app secrets
:param user_credentials_path: string
path to user credentials
"""
object.__init__(self)
self.scope = str(scope)
self.app_secrets = str(app_secrets_path)
self.user_credentials = str(user_credentials_path)
self.store = Storage(user_credentials_path)
def get_new_user_credentials(self):
"""
:return: credentials
New user credentials file upon user prompt
"""
flow = client.flow_from_clientsecrets(self.app_secrets, self.scope) # perform OAuth2.0 authorization flow.
flow.user_agent = APP_NAME
return tools.run_flow(flow, self.store)
def get_user_credentials(self):
"""
:return: string
User credentials created via OAuth
"""
if not os.path.exists(os.path.dirname(self.user_credentials)): # create path to user credentials if needed
os.makedirs(os.path.dirname(self.user_credentials))
credentials = self.store.get() # retrieve credentials
if not credentials or credentials.invalid: # user credentials are to be updated
self.get_new_user_credentials() # get new user credentials
credentials = self.store.get() # retrieve new credentials
return credentials
@staticmethod
def authenticate(credentials):
"""
:param credentials: string
User authentication code created via OAuth
:return: http
Http authenticated credentials
"""
http = httplib2.Http()
credentials.authorize(http)
return http
def get_driver(self, name, version):
"""
:param name: string
Name of driver
:param version: string
Version of driver
:return: api driver
Authenticates and creates new API driver to perform scope stuff
"""
user_credentials = self.get_user_credentials() # get credentials
return discovery.build(name, version, http=self.authenticate(user_credentials)) # get sheets driver
class GMailApiOAuth(GoogleApiOAuth):
def __init__(self):
GoogleApiOAuth.__init__(
self,
"https://www.googleapis.com/auth/gmail.send", # scope
os.path.join(OAUTH_PATH, "gmail", "gmail_secrets.json"), # app secrets
os.path.join(OAUTH_PATH, "gmail", "gmail.json"), # user credential
)
def create_driver(self):
"""
:return: driver
Gmail API driver
"""
return super().get_driver("gmail", "v1")
class GSheetsApiOAuth(GoogleApiOAuth):
def __init__(self):
GoogleApiOAuth.__init__(
self,
"https://www.googleapis.com/auth/spreadsheets", # scope
os.path.join(OAUTH_PATH, "gsheets", "gsheets_secrets.json"), # app secrets
os.path.join(OAUTH_PATH, "gsheets", "gsheets.json"), # user credential
)
def create_driver(self):
"""
:return: driver
GSheets API driver
"""
return super().get_driver("sheets", "v4")
def create_gmail_driver():
"""
:return: gmail driver
Authenticates and creates new Google Mail driver to perform GMail stuff
"""
return GMailApiOAuth().create_driver()
def create_gdrive_driver():
"""
    :return: gsheets driver
    Authenticates and creates a new Google Sheets driver to perform spreadsheet stuff
"""
return GSheetsApiOAuth().create_driver()
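# Rough usage sketch (the spreadsheet id and range are placeholders; the first
# run triggers the OAuth consent flow in a browser):
#
#     sheets = create_gdrive_driver()
#     values = sheets.spreadsheets().values().get(
#         spreadsheetId='<your-spreadsheet-id>', range='Sheet1!A1:D10',
#     ).execute().get('values', [])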
|
the-stack_0_24383 | from django import forms
class DynamicArrayWidget(forms.TextInput):
template_name = "django_better_admin_arrayfield/forms/widgets/dynamic_array.html"
def __init__(self, *args, **kwargs):
self.subwidget_form = kwargs.pop("subwidget_form", forms.TextInput)
super().__init__(*args, **kwargs)
def get_context(self, name, value, attrs):
context_value = value or [""]
context = super().get_context(name, context_value, attrs)
final_attrs = context["widget"]["attrs"]
id_ = context["widget"]["attrs"].get("id")
context["widget"]["is_none"] = value is None
subwidgets = []
for index, item in enumerate(context["widget"]["value"]):
widget_attrs = final_attrs.copy()
if id_:
widget_attrs["id"] = "{id_}_{index}".format(id_=id_, index=index)
widget = self.subwidget_form()
widget.is_required = self.is_required
subwidgets.append(widget.get_context(name, item, widget_attrs)["widget"])
context["widget"]["subwidgets"] = subwidgets
return context
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
return [value for value in getter(name) if value]
except AttributeError:
return data.get(name)
def format_value(self, value):
return value or []
class DynamicArrayTextareaWidget(DynamicArrayWidget):
def __init__(self, *args, **kwargs):
kwargs.setdefault("subwidget_form", forms.Textarea)
super().__init__(*args, **kwargs)
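if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): demonstrates how the
    # widget collapses repeated form inputs of the same name into a Python list and
    # drops empty values. Minimal settings are configured only so QueryDict can be
    # built standalone.
    from django.conf import settings

    if not settings.configured:
        settings.configure()

    from django.http import QueryDict

    widget = DynamicArrayWidget()
    data = QueryDict("tags=red&tags=green&tags=")
    print(widget.value_from_datadict(data, {}, "tags"))  # ['red', 'green']
    print(widget.format_value(None))  # []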
|
the-stack_0_24386 | from __future__ import absolute_import
from dagster_graphql.schema.errors import DauphinPipelineConfigValidationInvalid
from graphql.execution.base import ResolveInfo
from dagster import DagsterInvalidConfigError, check
from dagster.config.validate import validate_config
from dagster.core.definitions.environment_schema import create_environment_schema
from dagster.core.definitions.pipeline import ExecutionSelector
from dagster.core.errors import DagsterRunConflict
from dagster.core.events import EngineEventData
from dagster.core.execution.api import create_execution_plan
from dagster.core.snap.execution_plan_snapshot import snapshot_from_execution_plan
from dagster.core.storage.pipeline_run import PipelineRunStatus
from dagster.core.utils import make_new_run_id
from dagster.utils import merge_dicts
from dagster.utils.error import SerializableErrorInfo
from ..fetch_pipelines import get_pipeline_def_from_selector
from ..fetch_runs import get_validated_config
from ..utils import ExecutionMetadata, ExecutionParams, capture_dauphin_error
from .utils import _check_start_pipeline_execution_errors, get_step_keys_to_execute
@capture_dauphin_error
def start_pipeline_reexecution(graphene_info, execution_params):
return _start_pipeline_execution(graphene_info, execution_params, is_reexecuted=True)
@capture_dauphin_error
def start_pipeline_execution(graphene_info, execution_params):
'''This indirection is done on purpose to make the logic in the function
below re-usable. The parent function is wrapped in @capture_dauphin_error, which makes it
difficult to do exception handling.
'''
return _start_pipeline_execution(graphene_info, execution_params)
def _start_pipeline_execution(graphene_info, execution_params, is_reexecuted=False):
check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
check.inst_param(execution_params, 'execution_params', ExecutionParams)
if is_reexecuted:
# required fields for re-execution
execution_metadata = check.inst_param(
execution_params.execution_metadata, 'execution_metadata', ExecutionMetadata
)
check.str_param(execution_metadata.root_run_id, 'root_run_id')
check.str_param(execution_metadata.parent_run_id, 'parent_run_id')
instance = graphene_info.context.instance
execution_manager_settings = instance.dagit_settings.get('execution_manager')
if execution_manager_settings and execution_manager_settings.get('disabled'):
return graphene_info.schema.type_named('StartPipelineRunDisabledError')()
pipeline_def = get_pipeline_def_from_selector(graphene_info, execution_params.selector)
get_validated_config(
pipeline_def,
environment_dict=execution_params.environment_dict,
mode=execution_params.mode,
)
execution_plan = create_execution_plan(
pipeline_def, execution_params.environment_dict, mode=execution_params.mode,
)
_check_start_pipeline_execution_errors(graphene_info, execution_params, execution_plan)
try:
pipeline_run = instance.get_or_create_run(
pipeline_name=pipeline_def.name,
run_id=execution_params.execution_metadata.run_id
if execution_params.execution_metadata.run_id
else make_new_run_id(),
selector=execution_params.selector or ExecutionSelector(name=pipeline_def.name),
environment_dict=execution_params.environment_dict,
mode=execution_params.mode,
step_keys_to_execute=(
get_step_keys_to_execute(instance, pipeline_def, execution_params)
or execution_params.step_keys
),
tags=merge_dicts(pipeline_def.tags, execution_params.execution_metadata.tags),
status=PipelineRunStatus.NOT_STARTED,
root_run_id=execution_params.execution_metadata.root_run_id,
parent_run_id=execution_params.execution_metadata.parent_run_id,
pipeline_snapshot=pipeline_def.get_pipeline_snapshot(),
execution_plan_snapshot=snapshot_from_execution_plan(
execution_plan, pipeline_def.get_pipeline_snapshot_id()
),
)
except DagsterRunConflict as exc:
return graphene_info.schema.type_named('PipelineRunConflict')(exc)
graphene_info.context.execution_manager.execute_pipeline(
graphene_info.context.get_handle(), pipeline_def, pipeline_run, instance=instance,
)
return graphene_info.schema.type_named('StartPipelineRunSuccess')(
run=graphene_info.schema.type_named('PipelineRun')(pipeline_run)
)
@capture_dauphin_error
def start_pipeline_execution_for_created_run(graphene_info, run_id):
'''This indirection is done on purpose to make the logic in the function
below re-usable. The parent function is wrapped in @capture_dauphin_error, which makes it
difficult to do exception handling.
'''
return _start_pipeline_execution_for_created_run(graphene_info, run_id)
def _start_pipeline_execution_for_created_run(graphene_info, run_id):
check.inst_param(graphene_info, 'graphene_info', ResolveInfo)
instance = graphene_info.context.instance
execution_manager_settings = instance.dagit_settings.get('execution_manager')
if execution_manager_settings and execution_manager_settings.get('disabled'):
return graphene_info.schema.type_named('StartPipelineRunDisabledError')()
pipeline_run = instance.get_run_by_id(run_id)
if not pipeline_run:
return graphene_info.schema.type_named('PipelineRunNotFoundError')(run_id)
pipeline_def = get_pipeline_def_from_selector(graphene_info, pipeline_run.selector)
environment_schema = create_environment_schema(pipeline_def, pipeline_run.mode)
validated_config = validate_config(
environment_schema.environment_type, pipeline_run.environment_dict
)
if not validated_config.success:
# If the config is invalid, we construct a DagsterInvalidConfigError exception and
# insert it into the event log. We also return a PipelineConfigValidationInvalid user facing
# graphql error.
# We currently re-use the engine events machinery to add the error to the event log, but
# may need to create a new event type and instance method to handle these errors.
invalid_config_exception = DagsterInvalidConfigError(
'Error in config for pipeline {}'.format(pipeline_def.name),
validated_config.errors,
pipeline_run.environment_dict,
)
instance.report_engine_event(
str(invalid_config_exception.message),
pipeline_run,
EngineEventData.engine_error(
SerializableErrorInfo(
invalid_config_exception.message,
[],
DagsterInvalidConfigError.__name__,
None,
)
),
)
# TODO: also insert a pipeline init failure event
# https://github.com/dagster-io/dagster/issues/2385
return DauphinPipelineConfigValidationInvalid.for_validation_errors(
pipeline_def, validated_config.errors
)
create_execution_plan(
pipeline_def,
pipeline_run.environment_dict,
mode=pipeline_run.mode,
step_keys_to_execute=pipeline_run.step_keys_to_execute,
)
graphene_info.context.execution_manager.execute_pipeline(
graphene_info.context.get_handle(), pipeline_def, pipeline_run, instance=instance,
)
return graphene_info.schema.type_named('StartPipelineRunSuccess')(
run=graphene_info.schema.type_named('PipelineRun')(pipeline_run)
)
|
the-stack_0_24387 | #!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, division, print_function
import glob
import logging
import math
import os
import random
import shutil
import tempfile
import warnings
from dataclasses import asdict
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from scipy.stats import mode
from sklearn.metrics import (
confusion_matrix,
label_ranking_average_precision_score,
matthews_corrcoef,
)
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm.auto import tqdm, trange
from transformers import (
BertConfig,
BertTokenizer,
DistilBertConfig,
DistilBertTokenizer,
LongformerConfig,
LongformerForSequenceClassification,
LongformerTokenizer,
RobertaConfig,
RobertaTokenizer,
XLMConfig,
XLMRobertaConfig,
XLMRobertaTokenizer,
XLMTokenizer, AlbertConfig, AlbertTokenizer, AutoConfig, AutoModelForSequenceClassification, AutoTokenizer,
BertTokenizerFast, BertweetTokenizer, CamembertConfig, CamembertTokenizerFast, DebertaConfig,
DebertaForSequenceClassification, DebertaTokenizer, DistilBertTokenizerFast, ElectraConfig, ElectraTokenizerFast,
FlaubertConfig, FlaubertTokenizer, LayoutLMConfig, LongformerTokenizerFast, RobertaTokenizerFast,
XLMRobertaTokenizerFast, XLNetConfig, XLNetTokenizerFast,
)
from transformers.convert_graph_to_onnx import convert, quantize
from transformers.optimization import AdamW, Adafactor
from transformers.optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from deepoffense.classification.classification_utils import LazyClassificationDataset, InputExample, \
convert_examples_to_features, sweep_config_to_sweep_values
from deepoffense.classification.transformer_models.albert_model import AlbertForSequenceClassification
from deepoffense.classification.transformer_models.args.model_args import ClassificationArgs
from deepoffense.classification.transformer_models.bert_model import BertForSequenceClassification
from deepoffense.classification.transformer_models.camembert_model import CamembertForSequenceClassification
from deepoffense.classification.transformer_models.distilbert_model import DistilBertForSequenceClassification
from deepoffense.classification.transformer_models.flaubert_model import FlaubertForSequenceClassification
from deepoffense.classification.transformer_models.roberta_model import RobertaForSequenceClassification
from deepoffense.classification.transformer_models.xlm_model import XLMForSequenceClassification
from deepoffense.classification.transformer_models.xlm_roberta_model import XLMRobertaForSequenceClassification
from deepoffense.classification.transformer_models.xlnet_model import XLNetForSequenceClassification
from deepoffense.custom_models.models import ElectraForSequenceClassification
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
logger = logging.getLogger(__name__)
class ClassificationModel:
def __init__(
self,
model_type,
model_name,
num_labels=None,
weight=None,
args=None,
use_cuda=True,
cuda_device=-1,
onnx_execution_provider=None,
**kwargs,
):
"""
Initializes a ClassificationModel.
Args:
model_type: The type of model (bert, xlnet, xlm, roberta, distilbert)
model_name: The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
num_labels (optional): The number of labels or classes in the dataset.
weight (optional): A list of length num_labels containing the weights to assign to each label for loss calculation.
args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
onnx_execution_provider (optional): ExecutionProvider to use with ONNX Runtime. Will use CUDA (if use_cuda) or CPU (if use_cuda is False) by default.
**kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
""" # noqa: ignore flake8"
MODEL_CLASSES = {
"albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
"auto": (AutoConfig, AutoModelForSequenceClassification, AutoTokenizer),
"bert": (BertConfig, BertForSequenceClassification, BertTokenizerFast),
"bertweet": (
RobertaConfig,
RobertaForSequenceClassification,
BertweetTokenizer,
),
"camembert": (
CamembertConfig,
CamembertForSequenceClassification,
CamembertTokenizerFast,
),
"deberta": (
DebertaConfig,
DebertaForSequenceClassification,
DebertaTokenizer,
),
"distilbert": (
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizerFast,
),
"electra": (
ElectraConfig,
ElectraForSequenceClassification,
ElectraTokenizerFast,
),
"flaubert": (
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
),
"longformer": (
LongformerConfig,
LongformerForSequenceClassification,
LongformerTokenizerFast,
),
"roberta": (
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizerFast,
),
"xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
"xlmroberta": (
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizerFast,
),
"xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizerFast),
}
self.args = self._load_model_args(model_name)
if isinstance(args, dict):
self.args.update_from_dict(args)
elif isinstance(args, ClassificationArgs):
self.args = args
if self.args.thread_count:
torch.set_num_threads(self.args.thread_count)
if "sweep_config" in kwargs:
self.is_sweeping = True
sweep_config = kwargs.pop("sweep_config")
sweep_values = sweep_config_to_sweep_values(sweep_config)
self.args.update_from_dict(sweep_values)
else:
self.is_sweeping = False
if self.args.manual_seed:
random.seed(self.args.manual_seed)
np.random.seed(self.args.manual_seed)
torch.manual_seed(self.args.manual_seed)
if self.args.n_gpu > 0:
torch.cuda.manual_seed_all(self.args.manual_seed)
if self.args.labels_list:
if num_labels:
assert num_labels == len(self.args.labels_list)
if self.args.labels_map:
try:
assert list(self.args.labels_map.keys()) == self.args.labels_list
except AssertionError:
assert [int(key) for key in list(self.args.labels_map.keys())] == self.args.labels_list
self.args.labels_map = {int(key): value for key, value in self.args.labels_map.items()}
else:
self.args.labels_map = {label: i for i, label in enumerate(self.args.labels_list)}
else:
len_labels_list = 2 if not num_labels else num_labels
self.args.labels_list = [i for i in range(len_labels_list)]
config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]
if num_labels:
self.config = config_class.from_pretrained(model_name, num_labels=num_labels, **self.args.config)
self.num_labels = num_labels
else:
self.config = config_class.from_pretrained(model_name, **self.args.config)
self.num_labels = self.config.num_labels
self.weight = weight
if use_cuda:
if torch.cuda.is_available():
if cuda_device == -1:
self.device = torch.device("cuda")
else:
self.device = torch.device(f"cuda:{cuda_device}")
else:
raise ValueError(
"'use_cuda' set to True when cuda is unavailable."
" Make sure CUDA is available or set use_cuda=False."
)
else:
self.device = "cpu"
if self.args.onnx:
from onnxruntime import InferenceSession, SessionOptions
if not onnx_execution_provider:
onnx_execution_provider = "CUDAExecutionProvider" if use_cuda else "CPUExecutionProvider"
options = SessionOptions()
options.intra_op_num_threads = 1
if self.args.dynamic_quantize:
model_path = quantize(Path(os.path.join(model_name, "onnx_model.onnx")))
self.model = InferenceSession(model_path.as_posix(), options, providers=[onnx_execution_provider])
else:
model_path = os.path.join(model_name, "onnx_model.onnx")
self.model = InferenceSession(model_path, options, providers=[onnx_execution_provider])
else:
if not self.args.quantized_model:
if self.weight:
self.model = model_class.from_pretrained(
model_name, config=self.config, weight=torch.Tensor(self.weight).to(self.device), **kwargs,
)
else:
self.model = model_class.from_pretrained(model_name, config=self.config, **kwargs)
else:
quantized_weights = torch.load(os.path.join(model_name, "pytorch_model.bin"))
if self.weight:
self.model = model_class.from_pretrained(
None,
config=self.config,
state_dict=quantized_weights,
weight=torch.Tensor(self.weight).to(self.device),
)
else:
self.model = model_class.from_pretrained(None, config=self.config, state_dict=quantized_weights)
if self.args.dynamic_quantize:
self.model = torch.quantization.quantize_dynamic(self.model, {torch.nn.Linear}, dtype=torch.qint8)
if self.args.quantized_model:
self.model.load_state_dict(quantized_weights)
if self.args.dynamic_quantize:
self.args.quantized_model = True
self.results = {}
if not use_cuda:
self.args.fp16 = False
if self.args.fp16:
try:
from torch.cuda import amp
except AttributeError:
raise AttributeError("fp16 requires Pytorch >= 1.6. Please update Pytorch or turn off fp16.")
if model_name in [
"vinai/bertweet-base",
"vinai/bertweet-covid19-base-cased",
"vinai/bertweet-covid19-base-uncased",
]:
self.tokenizer = tokenizer_class.from_pretrained(
model_name, do_lower_case=self.args.do_lower_case, normalization=True, **kwargs
)
else:
self.tokenizer = tokenizer_class.from_pretrained(
model_name, do_lower_case=self.args.do_lower_case, **kwargs
)
if self.args.special_tokens_list:
self.tokenizer.add_tokens(self.args.special_tokens_list, special_tokens=True)
self.model.resize_token_embeddings(len(self.tokenizer))
self.args.model_name = model_name
self.args.model_type = model_type
if model_type in ["camembert", "xlmroberta"]:
warnings.warn(
f"use_multiprocessing automatically disabled as {model_type}"
" fails when using multiprocessing for feature conversion."
)
self.args.use_multiprocessing = False
if self.args.wandb_project and not wandb_available:
warnings.warn("wandb_project specified but wandb is not available. Wandb disabled.")
self.args.wandb_project = None
def train_model(
self,
train_df,
multi_label=False,
output_dir=None,
show_running_loss=True,
args=None,
eval_df=None,
verbose=True,
**kwargs,
):
"""
Trains the model using 'train_df'
Args:
train_df: Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present,
the Dataframe should contain at least two columns, with the first column containing the text, and the second column containing the label. The model will be trained on this Dataframe.
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_df (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.
Returns:
global_step: Number of global steps trained
training_details: Average training loss if evaluate_during_training is False or full training progress scores if evaluate_during_training is True
""" # noqa: ignore flake8"
if args:
self.args.update_from_dict(args)
if self.args.silent:
show_running_loss = False
if self.args.evaluate_during_training and eval_df is None:
raise ValueError(
"evaluate_during_training is enabled but eval_df is not specified."
" Pass eval_df to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args.output_dir
if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Set overwrite_output_dir: True to automatically overwrite.".format(output_dir)
)
self._move_model_to_device()
if isinstance(train_df, str) and self.args.lazy_loading:
if self.args.sliding_window:
raise ValueError("Lazy loading cannot be used with sliding window.")
if self.args.model_type == "layoutlm":
raise NotImplementedError("Lazy loading is not implemented for LayoutLM models")
train_dataset = LazyClassificationDataset(train_df, self.tokenizer, self.args)
else:
if self.args.lazy_loading:
raise ValueError("Input must be given as a path to a file when using lazy loading")
if "text" in train_df.columns and "labels" in train_df.columns:
if self.args.model_type == "layoutlm":
train_examples = [
InputExample(i, text, None, label, x0, y0, x1, y1)
for i, (text, label, x0, y0, x1, y1) in enumerate(
zip(
train_df["text"].astype(str),
train_df["labels"],
train_df["x0"],
train_df["y0"],
train_df["x1"],
train_df["y1"],
)
)
]
else:
train_examples = [
InputExample(i, text, None, label)
for i, (text, label) in enumerate(zip(train_df["text"].astype(str), train_df["labels"]))
]
elif "text_a" in train_df.columns and "text_b" in train_df.columns:
if self.args.model_type == "layoutlm":
raise ValueError("LayoutLM cannot be used with sentence-pair tasks")
else:
train_examples = [
InputExample(i, text_a, text_b, label)
for i, (text_a, text_b, label) in enumerate(
zip(train_df["text_a"].astype(str), train_df["text_b"].astype(str), train_df["labels"])
)
]
else:
warnings.warn(
"Dataframe headers not specified. Falling back to using column 0 as text and column 1 as labels."
)
train_examples = [
InputExample(i, text, None, label)
for i, (text, label) in enumerate(zip(train_df.iloc[:, 0], train_df.iloc[:, 1]))
]
train_dataset = self.load_and_cache_examples(train_examples, verbose=verbose)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=self.args.train_batch_size,
num_workers=self.args.dataloader_num_workers,
)
os.makedirs(output_dir, exist_ok=True)
global_step, training_details = self.train(
train_dataloader,
output_dir,
multi_label=multi_label,
show_running_loss=show_running_loss,
eval_df=eval_df,
verbose=verbose,
**kwargs,
)
# model_to_save = self.model.module if hasattr(self.model, "module") else self.model
# model_to_save.save_pretrained(output_dir)
# self.tokenizer.save_pretrained(output_dir)
# torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
self.save_model(model=self.model)
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(self.args.model_type, output_dir))
return global_step, training_details
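# Hedged usage sketch (not part of the original class): a minimal end-to-end run,
# assuming a pandas DataFrame with "text" and "labels" columns as described in the
# docstring above; the model name and args values are illustrative only.
#
#   import pandas as pd
#   train_df = pd.DataFrame({"text": ["great movie", "awful movie"], "labels": [1, 0]})
#   model = ClassificationModel("bert", "bert-base-cased", num_labels=2, use_cuda=False,
#                               args={"num_train_epochs": 1, "overwrite_output_dir": True})
#   model.train_model(train_df)
#   result, model_outputs, wrong_preds = model.eval_model(train_df)
#   preds, raw_outputs = model.predict(["an unseen sentence"])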
def train(
self,
train_dataloader,
output_dir,
multi_label=False,
show_running_loss=True,
eval_df=None,
verbose=True,
**kwargs,
):
"""
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
tb_writer = SummaryWriter(logdir=args.tensorboard_dir)
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = []
custom_parameter_names = set()
for group in self.args.custom_parameter_groups:
params = group.pop("params")
custom_parameter_names.update(params)
param_group = {**group}
param_group["params"] = [p for n, p in model.named_parameters() if n in params]
optimizer_grouped_parameters.append(param_group)
for group in self.args.custom_layer_parameters:
layer_number = group.pop("layer")
layer = f"layer.{layer_number}."
group_d = {**group}
group_nd = {**group}
group_nd["weight_decay"] = 0.0
params_d = []
params_nd = []
for n, p in model.named_parameters():
if n not in custom_parameter_names and layer in n:
if any(nd in n for nd in no_decay):
params_nd.append(p)
else:
params_d.append(p)
custom_parameter_names.add(n)
group_d["params"] = params_d
group_nd["params"] = params_nd
optimizer_grouped_parameters.append(group_d)
optimizer_grouped_parameters.append(group_nd)
if not self.args.train_custom_parameters_only:
optimizer_grouped_parameters.extend(
[
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
)
warmup_steps = math.ceil(t_total * args.warmup_ratio)
args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps
if args.optimizer == "AdamW":
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
elif args.optimizer == "Adafactor":
optimizer = Adafactor(
optimizer_grouped_parameters,
lr=args.learning_rate,
eps=args.adafactor_eps,
clip_threshold=args.adafactor_clip_threshold,
decay_rate=args.adafactor_decay_rate,
beta1=args.adafactor_beta1,
weight_decay=args.weight_decay,
scale_parameter=args.adafactor_scale_parameter,
relative_step=args.adafactor_relative_step,
warmup_init=args.adafactor_warmup_init,
)
print("Using Adafactor for T5")
else:
raise ValueError(
"{} is not a valid optimizer class. Please use one of ('AdamW', 'Adafactor') instead.".format(
args.optimizer
)
)
if args.scheduler == "constant_schedule":
scheduler = get_constant_schedule(optimizer)
elif args.scheduler == "constant_schedule_with_warmup":
scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps)
elif args.scheduler == "linear_schedule_with_warmup":
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
elif args.scheduler == "cosine_schedule_with_warmup":
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
num_cycles=args.cosine_schedule_num_cycles,
)
elif args.scheduler == "cosine_with_hard_restarts_schedule_with_warmup":
scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
num_cycles=args.cosine_schedule_num_cycles,
)
elif args.scheduler == "polynomial_decay_schedule_with_warmup":
scheduler = get_polynomial_decay_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total,
lr_end=args.polynomial_decay_schedule_lr_end,
power=args.polynomial_decay_schedule_power,
)
else:
raise ValueError("{} is not a valid scheduler.".format(args.scheduler))
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
global_step = 0
training_progress_scores = None
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.silent, mininterval=0)
epoch_number = 0
best_eval_metric = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
current_loss = "Initializing"
if args.model_name and os.path.exists(args.model_name):
try:
# set global_step to global_step of last saved checkpoint from model path
checkpoint_suffix = args.model_name.split("/")[-1].split("-")
if len(checkpoint_suffix) > 2:
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[-1]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
if args.evaluate_during_training:
training_progress_scores = self._create_training_progress_scores(multi_label, **kwargs)
if args.wandb_project:
if not wandb.setup().settings.sweep_id:
logger.info(" Initializing WandB run for training.")
wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
wandb.watch(self.model)
if self.args.fp16:
from torch.cuda import amp
scaler = amp.GradScaler()
for _ in train_iterator:
model.train()
if epochs_trained > 0:
epochs_trained -= 1
continue
train_iterator.set_description(f"Epoch {epoch_number + 1} of {args.num_train_epochs}")
batch_iterator = tqdm(
train_dataloader,
desc=f"Running Epoch {epoch_number} of {args.num_train_epochs}",
disable=args.silent,
mininterval=0,
)
for step, batch in enumerate(batch_iterator):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
inputs = self._get_inputs_dict(batch)
if self.args.fp16:
with amp.autocast():
outputs = model(**inputs)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
else:
outputs = model(**inputs)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
current_loss = loss.item()
if show_running_loss:
batch_iterator.set_description(
f"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}"
)
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if self.args.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if self.args.fp16:
scaler.unscale_(optimizer)
if args.optimizer == "AdamW":
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if self.args.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
tb_writer.add_scalar("lr", scheduler.get_last_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.wandb_project or self.is_sweeping:
wandb.log(
{
"Training loss": current_loss,
"lr": scheduler.get_last_lr()[0],
"global_step": global_step,
}
)
if args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
if args.save_recent_only:
del_paths = glob.glob(os.path.join(output_dir, 'checkpoint-*'))
for del_path in del_paths:
shutil.rmtree(del_path)
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
self.save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and (
args.evaluate_during_training_steps > 0
and global_step % args.evaluate_during_training_steps == 0
):
# Only evaluate when single GPU otherwise metrics may not average well
results, _, _ = self.eval_model(
eval_df,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent,
wandb_log=False,
**kwargs,
)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
if args.save_eval_checkpoints:
if args.save_recent_only:
del_paths = glob.glob(os.path.join(output_dir, 'checkpoint-*'))
for del_path in del_paths:
shutil.rmtree(del_path)
self.save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(args.output_dir, "training_progress_scores.csv"), index=False,
)
if args.wandb_project or self.is_sweeping:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
if best_eval_metric and args.early_stopping_metric_minimize:
if best_eval_metric - results[args.early_stopping_metric] > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(
args.best_model_dir, optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args.use_early_stopping:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return (
global_step,
tr_loss / global_step
if not self.args.evaluate_during_training
else training_progress_scores,
)
else:
if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(
args.best_model_dir, optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args.use_early_stopping:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return (
global_step,
tr_loss / global_step
if not self.args.evaluate_during_training
else training_progress_scores,
)
epoch_number += 1
output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))
if args.save_model_every_epoch or args.evaluate_during_training:
if args.save_recent_only:
del_paths = glob.glob(os.path.join(output_dir, 'checkpoint-*'))
for del_path in del_paths:
shutil.rmtree(del_path)
os.makedirs(output_dir_current, exist_ok=True)
if args.save_model_every_epoch:
self.save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and args.evaluate_each_epoch:
results, _, _ = self.eval_model(
eval_df,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent,
wandb_log=False,
**kwargs,
)
self.save_model(output_dir_current, optimizer, scheduler, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(os.path.join(args.output_dir, "training_progress_scores.csv"), index=False)
if args.wandb_project or self.is_sweeping:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
if best_eval_metric and args.early_stopping_metric_minimize:
if best_eval_metric - results[args.early_stopping_metric] > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
if args.use_early_stopping and args.early_stopping_consider_epochs:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return (
global_step,
tr_loss / global_step
if not self.args.evaluate_during_training
else training_progress_scores,
)
else:
if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
self.save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
if args.use_early_stopping and args.early_stopping_consider_epochs:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return (
global_step,
tr_loss / global_step
if not self.args.evaluate_during_training
else training_progress_scores,
)
return (
global_step,
tr_loss / global_step if not self.args.evaluate_during_training else training_progress_scores,
)
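# Hedged configuration sketch (not part of the original class): the early-stopping
# branches above are driven entirely by ClassificationArgs fields; the values shown
# are illustrative only.
#
#   args = ClassificationArgs(
#       evaluate_during_training=True,
#       use_early_stopping=True,
#       early_stopping_metric="eval_loss",
#       early_stopping_metric_minimize=True,
#       early_stopping_patience=3,
#       early_stopping_delta=0.0,
#   )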
def eval_model(
self, eval_df, multi_label=False, output_dir=None, verbose=True, silent=False, wandb_log=True, **kwargs
):
"""
Evaluates the model on eval_df. Saves results to output_dir.
Args:
eval_df: Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present,
the Dataframe should contain at least two columns, with the first column containing the text, and the second column containing the label. The model will be evaluated on this Dataframe.
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
verbose: If verbose, results will be printed to the console on completion of evaluation.
silent: If silent, tqdm progress bars will be hidden.
wandb_log: If True, evaluation results will be logged to wandb.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.
Returns:
result: Dictionary containing evaluation results.
model_outputs: List of model outputs for each row in eval_df
wrong_preds: List of InputExample objects corresponding to each incorrect prediction by the model
""" # noqa: ignore flake8"
if not output_dir:
output_dir = self.args.output_dir
self._move_model_to_device()
result, model_outputs, wrong_preds = self.evaluate(
eval_df, output_dir, multi_label=multi_label, verbose=verbose, silent=silent, wandb_log=wandb_log, **kwargs
)
self.results.update(result)
if verbose:
logger.info(self.results)
return result, model_outputs, wrong_preds
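# Hedged usage sketch (not part of the original class): additional metrics are passed
# to eval_model() as keyword arguments mapping a name to a callable(true_labels, preds).
#
#   from sklearn.metrics import accuracy_score, f1_score
#   result, model_outputs, wrong_preds = model.eval_model(eval_df, acc=accuracy_score, f1=f1_score)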
def evaluate(
self, eval_df, output_dir, multi_label=False, prefix="", verbose=True, silent=False, wandb_log=True,
**kwargs
):
"""
Evaluates the model on eval_df.
Utility function to be used by the eval_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
eval_output_dir = output_dir
results = {}
if isinstance(eval_df, str) and self.args.lazy_loading:
if self.args.model_type == "layoutlm":
raise NotImplementedError("Lazy loading is not implemented for LayoutLM models")
eval_dataset = LazyClassificationDataset(eval_df, self.tokenizer, self.args)
eval_examples = None
else:
if self.args.lazy_loading:
raise ValueError("Input must be given as a path to a file when using lazy loading")
if "text" in eval_df.columns and "labels" in eval_df.columns:
if self.args.model_type == "layoutlm":
eval_examples = [
InputExample(i, text, None, label, x0, y0, x1, y1)
for i, (text, label, x0, y0, x1, y1) in enumerate(
zip(
eval_df["text"].astype(str),
eval_df["labels"],
eval_df["x0"],
eval_df["y0"],
eval_df["x1"],
eval_df["y1"],
)
)
]
else:
eval_examples = [
InputExample(i, text, None, label)
for i, (text, label) in enumerate(zip(eval_df["text"].astype(str), eval_df["labels"]))
]
elif "text_a" in eval_df.columns and "text_b" in eval_df.columns:
if self.args.model_type == "layoutlm":
raise ValueError("LayoutLM cannot be used with sentence-pair tasks")
else:
eval_examples = [
InputExample(i, text_a, text_b, label)
for i, (text_a, text_b, label) in enumerate(
zip(eval_df["text_a"].astype(str), eval_df["text_b"].astype(str), eval_df["labels"])
)
]
else:
warnings.warn(
"Dataframe headers not specified. Falling back to using column 0 as text and column 1 as labels."
)
eval_examples = [
InputExample(i, text, None, label)
for i, (text, label) in enumerate(zip(eval_df.iloc[:, 0], eval_df.iloc[:, 1]))
]
if args.sliding_window:
eval_dataset, window_counts = self.load_and_cache_examples(
eval_examples, evaluate=True, verbose=verbose, silent=silent
)
else:
eval_dataset = self.load_and_cache_examples(
eval_examples, evaluate=True, verbose=verbose, silent=silent
)
os.makedirs(eval_output_dir, exist_ok=True)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
eval_loss = 0.0
nb_eval_steps = 0
n_batches = len(eval_dataloader)
preds = np.empty((len(eval_dataset), self.num_labels))
if multi_label:
out_label_ids = np.empty((len(eval_dataset), self.num_labels))
else:
out_label_ids = np.empty((len(eval_dataset)))
model.eval()
if self.args.fp16:
from torch.cuda import amp
for i, batch in enumerate(tqdm(eval_dataloader, disable=args.silent or silent, desc="Running Evaluation")):
# batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
inputs = self._get_inputs_dict(batch)
if self.args.fp16:
with amp.autocast():
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
else:
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if multi_label:
logits = logits.sigmoid()
if self.args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
start_index = self.args.eval_batch_size * i
end_index = start_index + self.args.eval_batch_size if i != (n_batches - 1) else len(eval_dataset)
preds[start_index:end_index] = logits.detach().cpu().numpy()
out_label_ids[start_index:end_index] = inputs["labels"].detach().cpu().numpy()
# if preds is None:
# preds = logits.detach().cpu().numpy()
# out_label_ids = inputs["labels"].detach().cpu().numpy()
# else:
# preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
# out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.sliding_window:
count = 0
window_ranges = []
for n_windows in window_counts:
window_ranges.append([count, count + n_windows])
count += n_windows
preds = [preds[window_range[0]: window_range[1]] for window_range in window_ranges]
out_label_ids = [
out_label_ids[i] for i in range(len(out_label_ids)) if i in [window[0] for window in window_ranges]
]
model_outputs = preds
preds = [np.argmax(pred, axis=1) for pred in preds]
final_preds = []
for pred_row in preds:
mode_pred, counts = mode(pred_row)
if len(counts) > 1 and counts[0] == counts[1]:
final_preds.append(args.tie_value)
else:
final_preds.append(mode_pred[0])
preds = np.array(final_preds)
elif not multi_label and args.regression is True:
preds = np.squeeze(preds)
model_outputs = preds
else:
model_outputs = preds
if not multi_label:
preds = np.argmax(preds, axis=1)
result, wrong = self.compute_metrics(preds, out_label_ids, eval_examples, **kwargs)
result["eval_loss"] = eval_loss
results.update(result)
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(result.keys()):
writer.write("{} = {}\n".format(key, str(result[key])))
if self.args.wandb_project and wandb_log and not multi_label and not self.args.regression:
if not wandb.setup().settings.sweep_id:
logger.info(" Initializing WandB run for evaluation.")
wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
if not args.labels_map:
self.args.labels_map = {i: i for i in range(self.num_labels)}
labels_list = sorted(list(self.args.labels_map.keys()))
inverse_labels_map = {value: key for key, value in self.args.labels_map.items()}
truth = [inverse_labels_map[out] for out in out_label_ids]
# Confusion Matrix
wandb.sklearn.plot_confusion_matrix(
truth, [inverse_labels_map[pred] for pred in preds], labels=labels_list,
)
if not self.args.sliding_window:
# ROC curve
wandb.log({"roc": wandb.plots.ROC(truth, model_outputs, labels_list)})
# Precision Recall
wandb.log({"pr": wandb.plots.precision_recall(truth, model_outputs, labels_list)})
return results, model_outputs, wrong
def load_and_cache_examples(
self, examples, evaluate=False, no_cache=False, multi_label=False, verbose=True, silent=False
):
"""
Converts a list of InputExample objects to a TensorDataset containing InputFeatures. Caches the InputFeatures.
Utility function for train() and eval() methods. Not intended to be used directly.
"""
process_count = self.args.process_count
tokenizer = self.tokenizer
args = self.args
if not no_cache:
no_cache = args.no_cache
if not multi_label and args.regression:
output_mode = "regression"
else:
output_mode = "classification"
if not no_cache:
os.makedirs(self.args.cache_dir, exist_ok=True)
mode = "dev" if evaluate else "train"
cached_features_file = os.path.join(
args.cache_dir,
"cached_{}_{}_{}_{}_{}".format(
mode, args.model_type, args.max_seq_length, self.num_labels, len(examples),
),
)
if os.path.exists(cached_features_file) and (
(not args.reprocess_input_data and not no_cache)
or (mode == "dev" and args.use_cached_eval_features and not no_cache)
):
features = torch.load(cached_features_file)
if verbose:
logger.info(f" Features loaded from cache at {cached_features_file}")
else:
if verbose:
logger.info(" Converting to features started. Cache is not used.")
if args.sliding_window:
logger.info(" Sliding window enabled")
# If labels_map is defined, then labels need to be replaced with ints
if self.args.labels_map and not self.args.regression:
for example in examples:
if multi_label:
example.label = [self.args.labels_map[label] for label in example.label]
else:
example.label = self.args.labels_map[example.label]
features = convert_examples_to_features(
examples,
args.max_seq_length,
tokenizer,
output_mode,
# XLNet has a CLS token at the end
cls_token_at_end=bool(args.model_type in ["xlnet"]),
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
# RoBERTa uses an extra separator b/w pairs of sentences,
# cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
sep_token_extra=bool(args.model_type in ["roberta", "camembert", "xlmroberta", "longformer"]),
# PAD on the left for XLNet
pad_on_left=bool(args.model_type in ["xlnet"]),
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
process_count=process_count,
multi_label=multi_label,
silent=args.silent or silent,
use_multiprocessing=args.use_multiprocessing,
sliding_window=args.sliding_window,
flatten=not evaluate,
stride=args.stride,
add_prefix_space=bool(args.model_type in ["roberta", "camembert", "xlmroberta", "longformer"]),
# avoid padding in case of single example/online inferencing to decrease execution time
pad_to_max_length=bool(len(examples) > 1),
args=args,
)
if verbose and args.sliding_window:
logger.info(f" {len(features)} features created from {len(examples)} samples.")
if not no_cache:
torch.save(features, cached_features_file)
if args.sliding_window and evaluate:
features = [
[feature_set] if not isinstance(feature_set, list) else feature_set for feature_set in features
]
window_counts = [len(sample) for sample in features]
features = [feature for feature_set in features for feature in feature_set]
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
if self.args.model_type == "layoutlm":
all_bboxes = torch.tensor([f.bboxes for f in features], dtype=torch.long)
if output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
elif output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
if self.args.model_type == "layoutlm":
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_bboxes)
else:
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
if args.sliding_window and evaluate:
return dataset, window_counts
else:
return dataset
def compute_metrics(self, preds, labels, eval_examples=None, multi_label=False, **kwargs):
"""
Computes the evaluation metrics for the model predictions.
Args:
preds: Model predictions
labels: Ground truth labels
eval_examples: List of examples on which evaluation was performed
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.
Returns:
result: Dictionary containing evaluation results. (Matthews correlation coefficient, tp, tn, fp, fn)
wrong: List of InputExample objects corresponding to each incorrect prediction by the model
""" # noqa: ignore flake8"
assert len(preds) == len(labels)
extra_metrics = {}
for metric, func in kwargs.items():
extra_metrics[metric] = func(labels, preds)
if multi_label:
threshold_values = self.args.threshold if self.args.threshold else 0.5
if isinstance(threshold_values, list):
mismatched = labels != [
[self._threshold(pred, threshold_values[i]) for i, pred in enumerate(example)] for example in preds
]
else:
mismatched = labels != [
[self._threshold(pred, threshold_values) for pred in example] for example in preds
]
else:
mismatched = labels != preds
if eval_examples:
wrong = [i for (i, v) in zip(eval_examples, mismatched) if v.any()]
else:
wrong = ["NA"]
if multi_label:
label_ranking_score = label_ranking_average_precision_score(labels, preds)
return {**{"LRAP": label_ranking_score}, **extra_metrics}, wrong
elif self.args.regression:
return {**extra_metrics}, wrong
mcc = matthews_corrcoef(labels, preds)
if self.model.num_labels == 2:
tn, fp, fn, tp = confusion_matrix(labels, preds, labels=[0, 1]).ravel()
return (
{**{"mcc": mcc, "tp": tp, "tn": tn, "fp": fp, "fn": fn}, **extra_metrics},
wrong,
)
else:
return {**{"mcc": mcc}, **extra_metrics}, wrong
def predict(self, to_predict, multi_label=False):
"""
Performs predictions on a list of text.
Args:
to_predict: A python list of text (str) to be sent to the model for prediction.
Returns:
preds: A python list of the predictions (0 or 1) for each text.
model_outputs: A python list of the raw model outputs for each text.
"""
model = self.model
args = self.args
eval_loss = 0.0
nb_eval_steps = 0
preds = np.empty((len(to_predict), self.num_labels))
if multi_label:
out_label_ids = np.empty((len(to_predict), self.num_labels))
else:
out_label_ids = np.empty((len(to_predict)))
if not multi_label and self.args.onnx:
model_inputs = self.tokenizer.batch_encode_plus(
to_predict, return_tensors="pt", padding=True, truncation=True
)
for i, (input_ids, attention_mask) in enumerate(
zip(model_inputs["input_ids"], model_inputs["attention_mask"])
):
input_ids = input_ids.unsqueeze(0).detach().cpu().numpy()
attention_mask = attention_mask.unsqueeze(0).detach().cpu().numpy()
inputs_onnx = {"input_ids": input_ids, "attention_mask": attention_mask}
# Run the model (None = get all the outputs)
output = self.model.run(None, inputs_onnx)
preds[i] = output[0]
# if preds is None:
# preds = output[0]
# else:
# preds = np.append(preds, output[0], axis=0)
model_outputs = preds
preds = np.argmax(preds, axis=1)
else:
self._move_model_to_device()
dummy_label = 0 if not self.args.labels_map else next(iter(self.args.labels_map.keys()))
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
if multi_label:
if isinstance(to_predict[0], list):
eval_examples = [
InputExample(i, text[0], text[1], [dummy_label for i in range(self.num_labels)])
for i, text in enumerate(to_predict)
]
else:
eval_examples = [
InputExample(i, text, None, [dummy_label for i in range(self.num_labels)])
for i, text in enumerate(to_predict)
]
else:
if isinstance(to_predict[0], list):
eval_examples = [
InputExample(i, text[0], text[1], dummy_label) for i, text in enumerate(to_predict)
]
else:
eval_examples = [InputExample(i, text, None, dummy_label) for i, text in enumerate(to_predict)]
if args.sliding_window:
eval_dataset, window_counts = self.load_and_cache_examples(eval_examples, evaluate=True, no_cache=True)
preds = np.empty((len(eval_dataset), self.num_labels))
if multi_label:
out_label_ids = np.empty((len(eval_dataset), self.num_labels))
else:
out_label_ids = np.empty((len(eval_dataset)))
else:
eval_dataset = self.load_and_cache_examples(
eval_examples, evaluate=True, multi_label=multi_label, no_cache=True
)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
if self.args.fp16:
from torch.cuda import amp
if self.config.output_hidden_states:
model.eval()
preds = None
out_label_ids = None
for i, batch in enumerate(tqdm(eval_dataloader, disable=args.silent, desc="Running Prediction")):
# batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
inputs = self._get_inputs_dict(batch)
if self.args.fp16:
with amp.autocast():
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
else:
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
embedding_outputs, layer_hidden_states = outputs[2][0], outputs[2][1:]
if multi_label:
logits = logits.sigmoid()
if self.args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
all_layer_hidden_states = np.array(
[state.detach().cpu().numpy() for state in layer_hidden_states]
)
all_embedding_outputs = embedding_outputs.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
all_layer_hidden_states = np.append(
all_layer_hidden_states,
np.array([state.detach().cpu().numpy() for state in layer_hidden_states]),
axis=1,
)
all_embedding_outputs = np.append(
all_embedding_outputs, embedding_outputs.detach().cpu().numpy(), axis=0
)
else:
n_batches = len(eval_dataloader)
for i, batch in enumerate(tqdm(eval_dataloader, disable=args.silent)):
model.eval()
# batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
inputs = self._get_inputs_dict(batch)
if self.args.fp16:
with amp.autocast():
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
else:
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if multi_label:
logits = logits.sigmoid()
if self.args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
start_index = self.args.eval_batch_size * i
end_index = start_index + self.args.eval_batch_size if i != (n_batches - 1) else len(eval_dataset)
preds[start_index:end_index] = logits.detach().cpu().numpy()
out_label_ids[start_index:end_index] = inputs["labels"].detach().cpu().numpy()
# if preds is None:
# preds = logits.detach().cpu().numpy()
# out_label_ids = inputs["labels"].detach().cpu().numpy()
# else:
# preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
# out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.sliding_window:
count = 0
window_ranges = []
for n_windows in window_counts:
window_ranges.append([count, count + n_windows])
count += n_windows
preds = [preds[window_range[0]: window_range[1]] for window_range in window_ranges]
model_outputs = preds
preds = [np.argmax(pred, axis=1) for pred in preds]
final_preds = []
for pred_row in preds:
mode_pred, counts = mode(pred_row)
if len(counts) > 1 and counts[0] == counts[1]:
final_preds.append(args.tie_value)
else:
final_preds.append(mode_pred[0])
preds = np.array(final_preds)
elif not multi_label and args.regression is True:
preds = np.squeeze(preds)
model_outputs = preds
else:
model_outputs = preds
preds = np.argmax(preds, axis=1)
if self.args.labels_map and not self.args.regression:
inverse_labels_map = {value: key for key, value in self.args.labels_map.items()}
preds = [inverse_labels_map[pred] for pred in preds]
if self.config.output_hidden_states:
return preds, model_outputs, all_embedding_outputs, all_layer_hidden_states
else:
return preds, model_outputs
def convert_to_onnx(self, output_dir=None, set_onnx_arg=True):
"""Convert the model to ONNX format and save to output_dir
Args:
output_dir (str, optional): If specified, ONNX model will be saved to output_dir (else args.output_dir will be used). Defaults to None.
set_onnx_arg (bool, optional): Updates the model args to set onnx=True. Defaults to True.
""" # noqa
if not output_dir:
output_dir = os.path.join(self.args.output_dir, "onnx")
os.makedirs(output_dir, exist_ok=True)
if os.listdir(output_dir):
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Output directory for onnx conversion must be empty.".format(output_dir)
)
onnx_model_name = os.path.join(output_dir, "onnx_model.onnx")
with tempfile.TemporaryDirectory() as temp_dir:
self.save_model(output_dir=temp_dir, model=self.model)
convert(
framework="pt",
model=temp_dir,
tokenizer=self.tokenizer,
output=Path(onnx_model_name),
pipeline_name="sentiment-analysis",
opset=11,
)
self.args.onnx = True
self.tokenizer.save_pretrained(output_dir)
self.config.save_pretrained(output_dir)
self.save_model_args(output_dir)
def _threshold(self, x, threshold):
if x >= threshold:
return 1
return 0
def _move_model_to_device(self):
self.model.to(self.device)
def _get_inputs_dict(self, batch):
if isinstance(batch[0], dict):
inputs = {key: value.squeeze().to(self.device) for key, value in batch[0].items()}
inputs["labels"] = batch[1].to(self.device)
else:
batch = tuple(t.to(self.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
# XLM, DistilBERT and RoBERTa don't use segment_ids
if self.args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if self.args.model_type in ["bert", "xlnet", "albert", "layoutlm"] else None
)
if self.args.model_type == "layoutlm":
inputs["bbox"] = batch[4]
return inputs
def _get_last_metrics(self, metric_values):
return {metric: values[-1] for metric, values in metric_values.items()}
def _create_training_progress_scores(self, multi_label, **kwargs):
extra_metrics = {key: [] for key in kwargs}
if multi_label:
training_progress_scores = {
"global_step": [],
"LRAP": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
else:
if self.model.num_labels == 2:
training_progress_scores = {
"global_step": [],
"tp": [],
"tn": [],
"fp": [],
"fn": [],
"mcc": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
elif self.model.num_labels == 1:
training_progress_scores = {
"global_step": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
else:
training_progress_scores = {
"global_step": [],
"mcc": [],
"train_loss": [],
"eval_loss": [],
**extra_metrics,
}
return training_progress_scores
def save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):
if not output_dir:
output_dir = self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
if model and not self.args.no_save:
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir)
self.tokenizer.save_pretrained(output_dir)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if optimizer and scheduler and self.args.save_optimizer_and_scheduler:
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
self.save_model_args(output_dir)
if results:
output_eval_file = os.path.join(output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
def save_model_args(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
self.args.save(output_dir)
def _load_model_args(self, input_dir):
args = ClassificationArgs()
args.load(input_dir)
return args
def get_named_parameters(self):
return [n for n, p in self.model.named_parameters()]
|
the-stack_0_24391 | # using modified merge function
def compute_union(arr1, arr2):
union = []
index1 = 0
index2 = 0
while (index1 < len(arr1)) and (index2 < len(arr2)):
if arr1[index1] < arr2[index2]:
union.append(arr1[index1])
index1 += 1
elif arr1[index1] > arr2[index2]:
union.append(arr2[index2])
index2 += 1
else:
union.append(arr2[index2])
index1 += 1
index2 += 1
while index1 < len(arr1):
union.append(arr1[index1])
index1 += 1
while index2 < len(arr2):
union.append(arr2[index2])
index2 += 1
return union
# using modified merge function
def compute_intersection(arr1, arr2):
intersection = []
index1 = 0
index2 = 0
while (index1 < len(arr1)) and (index2 < len(arr2)):
if arr1[index1] < arr2[index2]:
index1 += 1
elif arr1[index1] > arr2[index2]:
index2 += 1
else:
intersection.append(arr2[index2])
index1 += 1
index2 += 1
return intersection
if __name__ == "__main__":
arr1 = [1, 3, 4, 5, 7]
arr2 = [2, 3, 5, 6]
# arr1=[2, 5, 6]
# arr2=[4, 6, 8, 10]
union = compute_union(arr1, arr2)
print('union : ', union)
intersection = compute_intersection(arr1, arr2)
print('intersection : ', intersection)
|
the-stack_0_24392 | import os
import stat
from datetime import datetime
from ._compat import _get_argv_encoding
from ._compat import filename_to_ui
from ._compat import get_filesystem_encoding
from ._compat import get_strerror
from ._compat import open_stream
from .exceptions import BadParameter
from .utils import LazyFile
from .utils import safecall
class ParamType:
"""Represents the type of a parameter. Validates and converts values
from the command line or Python into the correct type.
To implement a custom type, subclass and implement at least the
following:
- The :attr:`name` class attribute must be set.
- Calling an instance of the type with ``None`` must return
``None``. This is already implemented by default.
- :meth:`convert` must convert string values to the correct type.
- :meth:`convert` must accept values that are already the correct
type.
- It must be able to convert a value if the ``ctx`` and ``param``
arguments are ``None``. This can occur when converting prompt
input.
"""
is_composite = False
#: the descriptive name of this type
name = None
#: if a list of this type is expected and the value is pulled from a
#: string environment variable, this is what splits it up. `None`
#: means any whitespace. For all parameters the general rule is that
#: whitespace splits them up. The exception are paths and files which
#: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
#: Windows).
envvar_list_splitter = None
def to_info_dict(self):
"""Gather information that could be useful for a tool generating
user-facing documentation.
Use :meth:`click.Context.to_info_dict` to traverse the entire
CLI structure.
.. versionadded:: 8.0
"""
# The class name without the "ParamType" suffix.
param_type = type(self).__name__.partition("ParamType")[0]
param_type = param_type.partition("ParameterType")[0]
return {"param_type": param_type, "name": self.name}
def __call__(self, value, param=None, ctx=None):
if value is not None:
return self.convert(value, param, ctx)
def get_metavar(self, param):
"""Returns the metavar default for this param if it provides one."""
def get_missing_message(self, param):
"""Optionally might return extra information about a missing
parameter.
.. versionadded:: 2.0
"""
def convert(self, value, param, ctx):
"""Convert the value to the correct type. This is not called if
the value is ``None`` (the missing value).
This must accept string values from the command line, as well as
values that are already the correct type. It may also convert
other compatible types.
The ``param`` and ``ctx`` arguments may be ``None`` in certain
situations, such as when converting prompt input.
If the value cannot be converted, call :meth:`fail` with a
descriptive message.
:param value: The value to convert.
:param param: The parameter that is using this type to convert
its value. May be ``None``.
:param ctx: The current context that arrived at this value. May
be ``None``.
"""
return value
def split_envvar_value(self, rv):
"""Given a value from an environment variable this splits it up
into small chunks depending on the defined envvar list splitter.
If the splitter is set to `None`, which means that whitespace splits,
then leading and trailing whitespace is ignored. Otherwise, leading
and trailing splitters usually lead to empty items being included.
"""
return (rv or "").split(self.envvar_list_splitter)
def fail(self, message, param=None, ctx=None):
"""Helper method to fail with an invalid value message."""
raise BadParameter(message, ctx=ctx, param=param)
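# Illustrative only (not part of Click): a minimal custom parameter type
# following the subclassing pattern described in the ParamType docstring above:
# set the ``name`` class attribute and implement ``convert``, accepting values
# that are already the correct type and calling ``fail`` for bad input.
class _ExampleCommaListParamType(ParamType):
    name = "comma_list"
    def convert(self, value, param, ctx):
        if isinstance(value, (list, tuple)):
            return list(value)
        try:
            return [item.strip() for item in value.split(",") if item.strip()]
        except AttributeError:
            self.fail(f"{value!r} cannot be split into a comma-separated list.", param, ctx)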
class CompositeParamType(ParamType):
is_composite = True
@property
def arity(self):
raise NotImplementedError()
class FuncParamType(ParamType):
def __init__(self, func):
self.name = func.__name__
self.func = func
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict["func"] = self.func
return info_dict
def convert(self, value, param, ctx):
try:
return self.func(value)
except ValueError:
try:
value = str(value)
except UnicodeError:
value = value.decode("utf-8", "replace")
self.fail(value, param, ctx)
class UnprocessedParamType(ParamType):
name = "text"
def convert(self, value, param, ctx):
return value
def __repr__(self):
return "UNPROCESSED"
class StringParamType(ParamType):
name = "text"
def convert(self, value, param, ctx):
if isinstance(value, bytes):
enc = _get_argv_encoding()
try:
value = value.decode(enc)
except UnicodeError:
fs_enc = get_filesystem_encoding()
if fs_enc != enc:
try:
value = value.decode(fs_enc)
except UnicodeError:
value = value.decode("utf-8", "replace")
else:
value = value.decode("utf-8", "replace")
return value
return value
def __repr__(self):
return "STRING"
class Choice(ParamType):
"""The choice type allows a value to be checked against a fixed set
of supported values. All of these values have to be strings.
You should only pass a list or tuple of choices. Other iterables
(like generators) may lead to surprising results.
The resulting value will always be one of the originally passed choices
regardless of ``case_sensitive`` or any ``ctx.token_normalize_func``
being specified.
See :ref:`choice-opts` for an example.
:param case_sensitive: Set to false to make choices case
insensitive. Defaults to true.
"""
name = "choice"
def __init__(self, choices, case_sensitive=True):
self.choices = choices
self.case_sensitive = case_sensitive
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict["choices"] = self.choices
info_dict["case_sensitive"] = self.case_sensitive
return info_dict
def get_metavar(self, param):
choices_str = "|".join(self.choices)
# Use curly braces to indicate a required argument.
if param.required and param.param_type_name == "argument":
return f"{{{choices_str}}}"
        # Use square brackets to indicate an option or optional argument.
return f"[{choices_str}]"
def get_missing_message(self, param):
choice_str = ",\n\t".join(self.choices)
return f"Choose from:\n\t{choice_str}"
def convert(self, value, param, ctx):
# Match through normalization and case sensitivity
# first do token_normalize_func, then lowercase
# preserve original `value` to produce an accurate message in
# `self.fail`
normed_value = value
normed_choices = {choice: choice for choice in self.choices}
if ctx is not None and ctx.token_normalize_func is not None:
normed_value = ctx.token_normalize_func(value)
normed_choices = {
ctx.token_normalize_func(normed_choice): original
for normed_choice, original in normed_choices.items()
}
if not self.case_sensitive:
normed_value = normed_value.casefold()
normed_choices = {
normed_choice.casefold(): original
for normed_choice, original in normed_choices.items()
}
if normed_value in normed_choices:
return normed_choices[normed_value]
self.fail(
f"invalid choice: {value}. (choose from {', '.join(self.choices)})",
param,
ctx,
)
def __repr__(self):
return f"Choice({list(self.choices)})"
class DateTime(ParamType):
"""The DateTime type converts date strings into `datetime` objects.
The format strings which are checked are configurable, but default to some
common (non-timezone aware) ISO 8601 formats.
When specifying *DateTime* formats, you should only pass a list or a tuple.
Other iterables, like generators, may lead to surprising results.
The format strings are processed using ``datetime.strptime``, and this
consequently defines the format strings which are allowed.
Parsing is tried using each format, in order, and the first format which
parses successfully is used.
:param formats: A list or tuple of date format strings, in the order in
which they should be tried. Defaults to
``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``,
``'%Y-%m-%d %H:%M:%S'``.
"""
name = "datetime"
def __init__(self, formats=None):
self.formats = formats or ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"]
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict["formats"] = self.formats
return info_dict
def get_metavar(self, param):
return f"[{'|'.join(self.formats)}]"
def _try_to_convert_date(self, value, format):
try:
return datetime.strptime(value, format)
except ValueError:
return None
def convert(self, value, param, ctx):
# Exact match
for format in self.formats:
dtime = self._try_to_convert_date(value, format)
if dtime:
return dtime
self.fail(
f"invalid datetime format: {value}. (choose from {', '.join(self.formats)})"
)
def __repr__(self):
return "DateTime"
class _NumberParamTypeBase(ParamType):
_number_class = None
def convert(self, value, param, ctx):
try:
return self._number_class(value)
except ValueError:
self.fail(f"{value} is not a valid {self.name}", param, ctx)
class _NumberRangeBase(_NumberParamTypeBase):
def __init__(self, min=None, max=None, min_open=False, max_open=False, clamp=False):
self.min = min
self.max = max
self.min_open = min_open
self.max_open = max_open
self.clamp = clamp
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict.update(
min=self.min,
max=self.max,
min_open=self.min_open,
max_open=self.max_open,
clamp=self.clamp,
)
return info_dict
def convert(self, value, param, ctx):
import operator
rv = super().convert(value, param, ctx)
lt_min = self.min is not None and (
operator.le if self.min_open else operator.lt
)(rv, self.min)
gt_max = self.max is not None and (
operator.ge if self.max_open else operator.gt
)(rv, self.max)
if self.clamp:
if lt_min:
return self._clamp(self.min, 1, self.min_open)
if gt_max:
return self._clamp(self.max, -1, self.max_open)
if lt_min or gt_max:
self.fail(f"{rv} is not in the range {self._describe_range()}.", param, ctx)
return rv
def _clamp(self, bound, dir, open):
"""Find the valid value to clamp to bound in the given
direction.
:param bound: The boundary value.
:param dir: 1 or -1 indicating the direction to move.
:param open: If true, the range does not include the bound.
"""
raise NotImplementedError
def _describe_range(self):
"""Describe the range for use in help text."""
if self.min is None:
op = "<" if self.max_open else "<="
return f"x{op}{self.max}"
if self.max is None:
op = ">" if self.min_open else ">="
return f"x{op}{self.min}"
lop = "<" if self.min_open else "<="
rop = "<" if self.max_open else "<="
return f"{self.min}{lop}x{rop}{self.max}"
def __repr__(self):
clamp = " clamped" if self.clamp else ""
return f"<{type(self).__name__} {self._describe_range()}{clamp}>"
class IntParamType(_NumberParamTypeBase):
name = "integer"
_number_class = int
def __repr__(self):
return "INT"
class IntRange(_NumberRangeBase, IntParamType):
"""Restrict an :data:`click.INT` value to a range of accepted
values. See :ref:`ranges`.
If ``min`` or ``max`` are not passed, any value is accepted in that
direction. If ``min_open`` or ``max_open`` are enabled, the
corresponding boundary is not included in the range.
If ``clamp`` is enabled, a value outside the range is clamped to the
boundary instead of failing.
.. versionchanged:: 8.0
Added the ``min_open`` and ``max_open`` parameters.
"""
name = "integer range"
def _clamp(self, bound, dir, open):
if not open:
return bound
return bound + dir
class FloatParamType(_NumberParamTypeBase):
name = "float"
_number_class = float
def __repr__(self):
return "FLOAT"
class FloatRange(_NumberRangeBase, FloatParamType):
"""Restrict a :data:`click.FLOAT` value to a range of accepted
values. See :ref:`ranges`.
If ``min`` or ``max`` are not passed, any value is accepted in that
direction. If ``min_open`` or ``max_open`` are enabled, the
corresponding boundary is not included in the range.
If ``clamp`` is enabled, a value outside the range is clamped to the
boundary instead of failing. This is not supported if either
boundary is marked ``open``.
.. versionchanged:: 8.0
Added the ``min_open`` and ``max_open`` parameters.
"""
name = "float range"
def __init__(self, min=None, max=None, min_open=False, max_open=False, clamp=False):
super().__init__(
min=min, max=max, min_open=min_open, max_open=max_open, clamp=clamp
)
if (min_open or max_open) and clamp:
raise TypeError("Clamping is not supported for open bounds.")
def _clamp(self, bound, dir, open):
if not open:
return bound
# Could use Python 3.9's math.nextafter here, but clamping an
# open float range doesn't seem to be particularly useful. It's
# left up to the user to write a callback to do it if needed.
raise RuntimeError("Clamping is not supported for open bounds.")
class BoolParamType(ParamType):
name = "boolean"
def convert(self, value, param, ctx):
if isinstance(value, bool):
return bool(value)
value = value.lower()
if value in {"1", "true", "t", "yes", "y", "on"}:
return True
elif value in {"0", "false", "f", "no", "n", "off"}:
return False
self.fail(f"{value!r} is not a valid boolean value.", param, ctx)
def __repr__(self):
return "BOOL"
class UUIDParameterType(ParamType):
name = "uuid"
def convert(self, value, param, ctx):
import uuid
try:
return uuid.UUID(value)
except ValueError:
self.fail(f"{value} is not a valid UUID value", param, ctx)
def __repr__(self):
return "UUID"
class File(ParamType):
"""Declares a parameter to be a file for reading or writing. The file
is automatically closed once the context tears down (after the command
finished working).
Files can be opened for reading or writing. The special value ``-``
indicates stdin or stdout depending on the mode.
By default, the file is opened for reading text data, but it can also be
opened in binary mode or for writing. The encoding parameter can be used
to force a specific encoding.
The `lazy` flag controls if the file should be opened immediately or upon
first IO. The default is to be non-lazy for standard input and output
streams as well as files opened for reading, `lazy` otherwise. When opening a
file lazily for reading, it is still opened temporarily for validation, but
will not be held open until first IO. lazy is mainly useful when opening
for writing to avoid creating the file until it is needed.
Starting with Click 2.0, files can also be opened atomically in which
case all writes go into a separate file in the same folder and upon
completion the file will be moved over to the original location. This
is useful if a file regularly read by other users is modified.
See :ref:`file-args` for more information.
"""
name = "filename"
envvar_list_splitter = os.path.pathsep
def __init__(
self, mode="r", encoding=None, errors="strict", lazy=None, atomic=False
):
self.mode = mode
self.encoding = encoding
self.errors = errors
self.lazy = lazy
self.atomic = atomic
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict.update(mode=self.mode, encoding=self.encoding)
return info_dict
def resolve_lazy_flag(self, value):
if self.lazy is not None:
return self.lazy
if value == "-":
return False
elif "w" in self.mode:
return True
return False
def convert(self, value, param, ctx):
try:
if hasattr(value, "read") or hasattr(value, "write"):
return value
lazy = self.resolve_lazy_flag(value)
if lazy:
f = LazyFile(
value, self.mode, self.encoding, self.errors, atomic=self.atomic
)
if ctx is not None:
ctx.call_on_close(f.close_intelligently)
return f
f, should_close = open_stream(
value, self.mode, self.encoding, self.errors, atomic=self.atomic
)
# If a context is provided, we automatically close the file
# at the end of the context execution (or flush out). If a
# context does not exist, it's the caller's responsibility to
# properly close the file. This for instance happens when the
# type is used with prompts.
if ctx is not None:
if should_close:
ctx.call_on_close(safecall(f.close))
else:
ctx.call_on_close(safecall(f.flush))
return f
except OSError as e: # noqa: B014
self.fail(
f"Could not open file: {filename_to_ui(value)}: {get_strerror(e)}",
param,
ctx,
)
class Path(ParamType):
"""The path type is similar to the :class:`File` type but it performs
different checks. First of all, instead of returning an open file
handle it returns just the filename. Secondly, it can perform various
basic checks about what the file or directory should be.
.. versionchanged:: 6.0
`allow_dash` was added.
:param exists: if set to true, the file or directory needs to exist for
this value to be valid. If this is not required and a
file does indeed not exist, then all further checks are
silently skipped.
:param file_okay: controls if a file is a possible value.
:param dir_okay: controls if a directory is a possible value.
:param writable: if true, a writable check is performed.
:param readable: if true, a readable check is performed.
:param resolve_path: if this is true, then the path is fully resolved
before the value is passed onwards. This means
that it's absolute and symlinks are resolved. It
will not expand a tilde-prefix, as this is
supposed to be done by the shell only.
:param allow_dash: If this is set to `True`, a single dash to indicate
standard streams is permitted.
:param path_type: optionally a string type that should be used to
represent the path. The default is `None` which
means the return value will be either bytes or
unicode depending on what makes most sense given the
input data Click deals with.
"""
envvar_list_splitter = os.path.pathsep
def __init__(
self,
exists=False,
file_okay=True,
dir_okay=True,
writable=False,
readable=True,
resolve_path=False,
allow_dash=False,
path_type=None,
):
self.exists = exists
self.file_okay = file_okay
self.dir_okay = dir_okay
self.writable = writable
self.readable = readable
self.resolve_path = resolve_path
self.allow_dash = allow_dash
self.type = path_type
if self.file_okay and not self.dir_okay:
self.name = "file"
self.path_type = "File"
elif self.dir_okay and not self.file_okay:
self.name = "directory"
self.path_type = "Directory"
else:
self.name = "path"
self.path_type = "Path"
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict.update(
exists=self.exists,
file_okay=self.file_okay,
dir_okay=self.dir_okay,
writable=self.writable,
readable=self.readable,
allow_dash=self.allow_dash,
)
return info_dict
def coerce_path_result(self, rv):
if self.type is not None and not isinstance(rv, self.type):
if self.type is str:
rv = rv.decode(get_filesystem_encoding())
else:
rv = rv.encode(get_filesystem_encoding())
return rv
def convert(self, value, param, ctx):
rv = value
is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-")
if not is_dash:
if self.resolve_path:
rv = os.path.realpath(rv)
try:
st = os.stat(rv)
except OSError:
if not self.exists:
return self.coerce_path_result(rv)
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} does not exist.",
param,
ctx,
)
if not self.file_okay and stat.S_ISREG(st.st_mode):
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} is a file.",
param,
ctx,
)
if not self.dir_okay and stat.S_ISDIR(st.st_mode):
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} is a directory.",
param,
ctx,
)
if self.writable and not os.access(value, os.W_OK):
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} is not writable.",
param,
ctx,
)
if self.readable and not os.access(value, os.R_OK):
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} is not readable.",
param,
ctx,
)
return self.coerce_path_result(rv)
class Tuple(CompositeParamType):
"""The default behavior of Click is to apply a type on a value directly.
This works well in most cases, except for when `nargs` is set to a fixed
count and different types should be used for different items. In this
case the :class:`Tuple` type can be used. This type can only be used
if `nargs` is set to a fixed number.
For more information see :ref:`tuple-type`.
This can be selected by using a Python tuple literal as a type.
:param types: a list of types that should be used for the tuple items.
"""
def __init__(self, types):
self.types = [convert_type(ty) for ty in types]
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict["types"] = [t.to_info_dict() for t in self.types]
return info_dict
@property
def name(self):
return f"<{' '.join(ty.name for ty in self.types)}>"
@property
def arity(self):
return len(self.types)
def convert(self, value, param, ctx):
if len(value) != len(self.types):
raise TypeError(
"It would appear that nargs is set to conflict with the"
" composite type arity."
)
return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))
def convert_type(ty, default=None):
"""Converts a callable or python type into the most appropriate
param type.
"""
guessed_type = False
if ty is None and default is not None:
if isinstance(default, tuple):
ty = tuple(map(type, default))
else:
ty = type(default)
guessed_type = True
if isinstance(ty, tuple):
return Tuple(ty)
if isinstance(ty, ParamType):
return ty
if ty is str or ty is None:
return STRING
if ty is int:
return INT
# Booleans are only okay if not guessed. This is done because for
# flags the default value is actually a bit of a lie in that it
# indicates which of the flags is the one we want. See get_default()
# for more information.
if ty is bool and not guessed_type:
return BOOL
if ty is float:
return FLOAT
if guessed_type:
return STRING
# Catch a common mistake
if __debug__:
try:
if issubclass(ty, ParamType):
raise AssertionError(
f"Attempted to use an uninstantiated parameter type ({ty})."
)
except TypeError:
pass
return FuncParamType(ty)
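# Illustrative only: convert_type(int) returns INT, convert_type(None, default=1.0)
# guesses FLOAT from the default, and convert_type(some_callable) falls through
# to FuncParamType wrapping that callable.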
#: A dummy parameter type that just does nothing. From a user's
#: perspective this appears to just be the same as `STRING` but
#: internally no string conversion takes place if the input was bytes.
#: This is usually useful when working with file paths as they can
#: appear in bytes and unicode.
#:
#: For path related uses the :class:`Path` type is a better choice but
#: there are situations where an unprocessed type is useful which is why
#: it is provided.
#:
#: .. versionadded:: 4.0
UNPROCESSED = UnprocessedParamType()
#: A unicode string parameter type which is the implicit default. This
#: can also be selected by using ``str`` as type.
STRING = StringParamType()
#: An integer parameter. This can also be selected by using ``int`` as
#: type.
INT = IntParamType()
#: A floating point value parameter. This can also be selected by using
#: ``float`` as type.
FLOAT = FloatParamType()
#: A boolean parameter. This is the default for boolean flags. This can
#: also be selected by using ``bool`` as a type.
BOOL = BoolParamType()
#: A UUID parameter.
UUID = UUIDParameterType()
|
the-stack_0_24396 | # -*- coding: utf-8 -*-
"""some helper functions."""
import numpy as np
def load_data():
"""load data."""
data = np.loadtxt("dataEx3.csv", delimiter=",", skiprows=1, unpack=True)
x = data[0]
y = data[1]
return x, y
def load_data_from_ex02(sub_sample=True, add_outlier=False):
"""Load data and convert it to the metric system."""
path_dataset = "height_weight_genders.csv"
data = np.genfromtxt(
path_dataset, delimiter=",", skip_header=1, usecols=[1, 2])
height = data[:, 0]
weight = data[:, 1]
gender = np.genfromtxt(
path_dataset, delimiter=",", skip_header=1, usecols=[0],
converters={0: lambda x: 0 if b"Male" in x else 1})
# Convert to metric system
height *= 0.025
weight *= 0.454
# sub-sample
if sub_sample:
height = height[::50]
weight = weight[::50]
if add_outlier:
# outlier experiment
height = np.concatenate([height, [1.1, 1.2]])
weight = np.concatenate([weight, [51.5/0.454, 55.3/0.454]])
return height, weight, gender
def standardize(x):
"""Standardize the original data set."""
mean_x = np.mean(x)
x = x - mean_x
std_x = np.std(x)
x = x / std_x
return x, mean_x, std_x
def build_model_data(height, weight):
"""Form (y,tX) to get regression data in matrix form."""
y = weight
x = height
num_samples = len(y)
tx = np.c_[np.ones(num_samples), x]
return y, tx
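def _example_pipeline():
    """Illustrative sketch only (assumes the CSV file referenced by
    load_data_from_ex02 is present): standardize heights and build (y, tX)."""
    height, weight, _gender = load_data_from_ex02(sub_sample=False)
    std_height, _mean_h, _std_h = standardize(height)
    return build_model_data(std_height, weight)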
|
the-stack_0_24397 | from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class CashBox(osv.osv_memory):
_register = False
_columns = {
'name' : fields.char('Reason', required=True),
# Attention, we don't set a domain, because there is a journal_type key
# in the context of the action
'amount' : fields.float('Amount',
digits_compute = dp.get_precision('Account'),
required=True),
}
def run(self, cr, uid, ids, context=None):
if not context:
context = dict()
active_model = context.get('active_model', False) or False
active_ids = context.get('active_ids', []) or []
records = self.pool[active_model].browse(cr, uid, active_ids, context=context)
return self._run(cr, uid, ids, records, context=context)
def _run(self, cr, uid, ids, records, context=None):
for box in self.browse(cr, uid, ids, context=context):
for record in records:
if not record.journal_id:
raise osv.except_osv(_('Error!'),
_("Please check that the field 'Journal' is set on the Bank Statement"))
if not record.journal_id.internal_account_id:
raise osv.except_osv(_('Error!'),
_("Please check that the field 'Internal Transfers Account' is set on the payment method '%s'.") % (record.journal_id.name,))
self._create_bank_statement_line(cr, uid, box, record, context=context)
return {}
def _create_bank_statement_line(self, cr, uid, box, record, context=None):
if record.state == 'confirm':
raise osv.except_osv(_('Error!'),
_("You cannot put/take money in/out for a bank statement which is closed."))
values = self._compute_values_for_statement_line(cr, uid, box, record, context=context)
return self.pool.get('account.bank.statement').write(cr, uid, [record.id], {'line_ids': [(0, False, values)]}, context=context)
class CashBoxIn(CashBox):
_name = 'cash.box.in'
_columns = CashBox._columns.copy()
_columns.update({
'ref': fields.char('Reference'),
})
def _compute_values_for_statement_line(self, cr, uid, box, record, context=None):
if not record.journal_id.internal_account_id.id:
raise osv.except_osv(_('Configuration Error'), _("You should have defined an 'Internal Transfer Account' in your cash register's journal!"))
return {
'date': record.date,
'statement_id': record.id,
'journal_id': record.journal_id.id,
'amount': box.amount or 0.0,
'account_id': record.journal_id.internal_account_id.id,
'ref': '%s' % (box.ref or ''),
'name': box.name,
}
class CashBoxOut(CashBox):
_name = 'cash.box.out'
_columns = CashBox._columns.copy()
def _compute_values_for_statement_line(self, cr, uid, box, record, context=None):
if not record.journal_id.internal_account_id.id:
raise osv.except_osv(_('Configuration Error'), _("You should have defined an 'Internal Transfer Account' in your cash register's journal!"))
amount = box.amount or 0.0
return {
'date': record.date,
'statement_id': record.id,
'journal_id': record.journal_id.id,
'amount': -amount if amount > 0.0 else amount,
'account_id': record.journal_id.internal_account_id.id,
'name': box.name,
}
|
the-stack_0_24398 | from enum import Enum
class VoteType(Enum):
upvote = "upvote"
test = "test"
def __str__(self):
return self.name
def try_enum(cls, val):
try:
return cls(val)
except ValueError:
return val |
the-stack_0_24399 | '''
Russian doll envelopes problem
You are given a set of envelopes marked with width and height, given as integer pairs (w, h). An envelope can be
placed inside another one only when the other envelope's width and height are both larger, like Russian nesting dolls.
Compute the maximum number of envelopes that can be nested into one chain of "Russian dolls" (i.e. each envelope fits inside the next).
'''
from typing import List
'''
Approach: dynamic programming
1. Sort the integer pairs, first by w and, for equal w, by h; after sorting, smaller envelopes come before larger ones.
2. Observe that the problem has optimal substructure. The state transition is:
select(1) = 1
select(i) = select(j) + 1, where j maximizes count[j] subject to: j < i and j[w, h] < i[w, h]
Time complexity: sorting is O(n log n); counting the nests scans earlier positions for each envelope, O(n^2) in the worst case.
Space complexity: an auxiliary array stores the nesting count at each position, O(n).
'''
class Solution:
def maxEnvelopes(self, envelopes: List[List[int]]) -> int:
n = len(envelopes)
if n == 0:
return 0
        counter = [1] * n  # nesting count of the chain ending at each position
        maxNums = [1] * n  # maximum nesting count seen up to each position
        envelopes.sort(key=lambda e: (e[0] - 1 / e[1]))  # sort by w first; for equal w, sort by h ascending
for i in range(1, n):
maxSub = 0
            for j in range(i - 1, -1, -1):  # search backwards for envelopes that fit inside envelope i
if envelopes[i][1] > envelopes[j][1] and envelopes[i][0] > envelopes[j][0]:
maxSub = max(maxSub, counter[j])
                if maxSub >= maxNums[j]:  # maxSub already matches the best count up to j, so no earlier position can beat it; stop searching
break
counter[i] += maxSub
maxNums[i] = max(maxNums[i - 1], counter[i])
return maxNums[n - 1]
s = Solution()
print(s.maxEnvelopes([[46, 89], [50, 53], [52, 68], [72, 45], [77, 81]]))
print(s.maxEnvelopes([[5, 4], [6, 4], [6, 7], [2, 3]]))
|
the-stack_0_24402 | # -*- coding: utf-8 -*-
"""
Spyder Editor
@author: metalcorebear
"""
from mesa import Agent
#Agent class
class human(Agent):
def __init__(self, unique_id, model):
super().__init__(unique_id, model)
self.pos = unique_id
self.infected, self.susceptible, self.severe = self.model.SIR_instance.initial_infection()
self.was_infected = False
self.recovered = False
self.alive = True
self.day = 0
self.induced_infections = 0
self.infected_others = False
def step(self):
self.model.SIR_instance.interact(self)
self.day += 1
|
the-stack_0_24403 | from cryptography.hazmat.primitives.asymmetric.ed25519 import (
Ed25519PublicKey, Ed25519PrivateKey
)
from cryptography.hazmat.primitives.asymmetric.ed448 import (
Ed448PublicKey, Ed448PrivateKey
)
from cryptography.hazmat.primitives.asymmetric.x25519 import (
X25519PublicKey, X25519PrivateKey
)
from cryptography.hazmat.primitives.asymmetric.x448 import (
X448PublicKey, X448PrivateKey
)
from cryptography.hazmat.primitives.serialization import (
Encoding, PublicFormat, PrivateFormat, NoEncryption
)
from authlib.common.encoding import (
to_unicode, to_bytes,
urlsafe_b64decode, urlsafe_b64encode,
)
from ..rfc7517 import AsymmetricKey
PUBLIC_KEYS_MAP = {
'Ed25519': Ed25519PublicKey,
'Ed448': Ed448PublicKey,
'X25519': X25519PublicKey,
'X448': X448PublicKey,
}
PRIVATE_KEYS_MAP = {
'Ed25519': Ed25519PrivateKey,
'Ed448': Ed448PrivateKey,
'X25519': X25519PrivateKey,
'X448': X448PrivateKey,
}
class OKPKey(AsymmetricKey):
"""Key class of the ``OKP`` key type."""
kty = 'OKP'
REQUIRED_JSON_FIELDS = ['crv', 'x']
PUBLIC_KEY_FIELDS = REQUIRED_JSON_FIELDS
PRIVATE_KEY_FIELDS = ['crv', 'd']
PUBLIC_KEY_CLS = tuple(PUBLIC_KEYS_MAP.values())
PRIVATE_KEY_CLS = tuple(PRIVATE_KEYS_MAP.values())
SSH_PUBLIC_PREFIX = b'ssh-ed25519'
def exchange_shared_key(self, pubkey):
# used in ECDHAlgorithm
if self.private_key and isinstance(self.private_key, (X25519PrivateKey, X448PrivateKey)):
return self.private_key.exchange(pubkey)
raise ValueError('Invalid key for exchanging shared key')
@staticmethod
def get_key_curve(key):
if isinstance(key, (Ed25519PublicKey, Ed25519PrivateKey)):
return 'Ed25519'
elif isinstance(key, (Ed448PublicKey, Ed448PrivateKey)):
return 'Ed448'
elif isinstance(key, (X25519PublicKey, X25519PrivateKey)):
return 'X25519'
elif isinstance(key, (X448PublicKey, X448PrivateKey)):
return 'X448'
def load_private_key(self):
crv_key = PRIVATE_KEYS_MAP[self._dict_data['crv']]
d_bytes = urlsafe_b64decode(to_bytes(self._dict_data['d']))
return crv_key.from_private_bytes(d_bytes)
def load_public_key(self):
crv_key = PUBLIC_KEYS_MAP[self._dict_data['crv']]
x_bytes = urlsafe_b64decode(to_bytes(self._dict_data['x']))
return crv_key.from_public_bytes(x_bytes)
def dumps_private_key(self):
obj = self.dumps_public_key(self.private_key.public_key())
d_bytes = self.private_key.private_bytes(
Encoding.Raw,
PrivateFormat.Raw,
NoEncryption()
)
obj['d'] = to_unicode(urlsafe_b64encode(d_bytes))
return obj
def dumps_public_key(self, public_key=None):
if public_key is None:
public_key = self.public_key
x_bytes = public_key.public_bytes(Encoding.Raw, PublicFormat.Raw)
return {
'crv': self.get_key_curve(public_key),
'x': to_unicode(urlsafe_b64encode(x_bytes)),
}
@classmethod
def generate_key(cls, crv='Ed25519', options=None, is_private=False) -> 'OKPKey':
if crv not in PRIVATE_KEYS_MAP:
raise ValueError('Invalid crv value: "{}"'.format(crv))
private_key_cls = PRIVATE_KEYS_MAP[crv]
raw_key = private_key_cls.generate()
if not is_private:
raw_key = raw_key.public_key()
return cls.import_key(raw_key, options=options)
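def _example_x25519_exchange():
    """Illustrative sketch only, not part of authlib: derive a shared secret from
    two generated X25519 OKP keys (assumes AsymmetricKey exposes ``public_key``)."""
    ours = OKPKey.generate_key('X25519', is_private=True)
    theirs = OKPKey.generate_key('X25519', is_private=True)
    return ours.exchange_shared_key(theirs.public_key)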
|
the-stack_0_24407 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
from contextlib import contextmanager
import numpy.testing as npt
import astropy.units as u
import pytest
from ...import nvas
from ...utils.testing_tools import MockResponse
from ...utils import commons
COORDS_GAL = commons.GalacticCoordGenerator(
l=49.489, b=-0.37, unit=(u.deg, u.deg)) # ARM 2000
COORDS_ICRS = commons.ICRSCoordGenerator(
"12h29m06.69512s +2d03m08.66276s") # 3C 273
DATA_FILES = {'image': 'image.imfits',
'image_search': 'image_results.html'}
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
@pytest.fixture
def patch_post(request):
try:
mp = request.getfixturevalue("monkeypatch")
except AttributeError: # pytest < 3
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(nvas.Nvas, '_request', post_mockreturn)
return mp
@pytest.fixture
def patch_parse_coordinates(request):
def parse_coordinates_mock_return(c):
return c
try:
mp = request.getfixturevalue("monkeypatch")
except AttributeError: # pytest < 3
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(commons, 'parse_coordinates', parse_coordinates_mock_return)
return mp
def post_mockreturn(method, url, data, timeout, **kwargs):
filename = data_path(DATA_FILES['image_search'])
content = open(filename, 'rb').read()
response = MockResponse(content, **kwargs)
return response
@pytest.fixture
def patch_get_readable_fileobj(request):
@contextmanager
def get_readable_fileobj_mockreturn(filename, **kwargs):
encoding = kwargs.get('encoding', None)
if encoding == 'binary':
file_obj = open(data_path(DATA_FILES["image"]), 'rb')
else:
file_obj = open(data_path(DATA_FILES["image"]),
"r", encoding=encoding)
yield file_obj
try:
mp = request.getfixturevalue("monkeypatch")
except AttributeError: # pytest < 3
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(commons, 'get_readable_fileobj',
get_readable_fileobj_mockreturn)
return mp
def deparse_coordinates(cstr):
"""
'19 23 40.001395 +14 31 01.550347' -> '19:23:40.001395 +14:31:01.550347'
"""
return re.sub(r" ([\+-])", r",\1", cstr).replace(" ", ":").replace(",", " ")
@pytest.mark.parametrize(('coordinates'), [COORDS_GAL, COORDS_ICRS])
def test_parse_coordinates(coordinates):
out_str = nvas.core._parse_coordinates(coordinates)
new_coords = commons.ICRSCoordGenerator(
deparse_coordinates(out_str), unit=(u.hour, u.deg))
# if all goes well new_coords and coordinates have same ra and dec
npt.assert_approx_equal(new_coords.ra.degree,
coordinates.transform_to('fk5').ra.degree,
significant=3)
npt.assert_approx_equal(new_coords.dec.degree,
coordinates.transform_to('fk5').dec.degree,
significant=3)
def test_extract_image_urls():
html_in = open(data_path(DATA_FILES['image_search']), 'r').read()
image_list = nvas.core.Nvas.extract_image_urls(html_in)
assert len(image_list) == 2
def test_get_images_async(patch_post, patch_parse_coordinates):
image_list = nvas.core.Nvas.get_images_async(COORDS_ICRS, band='K',
radius=2 * u.arcsec,
max_rms=100)
assert len(image_list) == 2
def test_get_images(patch_post, patch_parse_coordinates,
patch_get_readable_fileobj):
images = nvas.core.Nvas.get_images(COORDS_GAL, radius='5d0m0s', band='all')
assert images is not None
def test_get_image_list(patch_post, patch_parse_coordinates):
image_list = nvas.core.Nvas.get_image_list(
COORDS_GAL, radius=15 * u.arcsec,
max_rms=500, band="all", get_query_payload=True)
npt.assert_approx_equal(image_list["nvas_rad"], 0.25, significant=2)
assert image_list["nvas_bnd"] == ""
assert image_list["nvas_rms"] == 500
image_list = nvas.core.Nvas.get_image_list(
COORDS_GAL, radius=15 * u.arcsec, max_rms=500, band="all")
assert len(image_list) == 2
|
the-stack_0_24409 | import os, sys, json, copy, socket, itertools, string, subprocess
from os.path import expanduser
from pkg_resources import resource_filename
SAMPLE_SETTINGS = resource_filename(__name__, 'data/settings.json')
SETTINGS = 'opencanary.conf'
def byteify(input):
if isinstance(input, dict):
return {byteify(key): byteify(value)
for key, value in input.iteritems()}
elif isinstance(input, list):
return [byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
class Config:
def __init__(self, configfile=SETTINGS):
self.__config = None
self.__configfile = configfile
files = [configfile, "%s/.%s" % (expanduser("~"), configfile), "/etc/opencanaryd/%s"%configfile]
print("** We hope you enjoy using OpenCanary. For more open source Canary goodness, head over to canarytokens.org. **")
for fname in files:
try:
with open(fname, "r") as f:
print("[-] Using config file: %s" % fname)
self.__config = json.load(f)
self.__config = byteify(self.__config)
return
except IOError as e:
print("[-] Failed to open %s for reading (%s)" % (fname, e))
except ValueError as e:
print("[-] Failed to decode json from %s (%s)" % (fname, e))
subprocess.call("cp -r %s /var/tmp/config-err-$(date +%%s)" % fname, shell=True)
except Exception as e:
print("[-] An error occured loading %s (%s)" % (fname, e))
def moduleEnabled(self, module_name):
k = "%s.enabled" % module_name.lower()
if k in self.__config:
return bool(self.__config[k])
return False
def getVal(self, key, default=None):
# throw exception to caller
try:
return self.__config[key]
except KeyError as e:
if default is not None:
return default
raise e
def setValues(self, params):
"""Set all the valid values in params and return a list of errors for invalid"""
# silently ensure that node_id and mac are not modified via web
sacred = ["device.node_id", "device.mac"]
for k in sacred:
if k in params:
del params[k]
# if dhcp is enabled, ignore the static ip settings
if params.get("device.dhcp.enabled", False):
static = ["device.ip_address", "device.netmask",
"device.gw", "device.dns1", "device.dns2"]
for k in static:
if k in params:
del params[k]
        # for each disabled section, drop that section's settings
disabled_modules = tuple(filter(lambda m: not params.get("%s.enabled" % m, False), ["ftp", "ssh", "smb", "http"]))
for k in params.keys():
if not k.endswith("enabled") and k.startswith(disabled_modules):
del params[k]
continue
        # test options independently for validity
errors = []
for key,value in params.iteritems():
try:
self.valid(key,value)
except ConfigException as e:
errors.append(e)
# Test that no ports overlap
ports = {k: v for k, v in self.__config.iteritems() if k.endswith(".port")}
newports = {k: v for k, v in params.iteritems() if k.endswith(".port")}
ports.update(newports)
ports = [(port,setting) for setting, port in ports.iteritems()]
ports.sort()
for port, settings in itertools.groupby(ports, lambda x: x[0]):
settings = list(settings)
if len(settings) > 1:
services = ", ".join([s[1].split(".")[0] for s in settings])
errmsg = "More than one service uses this port (%s)" % services
for (port, setting) in settings:
errors.append(ConfigException(setting, errmsg))
# Delete invalid settings for which an error is reported
for err in errors:
if err.key in params:
del params[err.key]
# Update current settings
self.__config.update(params)
return errors
def setVal(self, key, val):
"""Set value only if valid otherwise throw exception"""
errs = self.setValues({key: val})
# sucessful update
if not errs:
return
# raise first error reported on the update key
for e in errs:
if e.key == key:
raise e
def valid(self, key, val):
"""
        Test the validity of an individual setting.
        Raise a config error message on failure.
TODO: delegate module tests to appropriate module
"""
if key.endswith(".enabled"):
if not ((val is True) or (val is False)):
raise ConfigException(key, "Boolean setting is not True or False (%s)" % val)
if key.endswith(".port"):
if (not isinstance(val,int)) or val < 1 or val > 65535:
raise ConfigException(key, "Invalid port number (%s)" % val)
# Max length of SSH version string is 255 chars including trailing CR and LF
# https://tools.ietf.org/html/rfc4253
if key == "ssh.version" and len(val) > 253:
raise ConfigException(key, "SSH version string too long (%s..)" % val[:5])
if key == "smb.filelist":
extensions = ["PDF", "DOC", "DOCX"]
for f in val:
if "name" not in f:
raise ConfigException(key, "No filename specified for %s" % f)
if "type" not in f:
raise ConfigException(key, "No filetype specified for %s" % f)
if not f["name"]:
raise ConfigException(key, "Filename cannot be empty")
if not f["type"]:
raise Configexception(key, "File type cannot be empty")
if f["type"] not in extensions:
raise Configexception(key, "Extension %s is not supported" % f["type"])
if key == "device.name":
allowed_chars = string.ascii_letters + string.digits + "+-#_"
if len(val) > 100:
raise ConfigException(key, "Name cannot be longer than 100 characters")
elif len(val) < 1:
raise ConfigException(key, "Name ought to be at least one character")
elif any(map(lambda x: x not in allowed_chars, val)):
raise ConfigException(key, "Please use only characters, digits, any of the following: + - # _")
if key == "device.desc":
allowed_chars = string.ascii_letters + string.digits + "+-#_ "
if len(val) > 100:
raise ConfigException(key, "Name cannot be longer than 100 characters")
elif len(val) < 1:
raise ConfigException(key, "Name ought to be at least one character")
elif any(map(lambda x: x not in allowed_chars, val)):
raise ConfigException(key, "Please use only characters, digits, spaces and any of the following: + - # _")
return True
def saveSettings(self):
"""Backup config file to older version and save to new file"""
try:
cfg = self.__configfile
if os.path.isfile(cfg):
os.rename(cfg, cfg + ".bak")
with open(cfg, "w") as f:
json.dump(self.__config, f, sort_keys=True, indent=4, separators=(',', ': '))
except Exception as e:
print("[-] Failed to save config file %s" % e)
raise ConfigException("config", "%s" % e)
def __repr__(self):
return self.__config.__repr__()
def __str__(self):
return self.__config.__str__()
def toDict(self):
""" Return all settings as a dict """
return self.__config
def toJSON(self):
"""
JSON representation of config
"""
return json.dumps(self.__config, sort_keys=True, indent=4, separators=(',', ': '))
class ConfigException(Exception):
"""Exception raised on invalid config value"""
def __init__(self, key, msg):
self.key = key
self.msg = msg
def __str__(self):
return "%s: %s" % (self.key, self.msg)
def __repr__(self):
return "<%s %s (%s)>" % (self.__class__.__name__, self.key, self.msg)
config = Config()
|
the-stack_0_24410 | from redis import Redis
from api_throttler.throttler import Throttler
class FixedWindowThrottlerRedis(Throttler):
"""
Fixed Window API rate throttler using Redis
"""
def __init__(self, cache: Redis, calls: int = 15, period: int = 900):
super().__init__(calls, period, cache)
def is_throttled(self, key: str) -> bool:
""" Return if the API call with a given key is throttled """
if self.cache.setnx(key, 0):
self.cache.expire(key, self.period)
self.cache.incr(key)
return int(self.cache.get(key)) > self.calls
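def _example_usage():
    """Illustrative sketch only: allow at most 5 calls per 60-second fixed window
    for one key (assumes a Redis server reachable on localhost:6379)."""
    throttler = FixedWindowThrottlerRedis(Redis(host="localhost", port=6379), calls=5, period=60)
    for _ in range(7):
        print(throttler.is_throttled("user:42"))  # the last two calls print True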
|
the-stack_0_24411 | # -*- coding: utf-8 -*-
"""
wikipedia_city_big_numbers_extract.py
-------------------------------------
Extract huge numbers that may be the population (further treatment needed) from a wikipedia city's page.
:author: Bouillon Pierre
"""
import re
import requests
from http import HTTPStatus
API = 'https://en.wikipedia.org/w/api.php'
LOOK_FOR = 'Goroka'
POPULATION_REGEX = r'(?:^||,)\s*(("[^"]*")|[^,]*)\s*(?:,\s*|$)'
def main():
target = f'{API}?action=parse&page={LOOK_FOR}&format=json'
r = requests.get(target)
if r.status_code != HTTPStatus.OK:
exit('Can\'t reach the API')
data = r.json()['parse']['text']['*']
print(re.findall(POPULATION_REGEX, data, flags=re.IGNORECASE))
if __name__ == '__main__':
main()
|
the-stack_0_24414 | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from polls.models import Question, Choice
#--- Class-based GenericView
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""최근 생성된 질문 5개를 반환함"""
return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
#--- Function-based View
# def index(request):
# latest_question_list = Question.objects.all().order_by('-pub_date')[:5]
# context = {'latest_question_list': latest_question_list}
# return render(request, 'polls/index.html', context)
# def detail(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/detail.html', {'question': question})
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
        # Redisplay the voting form for this question.
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
        # After successfully handling the POST data, always return an
        # HttpResponseRedirect so the result is delivered via a redirect.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
# def results(request, question_id):
# question = get_object_or_404(Question, pk=question_id)
# return render(request, 'polls/results.html', {'question': question}) |
the-stack_0_24415 | import enum
import os
import sys
import cv2
from photosifter.util import verbose
from photosifter.remote import GooglePhotosLibrary
from photosifter.display import DisplayHandler
from photosifter.display import MAXIMUM_DISPLAY_SIZE
from photosifter.display import BORDER
from photosifter.image_handler import ImageHandler
from photosifter.image_handler import RemoteImageHandler
# Don't really care about too many variables/branches/statements...
# pylint: disable=R0914,W0212,R0912,R0915
class KEY(enum.IntEnum):
"""Enum containing all used keyboard keys."""
ESC = 27
COMMA = 44
DOT = 46
ONE = 49
LEFT = 81
RIGHT = 83
A = 97
D = 100
F = 102
L = 108
P = 112
R = 114
S = 115
X = 120
Y = 121
Z = 122
def sift(args):
# Initialize GooglePhotosLibrary object for remote connection
if args.action == "remote":
try:
library = GooglePhotosLibrary()
except FileNotFoundError as err:
sys.stderr.write(f"{err}\n\n"
"To run in the remote mode, you must have client_secret.json file with\n"
"Google API credentials for your application. Note that this file will\n"
"not be required after the authentication is complete.\n")
sys.exit(11)
try:
if args.action == "remote":
handler = RemoteImageHandler(args.images, library, args.backup_maxlen)
else:
handler = ImageHandler(args.images, args.with_threading, args.backup_maxlen)
except IOError as err:
sys.stderr.write(f"Cannot open directory '{args.images}'\n{err}\n")
sys.exit(1)
if len(handler) < 2:
sys.stderr.write("There are no images to display.\n")
sys.exit(2)
try:
os.mkdir(os.path.join(args.images, 'deleted'))
except FileExistsError:
pass
except OSError as err:
sys.stderr.write(f"Cannot create 'deleted' folder.\n{err}\n")
sys.exit(3)
display = DisplayHandler()
resize_mode = False
swap_mode = False
swap_first = 0
amount = 2
rerender = True
while True:
if rerender:
image_objects = handler.get_list(amount)
display.render(image_objects)
rerender = False
key = -1
while key == -1:
key = cv2.waitKey(1000)
if key in [KEY.LEFT, KEY.COMMA]:
rerender = handler.roll_right()
elif key in [KEY.RIGHT, KEY.DOT]:
rerender = handler.roll_left()
elif key in [KEY.A, KEY.D, KEY.S]:
if amount == 1:
if key != KEY.D:
continue
idx = 0
elif amount == 2 and len(handler) > 1:
if key == KEY.A:
idx = 0
elif key == KEY.D:
idx = 1
elif key == KEY.S:
difference = handler.get_relative(0).focus - handler.get_relative(1).focus
if abs(difference) < args.threshold:
continue
idx = int(difference > 0)
else:
# These convenient key bindings do nothing for more concatenated photos
continue
handler.delete_image(idx, amount)
rerender = True
elif key in [KEY.Y, KEY.Z]:
handler.restore_last()
rerender = True
elif key == KEY.F:
display.toggle_fullscreen()
elif key == KEY.P:
display.toggle_text_embeding()
rerender = True
elif key == KEY.L:
if swap_mode:
display.render_border()
else:
display.render_border(BORDER.GREEN)
swap_first = 0
swap_mode = not swap_mode
elif key == KEY.R:
if resize_mode:
display.render_border()
else:
display.render_border(BORDER.BLUE)
resize_mode = not resize_mode
elif key in [KEY.ESC, KEY.X]:
break
elif KEY.ONE <= key < KEY.ONE + MAXIMUM_DISPLAY_SIZE:
value = key - ord('0')
if resize_mode:
resize_mode = False
amount = value
elif swap_mode:
if value > amount:
continue
if swap_first:
swap_mode = False
handler.swap_images(swap_first - 1, value - 1)
else:
swap_first = value
continue
else:
if value > amount:
continue
handler.delete_image(value - 1, amount)
rerender = True
else:
verbose(f"Key {key} pressed.")
if len(handler) < 2:
break
del display
del handler
|
the-stack_0_24417 | # create_targets.py
# December 20 2021
import typer
import pandas as pd
import numpy as np
import datetime
def main(y_path: str):
startTime = datetime.datetime.now()
df = pd.read_csv(y_path + ".csv")
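    # Derive the three targets: QoL threshold met (end score >= 67), raw change
    # in QLESQ score, and response (an improvement of at least 10 points).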
df['qlesq_QoL_threshold'] = np.where(df['end_qlesq'] >= 67, 1, 0)
df['qlesq_change'] = df['end_qlesq'] - df['start_qlesq']
df['qlesq_resp'] = np.where(df['qlesq_change'] >= 10, 1, 0)
df.to_csv(y_path + "__targets.csv", index = False)
print(f"Completed in: {datetime.datetime.now() - startTime}")
if __name__ == "__main__":
typer.run(main) |
the-stack_0_24421 | import asyncio
# import random
# import time
from datetime import datetime
import pandas as pd
from pymongo.errors import DuplicateKeyError
from ..mongodb import get_db
from ..utils import ensure_dtypes, make_logger
from ..websource.ths_news import fetch_news
collection_name = '同花顺财经'
logger = make_logger(collection_name)
def get_max_id():
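    """Return the largest stored news id, or 0 when the collection is empty."""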
db = get_db()
collection = db[collection_name]
pipe = [
{
'$sort': {
'id': -1
}
},
{
'$project': {
'_id': 0,
'id': 1
}
},
{
'$limit': 1
},
]
try:
return list(collection.aggregate(pipe))[0]['id']
except Exception:
return 0
def _add(collection, docs, init):
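    """Insert docs: incrementally, only docs newer than the stored max id are
    bulk-inserted; on init, docs are inserted one by one so duplicates are skipped."""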
if not init:
max_id = get_max_id()
filtered = list(filter(lambda x: x['id'] > max_id, docs))
if len(filtered):
collection.insert_many(filtered)
logger.info(f"Insert {len(filtered)} docs")
else:
count = 0
for doc in docs:
try:
collection.insert_one(doc)
count += 1
except DuplicateKeyError:
print(f"{doc['id']} {doc['title']}")
logger.info(f"Insert {count} docs")
async def refresh(pages=3, init=False):
"""刷新"""
db = get_db()
collection = db[collection_name]
if init:
        # initialization can only fetch the last half-year of data
collection.drop()
create_index(collection)
    # reverse order
# for p in range(pages, 0, -1):
# docs = await fetch_news(p)
# _add(collection, docs, init)
# for p in range(pages, 0, -1):
docs = await fetch_news(pages)
_add(collection, docs, init)
def create_index(collection):
collection.create_index([("id", -1)], unique=True, name='id_index')
collection.create_index([("ctime", -1)], name='dt_index1')
collection.create_index([("rtime", -1)], name='dt_index2')
if __name__ == '__main__':
    # initialization can only fetch the last half-year of data
asyncio.run(refresh(100, True))
|
the-stack_0_24422 | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum_but.but_ps_util import (PSPossibleDoubleSpendError,
PSSpendToPSAddressesError)
from electrum_but.storage import WalletStorage, StorageReadWriteError
from electrum_but.wallet_db import WalletDB
from electrum_but.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum_but.wallet import update_password_for_directory
from electrum_but.plugin import run_hook
from electrum_but import util
from electrum_but.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis)
from electrum_but.invoices import PR_PAID, PR_FAILED
from electrum_but import blockchain
from electrum_but.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum_but.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from electrum_but.logging import Logger
from electrum_but.gui import messages
from .i18n import _
from . import KIVY_GUI_PATH
from kivy.app import App
from kivy.core.window import Window
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog, PasswordDialog
from .uix.dialogs.choice_dialog import ChoiceDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_but.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_but.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_but.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_but.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
from .uix.dialogs.but_kivy import TorWarnDialog
from .uix.dialogs.warn_dialog import WarnDialog
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_but_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_but.gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register(
'Roboto',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
)
from electrum_but.util import (NoDynamicFeeEstimates, NotEnoughFunds,
BUTK_BIP21_URI_SCHEME, PAY_BIP21_URI_SCHEME,
UserFacingException)
if TYPE_CHECKING:
from . import ElectrumGui
from electrum_but.simple_config import SimpleConfig
from electrum_but.plugin import Plugins
from electrum_but.paymentrequest import PaymentRequest
ATLAS_ICON = f'atlas://{KIVY_GUI_PATH}/theming/light/%s'
class ElectrumWindow(App, Logger):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
tor_auto_on_bp = BooleanProperty()
def toggle_tor_auto_on(self, x):
self.tor_auto_on_bp = not self.electrum_config.get('tor_auto_on', True)
self.electrum_config.set_key('tor_auto_on', self.tor_auto_on_bp, True)
fiat_bypass_tor_bp = BooleanProperty()
def toggle_fiat_bypass_tor(self, x):
self.fiat_bypass_tor_bp = \
not self.electrum_config.get('fiat_bypass_tor', False)
self.electrum_config.set_key('fiat_bypass_tor',
self.fiat_bypass_tor_bp, True)
coro = self.network.restart()
self.network.run_from_another_thread(coro)
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def switch_to_send_screen(func):
# try until send_screen is available
def wrapper(self, *args):
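            # Poll every 100 ms: keep waiting while the wallet or the send
            # screen is not ready (switching to the 'send' tab if needed),
            # then run the wrapped call once and let the interval stop.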
f = lambda dt: (bool(func(self, *args) and False) if self.send_screen else bool(self.switch_to('send') or True)) if self.wallet else True
Clock.schedule_interval(f, 0.1)
return wrapper
@switch_to_send_screen
def set_URI(self, uri):
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
data = str(intent.getDataString())
scheme = str(intent.getScheme()).lower()
if scheme in [BUTK_BIP21_URI_SCHEME, PAY_BIP21_URI_SCHEME]:
self.set_URI(data)
def on_language(self, instance, language):
self.logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
self.logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
self.logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, wallet, key, status):
req = self.wallet.receive_requests.get(key)
if req is None:
return
if self.receive_screen:
if status == PR_PAID:
self.receive_screen.update()
else:
self.receive_screen.update_item(key, req)
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, wallet, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = self.wallet.get_invoice_status(req)
if self.send_screen:
if status == PR_PAID:
self.send_screen.update()
else:
self.send_screen.update_item(key, req)
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, wallet, key):
description = self.wallet.get_label(key)
self.show_info(_('Payment succeeded') + '\n\n' + description)
self._trigger_update_history()
def on_payment_failed(self, event, wallet, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
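    # Illustrative example (assumes the base unit is 'BUT' with
    # decimal_point() == 8): get_amount('1.5 BUT') returns 150000000 duffs.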
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
    '''Current orientation of the window.
    Can be one of `landscape` or `portrait`.
    :data:`orientation` is a read only `AliasProperty`. Defaults to 'landscape'.
    '''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty`. Defaults to 'phone'.
    '''
def __init__(self, **kwargs):
self.is_android = ('ANDROID_DATA' in os.environ)
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
self._use_single_password = False
self.resume_dialog = None
App.__init__(self)#, **kwargs)
Logger.__init__(self)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
self.tor_auto_on_bp = self.electrum_config.get('tor_auto_on', True)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
# cached dialogs
self._plugins_dialog = None
self._settings_dialog = None
self._but_net_dialog = None
self._addresses_dialog = None
self.set_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum_but.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
data_l = data.lower()
if (data_l.startswith(BUTK_BIP21_URI_SCHEME + ':')
or data_l.startswith(PAY_BIP21_URI_SCHEME + ':')):
self.set_URI(data)
return
# try to decode transaction
from electrum_but.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for name in ['send', 'history', 'receive']:
self.update_tab(name)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, key):
from .uix.dialogs.request_dialog import RequestDialog
self.request_popup = RequestDialog('Request', key)
self.request_popup.open()
def show_invoice(self, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def run_other_app(self, app_name):
if not self.is_android:
            return f'Cannot start {app_name}: not an Android system'
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
Intent = autoclass('android.content.Intent')
pm = autoclass('android.content.pm.PackageManager')
activity = PythonActivity.mActivity
pm_ = activity.getPackageManager()
array_pkg = pm_.getInstalledApplications(pm.GET_META_DATA).toArray()
selected_pkg = []
for i in array_pkg:
if "/data/app/" not in getattr(i, "publicSourceDir"):
continue
selected_pkg.append(i)
app_to_launch = app_name
found = False
for i in selected_pkg:
if app_to_launch == getattr(i, "packageName"):
found = True
try:
package_name = getattr(i, "packageName")
app_intent = pm_.getLaunchIntentForPackage(package_name)
app_intent.setAction(Intent.ACTION_VIEW)
app_intent.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
activity.startActivity(app_intent)
def _run_task(activity, app_intent):
time.sleep(0.25)
activity.startActivity(app_intent)
args = (activity, app_intent)
threading.Thread(target=_run_task, args=args).start()
except Exception as e:
                    return f'Error on launching {app_name}: {str(e)}'
if not found:
return f'App {app_name} not found'
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(
title, data, show_text,
failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard,
help_text=help_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return self.scan_qr_non_android(on_complete)
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.but.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def scan_qr_non_android(self, on_complete):
from electrum_but import qrscanner
try:
video_dev = self.electrum_config.get_video_device()
data = qrscanner.scan_barcode(video_dev)
on_complete(data)
except UserFacingException as e:
self.show_error(e)
except BaseException as e:
self.logger.exception('camera error')
self.show_error(repr(e))
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file(KIVY_GUI_PATH + '/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
self.logger.exception('crash on startup')
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for but: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified',
'verified-islock']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_success(self, storage, db, password):
self.password = password
if self.electrum_config.get('single_password'):
self._use_single_password = update_password_for_directory(self.electrum_config, password, password)
self.logger.info(f'use single password: {self._use_single_password}')
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.show_backup_msg()
def show_backup_msg(self):
w = self.wallet
if w and getattr(w.storage, 'backup_message', None):
WarnDialog(w.storage.backup_message, title=_('Information')).open()
w.storage.backup_message = ''
def on_wizard_aborted(self):
# wizard did not return a wallet; and there is no wallet open atm
if not self.wallet:
self.stop()
def load_wallet_by_name(self, path):
def continue_load():
self._load_wallet_by_name(path)
if (self.electrum_config.get('tor_auto_on', True)
and not self.network.detect_tor_proxy()):
TorWarnDialog(self, path, continue_load).open()
else:
continue_load()
def _load_wallet_by_name(self, path):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
if self.password and self._use_single_password:
storage = WalletStorage(path)
# call check_password to decrypt
storage.check_password(self.password)
self.on_open_wallet(self.password, storage)
return
d = OpenWalletDialog(self, path, self.on_open_wallet)
d.open()
def on_open_wallet(self, password, storage):
if not storage.file_exists():
wizard = InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.run('new')
else:
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
assert not db.requires_upgrade()
if db.upgrade_done:
storage.backup_old_version()
if db.check_unfinished_multisig():
wizard = InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.continue_multisig_setup(storage)
else:
self.on_wizard_success(storage, db, password)
def on_stop(self):
self.logger.info('on_stop')
self.history_screen.stop_get_data_thread()
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
util.unregister_callback(self.on_ps_callback)
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
if key == 27 and self.is_exit:
if self.wallet:
psman = self.wallet.psman
is_mixing = (psman.state in psman.mixing_running_states)
is_waiting = psman.is_waiting if is_mixing else False
if is_mixing and not is_waiting:
def on_want_exit(b):
if b:
from kivy.base import stopTouchApp
stopTouchApp()
d = Question(psman.WAIT_MIXING_STOP_MSG, on_want_exit)
d.open()
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def is_wallet_creation_disabled(self):
return bool(self.electrum_config.get('single_password')) and self.password is None
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
dirname = os.path.dirname(self.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.load_wallet_by_name, self.is_wallet_creation_disabled())
d.open()
def plugins_dialog(self):
from .uix.dialogs.plugins import PluginsDialog
if self._plugins_dialog is None:
self._plugins_dialog = PluginsDialog(self)
self._plugins_dialog.update()
self._plugins_dialog.open()
def but_net_dialog(self):
from .uix.dialogs.but_net import ButcoinNetDialog
if self._but_net_dialog is None:
self._but_net_dialog = ButcoinNetDialog(self)
self._but_net_dialog.update()
self._but_net_dialog.open()
def privatesend_dialog(self):
if self.wallet.psman.unsupported:
from .uix.dialogs.privatesend import PSDialogUnsupportedPS as psdlg
else:
from .uix.dialogs.privatesend import PSDialog as psdlg
psdlg(self).open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'plugins':
self.plugins_dialog()
elif name == 'but_net':
self.but_net_dialog()
elif name == 'privatesend':
self.privatesend_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
popup.open()
@profiler
def init_ui(self):
        ''' Initialize the UX part of Electrum. This function performs the
        basic tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_but.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_but.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_but_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_but_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.send_screen = None
self.receive_screen = None
if self.testnet:
self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum-but-testnet.png"
else:
self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum-but.png"
self.root.ids.ps_button.icon = self.ps_icon()
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
self.logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
wallet, tx = args
if wallet.psman.need_notify(tx.txid()):
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
elif event == 'verified-islock':
self._trigger_update_wallet()
def on_ps_callback(self, event, *args):
Clock.schedule_once(lambda dt: self.on_ps_event(event, *args))
def on_ps_event(self, event, *args):
psman = self.wallet.psman
is_mixing = (psman.state in psman.mixing_running_states)
is_waiting = psman.is_waiting if is_mixing else False
if event == 'ps-data-changes':
wallet = args[0]
if wallet == self.wallet:
self._trigger_update_wallet()
if event == 'ps-reserved-changes':
wallet = args[0]
if wallet == self.wallet:
self._trigger_update_wallet()
elif event in ['ps-state-changes', 'ps-wfl-changes',
'ps-keypairs-changes']:
wallet, msg, msg_type = (*args, None, None)[:3]
if wallet == self.wallet:
self.update_ps_btn(is_mixing, is_waiting)
if msg:
if msg_type and msg_type.startswith('inf'):
self.show_info(msg)
else:
WarnDialog(msg, title=_('PrivateSend')).open()
elif event == 'ps-not-enough-sm-denoms':
wallet, denoms_by_vals = args
if wallet == self.wallet:
q = psman.create_sm_denoms_data(confirm_txt=True)
def create_small_denoms(confirmed):
if confirmed:
self.create_small_denoms(denoms_by_vals)
d = Question(q, create_small_denoms)
d.open()
elif event == 'ps-other-coins-arrived':
wallet, txid = args
if wallet == self.wallet:
q = '\n\n'.join([psman.OTHER_COINS_ARRIVED_MSG1.format(txid),
psman.OTHER_COINS_ARRIVED_MSG2,
psman.OTHER_COINS_ARRIVED_MSG3,
psman.OTHER_COINS_ARRIVED_MSG4,
psman.OTHER_COINS_ARRIVED_Q])
def show_coins_dialog(confirmed):
if confirmed:
self.coins_dialog(1)
d = Question(q, show_coins_dialog)
d.open()
def create_small_denoms(self, denoms_by_vals):
w = self.wallet
psman = w.psman
coins = psman.get_biggest_denoms_by_min_round()
if not coins:
msg = psman.create_sm_denoms_data(no_denoms_txt=True)
            self.show_error(msg)
            return
        self.create_new_denoms(coins[0:1])
def create_new_denoms(self, coins):
def on_q_answered(confirmed):
if confirmed:
self.protected(_('Enter your PIN code to sign'
' new denoms transactions'),
self._create_new_denoms, (coins,))
w = self.wallet
psman = w.psman
info = psman.new_denoms_from_coins_info(coins)
q = _('Do you want to create transactions?\n\n{}').format(info)
d = Question(q, on_q_answered)
d.open()
def _create_new_denoms(self, coins, password):
w = self.wallet
psman = w.psman
wfl, err = psman.create_new_denoms_wfl_from_gui(coins, password)
if err:
self.show_error(err)
else:
self.show_info(f'Created New Denoms workflow with'
f' txids: {", ".join(wfl.tx_order)}')
def create_new_collateral(self, coins):
def on_q_answered(confirmed):
if confirmed:
self.protected(_('Enter your PIN code to sign'
' new collateral transactions'),
self._create_new_collateral, (coins,))
w = self.wallet
psman = w.psman
info = psman.new_collateral_from_coins_info(coins)
q = _('Do you want to create transactions?\n\n{}').format(info)
d = Question(q, on_q_answered)
d.open()
def _create_new_collateral(self, coins, password):
w = self.wallet
psman = w.psman
wfl, err = psman.create_new_collateral_wfl_from_gui(coins, password)
if err:
self.show_error(err)
else:
self.show_info(f'Created New Collateral workflow with'
f' txids: {", ".join(wfl.tx_order)}')
def update_ps_btn(self, is_mixing, is_waiting):
ps_button = self.root.ids.ps_button
ps_button.icon = self.ps_icon(active=is_mixing, is_waiting=is_waiting)
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
util.register_callback(self.on_ps_callback,
['ps-data-changes',
'ps-reserved-changes',
'ps-not-enough-sm-denoms',
'ps-other-coins-arrived',
'ps-wfl-changes',
'ps-keypairs-changes',
'ps-state-changes'])
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
self.request_focus_for_main_view()
def request_focus_for_main_view(self):
if platform != 'android':
return
# The main view of the activity might be not have focus
# in which case e.g. the OS "back" button would not work.
# see #6276 (specifically "method 2" and "method 3")
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
PythonActivity.requestFocusForMainView()
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
balance_sat = c + u + x
text = self.format_amount(balance_sat)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self, is_ps=False):
from electrum_but.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
min_rounds = None if not is_ps else self.wallet.psman.mix_rounds
include_ps = (min_rounds is None)
inputs = self.wallet.get_spendable_coins(None,
include_ps=include_ps,
min_rounds=min_rounds)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs,
min_rounds=min_rounds)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(
x,
num_zeros=0,
decimal_point=self.decimal_point(),
is_diff=is_diff,
whitespaces=whitespaces,
)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if x == '!':
return 'max'
return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in duffs/kB
return format_fee_satoshis(fee_rate) + ' duffs/kB'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Butcoin Electrum', message,
app_icon=icon, app_name='Butcoin Electrum')
except ImportError:
            self.logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
@property
def testnet(self):
return self.electrum_config.get('testnet')
@property
def app_icon(self):
return ATLAS_ICON % ('logo-testnet' if self.testnet else 'logo')
def ps_icon(self, active=False, is_waiting=False):
if not active:
icon = 'privatesend'
elif not is_waiting:
icon = 'privatesend_active'
else:
icon = 'privatesend_waiting'
return ATLAS_ICON % icon
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
if self.resume_dialog is not None:
return
now = time.time()
if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
def on_success(x):
self.resume_dialog = None
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=self.stop)
self.resume_dialog = d
d.open()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, *, show_text_with_qr: bool = True):
if not label.data:
return
self.qr_dialog(label.name, label.data, show_text_with_qr)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon=f'atlas://{KIVY_GUI_PATH}/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble(text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon=f'atlas://{KIVY_GUI_PATH}/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = f'atlas://{KIVY_GUI_PATH}/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
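    # Illustrative call from UI code (values are examples only): duration=0
    # keeps the bubble on screen until tapped, arrow_pos=None hides the arrow.
    #   app.show_info_bubble(text=_('Copied'), duration=0, arrow_pos=None)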
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, pr, on_complete):
status = False
if pr and pr.has_expired():
self.send_screen.payment_request = None
status, msg = False, _("Invoice has expired")
Clock.schedule_once(lambda dt: on_complete(status, msg))
return
need_broadcast = True if not pr or pr.need_broadcast_tx else False
txid = tx.txid()
try:
if need_broadcast:
coro = self.wallet.psman.broadcast_transaction(tx)
self.network.run_from_another_thread(coro)
else:
self.logger.info(f'Do not broadcast: {txid}, send bip70'
f' Payment msg to: {pr.payment_url}')
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except PSPossibleDoubleSpendError as e:
msg = str(e)
except PSSpendToPSAddressesError as e:
msg = str(e)
except BestEffortRequestFailed as e:
msg = repr(e)
else:
if pr:
self.send_screen.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
if need_broadcast:
status, msg = True, txid
else:
status, msg = ack_status, ack_msg
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, pr, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') +
':\n' + _('Electrum network not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
is_ps = getattr(screen, 'is_ps', None)
def amount_cb(amount):
if amount == '!':
screen.is_max = True
max_amt = self.get_max_amount()
screen.amount = (max_amt + ' ' + self.base_unit) if max_amt else ''
else:
screen.amount = amount
screen.is_max = False
if is_ps is None:
popup = AmountDialog(show_max, amount, cb=amount_cb)
else:
popup = AmountDialog(show_max, amount, is_ps=is_ps, cb=amount_cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def coins_dialog(self, filter_val=0):
from .uix.dialogs.coins_dialog import CoinsDialog
popup = CoinsDialog(self, filter_val=filter_val)
popup.update()
popup.open()
def fee_dialog(self):
from .uix.dialogs.fee_dialog import FeeDialog
fee_dialog = FeeDialog(self, self.electrum_config, self.set_fee_status)
fee_dialog.open()
def set_fee_status(self):
target, tooltip, dyn = self.electrum_config.get_fee_target()
self.fee_status = target
def on_fee(self, event, *arg):
self.set_fee_status()
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
d = PincodeDialog(
self,
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None)
d.open()
else:
d = Question(
msg,
lambda b: f(*args, self.password) if b else None,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except InvalidPassword:
self.show_error("Invalid password")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def change_password(self, cb):
def on_success(old_password, new_password):
# called if old_password works on self.wallet
self.password = new_password
if self._use_single_password:
path = self.wallet.storage.path
self.stop_wallet()
update_password_for_directory(self.electrum_config, old_password, new_password)
self.load_wallet_by_name(path)
msg = _("Password updated successfully")
else:
self.wallet.update_password(old_password, new_password)
msg = _("Password updated for {}").format(os.path.basename(self.wallet.storage.path))
self.show_info(msg)
on_failure = lambda: self.show_error(_("Password not updated"))
d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
d.open()
def pin_code_dialog(self, cb):
if self._use_single_password and self.has_pin_code():
def on_choice(choice):
if choice == 0:
self.change_pin_code(cb)
else:
self.reset_pin_code(cb)
choices = {0:'Change PIN code', 1:'Reset PIN'}
dialog = ChoiceDialog(
_('PIN Code'), choices, 0,
on_choice,
keep_choice_order=True)
dialog.open()
else:
self.change_pin_code(cb)
def reset_pin_code(self, cb):
on_success = lambda x: self._set_new_pin_code(None, cb)
d = PasswordDialog(self,
basename = self.wallet.basename(),
check_password = self.wallet.check_password,
on_success=on_success,
on_failure=lambda: None,
is_change=False,
has_password=self.wallet.has_password())
d.open()
def _set_new_pin_code(self, new_pin, cb):
self.electrum_config.set_key('pin_code', new_pin)
cb()
self.show_info(_("PIN updated") if new_pin else _('PIN disabled'))
def change_pin_code(self, cb):
on_failure = lambda: self.show_error(_("PIN not updated"))
on_success = lambda old_pin, new_pin: self._set_new_pin_code(new_pin, cb)
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
d.open()
def save_backup(self):
if platform != 'android':
backup_dir = self.electrum_config.get_backup_dir()
if backup_dir:
self._save_backup(backup_dir)
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
return
backup_dir = util.android_backup_dir()
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup(backup_dir))
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self, backup_dir):
try:
new_path = self.wallet.save_backup(backup_dir)
except Exception as e:
self.logger.exception("Failed to save wallet backup")
self.show_error("Failed to save wallet backup" + '\n' + str(e))
return
self.show_info(_("Backup saved:") + f"\n{new_path}")
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
|
the-stack_0_24423 | # coding=utf-8
import json
import os
import platform
import subprocess
import re
from conans.errors import ConanException
from conans.tools import logger
CONAN_HOOK_PYLINT_RCFILE = "CONAN_PYLINTRC"
CONAN_HOOK_PYLINT_WERR = "CONAN_PYLINT_WERR"
CONAN_HOOK_PYLINT_RECIPE_PLUGINS = "CONAN_PYLINT_RECIPE_PLUGINS"
def pre_export(output, conanfile_path, *args, **kwargs):
try:
import astroid # Conan 'pylint_plugin.py' uses astroid
from pylint import epylint as lint
except ImportError as e:
output.error("Install pylint to use 'recipe_linter' hook: 'pip install pylint astroid'")
return
output.info("Lint recipe '{}'".format(conanfile_path))
conanfile_dirname = os.path.dirname(conanfile_path)
lint_args = ['--output-format=json', # JSON output fails in Windows (parsing)
'--py3k',
'--enable=all',
'--reports=no',
'--disable=no-absolute-import',
'--persistent=no',
# These were disabled in linter that was inside Conan
# '--disable=W0702', # No exception type(s) specified (bare-except)
# '--disable=W0703', # Catching too general exception Exception (broad-except)
'--init-hook="import sys;sys.path.extend([\'{}\',])"'.format(conanfile_dirname.replace('\\', '/'))
]
pylint_plugins = os.getenv(CONAN_HOOK_PYLINT_RECIPE_PLUGINS, 'conans.pylint_plugin')
if pylint_plugins:
lint_args += ['--load-plugins={}'.format(pylint_plugins)]
rc_file = os.getenv(CONAN_HOOK_PYLINT_RCFILE)
if rc_file:
lint_args += ['--rcfile', rc_file.replace('\\', '/')]
try:
command = ['pylint'] + lint_args + ['"{}"'.format(conanfile_path).replace('\\', '/')]
command = " ".join(command)
shell = bool(platform.system() != "Windows")
p = subprocess.Popen(command, shell=shell, bufsize=10,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pylint_stdout, pylint_stderr = p.communicate()
# Remove ANSI escape sequences from Pylint output (fails in Windows)
ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
pylint_stdout = ansi_escape.sub('', pylint_stdout.decode('utf-8'))
except Exception as exc:
output.error("Unexpected error running linter: {}".format(exc))
else:
try:
messages = json.loads(pylint_stdout)
except Exception as exc:
output.error("Error parsing JSON output: {}".format(exc))
logger.error(
"Error parsing linter output for recipe '{}': {}".format(conanfile_path, exc))
logger.error(" - linter arguments: {}".format(lint_args))
logger.error(" - output: {}".format(pylint_stdout.getvalue()))
logger.error(" - stderr: {}".format(pylint_stderr.getvalue()))
else:
errors = 0
for msg in messages:
line = "{path}:{line}:{column}: {message-id}: {message} ({symbol})".format(**msg)
output.info(line)
errors += int(msg["type"] == "error")
output.info("Linter detected '{}' errors".format(errors))
if os.getenv(CONAN_HOOK_PYLINT_WERR) and errors:
raise ConanException("Package recipe has linter errors. Please fix them.")
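# Hedged usage sketch (values are examples only): the hook is configured
# through environment variables before exporting a recipe:
#   export CONAN_PYLINT_WERR=1          # turn linter errors into a failed export
#   export CONAN_PYLINTRC=~/.pylintrc   # point pylint at a custom rcfile
#   conan export . user/channel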
|
the-stack_0_24427 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import math
import os
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class RGBToHSVTest(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.test_session(use_gpu=True) as sess:
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllClose(batch2, inp)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in [np.float32, np.float64]:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.test_session(use_gpu=True):
hsv = image_ops.rgb_to_hsv(rgb_np)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = rgb.eval()
self.assertAllClose(rgb_tf, rgb_np)
class GrayscaleToRGBTest(test_util.TensorFlowTestCase):
def _RGBToGrayscale(self, images):
is_batch = True
if len(images.shape) == 3:
is_batch = False
images = np.expand_dims(images, axis=0)
out_shape = images.shape[0:3] + (1,)
out = np.zeros(shape=out_shape, dtype=np.uint8)
for batch in xrange(images.shape[0]):
for y in xrange(images.shape[1]):
for x in xrange(images.shape[2]):
red = images[batch, y, x, 0]
green = images[batch, y, x, 1]
blue = images[batch, y, x, 2]
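          # Reference luma weights (ITU-R BT.601), mirroring image_ops.rgb_to_grayscale.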
gray = 0.2989 * red + 0.5870 * green + 0.1140 * blue
out[batch, y, x, 0] = int(gray)
if not is_batch:
out = np.squeeze(out, axis=0)
return out
def _TestRGBToGrayscale(self, x_np):
y_np = self._RGBToGrayscale(x_np)
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.rgb_to_grayscale(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testBasicRGBToGrayscale(self):
# 4-D input with batch dimension.
x_np = np.array(
[[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 1, 2, 3])
self._TestRGBToGrayscale(x_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2, 3], [4, 10, 1]], dtype=np.uint8).reshape([1, 2, 3])
self._TestRGBToGrayscale(x_np)
def testBasicGrayscaleToRGB(self):
# 4-D input with batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 1, 2, 1])
y_np = np.array(
[[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 1, 2, 3])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
# 3-D input with no batch dimension.
x_np = np.array([[1, 2]], dtype=np.uint8).reshape([1, 2, 1])
y_np = np.array([[1, 1, 1], [2, 2, 2]], dtype=np.uint8).reshape([1, 2, 3])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.grayscale_to_rgb(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testShapeInference(self):
# Shape inference works and produces expected output where possible
rgb_shape = [7, None, 19, 3]
gray_shape = rgb_shape[:-1] + [1]
with self.test_session(use_gpu=True):
rgb_tf = array_ops.placeholder(dtypes.uint8, shape=rgb_shape)
gray = image_ops.rgb_to_grayscale(rgb_tf)
self.assertEqual(gray_shape, gray.get_shape().as_list())
with self.test_session(use_gpu=True):
gray_tf = array_ops.placeholder(dtypes.uint8, shape=gray_shape)
rgb = image_ops.grayscale_to_rgb(gray_tf)
self.assertEqual(rgb_shape, rgb.get_shape().as_list())
# Shape inference does not break for unknown shapes
with self.test_session(use_gpu=True):
rgb_tf_unknown = array_ops.placeholder(dtypes.uint8)
gray_unknown = image_ops.rgb_to_grayscale(rgb_tf_unknown)
self.assertFalse(gray_unknown.get_shape())
with self.test_session(use_gpu=True):
gray_tf_unknown = array_ops.placeholder(dtypes.uint8)
rgb_unknown = image_ops.grayscale_to_rgb(gray_tf_unknown)
self.assertFalse(rgb_unknown.get_shape())
class AdjustGamma(test_util.TensorFlowTestCase):
def test_adjust_gamma_one(self):
"""Same image should be returned for gamma equal to one"""
with self.test_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=1)
y_tf = y.eval()
y_np = x_np
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_zero(self):
"""White image should be returned for gamma equal to zero"""
with self.test_session():
x_data = np.random.uniform(0, 255, (8, 8))
x_np = np.array(x_data, dtype=np.float32)
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_gamma(x, gamma=0)
y_tf = y.eval()
dtype = x.dtype.as_numpy_dtype
y_np = np.array([dtypes.dtype_range[dtype][1]] * x_np.size)
y_np = y_np.reshape((8, 8))
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_less_one(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to half"""
with self.test_session():
x_np = np.arange(0, 255, 4, np.uint8).reshape(8, 8)
y = image_ops.adjust_gamma(x_np, gamma=0.5)
y_tf = np.trunc(y.eval())
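      # The expected values below follow 255 * (x / 255) ** 0.5, i.e. gamma
      # correction applied to the input normalized to [0, 1], then truncated.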
y_np = np.array(
[[0, 31, 45, 55, 63, 71, 78, 84],
[90, 95, 100, 105, 110, 115, 119, 123],
[127, 131, 135, 139, 142, 146, 149, 153],
[156, 159, 162, 165, 168, 171, 174, 177],
[180, 183, 186, 188, 191, 194, 196, 199],
[201, 204, 206, 209, 211, 214, 216, 218],
[221, 223, 225, 228, 230, 232, 234, 236],
[238, 241, 243, 245, 247, 249, 251, 253]],
dtype=np.float32)
self.assertAllClose(y_tf, y_np, 1e-6)
def test_adjust_gamma_greater_one(self):
"""Verifying the output with expected results for gamma
correction with gamma equal to two"""
with self.test_session():
x_np = np.arange(0, 255, 4, np.uint8).reshape(8, 8)
y = image_ops.adjust_gamma(x_np, gamma=2)
y_tf = np.trunc(y.eval())
y_np = np.array(
[[0, 0, 0, 0, 1, 1, 2, 3],
[4, 5, 6, 7, 9, 10, 12, 14],
[16, 18, 20, 22, 25, 27, 30, 33],
[36, 39, 42, 45, 49, 52, 56, 60],
[64, 68, 72, 76, 81, 85, 90, 95],
[100, 105, 110, 116, 121, 127, 132, 138],
[144, 150, 156, 163, 169, 176, 182, 189],
[196, 203, 211, 218, 225, 233, 241, 249]],
dtype=np.float32)
self.assertAllClose(y_tf, y_np, 1e-6)
class AdjustHueTest(test_util.TensorFlowTestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_hue(x, delta)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
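      # Wrap the shifted hue back into [0, 1); the +10.0 keeps fmod's
      # argument non-negative for negative deltas.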
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_hue(x, delta_h)
y_tf = y.eval()
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-5)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError("input must have 3 channels"):
self._adjustHueTf(x_np, delta_h)
class AdjustHueBenchmark(test.Benchmark):
def _benchmarkAdjustHue(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(
image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_hue(inputs, delta)
run_op = control_flow_ops.group(outputs)
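        # Grouping yields an op with no outputs, so each sess.run below
        # executes adjust_hue without copying the image back to the client.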
sess.run(variables.global_variables_initializer())
for i in xrange(warmup_rounds + benchmark_rounds):
if i == warmup_rounds:
start = time.time()
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = "%s" % (cpu_count) if cpu_count is not None else "_all"
print("benchmarkAdjustHue_299_299_3_cpu%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustHue_299_299_3_cpu%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustHueCpu1(self):
self._benchmarkAdjustHue("/cpu:0", 1)
def benchmarkAdjustHueCpuAll(self):
self._benchmarkAdjustHue("/cpu:0", None)
def benchmarkAdjustHueGpu(self):
self._benchmarkAdjustHue(test.gpu_device_name(), None)
class AdjustSaturationBenchmark(test.Benchmark):
def _benchmarkAdjustSaturation(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with session.Session("", graph=ops.Graph(), config=config) as sess:
with ops.device(device):
inputs = variables.Variable(
random_ops.random_uniform(
image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_saturation(inputs, delta)
run_op = control_flow_ops.group(outputs)
sess.run(variables.global_variables_initializer())
for _ in xrange(warmup_rounds):
sess.run(run_op)
start = time.time()
for _ in xrange(benchmark_rounds):
sess.run(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = "%s" % (cpu_count) if cpu_count is not None else "_all"
print("benchmarkAdjustSaturation_599_599_3_cpu%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustSaturation_599_599_3_cpu%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustSaturationCpu1(self):
self._benchmarkAdjustSaturation("/cpu:0", 1)
def benchmarkAdjustSaturationCpuAll(self):
self._benchmarkAdjustSaturation("/cpu:0", None)
def benchmarkAdjustSaturationGpu(self):
self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
class ResizeBilinearBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
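    # Chain num_ops resize ops through control dependencies so a single
    # session run executes them sequentially.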
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bilinear(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
name=("resize_bilinear_%s_%s_%s" %
(image_size[0], image_size[1], num_channels)))
print("%s : %.2f ms/img" % (results["name"], 1000 * results["wall_time"]
/ (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class ResizeBicubicBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal(
[batch_size, image_size[0], image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_bicubic(
img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess,
benchmark_op,
min_iters=20,
name=("resize_bicubic_%s_%s_%s" % (image_size[0], image_size[1],
num_channels)))
print("%s : %.2f ms/img" % (results["name"], 1000 * results["wall_time"]
/ (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
def benchmarkSimilar4Channel(self):
self._benchmarkResize((183, 229), 4)
def benchmarkScaleUp4Channel(self):
self._benchmarkResize((141, 186), 4)
def benchmarkScaleDown4Channel(self):
self._benchmarkResize((749, 603), 4)
class ResizeAreaBenchmark(test.Benchmark):
def _benchmarkResize(self, image_size, num_channels):
batch_size = 1
num_ops = 1000
img = variables.Variable(
random_ops.random_normal([batch_size, image_size[0],
image_size[1], num_channels]),
name="img")
deps = []
for _ in xrange(num_ops):
with ops.control_dependencies(deps):
resize_op = image_ops.resize_area(img, [299, 299], align_corners=False)
deps = [resize_op]
benchmark_op = control_flow_ops.group(*deps)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
results = self.run_op_benchmark(
sess, benchmark_op,
name=("resize_area_%s_%s_%s" %
(image_size[0], image_size[1], num_channels)))
print("%s : %.2f ms/img" % (
results["name"],
1000*results["wall_time"] / (batch_size * num_ops)))
def benchmarkSimilar3Channel(self):
self._benchmarkResize((183, 229), 3)
def benchmarkScaleUp3Channel(self):
self._benchmarkResize((141, 186), 3)
def benchmarkScaleDown3Channel(self):
self._benchmarkResize((749, 603), 3)
def benchmarkSimilar1Channel(self):
self._benchmarkResize((183, 229), 1)
def benchmarkScaleUp1Channel(self):
self._benchmarkResize((141, 186), 1)
def benchmarkScaleDown1Channel(self):
self._benchmarkResize((749, 603), 1)
class AdjustSaturationTest(test_util.TensorFlowTestCase):
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def _adjust_saturation(self, image, saturation_factor):
image = ops.convert_to_tensor(image, name="image")
orig_dtype = image.dtype
flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
saturation_adjusted_image = gen_image_ops.adjust_saturation(
flt_image, saturation_factor)
return image_ops.convert_image_dtype(saturation_adjusted_image,
orig_dtype)
def testHalfSaturationFused(self):
x_shape = [2, 2, 3]
x_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_rgb_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_rgb_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_rgb_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturationFused(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
with self.test_session():
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
y_fused = self._adjust_saturation(x_np, scale).eval()
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class FlipTransposeRotateTest(test_util.TensorFlowTestCase):
def testIdempotentLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(image_ops.flip_left_right(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_left_right(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testRandomFlipLeftRight(self):
x_np = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[3, 2, 1], [3, 2, 1]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf)
count_flipped = 0
count_unflipped = 0
for _ in range(50):
y_tf = y.eval()
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertGreaterEqual(count_flipped, 1)
self.assertGreaterEqual(count_unflipped, 1)
def testIdempotentUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(image_ops.flip_up_down(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.flip_up_down(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testRandomFlipUpDown(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[4, 5, 6], [1, 2, 3]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf)
count_flipped = 0
count_unflipped = 0
for _ in range(50):
y_tf = y.eval()
if y_tf[0][0] == 1:
self.assertAllEqual(y_tf, x_np)
count_unflipped += 1
else:
self.assertAllEqual(y_tf, y_np)
count_flipped += 1
self.assertGreaterEqual(count_flipped, 1)
self.assertGreaterEqual(count_unflipped, 1)
def testIdempotentTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(image_ops.transpose_image(x_tf))
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
def testTranspose(self):
x_np = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.uint8).reshape([2, 3, 1])
y_np = np.array([[1, 4], [2, 5], [3, 6]], dtype=np.uint8).reshape([3, 2, 1])
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.transpose_image(x_tf)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testPartialShapes(self):
p_unknown_rank = array_ops.placeholder(dtypes.uint8)
p_unknown_dims = array_ops.placeholder(
dtypes.uint8, shape=[None, None, None])
p_unknown_width = array_ops.placeholder(dtypes.uint8, shape=[64, None, 3])
p_wrong_rank = array_ops.placeholder(dtypes.uint8, shape=[None, None])
p_zero_dim = array_ops.placeholder(dtypes.uint8, shape=[64, 0, 3])
for op in [
image_ops.flip_left_right, image_ops.flip_up_down,
image_ops.random_flip_left_right, image_ops.random_flip_up_down,
image_ops.transpose_image, image_ops.rot90
]:
transformed_unknown_rank = op(p_unknown_rank)
self.assertEqual(3, transformed_unknown_rank.get_shape().ndims)
transformed_unknown_dims = op(p_unknown_dims)
self.assertEqual(3, transformed_unknown_dims.get_shape().ndims)
transformed_unknown_width = op(p_unknown_width)
self.assertEqual(3, transformed_unknown_width.get_shape().ndims)
with self.assertRaisesRegexp(ValueError, "must be three-dimensional"):
op(p_wrong_rank)
with self.assertRaisesRegexp(ValueError, "must be > 0"):
op(p_zero_dim)
def testRot90GroupOrder(self):
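    # Four successive 90-degree rotations must reproduce the original image.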
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.test_session(use_gpu=True):
rotated = image
for _ in xrange(4):
rotated = image_ops.rot90(rotated)
self.assertAllEqual(image, rotated.eval())
def testRot90NumpyEquivalence(self):
image = np.arange(24, dtype=np.uint8).reshape([2, 4, 3])
with self.test_session(use_gpu=True):
k_placeholder = array_ops.placeholder(dtypes.int32, shape=[])
y_tf = image_ops.rot90(image, k_placeholder)
for k in xrange(4):
y_np = np.rot90(image, k=k)
self.assertAllEqual(y_np, y_tf.eval({k_placeholder: k}))
class RandomFlipTest(test_util.TensorFlowTestCase):
def testRandomLeftRight(self):
x_np = np.array([0, 1], dtype=np.uint8).reshape([1, 2, 1])
num_iterations = 500
hist = [0, 0]
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_left_right(x_tf)
for _ in xrange(num_iterations):
y_np = y.eval().flatten()[0]
hist[y_np] += 1
# Ensure that each entry is observed within 4 standard deviations.
four_stddev = 4.0 * np.sqrt(num_iterations / 2.0)
self.assertAllClose(hist, [num_iterations / 2.0] * 2, atol=four_stddev)
def testRandomUpDown(self):
x_np = np.array([0, 1], dtype=np.uint8).reshape([2, 1, 1])
num_iterations = 500
hist = [0, 0]
with self.test_session(use_gpu=True):
x_tf = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.random_flip_up_down(x_tf)
for _ in xrange(num_iterations):
y_np = y.eval().flatten()[0]
hist[y_np] += 1
# Ensure that each entry is observed within 4 standard deviations.
four_stddev = 4.0 * np.sqrt(num_iterations / 2.0)
self.assertAllClose(hist, [num_iterations / 2.0] * 2, atol=four_stddev)
class AdjustContrastTest(test_util.TensorFlowTestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = y.eval()
self.assertAllClose(y_tf, y_np, 1e-6)
def testDoubleContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testDoubleContrastFloat(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
y_np = np.array(y_data, dtype=np.float).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testHalfContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=0.5)
def testBatchDoubleContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
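    # Reference implementation: adjust_contrast interpolates each pixel
    # toward the per-channel mean over the spatial dimensions.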
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
def _adjustContrastTf(self, x_np, contrast_factor):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = y.eval()
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
class AdjustBrightnessTest(test_util.TensorFlowTestCase):
def _testBrightness(self, x_np, y_np, delta):
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_brightness(x, delta)
y_tf = y.eval()
self.assertAllClose(y_tf, y_np, 1e-6)
def testPositiveDeltaUint8(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testPositiveDeltaFloat(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testBrightness(x_np, y_np, delta=10. / 255.)
def testNegativeDelta(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testBrightness(x_np, y_np, delta=-10. / 255.)
class PerImageWhiteningTest(test_util.TensorFlowTestCase):
def _NumpyPerImageWhitening(self, x):
num_pixels = np.prod(x.shape)
x2 = np.square(x).astype(np.float32)
mn = np.mean(x)
vr = np.mean(x2) - (mn * mn)
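    # Clamp the stddev below by 1 / sqrt(num_pixels), mirroring
    # per_image_standardization, so a uniform image does not divide by zero.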
stddev = max(math.sqrt(vr), 1.0 / math.sqrt(num_pixels))
y = x.astype(np.float32)
y -= mn
y /= stddev
return y
def testBasic(self):
x_shape = [13, 9, 3]
x_np = np.arange(0, np.prod(x_shape), dtype=np.int32).reshape(x_shape)
y_np = self._NumpyPerImageWhitening(x_np)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.per_image_standardization(x)
y_tf = y.eval()
self.assertAllClose(y_tf, y_np, atol=1e-4)
def testUniformImage(self):
im_np = np.ones([19, 19, 3]).astype(np.float32) * 249
im = constant_op.constant(im_np)
whiten = image_ops.per_image_standardization(im)
with self.test_session(use_gpu=True):
whiten_np = whiten.eval()
self.assertFalse(np.any(np.isnan(whiten_np)))
class CropToBoundingBoxTest(test_util.TensorFlowTestCase):
def _CropToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.crop_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._CropToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._CropToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.crop_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, 0, 0, x, x_shape)
def testCrop(self):
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y_shape = [2, 3, 1]
y = [4, 5, 6, 7, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y_shape = [3, 2, 1]
y = [2, 3, 5, 6, 8, 9]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [2, 3, 1]
y = [1, 2, 3, 4, 5, 6]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y_shape = [3, 2, 1]
y = [1, 2, 4, 5, 7, 8]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"'image' must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 1, 1), ([2, 0, 2], 1, 1), ([2, 2, 0], 1, 1),
([0, 2, 2], 0, 1), ([2, 0, 2], 1, 0))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # Multiple assertions could fail, but the evaluation order is arbitrary.
      # Match against a generic pattern.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"assertion failed:",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# (offset_height, offset_width, target_height, target_width), err_msg
test_config = (([-1, 0, 3, 3], "offset_height must be >= 0"),
([0, -1, 3, 3], "offset_width must be >= 0"),
([0, 0, 0, 3], "target_height must be > 0"),
([0, 0, 3, 0], "target_width must be > 0"),
([2, 0, 3, 3], "height must be >= target + offset"),
([0, 2, 3, 3], "width must be >= target + offset"))
for params, err_msg in test_config:
self._assertRaises(x, x_shape, *params, err_msg=err_msg)
class CentralCropTest(test_util.TensorFlowTestCase):
def _assertShapeInference(self, pre_shape, fraction, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.central_crop(image, fraction)
if post_shape is None:
self.assertEqual(y.get_shape().dims, None)
else:
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 1.0)
y_tf = y.eval()
self.assertAllEqual(y_tf, x_np)
self.assertEqual(y.op.name, x.op.name)
def testCropping(self):
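    # central_crop with fraction 0.5 keeps the middle half of each spatial
    # dimension, cropping the 4x8 image down to 2x4.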
x_shape = [4, 8, 1]
x_np = np.array([[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 3, 4, 5, 6, 7, 8]],
dtype=np.int32).reshape(x_shape)
y_np = np.array([[3, 4, 5, 6], [3, 4, 5, 6]]).reshape([2, 4, 1])
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
y = image_ops.central_crop(x, 0.5)
y_tf = y.eval()
self.assertAllEqual(y_tf, y_np)
def testShapeInference(self):
# Test no-op fraction=1.0
self._assertShapeInference([50, 60, 3], 1.0, [50, 60, 3])
self._assertShapeInference([None, 60, 3], 1.0, [None, 60, 3])
self._assertShapeInference([50, None, 3], 1.0, [50, None, 3])
self._assertShapeInference([None, None, 3], 1.0, [None, None, 3])
self._assertShapeInference([50, 60, None], 1.0, [50, 60, None])
self._assertShapeInference([None, None, None], 1.0, [None, None, None])
self._assertShapeInference(None, 1.0, None)
# TODO(toddw): Currently central_crop() doesn't infer the result shape even
# when it's possible. If we change it to do so, we can test as follows:
#
# self._assertShapeInference([50, 60, 3], 0.5, [25, 30, 3])
# self._assertShapeInference([None, 60, 3], 0.5, [None, 30, 3])
# self._assertShapeInference([50, None, 3], 0.5, [25, None, 3])
# self._assertShapeInference([None, None, 3], 0.5, [None, None, 3])
# self._assertShapeInference([50, 60, None], 0.5, [25, 30, None])
# self._assertShapeInference([None, None, None], 0.5, [None, None, None])
# self._assertShapeInference(None, 0.5, None)
def testError(self):
x_shape = [13, 9, 3]
x_np = np.ones(x_shape, dtype=np.float32)
with self.test_session(use_gpu=True):
x = constant_op.constant(x_np, shape=x_shape)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 0.0)
with self.assertRaises(ValueError):
_ = image_ops.central_crop(x, 1.01)
class PadToBoundingBoxTest(test_util.TensorFlowTestCase):
def _PadToBoundingBox(self, x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs):
if use_tensor_inputs:
offset_height = ops.convert_to_tensor(offset_height)
offset_width = ops.convert_to_tensor(offset_width)
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.pad_to_bounding_box(x_tensor, offset_height, offset_width,
target_height, target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
offset_height,
offset_width,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._PadToBoundingBox(x, offset_height, offset_width,
target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._PadToBoundingBox(x, offset_height, offset_width, target_height,
target_width, use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.pad_to_bounding_box(image, 0, 0, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
offset_height, offset_width = [0, 0]
self._assertReturns(x, x_shape, offset_height, offset_width, x, x_shape)
def testPadding(self):
x = [1, 2, 3,
4, 5, 6,
7, 8, 9]
x_shape = [3, 3, 1]
offset_height, offset_width = [1, 0]
y = [0, 0, 0,
1, 2, 3,
4, 5, 6,
7, 8, 9]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 1]
y = [0, 1, 2, 3,
0, 4, 5, 6,
0, 7, 8, 9]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3,
4, 5, 6,
7, 8, 9,
0, 0, 0]
y_shape = [4, 3, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
offset_height, offset_width = [0, 0]
y = [1, 2, 3, 0,
4, 5, 6, 0,
7, 8, 9, 0]
y_shape = [3, 4, 1]
self._assertReturns(x, x_shape, offset_height, offset_width, y, y_shape)
def testShapeInference(self):
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
offset_height, offset_width = [0, 0]
target_height, target_width = [2, 2]
for x_shape in ([3, 5], [1, 3, 5, 1, 1]):
self._assertRaises(x, x_shape, offset_height, offset_width, target_height,
target_width,
"'image' must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
# Each line is a test configuration:
# x_shape, target_height, target_width
test_config = (([0, 2, 2], 2, 2),
([2, 0, 2], 2, 2),
([2, 2, 0], 2, 2))
offset_height, offset_width = [0, 0]
x = []
for x_shape, target_height, target_width in test_config:
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
      # The original error message does not contain backslashes. However, they
      # are added by either the assert op or the runtime. If this behaviour
      # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
offset_height,
offset_width,
target_height,
target_width,
"all dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [3, 3, 1]
x = np.zeros(x_shape)
# Each line is a test configuration:
# offset_height, offset_width, target_height, target_width, err_msg
test_config = ((-1, 0, 4, 4, "offset_height must be >= 0"),
(0, -1, 4, 4, "offset_width must be >= 0"),
(2, 0, 4, 4, "height must be <= target - offset"),
(0, 2, 4, 4, "width must be <= target - offset"))
for config_item in test_config:
self._assertRaises(x, x_shape, *config_item)
class SelectDistortedCropBoxTest(test_util.TensorFlowTestCase):
def _testSampleDistortedBoundingBox(self, image, bounding_box,
min_object_covered, aspect_ratio_range,
area_range):
original_area = float(np.prod(image.shape))
bounding_box_area = float((bounding_box[3] - bounding_box[1]) *
(bounding_box[2] - bounding_box[0]))
image_size_np = np.array(image.shape, dtype=np.int32)
bounding_box_np = (np.array(
bounding_box, dtype=np.float32).reshape([1, 1, 4]))
aspect_ratios = []
area_ratios = []
fraction_object_covered = []
num_iter = 1000
with self.test_session(use_gpu=True):
image_tf = constant_op.constant(image, shape=image.shape)
image_size_tf = constant_op.constant(
image_size_np, shape=image_size_np.shape)
bounding_box_tf = constant_op.constant(
bounding_box_np, dtype=dtypes.float32, shape=bounding_box_np.shape)
begin, size, _ = image_ops.sample_distorted_bounding_box(
image_size=image_size_tf,
bounding_boxes=bounding_box_tf,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range)
y = array_ops.strided_slice(image_tf, begin, begin + size)
for _ in xrange(num_iter):
y_tf = y.eval()
crop_height = y_tf.shape[0]
crop_width = y_tf.shape[1]
aspect_ratio = float(crop_width) / float(crop_height)
area = float(crop_width * crop_height)
aspect_ratios.append(aspect_ratio)
area_ratios.append(area / original_area)
fraction_object_covered.append(float(np.sum(y_tf)) / bounding_box_area)
# Ensure that each entry is observed within 3 standard deviations.
# num_bins = 10
# aspect_ratio_hist, _ = np.histogram(aspect_ratios,
# bins=num_bins,
# range=aspect_ratio_range)
# mean = np.mean(aspect_ratio_hist)
# stddev = np.sqrt(mean)
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# TODO(irving): Since the rejection probability is not independent of the
# aspect ratio, the aspect_ratio random value is not exactly uniformly
# distributed in [min_aspect_ratio, max_aspect_ratio). This test should be
# fixed to reflect the true statistical property, then tightened to enforce
# a stricter bound. Or, ideally, the sample_distorted_bounding_box Op
# be fixed to not use rejection sampling and generate correctly uniform
# aspect ratios.
# self.assertAllClose(aspect_ratio_hist,
# [mean] * num_bins, atol=3.6 * stddev)
# The resulting crop will not be uniformly distributed in area. In practice,
# we find that the area skews towards the small sizes. Instead, we perform
# a weaker test to ensure that the area ratios are merely within the
# specified bounds.
self.assertLessEqual(max(area_ratios), area_range[1])
self.assertGreaterEqual(min(area_ratios), area_range[0])
    # For reference, here is what the distribution of area ratios looks like.
area_ratio_hist, _ = np.histogram(area_ratios, bins=10, range=area_range)
print("area_ratio_hist ", area_ratio_hist)
# Ensure that fraction_object_covered is satisfied.
# TODO(wicke, shlens, dga): Restore this test so that it is no longer flaky.
# self.assertGreaterEqual(min(fraction_object_covered), min_object_covered)
def testWholeImageBoundingBox(self):
height = 40
width = 50
image_size = [height, width, 1]
bounding_box = [0.0, 0.0, 1.0, 1.0]
image = np.arange(
0, np.prod(image_size), dtype=np.int32).reshape(image_size)
self._testSampleDistortedBoundingBox(
image,
bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testWithBoundingBox(self):
height = 40
width = 50
x_shape = [height, width, 1]
image = np.zeros(x_shape, dtype=np.int32)
# Create an object with 1's in a region with area A and require that
# the total pixel values >= 0.1 * A.
min_object_covered = 0.1
xmin = 2
ymin = 3
xmax = 12
ymax = 13
for x in np.arange(xmin, xmax + 1, 1):
for y in np.arange(ymin, ymax + 1, 1):
image[x, y] = 1
# Bounding box is specified as (ymin, xmin, ymax, xmax) in
# relative coordinates.
bounding_box = (float(ymin) / height, float(xmin) / width,
float(ymax) / height, float(xmax) / width)
self._testSampleDistortedBoundingBox(
image,
bounding_box=bounding_box,
min_object_covered=min_object_covered,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
def testSampleDistortedBoundingBoxShape(self):
with self.test_session(use_gpu=True):
image_size = constant_op.constant(
[40, 50, 1], shape=[3], dtype=dtypes.int32)
bounding_box = constant_op.constant(
[0.0, 0.0, 1.0, 1.0],
shape=[4],
dtype=dtypes.float32,)
begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
image_size=image_size,
bounding_boxes=bounding_box,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0))
# Test that the shapes are correct.
self.assertAllEqual([3], begin.get_shape().as_list())
self.assertAllEqual([3], end.get_shape().as_list())
self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
class ResizeImagesTest(test_util.TensorFlowTestCase):
OPTIONS = [image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.BICUBIC,
image_ops.ResizeMethod.AREA]
TYPES = [np.uint8, np.int8, np.int16, np.int32, np.int64,
np.float32, np.float64]
def _assertShapeInference(self, pre_shape, size, post_shape):
# Try single image resize
single_image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_images(single_image, size)
self.assertEqual(y.get_shape().as_list(), post_shape)
# Try batch images resize with known batch size
images = array_ops.placeholder(dtypes.float32, shape=[99] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [99] + post_shape)
# Try batch images resize with unknown batch size
images = array_ops.placeholder(dtypes.float32, shape=[None] + pre_shape)
y = image_ops.resize_images(images, size)
self.assertEqual(y.get_shape().as_list(), [None] + post_shape)
def shouldRunOnGPU(self, opt, nptype):
if (opt == image_ops.ResizeMethod.NEAREST_NEIGHBOR and
nptype in [np.float32, np.float64]):
return True
else:
return False
def testNoOp(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [127, 127, 64, 64,
127, 127, 64, 64,
64, 64, 127, 127,
64, 64, 127, 127,
50, 50, 100, 100,
50, 50, 100, 100]
target_height = 6
target_width = 4
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for opt in self.OPTIONS:
with self.test_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width], opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing a single image must also leave the shape unchanged.
with self.test_session(use_gpu=True):
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, [target_height, target_width],
self.OPTIONS[0])
yshape = array_ops.shape(y)
newshape = yshape.eval()
self.assertAllEqual(single_shape, newshape)
def testTensorArguments(self):
img_shape = [1, 6, 4, 1]
single_shape = [6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [127, 127, 64, 64,
127, 127, 64, 64,
64, 64, 127, 127,
64, 64, 127, 127,
50, 50, 100, 100,
50, 50, 100, 100]
new_size = array_ops.placeholder(dtypes.int32, shape=(2))
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for opt in self.OPTIONS:
with self.test_session(use_gpu=True) as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, new_size, opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
    # Resizing a single image must also leave the shape unchanged.
    with self.test_session(use_gpu=True) as sess:
img_single = img_np.reshape(single_shape)
image = constant_op.constant(img_single, shape=single_shape)
y = image_ops.resize_images(image, new_size, self.OPTIONS[0])
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape], {new_size: [6, 4]})
self.assertAllEqual(single_shape, newshape)
self.assertAllClose(resized, img_single, atol=1e-5)
# Incorrect shape.
with self.assertRaises(ValueError):
new_size = constant_op.constant(4)
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
new_size = constant_op.constant([1, 2, 3])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
# Incorrect dtypes.
with self.assertRaises(ValueError):
new_size = constant_op.constant([6.0, 4])
_ = image_ops.resize_images(image, new_size,
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, 4.0],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [None, 4],
image_ops.ResizeMethod.BILINEAR)
with self.assertRaises(ValueError):
_ = image_ops.resize_images(image, [6, None],
image_ops.ResizeMethod.BILINEAR)
def testSumTensor(self):
img_shape = [1, 6, 4, 1]
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [127, 127, 64, 64,
127, 127, 64, 64,
64, 64, 127, 127,
64, 64, 127, 127,
50, 50, 100, 100,
50, 50, 100, 100]
# Test size where width is specified as a tensor which is a sum
# of two tensors.
width_1 = constant_op.constant(1)
width_2 = constant_op.constant(3)
width = math_ops.add(width_1, width_2)
height = constant_op.constant(6)
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
for opt in self.OPTIONS:
with self.test_session() as sess:
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [height, width], opt)
yshape = array_ops.shape(y)
resized, newshape = sess.run([y, yshape])
self.assertAllEqual(img_shape, newshape)
self.assertAllClose(resized, img_np, atol=1e-5)
def testResizeDown(self):
# This test is also conducted with int8, so 127 is the maximum
# value that can be used.
data = [127, 127, 64, 64,
127, 127, 64, 64,
64, 64, 127, 127,
64, 64, 127, 127,
50, 50, 100, 100,
50, 50, 100, 100]
expected_data = [127, 64,
64, 127,
50, 100]
target_height = 3
target_width = 2
# Test out 3-D and 4-D image shapes.
img_shapes = [[1, 6, 4, 1], [6, 4, 1]]
target_shapes = [[1, target_height, target_width, 1],
[target_height, target_width, 1]]
for target_shape, img_shape in zip(target_shapes, img_shapes):
for nptype in self.TYPES:
img_np = np.array(data, dtype=nptype).reshape(img_shape)
for opt in self.OPTIONS:
if test.is_gpu_available() and self.shouldRunOnGPU(opt, nptype):
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
opt)
expected = np.array(expected_data).reshape(target_shape)
resized = y.eval()
self.assertAllClose(resized, expected, atol=1e-5)
def testResizeUpAlignCornersFalse(self):
img_shape = [1, 3, 2, 1]
data = [64, 32,
32, 64,
50, 100]
target_height = 6
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
64.0, 48.0, 32.0, 32.0,
48.0, 48.0, 48.0, 48.0,
32.0, 48.0, 64.0, 64.0,
41.0, 61.5, 82.0, 82.0,
50.0, 75.0, 100.0, 100.0,
50.0, 75.0, 100.0, 100.0]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
64.0, 64.0, 32.0, 32.0,
64.0, 64.0, 32.0, 32.0,
32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0,
50.0, 50.0, 100.0, 100.0,
50.0, 50.0, 100.0, 100.0]
expected_data[image_ops.ResizeMethod.AREA] = [
64.0, 64.0, 32.0, 32.0,
64.0, 64.0, 32.0, 32.0,
32.0, 32.0, 64.0, 64.0,
32.0, 32.0, 64.0, 64.0,
50.0, 50.0, 100.0, 100.0,
50.0, 50.0, 100.0, 100.0]
for nptype in self.TYPES:
for opt in [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.AREA]:
with self.test_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], opt, align_corners=False)
resized = y.eval()
expected = np.array(expected_data[opt]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpAlignCornersTrue(self):
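    # With align_corners=True the corner pixels of the input and output grids
    # coincide, so sampling uses a scale of (in_size - 1) / (out_size - 1).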
img_shape = [1, 3, 2, 1]
data = [6, 3,
3, 6,
6, 9]
target_height = 5
target_width = 4
expected_data = {}
expected_data[image_ops.ResizeMethod.BILINEAR] = [
6.0, 5.0, 4.0, 3.0,
4.5, 4.5, 4.5, 4.5,
3.0, 4.0, 5.0, 6.0,
4.5, 5.5, 6.5, 7.5,
6.0, 7.0, 8.0, 9.0
]
expected_data[image_ops.ResizeMethod.NEAREST_NEIGHBOR] = [
6.0, 6.0, 3.0, 3.0,
3.0, 3.0, 6.0, 6.0,
3.0, 3.0, 6.0, 6.0,
6.0, 6.0, 9.0, 9.0,
6.0, 6.0, 9.0, 9.0
]
# TODO(b/37749740): Improve alignment of ResizeMethod.AREA when
# align_corners=True.
expected_data[image_ops.ResizeMethod.AREA] = [
6.0, 6.0, 6.0, 3.0,
6.0, 6.0, 6.0, 3.0,
3.0, 3.0, 3.0, 6.0,
3.0, 3.0, 3.0, 6.0,
6.0, 6.0, 6.0, 9.0
]
for nptype in self.TYPES:
for opt in [
image_ops.ResizeMethod.BILINEAR,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
image_ops.ResizeMethod.AREA
]:
with self.test_session(use_gpu=True):
img_np = np.array(data, dtype=nptype).reshape(img_shape)
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(
image, [target_height, target_width], opt, align_corners=True)
resized = y.eval()
expected = np.array(expected_data[opt]).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1e-05)
def testResizeUpBicubic(self):
img_shape = [1, 6, 6, 1]
data = [128, 128, 64, 64, 128, 128, 64, 64,
64, 64, 128, 128, 64, 64, 128, 128,
50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100, 50, 50, 100, 100,
50, 50, 100, 100]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 8
target_width = 8
expected_data = [128, 135, 96, 55, 64, 114, 134, 128,
78, 81, 68, 52, 57, 118, 144, 136,
55, 49, 79, 109, 103, 89, 83, 84,
74, 70, 95, 122, 115, 69, 49, 55,
100, 105, 75, 43, 50, 89, 105, 100,
57, 54, 74, 96, 91, 65, 55, 58,
70, 69, 75, 81, 80, 72, 69, 70,
105, 112, 75, 36, 45, 92, 111, 105]
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethod.BICUBIC)
resized = y.eval()
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
self.assertAllClose(resized, expected, atol=1)
def testResizeDownArea(self):
img_shape = [1, 6, 6, 1]
data = [128, 64, 32, 16, 8, 4,
4, 8, 16, 32, 64, 128,
128, 64, 32, 16, 8, 4,
5, 10, 15, 20, 25, 30,
30, 25, 20, 15, 10, 5,
5, 10, 15, 20, 25, 30]
img_np = np.array(data, dtype=np.uint8).reshape(img_shape)
target_height = 4
target_width = 4
expected_data = [73, 33, 23, 39,
73, 33, 23, 39,
14, 16, 19, 21,
14, 16, 19, 21]
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=img_shape)
y = image_ops.resize_images(image, [target_height, target_width],
image_ops.ResizeMethod.AREA)
expected = np.array(expected_data).reshape(
[1, target_height, target_width, 1])
resized = y.eval()
self.assertAllClose(resized, expected, atol=1)
def testCompareNearestNeighbor(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
with self.test_session(use_gpu=True):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
gpu_val = out_op.eval()
with self.test_session(use_gpu=False):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
cpu_val = out_op.eval()
self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def testCompareBilinear(self):
if test.is_gpu_available():
input_shape = [1, 5, 6, 3]
target_height = 8
target_width = 12
for nptype in [np.float32, np.float64]:
for align_corners in [True, False]:
img_np = np.arange(
0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
value = {}
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
image = constant_op.constant(img_np, shape=input_shape)
new_size = constant_op.constant([target_height, target_width])
out_op = image_ops.resize_images(
image,
new_size,
image_ops.ResizeMethod.BILINEAR,
align_corners=align_corners)
value[use_gpu] = out_op.eval()
self.assertAllClose(value[True], value[False], rtol=1e-5, atol=1e-5)
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 60, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 66, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, 69, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([55, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([59, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([None, None, 3], [55, 66], [55, 66, 3])
self._assertShapeInference([50, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([55, 66, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([50, 69, None], [55, 66], [55, 66, None])
self._assertShapeInference([59, 60, None], [55, 66], [55, 66, None])
self._assertShapeInference([None, None, None], [55, 66], [55, 66, None])
class ResizeImageWithCropOrPadTest(test_util.TensorFlowTestCase):
def _ResizeImageWithCropOrPad(self, x, target_height, target_width,
use_tensor_inputs):
if use_tensor_inputs:
target_height = ops.convert_to_tensor(target_height)
target_width = ops.convert_to_tensor(target_width)
x_tensor = array_ops.placeholder(x.dtype, shape=[None] * x.ndim)
feed_dict = {x_tensor: x}
else:
x_tensor = x
feed_dict = {}
y = image_ops.resize_image_with_crop_or_pad(x_tensor, target_height,
target_width)
if not use_tensor_inputs:
self.assertTrue(y.get_shape().is_fully_defined())
with self.test_session(use_gpu=True):
return y.eval(feed_dict=feed_dict)
def _assertReturns(self,
x,
x_shape,
y,
y_shape,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
target_height, target_width, _ = y_shape
x = np.array(x).reshape(x_shape)
y = np.array(y).reshape(y_shape)
for use_tensor_inputs in use_tensor_inputs_options:
y_tf = self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
self.assertAllClose(y, y_tf)
def _assertRaises(self,
x,
x_shape,
target_height,
target_width,
err_msg,
use_tensor_inputs_options=None):
use_tensor_inputs_options = use_tensor_inputs_options or [False, True]
x = np.array(x).reshape(x_shape)
for use_tensor_inputs in use_tensor_inputs_options:
try:
self._ResizeImageWithCropOrPad(x, target_height, target_width,
use_tensor_inputs)
except Exception as e:
if err_msg not in str(e):
raise
else:
raise AssertionError("Exception not raised: %s" % err_msg)
def _assertShapeInference(self, pre_shape, height, width, post_shape):
image = array_ops.placeholder(dtypes.float32, shape=pre_shape)
y = image_ops.resize_image_with_crop_or_pad(image, height, width)
self.assertEqual(y.get_shape().as_list(), post_shape)
def testNoOp(self):
x_shape = [10, 10, 10]
x = np.random.uniform(size=x_shape)
self._assertReturns(x, x_shape, x, x_shape)
def testPad(self):
# Pad even along col.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0,
0, 5, 6, 7, 8, 0]
y_shape = [2, 6, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along col.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 1, 2, 3, 4, 0, 0,
0, 5, 6, 7, 8, 0, 0]
y_shape = [2, 7, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad even along row.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0,
1, 2, 3, 4,
5, 6, 7, 8,
0, 0, 0, 0]
y_shape = [4, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Pad odd along row.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0, 0, 0,
1, 2, 3, 4,
5, 6, 7, 8,
0, 0, 0, 0,
0, 0, 0, 0]
y_shape = [5, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCrop(self):
# Crop even along col.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [2, 3,
6, 7]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along col.
x = [1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12]
x_shape = [2, 6, 1]
y = [2, 3, 4,
8, 9, 10]
y_shape = [2, 3, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop even along row.
x = [1, 2,
3, 4,
5, 6,
7, 8]
x_shape = [4, 2, 1]
y = [3, 4,
5, 6]
y_shape = [2, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop odd along row.
x = [1, 2,
3, 4,
5, 6,
7, 8,
9, 10,
11, 12,
13, 14,
15, 16]
x_shape = [8, 2, 1]
y = [3, 4,
5, 6,
7, 8,
9, 10,
11, 12]
y_shape = [5, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testCropAndPad(self):
# Pad along row but crop along col.
x = [1, 2, 3, 4,
5, 6, 7, 8]
x_shape = [2, 4, 1]
y = [0, 0,
2, 3,
6, 7,
0, 0]
y_shape = [4, 2, 1]
self._assertReturns(x, x_shape, y, y_shape)
# Crop along row but pad along col.
x = [1, 2,
3, 4,
5, 6,
7, 8]
x_shape = [4, 2, 1]
y = [0, 3, 4, 0,
0, 5, 6, 0]
y_shape = [2, 4, 1]
self._assertReturns(x, x_shape, y, y_shape)
def testShapeInference(self):
self._assertShapeInference([50, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 60, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 66, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, 69, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([55, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([59, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([None, None, 3], 55, 66, [55, 66, 3])
self._assertShapeInference([50, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([55, 66, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([50, 69, None], 55, 66, [55, 66, None])
self._assertShapeInference([59, 60, None], 55, 66, [55, 66, None])
self._assertShapeInference([None, None, None], 55, 66, [55, 66, None])
self._assertShapeInference(None, 55, 66, [55, 66, None])
def testNon3DInput(self):
# Input image is not 3D
x = [0] * 15
target_height, target_width = [4, 4]
for x_shape in ([3, 5],):
self._assertRaises(x, x_shape, target_height, target_width,
"'image' must have either 3 or 4 dimensions.")
for x_shape in ([1, 3, 5, 1, 1],):
self._assertRaises(x, x_shape, target_height, target_width,
"'image' must have either 3 or 4 dimensions.")
def testZeroLengthInput(self):
# Input image has 0-length dimension(s).
target_height, target_width = [1, 1]
x = []
for x_shape in ([0, 2, 2], [2, 0, 2], [2, 2, 0]):
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"all dims of 'image.shape' must be > 0",
use_tensor_inputs_options=[False])
    # The original error message does not contain back slashes. However, they
    # are added by either the assert op or the runtime. If this behaviour
    # changes in the future, the match string will also need to be changed.
self._assertRaises(
x,
x_shape,
target_height,
target_width,
"all dims of \\'image.shape\\' must be > 0",
use_tensor_inputs_options=[True])
def testBadParams(self):
x_shape = [4, 4, 1]
x = np.zeros(x_shape)
# target_height <= 0
target_height, target_width = [0, 5]
self._assertRaises(x, x_shape, target_height, target_width,
"target_height must be > 0")
# target_width <= 0
target_height, target_width = [5, 0]
self._assertRaises(x, x_shape, target_height, target_width,
"target_width must be > 0")
def _SimpleColorRamp():
"""Build a simple color ramp RGB image."""
w, h = 256, 200
i = np.arange(h)[:, None]
j = np.arange(w)
image = np.empty((h, w, 3), dtype=np.uint8)
image[:, :, 0] = i
image[:, :, 1] = j
image[:, :, 2] = (i + j) >> 1
return image
class JpegTest(test_util.TensorFlowTestCase):
# TODO(irving): Add self.assertAverageLess or similar to test_util
def averageError(self, image0, image1):
self.assertEqual(image0.shape, image1.shape)
image0 = image0.astype(int) # Avoid overflow
return np.abs(image0 - image1).sum() / np.prod(image0.shape)
def testExisting(self):
# Read a real jpeg and verify shape
path = ("tensorflow/core/lib/jpeg/testdata/"
"jpeg_merge_test1.jpg")
with self.test_session(use_gpu=True) as sess:
jpeg0 = io_ops.read_file(path)
image0 = image_ops.decode_jpeg(jpeg0)
image1 = image_ops.decode_jpeg(image_ops.encode_jpeg(image0))
jpeg0, image0, image1 = sess.run([jpeg0, image0, image1])
self.assertEqual(len(jpeg0), 3771)
self.assertEqual(image0.shape, (256, 128, 3))
self.assertLess(self.averageError(image0, image1), 1.4)
def testCmyk(self):
# Confirm that CMYK reads in as RGB
base = "tensorflow/core/lib/jpeg/testdata"
rgb_path = os.path.join(base, "jpeg_merge_test1.jpg")
cmyk_path = os.path.join(base, "jpeg_merge_test1_cmyk.jpg")
shape = 256, 128, 3
for channels in 3, 0:
with self.test_session(use_gpu=True) as sess:
rgb = image_ops.decode_jpeg(
io_ops.read_file(rgb_path), channels=channels)
cmyk = image_ops.decode_jpeg(
io_ops.read_file(cmyk_path), channels=channels)
rgb, cmyk = sess.run([rgb, cmyk])
self.assertEqual(rgb.shape, shape)
self.assertEqual(cmyk.shape, shape)
error = self.averageError(rgb, cmyk)
self.assertLess(error, 4)
def testSynthetic(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_ACCURATE")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_ACCURATE")
jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input
self.assertLess(self.averageError(image0, image1), 0.6)
# We should be very close to a fixpoint
self.assertLess(self.averageError(image1, image2), 0.02)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testSyntheticFasterAlgorithm(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it, then encode it
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(
image_ops.encode_jpeg(image1), dct_method="INTEGER_FAST")
jpeg0, image0, image1, image2 = sess.run([jpeg0, image0, image1, image2])
# The decoded-encoded image should be similar to the input, but
# note this is worse than the slower algorithm because it is
# less accurate.
self.assertLess(self.averageError(image0, image1), 0.95)
# Repeated compression / decompression will have a higher error
# with a lossier algorithm.
self.assertLess(self.averageError(image1, image2), 1.05)
# Smooth ramps compress well (input size is 153600)
self.assertGreaterEqual(len(jpeg0), 5000)
self.assertLessEqual(len(jpeg0), 6000)
def testDefaultDCTMethodIsIntegerFast(self):
with self.test_session(use_gpu=True) as sess:
# Compare decoding with both dct_option=INTEGER_FAST and
# default. They should be the same.
image0 = constant_op.constant(_SimpleColorRamp())
jpeg0 = image_ops.encode_jpeg(image0)
image1 = image_ops.decode_jpeg(jpeg0, dct_method="INTEGER_FAST")
image2 = image_ops.decode_jpeg(jpeg0)
image1, image2 = sess.run([image1, image2])
# The images should be the same.
self.assertAllClose(image1, image2)
def testShape(self):
with self.test_session(use_gpu=True) as sess:
jpeg = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_jpeg(jpeg, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class PngTest(test_util.TensorFlowTestCase):
def testExisting(self):
# Read some real PNGs, converting to different channel numbers
prefix = "tensorflow/core/lib/png/testdata/"
inputs = (1, "lena_gray.png"), (4, "lena_rgba.png")
for channels_in, filename in inputs:
for channels in 0, 1, 3, 4:
with self.test_session(use_gpu=True) as sess:
png0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_png(png0, channels=channels)
png0, image0 = sess.run([png0, image0])
self.assertEqual(image0.shape, (26, 51, channels or channels_in))
if channels == channels_in:
image1 = image_ops.decode_png(image_ops.encode_png(image0))
self.assertAllEqual(image0, image1.eval())
def testSynthetic(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp())
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = sess.run([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 400)
self.assertLessEqual(len(png0), 750)
def testSyntheticUint16(self):
with self.test_session(use_gpu=True) as sess:
# Encode it, then decode it
image0 = constant_op.constant(_SimpleColorRamp(), dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = sess.run([png0, image0, image1])
# PNG is lossless
self.assertAllEqual(image0, image1)
# Smooth ramps compress well, but not too well
self.assertGreaterEqual(len(png0), 800)
self.assertLessEqual(len(png0), 1500)
def testSyntheticTwoChannel(self):
with self.test_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = _SimpleColorRamp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0)
png0, image0, image1 = sess.run([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testSyntheticTwoChannelUint16(self):
with self.test_session(use_gpu=True) as sess:
# Strip the b channel from an rgb image to get a two-channel image.
gray_alpha = _SimpleColorRamp()[:, :, 0:2]
image0 = constant_op.constant(gray_alpha, dtype=dtypes.uint16)
png0 = image_ops.encode_png(image0, compression=7)
image1 = image_ops.decode_png(png0, dtype=dtypes.uint16)
png0, image0, image1 = sess.run([png0, image0, image1])
self.assertEqual(2, image0.shape[-1])
self.assertAllEqual(image0, image1)
def testShape(self):
with self.test_session(use_gpu=True):
png = constant_op.constant("nonsense")
for channels in 0, 1, 3:
image = image_ops.decode_png(png, channels=channels)
self.assertEqual(image.get_shape().as_list(),
[None, None, channels or None])
class GifTest(test_util.TensorFlowTestCase):
def testValid(self):
# Read some real GIFs
prefix = "tensorflow/core/lib/gif/testdata/"
filename = "scan.gif"
WIDTH = 20
HEIGHT = 40
STRIDE = 5
shape = (12, HEIGHT, WIDTH, 3)
with self.test_session(use_gpu=True) as sess:
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
gif0, image0 = sess.run([gif0, image0])
self.assertEqual(image0.shape, shape)
for frame_idx, frame in enumerate(image0):
gt = np.zeros(shape[1:], dtype=np.uint8)
start = frame_idx * STRIDE
end = (frame_idx + 1) * STRIDE
print(frame_idx)
if end <= WIDTH:
gt[:, start:end, :] = 255
else:
start -= WIDTH
end -= WIDTH
gt[start:end, :, :] = 255
self.assertAllClose(frame, gt)
def testInValid(self):
# Read some real GIFs
prefix = "tensorflow/core/lib/gif/testdata/"
filename = "optimized.gif"
with self.test_session(use_gpu=True) as sess:
gif0 = io_ops.read_file(prefix + filename)
image0 = image_ops.decode_gif(gif0)
with self.assertRaises(errors.InvalidArgumentError):
gif0, image0 = sess.run([gif0, image0])
def testShape(self):
with self.test_session(use_gpu=True) as sess:
gif = constant_op.constant("nonsense")
image = image_ops.decode_gif(gif)
self.assertEqual(image.get_shape().as_list(), [None, None, None, 3])
class ConvertImageTest(test_util.TensorFlowTestCase):
def _convert(self, original, original_dtype, output_dtype, expected):
x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
with self.test_session(use_gpu=True):
image = constant_op.constant(x_np)
y = image_ops.convert_image_dtype(image, output_dtype)
self.assertTrue(y.dtype == output_dtype)
self.assertAllClose(y.eval(), y_np, atol=1e-5)
def testNoConvert(self):
# Make sure converting to the same data type creates only an identity op
with self.test_session(use_gpu=True):
image = constant_op.constant([1], dtype=dtypes.uint8)
image_ops.convert_image_dtype(image, dtypes.uint8)
y = image_ops.convert_image_dtype(image, dtypes.uint8)
self.assertEquals(y.op.type, "Identity")
self.assertEquals(y.op.inputs[0], image)
def testConvertBetweenInteger(self):
    # Make sure converting between integer types scales appropriately
with self.test_session(use_gpu=True):
self._convert([0, 255], dtypes.uint8, dtypes.int16, [0, 255 * 128])
self._convert([0, 32767], dtypes.int16, dtypes.uint8, [0, 255])
def testConvertBetweenFloat(self):
    # Make sure converting between float types does nothing interesting
with self.test_session(use_gpu=True):
self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
[-1.0, 0, 1.0, 200000])
self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
[-1.0, 0, 1.0, 200000])
def testConvertBetweenIntegerAndFloat(self):
# Make sure converting from and to a float type scales appropriately
with self.test_session(use_gpu=True):
self._convert([0, 1, 255], dtypes.uint8, dtypes.float32,
[0, 1.0 / 255.0, 1])
self._convert([0, 1.1 / 255.0, 1], dtypes.float32, dtypes.uint8,
[0, 1, 255])
def testConvertBetweenInt16AndInt8(self):
with self.test_session(use_gpu=True):
# uint8, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.uint8,
[0, 255])
self._convert([0, 255], dtypes.uint8, dtypes.uint16,
[0, 255 * 256])
# int8, uint16
self._convert([0, 127 * 2 * 256], dtypes.uint16, dtypes.int8,
[0, 127])
self._convert([0, 127], dtypes.int8, dtypes.uint16,
[0, 127 * 2 * 256])
# int16, uint16
self._convert([0, 255 * 256], dtypes.uint16, dtypes.int16,
[0, 255 * 128])
self._convert([0, 255 * 128], dtypes.int16, dtypes.uint16,
[0, 255 * 256])
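# Hedged reading of the expectations above (illustration only, not part of the
# original tests): integer-to-integer conversion scales by a power of two, e.g.
# uint8 255 -> 255 * 256 = 65280 as uint16 (not 65535) and 255 * 128 as int16,
# while integer <-> float conversions map [0, max] onto [0.0, 1.0].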
class TotalVariationTest(test_util.TensorFlowTestCase):
"""Tests the function total_variation() in image_ops.
We test a few small handmade examples, as well as
some larger examples using an equivalent numpy
implementation of the total_variation() function.
We do NOT test for overflows and invalid / edge-case arguments.
"""
def _test(self, x_np, y_np):
"""Test that the TensorFlow implementation of
total_variation(x_np) calculates the values in y_np.
Note that these may be float-numbers so we only test
for approximate equality within some narrow error-bound.
"""
# Create a TensorFlow session.
with self.test_session(use_gpu=True):
# Add a constant to the TensorFlow graph that holds the input.
x_tf = constant_op.constant(x_np, shape=x_np.shape)
# Add ops for calculating the total variation using TensorFlow.
y = image_ops.total_variation(images=x_tf)
# Run the TensorFlow session to calculate the result.
y_tf = y.eval()
# Assert that the results are as expected within
# some small error-bound in case they are float-values.
self.assertAllClose(y_tf, y_np)
def _total_variation_np(self, x_np):
"""Calculate the total variation of x_np using numpy.
This implements the same function as TensorFlow but
using numpy instead.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
dim = len(x_np.shape)
if dim == 3:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[1:, :, :] - x_np[:-1, :, :]
dif2 = x_np[:, 1:, :] - x_np[:, :-1, :]
# Sum for all axis.
sum_axis = None
elif dim == 4:
# Calculate differences for neighboring pixel-values using slices.
dif1 = x_np[:, 1:, :, :] - x_np[:, :-1, :, :]
dif2 = x_np[:, :, 1:, :] - x_np[:, :, :-1, :]
# Only sum for the last 3 axis.
sum_axis = (1, 2, 3)
else:
# This should not occur in this test-code.
pass
tot_var = np.sum(np.abs(dif1), axis=sum_axis) + \
np.sum(np.abs(dif2), axis=sum_axis)
return tot_var
def _test_tensorflow_vs_numpy(self, x_np):
"""Test the TensorFlow implementation against a numpy implementation.
Args:
x_np: Numpy array with 3 or 4 dimensions.
"""
# Calculate the y-values using the numpy implementation.
y_np = self._total_variation_np(x_np)
self._test(x_np, y_np)
def _generateArray(self, shape):
"""Generate an array of the given shape for use in testing.
The numbers are calculated as the cumulative sum, which
causes the difference between neighboring numbers to vary."""
# Flattened length of the array.
flat_len = np.prod(shape)
a = np.array(range(flat_len), dtype=int)
a = np.cumsum(a)
a = a.reshape(shape)
return a
def testTotalVariationNumpy(self):
"""Test the TensorFlow implementation against a numpy implementation.
The two implementations are very similar so it is possible that both
have the same bug, which would not be detected by this test. It is
therefore necessary to test with manually crafted data as well."""
# Generate a test-array.
# This is an 'image' with 100x80 pixels and 3 color channels.
a = self._generateArray(shape=(100, 80, 3))
# Test the TensorFlow implementation vs. numpy implementation.
# We use a numpy implementation to check the results that are
# calculated using TensorFlow are correct.
self._test_tensorflow_vs_numpy(a)
self._test_tensorflow_vs_numpy(a + 1)
self._test_tensorflow_vs_numpy(-a)
self._test_tensorflow_vs_numpy(1.1 * a)
# Expand to a 4-dim array.
b = a[np.newaxis, :]
# Combine several variations of the image into a single 4-dim array.
multi = np.vstack((b, b + 1, -b, 1.1 * b))
# Test that the TensorFlow function can also handle 4-dim arrays.
self._test_tensorflow_vs_numpy(multi)
def testTotalVariationHandmade(self):
"""Test the total variation for a few handmade examples."""
# We create an image that is 2x2 pixels with 3 color channels.
# The image is very small so we can check the result by hand.
# Red color channel.
# The following are the sum of absolute differences between the pixels.
# sum row dif = (4-1) + (7-2) = 3 + 5 = 8
# sum col dif = (2-1) + (7-4) = 1 + 3 = 4
r = [[1, 2],
[4, 7]]
    # Green color channel.
# sum row dif = 18 + 29 = 47
# sum col dif = 7 + 18 = 25
g = [[11, 18],
[29, 47]]
    # Blue color channel.
# sum row dif = 120 + 193 = 313
# sum col dif = 47 + 120 = 167
b = [[73, 120],
[193, 313]]
# Combine the 3 color channels into a single 3-dim array.
# The shape is (2, 2, 3) corresponding to (height, width and color).
a = np.dstack((r, g, b))
# Total variation for this image.
# Sum of all pixel differences = 8 + 4 + 47 + 25 + 313 + 167 = 564
tot_var = 564
# Calculate the total variation using TensorFlow and assert it is correct.
self._test(a, tot_var)
# If we add 1 to all pixel-values then the total variation is unchanged.
self._test(a + 1, tot_var)
# If we negate all pixel-values then the total variation is unchanged.
self._test(-a, tot_var)
# Scale the pixel-values by a float. This scales the total variation as well.
b = 1.1 * a
self._test(b, 1.1 * tot_var)
# Scale by another float.
c = 1.2 * a
self._test(c, 1.2 * tot_var)
# Combine these 3 images into a single array of shape (3, 2, 2, 3)
# where the first dimension is for the image-number.
multi = np.vstack((a[np.newaxis, :],
b[np.newaxis, :],
c[np.newaxis, :]))
# Check that TensorFlow correctly calculates the total variation
# for each image individually and returns the correct array.
self._test(multi, tot_var * np.array([1.0, 1.1, 1.2]))
if __name__ == "__main__":
googletest.main()
|
the-stack_0_24428 | import numpy as np
from slick_dnn.autograd import Autograd
class MSELoss(Autograd):
def forward(self, ctx, truth, predictions):
if truth.shape != predictions.shape:
raise ValueError("Wrong shapes")
ctx.save_for_back(truth, predictions)
return ((truth - predictions) ** 2).mean()
def backward(self, ctx, grad):
truth, predictions = ctx.data_for_back
num_batches = truth.shape[0]
return (grad * 2 / num_batches * (truth - predictions),
grad * 2 / num_batches * (predictions - truth))
class CrossEntropyLoss(Autograd):
def forward(self, ctx, truth, predictions):
ctx.save_for_back(truth, predictions)
predictions = np.clip(predictions, 1e-15, 1 - 1e-15)
return - truth * np.log(predictions) - (1 - truth) * np.log(1 - predictions)
def backward(self, ctx, grad):
truth, predictions = ctx.data_for_back
num_batches = truth.shape[0]
p = predictions
p = np.clip(p, 1e-15, 1 - 1e-15)
return (grad * (np.log(1 - p) - np.log(p)) / num_batches,
grad * (- (truth / p) + (1 - truth) / (1 - p)) / num_batches)
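# Hedged usage sketch (illustration only, not part of the library): exercising
# MSELoss directly through its forward/backward pair.  The _Ctx stand-in below
# is hypothetical; it only mimics the save_for_back/data_for_back contract used
# above and assumes Autograd subclasses can be instantiated without arguments.
class _Ctx:
    def save_for_back(self, *tensors):
        # keep the same attribute name that backward() reads back
        self.data_for_back = tensors
def _mse_loss_sanity_check():
    truth = np.array([[1.0, 0.0]])
    preds = np.array([[0.8, 0.1]])
    ctx = _Ctx()
    loss = MSELoss().forward(ctx, truth, preds)      # mean((truth - preds) ** 2) == 0.025
    d_truth, d_preds = MSELoss().backward(ctx, 1.0)  # +/- (2 / num_batches) * (truth - preds)
    return loss, d_truth, d_preds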
|
the-stack_0_24429 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.abspath(path.dirname(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "linkerd", "__about__.py")) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
CHECKS_BASE_REQ = 'datadog_checks_base'
setup(
name='datadog-linkerd',
version=ABOUT["__version__"],
description='The Linkerd check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent linkerd check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='[email protected]',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.linkerd'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
# Extra files to ship with the wheel package
include_package_data=True,
)
|
the-stack_0_24430 | from __future__ import absolute_import
class AverageMeter(object):
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
        self.avg = self.sum / self.count
|
the-stack_0_24431 | #
# Copyright 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import hashlib
import itertools
import json
import operator
import pkg_resources
import threading
import uuid
from gnocchiclient import exceptions as gnocchi_exc
from keystoneauth1 import exceptions as ka_exceptions
from oslo_log import log
from oslo_utils import fnmatch
from oslo_utils import timeutils
import six
import six.moves.urllib.parse as urlparse
from stevedore import extension
from ceilometer import declarative
from ceilometer import gnocchi_client
from ceilometer.i18n import _
from ceilometer import keystone_client
from ceilometer import publisher
NAME_ENCODED = __name__.encode('utf-8')
CACHE_NAMESPACE = uuid.UUID(bytes=hashlib.md5(NAME_ENCODED).digest())
LOG = log.getLogger(__name__)
def cache_key_mangler(key):
"""Construct an opaque cache key."""
if six.PY2:
key = key.encode('utf-8')
return uuid.uuid5(CACHE_NAMESPACE, key).hex
EVENT_CREATE, EVENT_UPDATE, EVENT_DELETE = ("create", "update", "delete")
class ResourcesDefinition(object):
MANDATORY_FIELDS = {'resource_type': six.string_types,
'metrics': (dict, list)}
MANDATORY_EVENT_FIELDS = {'id': six.string_types}
def __init__(self, definition_cfg, archive_policy_default,
archive_policy_override, plugin_manager):
self.cfg = definition_cfg
self._check_required_and_types(self.MANDATORY_FIELDS, self.cfg)
if self.support_events():
self._check_required_and_types(self.MANDATORY_EVENT_FIELDS,
self.cfg['event_attributes'])
self._attributes = {}
for name, attr_cfg in self.cfg.get('attributes', {}).items():
self._attributes[name] = declarative.Definition(name, attr_cfg,
plugin_manager)
self._event_attributes = {}
for name, attr_cfg in self.cfg.get('event_attributes', {}).items():
self._event_attributes[name] = declarative.Definition(
name, attr_cfg, plugin_manager)
self.metrics = {}
# NOTE(sileht): Convert old list to new dict format
if isinstance(self.cfg['metrics'], list):
values = [None] * len(self.cfg['metrics'])
self.cfg['metrics'] = dict(zip(self.cfg['metrics'], values))
for m, extra in self.cfg['metrics'].items():
if not extra:
extra = {}
if not extra.get("archive_policy_name"):
extra["archive_policy_name"] = archive_policy_default
if archive_policy_override:
extra["archive_policy_name"] = archive_policy_override
# NOTE(sileht): For backward compat, this is after the override to
            # preserve the weird previous behavior. We don't really care as we
# deprecate it.
if 'archive_policy' in self.cfg:
LOG.warning("archive_policy '%s' for a resource-type (%s) is "
"deprecated, set it for each metric instead.",
self.cfg["archive_policy"],
self.cfg["resource_type"])
extra["archive_policy_name"] = self.cfg['archive_policy']
self.metrics[m] = extra
@staticmethod
def _check_required_and_types(expected, definition):
for field, field_types in expected.items():
if field not in definition:
raise declarative.ResourceDefinitionException(
_("Required field %s not specified") % field, definition)
if not isinstance(definition[field], field_types):
raise declarative.ResourceDefinitionException(
_("Required field %(field)s should be a %(type)s") %
{'field': field, 'type': field_types}, definition)
@staticmethod
def _ensure_list(value):
if isinstance(value, list):
return value
return [value]
def support_events(self):
for e in ["event_create", "event_delete", "event_update"]:
if e in self.cfg:
return True
return False
def event_match(self, event_type):
for e in self._ensure_list(self.cfg.get('event_create', [])):
if fnmatch.fnmatch(event_type, e):
return EVENT_CREATE
for e in self._ensure_list(self.cfg.get('event_delete', [])):
if fnmatch.fnmatch(event_type, e):
return EVENT_DELETE
for e in self._ensure_list(self.cfg.get('event_update', [])):
if fnmatch.fnmatch(event_type, e):
return EVENT_UPDATE
def sample_attributes(self, sample):
attrs = {}
sample_dict = sample.as_dict()
for name, definition in self._attributes.items():
value = definition.parse(sample_dict)
if value is not None:
attrs[name] = value
return attrs
def event_attributes(self, event):
attrs = {'type': self.cfg['resource_type']}
traits = dict([(trait.name, trait.value) for trait in event.traits])
for attr, field in self.cfg.get('event_attributes', {}).items():
value = traits.get(field)
if value is not None:
attrs[attr] = value
return attrs
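# Hedged example (illustration only, not taken from gnocchi_resources.yaml): a
# minimal definition dict of the shape ResourcesDefinition parses above.  The
# resource type, metric, attribute and event names here are made up.
_EXAMPLE_RESOURCE_DEFINITION = {
    'resource_type': 'instance',
    'metrics': {'cpu': {'archive_policy_name': 'ceilometer-low'}},
    'attributes': {'host': 'resource_metadata.host'},
    'event_delete': 'compute.instance.delete.*',
    'event_attributes': {'id': 'instance_id'},
}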
class LockedDefaultDict(defaultdict):
"""defaultdict with lock to handle threading
    An entry is only deleted if nothing is currently accessing the dict and
    nothing still holds the lock being deleted; otherwise the delete is skipped.
"""
def __init__(self, *args, **kwargs):
self.lock = threading.Lock()
super(LockedDefaultDict, self).__init__(*args, **kwargs)
def __getitem__(self, key):
with self.lock:
return super(LockedDefaultDict, self).__getitem__(key)
def pop(self, key, *args):
with self.lock:
key_lock = super(LockedDefaultDict, self).__getitem__(key)
if key_lock.acquire(False):
try:
super(LockedDefaultDict, self).pop(key, *args)
finally:
key_lock.release()
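# Hedged usage sketch (illustration only, not part of the original module): how
# the lock dictionary above is meant to be driven; the resource id is made up.
def _locked_default_dict_example():
    locks = LockedDefaultDict(threading.Lock)
    res_id = "resource-0000"
    with locks[res_id]:
        # ... per-resource work while holding its dedicated lock ...
        pass
    # pop() only removes the entry if nobody else is using or holding the lock.
    locks.pop(res_id, None)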
class GnocchiPublisher(publisher.ConfigPublisherBase):
"""Publisher class for recording metering data into the Gnocchi service.
The publisher class records each meter into the gnocchi service
configured in Ceilometer pipeline file. An example target may
look like the following:
gnocchi://?archive_policy=low&filter_project=gnocchi
"""
def __init__(self, conf, parsed_url):
super(GnocchiPublisher, self).__init__(conf, parsed_url)
# TODO(jd) allow to override Gnocchi endpoint via the host in the URL
options = urlparse.parse_qs(parsed_url.query)
self.filter_project = options.get('filter_project', ['service'])[-1]
self.filter_domain = options.get('filter_domain', ['Default'])[-1]
resources_definition_file = options.get(
'resources_definition_file', ['gnocchi_resources.yaml'])[-1]
archive_policy_override = options.get('archive_policy', [None])[-1]
self.resources_definition, self.archive_policies_definition = (
self._load_definitions(conf, archive_policy_override,
resources_definition_file))
self.metric_map = dict((metric, rd) for rd in self.resources_definition
for metric in rd.metrics)
timeout = options.get('timeout', [6.05])[-1]
self._ks_client = keystone_client.get_client(conf)
self.cache = None
try:
import oslo_cache
oslo_cache.configure(conf)
# NOTE(cdent): The default cache backend is a real but
# noop backend. We don't want to use that here because
# we want to avoid the cache pathways entirely if the
# cache has not been configured explicitly.
if conf.cache.enabled:
cache_region = oslo_cache.create_region()
self.cache = oslo_cache.configure_cache_region(
conf, cache_region)
self.cache.key_mangler = cache_key_mangler
except ImportError:
pass
except oslo_cache.exception.ConfigurationError as exc:
LOG.warning('unable to configure oslo_cache: %s', exc)
self._gnocchi_project_id = None
self._gnocchi_project_id_lock = threading.Lock()
self._gnocchi_resource_lock = LockedDefaultDict(threading.Lock)
self._gnocchi = gnocchi_client.get_gnocchiclient(
conf, request_timeout=timeout)
self._already_logged_event_types = set()
self._already_logged_metric_names = set()
self._already_configured_archive_policies = False
@staticmethod
def _load_definitions(conf, archive_policy_override,
resources_definition_file):
plugin_manager = extension.ExtensionManager(
namespace='ceilometer.event.trait_plugin')
data = declarative.load_definitions(
conf, {}, resources_definition_file,
pkg_resources.resource_filename(__name__,
"data/gnocchi_resources.yaml"))
archive_policy_default = data.get("archive_policy_default",
"ceilometer-low")
resource_defs = []
for resource in data.get('resources', []):
try:
resource_defs.append(ResourcesDefinition(
resource,
archive_policy_default,
archive_policy_override,
plugin_manager))
except Exception:
LOG.error("Failed to load resource due to error",
exc_info=True)
return resource_defs, data.get("archive_policies", [])
def ensures_archives_policies(self):
if not self._already_configured_archive_policies:
for ap in self.archive_policies_definition:
try:
self._gnocchi.archive_policy.create(ap)
except gnocchi_exc.ArchivePolicyAlreadyExists:
# created in the meantime by another worker
pass
self._already_configured_archive_policies = True
@property
def gnocchi_project_id(self):
if self._gnocchi_project_id is not None:
return self._gnocchi_project_id
with self._gnocchi_project_id_lock:
if self._gnocchi_project_id is None:
try:
project = self._ks_client.projects.find(
name=self.filter_project,
domain=self.filter_domain)
except ka_exceptions.NotFound:
LOG.warning('filtered project not found in keystone,'
' ignoring the filter_project '
'option')
self.filter_project = None
return None
except Exception:
                    LOG.exception('failed to retrieve filtered project')
raise
self._gnocchi_project_id = project.id
LOG.debug("filtered project found: %s",
self._gnocchi_project_id)
return self._gnocchi_project_id
def _is_swift_account_sample(self, sample):
try:
return (self.metric_map[sample.name].cfg['resource_type']
== 'swift_account')
except KeyError:
return False
def _is_gnocchi_activity(self, sample):
return (self.filter_project and self.gnocchi_project_id and (
# avoid anything from the user used by gnocchi
sample.project_id == self.gnocchi_project_id or
# avoid anything in the swift account used by gnocchi
(sample.resource_id == self.gnocchi_project_id and
self._is_swift_account_sample(sample))
))
def _get_resource_definition_from_event(self, event_type):
for rd in self.resources_definition:
operation = rd.event_match(event_type)
if operation:
return rd, operation
def publish_samples(self, data):
self.ensures_archives_policies()
# NOTE(sileht): skip sample generated by gnocchi itself
data = [s for s in data if not self._is_gnocchi_activity(s)]
data.sort(key=operator.attrgetter('resource_id'))
resource_grouped_samples = itertools.groupby(
data, key=operator.attrgetter('resource_id'))
gnocchi_data = {}
measures = {}
for resource_id, samples_of_resource in resource_grouped_samples:
# NOTE(sileht): / is forbidden by Gnocchi
resource_id = resource_id.replace('/', '_')
for sample in samples_of_resource:
metric_name = sample.name
rd = self.metric_map.get(metric_name)
if rd is None:
if metric_name not in self._already_logged_metric_names:
LOG.warning("metric %s is not handled by Gnocchi" %
metric_name)
self._already_logged_metric_names.add(metric_name)
continue
if resource_id not in gnocchi_data:
gnocchi_data[resource_id] = {
'resource_type': rd.cfg['resource_type'],
'resource': {"id": resource_id,
"user_id": sample.user_id,
"project_id": sample.project_id}}
gnocchi_data[resource_id].setdefault(
"resource_extra", {}).update(rd.sample_attributes(sample))
measures.setdefault(resource_id, {}).setdefault(
metric_name,
{"measures": [],
"archive_policy_name":
rd.metrics[metric_name]["archive_policy_name"],
"unit": sample.unit}
)["measures"].append(
{'timestamp': sample.timestamp,
'value': sample.volume}
)
try:
self.batch_measures(measures, gnocchi_data)
except gnocchi_exc.ClientException as e:
LOG.error(six.text_type(e))
except Exception as e:
LOG.error(six.text_type(e), exc_info=True)
for info in gnocchi_data.values():
resource = info["resource"]
resource_type = info["resource_type"]
resource_extra = info["resource_extra"]
if not resource_extra:
continue
try:
self._if_not_cached(resource_type, resource['id'],
resource_extra)
except gnocchi_exc.ClientException as e:
LOG.error(six.text_type(e))
except Exception as e:
LOG.error(six.text_type(e), exc_info=True)
@staticmethod
def _extract_resources_from_error(e, resource_infos):
resource_ids = set([r['original_resource_id']
for r in e.message['detail']])
return [(resource_infos[rid]['resource_type'],
resource_infos[rid]['resource'],
resource_infos[rid]['resource_extra'])
for rid in resource_ids]
def batch_measures(self, measures, resource_infos):
        # NOTE(sileht): We don't care about errors here, we want
        # resource metadata to always be updated
try:
self._gnocchi.metric.batch_resources_metrics_measures(
measures, create_metrics=True)
except gnocchi_exc.BadRequest as e:
if not isinstance(e.message, dict):
raise
if e.message.get('cause') != 'Unknown resources':
raise
resources = self._extract_resources_from_error(e, resource_infos)
for resource_type, resource, resource_extra in resources:
try:
resource.update(resource_extra)
self._create_resource(resource_type, resource)
except gnocchi_exc.ResourceAlreadyExists:
# NOTE(sileht): resource created in the meantime
pass
except gnocchi_exc.ClientException as e:
LOG.error('Error creating resource %(id)s: %(err)s',
{'id': resource['id'], 'err': six.text_type(e)})
# We cannot post measures for this resource
# and we can't patch it later
del measures[resource['id']]
del resource_infos[resource['id']]
else:
if self.cache and resource_extra:
self.cache.set(resource['id'],
self._hash_resource(resource_extra))
# NOTE(sileht): we have created missing resources/metrics,
# now retry to post measures
self._gnocchi.metric.batch_resources_metrics_measures(
measures, create_metrics=True)
LOG.debug(
"%d measures posted against %d metrics through %d resources",
sum(len(m["measures"])
for rid in measures
for m in measures[rid].values()),
sum(len(m) for m in measures.values()), len(resource_infos))
def _create_resource(self, resource_type, resource):
self._gnocchi.resource.create(resource_type, resource)
LOG.debug('Resource %s created', resource["id"])
def _update_resource(self, resource_type, res_id, resource_extra):
self._gnocchi.resource.update(resource_type, res_id, resource_extra)
LOG.debug('Resource %s updated', res_id)
def _if_not_cached(self, resource_type, res_id, resource_extra):
if self.cache:
attribute_hash = self._hash_resource(resource_extra)
if self._resource_cache_diff(res_id, attribute_hash):
with self._gnocchi_resource_lock[res_id]:
# NOTE(luogangyi): there is a possibility that the
# resource was already built in cache by another
# ceilometer-notification-agent when we get the lock here.
if self._resource_cache_diff(res_id, attribute_hash):
self._update_resource(resource_type, res_id,
resource_extra)
self.cache.set(res_id, attribute_hash)
else:
LOG.debug('Resource cache hit for %s', res_id)
self._gnocchi_resource_lock.pop(res_id, None)
else:
LOG.debug('Resource cache hit for %s', res_id)
else:
self._update_resource(resource_type, res_id, resource_extra)
@staticmethod
def _hash_resource(resource):
return hash(tuple(i for i in resource.items() if i[0] != 'metrics'))
def _resource_cache_diff(self, key, attribute_hash):
cached_hash = self.cache.get(key)
return not cached_hash or cached_hash != attribute_hash
def publish_events(self, events):
for event in events:
rd = self._get_resource_definition_from_event(event.event_type)
if not rd:
if event.event_type not in self._already_logged_event_types:
LOG.debug("No gnocchi definition for event type: %s",
event.event_type)
self._already_logged_event_types.add(event.event_type)
continue
rd, operation = rd
if operation == EVENT_DELETE:
self._delete_event(rd, event)
def _delete_event(self, rd, event):
ended_at = timeutils.utcnow().isoformat()
resource = rd.event_attributes(event)
associated_resources = rd.cfg.get('event_associated_resources', {})
if associated_resources:
to_end = itertools.chain([resource], *[
self._search_resource(resource_type, query % resource['id'])
for resource_type, query in associated_resources.items()
])
else:
to_end = [resource]
for resource in to_end:
self._set_ended_at(resource, ended_at)
def _search_resource(self, resource_type, query):
try:
return self._gnocchi.resource.search(
resource_type, json.loads(query))
except Exception:
            LOG.error("Failed to search resource type %(resource_type)s "
                      "with '%(query)s'",
{'resource_type': resource_type, 'query': query},
exc_info=True)
return []
def _set_ended_at(self, resource, ended_at):
try:
self._gnocchi.resource.update(resource['type'], resource['id'],
{'ended_at': ended_at})
except gnocchi_exc.ResourceNotFound:
            LOG.debug("Delete event received on nonexistent resource (%s), "
                      "ignoring it.", resource['id'])
except Exception:
            LOG.error("Failed to update the resource %s", resource,
exc_info=True)
LOG.debug('Resource %s ended at %s' % (resource["id"], ended_at))
|
the-stack_0_24433 | # Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit Tests for oslo.log formatter"""
import mock
import logging
import sys
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_context import context
from oslo_log import formatters
from oslo_log import log
from oslotest import base as test_base
def _fake_context():
ctxt = context.RequestContext(user="user",
tenant="tenant",
project_domain="pdomain",
user_domain="udomain",
overwrite=True)
return ctxt
class AlternativeRequestContext(object):
def __init__(self, user=None, tenant=None):
self.user = user
self.tenant = tenant
def to_dict(self):
return {'user': self.user,
'tenant': self.tenant}
class FormatterTest(test_base.BaseTestCase):
def setUp(self):
super(FormatterTest, self).setUp()
def test_replace_false_value_exists(self):
d = {"user": "user1"}
s = "%(user)s" % formatters._ReplaceFalseValue(d)
self.assertEqual(d['user'], s)
def test_replace_false_value_not_exists(self):
d = {"user": "user1"}
s = "%(project)s" % formatters._ReplaceFalseValue(d)
self.assertEqual("-", s)
def test_dictify_context_empty(self):
self.assertEqual({}, formatters._dictify_context(None))
@mock.patch("debtcollector.deprecate")
def test_dictify_context_with_dict(self, mock_deprecate):
d = {"user": "user"}
self.assertEqual(d, formatters._dictify_context(d))
mock_deprecate.assert_not_called()
@mock.patch("debtcollector.deprecate")
def test_dictify_context_with_context(self, mock_deprecate):
ctxt = _fake_context()
self.assertEqual(ctxt.get_logging_values(),
formatters._dictify_context(ctxt))
mock_deprecate.assert_not_called()
@mock.patch("debtcollector.deprecate")
def test_dictify_context_without_get_logging_values(self, mock_deprecate):
ctxt = AlternativeRequestContext(user="user", tenant="tenant")
d = {"user": "user", "tenant": "tenant"}
self.assertEqual(d, formatters._dictify_context(ctxt))
mock_deprecate.assert_called_with(
'The RequestContext.get_logging_values() '
'method should be defined for logging context specific '
'information. The to_dict() method is deprecated '
'for oslo.log use.', removal_version='5.0.0', version='3.8.0')
# Test for https://bugs.python.org/issue28603
class FormatUnhashableExceptionTest(test_base.BaseTestCase):
def setUp(self):
super(FormatUnhashableExceptionTest, self).setUp()
self.config_fixture = self.useFixture(
config_fixture.Config(cfg.ConfigOpts()))
self.conf = self.config_fixture.conf
log.register_options(self.conf)
def _unhashable_exception_info(self):
class UnhashableException(Exception):
__hash__ = None
try:
raise UnhashableException()
except UnhashableException:
return sys.exc_info()
def test_error_summary(self):
exc_info = self._unhashable_exception_info()
record = logging.LogRecord('test', logging.ERROR, 'test', 0,
'test message', [], exc_info)
err_summary = formatters._get_error_summary(record)
self.assertTrue(err_summary)
def test_json_format_exception(self):
exc_info = self._unhashable_exception_info()
formatter = formatters.JSONFormatter()
tb = ''.join(formatter.formatException(exc_info))
self.assertTrue(tb)
def test_fluent_format_exception(self):
exc_info = self._unhashable_exception_info()
formatter = formatters.FluentFormatter()
tb = formatter.formatException(exc_info)
self.assertTrue(tb)
def test_context_format_exception_norecord(self):
exc_info = self._unhashable_exception_info()
formatter = formatters.ContextFormatter(config=self.conf)
tb = formatter.formatException(exc_info)
self.assertTrue(tb)
def test_context_format_exception(self):
exc_info = self._unhashable_exception_info()
formatter = formatters.ContextFormatter(config=self.conf)
record = logging.LogRecord('test', logging.ERROR, 'test', 0,
'test message', [], exc_info)
tb = formatter.format(record)
self.assertTrue(tb)
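# Hedged illustration (not part of the original tests): the class above guards
# against https://bugs.python.org/issue28603, where traceback formatting kept
# already-seen exceptions in a set and broke once an exception was unhashable.
# A minimal reproduction of the unhashable part:
def _unhashable_exception_repro():
    class Unhashable(Exception):
        __hash__ = None
    try:
        {Unhashable()}  # set membership requires hashing -> TypeError
    except TypeError:
        return True
    return False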
|
the-stack_0_24434 | from ..Qt import QtCore, QtGui, QtOpenGL, QT_LIB
from OpenGL.GL import *
import OpenGL.GL.framebufferobjects as glfbo
import numpy as np
from .. import Vector
from .. import functions as fn
##Vector = QtGui.QVector3D
ShareWidget = None
class GLViewWidget(QtOpenGL.QGLWidget):
"""
Basic widget for displaying 3D data
- Rotation/scale controls
- Axis/grid display
- Export options
High-DPI displays: Qt5 should automatically detect the correct resolution.
For Qt4, specify the ``devicePixelRatio`` argument when initializing the
widget (usually this value is 1-2).
"""
def __init__(self, parent=None, devicePixelRatio=None):
global ShareWidget
if ShareWidget is None:
## create a dummy widget to allow sharing objects (textures, shaders, etc) between views
ShareWidget = QtOpenGL.QGLWidget()
QtOpenGL.QGLWidget.__init__(self, parent, ShareWidget)
self.setFocusPolicy(QtCore.Qt.ClickFocus)
self.opts = {
'center': Vector(0,0,0), ## will always appear at the center of the widget
'distance': 10.0, ## distance of camera from center
'fov': 60, ## horizontal field of view in degrees
'elevation': 30, ## camera's angle of elevation in degrees
'azimuth': 45, ## camera's azimuthal angle in degrees
## (rotation around z-axis 0 points along x-axis)
'viewport': None, ## glViewport params; None == whole widget
'devicePixelRatio': devicePixelRatio,
}
self.setBackgroundColor('k')
self.items = []
self.noRepeatKeys = [QtCore.Qt.Key_Right, QtCore.Qt.Key_Left, QtCore.Qt.Key_Up, QtCore.Qt.Key_Down, QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown]
self.keysPressed = {}
self.keyTimer = QtCore.QTimer()
self.keyTimer.timeout.connect(self.evalKeyState)
self.makeCurrent()
def addItem(self, item):
self.items.append(item)
if hasattr(item, 'initializeGL'):
self.makeCurrent()
try:
item.initializeGL()
except:
self.checkOpenGLVersion('Error while adding item %s to GLViewWidget.' % str(item))
item._setView(self)
#print "set view", item, self, item.view()
self.update()
def removeItem(self, item):
self.items.remove(item)
item._setView(None)
self.update()
def initializeGL(self):
self.resizeGL(self.width(), self.height())
def setBackgroundColor(self, *args, **kwds):
"""
Set the background color of the widget. Accepts the same arguments as
pg.mkColor() and pg.glColor().
"""
self.opts['bgcolor'] = fn.glColor(*args, **kwds)
self.update()
def getViewport(self):
vp = self.opts['viewport']
dpr = self.devicePixelRatio()
if vp is None:
return (0, 0, int(self.width() * dpr), int(self.height() * dpr))
else:
return tuple([int(x * dpr) for x in vp])
def devicePixelRatio(self):
dpr = self.opts['devicePixelRatio']
if dpr is not None:
return dpr
if hasattr(QtOpenGL.QGLWidget, 'devicePixelRatio'):
return QtOpenGL.QGLWidget.devicePixelRatio(self)
else:
return 1.0
def resizeGL(self, w, h):
pass
#glViewport(*self.getViewport())
#self.update()
def setProjection(self, region=None):
m = self.projectionMatrix(region)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
a = np.array(m.copyDataTo()).reshape((4,4))
glMultMatrixf(a.transpose())
def projectionMatrix(self, region=None):
if region is None:
dpr = self.devicePixelRatio()
region = (0, 0, self.width() * dpr, self.height() * dpr)
x0, y0, w, h = self.getViewport()
dist = self.opts['distance']
fov = self.opts['fov']
nearClip = dist * 0.001
farClip = dist * 1000.
r = nearClip * np.tan(fov * 0.5 * np.pi / 180.)
t = r * h / w
## Note that X0 and width in these equations must be the values used in viewport
left = r * ((region[0]-x0) * (2.0/w) - 1)
right = r * ((region[0]+region[2]-x0) * (2.0/w) - 1)
bottom = t * ((region[1]-y0) * (2.0/h) - 1)
top = t * ((region[1]+region[3]-y0) * (2.0/h) - 1)
tr = QtGui.QMatrix4x4()
tr.frustum(left, right, bottom, top, nearClip, farClip)
return tr
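    # Worked example (illustrative only): with the default fov=60 and
    # distance=10 on an 800x600 viewport, nearClip = 0.01,
    # r = 0.01 * tan(30 deg) ~= 0.00577 and t = r * 600 / 800 ~= 0.00433,
    # so the near plane spans roughly [-r, r] x [-t, t].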
def setModelview(self):
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
m = self.viewMatrix()
a = np.array(m.copyDataTo()).reshape((4,4))
glMultMatrixf(a.transpose())
def viewMatrix(self):
tr = QtGui.QMatrix4x4()
tr.translate( 0.0, 0.0, -self.opts['distance'])
tr.rotate(self.opts['elevation']-90, 1, 0, 0)
tr.rotate(self.opts['azimuth']+90, 0, 0, -1)
center = self.opts['center']
tr.translate(-center.x(), -center.y(), -center.z())
return tr
def itemsAt(self, region=None):
"""
Return a list of the items displayed in the region (x, y, w, h)
relative to the widget.
"""
region = (region[0], self.height()-(region[1]+region[3]), region[2], region[3])
#buf = np.zeros(100000, dtype=np.uint)
buf = glSelectBuffer(100000)
try:
glRenderMode(GL_SELECT)
glInitNames()
glPushName(0)
self._itemNames = {}
self.paintGL(region=region, useItemNames=True)
finally:
hits = glRenderMode(GL_RENDER)
items = [(h.near, h.names[0]) for h in hits]
items.sort(key=lambda i: i[0])
return [self._itemNames[i[1]] for i in items]
def paintGL(self, region=None, viewport=None, useItemNames=False):
"""
viewport specifies the arguments to glViewport. If None, then we use self.opts['viewport']
region specifies the sub-region of self.opts['viewport'] that should be rendered.
Note that we may use viewport != self.opts['viewport'] when exporting.
"""
if viewport is None:
glViewport(*self.getViewport())
else:
glViewport(*viewport)
self.setProjection(region=region)
self.setModelview()
bgcolor = self.opts['bgcolor']
glClearColor(*bgcolor)
glClear( GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT )
self.drawItemTree(useItemNames=useItemNames)
def drawItemTree(self, item=None, useItemNames=False):
if item is None:
items = [x for x in self.items if x.parentItem() is None]
else:
items = item.childItems()
items.append(item)
items.sort(key=lambda a: a.depthValue())
for i in items:
if not i.visible():
continue
if i is item:
try:
glPushAttrib(GL_ALL_ATTRIB_BITS)
if useItemNames:
glLoadName(i._id)
self._itemNames[i._id] = i
i.paint()
except:
from .. import debug
debug.printExc()
msg = "Error while drawing item %s." % str(item)
ver = glGetString(GL_VERSION)
if ver is not None:
ver = ver.split()[0]
if int(ver.split(b'.')[0]) < 2:
print(msg + " The original exception is printed above; however, pyqtgraph requires OpenGL version 2.0 or greater for many of its 3D features and your OpenGL version is %s. Installing updated display drivers may resolve this issue." % ver)
else:
print(msg)
finally:
glPopAttrib()
else:
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
try:
tr = i.transform()
a = np.array(tr.copyDataTo()).reshape((4,4))
glMultMatrixf(a.transpose())
self.drawItemTree(i, useItemNames=useItemNames)
finally:
glMatrixMode(GL_MODELVIEW)
glPopMatrix()
def setCameraPosition(self, pos=None, distance=None, elevation=None, azimuth=None):
if pos is not None:
self.opts['center'] = pos
if distance is not None:
self.opts['distance'] = distance
if elevation is not None:
self.opts['elevation'] = elevation
if azimuth is not None:
self.opts['azimuth'] = azimuth
self.update()
def cameraPosition(self):
"""Return current position of camera based on center, dist, elevation, and azimuth"""
center = self.opts['center']
dist = self.opts['distance']
elev = self.opts['elevation'] * np.pi/180.
azim = self.opts['azimuth'] * np.pi/180.
pos = Vector(
center.x() + dist * np.cos(elev) * np.cos(azim),
center.y() + dist * np.cos(elev) * np.sin(azim),
center.z() + dist * np.sin(elev)
)
return pos
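    # Worked example (illustrative only): with the default opts (center at the
    # origin, distance=10, elevation=30, azimuth=45) this evaluates to roughly
    # Vector(6.12, 6.12, 5.0), i.e. (10*cos(30)*cos(45), 10*cos(30)*sin(45),
    # 10*sin(30)).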
def orbit(self, azim, elev):
"""Orbits the camera around the center position. *azim* and *elev* are given in degrees."""
self.opts['azimuth'] += azim
self.opts['elevation'] = np.clip(self.opts['elevation'] + elev, -90, 90)
self.update()
def pan(self, dx, dy, dz, relative='global'):
"""
Moves the center (look-at) position while holding the camera in place.
============== =======================================================
**Arguments:**
*dx* Distance to pan in x direction
*dy* Distance to pan in y direction
        *dz*           Distance to pan in z direction
*relative* String that determines the direction of dx,dy,dz.
If "global", then the global coordinate system is used.
If "view", then the z axis is aligned with the view
direction, and x and y axes are inthe plane of the
view: +x points right, +y points up.
If "view-upright", then x is in the global xy plane and
points to the right side of the view, y is in the
global xy plane and orthogonal to x, and z points in
the global z direction.
============== =======================================================
Distances are scaled roughly such that a value of 1.0 moves
by one pixel on screen.
Prior to version 0.11, *relative* was expected to be either True (x-aligned) or
False (global). These values are deprecated but still recognized.
"""
# for backward compatibility:
relative = {True: "view-upright", False: "global"}.get(relative, relative)
if relative == 'global':
self.opts['center'] += QtGui.QVector3D(dx, dy, dz)
elif relative == 'view-upright':
cPos = self.cameraPosition()
cVec = self.opts['center'] - cPos
dist = cVec.length() ## distance from camera to center
xDist = dist * 2. * np.tan(0.5 * self.opts['fov'] * np.pi / 180.) ## approx. width of view at distance of center point
xScale = xDist / self.width()
zVec = QtGui.QVector3D(0,0,1)
xVec = QtGui.QVector3D.crossProduct(zVec, cVec).normalized()
yVec = QtGui.QVector3D.crossProduct(xVec, zVec).normalized()
self.opts['center'] = self.opts['center'] + xVec * xScale * dx + yVec * xScale * dy + zVec * xScale * dz
elif relative == 'view':
# pan in plane of camera
elev = np.radians(self.opts['elevation'])
azim = np.radians(self.opts['azimuth'])
fov = np.radians(self.opts['fov'])
dist = (self.opts['center'] - self.cameraPosition()).length()
fov_factor = np.tan(fov / 2) * 2
scale_factor = dist * fov_factor / self.width()
z = scale_factor * np.cos(elev) * dy
x = scale_factor * (np.sin(azim) * dx - np.sin(elev) * np.cos(azim) * dy)
y = scale_factor * (np.cos(azim) * dx + np.sin(elev) * np.sin(azim) * dy)
self.opts['center'] += QtGui.QVector3D(x, -y, z)
else:
raise ValueError("relative argument must be global, view, or view-upright")
self.update()
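    # Illustrative usage sketch (not part of the original source). Assuming this
    # widget is instantiated as `w`, the camera API above is typically driven
    # like this:
    #
    #   w.setCameraPosition(distance=20, elevation=30, azimuth=45)
    #   w.orbit(10, 0)                    # rotate 10 degrees around the center
    #   w.pan(5, 0, 0, relative='view')   # shift the look-at point in the view plane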
def pixelSize(self, pos):
"""
Return the approximate size of a screen pixel at the location pos
Pos may be a Vector or an (N,3) array of locations
"""
cam = self.cameraPosition()
if isinstance(pos, np.ndarray):
cam = np.array(cam).reshape((1,)*(pos.ndim-1)+(3,))
dist = ((pos-cam)**2).sum(axis=-1)**0.5
else:
dist = (pos-cam).length()
xDist = dist * 2. * np.tan(0.5 * self.opts['fov'] * np.pi / 180.)
return xDist / self.width()
def mousePressEvent(self, ev):
self.mousePos = ev.pos()
def mouseMoveEvent(self, ev):
diff = ev.pos() - self.mousePos
self.mousePos = ev.pos()
if ev.buttons() == QtCore.Qt.LeftButton:
if (ev.modifiers() & QtCore.Qt.ControlModifier):
self.pan(diff.x(), diff.y(), 0, relative='view')
else:
self.orbit(-diff.x(), diff.y())
elif ev.buttons() == QtCore.Qt.MidButton:
if (ev.modifiers() & QtCore.Qt.ControlModifier):
self.pan(diff.x(), 0, diff.y(), relative='view-upright')
else:
self.pan(diff.x(), diff.y(), 0, relative='view-upright')
def mouseReleaseEvent(self, ev):
pass
# Example item selection code:
#region = (ev.pos().x()-5, ev.pos().y()-5, 10, 10)
#print(self.itemsAt(region))
## debugging code: draw the picking region
#glViewport(*self.getViewport())
#glClear( GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT )
#region = (region[0], self.height()-(region[1]+region[3]), region[2], region[3])
#self.paintGL(region=region)
#self.swapBuffers()
def wheelEvent(self, ev):
delta = 0
if QT_LIB in ['PyQt4', 'PySide']:
delta = ev.delta()
else:
delta = ev.angleDelta().x()
if delta == 0:
delta = ev.angleDelta().y()
if (ev.modifiers() & QtCore.Qt.ControlModifier):
self.opts['fov'] *= 0.999**delta
else:
self.opts['distance'] *= 0.999**delta
self.update()
def keyPressEvent(self, ev):
if ev.key() in self.noRepeatKeys:
ev.accept()
if ev.isAutoRepeat():
return
self.keysPressed[ev.key()] = 1
self.evalKeyState()
def keyReleaseEvent(self, ev):
if ev.key() in self.noRepeatKeys:
ev.accept()
if ev.isAutoRepeat():
return
try:
del self.keysPressed[ev.key()]
except:
self.keysPressed = {}
self.evalKeyState()
def evalKeyState(self):
speed = 2.0
if len(self.keysPressed) > 0:
for key in self.keysPressed:
if key == QtCore.Qt.Key_Right:
self.orbit(azim=-speed, elev=0)
elif key == QtCore.Qt.Key_Left:
self.orbit(azim=speed, elev=0)
elif key == QtCore.Qt.Key_Up:
self.orbit(azim=0, elev=-speed)
elif key == QtCore.Qt.Key_Down:
self.orbit(azim=0, elev=speed)
elif key == QtCore.Qt.Key_PageUp:
pass
elif key == QtCore.Qt.Key_PageDown:
pass
self.keyTimer.start(16)
else:
self.keyTimer.stop()
def checkOpenGLVersion(self, msg):
## Only to be called from within exception handler.
ver = glGetString(GL_VERSION).split()[0]
        if int(ver.split(b'.')[0]) < 2:
from .. import debug
debug.printExc()
raise Exception(msg + " The original exception is printed above; however, pyqtgraph requires OpenGL version 2.0 or greater for many of its 3D features and your OpenGL version is %s. Installing updated display drivers may resolve this issue." % ver)
else:
raise
def readQImage(self):
"""
Read the current buffer pixels out as a QImage.
"""
w = self.width()
h = self.height()
self.repaint()
pixels = np.empty((h, w, 4), dtype=np.ubyte)
pixels[:] = 128
pixels[...,0] = 50
pixels[...,3] = 255
glReadPixels(0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, pixels)
# swap B,R channels for Qt
tmp = pixels[...,0].copy()
pixels[...,0] = pixels[...,2]
pixels[...,2] = tmp
pixels = pixels[::-1] # flip vertical
img = fn.makeQImage(pixels, transpose=False)
return img
def renderToArray(self, size, format=GL_BGRA, type=GL_UNSIGNED_BYTE, textureSize=1024, padding=256):
w,h = map(int, size)
self.makeCurrent()
tex = None
fb = None
try:
output = np.empty((w, h, 4), dtype=np.ubyte)
fb = glfbo.glGenFramebuffers(1)
glfbo.glBindFramebuffer(glfbo.GL_FRAMEBUFFER, fb )
glEnable(GL_TEXTURE_2D)
tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, tex)
texwidth = textureSize
data = np.zeros((texwidth,texwidth,4), dtype=np.ubyte)
## Test texture dimensions first
glTexImage2D(GL_PROXY_TEXTURE_2D, 0, GL_RGBA, texwidth, texwidth, 0, GL_RGBA, GL_UNSIGNED_BYTE, None)
if glGetTexLevelParameteriv(GL_PROXY_TEXTURE_2D, 0, GL_TEXTURE_WIDTH) == 0:
raise Exception("OpenGL failed to create 2D texture (%dx%d); too large for this hardware." % shape[:2])
            ## create texture
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, texwidth, texwidth, 0, GL_RGBA, GL_UNSIGNED_BYTE, data.transpose((1,0,2)))
self.opts['viewport'] = (0, 0, w, h) # viewport is the complete image; this ensures that paintGL(region=...)
# is interpreted correctly.
p2 = 2 * padding
for x in range(-padding, w-padding, texwidth-p2):
for y in range(-padding, h-padding, texwidth-p2):
x2 = min(x+texwidth, w+padding)
y2 = min(y+texwidth, h+padding)
w2 = x2-x
h2 = y2-y
## render to texture
glfbo.glFramebufferTexture2D(glfbo.GL_FRAMEBUFFER, glfbo.GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0)
self.paintGL(region=(x, h-y-h2, w2, h2), viewport=(0, 0, w2, h2)) # only render sub-region
glBindTexture(GL_TEXTURE_2D, tex) # fixes issue #366
## read texture back to array
data = glGetTexImage(GL_TEXTURE_2D, 0, format, type)
data = np.fromstring(data, dtype=np.ubyte).reshape(texwidth,texwidth,4).transpose(1,0,2)[:, ::-1]
output[x+padding:x2-padding, y+padding:y2-padding] = data[padding:w2-padding, -(h2-padding):-padding]
finally:
self.opts['viewport'] = None
glfbo.glBindFramebuffer(glfbo.GL_FRAMEBUFFER, 0)
glBindTexture(GL_TEXTURE_2D, 0)
if tex is not None:
glDeleteTextures([tex])
if fb is not None:
glfbo.glDeleteFramebuffers([fb])
return output
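    # Illustrative usage sketch (not part of the original source): grabbing an
    # offscreen render and saving it. The `pyqtgraph.makeQImage` name mirrors the
    # helper used in readQImage above; treat it as an assumption of this example.
    #
    #   arr = view.renderToArray((800, 600))              # (w, h, 4) ubyte array
    #   img = pyqtgraph.makeQImage(arr, transpose=False)
    #   img.save('snapshot.png')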
|
the-stack_0_24435 | # -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import time
import uuid
import logging
from .common import build_trait
from ..config import options
from ..compat import OrderedDict, six
from ..errors import InternalServerError, RequestTimeTooSkewed
from ..models.instance import Instance
from ..serializers import JSONSerializableModel, JSONNodeField, JSONNodesReferencesField
PROGRESS_RETRY = 3
PROGRESS_RETRY_DELAY = 0.15
logger = logging.getLogger(__name__)
"""
Progress Storage
"""
PROGRESS_REPO = dict()
class _StageProgressJSON(JSONSerializableModel):
name = JSONNodeField('name')
backup_workers = JSONNodeField('backup_workers', parse_callback=int, default=0)
terminated_workers = JSONNodeField('terminated_workers', parse_callback=int, default=0)
running_workers = JSONNodeField('running_workers', parse_callback=int, default=0)
total_workers = JSONNodeField('total_workers', parse_callback=int, default=0)
input_records = JSONNodeField('input_records', parse_callback=int, default=0)
output_records = JSONNodeField('output_records', parse_callback=int, default=0)
finished_percentage = JSONNodeField('finished_percentage', parse_callback=int, default=0)
def __init__(self, **kwargs):
super(_StageProgressJSON, self).__init__(**kwargs)
class _TaskProgressJSON(JSONSerializableModel):
name = JSONNodeField('name')
status = JSONNodeField('status', parse_callback=lambda v: Instance.Task.TaskStatus(v.upper()),
serialize_callback=lambda v: v.value)
stages = JSONNodesReferencesField(_StageProgressJSON, 'stages')
class _InstanceProgressJSON(JSONSerializableModel):
id = JSONNodeField('id')
logview = JSONNodeField('logview')
status = JSONNodeField('status', parse_callback=lambda v: Instance.Status(v.upper()),
serialize_callback=lambda v: v.value)
tasks = JSONNodeField('tasks', parse_callback=lambda v: _InstanceProgressJSON._parse_tasks(v),
serialize_callback=lambda v: [d.serial() for d in six.itervalues(v)])
@staticmethod
def _parse_tasks(obj):
return OrderedDict([(o['name'], _TaskProgressJSON.parse(o)) for o in obj])
class _InstancesProgressJSON(JSONSerializableModel):
name = JSONNodeField('name')
key = JSONNodeField('key')
gen_time = JSONNodeField('gen_time')
logview = JSONNodeField('logview')
instances = JSONNodeField('instances', parse_callback=lambda v: _InstancesProgressJSON._parse_instances(v),
serialize_callback=lambda v: [d.serial() for d in six.itervalues(v)])
@staticmethod
def _parse_instances(obj):
return OrderedDict([(o['id'], _InstanceProgressJSON.parse(o)) for o in obj])
def update_instance(self, inst):
self.instances[inst.id] = inst
def create_instance_group(name):
key = '%x_%s' % (int(time.time()), str(uuid.uuid4()).lower())
group_json = _InstancesProgressJSON(name=name, key=key, instances=OrderedDict())
group_json.gen_time = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
PROGRESS_REPO[key] = group_json
return key
_logview_cache = dict()
def _reload_instance_status(odps, group_id, instance_id):
if group_id not in PROGRESS_REPO:
raise KeyError('Instance group ID not exist.')
group_json = PROGRESS_REPO[group_id]
if instance_id in group_json.instances:
inst_json = group_json.instances[instance_id]
if inst_json.status == Instance.Status.TERMINATED:
return
else:
inst_json = _InstanceProgressJSON(id=instance_id, tasks=dict())
group_json.instances[instance_id] = inst_json
group_json.gen_time = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
old_status = inst_json.status
sub_inst = odps.get_instance(instance_id)
inst_json.status = sub_inst.status
if instance_id not in _logview_cache:
_logview_cache[instance_id] = sub_inst.get_logview_address()
inst_json.logview = _logview_cache[instance_id]
if old_status != Instance.Status.TERMINATED:
for task_name, task in six.iteritems(sub_inst.get_task_statuses()):
if task_name in inst_json.tasks:
task_json = inst_json.tasks[task_name]
task_json.status = task.status
if task.status not in set([Instance.Task.TaskStatus.RUNNING, Instance.Task.TaskStatus.WAITING]):
continue
else:
task_json = _TaskProgressJSON(name=task_name, status=task.status, stages=[])
inst_json.tasks[task_name] = task_json
task_json.stages = []
try:
task_prog = sub_inst.get_task_progress(task_name)
except Exception:
continue
for stage in task_prog.stages:
stage_json = _StageProgressJSON()
for field_name in six.iterkeys(_StageProgressJSON.__fields):
if hasattr(stage, field_name):
val = getattr(stage, field_name)
if val is not None:
setattr(stage_json, field_name, val)
task_json.stages.append(stage_json)
def reload_instance_status(odps, group_id, instance_id):
retry_num = options.retry_times
while retry_num > 0:
try:
return _reload_instance_status(odps, group_id, instance_id)
except (InternalServerError, RequestTimeTooSkewed):
retry_num -= 1
if retry_num > 0:
time.sleep(PROGRESS_RETRY_DELAY)
def fetch_instance_group(group_id):
if group_id not in PROGRESS_REPO:
raise KeyError('Instance group ID not exist.')
return PROGRESS_REPO[group_id]
def exist_instance_group(group_id):
return group_id in PROGRESS_REPO
"""
User Interface
"""
try:
    # JupyterLab is currently not supported
if 'dsw_userNumber' in os.environ:
raise ImportError
from ..console import widgets, ipython_major_version, in_ipython_frontend, is_widgets_available
if ipython_major_version < 4:
from IPython.utils.traitlets import Unicode, List
else:
from traitlets import Unicode, List
from IPython.display import display
except Exception:
InstancesProgress = None
else:
if widgets and in_ipython_frontend():
class InstancesProgress(widgets.DOMWidget):
_view_name = build_trait(Unicode, 'InstancesProgress', sync=True)
_view_module = build_trait(Unicode, 'pyodps/progress', sync=True)
prefix = build_trait(Unicode, 'prefix', sync=True)
suffix = build_trait(Unicode, 'suffix', sync=True)
def __init__(self, **kwargs):
"""Constructor"""
widgets.DOMWidget.__init__(self, **kwargs) # Call the base.
# Allow the user to register error callbacks with the following signatures:
self.errors = widgets.CallbackDispatcher(accepted_nargs=[0, 1])
def update(self):
self.send(json.dumps(dict(action='update', content=[])))
def update_group(self, group_jsons):
if isinstance(group_jsons, six.string_types):
group_jsons = [group_jsons, ]
try:
self.send(json.dumps(dict(action='update', content=group_jsons)))
except:
pass
def delete_group(self, group_keys):
if isinstance(group_keys, six.string_types):
group_keys = [group_keys, ]
try:
self.send(json.dumps(dict(action='delete', content=group_keys)))
except:
pass
def clear_groups(self):
try:
self.send(json.dumps(dict(action='clear')))
except:
pass
else:
InstancesProgress = None
class ProgressGroupUI(object):
def __init__(self, ipython_widget=False):
self._ipython_widget = ipython_widget
if ipython_widget and InstancesProgress is None:
raise RuntimeError('Cannot create group ui when InstancesProgress is None')
self._widget = None
self._group_keys = set()
self._prefix = ''
self._suffix = ''
@property
def prefix(self):
return self._prefix
@prefix.setter
def prefix(self, value):
self._prefix = value
self._update_text()
@property
def suffix(self):
return self._suffix
@suffix.setter
def suffix(self, value):
self._suffix = value
self._update_text()
def has_keys(self, keys):
if isinstance(keys, six.string_types):
keys = [keys, ]
return all(k in self._group_keys for k in keys)
def add_keys(self, keys):
if isinstance(keys, six.string_types):
keys = [keys, ]
self._group_keys.update(keys)
self._update_group(keys)
def remove_keys(self, keys):
if isinstance(keys, six.string_types):
keys = [keys, ]
self._group_keys -= set(keys)
self._widget.delete_group(keys)
def clear_keys(self):
self._group_keys = set()
self._widget.clear_groups()
def _update_text(self):
if self._ipython_widget:
if not self._widget:
self._widget = InstancesProgress()
if is_widgets_available():
display(self._widget)
self._widget.prefix = self._prefix
self._widget.suffix = self._suffix
self._widget.update()
def _update_group(self, keys):
if self._ipython_widget:
if not self._widget:
self._widget = InstancesProgress()
if is_widgets_available():
display(self._widget)
if isinstance(keys, six.string_types):
keys = [keys, ]
data = [fetch_instance_group(key).serialize() for key in keys if exist_instance_group(key)]
self._widget.update_group(data)
def update(self):
self._update_text()
data = [fetch_instance_group(key).serialize() for key in self._group_keys if exist_instance_group(key)]
self._widget.update_group(data)
def close(self):
if self._ipython_widget and self._widget:
self._widget.close()
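# Illustrative usage sketch (not part of the original source): the helpers above
# are meant to be wired together roughly like this. The `odps` client object and
# `instance_id` are assumptions of the example only.
#
#   group_key = create_instance_group('my query')
#   ui = ProgressGroupUI(ipython_widget=True)
#   ui.prefix = 'Executing: '
#   ui.add_keys(group_key)
#   reload_instance_status(odps, group_key, instance_id)
#   ui.update()
#   ui.close()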
|
the-stack_0_24436 | import numpy as np
import pandas as pd
size = 15
basearray = np.linspace(0,1,size) # linear scale
basearray = basearray.astype('float')
# print(basearray)
np.random.shuffle(basearray)
# print(basearray)
index = 0
while index < len(basearray):
if basearray[index] < 0.3:
basearray[index] = 0.0
else:
basearray[index] = basearray[index] + index * 0.1
index = index + 1
##print(basearray)
def replacezeroeswith(array, newvalue):
array[ array == 0 ] = newvalue
test1 = basearray.copy()
replacezeroeswith(test1, np.nan)
##print(test1)
df=pd.DataFrame({'X':basearray})
replacezeroeswith(df.X, np.nan)
##print(df)
# Option 1 Now drop NaN values
df1 = pd.DataFrame({'X':basearray})
df1 = df1.dropna()
##print("df1")
##print(df1)
# Option 2 Replace with
mean = df1.X.mean()
df2 = pd.DataFrame({'X':basearray})
replacezeroeswith(df2.X, mean)
##print("df2")
##print(df2)
# Option 3 select random item from existing (Hot Deck)
hotdeck = np.random.choice(df1.X)
df3 = pd.DataFrame({'X':basearray})
replacezeroeswith(df3.X,hotdeck)
##print("df3")
##print(df3)
# Option 4 select item with given rule (Cold Deck)
colddeck = df1.X[0] # first item
df4 = pd.DataFrame({'X':basearray})
replacezeroeswith(df4.X, colddeck)
##print("df4")
##print(df4)
# Option 5 regression imputation / replace with estimator
def replace_with_estimator(given, index):
    # estimate the missing value from the values observed before it
    untilme = given[0:index]
    # moving average window
    wsize = 5
    size = len(untilme)
    if wsize > size:
        wsize = size
    if wsize == 0:
        # nothing observed yet, fall back to the mean of the known (non-zero) values
        estimate = given[given != 0].mean()
    else:
        estimate = untilme.rolling(wsize).mean().iloc[-1]
    given[index] = estimate
df5 = pd.DataFrame({'X':basearray})
index = 0
while index < len(df5.X):
if df5.X[index] == 0 :
replace_with_estimator(df5.X,index)
index = index + 1
##print("df5")
##print(df5)
# Option 6 - Stochastic regression - estimate with some random residue
# not implemented here
df_final = pd.DataFrame({'X':df.X, 'M1': df1.X, 'M2': df2.X, 'M3': df3.X, 'M4': df4.X, 'M5': df5.X})
print(df_final)
# Homework is about Option 7 - Interpolation
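# A possible sketch for Option 7 (not part of the original script): pandas'
# Series.interpolate estimates each missing value from its neighbours.
df7 = pd.DataFrame({'X': basearray})
replacezeroeswith(df7.X, np.nan)    # mark zeroes as missing, as above
df7.X = df7.X.interpolate(method='linear', limit_direction='both')
##print("df7")
##print(df7)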
|
the-stack_0_24438 | # -*- coding:utf-8 -*-
import urllib.request
import xlwt
import re
import urllib.parse
import json
class JobInformation:
def __init__(self):
self.job_id = ""
self.job_name = ""
self.company_name = ""
self.provide_salary = ""
self.update_date = ""
self.company_location = ""
self.experience_requirement = ""
self.academic_requirements = ""
self.demand_num = ""
self.job_requirements = ""
def _get_51job_page_html(page, header):
    # fetch the HTML of one 51job search-result page
ur1 = str(page)+'.html?lang=c&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
ur2 = 'https://search.51job.com/list/030000,000000,5800,29%252c20,3,99,+,2,'
url = ur2+ur1
# print(url)
request = urllib.request.Request(url, headers=header)
response = urllib.request.urlopen(request)
html = response.read().decode('gbk')
    # print(html)  # read the page source and decode it to unicode
return html
def _analysis_51job_information(html):
    # parse the HTML and return the job postings as a list of JobInformation objects
jobs = []
reg = '(?<=window.__SEARCH_RESULT__ = ).*?(?=</script>)'
json_infos = re.findall(reg, html)
# print("正则匹配成功数:{0}".format(len(json_infos)))
# print(json_infos)
for json_info in json_infos:
json_data = json.loads(json_info)
if 'engine_search_result' in json_data:
# print("待解析岗位数:{0}".format(len(json_data['engine_search_result'])))
for raw_info in json_data['engine_search_result']:
info = JobInformation()
info.job_id = raw_info.get("jobid", "")
info.job_name = raw_info.get("job_name", "")
info.company_name = raw_info.get("company_name", "")
info.provide_salary = raw_info.get("providesalary_text", "")
info.update_date = raw_info.get("updatedate", "")
attribute_text = raw_info.get("attribute_text", [])
if len(attribute_text) > 0:
info.company_location = attribute_text[0]
if len(attribute_text) > 1:
info.experience_requirement = attribute_text[1]
if len(attribute_text) > 2:
info.academic_requirements = attribute_text[2]
info.demand_num = _get_recruiting_numbers(raw_info.get("attribute_text", ["", "", ""]))
info.job_requirements = ""
jobs.append(info)
# print("解析成功岗位数:{0}".format(len(jobs)))
return jobs
def _get_recruiting_numbers(attribute_list):
    # parse the number of open positions from text like "招3人" ("hiring 3 people")
reg = r'(?<=招).*?(?=人)'
for attribute in attribute_list:
matchObj = re.search(reg, attribute)
if matchObj:
return matchObj.group(0)
return ""
def _get_51job_job_html(url, header):
request = urllib.request.Request(url, headers=header)
response = urllib.request.urlopen(request)
html = response.read().decode('gbk')
return html
def _get_51job_requirements(html):
job_requirements = re.findall(re.compile(
r' <div class="bmsg job_msg inbox">.*?<p>(.*?)<div class="mt10">',
re.S), html)
return job_requirements
def _get_zhaopin_page_html(page, header):
    # fetch the HTML of one zhaopin search-result page
ur1 = str(page) + '&dt=4&ind=600000000&jt=21000100000000'
ur2 = 'https://xiaoyuan.zhaopin.com/search/jn=2&cts=548&pg='
url = ur2 + ur1
#print(url)
request = urllib.request.Request(url, headers=header)
response = urllib.request.urlopen(request)
html = response.read().decode('utf-8')
    # print(html)  # read the page source and decode it to unicode
return html
def _analysis_zhaopin_information(html):
    # parse the HTML and return the job postings as a list of JobInformation objects
jobs = []
reg = '(?<=__INITIAL_STATE__=).*?(?=</script>)'
json_infos = re.findall(reg, html)
# print("正则匹配成功数:{0}".format(len(json_infos)))
# print(json_infos)
for json_info in json_infos:
json_data = json.loads(json_info)
if 'souresult' in json_data:
# print("待解析岗位数:{0}".format(len(json_data['engine_search_result'])))
for raw_info in json_data['souresult']['Items']:
info = JobInformation()
info.job_id = raw_info.get("JobPositionNumber", "")
info.job_name = raw_info.get("JobTitle", "")
info.company_name = raw_info.get("CompanyName", "")
info.provide_salary = raw_info.get("MaxSalary", "")
info.update_date = raw_info.get("DateCreated", "")
info.demand_num = raw_info.get("RecruitCount", "")
info.company_location = raw_info.get("CityName", "")
jobs.append(info)
# print("解析成功岗位数:{0}".format(len(jobs)))
return jobs
def crawling_51bob_infomation(page_num):
    # browser-like request headers
header = {
'Host': 'search.51job.com',
'Referer': 'https://search.51job.com/list/030000,000000,5800,29%252c20,3,99,+,2,1.html?lang=c&postchannel=0000&workyear=99&cotype=99°reefrom=01%252c02%252c03&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare=',
'Upgrade-Insecure-Requests': '1',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36'
}
information = []
for i in range(1, page_num+1):
page_html = _get_51job_page_html(i, header)
# print(page_html)
page_information = _analysis_51job_information(page_html)
information += page_information
return information
def crawling_zhaopin_infomation(page_num):
    # browser-like request headers
header = {
'authority': 'xiaoyuan.zhaopin.com',
'path': '/search/jn=2&cts=548&pg=1&dt=4&ind=600000000&jt=21000100000000',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36'
}
information = []
for i in range(1, page_num+1):
page_html = _get_zhaopin_page_html(i, header)
# print(page_html)
page_information = _analysis_zhaopin_information(page_html)
information += page_information
    return information
|
the-stack_0_24440 | import argparse
import os
import urllib.request
from bs4 import BeautifulSoup
from tqdm import tqdm
def get_properties(url, project_name="test_project", output_file = "get_properties.csv"):
"""
    Extract the [Name, Label, Domain, Range] information from an ontology page such as
    http://mappings.dbpedia.org/server/ontology/classes/Place and save it as a CSV file.
"""
page = urllib.request.urlopen(url)
soup = BeautifulSoup(page, "html.parser")
if(not os.path.isdir(project_name)):
os.makedirs(project_name)
output_file = open(project_name+"/" + output_file, 'w')
fl = 0
accum = []
for rows in tqdm(soup.find_all("tr")):
x = rows.find_all("td")
if len(x) <= 2:
fl = 1
continue
if fl == 1:
fl = 2
continue
name = rows.find_all("td")[0].get_text().replace(" (edit)", "")
label = rows.find_all("td")[1].get_text()
dom = rows.find_all("td")[2].get_text()
rng = rows.find_all("td")[3].get_text()
final = name + "," + label + "," + dom + "," + rng
accum.append(final)
output_file.write(final+"\n")
output_file.close()
return accum
"""
Name, Label, Domain, Range
"""
if __name__ == "__main__":
"""
Section to parse the command line arguments.
"""
parser = argparse.ArgumentParser()
requiredNamed = parser.add_argument_group('Required Arguments')
requiredNamed.add_argument('--url', dest='url', metavar='url',
help='Webpage URL: eg-http://mappings.dbpedia.org/server/ontology/classes/Place', required=True)
requiredNamed.add_argument(
'--output_file', dest='out_put', metavar='out_put', help='temp.csv', required=True)
requiredNamed.add_argument(
'--project_name', dest='project_name', metavar='project_name', help='test', required=True)
args = parser.parse_args()
url = args.url
output_file = args.out_put
project_name = args.project_name
get_properties(url = url, project_name= project_name, output_file = output_file)
pass
|
the-stack_0_24442 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Schema for a buildcache spec.yaml file
.. literalinclude:: _spack_root/lib/spack/spack/schema/buildcache_spec.py
:lines: 14-
"""
import spack.schema.spec
schema = {
'$schema': 'http://json-schema.org/draft-07/schema#',
'title': 'Spack buildcache specfile schema',
'type': 'object',
'additionalProperties': False,
'properties': {
'buildinfo': {
'type': 'object',
'additionalProperties': False,
'required': ['relative_prefix'],
'properties': {
'relative_prefix': {'type': 'string'},
'relative_rpaths': {'type': 'boolean'},
},
},
'spec': {
'type': 'object',
'additionalProperties': True,
'items': spack.schema.spec.properties,
},
'binary_cache_checksum': {
'type': 'object',
'properties': {
'hash_algorithm': {'type': 'string'},
'hash': {'type': 'string'},
},
},
},
}
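# Illustrative sketch (not part of the original source): a buildcache specfile
# parsed from YAML could be checked against this schema with the generic
# `jsonschema` package. Spack has its own validation entry points; this only
# shows the shape the schema expects.
#
#   import yaml, jsonschema
#   with open('spec.yaml') as f:
#       data = yaml.safe_load(f)
#   jsonschema.validate(data, schema)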
|
the-stack_0_24443 | import os
import json
import glob
import tqdm
from exceptions import ProjectTypeError
from utils.mask import create_mask
def process(path):
project_path = os.path.join(path, 'project.json')
with open(project_path, 'r', encoding='utf-8') as f:
project_info = json.load(f)
try:
data_type = project_info['data_type']
assert data_type in ['image', 'image sequence']
except (KeyError, AssertionError):
raise ProjectTypeError('Legacy projects are not supported')
meta_dir = os.path.join(path, 'meta', '**', '*.json')
for meta_path in tqdm.tqdm(glob.glob(meta_dir)):
with open(meta_path, 'r', encoding='utf-8') as f:
meta = json.load(f)
image_info = meta['image_info']
dataset, data_key = meta['dataset'], meta['data_key']
if 'width' not in image_info:
print(f'[WARNING, {dataset}/{data_key}] skipped: Image info does not exist, please submit through our annotation app')
continue
label_id = meta['label_id']
label_path = os.path.join(path, 'labels', f'{label_id}.json')
with open(label_path, 'r', encoding='utf-8') as f:
label = json.load(f)
mask_dir = os.path.join(path, 'masks')
create_mask(data_type, project_info, image_info, label_id, label, mask_dir)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, required=True, help='path to unzipped export result')
args = parser.parse_args()
try:
process(path=args.path)
except Exception as e:
print(e)
|
the-stack_0_24444 | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
from scipy.spatial import KDTree
STATE_COUNT_THRESHOLD = 2
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
graph_source = rospy.get_param('~graph_source')
self.light_classifier = TLClassifier(graph_source)
self.pose = None
self.waypoints = None
self.camera_image = None
self.lights = []
self.tree = None
config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.safe_load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.bridge = CvBridge()
self.listener = tf.TransformListener()
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
# Subscribe only at this point to give tf time to start up
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb, queue_size=1)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb, queue_size=1)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb, queue_size=1)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb, queue_size=1)
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
self.tree = KDTree([[wp.pose.pose.position.x, wp.pose.pose.position.y] for wp in self.waypoints.waypoints])
def traffic_cb(self, msg):
self.lights = msg.lights
def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
self.has_image = True
self.camera_image = msg
light_wp, state = self.process_traffic_lights()
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if state == TrafficLight.RED else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
def get_closest_waypoint(self, x, y):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
pose (Pose): position to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
return self.tree.query([x,y], 1)[1]
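    # Illustrative sketch (not part of the original source): the KDTree built in
    # waypoints_cb is what keeps this lookup fast. A standalone equivalent, with
    # made-up coordinates, would look like:
    #
    #   from scipy.spatial import KDTree
    #   tree = KDTree([[0.0, 0.0], [10.0, 0.0], [20.0, 5.0]])
    #   dist, idx = tree.query([9.0, 1.0], 1)   # idx == 1, the nearest waypoint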
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
if(not self.has_image):
self.prev_light_loc = None
return False
cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
#Get classification
return self.light_classifier.get_classification(cv_image)
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
closest_light = None
line_wp_idx = None
# List of positions that correspond to the line to stop in front of for a given intersection
stop_line_positions = self.config['stop_line_positions']
if(self.pose and self.tree):
car = self.pose.pose.position
car_wp_idx = self.get_closest_waypoint(car.x, car.y)
#TODO find the closest visible traffic light (if one exists)
diff = len(self.waypoints.waypoints)
for i, light in enumerate(self.lights):
# Get idx of stopline
line = stop_line_positions[i]
temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
d = temp_wp_idx - car_wp_idx
if d >= 0 and d < diff:
diff = d
closest_light = light
line_wp_idx = temp_wp_idx
if closest_light:
state = self.get_light_state(closest_light)
return line_wp_idx, state
return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
|
the-stack_0_24445 | import spectra
import nose
def test_polylinear():
"""
via: https://github.com/jsvine/spectra/issues/4
"""
colors = ['yellow', 'red', 'black']
domain = [0, 50, 100]
color_scale = spectra.scale(colors).domain(domain)
r = color_scale.range(5)
results = [ c.hexcode for c in r ]
goal = ['#ffff00', '#ff8000', '#ff0000', '#800000', '#000000']
assert(results == goal)
@nose.tools.raises(ValueError)
def test_polylinear_fail():
colors = ['yellow', 'red', 'black']
domain = [ 0, 50 ] # Domain has one too few items
spectra.scale(colors).domain(domain)
|
the-stack_0_24446 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Spunky Bot - An automated game server bot
http://github.com/spunkybot/spunkybot
Author: Alexander Kress
This program is released under the MIT License. See LICENSE for more details.
## About ##
Spunky Bot is a lightweight game server administration bot and RCON tool,
inspired by the eb2k9 bot by Shawn Haggard.
The purpose of Spunky Bot is to administrate an Urban Terror 4.1 / 4.2 / 4.3
server and provide statistics data for players.
## Configuration ##
Modify the UrT server config as follows:
* seta g_logsync "1"
* seta g_loghits "1"
* seta g_friendlyfire "2"
Modify the files '/conf/settings.conf' and '/conf/rules.conf'
Run the bot: python spunky.py
"""
__version__ = '1.13.0.dev0'
### IMPORTS
import os
import time
import sqlite3
import math
import textwrap
import random
import ConfigParser
import logging.handlers
from Queue import Queue
from threading import Thread
from threading import RLock
import lib.pygeoip as pygeoip
import lib.schedule as schedule
from lib.pyquake3 import PyQuake3
# Get an instance of a logger
logger = logging.getLogger('spunkybot')
logger.setLevel(logging.DEBUG)
logger.propagate = False
# Bot player number
BOT_PLAYER_NUM = 1022
# RCON Delay in seconds, recommended range: 0.18 - 0.33
RCON_DELAY = 0.3
COMMANDS = {'help': {'desc': 'display all available commands', 'syntax': '^7Usage: ^2!help', 'level': 0, 'short': 'h'},
'forgive': {'desc': 'forgive a player for team killing', 'syntax': '^7Usage: ^2!forgive ^7[<name>]', 'level': 0, 'short': 'f'},
'forgiveall': {'desc': 'forgive all team kills', 'syntax': '^7Usage: ^2!forgiveall', 'level': 0, 'short': 'fa'},
'forgivelist': {'desc': 'list all players who killed you', 'syntax': '^7Usage: ^2!forgivelist', 'level': 0, 'short': 'fl'},
'forgiveprev': {'desc': 'forgive last team kill', 'syntax': '^7Usage: ^2!forgiveprev', 'level': 0, 'short': 'fp'},
'grudge': {'desc': 'grudge a player for team killing, a grudged player will not be forgiven', 'syntax': '^7Usage: ^2!grudge ^7[<name>]', 'level': 0},
'bombstats': {'desc': 'display Bomb stats', 'syntax': '^7Usage: ^2!bombstats', 'level': 0},
'ctfstats': {'desc': 'display Capture the Flag stats', 'syntax': '^7Usage: ^2!ctfstats', 'level': 0},
'freezestats': {'desc': 'display freeze/thawout stats', 'syntax': '^7Usage: ^2!freezestats', 'level': 0},
'hestats': {'desc': 'display HE grenade kill stats', 'syntax': '^7Usage: ^2!hestats', 'level': 0},
'hits': {'desc': 'display hit stats', 'syntax': '^7Usage: ^2!hits', 'level': 0},
'hs': {'desc': 'display headshot counter', 'syntax': '^7Usage: ^2!hs', 'level': 0},
'knife': {'desc': 'display knife kill stats', 'syntax': '^7Usage: ^2!knife', 'level': 0},
'register': {'desc': 'register yourself as a basic user', 'syntax': '^7Usage: ^2!register', 'level': 0},
'spree': {'desc': 'display current kill streak', 'syntax': '^7Usage: ^2!spree', 'level': 0},
'stats': {'desc': 'display current map stats', 'syntax': '^7Usage: ^2!stats', 'level': 0},
'teams': {'desc': 'balance teams', 'syntax': '^7Usage: ^2!teams', 'level': 0},
'time': {'desc': 'display the current server time', 'syntax': '^7Usage: ^2!time', 'level': 0},
# user commands, level 1
'regtest': {'desc': 'display current user status', 'syntax': '^7Usage: ^2!regtest', 'level': 1},
'xlrstats': {'desc': 'display full player statistics', 'syntax': '^7Usage: ^2!xlrstats ^7[<name>]', 'level': 1},
'xlrtopstats': {'desc': 'display the top players', 'syntax': '^7Usage: ^2!xlrtopstats', 'level': 1, 'short': 'topstats'},
# moderator commands, level 20
'admintest': {'desc': 'display current admin status', 'syntax': '^7Usage: ^2!admintest', 'level': 20},
'country': {'desc': 'get the country of a player', 'syntax': '^7Usage: ^2!country ^7<name>', 'level': 20},
'lastmaps': {'desc': 'list the last played maps', 'syntax': '^7Usage: ^2!lastmaps', 'level': 20},
'lastvote': {'desc': 'display information about the last called vote', 'syntax': '^7Usage: ^2!lastvote', 'level': 20},
'leveltest': {'desc': 'get the admin level for a given player or myself', 'syntax': '^7Usage: ^2!leveltest ^7[<name>]', 'level': 20, 'short': 'lt'},
'list': {'desc': 'list all connected players', 'syntax': '^7Usage: ^2!list', 'level': 20},
'locate': {'desc': 'display geolocation info of a player', 'syntax': '^7Usage: ^2!locate ^7<name>', 'level': 20, 'short': 'lc'},
'mute': {'desc': 'mute or un-mute a player', 'syntax': '^7Usage: ^2!mute ^7<name> [<duration>]', 'level': 20},
'nextmap': {'desc': 'display the next map in rotation', 'syntax': '^7Usage: ^2!nextmap', 'level': 20},
'poke': {'desc': 'notify a player that he needs to move', 'syntax': '^7Usage: ^2!poke ^7<name>', 'level': 20},
'seen': {'desc': 'display when a player was last seen', 'syntax': '^7Usage: ^2!seen ^7<name>', 'level': 20},
'shuffleteams': {'desc': 'shuffle the teams', 'syntax': '^7Usage: ^2!shuffleteams', 'level': 20, 'short': 'shuffle'},
'spec': {'desc': 'move yourself to spectator', 'syntax': '^7Usage: ^2!spec', 'level': 20, 'short': 'sp'},
'warn': {'desc': 'warn player', 'syntax': '^7Usage: ^2!warn ^7<name> [<reason>]', 'level': 20, 'short': 'w'},
'warninfo': {'desc': 'display how many warnings a player has', 'syntax': '^7Usage: ^2!warninfo ^7<name>', 'level': 20, 'short': 'wi'},
'warnremove': {'desc': "remove a player's last warning", 'syntax': '^7Usage: ^2!warnremove ^7<name>', 'level': 20, 'short': 'wr'},
'warns': {'desc': 'list the warnings', 'syntax': '^7Usage: ^2!warns', 'level': 20},
'warntest': {'desc': 'test a warning', 'syntax': '^7Usage: ^2!warntest ^7<warning>', 'level': 20},
# admin commands, level 40
'admins': {'desc': 'list all the online admins', 'syntax': '^7Usage: ^2!admins', 'level': 40},
'afk': {'desc': 'force a player to spec, because he is away from keyboard', 'syntax': '^7Usage: ^2!afk ^7<name>', 'level': 40},
'aliases': {'desc': 'list the aliases of a player', 'syntax': '^7Usage: ^2!aliases ^7<name>', 'level': 40, 'short': 'alias'},
'bigtext': {'desc': 'display big message on screen', 'syntax': '^7Usage: ^2!bigtext ^7<text>', 'level': 40},
'exit': {'desc': 'display last disconnected player', 'syntax': '^7Usage: ^2!exit', 'level': 40},
'find': {'desc': 'display the slot number of a player', 'syntax': '^7Usage: ^2!find ^7<name>', 'level': 40},
'force': {'desc': 'force a player to the given team', 'syntax': '^7Usage: ^2!force ^7<name> <blue/red/spec> [<lock>]', 'level': 40},
'kick': {'desc': 'kick a player', 'syntax': '^7Usage: ^2!kick ^7<name> <reason>', 'level': 40, 'short': 'k'},
'nuke': {'desc': 'nuke a player', 'syntax': '^7Usage: ^2!nuke ^7<name>', 'level': 40},
'regulars': {'desc': 'display the regular players online', 'syntax': '^7Usage: ^2!regulars', 'level': 40, 'short': 'regs'},
'say': {'desc': 'say a message to all players', 'syntax': '^7Usage: ^2!say ^7<text>', 'level': 40, 'short': '!!'},
'tell': {'desc': 'tell a message to a specific player', 'syntax': '^7Usage: ^2!tell ^7<name> <text>', 'level': 40},
'tempban': {'desc': 'ban a player temporary for the given period of 1 sec to 3 days', 'syntax': '^7Usage: ^2!tempban ^7<name> <duration> [<reason>]', 'level': 40, 'short': 'tb'},
'warnclear': {'desc': 'clear the player warnings', 'syntax': '^7Usage: ^2!warnclear ^7<name>', 'level': 40, 'short': 'wc'},
# fulladmin commands, level 60
'ban': {'desc': 'ban a player for several days', 'syntax': '^7Usage: ^2!ban ^7<name> <reason>', 'level': 60, 'short': 'b'},
'baninfo': {'desc': 'display active bans of a player', 'syntax': '^7Usage: ^2!baninfo ^7<name>', 'level': 60, 'short': 'bi'},
'ci': {'desc': 'kick player with connection interrupt', 'syntax': '^7Usage: ^2!ci ^7<name>', 'level': 60},
'forgiveclear': {'desc': "clear a player's team kills", 'syntax': '^7Usage: ^2!forgiveclear ^7[<name>]', 'level': 60, 'short': 'fc'},
'forgiveinfo': {'desc': "display a player's team kills", 'syntax': '^7Usage: ^2!forgiveinfo ^7<name>', 'level': 60, 'short': 'fi'},
'ping': {'desc': 'display the ping of a player', 'syntax': '^7Usage: ^2!ping ^7<name>', 'level': 60},
'id': {'desc': 'show the IP, guid and authname of a player', 'syntax': '^7Usage: ^2!id ^7<name>', 'level': 60},
'kickbots': {'desc': 'kick all bots', 'syntax': '^7Usage: ^2!kickbots', 'level': 60, 'short': 'kb'},
'rain': {'desc': 'enables or disables rain', 'syntax': '^7Usage: ^2!rain ^7<on/off>', 'level': 60},
'scream': {'desc': 'scream a message in different colors to all players', 'syntax': '^7Usage: ^2!scream ^7<text>', 'level': 60},
'slap': {'desc': 'slap a player (a number of times)', 'syntax': '^7Usage: ^2!slap ^7<name> [<amount>]', 'level': 60},
'status': {'desc': 'report the status of the bot', 'syntax': '^7Usage: ^2!status', 'level': 60},
'swap': {'desc': 'swap teams for player A and B', 'syntax': '^7Usage: ^2!swap ^7<name1> [<name2>]', 'level': 60},
'version': {'desc': 'display the version of the bot', 'syntax': '^7Usage: ^2!version', 'level': 60},
'veto': {'desc': 'stop voting process', 'syntax': '^7Usage: ^2!veto', 'level': 60},
# senioradmin commands, level 80
'addbots': {'desc': 'add up to 4 bots to the game', 'syntax': '^7Usage: ^2!addbots', 'level': 80},
'banall': {'desc': 'ban all players matching pattern', 'syntax': '^7Usage: ^2!banall ^7<pattern> [<reason>]', 'level': 80, 'short': 'ball'},
'banlist': {'desc': 'display the last active 10 bans', 'syntax': '^7Usage: ^2!banlist', 'level': 80},
'bots': {'desc': 'enables or disables bot support', 'syntax': '^7Usage: ^2!bots ^7<on/off>', 'level': 80},
'cyclemap': {'desc': 'cycle to the next map', 'syntax': '^7Usage: ^2!cyclemap', 'level': 80},
'exec': {'desc': 'execute given config file', 'syntax': '^7Usage: ^2!exec ^7<filename>', 'level': 80},
'gear': {'desc': 'set allowed weapons', 'syntax': '^7Usage: ^2!gear ^7<default/all/knife/pistol/shotgun/sniper/magnum/mac>', 'level': 80},
'instagib': {'desc': 'set Instagib mode', 'syntax': '^7Usage: ^2!instagib ^7<on/off>', 'level': 80},
'kickall': {'desc': 'kick all players matching pattern', 'syntax': '^7Usage: ^2!kickall ^7<pattern> [<reason>]', 'level': 80, 'short': 'kall'},
'kill': {'desc': 'kill a player', 'syntax': '^7Usage: ^2!kill ^7<name>', 'level': 80},
'clear': {'desc': 'clear all player warnings', 'syntax': '^7Usage: ^2!clear', 'level': 80, 'short': 'kiss'},
'lastadmin': {'desc': 'display the last disconnected admin', 'syntax': '^7Usage: ^2!lastadmin', 'level': 80},
'lastbans': {'desc': 'list the last 4 bans', 'syntax': '^7Usage: ^2!lastbans', 'level': 80, 'short': 'bans'},
'lookup': {'desc': 'search for a player in the database', 'syntax': '^7Usage: ^2!lookup ^7<name>', 'level': 80, 'short': 'l'},
'makereg': {'desc': 'make a player a regular (Level 2) user', 'syntax': '^7Usage: ^2!makereg ^7<name>', 'level': 80, 'short': 'mr'},
'map': {'desc': 'load given map', 'syntax': '^7Usage: ^2!map ^7<ut4_name>', 'level': 80},
'mapcycle': {'desc': 'list the map rotation', 'syntax': '^7Usage: ^2!mapcycle', 'level': 80},
'maps': {'desc': 'display all available maps', 'syntax': '^7Usage: ^2!maps', 'level': 80},
'maprestart': {'desc': 'restart the map', 'syntax': '^7Usage: ^2!maprestart', 'level': 80, 'short': 'restart'},
'moon': {'desc': 'activate low gravity mode (Moon mode)', 'syntax': '^7Usage: ^2!moon ^7<on/off>', 'level': 80, 'short': 'lowgravity'},
'permban': {'desc': 'ban a player permanent', 'syntax': '^7Usage: ^2!permban ^7<name> <reason>', 'level': 80, 'short': 'pb'},
'putgroup': {'desc': 'add a client to a group', 'syntax': '^7Usage: ^2!putgroup ^7<name> <group>', 'level': 80},
'rebuild': {'desc': 'sync up all available maps', 'syntax': '^7Usage: ^2!rebuild', 'level': 80},
'setgravity': {'desc': 'set the gravity (default: 800)', 'syntax': '^7Usage: ^2!setgravity ^7<value>', 'level': 80},
'setnextmap': {'desc': 'set the next map', 'syntax': '^7Usage: ^2!setnextmap ^7<ut4_name>', 'level': 80},
'swapteams': {'desc': 'swap the current teams', 'syntax': '^7Usage: ^2!swapteams', 'level': 80},
'unban': {'desc': 'unban a player from the database', 'syntax': '^7Usage: ^2!unban ^7<@ID>', 'level': 80},
'unreg': {'desc': 'remove a player from the regular group', 'syntax': '^7Usage: ^2!unreg ^7<name>', 'level': 80},
# superadmin commands, level 90
'bomb': {'desc': 'change gametype to Bomb', 'syntax': '^7Usage: ^2!bomb', 'level': 90},
'ctf': {'desc': 'change gametype to Capture the Flag', 'syntax': '^7Usage: ^2!ctf', 'level': 90},
'ffa': {'desc': 'change gametype to Free For All', 'syntax': '^7Usage: ^2!ffa', 'level': 90},
'gungame': {'desc': 'change gametype to Gun Game', 'syntax': '^7Usage: ^2!gungame', 'level': 90},
'jump': {'desc': 'change gametype to Jump', 'syntax': '^7Usage: ^2!jump', 'level': 90},
'lms': {'desc': 'change gametype to Last Man Standing', 'syntax': '^7Usage: ^2!lms', 'level': 90},
'tdm': {'desc': 'change gametype to Team Deathmatch', 'syntax': '^7Usage: ^2!tdm', 'level': 90},
'ts': {'desc': 'change gametype to Team Survivor', 'syntax': '^7Usage: ^2!ts', 'level': 90},
'ungroup': {'desc': 'remove admin level from a player', 'syntax': '^7Usage: ^2!ungroup ^7<name>', 'level': 90},
'password': {'desc': 'set private server password', 'syntax': '^7Usage: ^2!password ^7[<password>]', 'level': 90},
'reload': {'desc': 'reload map', 'syntax': '^7Usage: ^2!reload', 'level': 90}}
REASONS = {'obj': 'go for objective',
'camp': 'stop camping',
'spam': 'do not spam, shut-up!',
'lang': 'bad language',
'racism': 'racism is not tolerated',
'ping': 'fix your ping',
'afk': 'away from keyboard',
'tk': 'stop team killing',
'sk': 'stop spawn killing',
'spec': 'spectator too long on full server',
'score': 'score too low for this server',
'ci': 'connection interrupted',
'999': 'connection interrupted',
'whiner': 'stop complaining all the time',
'skill': 'skill too low for this server',
'name': 'do not use offensive names',
'wh': 'wallhack',
'insult': 'stop insulting',
'autojoin': 'use auto-join',
'abuse': 'stop abusing others',
'teams': 'keep the teams even'}
### CLASS Log Parser ###
class LogParser(object):
"""
log file parser
"""
def __init__(self, config_file):
"""
create a new instance of LogParser
@param config_file: The full path of the bot configuration file
@type config_file: String
"""
# hit zone support for UrT > 4.2.013
self.hit_points = {0: "HEAD", 1: "HEAD", 2: "HELMET", 3: "TORSO", 4: "VEST", 5: "LEFT_ARM", 6: "RIGHT_ARM",
7: "GROIN", 8: "BUTT", 9: "LEFT_UPPER_LEG", 10: "RIGHT_UPPER_LEG", 11: "LEFT_LOWER_LEG",
12: "RIGHT_LOWER_LEG", 13: "LEFT_FOOT", 14: "RIGHT_FOOT"}
self.hit_item = {1: "UT_MOD_KNIFE", 2: "UT_MOD_BERETTA", 3: "UT_MOD_DEAGLE", 4: "UT_MOD_SPAS", 5: "UT_MOD_MP5K",
6: "UT_MOD_UMP45", 8: "UT_MOD_LR300", 9: "UT_MOD_G36", 10: "UT_MOD_PSG1", 14: "UT_MOD_SR8",
15: "UT_MOD_AK103", 17: "UT_MOD_NEGEV", 19: "UT_MOD_M4", 20: "UT_MOD_GLOCK", 21: "UT_MOD_COLT1911",
22: "UT_MOD_MAC11", 23: "UT_MOD_BLED"}
self.death_cause = {1: "MOD_WATER", 3: "MOD_LAVA", 5: "UT_MOD_TELEFRAG", 6: "MOD_FALLING", 7: "UT_MOD_SUICIDE",
9: "MOD_TRIGGER_HURT", 10: "MOD_CHANGE_TEAM", 12: "UT_MOD_KNIFE", 13: "UT_MOD_KNIFE_THROWN",
14: "UT_MOD_BERETTA", 15: "UT_MOD_DEAGLE", 16: "UT_MOD_SPAS", 17: "UT_MOD_UMP45", 18: "UT_MOD_MP5K",
19: "UT_MOD_LR300", 20: "UT_MOD_G36", 21: "UT_MOD_PSG1", 22: "UT_MOD_HK69", 23: "UT_MOD_BLED",
24: "UT_MOD_KICKED", 25: "UT_MOD_HEGRENADE", 28: "UT_MOD_SR8", 30: "UT_MOD_AK103",
31: "UT_MOD_SPLODED", 32: "UT_MOD_SLAPPED", 33: "UT_MOD_SMITED", 34: "UT_MOD_BOMBED",
35: "UT_MOD_NUKED", 36: "UT_MOD_NEGEV", 37: "UT_MOD_HK69_HIT", 38: "UT_MOD_M4",
39: "UT_MOD_GLOCK", 40: "UT_MOD_COLT1911", 41: "UT_MOD_MAC11"}
# RCON commands for the different admin roles
self.user_cmds = []
self.mod_cmds = []
self.admin_cmds = []
self.fulladmin_cmds = []
self.senioradmin_cmds = []
self.superadmin_cmds = []
# dictionary of shortcut commands
self.shortcut_cmd = {}
for key, value in COMMANDS.iteritems():
if 'short' in value:
self.shortcut_cmd[value['short']] = key
if value['level'] == 20:
self.mod_cmds.append(key)
self.admin_cmds.append(key)
self.fulladmin_cmds.append(key)
self.senioradmin_cmds.append(key)
self.superadmin_cmds.append(key)
elif value['level'] == 40:
self.admin_cmds.append(key)
self.fulladmin_cmds.append(key)
self.senioradmin_cmds.append(key)
self.superadmin_cmds.append(key)
elif value['level'] == 60:
self.fulladmin_cmds.append(key)
self.senioradmin_cmds.append(key)
self.superadmin_cmds.append(key)
elif value['level'] == 80:
self.senioradmin_cmds.append(key)
self.superadmin_cmds.append(key)
elif value['level'] >= 90:
self.superadmin_cmds.append(key)
else:
self.user_cmds.append(key)
self.mod_cmds.append(key)
self.admin_cmds.append(key)
self.fulladmin_cmds.append(key)
self.senioradmin_cmds.append(key)
self.superadmin_cmds.append(key)
# alphabetic sort of the commands
self.user_cmds.sort()
self.mod_cmds.sort()
self.admin_cmds.sort()
self.fulladmin_cmds.sort()
self.senioradmin_cmds.sort()
self.superadmin_cmds.sort()
self.config_file = config_file
config = ConfigParser.ConfigParser()
config.read(config_file)
# enable/disable debug output
verbose = config.getboolean('bot', 'verbose') if config.has_option('bot', 'verbose') else False
# logging format
formatter = logging.Formatter('[%(asctime)s] %(levelname)-8s %(message)s', datefmt='%d.%m.%Y %H:%M:%S')
# console logging
console = logging.StreamHandler()
if not verbose:
console.setLevel(logging.INFO)
console.setFormatter(formatter)
# devel.log file
devel_log = logging.handlers.RotatingFileHandler(filename=os.path.join(HOME, 'devel.log'), maxBytes=2097152, backupCount=1, encoding='utf8')
devel_log.setLevel(logging.INFO)
devel_log.setFormatter(formatter)
# add logging handler
logger.addHandler(console)
logger.addHandler(devel_log)
logger.info("*** Spunky Bot v%s : www.spunkybot.de ***", __version__)
logger.info("Starting logging : OK")
logger.info("Loading config file : %s", config_file)
games_log = config.get('server', 'log_file')
self.ffa_lms_gametype = False
self.ctf_gametype = False
self.ts_gametype = False
self.tdm_gametype = False
self.bomb_gametype = False
self.freeze_gametype = False
self.ts_do_team_balance = False
self.allow_cmd_teams = True
self.urt_modversion = None
self.game = None
self.players_lock = RLock()
self.firstblood = False
self.firstnadekill = False
self.firstknifekill = False
self.firstteamkill = False
self.last_disconnected_player = None
self.last_admin = None
self.allow_nextmap_vote = True
self.failed_vote_timer = 0
self.last_vote = ''
self.default_gear = ''
# enable/disable autokick for team killing
self.tk_autokick = config.getboolean('bot', 'teamkill_autokick') if config.has_option('bot', 'teamkill_autokick') else True
self.allow_tk_bots = config.getboolean('bot', 'allow_teamkill_bots') if config.has_option('bot', 'allow_teamkill_bots') else False
# enable/disable autokick of players with low score
self.noob_autokick = config.getboolean('bot', 'noob_autokick') if config.has_option('bot', 'noob_autokick') else False
self.spawnkill_autokick = config.getboolean('bot', 'spawnkill_autokick') if config.has_option('bot', 'spawnkill_autokick') else False
self.kill_spawnkiller = config.getboolean('bot', 'instant_kill_spawnkiller') if config.has_option('bot', 'instant_kill_spawnkiller') else False
self.spawnkill_warn_time = config.getint('bot', 'spawnkill_warn_time') if config.has_option('bot', 'spawnkill_warn_time') else 3
# set the maximum allowed ping
self.max_ping = config.getint('bot', 'max_ping') if config.has_option('bot', 'max_ping') else 200
# kick spectator on full server
self.num_kick_specs = config.getint('bot', 'kick_spec_full_server') if config.has_option('bot', 'kick_spec_full_server') else 10
# set task frequency
self.task_frequency = config.getint('bot', 'task_frequency') if config.has_option('bot', 'task_frequency') else 60
self.warn_expiration = config.getint('bot', 'warn_expiration') if config.has_option('bot', 'warn_expiration') else 240
self.bad_words_autokick = config.getint('bot', 'bad_words_autokick') if config.has_option('bot', 'bad_words_autokick') else 0
# enable/disable message 'Player connected from...'
self.show_country_on_connect = config.getboolean('bot', 'show_country_on_connect') if config.has_option('bot', 'show_country_on_connect') else True
# enable/disable message 'Firstblood / first nade kill...'
self.show_first_kill_msg = config.getboolean('bot', 'show_first_kill') if config.has_option('bot', 'show_first_kill') else True
self.show_hit_stats_msg = config.getboolean('bot', 'show_hit_stats_respawn') if config.has_option('bot', 'show_hit_stats_respawn') else True
self.show_multikill_msg = config.getboolean('bot', 'show_multi_kill') if config.has_option('bot', 'show_multi_kill') else True
# set teams autobalancer
self.teams_autobalancer = config.getboolean('bot', 'autobalancer') if config.has_option('bot', 'autobalancer') else False
self.allow_cmd_teams_round_end = config.getboolean('bot', 'allow_teams_round_end') if config.has_option('bot', 'allow_teams_round_end') else False
self.limit_nextmap_votes = config.getboolean('bot', 'limit_nextmap_votes') if config.has_option('bot', 'limit_nextmap_votes') else False
self.vote_delay = config.getint('bot', 'vote_delay') if config.has_option('bot', 'vote_delay') else 0
self.spam_bomb_planted_msg = config.getboolean('bot', 'spam_bomb_planted') if config.has_option('bot', 'spam_bomb_planted') else False
self.kill_survived_opponents = config.getboolean('bot', 'kill_survived_opponents') if config.has_option('bot', 'kill_survived_opponents') else False
self.spam_knife_kills_msg = config.getboolean('bot', 'spam_knife_kills') if config.has_option('bot', 'spam_knife_kills') else False
self.spam_nade_kills_msg = config.getboolean('bot', 'spam_nade_kills') if config.has_option('bot', 'spam_nade_kills') else False
self.spam_headshot_hits_msg = config.getboolean('bot', 'spam_headshot_hits') if config.has_option('bot', 'spam_headshot_hits') else False
self.reset_headshot_hits_mapcycle = config.getboolean('bot', 'reset_headshot_hits_mapcycle') if config.has_option('bot', 'reset_headshot_hits_mapcycle') else True
self.reset_kill_spree_mapcycle = config.getboolean('bot', 'reset_kill_spree_mapcycle') if config.has_option('bot', 'reset_kill_spree_mapcycle') else True
ban_duration = config.getint('bot', 'ban_duration') if config.has_option('bot', 'ban_duration') else 7
self.ban_duration = ban_duration if ban_duration > 0 else 1
# support for low gravity server
self.support_lowgravity = config.getboolean('lowgrav', 'support_lowgravity') if config.has_option('lowgrav', 'support_lowgravity') else False
self.gravity = config.getint('lowgrav', 'gravity') if config.has_option('lowgrav', 'gravity') else 800
self.explode_time = "40"
# log that the configuration file has been loaded
logger.info("Configuration loaded : OK")
# enable/disable option to get Head Admin by checking existence of head admin in database
curs.execute("SELECT COUNT(*) FROM `xlrstats` WHERE `admin_role` = 100")
self.iamgod = True if int(curs.fetchone()[0]) < 1 else False
logger.info("Connecting to Database: OK")
logger.debug("Cmd !iamgod available : %s", self.iamgod)
self.uptime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
# Rotating Messages and Rules
if config.has_option('rules', 'show_rules') and config.getboolean('rules', 'show_rules'):
self.output_rules = config.get('rules', 'display') if config.has_option('rules', 'display') else "chat"
rules_frequency = config.getint('rules', 'rules_frequency') if config.has_option('rules', 'rules_frequency') else 90
self.rules_file = os.path.join(HOME, 'conf', 'rules.conf')
self.rules_frequency = rules_frequency if rules_frequency > 0 else 10
if os.path.isfile(self.rules_file):
self.thread_rotate()
logger.info("Load rotating messages: OK")
else:
logger.error("ERROR: Rotating messages will be ignored, file '%s' has not been found", self.rules_file)
# Parse Game log file
try:
# open game log file
self.log_file = open(games_log, 'r')
except IOError:
logger.error("ERROR: The Gamelog file '%s' has not been found", games_log)
logger.error("*** Aborting Spunky Bot ***")
else:
# go to the end of the file
self.log_file.seek(0, 2)
# start parsing the games logfile
logger.info("Parsing Gamelog file : %s", games_log)
self.read_log()
def thread_rotate(self):
"""
Thread process for starting method rotate_messages
"""
processor = Thread(target=self.rotating_messages)
processor.setDaemon(True)
processor.start()
def rotating_messages(self):
"""
display rotating messages and rules
"""
# initial wait
time.sleep(30)
while 1:
with open(self.rules_file, 'r') as filehandle:
rotation_msg = filehandle.readlines()
if not rotation_msg:
break
for line in rotation_msg:
# display rule
with self.players_lock:
if "@admins" in line:
self.game.rcon_say(self.get_admins_online())
elif "@admincount" in line:
self.game.rcon_say(self.get_admin_count())
elif "@nextmap" in line:
self.game.rcon_say(self.get_nextmap())
elif "@time" in line:
self.game.rcon_say("^7Time: %s" % time.strftime("%H:%M", time.localtime(time.time())))
elif "@bigtext" in line:
self.game.rcon_bigtext("^7%s" % line.split('@bigtext')[-1].strip())
else:
if self.output_rules == 'chat':
self.game.rcon_say("^2%s" % line.strip())
elif self.output_rules == 'bigtext':
self.game.rcon_bigtext("^2%s" % line.strip())
else:
self.game.send_rcon("^2%s" % line.strip())
# wait for given delay in the config file
time.sleep(self.rules_frequency)
def find_game_start(self):
"""
find InitGame start
"""
seek_amount = 768
# search within the specified range for the InitGame message
start_pos = self.log_file.tell() - seek_amount
end_pos = start_pos + seek_amount
try:
self.log_file.seek(start_pos)
except IOError:
logger.error("ERROR: The games.log file is empty, ignoring game type and start")
# go to the end of the file
self.log_file.seek(0, 2)
game_start = True
else:
game_start = False
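        # scan backwards through the log in seek_amount sized chunks until an InitGame entry is found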
while not game_start:
while self.log_file:
line = self.log_file.readline()
tmp = line.split()
if len(tmp) > 1 and tmp[1] == "InitGame:":
game_start = True
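                    # detect the UrT mod version and game type from the InitGame server info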
if 'g_modversion\\4.3' in line:
self.hit_item.update({23: "UT_MOD_FRF1", 24: "UT_MOD_BENELLI", 25: "UT_MOD_P90",
26: "UT_MOD_MAGNUM", 29: "UT_MOD_KICKED", 30: "UT_MOD_KNIFE_THROWN"})
self.death_cause.update({42: "UT_MOD_FRF1", 43: "UT_MOD_BENELLI", 44: "UT_MOD_P90", 45: "UT_MOD_MAGNUM",
46: "UT_MOD_TOD50", 47: "UT_MOD_FLAG", 48: "UT_MOD_GOOMBA"})
self.urt_modversion = 43
logger.info("Game modversion : 4.3")
elif 'g_modversion\\4.2' in line:
self.hit_item.update({23: "UT_MOD_BLED", 24: "UT_MOD_KICKED", 25: "UT_MOD_KNIFE_THROWN"})
self.death_cause.update({42: "UT_MOD_FLAG", 43: "UT_MOD_GOOMBA"})
self.urt_modversion = 42
logger.info("Game modversion : 4.2")
elif 'g_modversion\\4.1' in line:
# hit zone support for UrT 4.1
self.hit_points = {0: "HEAD", 1: "HELMET", 2: "TORSO", 3: "KEVLAR", 4: "ARMS", 5: "LEGS", 6: "BODY"}
self.hit_item.update({21: "UT_MOD_KICKED", 22: "UT_MOD_KNIFE_THROWN"})
self.death_cause.update({33: "UT_MOD_BOMBED", 34: "UT_MOD_NUKED", 35: "UT_MOD_NEGEV",
39: "UT_MOD_FLAG", 40: "UT_MOD_GOOMBA"})
self.urt_modversion = 41
logger.info("Game modversion : 4.1")
if 'g_gametype\\0\\' in line or 'g_gametype\\1\\' in line or 'g_gametype\\9\\' in line or 'g_gametype\\11\\' in line:
# disable teamkill event and some commands for FFA (0), LMS (1), Jump (9), Gun (11)
self.ffa_lms_gametype = True
elif 'g_gametype\\7\\' in line:
self.ctf_gametype = True
elif 'g_gametype\\4\\' in line or 'g_gametype\\5\\' in line:
self.ts_gametype = True
elif 'g_gametype\\3\\' in line:
self.tdm_gametype = True
elif 'g_gametype\\8\\' in line:
self.bomb_gametype = True
elif 'g_gametype\\10\\' in line:
self.freeze_gametype = True
# get default g_gear value
                    self.default_gear = line.split('g_gear\\')[-1].split('\\')[0] if 'g_gear\\' in line else ('' if self.urt_modversion > 41 else '0')
if self.log_file.tell() > end_pos:
break
elif not line:
break
if self.log_file.tell() < seek_amount:
self.log_file.seek(0, 0)
else:
cur_pos = start_pos - seek_amount
end_pos = start_pos
start_pos = cur_pos
if start_pos < 0:
start_pos = 0
self.log_file.seek(start_pos)
def read_log(self):
"""
read the logfile
"""
if self.task_frequency > 0:
# schedule the task
if self.task_frequency < 10:
                # avoid flooding when the configured delay is too short
schedule.every(10).seconds.do(self.taskmanager)
else:
schedule.every(self.task_frequency).seconds.do(self.taskmanager)
# schedule the task
schedule.every(2).hours.do(self.remove_expired_db_entries)
self.find_game_start()
# create instance of Game
self.game = Game(self.config_file, self.urt_modversion)
self.log_file.seek(0, 2)
while self.log_file:
schedule.run_pending()
line = self.log_file.readline()
if line:
self.parse_line(line)
else:
if not self.game.live:
self.game.go_live()
time.sleep(.125)
def remove_expired_db_entries(self):
"""
delete expired ban points
"""
# remove expired ban_points
curs.execute("DELETE FROM `ban_points` WHERE `expires` < '{}'".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))))
conn.commit()
def taskmanager(self):
"""
- check warnings and kick players with too many warnings
- check for spectators and set warning
- check for players with low score and set warning
"""
try:
with self.players_lock:
# get number of connected players
counter = self.game.get_number_players()
# check amount of warnings and kick player if needed
for player in self.game.players.itervalues():
player_num = player.get_player_num()
if player_num == BOT_PLAYER_NUM:
continue
player_name = player.get_name()
player_admin_role = player.get_admin_role()
# clear expired warnings
if self.warn_expiration > 0 and player.get_warning() > 0 and player.get_last_warn_time() and player.get_last_warn_time() + self.warn_expiration < time.time():
player.clear_warning()
# kick player with 3 or more warnings, Admins will never get kicked
if player.get_warning() > 2 and player_admin_role < 40:
if 'spectator' in player.get_last_warn_msg():
kick_msg = reason = "spectator too long on full server"
elif 'ping' in player.get_last_warn_msg():
kick_msg = "ping too high for this server ^7[^4%s^7]" % player.get_ping_value()
reason = "fix your ping"
elif 'score' in player.get_last_warn_msg():
kick_msg = reason = "score too low for this server"
elif 'team killing' in player.get_last_warn_msg():
kick_msg = reason = "team killing over limit"
player.add_ban_point('auto-kick for team killing', 600)
else:
kick_msg = reason = "too many warnings"
self.game.rcon_say("^2%s ^7was kicked, %s" % (player_name, kick_msg))
self.game.kick_player(player_num, reason=reason)
continue
# check for spectators and set warning
if self.num_kick_specs > 0 and player_admin_role < 20:
# ignore player with name prefix GTV-
if 'GTV-' in player_name:
continue
# if player is spectator on full server, inform player and increase warn counter
# GTV or Moderator or higher levels will not get the warning
elif counter > self.num_kick_specs and player.get_team() == 3 and player.get_time_joined() < (time.time() - 30):
player.add_warning(warning='spectator too long on full server', timer=False)
logger.debug("%s is spectator too long on full server", player_name)
warnmsg = "^1WARNING ^7[^3%d^7]: You are spectator too long on full server" % player.get_warning()
self.game.rcon_tell(player_num, warnmsg, False)
# reset spec warning
else:
player.clear_specific_warning('spectator too long on full server')
# check for players with low score and set warning
if self.noob_autokick and player_admin_role < 2 and player.get_ip_address() != '0.0.0.0':
kills = player.get_kills()
deaths = player.get_deaths()
ratio = round(float(kills) / float(deaths), 2) if deaths > 0 else 1.0
# if player ratio is too low, inform player and increase warn counter
# Regulars or higher levels will not get the warning
if kills > 0 and ratio < 0.33:
player.add_warning(warning='score too low for this server', timer=False)
logger.debug("Score of %s is too low, ratio: %s", player_name, ratio)
warnmsg = "^1WARNING ^7[^3%d^7]: Your score is too low for this server" % player.get_warning()
self.game.rcon_tell(player_num, warnmsg, False)
else:
player.clear_specific_warning('score too low for this server')
# warn player with 3 warnings, Admins will never get the alert warning
if player.get_warning() == 3 and player_admin_role < 40:
self.game.rcon_say("^1ALERT: ^2%s ^7auto-kick from warnings if not cleared" % player_name)
# check for player with high ping
self.check_player_ping()
except Exception as err:
logger.error(err, exc_info=True)
def check_player_ping(self):
"""
check ping of all players and set warning for high ping user
"""
if self.max_ping > 0:
# rcon update status
self.game.quake.rcon_update()
for player in self.game.quake.players:
# if ping is too high, increase warn counter, Admins or higher levels will not get the warning
try:
ping_value = player.ping
gameplayer = self.game.players[player.num]
except KeyError:
continue
else:
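                    # ping values of 999 and above are ignored (assumption: the client is still connecting or not responding)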
if self.max_ping < ping_value < 999 and gameplayer.get_admin_role() < 40:
gameplayer.add_high_ping(ping_value)
self.game.rcon_tell(player.num, "^1WARNING ^7[^3%d^7]: Your ping is too high [^4%d^7]. ^3The maximum allowed ping is %d." % (gameplayer.get_warning(), ping_value, self.max_ping), False)
else:
gameplayer.clear_specific_warning('fix your ping')
def parse_line(self, string):
"""
parse the logfile and search for specific action
"""
line = string[7:]
tmp = line.split(":", 1)
line = tmp[1].strip() if len(tmp) > 1 else tmp[0].strip()
option = {'InitGame': self.new_game, 'Warmup': self.handle_warmup, 'InitRound': self.handle_initround,
'Exit': self.handle_exit, 'say': self.handle_say, 'sayteam': self.handle_say, 'saytell': self.handle_saytell,
'ClientUserinfo': self.handle_userinfo, 'ClientUserinfoChanged': self.handle_userinfo_changed,
'ClientBegin': self.handle_begin, 'ClientDisconnect': self.handle_disconnect,
'SurvivorWinner': self.handle_teams_ts_mode, 'Kill': self.handle_kill, 'Hit': self.handle_hit,
'Freeze': self.handle_freeze, 'ThawOutFinished': self.handle_thawout, 'ClientSpawn': self.handle_spawn,
'Flag': self.handle_flag, 'FlagCaptureTime': self.handle_flagcapturetime,
'VotePassed': self.handle_vote_passed, 'VoteFailed': self.handle_vote_failed, 'Callvote': self.handle_callvote}
try:
action = tmp[0].strip()
if action in option:
option[action](line)
elif 'Bomb' in action:
self.handle_bomb(line)
elif 'Pop' in action:
self.handle_bomb_exploded()
except (IndexError, KeyError):
pass
except Exception as err:
logger.error(err, exc_info=True)
def explode_line(self, line):
"""
        split a backslash separated key/value string into a dictionary
"""
arr = line.lstrip().lstrip('\\').split('\\')
key = True
key_val = None
values = {}
for item in arr:
if key:
key_val = item
key = False
else:
values[key_val.rstrip()] = item.rstrip()
key_val = None
key = True
return values
def handle_vote_passed(self, line):
"""
handle vote passed
"""
# nextmap vote
if "g_nextmap" in line:
self.game.next_mapname = line.split("g_nextmap")[-1].strip('"').strip()
self.game.rcon_say("^7Vote to set next map to '%s' ^2passed" % self.game.next_mapname)
self.allow_nextmap_vote = False
# cyclemap vote
elif "cyclemap" in line:
self.game.rcon_say("^7Vote to cycle map ^2passed")
# kick vote
elif "clientkickreason" in line:
self.game.rcon_say("^7Vote to kick %s ^2passed" % self.game.players[int(line.split('"clientkickreason "')[-1].strip('"'))].get_name())
def handle_vote_failed(self, line):
"""
handle vote failed
"""
# nextmap vote
if "g_nextmap" in line:
self.game.rcon_say("^7Vote to set next map to '%s' ^1failed" % line.split("g_nextmap")[-1].strip('"').strip())
if self.vote_delay:
self.failed_vote_timer = time.time() + self.vote_delay
# cyclemap vote
elif "cyclemap" in line:
self.game.rcon_say("^7Vote to cycle map ^1failed")
# kick vote
elif "clientkickreason" in line:
self.game.rcon_say("^7Vote to kick %s ^1failed" % self.game.players[int(line.split('"clientkickreason "')[-1].strip('"'))].get_name())
def handle_callvote(self, line):
"""
handle callvote
"""
if "g_nextmap" in line:
self.last_vote = "nextmap"
elif "cyclemap" in line:
self.last_vote = "cyclemap"
elif "clientkickreason" in line:
self.last_vote = "kick"
spam_msg = True
now = time.time()
if "g_nextmap" in line:
if self.limit_nextmap_votes and not self.allow_nextmap_vote:
self.game.send_rcon('veto')
self.game.rcon_say("^7Voting for Next Map is disabled until the end of this map")
spam_msg = False
if "map" in line and self.failed_vote_timer > now:
remaining_time = int(self.failed_vote_timer - now)
self.game.send_rcon('veto')
self.game.rcon_say("^7Map voting is disabled for ^2%d ^7seconds" % remaining_time)
if spam_msg:
self.game.rcon_bigtext("^7Press ^2F1 ^7or ^1F2 ^7to vote!")
if self.game.get_last_maps() and ('"g_nextmap' in line or '"map' in line):
self.game.rcon_say("^7Last Maps: ^3%s" % ", ".join(self.game.get_last_maps()))
def new_game(self, line):
"""
set-up a new game
"""
self.ffa_lms_gametype = True if ('g_gametype\\0\\' in line or 'g_gametype\\1\\' in line or 'g_gametype\\9\\' in line or 'g_gametype\\11\\' in line) else False
self.ctf_gametype = True if 'g_gametype\\7\\' in line else False
self.ts_gametype = True if ('g_gametype\\4\\' in line or 'g_gametype\\5\\' in line) else False
self.tdm_gametype = True if 'g_gametype\\3\\' in line else False
self.bomb_gametype = True if 'g_gametype\\8\\' in line else False
self.freeze_gametype = True if 'g_gametype\\10\\' in line else False
logger.debug("InitGame: Starting game...")
self.game.rcon_clear()
# reset the player stats
self.stats_reset()
# set the current map
self.game.set_current_map()
# load all available maps
self.game.set_all_maps()
# support for low gravity server
if self.support_lowgravity:
self.game.send_rcon("set g_gravity %d" % self.gravity)
# detonation timer
if self.bomb_gametype:
# bomb detonation timer
detonation_timer = self.game.get_cvar('g_bombexplodetime')
self.explode_time = detonation_timer if detonation_timer else "40"
# reset list of player who left server
self.last_disconnected_player = None
# allow nextmap votes
self.allow_nextmap_vote = True
self.failed_vote_timer = 0
def handle_spawn(self, line):
"""
handle client spawn
"""
player_num = int(line)
with self.players_lock:
self.game.players[player_num].set_alive(True)
def handle_flagcapturetime(self, line):
"""
handle flag capture time
"""
tmp = line.split(": ", 1)
player_num = int(tmp[0])
action = tmp[1]
if action.isdigit():
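            # the capture time is reported in milliseconds, convert it to seconds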
cap_time = round(float(action) / 1000, 2)
logger.debug("Player %d captured the flag in %s seconds", player_num, cap_time)
with self.players_lock:
self.game.players[player_num].set_flag_capture_time(cap_time)
def handle_warmup(self, line):
"""
handle warmup
"""
logger.debug("Warmup... %s", line)
self.allow_cmd_teams = True
def handle_initround(self, _):
"""
handle Init Round
"""
logger.debug("InitRound: Round started...")
if self.ctf_gametype:
with self.players_lock:
for player in self.game.players.itervalues():
player.reset_flag_stats()
elif self.ts_gametype or self.bomb_gametype or self.freeze_gametype:
if self.allow_cmd_teams_round_end:
self.allow_cmd_teams = False
def handle_exit(self, line):
"""
handle Exit of a match, show Awards, store user score in database and reset statistics
"""
logger.debug("Exit: %s", line)
self.handle_awards()
self.allow_cmd_teams = True
self.stats_reset(store_score=True)
def stats_reset(self, store_score=False):
"""
store user score in database if needed and reset the player statistics
"""
with self.players_lock:
for player in self.game.players.itervalues():
if store_score:
# store score in database
player.save_info()
# reset player statistics
player.reset(self.reset_headshot_hits_mapcycle, self.reset_kill_spree_mapcycle)
# reset team lock
player.set_team_lock(None)
# set first kill trigger
if self.show_first_kill_msg and not self.ffa_lms_gametype:
self.firstblood = True
self.firstnadekill = True
self.firstknifekill = True
self.firstteamkill = True
else:
self.firstblood = False
self.firstnadekill = False
self.firstknifekill = False
self.firstteamkill = False
def handle_userinfo(self, line):
"""
handle player user information, auto-kick known cheater ports or guids
"""
with self.players_lock:
player_num = int(line[:2].strip())
line = line[2:].lstrip("\\").lstrip()
values = self.explode_line(line)
challenge = True if 'challenge' in values else False
name = values['name'] if 'name' in values else "UnnamedPlayer"
ip_port = values['ip'] if 'ip' in values else "0.0.0.0:0"
auth = values['authl'] if 'authl' in values else ""
if 'cl_guid' in values:
guid = values['cl_guid']
elif 'skill' in values:
# bot connecting
guid = "BOT%d" % player_num
else:
guid = "None"
self.kick_player_reason(reason="Player with invalid GUID kicked", player_num=player_num)
try:
ip_address = ip_port.split(":")[0].strip()
port = ip_port.split(":")[1].strip()
except IndexError:
ip_address = ip_port.strip()
port = "27960"
# convert loopback/localhost address
if ip_address in ('loopback', 'localhost'):
ip_address = '127.0.0.1'
if player_num not in self.game.players:
player = Player(player_num, ip_address, guid, name, auth)
self.game.add_player(player)
# kick banned player
if player.get_ban_id():
self.kick_player_reason("^7%s ^1banned ^7(ID @%s): %s" % (player.get_name(), player.get_ban_id(), player.get_ban_msg()), player_num)
else:
if self.show_country_on_connect and player.get_country():
self.game.rcon_say("^7%s ^7connected from %s" % (player.get_name(), player.get_country()))
if self.game.players[player_num].get_guid() != guid:
self.game.players[player_num].set_guid(guid)
if self.game.players[player_num].get_authname() != auth:
self.game.players[player_num].set_authname(auth)
# kick player with hax guid 'kemfew'
if "KEMFEW" in guid.upper():
self.kick_player_reason("Cheater GUID detected for %s -> Player kicked" % name, player_num)
if "WORLD" in guid.upper() or "UNKNOWN" in guid.upper():
self.kick_player_reason("Invalid GUID detected for %s -> Player kicked" % name, player_num)
if challenge:
logger.debug("ClientUserinfo: Player %d %s is challenging the server and has the guid %s", player_num, self.game.players[player_num].get_name(), guid)
# kick player with hax port 1337
invalid_port_range = ["1337"]
if port in invalid_port_range:
self.kick_player_reason("Cheater Port detected for %s -> Player kicked" % name, player_num)
if self.last_disconnected_player and self.last_disconnected_player.get_guid() == self.game.players[player_num].get_guid():
self.last_disconnected_player = None
def kick_player_reason(self, reason, player_num):
"""
kick player for specific reason
"""
if self.urt_modversion > 41:
self.game.send_rcon('kick %d "%s"' % (player_num, reason))
else:
self.game.send_rcon("kick %d" % player_num)
self.game.send_rcon(reason)
def handle_userinfo_changed(self, line):
"""
handle player changes
"""
with self.players_lock:
player_num = int(line[:2].strip())
player = self.game.players[player_num]
line = line[2:].lstrip("\\")
try:
values = self.explode_line(line)
team_num = int(values['t'])
player.set_team(team_num)
name = values['n']
except KeyError:
team_num = 3
player.set_team(team_num)
name = self.game.players[player_num].get_name()
# set new name, if player changed name
if self.game.players[player_num].get_name() != name:
self.game.players[player_num].set_name(name)
# move locked player to the defined team, if player tries to change teams
team_lock = self.game.players[player_num].get_team_lock()
if team_lock and Player.teams[team_num] != team_lock:
self.game.rcon_forceteam(player_num, team_lock)
self.game.rcon_tell(player_num, "^3You are forced to: ^7%s" % team_lock)
logger.debug("ClientUserinfoChanged: Player %d %s joined team %s", player_num, name, Player.teams[team_num])
def handle_begin(self, line):
"""
handle player entering game
"""
with self.players_lock:
player_num = int(line)
player = self.game.players[player_num]
player_name = player.get_name()
player_auth = player.get_authname()
player_name = "%s [^5%s^7]" % (player_name, player_auth) if player_auth else player_name
player_id = player.get_player_id()
# Welcome message for registered players
if player.get_registered_user() and player.get_welcome_msg():
self.game.rcon_say("^3Everyone welcome back ^7%s^3, player number ^7#%s^3, to this server" % (player_name, player_id))
self.game.rcon_tell(player_num, "^7[^2Authed^7] Welcome back %s, you are ^2%s^7, last visit %s, you played %s times" % (player_name, player.roles[player.get_admin_role()], player.get_last_visit(), player.get_num_played()), False)
# disable welcome message for next rounds
player.disable_welcome_msg()
elif not player.get_registered_user() and not player.get_first_time() and player.get_welcome_msg():
self.game.rcon_tell(player_num, "^7Welcome back %s, you are player ^3#%s^7. ^3Type ^2!register ^3in chat to register and save your stats" % (player_name, player_id))
player.disable_welcome_msg()
elif player.get_first_time() and player.get_welcome_msg():
self.game.rcon_tell(player_num, "^7Welcome %s, this must be your first visit, you are player ^3#%s^7. ^3Type ^2!help ^3in chat for help" % (player_name, player_id))
player.disable_welcome_msg()
logger.debug("ClientBegin: Player %d %s has entered the game", player_num, player_name)
def handle_disconnect(self, line):
"""
handle player disconnect
"""
with self.players_lock:
player_num = int(line)
player = self.game.players[player_num]
player_name = player.get_name()
player.save_info()
player.reset()
self.last_disconnected_player = player
if player.get_admin_role() >= 40:
self.last_admin = player
del self.game.players[player_num]
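            # remove references to the disconnected player from the remaining players' TK and grudge lists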
for player in self.game.players.itervalues():
player.clear_tk(player_num)
player.clear_grudged_player(player_num)
logger.debug("ClientDisconnect: Player %d %s has left the game", player_num, player_name)
def handle_hit(self, line):
"""
handle all kind of hits
"""
with self.players_lock:
info = line.split(":", 1)[0].split()
hitter_id = int(info[1])
victim_id = int(info[0])
hitter = self.game.players[hitter_id]
hitter_name = hitter.get_name()
hitpoint = int(info[2])
hit_item = int(info[3])
# increase summary of all hits
hitter.set_all_hits()
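            # map the detailed hit locations to the three zone counters used in the statistics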
zones = {'TORSO': 'body', 'VEST': 'body', 'KEVLAR': 'body', 'BUTT': 'body', 'GROIN': 'body',
'LEGS': 'legs', 'LEFT_UPPER_LEG': 'legs', 'RIGHT_UPPER_LEG': 'legs',
'LEFT_LOWER_LEG': 'legs', 'RIGHT_LOWER_LEG': 'legs', 'LEFT_FOOT': 'legs', 'RIGHT_FOOT': 'legs',
'ARMS': 'arms', 'LEFT_ARM': 'arms', 'RIGHT_ARM': 'arms'}
if hitpoint in self.hit_points:
if self.hit_points[hitpoint] in ('HEAD', 'HELMET'):
hitter.headshot()
hitter_hs_count = hitter.get_headshots()
hs_msg = {5: 'watch out!',
10: 'awesome!',
15: 'unbelievable!',
20: '^1MANIAC!',
25: '^2AIMBOT?',
30: 'stop that',
35: 'stop that ^5NOW',
40: '^6OMG ^7stop it',
45: 'no mercy',
50: '^2HEAD RIPPER'}
if self.spam_headshot_hits_msg and hitter_hs_count in hs_msg:
self.game.rcon_bigtext("^3%s: ^2%d ^7HeadShots, %s" % (hitter_name, hitter_hs_count, hs_msg[hitter_hs_count]))
hs_plural = "headshots" if hitter_hs_count > 1 else "headshot"
percentage = int(round(float(hitter_hs_count) / float(hitter.get_all_hits()), 2) * 100)
self.game.send_rcon("^7%s has ^2%d ^7%s (%d percent)" % (hitter_name, hitter_hs_count, hs_plural, percentage))
elif self.hit_points[hitpoint] in zones:
hitter.set_hitzones(zones[self.hit_points[hitpoint]])
logger.debug("Player %d %s hit %d %s in the %s with %s", hitter_id, hitter_name, victim_id, self.game.players[victim_id].get_name(), self.hit_points[hitpoint], self.hit_item[hit_item])
def handle_kill(self, line):
"""
handle kills
"""
with self.players_lock:
parts = line.split(":", 1)
info = parts[0].split()
k_name = parts[1].split()[0]
killer_id = int(info[0])
victim_id = int(info[1])
death_cause = self.death_cause[int(info[2])]
victim = self.game.players[victim_id]
victim.set_alive(False)
if k_name == "<non-client>":
# killed by World
killer_id = BOT_PLAYER_NUM
killer = self.game.players[killer_id]
killer_name = killer.get_name()
victim_name = victim.get_name()
tk_event = False
# teamkill event - disabled for FFA, LMS, Jump, for all other game modes team kills are counted and punished
if not self.ffa_lms_gametype:
if victim.get_team() == killer.get_team() and victim.get_team() != 3 and victim_id != killer_id and death_cause != "UT_MOD_BOMBED":
tk_event = True
# first teamkill message
if self.firstteamkill:
self.game.rcon_bigtext("^1First Team Kill: ^7%s killed by ^1%s" % (victim_name, killer_name))
self.firstteamkill = False
# increase team kill counter for killer and kick for too many team kills
killer.team_kill()
# increase team death counter for victim
victim.team_death()
# check if bot or human player got killed
human = False if self.allow_tk_bots and victim.get_ip_address() == '0.0.0.0' else True
# Regular and higher will not get punished
if killer.get_admin_role() < 2 and self.tk_autokick and killer.get_ip_address() != '0.0.0.0' and human:
                            # add the victim to the killer's list of team-kill victims
killer.add_tk_victims(victim_id)
# list of players who killed victim
if killer_id not in victim.get_grudged_player():
victim.add_killed_me(killer_id)
self.game.rcon_tell(victim_id, "^7Type ^3!fp ^7to forgive ^3%s" % killer_name)
self.game.rcon_tell(killer_id, "^7Do not attack teammates, you ^1killed ^7%s" % victim_name)
if len(killer.get_tk_victim_names()) > 3:
killer.ban(duration=600, reason='team killing over limit', admin='bot')
self.game.rcon_say("^3%s ^7banned for ^110 minutes ^7for team killing over limit" % killer_name)
self.game.kick_player(killer_id, reason='team killing over limit')
else:
killer.add_warning('stop team killing')
self.game.rcon_tell(killer_id, "^1WARNING ^7[^3%d^7]: ^7For team killing you will get kicked" % killer.get_warning(), False)
if killer.get_warning() == 3 and killer.get_admin_role() < 40:
self.game.rcon_say("^1ALERT: ^2%s ^7auto-kick from warnings if not forgiven. Type ^3!forgive %s ^7to forgive" % (killer_name, killer_id))
suicide_reason = ['UT_MOD_SUICIDE', 'MOD_FALLING', 'MOD_WATER', 'MOD_LAVA', 'MOD_TRIGGER_HURT',
'UT_MOD_SPLODED', 'UT_MOD_SLAPPED', 'UT_MOD_SMITED']
suicide_weapon = ['UT_MOD_HEGRENADE', 'UT_MOD_HK69', 'UT_MOD_NUKED', 'UT_MOD_BOMBED']
# suicide counter
if death_cause in suicide_reason or (killer_id == victim_id and death_cause in suicide_weapon):
victim.suicide()
victim.die()
logger.debug("Player %d %s committed suicide with %s", victim_id, victim_name, death_cause)
# kill counter
elif not tk_event and int(info[2]) != 10: # 10: MOD_CHANGE_TEAM
if killer.get_losing_streak() >= 5:
self.game.rcon_say("^7You are back in business ^7%s" % killer_name)
killer.kill()
# spawn killing - warn/kick or instant kill
if (self.spawnkill_autokick or self.kill_spawnkiller) and self.spawnkill_warn_time and killer.get_admin_role() < 40:
# Spawn Protection time between players deaths in seconds to issue a warning
if victim.get_respawn_time() + self.spawnkill_warn_time > time.time():
if killer.get_ip_address() != '0.0.0.0':
if self.kill_spawnkiller and self.urt_modversion > 41:
self.game.send_rcon("smite %d" % killer_id)
self.game.rcon_say("^7%s killed for Spawn Killing" % killer_name)
if self.spawnkill_autokick:
killer.add_warning("stop spawn killing")
self.kick_high_warns(killer, 'stop spawn killing', 'Spawn Camping and Spawn Killing are not allowed')
else:
self.game.send_rcon("smite %d" % killer_id)
# multi kill message
if self.show_multikill_msg:
if killer.get_monsterkill() == 2:
self.game.rcon_say("^7%s: ^2Double Kill!" % killer_name)
elif killer.get_monsterkill() == 3:
self.game.rcon_say("^7%s: ^1Multi Kill!" % killer_name)
elif killer.get_monsterkill() == 4:
self.game.rcon_say("^7%s: ^1MONSTER KILL!!" % killer_name)
# first kill message
if self.firstblood:
self.game.rcon_bigtext("^1FIRST BLOOD: ^7%s killed by ^1%s" % (victim_name, killer_name))
self.firstblood = False
if death_cause == 'UT_MOD_HEGRENADE':
self.firstnadekill = False
if death_cause == 'UT_MOD_KNIFE' or death_cause == 'UT_MOD_KNIFE_THROWN':
self.firstknifekill = False
elif self.firstnadekill and death_cause == 'UT_MOD_HEGRENADE':
self.game.rcon_bigtext("^3%s: ^7first HE grenade kill" % killer_name)
self.firstnadekill = False
elif self.firstknifekill and (death_cause in ('UT_MOD_KNIFE', 'UT_MOD_KNIFE_THROWN')):
self.game.rcon_bigtext("^3%s: ^7first knife kill" % killer_name)
self.firstknifekill = False
# bomb mode
if self.bomb_gametype:
# bomb carrier killed
if victim.get_bombholder():
killer.kill_bomb_carrier()
# killed with bomb
if death_cause == 'UT_MOD_BOMBED':
killer.kills_with_bomb()
event_series_msg = {5: 'go on!',
10: 'beware!',
15: 'eat that!',
20: 'got pwned!',
25: 'impressive!',
30: 'dominating!'}
# HE grenade kill
if death_cause == 'UT_MOD_HEGRENADE':
killer.set_he_kill()
he_kill_count = killer.get_he_kills()
if self.spam_nade_kills_msg and he_kill_count in event_series_msg:
self.game.rcon_bigtext("^3%s: ^2%d ^7HE grenade kills, %s" % (killer_name, he_kill_count, event_series_msg[he_kill_count]))
# Knife kill
if "UT_MOD_KNIFE" in death_cause or "UT_MOD_KNIFE_THROWN" in death_cause:
killer.set_knife_kill()
knife_kill_count = killer.get_knife_kills()
if self.spam_knife_kills_msg and knife_kill_count in event_series_msg:
self.game.rcon_bigtext("^3%s: ^2%d ^7knife kills, %s" % (killer_name, knife_kill_count, event_series_msg[knife_kill_count]))
# killing spree counter
killer_color = "^1" if (killer.get_team() == 1) else "^4"
killer_killing_streak = killer.get_killing_streak()
kill_streak_msg = {5: "is on a killing spree (^15 ^7kills in a row)",
10: "is on a rampage (^110 ^7kills in a row)",
15: "is unstoppable (^115 ^7kills in a row)",
20: "is godlike (^120 ^7kills in a row)"}
if killer_killing_streak in kill_streak_msg and killer_id != BOT_PLAYER_NUM:
self.game.rcon_say("%s%s ^7%s" % (killer_color, killer_name, kill_streak_msg[killer_killing_streak]))
victim_color = "^1" if (victim.get_team() == 1) else "^4"
if victim.get_killing_streak() >= 20 and killer_name != victim_name and killer_id != BOT_PLAYER_NUM:
self.game.rcon_say("%s%s's ^7godlike (^1%s ^7kills) was ended by %s%s!" % (victim_color, victim_name, victim.get_killing_streak(), killer_color, killer_name))
elif victim.get_killing_streak() >= 15 and killer_name != victim_name and killer_id != BOT_PLAYER_NUM:
self.game.rcon_say("%s%s's ^7unstoppable (^1%s ^7kills) was ended by %s%s!" % (victim_color, victim_name, victim.get_killing_streak(), killer_color, killer_name))
elif victim.get_killing_streak() >= 10 and killer_name != victim_name and killer_id != BOT_PLAYER_NUM:
self.game.rcon_say("%s%s's ^7rampage (^1%s ^7kills) was ended by %s%s!" % (victim_color, victim_name, victim.get_killing_streak(), killer_color, killer_name))
elif victim.get_killing_streak() >= 5 and killer_name != victim_name and killer_id != BOT_PLAYER_NUM:
self.game.rcon_say("%s%s's ^7killing spree (^1%s ^7kills) was ended by %s%s!" % (victim_color, victim_name, victim.get_killing_streak(), killer_color, killer_name))
# death counter
victim.die()
if victim.get_losing_streak() == 5:
self.game.rcon_say("^7Keep it up ^3%s^7, it will come eventually" % victim_name)
if self.show_hit_stats_msg:
self.game.rcon_tell(victim_id, "^1HIT Stats: ^7HS: ^2%s ^7BODY: ^2%s ^7ARMS: ^2%s ^7LEGS: ^2%s ^7TOTAL: ^2%s" % (victim.get_headshots(), victim.get_hitzones('body'), victim.get_hitzones('arms'), victim.get_hitzones('legs'), victim.get_all_hits()))
logger.debug("Player %d %s killed %d %s with %s", killer_id, killer_name, victim_id, victim_name, death_cause)
def player_found(self, user):
"""
return True and instance of player or False and message text
"""
victim = None
name_list = []
append = name_list.append
for player in self.game.players.itervalues():
player_num = player.get_player_num()
if player_num == BOT_PLAYER_NUM:
continue
player_name = player.get_name()
player_authname = player.get_authname()
player_id = "@%d" % player.get_player_id()
if user.upper() == player_name.upper() or user == str(player_num) or user == player_id or user.lower() == player_authname:
victim = player
name_list = ["^3%s [^2%d^3]" % (player_name, player_num)]
break
elif user.upper() in player_name.upper():
victim = player
append("^3%s [^2%d^3]" % (player_name, player_num))
if not name_list:
if user.startswith('@'):
ret_val = self.offline_player(user)
else:
ret_val = False, None, "^3No players found matching %s" % user
elif len(name_list) > 1:
ret_val = False, None, "^7Players matching %s: ^3%s" % (user, ', '.join(name_list))
else:
ret_val = True, victim, "^7Found player matching %s: ^3%s" % (user, name_list[-1])
return ret_val
def offline_player(self, user_id):
"""
return True and instance of player or False and message text
"""
player_id = user_id.lstrip('@')
if player_id.isdigit() and int(player_id) > 1:
curs.execute("SELECT `guid`,`name`,`ip_address` FROM `player` WHERE `id` = {}".format(int(player_id)))
result = curs.fetchone()
if result:
victim = Player(player_num=1023, ip_address=str(result[2]), guid=str(result[0]), name=str(result[1]))
victim.define_offline_player(player_id=int(player_id))
ret_val = True, victim, None
else:
ret_val = False, None, "^3No Player found"
else:
ret_val = False, None, "^3No Player found"
return ret_val
def map_found(self, map_name):
"""
return True and map name or False and message text
"""
map_list = []
append = map_list.append
for maps in self.game.get_all_maps():
if map_name.lower() == maps or ('ut4_%s' % map_name.lower()) == maps:
append(maps)
break
elif map_name.lower() in maps:
append(maps)
if not map_list:
ret_val = False, None, "^3Map not found"
elif len(map_list) > 1:
ret_val = False, None, "^7Maps matching %s: ^3%s" % (map_name, ', '.join(map_list))
else:
ret_val = True, map_list[0], None
return ret_val
def handle_saytell(self, line):
"""
handle saytell commands
"""
tmp = line.strip()
try:
new = "%s%s" % (tmp[0], ''.join(tmp[1:]))
self.handle_say(new)
except IndexError:
pass
def clean_cmd_list(self, cmd_list):
"""
remove commands which are not available in current game type or modversion
"""
disabled_cmds = []
clean_list = list(cmd_list)
if self.ffa_lms_gametype or self.ts_gametype or self.tdm_gametype:
disabled_cmds = ['bombstats', 'ctfstats', 'freezestats']
elif self.bomb_gametype:
disabled_cmds = ['ctfstats', 'freezestats']
elif self.ctf_gametype:
disabled_cmds = ['bombstats', 'freezestats']
elif self.freeze_gametype:
disabled_cmds = ['bombstats', 'ctfstats']
if self.urt_modversion == 41:
disabled_cmds += ['kill', 'instagib']
elif self.urt_modversion == 42:
disabled_cmds += ['instagib']
for item in disabled_cmds:
try:
clean_list.remove(item)
except ValueError:
pass
return clean_list
def handle_say(self, line):
"""
handle say commands
"""
poke_options = ['Go', 'Wake up', '*poke*', 'Attention', 'Get up', 'Move out']
bad_words = ['fuck', 'ass', 'bastard', 'retard', 'slut', 'bitch', 'whore', 'cunt', 'pussy', 'dick', 'cock', 'sucker',
'fick', 'arsch', 'nutte', 'schlampe', 'hure', 'fotze', 'penis', 'wichser', 'nazi', 'hitler',
'putain', 'merde', 'chienne', 'batard', 'jihad', 'nigger',
'kurwa', 'suka', 'dupa', 'dupek', 'puta', 'maricon']
with self.players_lock:
line = line.strip()
try:
divider = line.split(": ", 1)
number = divider[0].split(" ", 1)[0]
cmd = divider[1].split()[0]
sar = {'player_num': int(number), 'command': cmd}
except IndexError:
sar = {'player_num': BOT_PLAYER_NUM, 'command': ''}
except ValueError:
sar = {'player_num': 0, 'command': str(line.split(": ", 1)).split(" ", 1)[0]}
if sar['command'] == '!mapstats':
self.game.rcon_tell(sar['player_num'], "^2%d ^7kills - ^2%d ^7deaths" % (self.game.players[sar['player_num']].get_kills(), self.game.players[sar['player_num']].get_deaths()))
self.game.rcon_tell(sar['player_num'], "^2%d ^7kills in a row - ^2%d ^7teamkills" % (self.game.players[sar['player_num']].get_killing_streak(), self.game.players[sar['player_num']].get_team_kill_count()))
self.game.rcon_tell(sar['player_num'], "^2%d ^7total hits - ^2%d ^7headshots" % (self.game.players[sar['player_num']].get_all_hits(), self.game.players[sar['player_num']].get_headshots()))
self.game.rcon_tell(sar['player_num'], "^2%d ^7HE grenade kills" % self.game.players[sar['player_num']].get_he_kills())
if self.ctf_gametype:
if self.urt_modversion > 41:
self.game.rcon_tell(sar['player_num'], "^7flags captured: ^2%d ^7- flags returned: ^2%d ^7- fastest cap: ^2%s ^7sec" % (self.game.players[sar['player_num']].get_flags_captured(), self.game.players[sar['player_num']].get_flags_returned(), self.game.players[sar['player_num']].get_flag_capture_time()))
else:
self.game.rcon_tell(sar['player_num'], "^7flags captured: ^2%d ^7- flags returned: ^2%d" % (self.game.players[sar['player_num']].get_flags_captured(), self.game.players[sar['player_num']].get_flags_returned()))
elif self.bomb_gametype:
self.game.rcon_tell(sar['player_num'], "^7planted: ^2%d ^7- defused: ^2%d" % (self.game.players[sar['player_num']].get_planted_bomb(), self.game.players[sar['player_num']].get_defused_bomb()))
self.game.rcon_tell(sar['player_num'], "^7bomb carrier killed: ^2%d ^7- enemies bombed: ^2%d" % (self.game.players[sar['player_num']].get_bomb_carrier_kills(), self.game.players[sar['player_num']].get_kills_with_bomb()))
elif self.freeze_gametype:
self.game.rcon_tell(sar['player_num'], "^7freeze: ^2%d ^7- thaw out: ^2%d" % (self.game.players[sar['player_num']].get_freeze(), self.game.players[sar['player_num']].get_thawout()))
elif sar['command'] in ('!help', '!h'):
if line.split(sar['command'])[1]:
cmd = line.split(sar['command'])[1].strip()
if cmd in COMMANDS:
if self.game.players[sar['player_num']].get_admin_role() >= COMMANDS[cmd]['level']:
self.game.rcon_tell(sar['player_num'], "%s ^3- %s" % (COMMANDS[cmd]['syntax'], COMMANDS[cmd]['desc']))
elif cmd in self.shortcut_cmd:
if self.game.players[sar['player_num']].get_admin_role() >= COMMANDS[self.shortcut_cmd[cmd]]['level']:
self.game.rcon_tell(sar['player_num'], "%s ^3- %s" % (COMMANDS[self.shortcut_cmd[cmd]]['syntax'], COMMANDS[self.shortcut_cmd[cmd]]['desc']))
else:
if cmd not in self.superadmin_cmds:
self.game.rcon_tell(sar['player_num'], "^7Unknown command ^3%s" % cmd)
else:
if self.game.players[sar['player_num']].get_admin_role() < 20:
self.game.rcon_tell(sar['player_num'], "^7Available commands: ^3%s" % ', ^3'.join(self.clean_cmd_list(self.user_cmds)))
# help for mods - additional commands
elif self.game.players[sar['player_num']].get_admin_role() == 20:
self.game.rcon_tell(sar['player_num'], "^7Moderator commands: ^3%s" % ', ^3'.join(self.clean_cmd_list(self.mod_cmds)))
# help for admins - additional commands
elif self.game.players[sar['player_num']].get_admin_role() == 40:
self.game.rcon_tell(sar['player_num'], "^7Admin commands: ^3%s" % ', ^3'.join(self.clean_cmd_list(self.admin_cmds)))
elif self.game.players[sar['player_num']].get_admin_role() == 60:
self.game.rcon_tell(sar['player_num'], "^7Full Admin commands: ^3%s" % ', ^3'.join(self.clean_cmd_list(self.fulladmin_cmds)))
elif self.game.players[sar['player_num']].get_admin_role() == 80:
self.game.rcon_tell(sar['player_num'], "^7Senior Admin commands: ^3%s" % ', ^3'.join(self.clean_cmd_list(self.senioradmin_cmds)))
elif self.game.players[sar['player_num']].get_admin_role() >= 90:
self.game.rcon_tell(sar['player_num'], "^7Super Admin commands: ^3%s" % ', ^3'.join(self.clean_cmd_list(self.superadmin_cmds)))
## player commands
# register - register yourself as a basic user
elif sar['command'] == '!register':
if not self.game.players[sar['player_num']].get_registered_user():
self.game.players[sar['player_num']].register_user_db(role=1)
self.game.rcon_tell(sar['player_num'], "^3%s ^7put in group User" % self.game.players[sar['player_num']].get_name())
else:
self.game.rcon_tell(sar['player_num'], "^3%s ^7is already in a higher level group" % self.game.players[sar['player_num']].get_name())
# regtest - display current user status
elif sar['command'] == '!regtest':
if self.game.players[sar['player_num']].get_registered_user():
self.game.rcon_tell(sar['player_num'], "^7%s [^3@%s^7] is registered since ^3%s" % (self.game.players[sar['player_num']].get_name(), self.game.players[sar['player_num']].get_player_id(), self.game.players[sar['player_num']].get_first_seen_date()))
else:
self.game.rcon_tell(sar['player_num'], "^7You are not a registered user.")
# hs - display headshot counter
elif sar['command'] == '!hs':
hs_count = self.game.players[sar['player_num']].get_headshots()
if hs_count > 0:
self.game.rcon_tell(sar['player_num'], "^7You made ^2%d ^7headshot%s" % (hs_count, 's' if hs_count > 1 else ''))
else:
self.game.rcon_tell(sar['player_num'], "^7You made no headshot")
# spree - display kill streak counter
elif sar['command'] == '!spree':
spree_count = self.game.players[sar['player_num']].get_killing_streak()
lose_count = self.game.players[sar['player_num']].get_losing_streak()
if spree_count > 0:
self.game.rcon_tell(sar['player_num'], "^7You have ^2%d ^7kill%s in a row" % (spree_count, 's' if spree_count > 1 else ''))
elif lose_count > 1:
self.game.rcon_tell(sar['player_num'], "^7You have a losing spree with ^1%d ^7deaths in a row" % lose_count)
else:
self.game.rcon_tell(sar['player_num'], "^7You are currently not having a killing spree")
# hestats - display HE grenade kill counter
elif sar['command'] == '!hestats':
he_kill_count = self.game.players[sar['player_num']].get_he_kills()
if he_kill_count > 0:
self.game.rcon_tell(sar['player_num'], "^7You made ^2%d ^7HE grenade kill%s" % (he_kill_count, 's' if he_kill_count > 1 else ''))
else:
self.game.rcon_tell(sar['player_num'], "^7You made no HE grenade kill")
# knife - display knife kill counter
elif sar['command'] == '!knife':
knife_kill_count = self.game.players[sar['player_num']].get_knife_kills()
if knife_kill_count > 0:
self.game.rcon_tell(sar['player_num'], "^7You made ^2%d ^7knife kill%s" % (knife_kill_count, 's' if knife_kill_count > 1 else ''))
else:
self.game.rcon_tell(sar['player_num'], "^7You made no knife kill")
# hits - display hit stats
elif sar['command'] == '!hits':
self.game.rcon_tell(sar['player_num'], "^1HIT Stats: ^7HS: ^2%s ^7BODY: ^2%s ^7ARMS: ^2%s ^7LEGS: ^2%s ^7TOTAL: ^2%s" % (self.game.players[sar['player_num']].get_headshots(), self.game.players[sar['player_num']].get_hitzones('body'), self.game.players[sar['player_num']].get_hitzones('arms'), self.game.players[sar['player_num']].get_hitzones('legs'), self.game.players[sar['player_num']].get_all_hits()))
# bombstats - display bomb statistics
elif sar['command'] == '!bombstats':
if self.bomb_gametype:
self.game.rcon_tell(sar['player_num'], "^7planted: ^2%d ^7- defused: ^2%d" % (self.game.players[sar['player_num']].get_planted_bomb(), self.game.players[sar['player_num']].get_defused_bomb()))
self.game.rcon_tell(sar['player_num'], "^7bomb carrier killed: ^2%d ^7- enemies bombed: ^2%d" % (self.game.players[sar['player_num']].get_bomb_carrier_kills(), self.game.players[sar['player_num']].get_kills_with_bomb()))
else:
self.game.rcon_tell(sar['player_num'], "^7You are not playing Bomb")
# ctfstats - display ctf statistics
elif sar['command'] == '!ctfstats':
if self.ctf_gametype:
if self.urt_modversion > 41:
self.game.rcon_tell(sar['player_num'], "^7flags captured: ^2%d ^7- flags returned: ^2%d ^7- fastest cap: ^2%s ^7sec" % (self.game.players[sar['player_num']].get_flags_captured(), self.game.players[sar['player_num']].get_flags_returned(), self.game.players[sar['player_num']].get_flag_capture_time()))
else:
self.game.rcon_tell(sar['player_num'], "^7flags captured: ^2%d ^7- flags returned: ^2%d" % (self.game.players[sar['player_num']].get_flags_captured(), self.game.players[sar['player_num']].get_flags_returned()))
else:
self.game.rcon_tell(sar['player_num'], "^7You are not playing Capture the Flag")
# freezestats - display freeze tag statistics
elif sar['command'] == '!freezestats':
if self.freeze_gametype:
self.game.rcon_tell(sar['player_num'], "^7freeze: ^2%d ^7- thaw out: ^2%d" % (self.game.players[sar['player_num']].get_freeze(), self.game.players[sar['player_num']].get_thawout()))
else:
self.game.rcon_tell(sar['player_num'], "^7You are not playing Freeze Tag")
# time - display the servers current time
elif sar['command'] in ('!time', '@time'):
msg = "^7%s" % time.strftime("%H:%M", time.localtime(time.time()))
self.tell_say_message(sar, msg)
# teams - balance teams
elif sar['command'] == '!teams' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['teams']['level']:
if not self.ffa_lms_gametype:
self.handle_team_balance()
# stats - display current map stats
elif sar['command'] == '!stats':
if not self.freeze_gametype:
ratio = round(float(self.game.players[sar['player_num']].get_kills()) / float(self.game.players[sar['player_num']].get_deaths()), 2) if self.game.players[sar['player_num']].get_deaths() > 0 else 1.0
self.game.rcon_tell(sar['player_num'], "^7Map Stats %s: ^7K ^2%d ^7D ^3%d ^7TK ^1%d ^7Ratio ^5%s ^7HS ^2%d" % (self.game.players[sar['player_num']].get_name(), self.game.players[sar['player_num']].get_kills(), self.game.players[sar['player_num']].get_deaths(), self.game.players[sar['player_num']].get_team_kill_count(), ratio, self.game.players[sar['player_num']].get_headshots()))
else:
# Freeze Tag
self.game.rcon_tell(sar['player_num'], "^7Freeze Stats %s: ^7F ^2%d ^7T ^3%d ^7TK ^1%d ^7HS ^2%d" % (self.game.players[sar['player_num']].get_name(), self.game.players[sar['player_num']].get_freeze(), self.game.players[sar['player_num']].get_thawout(), self.game.players[sar['player_num']].get_team_kill_count(), self.game.players[sar['player_num']].get_headshots()))
# xlrstats - display full player stats
elif sar['command'] == '!xlrstats':
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].strip()
player_found = False
for player in self.game.players.itervalues():
if (arg.upper() in (player.get_name()).upper()) or (arg == str(player.get_player_num())) or (arg == ("@%s" % player.get_player_id())) or (arg.lower() == player.get_authname()):
player_found = True
if player.get_registered_user():
ratio = round(float(player.get_db_kills()) / float(player.get_db_deaths()), 2) if player.get_db_deaths() > 0 else 1.0
self.game.rcon_tell(sar['player_num'], "^7Stats %s: ^7K ^2%d ^7D ^3%d ^7TK ^1%d ^7Ratio ^5%s ^7HS ^2%d" % (player.get_name(), player.get_db_kills(), player.get_db_deaths(), player.get_db_tks(), ratio, player.get_db_headshots()))
else:
self.game.rcon_tell(sar['player_num'], "^7Sorry, this player is not registered")
break
if not player_found:
self.game.rcon_tell(sar['player_num'], "^7No player found matching ^3%s" % arg)
else:
if self.game.players[sar['player_num']].get_registered_user():
ratio = round(float(self.game.players[sar['player_num']].get_db_kills()) / float(self.game.players[sar['player_num']].get_db_deaths()), 2) if self.game.players[sar['player_num']].get_db_deaths() > 0 else 1.0
self.game.rcon_tell(sar['player_num'], "^7Stats %s: ^7K ^2%d ^7D ^3%d ^7TK ^1%d ^7Ratio ^5%s ^7HS ^2%d" % (self.game.players[sar['player_num']].get_name(), self.game.players[sar['player_num']].get_db_kills(), self.game.players[sar['player_num']].get_db_deaths(), self.game.players[sar['player_num']].get_db_tks(), ratio, self.game.players[sar['player_num']].get_db_headshots()))
else:
self.game.rcon_tell(sar['player_num'], "^7You need to ^2!register ^7first")
# xlrtopstats
elif (sar['command'] in ('!xlrtopstats', '!topstats')) and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['xlrtopstats']['level']:
curs.execute("SELECT name FROM `xlrstats` WHERE (`rounds` > 35 or `kills` > 500) and `last_played` > '{}' ORDER BY `ratio` DESC LIMIT 3".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime((time.time() - 10368000))))) # last played within the last 120 days
result = curs.fetchall()
toplist = ['^1#%s ^7%s' % (index + 1, result[index][0]) for index in xrange(len(result))]
msg = "^3Top players: %s" % str(", ".join(toplist)) if toplist else "^3Awards still available"
self.game.rcon_tell(sar['player_num'], msg)
# !forgive [<name>] - forgive a player for team killing
elif sar['command'] == '!forgive' or sar['command'] == '!f':
victim = self.game.players[sar['player_num']]
if victim.get_killed_me():
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, forgive_player, _ = self.player_found(user)
if not found:
forgive_player_num = False
else:
forgive_player_num = forgive_player.get_player_num() if forgive_player.get_player_num() in victim.get_killed_me() else False
else:
forgive_player_num = victim.get_killed_me()[-1]
forgive_player = self.game.players[forgive_player_num]
if forgive_player_num is not False:
victim.clear_tk(forgive_player_num)
forgive_player.clear_killed_me(victim.get_player_num())
self.game.rcon_say("^7%s has forgiven %s's attack" % (victim.get_name(), forgive_player.get_name()))
else:
self.game.rcon_tell(sar['player_num'], "^7Whom to forgive? %s" % ", ".join(["^3%s [^2%s^3]" % (self.game.players[playernum].get_name(), playernum) for playernum in list(set(victim.get_killed_me()))]))
else:
self.game.rcon_tell(sar['player_num'], "^3No one to forgive")
# forgive last team kill
elif sar['command'] == '!forgiveprev' or sar['command'] == '!fp':
victim = self.game.players[sar['player_num']]
if victim.get_killed_me():
forgive_player_num = victim.get_killed_me()[-1]
forgive_player = self.game.players[forgive_player_num]
victim.clear_tk(forgive_player_num)
forgive_player.clear_killed_me(victim.get_player_num())
self.game.rcon_say("^7%s has forgiven %s's attack" % (victim.get_name(), forgive_player.get_name()))
else:
self.game.rcon_tell(sar['player_num'], "^3No one to forgive")
# !forgivelist - list all players who killed you
elif sar['command'] == '!forgivelist' or sar['command'] == '!fl':
victim = self.game.players[sar['player_num']]
if victim.get_killed_me():
self.game.rcon_tell(sar['player_num'], "^7Whom to forgive? %s" % ", ".join(["^3%s [^2%s^3]" % (self.game.players[playernum].get_name(), playernum) for playernum in list(set(victim.get_killed_me()))]))
else:
self.game.rcon_tell(sar['player_num'], "^3No one to forgive")
# forgive all team kills
elif sar['command'] == '!forgiveall' or sar['command'] == '!fa':
victim = self.game.players[sar['player_num']]
msg = []
append = msg.append
if victim.get_killed_me():
forgive_player_num_list = list(set(victim.get_killed_me()))
victim.clear_all_tk()
for forgive_player_num in forgive_player_num_list:
forgive_player = self.game.players[forgive_player_num]
forgive_player.clear_killed_me(victim.get_player_num())
append(forgive_player.get_name())
if msg:
self.game.rcon_say("^7%s has forgiven: ^3%s" % (victim.get_name(), ", ".join(msg)))
else:
self.game.rcon_tell(sar['player_num'], "^3No one to forgive")
# grudge - grudge a player for team killing (a grudged player will not be forgiven) - !grudge [<name>]
elif sar['command'] == '!grudge':
victim = self.game.players[sar['player_num']]
if victim.get_killed_me():
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, grudge_player, _ = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], "^7Whom to grudge? %s" % ", ".join(["^3%s [^2%s^3]" % (self.game.players[playernum].get_name(), playernum) for playernum in list(set(victim.get_killed_me()))]))
else:
victim.set_grudge(grudge_player.get_player_num())
self.game.rcon_say("^7%s has a grudge against ^1%s" % (victim.get_name(), grudge_player.get_name()))
else:
grudge_player = self.game.players[victim.get_killed_me()[-1]]
victim.set_grudge(grudge_player.get_player_num())
self.game.rcon_say("^7%s has a grudge against ^1%s" % (victim.get_name(), grudge_player.get_name()))
elif victim.get_grudged_player():
self.game.rcon_tell(sar['player_num'], "^7No one to grudge. You already have a grudge against: %s" % ", ".join(["^3%s" % self.game.players[playernum].get_name() for playernum in victim.get_grudged_player()]))
else:
self.game.rcon_tell(sar['player_num'], "^3No one to grudge")
## mod level 20
# admintest - display current admin status
elif sar['command'] == '!admintest' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['admintest']['level']:
player_admin_role = self.game.players[sar['player_num']].get_admin_role()
self.game.rcon_tell(sar['player_num'], "^7%s [^3@%s^7] is ^3%s ^7[^2%d^7]" % (self.game.players[sar['player_num']].get_name(), self.game.players[sar['player_num']].get_player_id(), self.game.players[sar['player_num']].roles[player_admin_role], player_admin_role))
# country / locate
elif (sar['command'] == '!country' or sar['command'] == '@country' or sar['command'] == '!locate' or sar['command'] == '@locate' or sar['command'] == '!lc') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['country']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
msg = "^3%s ^7is connecting from ^3%s" % (victim.get_name(), victim.get_country())
self.tell_say_message(sar, msg)
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['country']['syntax'])
# poke - notify a player that he needs to move
elif sar['command'] == '!poke' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['poke']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
self.game.rcon_tell(sar['player_num'], "^7%s %s!" % (random.choice(poke_options), victim.get_name()))
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['poke']['syntax'])
# leveltest
elif (sar['command'] == '!leveltest' or sar['command'] == '!lt') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['leveltest']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
victim_admin_role = victim.get_admin_role()
if victim_admin_role > 0:
self.game.rcon_tell(sar['player_num'], "^7%s [^3@%s^7] is ^3%s ^7[^2%d^7] and registered since ^3%s" % (victim.get_name(), victim.get_player_id(), victim.roles[victim_admin_role], victim_admin_role, victim.get_first_seen_date()))
else:
self.game.rcon_tell(sar['player_num'], "^7%s [^3@%s^7] is ^3%s ^7[^2%d^7]" % (victim.get_name(), victim.get_player_id(), victim.roles[victim_admin_role], victim_admin_role))
else:
self.game.rcon_tell(sar['player_num'], "^3Level %s [^2%d^3]: ^7%s" % (self.game.players[sar['player_num']].get_name(), self.game.players[sar['player_num']].get_admin_role(), self.game.players[sar['player_num']].roles[self.game.players[sar['player_num']].get_admin_role()]))
# lastmaps - list the last played maps
elif sar['command'] == '!lastmaps' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['lastmaps']['level']:
if self.game.get_last_maps():
self.game.rcon_tell(sar['player_num'], "^7Last Maps: ^3%s" % ", ".join(self.game.get_last_maps()))
else:
self.game.rcon_tell(sar['player_num'], "^7No maps have been played since Spunky Bot started")
# lastvote - display information about the last called vote
elif sar['command'] == "!lastvote" and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['lastvote']['level']:
if self.last_vote:
self.game.rcon_tell(sar['player_num'], "^7Last vote: ^2%s" % self.last_vote)
else:
self.game.rcon_tell(sar['player_num'], "^7No votes have been called since Spunky Bot started")
# list - list all connected players
elif sar['command'] == '!list' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['list']['level']:
msg = "^7Players online: %s" % ", ".join(["^3%s [^2%d^3]" % (player.get_name(), player.get_player_num()) for player in self.game.players.itervalues() if player.get_player_num() != BOT_PLAYER_NUM])
self.game.rcon_tell(sar['player_num'], msg)
# nextmap - display the next map in rotation
elif (sar['command'] == '!nextmap' or sar['command'] == '@nextmap') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['nextmap']['level']:
msg = self.get_nextmap()
self.tell_say_message(sar, msg)
# mute - mute or unmute a player
elif sar['command'] == '!mute' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['mute']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].split()
if len(arg) > 1:
user = arg[0]
duration = arg[1]
if not duration.isdigit():
duration = ''
else:
user = arg[0]
duration = ''
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
self.game.send_rcon("mute %d %s" % (victim.get_player_num(), duration))
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['mute']['syntax'])
# seen - display when the player was last seen
elif sar['command'] == '!seen' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['seen']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
if victim.get_registered_user():
self.game.rcon_tell(sar['player_num'], "^3%s ^7was last seen on %s" % (victim.get_name(), victim.get_last_visit()))
else:
self.game.rcon_tell(sar['player_num'], "^3%s ^7is not a registered user" % victim.get_name())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['seen']['syntax'])
# shuffleteams
elif (sar['command'] == '!shuffleteams' or sar['command'] == '!shuffle') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['shuffleteams']['level']:
if not self.ffa_lms_gametype:
self.game.send_rcon('shuffleteams')
else:
self.game.rcon_tell(sar['player_num'], "^7Command is disabled for this game mode")
# spec - move yourself to spectator
elif sar['command'] in ('!spec', '!sp') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['spec']['level']:
self.game.rcon_forceteam(sar['player_num'], 'spectator')
# warninfo - display how many warnings the player has
elif (sar['command'] == '!warninfo' or sar['command'] == '!wi') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['warninfo']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
# clear if already expired
if victim.get_last_warn_time() + self.warn_expiration < time.time():
victim.clear_warning()
warn_count = victim.get_warning()
warn_time = int(math.ceil(float(victim.get_last_warn_time() + self.warn_expiration - time.time()) / 60))
self.game.rcon_tell(sar['player_num'], "^3%s ^7has ^2%s ^7active warning%s%s" % (victim.get_name(), warn_count if warn_count > 0 else 'no', 's' if warn_count > 1 else '', ", expires in ^1%s ^7minute%s: ^3%s" % (warn_time, "s" if warn_time > 1 else "", ", ^3".join(victim.get_all_warn_msg())) if warn_count > 0 else ''))
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['warninfo']['syntax'])
# warn - warn user - !warn <name> [<reason>]
elif (sar['command'] == '!warn' or sar['command'] == '!w') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['warn']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].split()
if arg:
user = arg[0]
reason = ' '.join(arg[1:])[:40].strip() if len(arg) > 1 else 'behave yourself'
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
warn_delay = 15
if victim.get_admin_role() >= self.game.players[sar['player_num']].get_admin_role():
self.game.rcon_tell(sar['player_num'], "^3You cannot warn an admin")
elif victim.get_last_warn_time() + warn_delay > time.time():
self.game.rcon_tell(sar['player_num'], "^3Only one warning per %d seconds can be issued" % warn_delay)
else:
# clear if already expired
if victim.get_last_warn_time() + self.warn_expiration < time.time():
victim.clear_warning()
show_alert = False
ban_duration = 0
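# show_alert / ban_duration track whether an alert or an automatic ban has to be issued after the warning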
if victim.get_warning() > 2:
self.game.kick_player(victim.get_player_num(), reason='too many warnings')
msg = "^2%s ^7was kicked, too many warnings" % victim.get_name()
else:
if reason in REASONS:
warning = REASONS[reason]
if reason == 'tk' and victim.get_warning() > 1:
ban_duration = victim.add_ban_point('tk, ban by %s' % self.game.players[sar['player_num']].get_name(), 900)
elif reason == 'lang' and victim.get_warning() > 1:
ban_duration = victim.add_ban_point('lang', 300)
elif reason == 'spam' and victim.get_warning() > 1:
ban_duration = victim.add_ban_point('spam', 300)
elif reason == 'racism':
ban_duration = victim.add_ban_point('racism', 900)
elif reason == 'name' and victim.get_warning() > 1:
ban_duration = victim.add_ban_point('name', 900)
elif reason == 'sk' and victim.get_warning() > 1:
ban_duration = victim.add_ban_point('sk', 900)
elif reason == 'camp':
ban_duration = victim.add_ban_point('camp', 900)
else:
warning = reason
victim.add_warning(warning)
msg = "^1WARNING ^7[^3%d^7]: ^2%s^7: %s" % (victim.get_warning(), victim.get_name(), warning)
# ban player if needed
if ban_duration > 0:
msg = "^2%s ^7banned for ^1%d minutes ^7for too many warnings" % (victim.get_name(), ban_duration)
self.game.kick_player(victim.get_player_num(), reason='too many warnings')
# show alert message for player with 3 warnings
elif victim.get_warning() == 3:
show_alert = True
self.game.rcon_say(msg)
if show_alert:
self.game.rcon_say("^1ALERT: ^2%s ^7auto-kick from warnings if not cleared" % victim.get_name())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['warn']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['warn']['syntax'])
# warnremove - remove a player's last warning
elif (sar['command'] == '!warnremove' or sar['command'] == '!wr') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['warnremove']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
last_warning = victim.clear_last_warning()
if last_warning:
self.game.rcon_say("^7Last warning removed for %s: ^3%s" % (victim.get_name(), last_warning))
else:
self.game.rcon_tell(sar['player_num'], "^3%s ^7has no active warning" % victim.get_name())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['warnremove']['syntax'])
# warns - list the available warning reasons
elif sar['command'] == '!warns' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['warns']['level']:
keylist = sorted(REASONS.keys())
self.game.rcon_tell(sar['player_num'], "^7Warnings: ^3%s" % ", ^3".join(keylist))
# warntest - test a warning
elif (sar['command'] == '!warntest' or sar['command'] == '!wt') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['warntest']['level']:
if line.split(sar['command'])[1]:
reason = line.split(sar['command'])[1].strip()
warning = REASONS[reason] if reason in REASONS else reason
else:
warning = 'behave yourself'
self.game.rcon_tell(sar['player_num'], "^2TEST: ^1WARNING ^7[^31^7]: ^4%s" % warning)
## admin level 40
# admins - list all the online admins
elif (sar['command'] == '!admins' or sar['command'] == '@admins') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['admins']['level']:
msg = self.get_admins_online()
self.tell_say_message(sar, msg)
# !regulars/!regs - display the regular players online
elif (sar['command'] == '!regulars' or sar['command'] == '!regs' or sar['command'] == '@regulars') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['regulars']['level']:
liste = "%s" % ", ".join(["^3%s [^2%d^3]" % (player.get_name(), player.get_admin_role()) for player in self.game.players.itervalues() if player.get_admin_role() == 2])
msg = "^7Regulars online: %s" % liste if liste else "^7No regulars online"
self.tell_say_message(sar, msg)
# aliases - list the aliases of the player
elif (sar['command'] == '!aliases' or sar['command'] == '@aliases' or sar['command'] == '!alias' or sar['command'] == '@alias') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['aliases']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
msg = "^7Aliases of ^5%s: ^3%s" % (victim.get_name(), victim.get_aliases())
self.tell_say_message(sar, msg)
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['aliases']['syntax'])
# bigtext - display big message on screen
elif sar['command'] == '!bigtext' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['bigtext']['level']:
if line.split(sar['command'])[1]:
self.game.rcon_bigtext("%s" % line.split(sar['command'])[1].strip())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['bigtext']['syntax'])
# say - say a message to all players
elif sar['command'] == '!say' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['say']['level']:
if line.split(sar['command'])[1]:
self.game.rcon_say("^4%s: ^7%s" % (self.game.players[sar['player_num']].get_name(), line.split(sar['command'])[1].strip()))
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['say']['syntax'])
# !!<text> - allow spectator to say a message to players in-game
elif sar['command'].startswith('!!') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['say']['level']:
if line.split('!!')[1]:
self.game.rcon_say("^4%s: ^7%s" % (self.game.players[sar['player_num']].get_name(), line.split('!!', 1)[1].strip()))
# tell - tell a message to a specific player - !tell <name|id> <text>
elif sar['command'] == '!tell' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['tell']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].split()
if len(arg) > 1:
user = arg[0]
message = ' '.join(arg[1:]).strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
self.game.rcon_tell(victim.get_player_num(), "^4%s: ^7%s" % (self.game.players[sar['player_num']].get_name(), message))
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['tell']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['tell']['syntax'])
# exit - display last disconnected player of this match
elif sar['command'] == '!exit' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['exit']['level']:
msg = "^7Last disconnected player: ^3%s" % self.last_disconnected_player.get_name() if self.last_disconnected_player else "^3No player left during this match"
self.game.rcon_tell(sar['player_num'], msg)
# find - display the slot number of the player
elif sar['command'] == '!find' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['find']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
self.game.rcon_tell(sar['player_num'], msg)
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['find']['syntax'])
# afk - force a player to spectators because they are away from keyboard - !afk <name>
elif sar['command'] == '!afk' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['afk']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].split()[0]
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
self.game.rcon_forceteam(victim.get_player_num(), 'spectator')
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['afk']['syntax'])
# force - force a player to the given team
elif sar['command'] == '!force' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['force']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].split()
if len(arg) > 1:
user = arg[0]
team = arg[1]
lock = False
if len(arg) > 2:
lock = True if arg[2] == 'lock' else False
team_dict = {'red': 'red', 'r': 'red', 're': 'red',
'blue': 'blue', 'b': 'blue', 'bl': 'blue', 'blu': 'blue',
'spec': 'spectator', 'spectator': 'spectator', 's': 'spectator', 'sp': 'spectator', 'spe': 'spectator',
'green': 'green'}
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
if team in team_dict:
victim_player_num = victim.get_player_num()
self.game.rcon_forceteam(victim_player_num, team_dict[team])
self.game.rcon_tell(victim_player_num, "^3You are forced to: ^7%s" % team_dict[team])
# set team lock if defined
if lock:
victim.set_team_lock(team_dict[team])
else:
victim.set_team_lock(None)
# release the player from a forced team
elif team == "free":
victim.set_team_lock(None)
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['force']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['force']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['force']['syntax'])
# nuke - nuke a player
elif sar['command'] == '!nuke' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['nuke']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].split()[0]
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
if victim.get_admin_role() >= self.game.players[sar['player_num']].get_admin_role():
self.game.rcon_tell(sar['player_num'], "^3Insufficient privileges to nuke an admin")
else:
self.game.send_rcon("nuke %d" % victim.get_player_num())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['nuke']['syntax'])
# kick - kick a player
elif (sar['command'] == '!kick' or sar['command'] == '!k') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['kick']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].split()
if self.game.players[sar['player_num']].get_admin_role() >= 80 and len(arg) == 1:
user = arg[0]
reason = '.'
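# '.' marks an empty kick reason; only admins of level 80 or higher may omit the reason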
elif len(arg) > 1:
user = arg[0]
reason = ' '.join(arg[1:])[:40].strip()
else:
user = reason = None
if user and reason:
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
if sar['player_num'] == victim.get_player_num():
self.game.rcon_tell(sar['player_num'], "^7You cannot kick yourself")
elif victim.get_admin_role() >= self.game.players[sar['player_num']].get_admin_role():
self.game.rcon_tell(sar['player_num'], "^3Insufficient privileges to kick an admin")
else:
msg = "^2%s ^7was kicked by %s" % (victim.get_name(), self.game.players[sar['player_num']].get_name())
if reason in REASONS:
kick_reason = REASONS[reason]
msg = "%s: ^3%s" % (msg, kick_reason)
elif reason == '.':
kick_reason = ''
else:
kick_reason = reason
msg = "%s: ^3%s" % (msg, kick_reason)
self.game.kick_player(victim.get_player_num(), reason=kick_reason)
self.game.rcon_say(msg)
else:
self.game.rcon_tell(sar['player_num'], "^7You need to enter a reason: ^3!kick <name> <reason>")
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['kick']['syntax'])
# warnclear - clear the user warnings
elif (sar['command'] == '!warnclear' or sar['command'] == '!wc') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['warnclear']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
victim.clear_warning()
for player in self.game.players.itervalues():
player.clear_tk(victim.get_player_num())
self.game.rcon_say("^1All warnings and team kills cleared for ^2%s" % victim.get_name())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['warnclear']['syntax'])
# tempban - ban a player temporarily for the given period
elif (sar['command'] == '!tempban' or sar['command'] == '!tb') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['tempban']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].split()
if len(arg) > 1:
user = arg[0]
duration, duration_output = self.convert_time(arg[1])
reason = ' '.join(arg[2:])[:40].strip() if len(arg) > 2 else 'tempban'
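# use the predefined reason text if available; the default 'tempban' reason results in an empty kick message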
kick_reason = REASONS[reason] if reason in REASONS else '' if reason == 'tempban' else reason
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
if sar['player_num'] == victim.get_player_num():
self.game.rcon_tell(sar['player_num'], "^7You cannot ban yourself")
elif victim.get_admin_role() >= self.game.players[sar['player_num']].get_admin_role():
self.game.rcon_tell(sar['player_num'], "^3Insufficient privileges to ban an admin")
else:
if victim.ban(duration=duration, reason=reason, admin=self.game.players[sar['player_num']].get_name()):
msg = "^2%s ^1banned ^7for ^3%s ^7by %s" % (victim.get_name(), duration_output, self.game.players[sar['player_num']].get_name())
if kick_reason:
msg = "%s: ^3%s" % (msg, kick_reason)
self.game.rcon_say(msg)
else:
self.game.rcon_tell(sar['player_num'], "^7This player has already a longer ban")
self.game.kick_player(player_num=victim.get_player_num(), reason=kick_reason)
else:
self.game.rcon_tell(sar['player_num'], "^7You need to enter a duration: ^3!tempban <name> <duration> [<reason>]")
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['tempban']['syntax'])
## full admin level 60
# !forgiveinfo <name> - display a player's team kills
elif (sar['command'] == '!forgiveinfo' or sar['command'] == '!fi') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['forgiveinfo']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
tks = len(victim.get_tk_victim_names())
self.game.rcon_tell(sar['player_num'], "^3%s ^7killed ^1%s ^7teammate%s" % (victim.get_name(), tks, 's' if tks > 1 else '') if tks > 0 else "^3%s ^7has not killed teammates" % victim.get_name())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['forgiveinfo']['syntax'])
# !forgiveclear [<name>] - clear a player's team kills
elif (sar['command'] == '!forgiveclear' or sar['command'] == '!fc') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['forgiveclear']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
victim.clear_all_killed_me()
for player in self.game.players.itervalues():
player.clear_tk(victim.get_player_num())
self.game.rcon_say("^1All team kills cleared for ^2%s" % victim.get_name())
else:
for player in self.game.players.itervalues():
player.clear_all_tk()
player.clear_all_killed_me()
self.game.rcon_say("^1All player team kills cleared")
# ping - display the ping of a player
elif sar['command'] == '!ping' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['ping']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
player_ping = 0
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
# update rcon status
self.game.quake.rcon_update()
for player in self.game.quake.players:
if victim.get_player_num() == player.num:
player_ping = player.ping
msg = "^7%s has a ping of ^2%s ms" % (victim.get_name(), player_ping)
self.game.rcon_tell(sar['player_num'], msg)
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['ping']['syntax'])
# id - show the IP, guid and authname of a player - !id <name>
elif sar['command'] == '!id' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['id']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
msg = "^7[^2@%s^7] %s ^2%s ^7[^5%s^7] since ^3%s" % (victim.get_player_id(), victim.get_name(), victim.get_ip_address(), victim.get_authname() if victim.get_authname() else "---", victim.get_first_seen_date())
self.game.rcon_tell(sar['player_num'], msg)
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['id']['syntax'])
# !kickbots - kick all bots
elif (sar['command'] == '!kickbots' or sar['command'] == '!kb') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['kickbots']['level']:
self.game.send_rcon('kick allbots')
# scream - scream a message in different colors to all players
elif sar['command'] == '!scream' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['scream']['level']:
if line.split(sar['command'])[1]:
self.game.rcon_say("^1%s" % line.split(sar['command'])[1].strip())
self.game.rcon_say("^2%s" % line.split(sar['command'])[1].strip())
self.game.rcon_say("^3%s" % line.split(sar['command'])[1].strip())
self.game.rcon_say("^5%s" % line.split(sar['command'])[1].strip())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['scream']['syntax'])
# slap - slap a player a number of times (1-15)
elif sar['command'] == '!slap' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['slap']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].split()
if len(arg) > 1:
user = arg[0]
number = arg[1]
if not number.isdigit():
number = 1
else:
number = int(number)
if number > 15:
number = 15
else:
user = arg[0]
number = 1
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
if victim.get_admin_role() >= self.game.players[sar['player_num']].get_admin_role():
self.game.rcon_tell(sar['player_num'], "^3Insufficient privileges to slap an admin")
else:
for _ in xrange(0, number):
self.game.send_rcon("slap %d" % victim.get_player_num())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['slap']['syntax'])
# swap - swap teams for player 1 and 2 (if in different teams)
elif sar['command'] == '!swap' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['swap']['level']:
if not self.ffa_lms_gametype:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].split()
# swap given player(s)
if len(arg) >= 1:
found1, victim1, _ = self.player_found(arg[0])
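# with a single argument the issuing admin is swapped with the given player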
found2, victim2, _ = (True, self.game.players[sar['player_num']], "") if len(arg) == 1 else self.player_found(arg[1])
if not found1 or not found2:
self.game.rcon_tell(sar['player_num'], '^3Player not found')
else:
team1 = victim1.get_team()
team2 = victim2.get_team()
if team1 == team2:
self.game.rcon_tell(sar['player_num'], "^7Cannot swap, both players are in the same team")
else:
game_data = self.game.get_gamestats()
# remove team lock
victim1.set_team_lock(None)
victim2.set_team_lock(None)
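# order the forceteam calls by current team size: the smaller team is filled first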
if game_data[Player.teams[team1]] < game_data[Player.teams[team2]]:
self.game.rcon_forceteam(victim2.get_player_num(), Player.teams[team1])
self.game.rcon_forceteam(victim1.get_player_num(), Player.teams[team2])
else:
self.game.rcon_forceteam(victim1.get_player_num(), Player.teams[team2])
self.game.rcon_forceteam(victim2.get_player_num(), Player.teams[team1])
self.game.rcon_say('^7Swapped player ^3%s ^7with ^3%s' % (victim1.get_name(), victim2.get_name()))
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['swap']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['swap']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], "^7Command is disabled for this game mode")
# status - report the status of the bot
elif sar['command'] == '!status' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['status']['level']:
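# a successful PRAGMA query indicates that the SQLite connection is alive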
curs.execute("PRAGMA database_list")
msg = "^7Database is ^2UP^7 and Bot started at ^2%s" % self.uptime if curs.fetchall() else "^7Database appears to be ^1DOWN"
self.game.rcon_tell(sar['player_num'], msg)
# version - display the version of the bot
elif sar['command'] == '!version' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['version']['level']:
self.game.rcon_tell(sar['player_num'], "^7Spunky Bot ^2v%s" % __version__)
# veto - stop voting process
elif sar['command'] == '!veto' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['veto']['level']:
self.game.send_rcon('veto')
# ci - kick player with connection interrupted
elif sar['command'] == '!ci' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['ci']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
player_ping = 0
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
# update rcon status
self.game.quake.rcon_update()
for player in self.game.quake.players:
if victim.get_player_num() == player.num:
player_ping = player.ping
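# a ping of 999 indicates an interrupted connection (see check below)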
if player_ping == 999:
self.game.kick_player(victim.get_player_num(), reason='connection interrupted, try to reconnect')
self.game.rcon_say("^2%s ^7was kicked by %s: ^4connection interrupted" % (victim.get_name(), self.game.players[sar['player_num']].get_name()))
else:
self.game.rcon_tell(sar['player_num'], "^3%s has no connection interrupted" % victim.get_name())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['ci']['syntax'])
# ban - ban a player for several days
elif (sar['command'] == '!ban' or sar['command'] == '!b') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['ban']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].split()
if len(arg) == 1 and self.game.players[sar['player_num']].get_admin_role() >= 80:
user = arg[0]
reason = "tempban"
elif len(arg) > 1:
user = arg[0]
reason = ' '.join(arg[1:])[:40].strip()
else:
user = reason = None
if user and reason:
found, victim, msg = self.player_found(user)
kick_reason = REASONS[reason] if reason in REASONS else '' if reason == 'tempban' else reason
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
if sar['player_num'] == victim.get_player_num():
self.game.rcon_tell(sar['player_num'], "^7You cannot ban yourself")
elif victim.get_admin_role() >= self.game.players[sar['player_num']].get_admin_role():
self.game.rcon_tell(sar['player_num'], "^3Insufficient privileges to ban an admin")
else:
# ban for given duration in days
if victim.ban(duration=(self.ban_duration * 86400), reason=reason, admin=self.game.players[sar['player_num']].get_name()):
msg = "^2%s ^1banned ^7for ^3%d day%s ^7by %s" % (victim.get_name(), self.ban_duration, 's' if self.ban_duration > 1 else '', self.game.players[sar['player_num']].get_name())
if kick_reason:
msg = "%s: ^3%s" % (msg, kick_reason)
self.game.rcon_say(msg)
else:
self.game.rcon_tell(sar['player_num'], "^7This player has already a longer ban")
self.game.kick_player(player_num=victim.get_player_num(), reason=kick_reason)
else:
self.game.rcon_tell(sar['player_num'], "^7You need to enter a reason: ^3!ban <name> <reason>")
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['ban']['syntax'])
# baninfo - display active bans of a player
elif (sar['command'] == '!baninfo' or sar['command'] == '!bi') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['baninfo']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
curs.execute("SELECT `expires` FROM `ban_list` WHERE `expires` > '{}' AND `guid` = '{}'".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())), victim.get_guid()))
result = curs.fetchone()
if result:
self.game.rcon_tell(sar['player_num'], "^3%s ^7has an active ban until [^1%s^7]" % (victim.get_name(), str(result[0])))
else:
self.game.rcon_tell(sar['player_num'], "^3%s ^7has no active ban" % victim.get_name())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['baninfo']['syntax'])
# rain - enable or disable rain - !rain <on/off>
elif sar['command'] == '!rain' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['rain']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].strip()
if arg == "off":
self.game.send_rcon('g_enableprecip 0')
self.game.rcon_tell(sar['player_num'], "^7Rain: ^1Off")
elif arg == "on":
self.game.send_rcon('g_enableprecip 1')
self.game.rcon_tell(sar['player_num'], "^7Rain: ^2On")
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['rain']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['rain']['syntax'])
## senior admin level 80
# !kickall <pattern> [<reason>] - kick all players matching <pattern>
elif (sar['command'] == '!kickall' or sar['command'] == '!kall') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['kickall']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].split()
user = arg[0]
reason = ' '.join(arg[1:])[:40].strip() if len(arg) > 1 else ''
if len(user) > 2:
pattern_list = [player for player in self.game.players.itervalues() if user.upper() in player.get_name().upper() and player.get_player_num() != BOT_PLAYER_NUM]
if pattern_list:
for player in pattern_list:
if player.get_admin_role() >= self.game.players[sar['player_num']].get_admin_role():
self.game.rcon_tell(sar['player_num'], "^3Insufficient privileges to kick an admin")
else:
self.game.kick_player(player.get_player_num(), reason)
else:
self.game.rcon_tell(sar['player_num'], "^3No Players found matching %s" % user)
else:
self.game.rcon_tell(sar['player_num'], "^3Pattern must be at least 3 characters long")
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['kickall']['syntax'])
# !banall <pattern> [<reason>] - ban all players matching <pattern>
elif (sar['command'] == '!banall' or sar['command'] == '!ball') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['banall']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].split()
user = arg[0]
reason = ' '.join(arg[1:])[:40].strip() if len(arg) > 1 else 'tempban'
if len(user) > 2:
pattern_list = [player for player in self.game.players.itervalues() if user.upper() in player.get_name().upper() and player.get_player_num() != BOT_PLAYER_NUM]
if pattern_list:
for player in pattern_list:
if player.get_admin_role() >= self.game.players[sar['player_num']].get_admin_role():
self.game.rcon_tell(sar['player_num'], "^3Insufficient privileges to ban an admin")
else:
player.ban(duration=(self.ban_duration * 86400), reason=reason, admin=self.game.players[sar['player_num']].get_name())
self.game.rcon_say("^2%s ^1banned ^7for ^3%d day%s ^7by %s" % (player.get_name(), self.ban_duration, 's' if self.ban_duration > 1 else '', self.game.players[sar['player_num']].get_name()))
else:
self.game.rcon_tell(sar['player_num'], "^3No Players found matching %s" % user)
else:
self.game.rcon_tell(sar['player_num'], "^3Pattern must be at least 3 characters long")
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['banall']['syntax'])
# !addbots - add bots to both teams
elif sar['command'] == '!addbots' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['addbots']['level']:
self.game.send_rcon('addbot boa 4 blue 50 BOT_1')
self.game.send_rcon('addbot cheetah 4 red 50 BOT_2')
game_data = self.game.get_gamestats()
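# put the third bot on the smaller team, or add one bot to each team when both are even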
if game_data[Player.teams[1]] > game_data[Player.teams[2]]:
self.game.send_rcon('addbot mantis 4 blue 50 BOT_3')
elif game_data[Player.teams[1]] < game_data[Player.teams[2]]:
self.game.send_rcon('addbot chicken 4 red 50 BOT_4')
else:
self.game.send_rcon('addbot python 4 blue 50 BOT_3')
self.game.send_rcon('addbot cobra 4 red 50 BOT_4')
# !bots on/off - enable or disable bot support
elif sar['command'] == '!bots' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['bots']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].strip()
if arg == "on":
self.game.send_rcon('bot_enable 1')
self.game.send_rcon('bot_minplayers 0')
self.game.rcon_tell(sar['player_num'], "^7Bot support: ^2On")
self.game.rcon_tell(sar['player_num'], "^3Map cycle may be required to enable bot support")
elif arg == "off":
self.game.send_rcon('bot_enable 0')
self.game.send_rcon('kick allbots')
self.game.rcon_tell(sar['player_num'], "^7Bot support: ^1Off")
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['bots']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['bots']['syntax'])
# clear - clear all player warnings - !clear [<player>]
elif sar['command'] in ('!clear', '!kiss') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['clear']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
victim.clear_warning()
for player in self.game.players.itervalues():
player.clear_tk(victim.get_player_num())
self.game.rcon_say("^1All warnings and team kills cleared for ^2%s" % victim.get_name())
else:
for player in self.game.players.itervalues():
player.clear_warning()
player.clear_all_tk()
player.clear_all_killed_me()
self.game.rcon_say("^1All player warnings and team kills cleared")
# map - load given map
elif sar['command'] == '!map' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['map']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].strip()
found, newmap, msg = self.map_found(arg)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
self.game.rcon_bigtext("^7Changing map to %s" % newmap)
self.game.send_rcon('map %s' % newmap)
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['map']['syntax'])
# mapcycle - list the map rotation
elif sar['command'] in ('!mapcycle', '@mapcycle') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['mapcycle']['level']:
self.tell_say_message(sar, "^7Mapcycle: ^3%s" % ', '.join(self.game.maplist))
# maps - display all available maps
elif (sar['command'] == '!maps' or sar['command'] == '@maps') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['maps']['level']:
map_list = self.game.get_all_maps()
msg = "^7Available Maps [^2%s^7]: ^3%s" % (len(map_list), ', ^3'.join(map_list))
self.tell_say_message(sar, msg)
# maprestart - restart the map
elif (sar['command'] == '!maprestart' or sar['command'] == '!restart') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['maprestart']['level']:
self.game.send_rcon('restart')
self.stats_reset()
# moon - activate Moon mode (low gravity)
elif sar['command'] in ('!moon', '!lowgravity') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['moon']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].strip()
if arg == "off":
self.game.send_rcon('g_gravity 800')
self.game.rcon_tell(sar['player_num'], "^7Moon mode: ^1Off")
elif arg == "on":
self.game.send_rcon('g_gravity 100')
self.game.rcon_tell(sar['player_num'], "^7Moon mode: ^2On")
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['moon']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['moon']['syntax'])
# !setgravity <value> - set the gravity value, default = 800 (lower values mean less gravity)
elif sar['command'] == '!setgravity' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['setgravity']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].strip()
if arg.isdigit():
self.game.send_rcon('g_gravity %s' % arg)
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['setgravity']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['setgravity']['syntax'])
# instagib on/off
elif sar['command'] == '!instagib' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['instagib']['level']:
if self.urt_modversion >= 43:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].strip()
if arg == "off":
self.game.send_rcon('g_instagib 0')
self.game.rcon_tell(sar['player_num'], "^7Instagib: ^1Off")
self.game.rcon_tell(sar['player_num'], "^7Instagib changed for next map")
elif arg == "on":
self.game.send_rcon('g_instagib 1')
self.game.rcon_tell(sar['player_num'], "^7Instagib: ^2On")
self.game.rcon_tell(sar['player_num'], "^7Instagib changed for next map")
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['instagib']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['instagib']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], "^7The command ^3!instagib ^7is not supported")
# cyclemap - start next map in rotation
elif sar['command'] == '!cyclemap' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['cyclemap']['level']:
self.game.send_rcon('cyclemap')
# setnextmap - set the given map as nextmap
elif sar['command'] == '!setnextmap' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['setnextmap']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].strip()
found, nextmap, msg = self.map_found(arg)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
self.game.send_rcon('g_nextmap %s' % nextmap)
self.game.next_mapname = nextmap
self.game.rcon_tell(sar['player_num'], "^7Next Map set to: ^3%s" % nextmap)
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['setnextmap']['syntax'])
# rebuild - sync up all available maps
elif sar['command'] == '!rebuild' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['rebuild']['level']:
# get full map list
self.game.set_all_maps()
self.game.maplist = filter(None, self.game.get_mapcycle_path())
self.game.rcon_tell(sar['player_num'], "^7Rebuild maps: ^3%s ^7maps found" % len(self.game.get_all_maps()))
# set current and next map
self.game.set_current_map()
self.game.rcon_tell(sar['player_num'], self.get_nextmap())
# swapteams - swap current teams
elif sar['command'] == '!swapteams' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['swapteams']['level']:
self.game.send_rcon('swapteams')
# exec - execute given config file
elif sar['command'] == '!exec' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['exec']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].strip()
self.game.send_rcon('exec %s' % arg)
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['exec']['syntax'])
# !gear - set allowed weapons
elif sar['command'] == '!gear' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['gear']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].strip()
# docs: http://www.urbanterror.info/support/180-server-cvars/#2
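# the branches below send numeric g_gear values for UrT 4.1 and letter-based gear strings for later versions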
if "all" in arg:
self.game.send_rcon('g_gear 0')
self.game.rcon_say("^7Gear: ^2All weapons enabled")
elif "default" in arg:
self.game.send_rcon('g_gear "%s"' % self.default_gear)
self.game.rcon_say("^7Gear: ^2Server defaults enabled")
elif "knife" in arg:
self.game.send_rcon('g_gear "%s"' % 'FGHIJKLMNZacefghijklOQRSTUVWX' if self.urt_modversion > 41 else '63')
self.game.rcon_say("^7Gear: ^2Knife only")
elif "pistol" in arg:
self.game.send_rcon('g_gear "%s"' % 'HIJKLMNZacehijkOQ' if self.urt_modversion > 41 else '55')
self.game.rcon_say("^7Gear: ^2Pistols only")
elif "shotgun" in arg:
self.game.send_rcon('g_gear "%s"' % 'FGIJKLMNZacefghiklOQ' if self.urt_modversion > 41 else '59')
self.game.rcon_say("^7Gear: ^2Shotguns only")
elif "sniper" in arg:
self.game.send_rcon('g_gear "%s"' % 'FGHIJKLMacefghjklOQ' if self.urt_modversion > 41 else '61')
self.game.rcon_say("^7Gear: ^2Sniper rifles only")
elif "magnum" in arg and self.urt_modversion > 42:
self.game.send_rcon('g_gear FGHIJKLMNZacefghijkOQRSTUVWX')
self.game.rcon_say("^7Gear: ^2Magnums only")
elif "mac" in arg and self.urt_modversion > 42:
self.game.send_rcon('g_gear FGHIJKLMNZacefgijklOQ')
self.game.rcon_say("^7Gear: ^2MAC11 only")
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['gear']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['gear']['syntax'])
# kill - kill a player
elif sar['command'] == '!kill' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['kill']['level']:
if self.urt_modversion > 41:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
if victim.get_admin_role() >= self.game.players[sar['player_num']].get_admin_role():
self.game.rcon_tell(sar['player_num'], "^3Insufficient privileges to kill an admin")
else:
self.game.send_rcon("smite %d" % victim.get_player_num())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['kill']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], "^7The command ^3!kill ^7is not supported")
# lastadmin - display the last disconnected admin
elif sar['command'] == '!lastadmin' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['lastadmin']['level']:
msg = "^7Last disconnected admin: ^3%s [^2%d^3]" % (self.last_admin.get_name(), self.last_admin.get_admin_role()) if self.last_admin else "^3No admin left the server yet"
self.game.rcon_tell(sar['player_num'], msg)
# lookup - search for player in database
elif (sar['command'] == '!lookup' or sar['command'] == '!l') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['lookup']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].strip()
curs.execute("SELECT `id`,`name`,`time_joined` FROM `player` WHERE `name` like '%{}%' ORDER BY `time_joined` DESC LIMIT 8".format(arg))
result = curs.fetchall()
for row in result:
self.game.rcon_tell(sar['player_num'], "^7[^2@%s^7] %s ^7[^1%s^7]" % (str(row[0]), str(row[1]), str(row[2])), False)
if not result:
self.game.rcon_tell(sar['player_num'], "^3No Player found matching %s" % arg)
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['lookup']['syntax'])
# permban - ban a player permanently
elif (sar['command'] == '!permban' or sar['command'] == '!pb') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['permban']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].split()
if len(arg) > 1:
user = arg[0]
reason = ' '.join(arg[1:])[:40].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
if sar['player_num'] == victim.get_player_num():
self.game.rcon_tell(sar['player_num'], "^7You cannot ban yourself")
elif victim.get_admin_role() >= self.game.players[sar['player_num']].get_admin_role():
self.game.rcon_tell(sar['player_num'], "^3Insufficient privileges to ban an admin")
else:
# ban for 20 years
victim.ban(duration=630720000, reason=reason, admin=self.game.players[sar['player_num']].get_name())
self.game.rcon_say("^2%s ^1banned permanently ^7by %s: ^4%s" % (victim.get_name(), self.game.players[sar['player_num']].get_name(), reason))
self.game.kick_player(victim.get_player_num())
# add IP address to bot-banlist.txt
with open(os.path.join(HOME, 'bot-banlist.txt'), 'a') as banlist:
banlist.write("%s:-1 // %s banned on %s, reason : %s\n" % (victim.get_ip_address(), victim.get_name(), time.strftime("%d/%m/%Y (%H:%M)", time.localtime(time.time())), reason))
else:
self.game.rcon_tell(sar['player_num'], "^7You need to enter a reason: ^3!permban <name> <reason>")
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['permban']['syntax'])
# makereg - make a player a regular (Level 2) user
elif (sar['command'] == '!makereg' or sar['command'] == '!mr') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['makereg']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
if victim.get_registered_user():
if victim.get_admin_role() < 2:
victim.update_db_admin_role(role=2)
self.game.rcon_tell(sar['player_num'], "^1%s ^7put in group Regular" % victim.get_name())
else:
self.game.rcon_tell(sar['player_num'], "^3%s is already in a higher level group" % victim.get_name())
else:
# register new user in DB and set role to 2
victim.register_user_db(role=2)
self.game.rcon_tell(sar['player_num'], "^1%s ^7put in group Regular" % victim.get_name())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['makereg']['syntax'])
# !unreg <player> - remove a player from the regular group
elif sar['command'] == '!unreg' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['unreg']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
if victim.get_admin_role() == 2:
victim.update_db_admin_role(role=1)
self.game.rcon_tell(sar['player_num'], "^1%s ^7put in group User" % victim.get_name())
else:
self.game.rcon_tell(sar['player_num'], "^3%s is not in the regular group" % victim.get_name())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['unreg']['syntax'])
# putgroup - add a client to a group
elif sar['command'] == '!putgroup' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['putgroup']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].split()
if len(arg) > 1:
user = arg[0]
right = arg[1]
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
if victim.get_registered_user():
new_role = victim.get_admin_role()
else:
# register new user in DB and set role to 1
victim.register_user_db(role=1)
new_role = 1
if right == "user" and victim.get_admin_role() < 80:
self.game.rcon_tell(sar['player_num'], "^3%s put in group ^7User" % victim.get_name())
new_role = 1
elif "reg" in right and victim.get_admin_role() < 80:
self.game.rcon_tell(sar['player_num'], "^3%s put in group ^7Regular" % victim.get_name())
new_role = 2
elif "mod" in right and victim.get_admin_role() < 80:
self.game.rcon_tell(sar['player_num'], "^3%s added as ^7Moderator" % victim.get_name())
self.game.rcon_tell(victim.get_player_num(), "^3You are added as ^7Moderator")
new_role = 20
elif right == "admin" and victim.get_admin_role() < 80:
self.game.rcon_tell(sar['player_num'], "^3%s added as ^7Admin" % victim.get_name())
self.game.rcon_tell(victim.get_player_num(), "^3You are added as ^7Admin")
new_role = 40
elif "fulladmin" in right and victim.get_admin_role() < 80:
self.game.rcon_tell(sar['player_num'], "^3%s added as ^7Full Admin" % victim.get_name())
self.game.rcon_tell(victim.get_player_num(), "^3You are added as ^7Full Admin")
new_role = 60
# Note: senioradmin level can only be set by head admin or super admin
elif "senioradmin" in right and self.game.players[sar['player_num']].get_admin_role() >= 90 and victim.get_player_num() != sar['player_num']:
self.game.rcon_tell(sar['player_num'], "^3%s added as ^6Senior Admin" % victim.get_name())
self.game.rcon_tell(victim.get_player_num(), "^3You are added as ^6Senior Admin")
new_role = 80
# Note: superadmin level can only be set by head admin
elif "superadmin" in right and self.game.players[sar['player_num']].get_admin_role() == 100 and victim.get_player_num() != sar['player_num']:
self.game.rcon_tell(sar['player_num'], "^3%s added as ^2Super Admin" % victim.get_name())
self.game.rcon_tell(victim.get_player_num(), "^3You are added as ^2Super Admin")
new_role = 90
else:
self.game.rcon_tell(sar['player_num'], "^3Sorry, you cannot put %s in group <%s>" % (victim.get_name(), right))
victim.update_db_admin_role(role=new_role)
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['putgroup']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['putgroup']['syntax'])
# banlist - display the 10 most recent active bans
elif sar['command'] == '!banlist' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['banlist']['level']:
curs.execute("SELECT `id`,`name` FROM `ban_list` WHERE `expires` > '{}' ORDER BY `timestamp` DESC LIMIT 10".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))))
result = curs.fetchall()
banlist = ['^7[^2@%s^7] %s' % (row[0], row[1]) for row in result]
msg = 'Currently no one is banned' if not banlist else str(", ".join(banlist))
self.game.rcon_tell(sar['player_num'], "^7Banlist: %s" % msg)
# lastbans - list the last 4 bans
elif (sar['command'] == '!lastbans' or sar['command'] == '!bans') and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['lastbans']['level']:
curs.execute("SELECT `id`,`name`,`expires` FROM `ban_list` ORDER BY `timestamp` DESC LIMIT 4")
result = curs.fetchall()
lastbanlist = ['^3[^2@%s^3] ^7%s ^3(^1%s^3)' % (row[0], row[1], row[2]) for row in result]
if result:
for item in lastbanlist:
self.game.rcon_tell(sar['player_num'], str(item))
else:
self.game.rcon_tell(sar['player_num'], "^7Currently no one is banned")
# unban - unban a player from the database via ID
elif sar['command'] == '!unban' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['unban']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].strip().lstrip('@')
if arg.isdigit():
curs.execute("SELECT `guid`,`name`,`ip_address` FROM `ban_list` WHERE `id` = {}".format(int(arg)))
result = curs.fetchone()
if result:
guid = result[0]
name = str(result[1])
ip_addr = str(result[2])
curs.execute("DELETE FROM `ban_list` WHERE `id` = {}".format(int(arg)))
conn.commit()
self.game.rcon_tell(sar['player_num'], "^7Player ^2%s ^7unbanned" % name)
curs.execute("DELETE FROM `ban_list` WHERE `guid` = '{}' OR ip_address = '{}'".format(guid, ip_addr))
conn.commit()
self.game.rcon_tell(sar['player_num'], "^7Try to remove duplicates of [^1%s^7]" % ip_addr)
else:
self.game.rcon_tell(sar['player_num'], "^7Invalid ID, no Player found")
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['unban']['syntax'])
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['unban']['syntax'])
## head admin level 100 or super admin level 90
# password - set private server password
elif sar['command'] == '!password' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['password']['level']:
if line.split(sar['command'])[1]:
arg = line.split(sar['command'])[1].strip()
self.game.send_rcon('g_password %s' % arg)
self.game.rcon_tell(sar['player_num'], "^7Password set to '%s' - Server is private" % arg)
else:
self.game.send_rcon('g_password ""')
self.game.rcon_tell(sar['player_num'], "^7Password removed - Server is public")
# reload
elif sar['command'] == '!reload' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['reload']['level']:
self.game.send_rcon('reload')
# ungroup - remove the admin level from a player
elif sar['command'] == '!ungroup' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['ungroup']['level']:
if line.split(sar['command'])[1]:
user = line.split(sar['command'])[1].strip()
found, victim, msg = self.player_found(user)
if not found:
self.game.rcon_tell(sar['player_num'], msg)
else:
if (1 < victim.get_admin_role() < COMMANDS['ungroup']['level'] or self.game.players[sar['player_num']].get_admin_role() == 100) and victim.get_player_num() != sar['player_num']:
self.game.rcon_tell(sar['player_num'], "^1%s ^7put in group User" % victim.get_name())
victim.update_db_admin_role(role=1)
else:
self.game.rcon_tell(sar['player_num'], "^3Sorry, you cannot put %s in group User" % victim.get_name())
else:
self.game.rcon_tell(sar['player_num'], COMMANDS['ungroup']['syntax'])
# switch to gametype
elif sar['command'] == '!ffa' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['ffa']['level']:
self.game.send_rcon('g_gametype 0')
self.game.rcon_tell(sar['player_num'], "^7Game Mode: ^2Free For All")
self.game.rcon_tell(sar['player_num'], "^7Mode changed for next map")
elif sar['command'] == '!lms' and self.urt_modversion > 42 and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['lms']['level']:
self.game.send_rcon('g_gametype 1')
self.game.rcon_tell(sar['player_num'], "^7Game Mode: ^2Last Man Standing")
self.game.rcon_tell(sar['player_num'], "^7Mode changed for next map")
elif sar['command'] == '!tdm' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['tdm']['level']:
self.game.send_rcon('g_gametype 3')
self.game.rcon_tell(sar['player_num'], "^7Game Mode: ^2Team Deathmatch")
self.game.rcon_tell(sar['player_num'], "^7Mode changed for next map")
elif sar['command'] == '!ts' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['ts']['level']:
self.game.send_rcon('g_gametype 4')
self.game.rcon_tell(sar['player_num'], "^7Game Mode: ^2Team Survivor")
self.game.rcon_tell(sar['player_num'], "^7Mode changed for next map")
# 5: Follow The Leader
# 6: Capture And Hold
elif sar['command'] == '!ctf' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['ctf']['level']:
self.game.send_rcon('g_gametype 7')
self.game.rcon_tell(sar['player_num'], "^7Game Mode: ^2Capture the Flag")
self.game.rcon_tell(sar['player_num'], "^7Mode changed for next map")
elif sar['command'] == '!bomb' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['bomb']['level']:
self.game.send_rcon('g_gametype 8')
self.game.rcon_tell(sar['player_num'], "^7Game Mode: ^2Bomb")
self.game.rcon_tell(sar['player_num'], "^7Mode changed for next map")
elif sar['command'] == '!jump' and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['jump']['level']:
self.game.send_rcon('g_gametype 9')
self.game.rcon_tell(sar['player_num'], "^7Game Mode: ^2Jump")
self.game.rcon_tell(sar['player_num'], "^7Mode changed for next map")
# 10 Freeze Tag
elif sar['command'] == '!gungame' and self.urt_modversion > 42 and self.game.players[sar['player_num']].get_admin_role() >= COMMANDS['gungame']['level']:
self.game.send_rcon('g_gametype 11')
self.game.rcon_tell(sar['player_num'], "^7Game Mode: ^2Gun Game")
self.game.rcon_tell(sar['player_num'], "^7Mode changed for next map")
## iamgod
# iamgod - register user as Head Admin
elif sar['command'] == '!iamgod':
if self.iamgod:
if not self.game.players[sar['player_num']].get_registered_user():
# register new user in DB and set admin role to 100
self.game.players[sar['player_num']].register_user_db(role=100)
else:
self.game.players[sar['player_num']].update_db_admin_role(role=100)
self.iamgod = False
self.game.rcon_tell(sar['player_num'], "^7You are registered as ^6Head Admin")
## unknown command
elif sar['command'].startswith('!') and len(sar['command']) > 1 and self.game.players[sar['player_num']].get_admin_role() > 20:
if sar['command'].lstrip('!') in self.superadmin_cmds:
self.game.rcon_tell(sar['player_num'], "^7Insufficient privileges to use command ^3%s" % sar['command'])
else:
self.game.rcon_tell(sar['player_num'], "^7Unknown command ^3%s" % sar['command'])
## bad words
elif self.bad_words_autokick and [sample for sample in bad_words if sample in line.lower()] and self.game.players[sar['player_num']].get_admin_role() < 40:
victim = self.game.players[sar['player_num']]
victim.add_warning('bad language')
self.kick_high_warns(victim, 'bad language', 'Behave, stop using bad language')
def kick_high_warns(self, player, reason, text):
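"""
kick a player who has collected more than 3 warnings, otherwise send a warning message
"""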
if player.get_warning() > 3:
self.game.rcon_say("^2%s ^7was kicked, %s" % (player.get_name(), reason))
self.game.kick_player(player.get_player_num(), reason=reason)
else:
self.game.rcon_tell(player.get_player_num(), "^1WARNING ^7[^3%d^7]: %s" % (player.get_warning(), text))
if player.get_warning() == 3:
self.game.rcon_say("^1ALERT: ^2%s ^7auto-kick from warnings if not cleared" % player.get_name())
def get_admins_online(self):
"""
return list of Admins online
"""
liste = "%s" % ", ".join(["^3%s [^2%d^3]" % (player.get_name(), player.get_admin_role()) for player in self.game.players.itervalues() if player.get_admin_role() >= 20])
return "^7Admins online: %s" % liste if liste else "^7No admins online"
def get_admin_count(self):
"""
return number of Admins online
"""
counter = sum(1 for player in self.game.players.itervalues() if player.get_admin_role() >= 20)
return "^7%d Admin%s online" % (counter, 's' if counter > 1 else '')
def get_nextmap(self):
"""
return the next map in the mapcycle
"""
g_nextmap = self.game.get_cvar('g_nextmap')
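# fall back to g_nextCycleMap if g_nextmap is unset or the rcon reply contains player status output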
if not g_nextmap or "num score ping name" in g_nextmap:
g_nextmap = self.game.get_cvar('g_nextCycleMap')
if not g_nextmap:
g_nextmap = self.game.get_cvar('g_nextCycleMap')
if g_nextmap in self.game.get_all_maps():
msg = "^7Next Map: ^3%s" % g_nextmap
self.game.next_mapname = g_nextmap
else:
msg = "^7Next Map: ^3%s" % self.game.next_mapname
return msg
def tell_say_message(self, sar, msg):
"""
display message in private or global chat
"""
if sar['command'].startswith('@'):
self.game.rcon_say(msg)
else:
self.game.rcon_tell(sar['player_num'], msg)
def convert_time(self, time_string):
"""
convert a time string like '2h' into a duration in seconds and a human readable text
"""
if time_string.endswith('d'):
duration_string = time_string.rstrip('d')
duration = int(duration_string) * 86400 if duration_string.isdigit() else 86400
elif time_string.endswith('h'):
duration_string = time_string.rstrip('h')
duration = int(duration_string) * 3600 if duration_string.isdigit() else 3600
elif time_string.endswith('m'):
duration_string = time_string.rstrip('m')
duration = int(duration_string) * 60 if duration_string.isdigit() else 60
elif time_string.endswith('s'):
duration_string = time_string.rstrip('s')
duration = int(duration_string) if duration_string.isdigit() else 30
else:
duration = 3600
# default ban duration = 1 hour
if duration == 0:
duration = 3600
# limit to max duration = 72 hours
elif duration > 259200:
duration = 259200
# modulo
days = (duration - (duration % 86400)) / 86400
hours = ((duration % 86400) - (duration % 3600)) / 3600
mins = ((duration % 3600) - (duration % 60)) / 60
secs = duration % 60
duration_output = []
append = duration_output.append
if days > 0:
append("%s day%s" % (days, 's' if days > 1 else ''))
if hours > 0:
append("%s hour%s" % (hours, 's' if hours > 1 else ''))
if mins > 0:
append("%s minute%s" % (mins, 's' if mins > 1 else ''))
if secs > 0:
append("%s second%s" % (secs, 's' if secs > 1 else ''))
return duration, ' '.join(duration_output)
def handle_flag(self, line):
"""
handle flag
"""
with self.players_lock:
tmp = line.split()
player_num = int(tmp[0])
player = self.game.players[player_num]
player_name = player.get_name()
action = tmp[1]
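# flag actions handled here: '1:' = flag returned, '2:' = flag captured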
if action == '1:':
player.return_flag()
logger.debug("Player %d %s returned the flag", player_num, player_name)
elif action == '2:':
player.capture_flag()
cap_count = player.get_flags_captured()
self.game.send_rcon("^7%s has captured ^2%s ^7flag%s" % (player_name, cap_count, 's' if cap_count > 1 else ''))
logger.debug("Player %d %s captured the flag", player_num, player_name)
def handle_bomb(self, line):
"""
handle bomb
"""
with self.players_lock:
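# 'Bombholder is <num>' lines are split on 'is', all other bomb events ('... by <num>!') are split on 'by'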
tmp = line.split("is") if "Bombholder" in line else line.split("by")
action = tmp[0].strip()
player_num = int(tmp[1].rstrip('!').strip())
name = self.game.players[player_num].get_name()
player = self.game.players[player_num]
if action == 'Bomb was defused':
player.defused_bomb()
logger.debug("Player %d %s defused the bomb", player_num, name)
self.game.send_rcon("^7The ^2BOMB ^7has been defused by ^2%s^7!" % name)
self.handle_teams_ts_mode('Blue')
# kill all surviving red players
if self.kill_survived_opponents and self.urt_modversion > 41:
for player in self.game.players.itervalues():
if player.get_team() == 1 and player.get_alive():
self.game.send_rcon("smite %d" % player.get_player_num())
elif action == 'Bomb was planted':
player.planted_bomb()
logger.debug("Player %d %s planted the bomb", player_num, name)
self.game.send_rcon("^7The ^1BOMB ^7has been planted by ^1%s^7! ^2%s ^7seconds to detonation." % (name, self.explode_time))
if self.spam_bomb_planted_msg:
self.game.rcon_bigtext("^1The ^7BOMB ^1has been planted by ^7%s^1!" % name)
self.game.rcon_bigtext("^7The ^1BOMB ^7has been planted by ^1%s^7!" % name)
self.game.rcon_bigtext("^1The ^7BOMB ^1has been planted by ^7%s^1!" % name)
self.game.rcon_bigtext("^7The ^1BOMB ^7has been planted by ^1%s^7!" % name)
elif action == 'Bomb was tossed':
player.bomb_tossed()
for mate in self.game.players.itervalues():
if mate.get_team() == 1 and mate.get_alive() and mate != player:
self.game.rcon_tell(mate.get_player_num(), "^7The ^1BOMB ^7is loose!")
elif action == 'Bomb has been collected':
player.is_bombholder()
for mate in self.game.players.itervalues():
if mate.get_team() == 1 and mate.get_alive() and mate != player:
self.game.rcon_tell(mate.get_player_num(), "^7Help ^1%s ^7to plant the ^1BOMB" % name)
elif action == 'Bombholder':
player.is_bombholder()
def handle_bomb_exploded(self):
"""
handle bomb exploded
"""
logger.debug("Bomb exploded!")
if self.kill_survived_opponents and self.urt_modversion > 41:
# start Thread to kill all surviving blue players
processor = Thread(target=self.kill_blue_team_bomb_exploded)
processor.setDaemon(True)
processor.start()
self.handle_teams_ts_mode('Red')
def kill_blue_team_bomb_exploded(self):
"""
Kill all surviving blue players when the bomb exploded
"""
self.game.rcon_say("^7Planted?")
time.sleep(1.3)
with self.players_lock:
for player in self.game.players.itervalues():
if player.get_team() == 2 and player.get_alive():
self.game.send_rcon("smite %d" % player.get_player_num())
def handle_teams_ts_mode(self, line):
"""
handle team balance in Team Survivor mode
"""
logger.debug("SurvivorWinner: %s", line)
self.game.send_rcon("%s%s ^7team wins" % ('^1' if line == 'Red' else '^4', line) if 'Draw' not in line else "^7Draw")
self.autobalancer()
if self.ts_do_team_balance:
self.allow_cmd_teams = True
self.handle_team_balance()
if self.allow_cmd_teams_round_end:
self.allow_cmd_teams = False
def handle_team_balance(self):
"""
balance teams if needed
"""
with self.players_lock:
game_data = self.game.get_gamestats()
self.game.rcon_say("^7Red: ^1%s ^7- Blue: ^4%s ^7- Spectator: ^3%s" % (game_data[Player.teams[1]], game_data[Player.teams[2]], game_data[Player.teams[3]]))
if (abs(game_data[Player.teams[1]] - game_data[Player.teams[2]])) > 1:
if self.allow_cmd_teams:
self.game.balance_teams(game_data)
self.ts_do_team_balance = False
logger.debug("Balance teams by user request")
else:
if self.ts_gametype or self.bomb_gametype or self.freeze_gametype:
self.ts_do_team_balance = True
self.game.rcon_say("^7Teams will be balanced at the end of this round!")
else:
self.game.rcon_say("^7Teams are already balanced")
self.ts_do_team_balance = False
def autobalancer(self):
"""
auto balance teams at the end of the round if needed
"""
if self.teams_autobalancer:
with self.players_lock:
game_data = self.game.get_gamestats()
if (abs(game_data[Player.teams[1]] - game_data[Player.teams[2]])) > 1:
self.game.balance_teams(game_data)
logger.debug("Autobalancer performed team balance")
self.ts_do_team_balance = False
def handle_freeze(self, line):
"""
handle freeze
"""
with self.players_lock:
info = line.split(":", 1)[0].split()
player_num = int(info[0])
self.game.players[player_num].freeze()
def handle_thawout(self, line):
"""
handle thaw out
"""
with self.players_lock:
info = line.split(":", 1)[0].split()
player_num = int(info[0])
self.game.players[player_num].thawout()
def handle_awards(self):
"""
display awards and personal stats at the end of the round
"""
most_kills = 0
most_flags = 0
most_streak = 0
most_hs = 0
most_frozen = 0
most_thawouts = 0
most_defused = 0
most_planted = 0
most_he_kills = 0
most_knife_kills = 0
fastest_cap = 999
most_flag_returns = 0
flagrunner = ""
serialkiller = ""
streaker = ""
freezer = ""
thawouter = ""
headshooter = ""
defused_by = ""
planted_by = ""
nader = ""
knifer = ""
fastrunner = ""
defender = ""
msg = []
append = msg.append
with self.players_lock:
for player in self.game.players.itervalues():
player_num = player.get_player_num()
if player_num == BOT_PLAYER_NUM:
continue
player_name = player.get_name()
player_kills = player.get_kills()
player_headshots = player.get_headshots()
if player.get_flags_captured() > most_flags:
most_flags = player.get_flags_captured()
flagrunner = player_name
if player_kills > most_kills:
most_kills = player_kills
serialkiller = player_name
if player.get_max_kill_streak() > most_streak:
most_streak = player.get_max_kill_streak()
streaker = player_name
if player_headshots > most_hs:
most_hs = player_headshots
headshooter = player_name
if player.get_freeze() > most_frozen:
most_frozen = player.get_freeze()
freezer = player_name
if player.get_thawout() > most_thawouts:
most_thawouts = player.get_thawout()
thawouter = player_name
if player.get_defused_bomb() > most_defused:
most_defused = player.get_defused_bomb()
defused_by = player_name
if player.get_planted_bomb() > most_planted:
most_planted = player.get_planted_bomb()
planted_by = player_name
if player.get_he_kills() > most_he_kills:
most_he_kills = player.get_he_kills()
nader = player_name
if player.get_knife_kills() > most_knife_kills:
most_knife_kills = player.get_knife_kills()
knifer = player_name
if 0 < player.get_flag_capture_time() < fastest_cap:
fastest_cap = player.get_flag_capture_time()
fastrunner = player_name
if player.get_flags_returned() > most_flag_returns:
most_flag_returns = player.get_flags_returned()
defender = player_name
# display personal stats at the end of the round, stats for players in spec will not be displayed
if player.get_team() != 3:
if self.freeze_gametype:
self.game.rcon_tell(player_num, "^7Stats %s: ^7F ^2%d ^7T ^3%d ^7HS ^1%d ^7TK ^1%d" % (player_name, player.get_freeze(), player.get_thawout(), player.get_headshots(), player.get_team_kill_count()))
else:
self.game.rcon_tell(player_num, "^7Stats %s: ^7K ^2%d ^7D ^3%d ^7HS ^1%d ^7TK ^1%d" % (player_name, player_kills, player.get_deaths(), player_headshots, player.get_team_kill_count()))
# get Awards
if most_flags > 1:
append("^7%s: ^2%d ^4caps" % (flagrunner, most_flags))
if most_planted > 1:
append("^7%s: ^2%d ^5planted" % (planted_by, most_planted))
if most_defused > 1:
append("^7%s: ^2%d ^4defused" % (defused_by, most_defused))
if most_frozen > 1:
append("^7%s: ^2%d ^3freezes" % (freezer, most_frozen))
if most_thawouts > 1:
append("^7%s: ^2%d ^4thaws" % (thawouter, most_thawouts))
if most_kills > 1:
append("^7%s: ^2%d ^3kills" % (serialkiller, most_kills))
if most_streak > 1:
append("^7%s: ^2%d ^6streaks" % (streaker, most_streak))
if most_hs > 1:
append("^7%s: ^2%d ^1heads" % (headshooter, most_hs))
# Bomb statistics
if most_planted > 1 or most_defused > 1:
self.game.rcon_say("^2Top Objectives: ^7%s [^1%s^7]" % ((planted_by, most_planted) if most_planted > most_defused else (defused_by, most_defused)))
# CTF statistics
if most_flags > 1:
self.game.rcon_say("^2Top Objectives: ^7%s [^1%s^7]" % (flagrunner, most_flags))
# HE grenade kills
if most_he_kills > 1:
self.game.rcon_say("^2Most HE grenade kills: ^7%s (^1%d ^7HE kills)" % (nader, most_he_kills))
if most_knife_kills > 1:
self.game.rcon_say("^2Most knife kills: ^7%s (^1%d ^7kills)" % (knifer, most_knife_kills))
# CTF statistics
if fastest_cap < 999:
self.game.rcon_say("^2Fastest cap: ^7%s (^1%s ^7sec)" % (fastrunner, fastest_cap))
if most_flag_returns > 1:
self.game.rcon_say("^2Best defender: ^7%s (^1%d ^7flag returns)" % (defender, most_flag_returns))
# display Awards
if msg:
self.game.rcon_say("^1AWARDS: %s" % " ^7- ".join(msg))
### CLASS Player ###
class Player(object):
"""
Player class
"""
teams = {0: "green", 1: "red", 2: "blue", 3: "spectator"}
roles = {0: "Guest", 1: "User", 2: "Regular", 20: "Moderator", 40: "Admin", 60: "Full Admin", 80: "Senior Admin", 90: "Super Admin", 100: "Head Admin"}
def __init__(self, player_num, ip_address, guid, name, auth=''):
"""
create a new instance of Player
"""
self.player_num = player_num
self.guid = guid
self.name = ''
self.authname = auth
self.player_id = 0
self.aliases = []
self.registered_user = False
self.num_played = 0
self.last_visit = 0
self.admin_role = 0
self.first_seen = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
self.kills = 0
self.froze = 0
self.thawouts = 0
self.db_kills = 0
self.killing_streak = 0
self.losing_streak = 0
self.max_kill_streak = 0
self.db_killing_streak = 0
self.deaths = 0
self.db_deaths = 0
self.db_suicide = 0
self.head_shots = 0
self.db_head_shots = 0
self.hitzone = {'body': 0, 'arms': 0, 'legs': 0}
self.all_hits = 0
self.he_kills = 0
self.knife_kills = 0
self.tk_count = 0
self.db_tk_count = 0
self.db_team_death = 0
self.tk_victim_names = []
self.tk_killer_names = []
self.grudged_player = []
self.ping_value = 0
self.warn_list = []
self.last_warn_time = 0
self.flags_captured = 0
self.flags_returned = 0
self.flag_capture_time = 999
self.bombholder = False
self.bomb_carrier_killed = 0
self.killed_with_bomb = 0
self.bomb_planted = 0
self.bomb_defused = 0
self.address = ip_address
self.team = 3
self.team_lock = None
self.time_joined = time.time()
self.welcome_msg = True
self.first_time = False
self.country = None
self.ban_id = 0
self.ban_msg = ''
self.alive = False
self.respawn_time = 0
self.monsterkill = {'time': 999, 'kills': 0}
# set player name
self.set_name(name)
# GeoIP lookup
info = GEOIP.lookup(ip_address)
if info.country:
self.country = "%s (%s)" % (info.country_name, info.country)
# check ban_list
now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.time_joined))
curs.execute("SELECT `id`,`reason` FROM `ban_list` WHERE `guid` = '{}' AND `expires` > '{}'".format(self.guid, now))
result = curs.fetchone()
if result:
self.ban_id = result[0]
self.ban_msg = str(result[1]).split(',')[0]
else:
curs.execute("SELECT `id`,`reason` FROM `ban_list` WHERE `ip_address` = '{}' AND `expires` > '{}'".format(self.address, now))
result = curs.fetchone()
if result:
self.ban_id = result[0]
self.ban_msg = str(result[1]).split(',')[0]
def ban(self, duration=900, reason='tk', admin=None):
if admin:
reason = "%s, ban by %s" % (reason, admin)
try:
expire_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + duration))
except ValueError:
expire_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(2147483647))
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
curs.execute("SELECT `expires` FROM `ban_list` WHERE `guid` = '{}'".format(self.guid))
result = curs.fetchone()
if result:
if result[0] < expire_date:
# update already existing ban
curs.execute("UPDATE `ban_list` SET `ip_address` = '{}',`expires` = '{}',`reason` = '{}' WHERE `guid` = '{}'".format(self.address, expire_date, reason, self.guid))
conn.commit()
return True
# update IP address of existing ban
curs.execute("UPDATE `ban_list` SET `ip_address` = '{}' WHERE `guid` = '{}'".format(self.address, self.guid))
conn.commit()
return False
# create new ban
curs.execute("INSERT INTO `ban_list` (`id`,`guid`,`name`,`ip_address`,`expires`,`timestamp`,`reason`) VALUES ({},'{}','{}','{}','{}','{}','{}')".format(self.player_id, self.guid, self.name, self.address, expire_date, timestamp, reason))
conn.commit()
return True
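# Illustrative outcome (assumed data): ban() returns True when a new ban row is
# inserted or an existing ban is extended, and False when the player already has
# a ban that lasts longer than the requested duration (only the IP is refreshed).
#   player.ban(duration=3600, reason='tk', admin='somename')  -> True on a first ban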
def add_ban_point(self, point_type, duration):
try:
expire_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time() + duration))
except ValueError:
expire_date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(2147483647))
# add ban_point to database
curs.execute("INSERT INTO `ban_points` (`guid`,`point_type`,`expires`) VALUES ('{}','{}','{}')".format(self.guid, point_type, expire_date))
conn.commit()
# check amount of ban_points
curs.execute("SELECT COUNT(*) FROM `ban_points` WHERE `guid` = '{}' AND `expires` > '{}'".format(self.guid, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))))
# ban player when he gets more than 2 ban_points
if int(curs.fetchone()[0]) > 2:
# ban duration = 3 * expiration time
ban_duration = duration * 3
self.ban(duration=ban_duration, reason=point_type)
return ban_duration / 60
return 0
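# Sketch of the escalation (assumed values): each call stores one ban point that
# expires after `duration` seconds; once more than 2 unexpired points exist, the
# player is banned for 3 * duration and the ban length in minutes is returned.
#   add_ban_point('tk', 900)  -> 0 for the first two points, 45 for the third
#   (assuming all three points land within the same expiry window)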
def reset(self, reset_headshot_hits=True, reset_kill_spree=True):
self.kills = 0
self.froze = 0
self.thawouts = 0
if reset_kill_spree:
self.killing_streak = 0
self.max_kill_streak = 0
self.losing_streak = 0
self.deaths = 0
if reset_headshot_hits:
self.head_shots = 0
self.hitzone = {'body': 0, 'arms': 0, 'legs': 0}
self.all_hits = 0
self.he_kills = 0
self.knife_kills = 0
self.tk_count = 0
self.tk_victim_names = []
self.tk_killer_names = []
self.grudged_player = []
self.warn_list = []
self.last_warn_time = 0
self.flags_captured = 0
self.flags_returned = 0
self.flag_capture_time = 999
self.bombholder = False
self.bomb_carrier_killed = 0
self.killed_with_bomb = 0
self.bomb_planted = 0
self.bomb_defused = 0
self.team_lock = None
self.alive = False
self.respawn_time = 0
self.monsterkill = {'time': 999, 'kills': 0}
def reset_flag_stats(self):
self.flags_captured = 0
self.flags_returned = 0
self.flag_capture_time = 999
def save_info(self):
if self.registered_user:
ratio = round(float(self.db_kills) / float(self.db_deaths), 2) if self.db_deaths > 0 else 1.0
curs.execute("UPDATE `xlrstats` SET `kills` = {},`deaths` = {},`headshots` = {},`team_kills` = {},`team_death` = {},`max_kill_streak` = {},`suicides` = {},`rounds` = `rounds` + 1,`ratio` = {} WHERE `guid` = '{}'".format(self.db_kills, self.db_deaths, self.db_head_shots, self.db_tk_count, self.db_team_death, self.db_killing_streak, self.db_suicide, ratio, self.guid))
conn.commit()
def check_database(self):
now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
# check player table
curs.execute("SELECT COUNT(*) FROM `player` WHERE `guid` = '{}'".format(self.guid))
if int(curs.fetchone()[0]) == 0:
# add new player to database
curs.execute("INSERT INTO `player` (`guid`,`name`,`ip_address`,`time_joined`,`aliases`) VALUES ('{}','{}','{}','{}','{}')".format(self.guid, self.name, self.address, now, self.name))
conn.commit()
self.aliases.append(self.name)
self.first_time = True
else:
# update name, IP address and last join date
curs.execute("UPDATE `player` SET `name` = '{}',`ip_address` = '{}',`time_joined` = '{}' WHERE `guid` = '{}'".format(self.name, self.address, now, self.guid))
conn.commit()
# get known aliases
curs.execute("SELECT `aliases` FROM `player` WHERE `guid` = '{}'".format(self.guid))
result = curs.fetchone()
# create list of aliases
self.aliases = result[0].split(', ')
if self.name not in self.aliases:
# add new alias to list
if len(self.aliases) < 15:
self.aliases.append(self.name)
alias_string = ', '.join(self.aliases)
values = (alias_string, self.guid)
curs.execute("UPDATE `player` SET `aliases` = ? WHERE `guid` = ?", values)
conn.commit()
# get player-id
curs.execute("SELECT `id` FROM `player` WHERE `guid` = '{}'".format(self.guid))
self.player_id = curs.fetchone()[0]
# check XLRSTATS table
curs.execute("SELECT COUNT(*) FROM `xlrstats` WHERE `guid` = '{}'".format(self.guid))
if int(curs.fetchone()[0]) == 0:
self.registered_user = False
else:
self.registered_user = True
# get DB DATA for XLRSTATS
curs.execute("SELECT `last_played`,`num_played`,`kills`,`deaths`,`headshots`,`team_kills`,`team_death`,`max_kill_streak`,`suicides`,`admin_role`,`first_seen` FROM `xlrstats` WHERE `guid` = '{}'".format(self.guid))
result = curs.fetchone()
self.last_visit = result[0]
self.num_played = result[1]
self.db_kills = result[2]
self.db_deaths = result[3]
self.db_head_shots = result[4]
self.db_tk_count = result[5]
self.db_team_death = result[6]
self.db_killing_streak = result[7]
self.db_suicide = result[8]
self.admin_role = result[9]
self.first_seen = result[10]
# update name, last_played and increase num_played counter
curs.execute("UPDATE `xlrstats` SET `name` = '{}',`last_played` = '{}',`num_played` = `num_played` + 1 WHERE `guid` = '{}'".format(self.name, now, self.guid))
conn.commit()
def define_offline_player(self, player_id):
self.player_id = player_id
# get known aliases
curs.execute("SELECT `aliases` FROM `player` WHERE `guid` = '{}'".format(self.guid))
result = curs.fetchone()
# create list of aliases
self.aliases = result[0].split(', ')
curs.execute("SELECT COUNT(*) FROM `xlrstats` WHERE `guid` = '{}'".format(self.guid))
if int(curs.fetchone()[0]) == 0:
self.admin_role = 0
self.registered_user = False
else:
curs.execute("SELECT `last_played`,`admin_role` FROM `xlrstats` WHERE `guid` = '{}'".format(self.guid))
result = curs.fetchone()
self.last_visit = result[0]
self.admin_role = result[1]
self.registered_user = True
def register_user_db(self, role=1):
if not self.registered_user:
now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
curs.execute("INSERT INTO `xlrstats` (`guid`,`name`,`ip_address`,`first_seen`,`last_played`,`num_played`,`admin_role`) VALUES ('{}','{}','{}','{}','{}',1,{})".format(self.guid, self.name, self.address, now, now, role))
conn.commit()
self.registered_user = True
self.admin_role = role
self.welcome_msg = False
self.first_seen = now
self.last_visit = now
def update_db_admin_role(self, role):
curs.execute("UPDATE `xlrstats` SET `admin_role` = {} WHERE `guid` = '{}'".format(role, self.guid))
conn.commit()
# overwrite admin role in game, no reconnect of player required
self.set_admin_role(role)
def get_ban_id(self):
return self.ban_id
def get_ban_msg(self):
return REASONS[self.ban_msg] if self.ban_msg in REASONS else self.ban_msg
def get_first_time(self):
return self.first_time
def set_name(self, name):
# remove whitespaces
self.name = name.replace(' ', '')
# remove color character
for item in xrange(10):
self.name = self.name.replace('^%d' % item, '')
# limit length of name to 20 character
self.name = self.name[:20]
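# Example (hypothetical input): set_name() strips spaces and Quake color codes
# and truncates to 20 characters, e.g. "^1Spunky ^2Bot" becomes "SpunkyBot".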
def get_name(self):
return self.name
def set_authname(self, authname):
self.authname = authname
def get_authname(self):
return self.authname
def get_aliases(self):
if len(self.aliases) == 15:
self.aliases.append("and more...")
return str(", ^3".join(self.aliases))
def set_guid(self, guid):
self.guid = guid
def get_guid(self):
return self.guid
def get_player_num(self):
return self.player_num
def get_player_id(self):
return self.player_id
def set_team(self, team):
self.team = team
def get_team(self):
return self.team
def get_team_lock(self):
return self.team_lock
def set_team_lock(self, team):
self.team_lock = team
def get_num_played(self):
return self.num_played
def get_last_visit(self):
return str(self.last_visit)
def get_first_seen_date(self):
return str(self.first_seen)
def get_db_kills(self):
return self.db_kills
def get_kills(self):
return self.kills
def get_db_deaths(self):
return self.db_deaths
def get_deaths(self):
return self.deaths
def get_db_headshots(self):
return self.db_head_shots
def get_headshots(self):
return self.head_shots
def disable_welcome_msg(self):
self.welcome_msg = False
def get_welcome_msg(self):
return self.welcome_msg
def get_country(self):
return self.country
def get_registered_user(self):
return self.registered_user
def set_admin_role(self, role):
self.admin_role = role
def get_admin_role(self):
return self.admin_role
def get_ip_address(self):
return self.address
def get_time_joined(self):
return self.time_joined
def get_max_kill_streak(self):
return self.max_kill_streak
def kill(self):
now = time.time()
self.killing_streak += 1
self.losing_streak = 0
self.kills += 1
self.db_kills += 1
if now - self.monsterkill['time'] < 5:
self.monsterkill['kills'] += 1
else:
self.monsterkill['time'] = now
self.monsterkill['kills'] = 1
def die(self):
if self.killing_streak > self.max_kill_streak:
self.max_kill_streak = self.killing_streak
if self.max_kill_streak > self.db_killing_streak:
self.db_killing_streak = self.max_kill_streak
self.losing_streak += 1
self.killing_streak = 0
self.deaths += 1
self.db_deaths += 1
self.monsterkill = {'time': 999, 'kills': 0}
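# Illustrative sequence (assumed timing): three kill() calls within 5 seconds of
# the first one give get_monsterkill() == 3 and a killing streak of 3; the next
# die() stores the streak in max_kill_streak and resets the monsterkill counter.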
def get_losing_streak(self):
return self.losing_streak
def get_monsterkill(self):
return self.monsterkill['kills']
def set_alive(self, status):
self.alive = status
if status:
self.respawn_time = time.time()
def get_alive(self):
return self.alive
def get_respawn_time(self):
return self.respawn_time
def suicide(self):
self.db_suicide += 1
def headshot(self):
self.head_shots += 1
self.db_head_shots += 1
def set_hitzones(self, part):
self.hitzone[part] += 1
def get_hitzones(self, part):
return self.hitzone[part]
def set_all_hits(self):
self.all_hits += 1
def get_all_hits(self):
return self.all_hits
def set_he_kill(self):
self.he_kills += 1
def get_he_kills(self):
return self.he_kills
def set_knife_kill(self):
self.knife_kills += 1
def get_knife_kills(self):
return self.knife_kills
def get_killing_streak(self):
return self.killing_streak
def get_db_tks(self):
return self.db_tk_count
def get_team_kill_count(self):
return self.tk_count
def add_killed_me(self, killer):
self.tk_killer_names.append(killer)
def get_killed_me(self):
return self.tk_killer_names
def clear_killed_me(self, victim):
while self.tk_victim_names.count(victim) > 0:
self.warn_list.remove("stop team killing")
self.tk_victim_names.remove(victim)
def add_tk_victims(self, victim):
self.tk_victim_names.append(victim)
def get_tk_victim_names(self):
return self.tk_victim_names
def set_grudge(self, killer):
self.grudged_player.append(killer)
self.clear_tk(killer)
def get_grudged_player(self):
return self.grudged_player
def clear_grudged_player(self, killer):
while self.grudged_player.count(killer) > 0:
self.grudged_player.remove(killer)
def clear_tk(self, killer):
while self.tk_killer_names.count(killer) > 0:
self.tk_killer_names.remove(killer)
def clear_all_tk(self):
self.tk_killer_names = []
def clear_all_killed_me(self):
self.tk_victim_names = []
self.clear_specific_warning("stop team killing")
def add_high_ping(self, value):
self.warn_list.append('fix your ping')
self.ping_value = value
def get_ping_value(self):
return self.ping_value
def clear_specific_warning(self, warning):
while self.warn_list.count(warning) > 0:
self.warn_list.remove(warning)
def add_warning(self, warning, timer=True):
self.warn_list.append(warning)
if timer:
self.last_warn_time = time.time()
def get_warning(self):
return len(self.warn_list)
def get_all_warn_msg(self):
return list(set(self.warn_list))
def get_last_warn_msg(self):
return self.warn_list[-1] if self.warn_list else ''
def get_last_warn_time(self):
return self.last_warn_time
def clear_last_warning(self):
if self.warn_list:
last_warning = self.warn_list[-1]
self.warn_list.pop()
self.last_warn_time = self.last_warn_time - 60 if self.warn_list else 0
if "stop team killing" in last_warning:
self.tk_victim_names.pop()
return last_warning
return None
def clear_warning(self):
self.warn_list = []
self.tk_victim_names = []
self.tk_killer_names = []
self.last_warn_time = 0
# clear ban_points
curs.execute("DELETE FROM `ban_points` WHERE `guid` = '{}' and `expires` > '{}'".format(self.guid, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))))
conn.commit()
def team_death(self):
# increase team death counter
self.db_team_death += 1
def team_kill(self):
# increase teamkill counter
self.tk_count += 1
self.db_tk_count += 1
# CTF Mode
def capture_flag(self):
self.flags_captured += 1
def get_flags_captured(self):
return self.flags_captured
def return_flag(self):
self.flags_returned += 1
def get_flags_returned(self):
return self.flags_returned
def set_flag_capture_time(self, cap_time):
if cap_time < self.flag_capture_time:
self.flag_capture_time = cap_time
def get_flag_capture_time(self):
if self.flag_capture_time == 999:
return 0
return self.flag_capture_time
# Bomb Mode
def is_bombholder(self):
self.bombholder = True
def bomb_tossed(self):
self.bombholder = False
def get_bombholder(self):
return self.bombholder
def kill_bomb_carrier(self):
self.bomb_carrier_killed += 1
def get_bomb_carrier_kills(self):
return self.bomb_carrier_killed
def kills_with_bomb(self):
self.killed_with_bomb += 1
def get_kills_with_bomb(self):
return self.killed_with_bomb
def planted_bomb(self):
self.bomb_planted += 1
self.bombholder = False
def get_planted_bomb(self):
return self.bomb_planted
def defused_bomb(self):
self.bomb_defused += 1
def get_defused_bomb(self):
return self.bomb_defused
# Freeze Tag
def freeze(self):
self.froze += 1
def get_freeze(self):
return self.froze
def thawout(self):
self.thawouts += 1
def get_thawout(self):
return self.thawouts
### CLASS Game ###
class Game(object):
"""
Game class
"""
def __init__(self, config_file, urt_modversion):
"""
create a new instance of Game
@param config_file: The full path of the bot configuration file
@type config_file: String
"""
self.all_maps_list = []
self.next_mapname = ''
self.mapname = ''
self.maplist = []
self.last_maps_list = []
self.players = {}
self.live = False
self.urt_modversion = urt_modversion
game_cfg = ConfigParser.ConfigParser()
game_cfg.read(config_file)
self.quake = PyQuake3("%s:%s" % (game_cfg.get('server', 'server_ip'), game_cfg.get('server', 'server_port')), game_cfg.get('server', 'rcon_password'))
self.queue = Queue()
self.rcon_lock = RLock()
self.thread_rcon()
logger.info("Opening RCON socket : OK")
# dynamic mapcycle
self.dynamic_mapcycle = game_cfg.getboolean('mapcycle', 'dynamic_mapcycle') if game_cfg.has_option('mapcycle', 'dynamic_mapcycle') else False
if self.dynamic_mapcycle:
self.switch_count = game_cfg.getint('mapcycle', 'switch_count') if game_cfg.has_option('mapcycle', 'switch_count') else 4
self.big_cycle = filter(None, game_cfg.get('mapcycle', 'big_cycle').replace(' ', '').split(',')) if game_cfg.has_option('mapcycle', 'big_cycle') else []
self.small_cycle = filter(None, game_cfg.get('mapcycle', 'small_cycle').replace(' ', '').split(',')) if game_cfg.has_option('mapcycle', 'small_cycle') else []
# add Spunky Bot as player 'World' to the game
spunky_bot = Player(BOT_PLAYER_NUM, '127.0.0.1', 'NONE', 'World')
self.add_player(spunky_bot)
logger.info("Activating the Bot : OK")
logger.info("Startup completed : Let's get ready to rumble!")
logger.info("Spunky Bot is running until you are closing this session or pressing CTRL + C to abort this process.")
logger.info("*** Note: Use the provided initscript to run Spunky Bot as daemon ***")
def thread_rcon(self):
"""
Thread process for starting method rcon_process
"""
# start Thread
processor = Thread(target=self.rcon_process)
processor.setDaemon(True)
processor.start()
def rcon_process(self):
"""
Thread process
"""
while 1:
if not self.queue.empty() and self.live:
with self.rcon_lock:
try:
command = self.queue.get()
if command != 'status':
self.quake.rcon(command)
else:
self.quake.rcon_update()
except Exception as err:
logger.error(err, exc_info=True)
time.sleep(RCON_DELAY)
def get_quake_value(self, value):
"""
get Quake3 value
@param value: The Quake3 value
@type value: String
"""
if self.live:
with self.rcon_lock:
self.quake.update()
return self.quake.values[value]
return ''
def get_rcon_output(self, value):
"""
get RCON output for value
@param value: The RCON output for value
@type value: String
"""
if self.live:
with self.rcon_lock:
return self.quake.rcon(value)
return ''
def get_cvar(self, value):
"""
get CVAR value
@param value: The CVAR value
@type value: String
"""
if self.live:
with self.rcon_lock:
try:
ret_val = self.quake.rcon(value)[1].split(':')[1].split('^7')[0].lstrip('"')
except IndexError:
ret_val = None
time.sleep(RCON_DELAY)
return ret_val
return ''
def get_number_players(self):
"""
get the number of online players
"""
return len(self.players) - 1 # bot is counted as player
def get_mapcycle_path(self):
"""
get the full path of mapcycle.txt file
"""
maplist = []
# get path of fs_homepath and fs_basepath
fs_homepath = self.get_cvar('fs_homepath')
logger.debug("fs_homepath : %s", fs_homepath)
fs_basepath = self.get_cvar('fs_basepath')
logger.debug("fs_basepath : %s", fs_basepath)
fs_game = self.get_cvar('fs_game')
# get file name of mapcycle.txt
mapcycle_file = self.get_cvar('g_mapcycle')
try:
# set full path of mapcycle.txt
mc_home_path = os.path.join(fs_homepath, fs_game, mapcycle_file) if fs_homepath else ""
mc_base_path = os.path.join(fs_basepath, fs_game, mapcycle_file) if fs_basepath else ""
except TypeError:
raise Exception('Server did not respond to mapcycle path request, please restart the Bot')
mapcycle_path = mc_home_path if os.path.isfile(mc_home_path) else mc_base_path if os.path.isfile(mc_base_path) else None
if mapcycle_path:
logger.info("Mapcycle path : %s", mapcycle_path)
with open(mapcycle_path, 'r') as file_handle:
lines = [line for line in file_handle if line != '\n']
try:
while 1:
tmp = lines.pop(0).strip()
if tmp[0] == '{':
while tmp[0] != '}':
tmp = lines.pop(0).strip()
tmp = lines.pop(0).strip()
maplist.append(tmp)
except IndexError:
pass
return maplist
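# Expected mapcycle.txt layout (assumed example): plain map names, each optionally
# followed by a { ... } block of per-map settings which the parser above skips:
#   ut4_abbey
#   {
#     g_gametype 7
#   }
#   ut4_casa
# would yield ['ut4_abbey', 'ut4_casa'].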
def send_rcon(self, command):
"""
send RCON command
@param command: The RCON command
@type command: String
"""
if self.live:
with self.rcon_lock:
self.queue.put(command)
def rcon_say(self, msg):
"""
display message in global chat
@param msg: The message to display in global chat
@type msg: String
"""
# wrap long messages into shorter list elements
lines = textwrap.wrap(msg, 140)
for line in lines:
self.send_rcon('say %s' % line)
def rcon_tell(self, player_num, msg, pm_tag=True):
"""
tell message to a specific player
@param player_num: The player number
@type player_num: Integer
@param msg: The message to display in private chat
@type msg: String
@param pm_tag: Display '[pm]' (private message) in front of the message
@type pm_tag: bool
"""
lines = textwrap.wrap(msg, 128)
prefix = "^4[pm] "
for line in lines:
if pm_tag:
self.send_rcon('tell %d %s%s' % (player_num, prefix, line))
prefix = ""
else:
self.send_rcon('tell %d %s' % (player_num, line))
def rcon_bigtext(self, msg):
"""
display bigtext message
@param msg: The message to display in global chat
@type msg: String
"""
self.send_rcon('bigtext "%s"' % msg)
def rcon_forceteam(self, player_num, team):
"""
force player to given team
@param player_num: The player number
@type player_num: Integer
@param team: The team (red, blue, spectator)
@type team: String
"""
self.send_rcon('forceteam %d %s' % (player_num, team))
def rcon_clear(self):
"""
clear RCON queue
"""
self.queue.queue.clear()
def kick_player(self, player_num, reason=''):
"""
kick player
@param player_num: The player number
@type player_num: Integer
@param reason: Reason for kick
@type reason: String
"""
if reason and self.urt_modversion > 41:
self.send_rcon('kick %d "%s"' % (player_num, reason))
else:
self.send_rcon('kick %d' % player_num)
logger.debug("%s kicked%s%s", player_num, ": " if reason else '', reason)
def go_live(self):
"""
go live
"""
self.live = True
self.set_all_maps()
self.maplist = filter(None, self.get_mapcycle_path())
self.set_current_map()
self.rcon_say("^7Powered by ^8[Spunky Bot %s] ^1[www.spunkybot.de]" % __version__)
logger.info("Mapcycle: %s", ', '.join(self.maplist))
logger.info("*** Live tracking: Current map: %s / Next map: %s ***", self.mapname, self.next_mapname)
logger.info("Total number of maps : %s", len(self.get_all_maps()))
logger.info("Server CVAR g_logsync : %s", self.get_cvar('g_logsync'))
logger.info("Server CVAR g_loghits : %s", self.get_cvar('g_loghits'))
def set_current_map(self):
"""
set the current and next map in rotation
"""
if self.mapname:
self.last_maps_list = self.last_maps_list[-3:] + [self.mapname]
try:
self.mapname = self.get_quake_value('mapname')
except KeyError:
self.mapname = self.next_mapname
if self.dynamic_mapcycle:
self.maplist = filter(None, (self.small_cycle if self.get_number_players() < self.switch_count else self.big_cycle))
logger.debug("Players online: %s / Mapcycle: %s", self.get_number_players(), self.maplist)
self.send_rcon("g_mapcycle dynamic.fake")
else:
if self.get_cvar('g_mapcycle') == "dynamic.fake":
self.send_rcon("g_mapcycle mapcycle.txt")
if self.maplist:
if self.mapname in self.maplist:
if self.maplist.index(self.mapname) < (len(self.maplist) - 1):
self.next_mapname = self.maplist[self.maplist.index(self.mapname) + 1]
else:
self.next_mapname = self.maplist[0]
else:
self.next_mapname = self.maplist[0]
else:
self.next_mapname = self.mapname
logger.debug("Current map: %s / Next map: %s", self.mapname, self.next_mapname)
if self.dynamic_mapcycle:
self.send_rcon('g_nextmap %s' % self.next_mapname)
self.send_rcon('g_nextCycleMap %s' % self.next_mapname)
if self.mapname != self.next_mapname:
self.rcon_say("^7Next Map: ^3%s" % self.next_mapname)
def set_all_maps(self):
"""
set a list of all available maps
"""
try:
all_maps = []
count = 0
while True:
ret_val = self.get_rcon_output("dir map bsp")[1].split()
if "Directory" in ret_val:
count += 1
if count >= 2:
break
else:
all_maps += ret_val
all_maps_list = list(set([maps.replace("/", "").replace(".bsp", "").lower() for maps in all_maps if maps.startswith("/")]))
all_maps_list.sort()
if all_maps_list:
self.all_maps_list = all_maps_list
except Exception as err:
logger.error(err, exc_info=True)
def get_all_maps(self):
"""
get a list of all available maps
"""
return self.all_maps_list
def get_last_maps(self):
"""
get a list of the last played maps
"""
return self.last_maps_list
def add_player(self, player):
"""
add a player to the game
@param player: The instance of the player
@type player: Instance
"""
self.players[player.get_player_num()] = player
# check DB for real players and exclude bots which have IP 0.0.0.0
if player.get_ip_address() != '0.0.0.0':
player.check_database()
def get_gamestats(self):
"""
get number of players in red team, blue team and spectator
"""
game_data = {Player.teams[1]: 0, Player.teams[2]: 0, Player.teams[3]: -1}
for player in self.players.itervalues():
game_data[Player.teams[player.get_team()]] += 1
return game_data
def balance_teams(self, game_data):
"""
balance teams if needed
@param game_data: Dictionary of players in each team
@type game_data: dict
"""
if (game_data[Player.teams[1]] - game_data[Player.teams[2]]) > 1:
team1 = 1
team2 = 2
elif (game_data[Player.teams[2]] - game_data[Player.teams[1]]) > 1:
team1 = 2
team2 = 1
else:
self.rcon_say("^7Teams are already balanced")
return
self.rcon_bigtext("AUTOBALANCING TEAMS...")
num_ptm = math.floor((game_data[Player.teams[team1]] - game_data[Player.teams[team2]]) / 2)
player_list = [player for player in self.players.itervalues() if player.get_team() == team1 and not player.get_team_lock()]
player_list.sort(cmp=lambda player1, player2: cmp(player2.get_time_joined(), player1.get_time_joined()))
for player in player_list[:int(num_ptm)]:
self.rcon_forceteam(player.get_player_num(), Player.teams[team2])
self.rcon_say("^7Autobalance complete!")
### Main ###
if __name__ == "__main__":
# get full path of spunky.py
HOME = os.path.dirname(os.path.realpath(__file__))
# load the GEO database and store it globally in interpreter memory
GEOIP = pygeoip.Database(os.path.join(HOME, 'lib', 'GeoIP.dat'))
# connect to database
conn = sqlite3.connect(os.path.join(HOME, 'data.sqlite'))
curs = conn.cursor()
# create tables if not exists
curs.execute('CREATE TABLE IF NOT EXISTS xlrstats (id INTEGER PRIMARY KEY NOT NULL, guid TEXT NOT NULL, name TEXT NOT NULL, ip_address TEXT NOT NULL, first_seen DATETIME, last_played DATETIME, num_played INTEGER DEFAULT 1, kills INTEGER DEFAULT 0, deaths INTEGER DEFAULT 0, headshots INTEGER DEFAULT 0, team_kills INTEGER DEFAULT 0, team_death INTEGER DEFAULT 0, max_kill_streak INTEGER DEFAULT 0, suicides INTEGER DEFAULT 0, ratio REAL DEFAULT 0, rounds INTEGER DEFAULT 0, admin_role INTEGER DEFAULT 1)')
curs.execute('CREATE TABLE IF NOT EXISTS player (id INTEGER PRIMARY KEY NOT NULL, guid TEXT NOT NULL, name TEXT NOT NULL, ip_address TEXT NOT NULL, time_joined DATETIME, aliases TEXT)')
curs.execute('CREATE TABLE IF NOT EXISTS ban_list (id INTEGER PRIMARY KEY NOT NULL, guid TEXT NOT NULL, name TEXT, ip_address TEXT, expires DATETIME DEFAULT 259200, timestamp DATETIME, reason TEXT)')
curs.execute('CREATE TABLE IF NOT EXISTS ban_points (id INTEGER PRIMARY KEY NOT NULL, guid TEXT NOT NULL, point_type TEXT, expires DATETIME)')
# create instance of LogParser
LogParser(os.path.join(HOME, 'conf', 'settings.conf'))
# close database connection
conn.close()
the-stack_0_24447
import importlib
import inspect
import os
import pkgutil
import unittest
import numpy as np
import scipy.sparse
import sklearn
import sklearn.base
import sklearn.datasets
def find_sklearn_classes(class_):
classifiers = set()
all_subdirectories = []
sklearn_path = sklearn.__path__[0]
for root, dirs, files in os.walk(sklearn_path):
all_subdirectories.append(root)
for module_loader, module_name, ispkg in \
pkgutil.iter_modules(all_subdirectories):
# Work around some issues...
if module_name in ["hmm", "mixture"]:
print("Skipping %s" % module_name)
continue
module_file = module_loader.__dict__["path"]
sklearn_module = module_file.replace(sklearn_path, "").replace("/", ".")
full_module_name = "sklearn" + sklearn_module + "." + module_name
pkg = importlib.import_module(full_module_name)
for member_name, obj in inspect.getmembers(pkg):
if inspect.isclass(obj) and \
issubclass(obj, class_):
classifier = obj
# print member_name, obj
classifiers.add(classifier)
print()
for classifier in sorted([str(cls) for cls in classifiers]):
print(classifier)
def get_dataset(dataset='iris', make_sparse=False, add_NaNs=False,
train_size_maximum=150, make_multilabel=False,
make_binary=False):
iris = getattr(sklearn.datasets, "load_%s" % dataset)()
X = iris.data.astype(np.float32)
Y = iris.target
rs = np.random.RandomState(42)
indices = np.arange(X.shape[0])
train_size = min(int(len(indices) / 3. * 2.), train_size_maximum)
rs.shuffle(indices)
X = X[indices]
Y = Y[indices]
X_train = X[:train_size]
Y_train = Y[:train_size]
X_test = X[train_size:]
Y_test = Y[train_size:]
if add_NaNs:
mask = rs.choice([True, False], size=(X_train.shape))
X_train[mask] = np.NaN
if make_sparse:
X_train[:, 0] = 0
X_train[rs.random_sample(X_train.shape) > 0.5] = 0
X_train = scipy.sparse.csc_matrix(X_train)
X_train.eliminate_zeros()
X_test[:, 0] = 0
X_test[rs.random_sample(X_test.shape) > 0.5] = 0
X_test = scipy.sparse.csc_matrix(X_test)
X_test.eliminate_zeros()
if make_binary and make_multilabel:
raise ValueError('Can convert dataset only to one of the two '
'options binary or multilabel!')
if make_binary:
Y_train[Y_train > 1] = 1
Y_test[Y_test > 1] = 1
if make_multilabel:
num_classes = len(np.unique(Y))
Y_train_ = np.zeros((Y_train.shape[0], num_classes))
for i in range(Y_train.shape[0]):
Y_train_[i, Y_train[i]] = 1
Y_train = Y_train_
Y_test_ = np.zeros((Y_test.shape[0], num_classes))
for i in range(Y_test.shape[0]):
Y_test_[i, Y_test[i]] = 1
Y_test = Y_test_
return X_train, Y_train, X_test, Y_test
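# Minimal usage sketch (standard sklearn iris: 150 samples, 4 features): the data
# is shuffled and split 100/50; the flags add NaNs, sparsify, or recode the labels.
#   X_train, Y_train, X_test, Y_test = get_dataset('iris')
#   X_train.shape  # -> (100, 4), dtype float32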
def _test_classifier(classifier, dataset='iris', sparse=False,
train_size_maximum=150, make_multilabel=False,
make_binary=False):
X_train, Y_train, X_test, Y_test = get_dataset(dataset=dataset,
make_sparse=sparse,
train_size_maximum=train_size_maximum,
make_multilabel=make_multilabel,
make_binary=make_binary)
configuration_space = classifier.get_hyperparameter_search_space(
dataset_properties={'sparse': sparse}
)
default_config = configuration_space.get_default_configuration()
classifier = classifier(random_state=0, **default_config)
if hasattr(classifier, 'iterative_fit'):
class counter(object):
def __init__(self, func):
self.n_calls = 0
self.func = func
def __call__(self, *args, **kwargs):
self.n_calls += 1
return self.func(*args, **kwargs)
classifier.iterative_fit = counter(classifier.iterative_fit)
predictor = classifier.fit(X_train, Y_train)
if hasattr(classifier, 'iterative_fit'):
n_calls = classifier.iterative_fit.n_calls
else:
n_calls = None
predictions = predictor.predict(X_test)
return predictions, Y_test, n_calls
def _test_classifier_iterative_fit(classifier, dataset='iris', sparse=False):
X_train, Y_train, X_test, Y_test = get_dataset(dataset=dataset,
make_sparse=sparse)
configuration_space = classifier.get_hyperparameter_search_space(
dataset_properties={'sparse': sparse}
)
default_config = configuration_space.get_default_configuration()
classifier = classifier(random_state=0, **default_config)
classifier.iterative_fit(X_train, Y_train, n_iter=2, refit=True)
iteration = 2
while not classifier.configuration_fully_fitted():
n_iter = int(2 ** iteration / 2)
classifier.iterative_fit(X_train, Y_train, n_iter=n_iter)
iteration += 1
predictions = classifier.predict(X_test)
return predictions, Y_test, classifier
def _test_classifier_predict_proba(classifier, dataset='iris', sparse=False,
train_size_maximum=150,
make_multilabel=False,
make_binary=False):
X_train, Y_train, X_test, Y_test = get_dataset(dataset=dataset,
make_sparse=sparse,
train_size_maximum=train_size_maximum,
make_multilabel=make_multilabel,
make_binary=make_binary)
configuration_space = classifier.get_hyperparameter_search_space()
default_config = configuration_space.get_default_configuration()
classifier = classifier(random_state=0, **default_config)
predictor = classifier.fit(X_train, Y_train)
predictions = predictor.predict_proba(X_test)
return predictions, Y_test
def _test_preprocessing(Preprocessor, dataset='iris', make_sparse=False,
train_size_maximum=150):
X_train, Y_train, X_test, Y_test = get_dataset(dataset=dataset,
make_sparse=make_sparse,
train_size_maximum=train_size_maximum)
original_X_train = X_train.copy()
configuration_space = Preprocessor.get_hyperparameter_search_space()
default_config = configuration_space.get_default_configuration()
preprocessor = Preprocessor(random_state=0, **default_config)
transformer = preprocessor.fit(X_train, Y_train)
return transformer.transform(X_train), original_X_train
class PreprocessingTestCase(unittest.TestCase):
def _test_preprocessing_dtype(self, Preprocessor, add_NaNs=False,
test_sparse=True, dataset='iris'):
# Dense
# np.float32
X_train, Y_train, X_test, Y_test = get_dataset(dataset, add_NaNs=add_NaNs)
self.assertEqual(X_train.dtype, np.float32)
configuration_space = Preprocessor.get_hyperparameter_search_space()
default_config = configuration_space.get_default_configuration()
preprocessor = Preprocessor(random_state=0, **default_config)
preprocessor.fit(X_train, Y_train)
preprocessor.transform(X_train)
# self.assertEqual(Xt.dtype, np.float32)
# np.float64
X_train, Y_train, X_test, Y_test = get_dataset(dataset, add_NaNs=add_NaNs)
X_train = X_train.astype(np.float64)
configuration_space = Preprocessor.get_hyperparameter_search_space()
default_config = configuration_space.get_default_configuration()
preprocessor = Preprocessor(random_state=0, **default_config)
preprocessor.fit(X_train, Y_train)
preprocessor.transform(X_train)
# self.assertEqual(Xt.dtype, np.float64)
if test_sparse is True:
# Sparse
# np.float32
X_train, Y_train, X_test, Y_test = get_dataset(dataset, make_sparse=True,
add_NaNs=add_NaNs)
self.assertEqual(X_train.dtype, np.float32)
configuration_space = Preprocessor.get_hyperparameter_search_space()
default_config = configuration_space.get_default_configuration()
preprocessor = Preprocessor(random_state=0, **default_config)
preprocessor.fit(X_train, Y_train)
preprocessor.transform(X_train)
# self.assertEqual(Xt.dtype, np.float32)
# np.float64
X_train, Y_train, X_test, Y_test = get_dataset(dataset,
make_sparse=True,
add_NaNs=add_NaNs)
X_train = X_train.astype(np.float64)
configuration_space = Preprocessor.get_hyperparameter_search_space()
default_config = configuration_space.get_default_configuration()
preprocessor = Preprocessor(random_state=0, **default_config)
preprocessor.fit(X_train, Y_train)
preprocessor.transform(X_train)
# self.assertEqual(Xt.dtype, np.float64)
def _test_regressor(Regressor, dataset='diabetes', sparse=False):
X_train, Y_train, X_test, Y_test = get_dataset(dataset=dataset,
make_sparse=sparse)
configuration_space = Regressor.get_hyperparameter_search_space()
default_config = configuration_space.get_default_configuration()
regressor = Regressor(random_state=0, **default_config)
# Dumb incomplete hacky test to check that we do not alter the data
X_train_hash = hash(str(X_train))
X_test_hash = hash(str(X_test))
Y_train_hash = hash(str(Y_train))
if hasattr(regressor, 'iterative_fit'):
class counter(object):
def __init__(self, func):
self.n_calls = 0
self.func = func
def __call__(self, *args, **kwargs):
self.n_calls += 1
return self.func(*args, **kwargs)
regressor.iterative_fit = counter(regressor.iterative_fit)
predictor = regressor.fit(X_train, Y_train)
if hasattr(regressor, 'iterative_fit'):
n_calls = regressor.iterative_fit.n_calls
else:
n_calls = None
predictions = predictor.predict(X_test)
if X_train_hash != hash(str(X_train)) or \
X_test_hash != hash(str(X_test)) or \
Y_train_hash != hash(str(Y_train)):
raise ValueError("Model modified data")
return predictions, Y_test, n_calls
def _test_regressor_iterative_fit(Regressor, dataset='diabetes', sparse=False):
X_train, Y_train, X_test, Y_test = get_dataset(dataset=dataset,
make_sparse=sparse)
configuration_space = Regressor.get_hyperparameter_search_space(
dataset_properties={'sparse': sparse}
)
default_config = configuration_space.get_default_configuration()
regressor = Regressor(random_state=0, **default_config)
regressor.iterative_fit(X_train, Y_train, n_iter=2, refit=True)
iteration = 2
while not regressor.configuration_fully_fitted():
n_iter = int(2 ** iteration / 2)
regressor.iterative_fit(X_train, Y_train, n_iter=n_iter)
iteration += 1
predictions = regressor.predict(X_test)
return predictions, Y_test, regressor
if __name__ == "__main__":
find_sklearn_classes(sklearn.base.ClassifierMixin)
find_sklearn_classes(sklearn.base.RegressorMixin)
find_sklearn_classes(sklearn.base.TransformerMixin)
the-stack_0_24448
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import unittest
from systemds.context import SystemDSContext
class TestContextCreation(unittest.TestCase):
def test_same_port(self):
# Requesting the same port twice should gracefully fall back to another port
sds1 = SystemDSContext(port=9415)
sds2 = SystemDSContext(port=9415)
sds1.close()
sds2.close()
def test_create_10_contexts(self):
# Creating multiple contexts and closing them should be no problem.
for _ in range(0, 10):
SystemDSContext().close()
def test_create_multiple_context(self):
# Creating multiple contexts in sequence but open at the same time is okay.
a = SystemDSContext()
b = SystemDSContext()
c = SystemDSContext()
d = SystemDSContext()
a.close()
b.close()
c.close()
d.close()
the-stack_0_24450
import re
from typing import Any
from typing import List
from typing import Match
from typing import Set
from typing import Tuple
from typing import TYPE_CHECKING
import mistune
from django.urls import reverse
from django.utils.html import strip_tags
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from pygments.lexers import guess_lexer
from pygments.util import ClassNotFound
from ocfweb.caching import cache
# tags of a format like: [[!meta title="Backups"]]
META_REGEX = re.compile(r'\[\[!meta ([a-z]+)="([^"]*)"\]\]')
# Make mypy play nicely with mixins https://github.com/python/mypy/issues/5837
# TODO: issue has been resolved in mypy, patch when next version of mypy gets released
# More info: https://github.com/python/mypy/pull/7860
class MixinBase:
def __init__(self, rules: Any, default_rules: Any) -> None:
self.rules = rules
self.default_rules = default_rules
_Base: Any = object
if TYPE_CHECKING:
_Base = MixinBase
class HtmlCommentsLexerMixin(_Base):
"""Strip HTML comments as entire blocks or inside lines."""
def enable_html_comments(self) -> None:
self.rules.html_comment = re.compile(
r'^<!--(.*?)-->',
)
self.default_rules.insert(0, 'html_comment')
def output_html_comment(self, m: Match[Any]) -> str:
return ''
def parse_html_comment(self, m: Match[Any]) -> None:
pass
class BackslashLineBreakLexerMixin(_Base):
"""Convert lines that end in a backslash into a simple line break.
This follows GitHub-flavored Markdown on backslashes at the end of lines
being treated as a hard line break
(https://github.github.com/gfm/#backslash-escapes)
For example, something like this (escaped for python's sake since this in
in a string):
This is a test\\
with a line break
would be rendered as:
This is a test<br>
with a line break
"""
def enable_backslash_line_breaks(self) -> None:
self.rules.backslash_line_break = re.compile(
'^\\\\\n',
)
self.default_rules.insert(0, 'backslash_line_break')
def output_backslash_line_break(self, m: Match[Any]) -> str:
return '<br>'
class CodeRendererMixin(_Base):
"""Render highlighted code."""
# TODO: don't use inline styles; see https://pygments.org/docs/formatters.html
html_formatter = HtmlFormatter(noclasses=True)
def block_code(self, code: str, lang: str) -> str:
try:
if lang:
lexer = get_lexer_by_name(lang, stripall=True)
else:
lexer = guess_lexer(code)
except ClassNotFound:
lexer = get_lexer_by_name('shell')
return highlight(code, lexer, CodeRendererMixin.html_formatter)
class DjangoLinkInlineLexerMixin(_Base):
"""Turn special Markdown link syntax into Django links.
In Django templates, we can use `url` tags, such as:
{% url 'staff-hours' %}
{% url 'doc' 'staff/backend/backups' %}
In Markdown, we use the following fake syntax to generate Django links:
[[human readable name|staff-hours]]
[[human readable name|doc staff/backend/backups]]
You can link to fragments with a # at the very end:
[[human readable name|staff-hours#something]]
[[human readable name|doc staff/backend/backups#something]]
"""
split_words = re.compile(r'((?:\S|\\ )+)')
def enable_django_links(self) -> None:
self.rules.django_link = re.compile(
r'^\[\[(?!\!)'
r'([\s\S]+?)'
r'\|'
r'([^#]+?)'
r'(?:#(.*?))?'
r'\]\]',
)
self.default_rules.insert(0, 'django_link')
def output_django_link(self, m: Match[Any]) -> str:
text, target, fragment = m.group(1), m.group(2), m.group(3)
def href(link: str, fragment: str) -> str:
if fragment:
return link + '#' + fragment
return link
words = DjangoLinkInlineLexerMixin.split_words.findall(target)
name, *params = words
return self.renderer.link(
link=href(reverse(name, args=params), fragment),
title=None,
text=text,
)
class HeaderRendererMixin(_Base):
"""Mixin to render headers with auto-generated IDs (or provided IDs).
If headers are written as usual, they'll be given automatically-generated
IDs based on their header level and text.
Headers can also be specified with an ID at the end wrapped in curly braces:
### My Header {my_id}
This ID will be used directly without further manipulation, and can be
relied on for linking.
Custom IDs can consist only of lowercase a-z, 0-9, dash, and underscore.
IDs are tracked into a table of contents which should be reset before
rendering a document and read afterwards.
"""
def reset_toc(self) -> None:
self.toc: List[Any] = []
self.toc_ids: Set[Any] = set()
def get_toc(self) -> List[Any]:
return self.toc
def header(self, text: str, level: int, raw: None = None) -> str:
custom_id_match = re.match(r'^(.*?)\s+{([a-z0-9\-_]+)}\s*$', text)
if custom_id_match:
text = custom_id_match.group(1)
id = custom_id_match.group(2)
if id in self.toc_ids:
raise ValueError(f'Duplicate header ID in Markdown: "{id}"')
else:
id = 'h{level}_{title}'.format(
level=level,
title=re.sub(r'[^a-z0-9\-_ ]', '', strip_tags(text).lower()).strip().replace(' ', '-'),
)
# dumb collision avoidance
while id in self.toc_ids:
id += '_'
self.toc.append((level, text, id))
self.toc_ids.add(id)
return '<h{level} id="{id}">{text} <a class="anchor" href="#{id}"><span></span></a></h{level}>\n'.format(
level=level,
id=id,
text=text,
)
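# Illustrative IDs (assumed headings): a level-2 heading "Firewall Rules" gets the
# auto-generated id "h2_firewall-rules", while "Backups {backups}" keeps the
# explicit id "backups"; both are appended to the TOC as (level, text, id) tuples.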
class OcfMarkdownRenderer(
HeaderRendererMixin,
CodeRendererMixin,
mistune.Renderer,
):
pass
class OcfMarkdownInlineLexer(
mistune.InlineLexer,
DjangoLinkInlineLexerMixin,
HtmlCommentsLexerMixin,
BackslashLineBreakLexerMixin,
):
pass
class OcfMarkdownBlockLexer(
mistune.BlockLexer,
HtmlCommentsLexerMixin,
):
pass
_renderer = OcfMarkdownRenderer(
escape=True,
hard_wrap=False,
)
_inline = OcfMarkdownInlineLexer(_renderer)
_inline.enable_html_comments()
_inline.enable_django_links()
_inline.enable_backslash_line_breaks()
_block = OcfMarkdownBlockLexer(mistune.BlockGrammar())
_block.enable_html_comments()
_markdown = mistune.Markdown(
renderer=_renderer,
inline=_inline,
block=_block,
)
def markdown(text: str) -> mistune.Markdown:
_renderer.reset_toc()
return _markdown(text)
def text_and_meta(f: Any) -> Tuple[str, Any]:
"""Return tuple (text, meta dict) for the given file.
Meta tags are stripped from the Markdown source, but the Markdown is
not rendered.
"""
text = f.read()
meta = {}
def repl(match: Match[Any]) -> str:
meta[match.group(1)] = match.group(2)
return ''
text = META_REGEX.sub(repl, text)
return text, meta
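# Example (hypothetical file contents): for a document starting with
# [[!meta title="Backups"]] followed by Markdown body text, text_and_meta()
# returns the body with the tag stripped and meta == {'title': 'Backups'}.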
@cache()
def markdown_and_toc(text: str) -> Tuple[Any, Any]:
"""Return tuple (html, toc) for the given text."""
html = markdown(text)
return html, _renderer.get_toc()
the-stack_0_24452
import sys
import unittest
import tkinter
from tkinter import ttk
from test.support import requires, run_unittest
from tkinter.test.support import AbstractTkTest, AbstractDefaultRootTest
requires('gui')
class LabeledScaleTest(AbstractTkTest, unittest.TestCase):
def tearDown(self):
self.root.update_idletasks()
super().tearDown()
def test_widget_destroy(self):
# automatically created variable
x = ttk.LabeledScale(self.root)
var = x._variable._name
x.destroy()
self.assertRaises(tkinter.TclError, x.tk.globalgetvar, var)
# manually created variable
myvar = tkinter.DoubleVar(self.root)
name = myvar._name
x = ttk.LabeledScale(self.root, variable=myvar)
x.destroy()
if self.wantobjects:
self.assertEqual(x.tk.globalgetvar(name), myvar.get())
else:
self.assertEqual(float(x.tk.globalgetvar(name)), myvar.get())
del myvar
self.assertRaises(tkinter.TclError, x.tk.globalgetvar, name)
# checking that the tracing callback is properly removed
myvar = tkinter.IntVar(self.root)
# LabeledScale will start tracing myvar
x = ttk.LabeledScale(self.root, variable=myvar)
x.destroy()
# Unless the tracing callback was removed, creating a new
# LabeledScale with the same var will cause an error now. This
# happens because the variable will be set to (possibly) a new
# value which causes the tracing callback to be called and then
# it tries calling instance attributes not yet defined.
ttk.LabeledScale(self.root, variable=myvar)
if hasattr(sys, 'last_type'):
self.assertNotEqual(sys.last_type, tkinter.TclError)
def test_initialization(self):
# master passing
master = tkinter.Frame(self.root)
x = ttk.LabeledScale(master)
self.assertEqual(x.master, master)
x.destroy()
# variable initialization/passing
passed_expected = (('0', 0), (0, 0), (10, 10),
(-1, -1), (sys.maxsize + 1, sys.maxsize + 1),
(2.5, 2), ('2.5', 2))
for pair in passed_expected:
x = ttk.LabeledScale(self.root, from_=pair[0])
self.assertEqual(x.value, pair[1])
x.destroy()
x = ttk.LabeledScale(self.root, from_=None)
self.assertRaises((ValueError, tkinter.TclError), x._variable.get)
x.destroy()
# variable should have its default value set to the from_ value
myvar = tkinter.DoubleVar(self.root, value=20)
x = ttk.LabeledScale(self.root, variable=myvar)
self.assertEqual(x.value, 0)
x.destroy()
# check that it is really using a DoubleVar
x = ttk.LabeledScale(self.root, variable=myvar, from_=0.5)
self.assertEqual(x.value, 0.5)
self.assertEqual(x._variable._name, myvar._name)
x.destroy()
# widget positionment
def check_positions(scale, scale_pos, label, label_pos):
self.assertEqual(scale.pack_info()['side'], scale_pos)
self.assertEqual(label.place_info()['anchor'], label_pos)
x = ttk.LabeledScale(self.root, compound='top')
check_positions(x.scale, 'bottom', x.label, 'n')
x.destroy()
x = ttk.LabeledScale(self.root, compound='bottom')
check_positions(x.scale, 'top', x.label, 's')
x.destroy()
# an unrecognized compound value inverts the default positions
x = ttk.LabeledScale(self.root, compound='unknown')
check_positions(x.scale, 'top', x.label, 's')
x.destroy()
x = ttk.LabeledScale(self.root) # take default positions
check_positions(x.scale, 'bottom', x.label, 'n')
x.destroy()
# extra, and invalid, kwargs
self.assertRaises(tkinter.TclError, ttk.LabeledScale, master, a='b')
def test_horizontal_range(self):
lscale = ttk.LabeledScale(self.root, from_=0, to=10)
lscale.pack()
lscale.update()
linfo_1 = lscale.label.place_info()
prev_xcoord = lscale.scale.coords()[0]
self.assertEqual(prev_xcoord, int(linfo_1['x']))
# change range to: from -5 to 5. This should change the x coord of
# the scale widget, since 0 is at the middle of the new
# range.
lscale.scale.configure(from_=-5, to=5)
# The following update is needed since the test doesn't use mainloop,
# at the same time this shouldn't affect test outcome
lscale.update()
curr_xcoord = lscale.scale.coords()[0]
self.assertNotEqual(prev_xcoord, curr_xcoord)
# the label widget should have been repositioned too
linfo_2 = lscale.label.place_info()
self.assertEqual(lscale.label['text'], 0 if self.wantobjects else '0')
self.assertEqual(curr_xcoord, int(linfo_2['x']))
# change the range back
lscale.scale.configure(from_=0, to=10)
self.assertNotEqual(prev_xcoord, curr_xcoord)
self.assertEqual(prev_xcoord, int(linfo_1['x']))
lscale.destroy()
def test_variable_change(self):
x = ttk.LabeledScale(self.root)
x.pack()
x.update()
curr_xcoord = x.scale.coords()[0]
newval = x.value + 1
x.value = newval
# The following update is needed since the test doesn't use mainloop,
# at the same time this shouldn't affect test outcome
x.update()
self.assertEqual(x.value, newval)
self.assertEqual(x.label['text'],
newval if self.wantobjects else str(newval))
self.assertEqual(float(x.scale.get()), newval)
self.assertGreater(x.scale.coords()[0], curr_xcoord)
self.assertEqual(x.scale.coords()[0],
int(x.label.place_info()['x']))
# value outside range
if self.wantobjects:
conv = lambda x: x
else:
conv = int
x.value = conv(x.scale['to']) + 1 # no changes shouldn't happen
x.update()
self.assertEqual(x.value, newval)
self.assertEqual(conv(x.label['text']), newval)
self.assertEqual(float(x.scale.get()), newval)
self.assertEqual(x.scale.coords()[0],
int(x.label.place_info()['x']))
# non-integer value
x.value = newval = newval + 1.5
x.update()
self.assertEqual(x.value, int(newval))
self.assertEqual(conv(x.label['text']), int(newval))
self.assertEqual(float(x.scale.get()), newval)
x.destroy()
def test_resize(self):
x = ttk.LabeledScale(self.root)
x.pack(expand=True, fill='both')
x.update()
width, height = x.master.winfo_width(), x.master.winfo_height()
width_new, height_new = width * 2, height * 2
x.value = 3
x.update()
x.master.wm_geometry("%dx%d" % (width_new, height_new))
self.assertEqual(int(x.label.place_info()['x']),
x.scale.coords()[0])
# Reset geometry
x.master.wm_geometry("%dx%d" % (width, height))
x.destroy()
class OptionMenuTest(AbstractTkTest, unittest.TestCase):
def setUp(self):
super().setUp()
self.textvar = tkinter.StringVar(self.root)
def tearDown(self):
del self.textvar
super().tearDown()
def test_widget_destroy(self):
var = tkinter.StringVar(self.root)
optmenu = ttk.OptionMenu(self.root, var)
name = var._name
optmenu.update_idletasks()
optmenu.destroy()
self.assertEqual(optmenu.tk.globalgetvar(name), var.get())
del var
self.assertRaises(tkinter.TclError, optmenu.tk.globalgetvar, name)
def test_initialization(self):
self.assertRaises(tkinter.TclError,
ttk.OptionMenu, self.root, self.textvar, invalid='thing')
optmenu = ttk.OptionMenu(self.root, self.textvar, 'b', 'a', 'b')
self.assertEqual(optmenu._variable.get(), 'b')
self.assertTrue(optmenu['menu'])
self.assertTrue(optmenu['textvariable'])
optmenu.destroy()
def test_menu(self):
items = ('a', 'b', 'c')
default = 'a'
optmenu = ttk.OptionMenu(self.root, self.textvar, default, *items)
found_default = False
for i in range(len(items)):
value = optmenu['menu'].entrycget(i, 'value')
self.assertEqual(value, items[i])
if value == default:
found_default = True
self.assertTrue(found_default)
optmenu.destroy()
# default shouldn't be in menu if it is not part of values
default = 'd'
optmenu = ttk.OptionMenu(self.root, self.textvar, default, *items)
curr = None
i = 0
while True:
last, curr = curr, optmenu['menu'].entryconfigure(i, 'value')
if last == curr:
# no more menu entries
break
self.assertNotEqual(curr, default)
i += 1
self.assertEqual(i, len(items))
# check that variable is updated correctly
optmenu.pack()
optmenu['menu'].invoke(0)
self.assertEqual(optmenu._variable.get(), items[0])
# changing to an invalid index shouldn't change the variable
self.assertRaises(tkinter.TclError, optmenu['menu'].invoke, -1)
self.assertEqual(optmenu._variable.get(), items[0])
optmenu.destroy()
# specifying a callback
success = []
def cb_test(item):
self.assertEqual(item, items[1])
success.append(True)
optmenu = ttk.OptionMenu(self.root, self.textvar, 'a', command=cb_test,
*items)
optmenu['menu'].invoke(1)
if not success:
self.fail("Menu callback not invoked")
optmenu.destroy()
def test_unique_radiobuttons(self):
# check that radiobuttons are unique across instances (bpo25684)
items = ('a', 'b', 'c')
default = 'a'
optmenu = ttk.OptionMenu(self.root, self.textvar, default, *items)
textvar2 = tkinter.StringVar(self.root)
optmenu2 = ttk.OptionMenu(self.root, textvar2, default, *items)
optmenu.pack()
optmenu2.pack()
optmenu['menu'].invoke(1)
optmenu2['menu'].invoke(2)
optmenu_stringvar_name = optmenu['menu'].entrycget(0, 'variable')
optmenu2_stringvar_name = optmenu2['menu'].entrycget(0, 'variable')
self.assertNotEqual(optmenu_stringvar_name,
optmenu2_stringvar_name)
self.assertEqual(self.root.tk.globalgetvar(optmenu_stringvar_name),
items[1])
self.assertEqual(self.root.tk.globalgetvar(optmenu2_stringvar_name),
items[2])
optmenu.destroy()
optmenu2.destroy()
class DefaultRootTest(AbstractDefaultRootTest, unittest.TestCase):
def test_labeledscale(self):
self._test_widget(ttk.LabeledScale)
tests_gui = (LabeledScaleTest, OptionMenuTest, DefaultRootTest)
if __name__ == "__main__":
run_unittest(*tests_gui)
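# Illustrative sketch (not part of the test suite): typical interactive use of
# ttk.LabeledScale, the widget exercised above. Shown as comments so the test
# module's behaviour is unchanged.
#
# >>> import tkinter
# >>> from tkinter import ttk
# >>> root = tkinter.Tk()
# >>> lscale = ttk.LabeledScale(root, from_=0, to=10, compound='top')
# >>> lscale.pack(fill='x')
# >>> lscale.value = 5          # moves the scale and repositions the label
# >>> root.mainloop()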
|
the-stack_0_24453 | #!/usr/bin/env python
import pytest
from igraph import Graph
from node_finder import most_detrimental
# Do we output None if there is no path between source and sink?
# GRAPH:
# 0->1 u
# 2->1 u
# SOURCE: 0
# SINK: 2
def test_directions():
# create a graph consisting of three nodes
# where the other two nodes are pointing towards the middle one:
# 0->1<-2
graph = Graph([(0,1), (2,1)], directed=True)
graph.es['type'] = ['up', 'up']
node = most_detrimental(graph, 0, 2)
assert node is None
# Same graph. But this time, there's an extra edge from node 0 to node 2. There are
# two possible paths to the sink, so this will test if the correct path was chosen.
# The path that directly connects node 0 with node 1 should be chosen over the path
# that first passes through the intermediate node 2
# GRAPH:
# 0->1 u
# 2->1 u
# 0->2 u
# SOURCE: 0
# SINK: 1
def test_two_paths_no_node():
# create a graph of three nodes where nodes 0 and 2 both point at node 1,
# with an additional edge from node 0 to node 2:
# 0->1<-2, plus 0->2
graph = Graph([(0,1), (2,1), (0,2)], directed=True)
graph.es['type'] = ['up']*3
node = most_detrimental(graph, 0, 1)
assert node is None
# GRAPH:
# 0->1 u
# 2->3 u
# SOURCE: 0
# SINK: 3
def test_not_connected():
# create a graph with two unconnected edges
graph = Graph([(0,1), (2,3)], directed=True)
graph.es['type'] = ['up', 'up']
node = most_detrimental(graph, 0, 3)
assert node is None
|
the-stack_0_24454 | import os
import datetime
import json
from officy import JsonFile, Dir, File, Stime
from rumpy import RumClient
father_dir = os.path.dirname(os.path.dirname(__file__))
seedsfile = os.path.join(father_dir, "data", "seeds.json")
infofile = os.path.join(father_dir, "data", "groupsinfo.json")
FLAG_JOINGROUPS = True
PORT = 58356
if FLAG_JOINGROUPS:
bot = RumClient(port=PORT)
def search_groups(blocks_num=50, last_update_days=-30):
groupsinfo = JsonFile(infofile).read()
last_update = f"{Stime.days_later(datetime.date.today(),last_update_days)}"
gids = []
for group_id in groupsinfo:
if groupsinfo[group_id]["highest_height"] >= blocks_num:
if groupsinfo[group_id]["last_update"] >= last_update:
gids.append(group_id)
return gids
def _check_name(name):
names = ["测试", "test", "mytest", "去中心"]
for i in names:
if i in name:
return False
return True
def init_mdfile(gids):
seeds = JsonFile(seedsfile).read()
groupsinfo = JsonFile(infofile).read()
lines = []
for gid in gids:
seed = seeds.get(gid)
if not seed:
continue
name = seed["group_name"]
if not _check_name(name):
continue
if groupsinfo[gid]["abandoned"]:
continue
# join the groups
if FLAG_JOINGROUPS:
bot.group.join(seed)
lines.extend(
[
f'### {seed["group_name"]}\n\n',
f'{seed["app_key"]} | 区块高度: {groupsinfo[gid]["highest_height"]}\n\n',
f'{Stime.ts2datetime(seed["genesis_block"]["TimeStamp"]).date()} 创建 | {groupsinfo[gid]["last_update"][:10]} 更新\n\n',
"```seed\n",
json.dumps(seed, ensure_ascii=False),
"\n```\n\n",
]
)
File("seeds_toshare.md").writelines(lines)
otherfile = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
"rum-docs",
"docs",
"rum-app",
"README.md",
)
print(otherfile)
data = File(otherfile).read()
flag = "\n## 更多种子\n"
lines = [data.split(flag)[0], flag, "\n"] + lines
File(otherfile).writelines(lines)
if __name__ == "__main__":
groupseeds = search_groups(blocks_num=20, last_update_days=-30)
init_mdfile(groupseeds)
|
the-stack_0_24455 | """
Procedures for fitting marginal regression models to dependent data
using Generalized Estimating Equations.
References
----------
KY Liang and S Zeger. "Longitudinal data analysis using
generalized linear models". Biometrika (1986) 73 (1): 13-22.
S Zeger and KY Liang. "Longitudinal Data Analysis for Discrete and
Continuous Outcomes". Biometrics Vol. 42, No. 1 (Mar., 1986),
pp. 121-130
A Rotnitzky and NP Jewell (1990). "Hypothesis testing of regression
parameters in semiparametric generalized linear models for cluster
correlated data", Biometrika, 77, 485-497.
Xu Guo and Wei Pan (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
LA Mancl LA, TA DeRouen (2001). A covariance estimator for GEE with
improved small-sample properties. Biometrics. 2001 Mar;57(1):126-34.
"""
from statsmodels.compat.python import lzip
from statsmodels.compat.pandas import Appender
import numpy as np
from scipy import stats
import pandas as pd
import patsy
from collections import defaultdict
from statsmodels.tools.decorators import cache_readonly
import statsmodels.base.model as base
# used for wrapper:
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM, GLMResults
from statsmodels.genmod import cov_struct as cov_structs
import statsmodels.genmod.families.varfuncs as varfuncs
from statsmodels.genmod.families.links import Link
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
DomainWarning,
IterationLimitWarning,
ValueWarning)
import warnings
from statsmodels.graphics._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
from statsmodels.discrete.discrete_margins import (
_get_margeff_exog, _check_margeff_args, _effects_at, margeff_cov_with_se,
_check_at_is_all, _transform_names, _check_discrete_args,
_get_dummy_index, _get_count_index)
class ParameterConstraint(object):
"""
A class for managing linear equality constraints for a parameter
vector.
"""
def __init__(self, lhs, rhs, exog):
"""
Parameters
----------
lhs : ndarray
A q x p matrix which is the left hand side of the
constraint lhs * param = rhs. The number of constraints is
q >= 1 and p is the dimension of the parameter vector.
rhs : ndarray
A 1-dimensional vector of length q which is the right hand
side of the constraint equation.
exog : ndarray
The n x p exognenous data for the full model.
"""
# In case a row or column vector is passed (patsy linear
# constraints passes a column vector).
rhs = np.atleast_1d(rhs.squeeze())
if rhs.ndim > 1:
raise ValueError("The right hand side of the constraint "
"must be a vector.")
if len(rhs) != lhs.shape[0]:
raise ValueError("The number of rows of the left hand "
"side constraint matrix L must equal "
"the length of the right hand side "
"constraint vector R.")
self.lhs = lhs
self.rhs = rhs
# The columns of lhs0 are an orthogonal basis for the
# orthogonal complement to row(lhs), the columns of lhs1 are
# an orthogonal basis for row(lhs). The columns of lhsf =
# [lhs0, lhs1] are mutually orthogonal.
lhs_u, lhs_s, lhs_vt = np.linalg.svd(lhs.T, full_matrices=1)
self.lhs0 = lhs_u[:, len(lhs_s):]
self.lhs1 = lhs_u[:, 0:len(lhs_s)]
self.lhsf = np.hstack((self.lhs0, self.lhs1))
# param0 is one solution to the underdetermined system
# L * param = R.
self.param0 = np.dot(self.lhs1, np.dot(lhs_vt, self.rhs) /
lhs_s)
self._offset_increment = np.dot(exog, self.param0)
self.orig_exog = exog
self.exog_fulltrans = np.dot(exog, self.lhsf)
def offset_increment(self):
"""
Returns a vector that should be added to the offset vector to
accommodate the constraint.
Parameters
----------
exog : array_like
The exogeneous data for the model.
"""
return self._offset_increment
def reduced_exog(self):
"""
Returns a linearly transformed exog matrix whose columns span
the constrained model space.
Parameters
----------
exog : array_like
The exogeneous data for the model.
"""
return self.exog_fulltrans[:, 0:self.lhs0.shape[1]]
def restore_exog(self):
"""
Returns the full exog matrix before it was reduced to
satisfy the constraint.
"""
return self.orig_exog
def unpack_param(self, params):
"""
Converts the parameter vector `params` from reduced to full
coordinates.
"""
return self.param0 + np.dot(self.lhs0, params)
def unpack_cov(self, bcov):
"""
Converts the covariance matrix `bcov` from reduced to full
coordinates.
"""
return np.dot(self.lhs0, np.dot(bcov, self.lhs0.T))
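# Minimal usage sketch (for illustration only; ParameterConstraint is an
# internal helper, and the data below are made up): constrain the second and
# third coefficients to be equal, i.e. lhs * param = rhs with
# lhs = [[0, 1, -1]] and rhs = [0].
#
# >>> import numpy as np
# >>> exog = np.random.normal(size=(50, 3))      # hypothetical design matrix
# >>> lhs = np.array([[0., 1., -1.]])
# >>> rhs = np.array([0.])
# >>> pc = ParameterConstraint(lhs, rhs, exog)
# >>> reduced = pc.reduced_exog()                # 50 x 2 design for the reduced model
# >>> offset = pc.offset_increment()             # add to the model's offset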
_gee_init_doc = """
Marginal regression model fit using Generalized Estimating Equations.
GEE can be used to fit Generalized Linear Models (GLMs) when the
data have a grouped structure, and the observations are possibly
correlated within groups but not between groups.
Parameters
----------
endog : array_like
1d array of endogenous values (i.e. responses, outcomes,
dependent variables, or 'Y' values).
exog : array_like
2d array of exogenous values (i.e. covariates, predictors,
independent variables, regressors, or 'X' values). A `nobs x
k` array where `nobs` is the number of observations and `k` is
the number of regressors. An intercept is not included by
default and should be added by the user. See
`statsmodels.tools.add_constant`.
groups : array_like
A 1d array of length `nobs` containing the group labels.
time : array_like
A 2d array of time (or other index) values, used by some
dependence structures to define similarity relationships among
observations within a cluster.
family : family class instance
%(family_doc)s
cov_struct : CovStruct class instance
The default is Independence. To specify an exchangeable
structure use cov_struct = Exchangeable(). See
statsmodels.genmod.cov_struct.CovStruct for more
information.
offset : array_like
An offset to be included in the fit. If provided, must be
an array whose length is the number of rows in exog.
dep_data : array_like
Additional data passed to the dependence structure.
constraint : (ndarray, ndarray)
If provided, the constraint is a tuple (L, R) such that the
model parameters are estimated under the constraint L *
param = R, where L is a q x p matrix and R is a
q-dimensional vector. If constraint is provided, a score
test is performed to compare the constrained model to the
unconstrained model.
update_dep : bool
If true, the dependence parameters are optimized, otherwise
they are held fixed at their starting values.
weights : array_like
An array of weights to use in the analysis. The weights must
be constant within each group. These correspond to
probability weights (pweights) in Stata.
%(extra_params)s
See Also
--------
statsmodels.genmod.families.family
:ref:`families`
:ref:`links`
Notes
-----
Only the following combinations make sense for family and link ::
                     + ident log logit probit cloglog pow opow nbinom loglog logc
      Gaussian       |   x    x                         x
      inv Gaussian   |   x    x                         x
      binomial       |   x    x    x      x       x     x    x            x     x
      Poisson        |   x    x                         x
      neg binomial   |   x    x                         x           x
      gamma          |   x    x                         x
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer
to are already arrays and these arrays are changed, endog and
exog will change.
The "robust" covariance type is the standard "sandwich estimator"
(e.g. Liang and Zeger (1986)). It is the default here and in most
other packages. The "naive" estimator gives smaller standard
errors, but is only correct if the working correlation structure
is correctly specified. The "bias reduced" estimator of Mancl and
DeRouen (Biometrics, 2001) reduces the downward bias of the robust
estimator.
The robust covariance provided here follows Liang and Zeger (1986)
and agrees with R's gee implementation. To obtain the robust
standard errors reported in Stata, multiply by sqrt(N / (N - g)),
where N is the total sample size, and g is the average group size.
Examples
--------
%(example)s
"""
_gee_family_doc = """\
The default is Gaussian. To specify the binomial
distribution use `family=sm.families.Binomial()`. Each family
can take a link instance as an argument. See
statsmodels.genmod.families.family for more information."""
_gee_ordinal_family_doc = """\
The only family supported is `Binomial`. The default `Logit`
link may be replaced with `probit` if desired."""
_gee_nominal_family_doc = """\
The default value `None` uses a multinomial logit family
specifically designed for use with GEE. Setting this
argument to a non-default value is not currently supported."""
_gee_fit_doc = """
Fits a marginal regression model using generalized estimating
equations (GEE).
Parameters
----------
maxiter : int
The maximum number of iterations
ctol : float
The convergence criterion for stopping the Gauss-Seidel
iterations
start_params : array_like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
params_niter : int
The number of Gauss-Seidel updates of the mean structure
parameters that take place prior to each update of the
dependence structure.
first_dep_update : int
No dependence structure updates occur before this
iteration number.
cov_type : str
One of "robust", "naive", or "bias_reduced".
ddof_scale : scalar or None
The scale parameter is estimated as the sum of squared
Pearson residuals divided by `N - ddof_scale`, where N
is the total sample size. If `ddof_scale` is None, the
number of covariates (including an intercept if present)
is used.
scaling_factor : scalar
The estimated covariance of the parameter estimates is
scaled by this value. Default is 1, Stata uses N / (N - g),
where N is the total sample size and g is the average group
size.
scale : str or float, optional
`scale` can be None, 'X2', or a float
If a float, its value is used as the scale parameter.
The default value is None, which uses `X2` (Pearson's
chi-square) for Gamma, Gaussian, and Inverse Gaussian.
The default is 1 for the Binomial and Poisson families.
Returns
-------
An instance of the GEEResults class or subclass
Notes
-----
If convergence difficulties occur, increase the values of
`first_dep_update` and/or `params_niter`. Setting
`first_dep_update` to a greater value (e.g. ~10-20) causes the
algorithm to move close to the GLM solution before attempting
to identify the dependence structure.
For the Gaussian family, there is no benefit to setting
`params_niter` to a value greater than 1, since the mean
structure parameters converge in one step.
"""
_gee_results_doc = """
Attributes
----------
cov_params_default : ndarray
default covariance of the parameter estimates. Is chosen among one
of the following three based on `cov_type`
cov_robust : ndarray
covariance of the parameter estimates that is robust
cov_naive : ndarray
covariance of the parameter estimates that is not robust to
correlation or variance misspecification
cov_robust_bc : ndarray
covariance of the parameter estimates that is robust and bias
reduced
converged : bool
indicator for convergence of the optimization.
True if the norm of the score is smaller than a threshold
cov_type : str
string indicating whether a "robust", "naive" or "bias_reduced"
covariance is used as default
fit_history : dict
Contains information about the iterations.
fittedvalues : ndarray
Linear predicted values for the fitted model.
dot(exog, params)
model : class instance
Pointer to GEE model instance that called `fit`.
normalized_cov_params : ndarray
See GEE docstring
params : ndarray
The coefficients of the fitted model. Note that
interpretation of the coefficients often depends on the
distribution family and the data.
scale : float
The estimate of the scale / dispersion for the model fit.
See GEE.fit for more information.
score_norm : float
norm of the score at the end of the iterative estimation.
bse : ndarray
The standard errors of the fitted GEE parameters.
"""
_gee_example = """
Logistic regression with autoregressive working dependence:
>>> import statsmodels.api as sm
>>> family = sm.families.Binomial()
>>> va = sm.cov_struct.Autoregressive()
>>> model = sm.GEE(endog, exog, group, family=family, cov_struct=va)
>>> result = model.fit()
>>> print(result.summary())
Use formulas to fit a Poisson GLM with independent working
dependence:
>>> import statsmodels.api as sm
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = sm.GEE.from_formula("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
Equivalent, using the formula API:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = smf.gee("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_ordinal_example = """
Fit an ordinal regression model using GEE, with "global
odds ratio" dependence:
>>> import statsmodels.api as sm
>>> gor = sm.cov_struct.GlobalOddsRatio("ordinal")
>>> model = sm.OrdinalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.formula.api as smf
>>> model = smf.ordinal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_nominal_example = """
Fit a nominal regression model using GEE:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> gor = sm.cov_struct.GlobalOddsRatio("nominal")
>>> model = sm.NominalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.api as sm
>>> model = sm.NominalGEE.from_formula("y ~ x1 + x2", groups,
data, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using the formula API:
>>> import statsmodels.formula.api as smf
>>> model = smf.nominal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
def _check_args(endog, exog, groups, time, offset, exposure):
if endog.size != exog.shape[0]:
raise ValueError("Leading dimension of 'exog' should match "
"length of 'endog'")
if groups.size != endog.size:
raise ValueError("'groups' and 'endog' should have the same size")
if time is not None and (time.size != endog.size):
raise ValueError("'time' and 'endog' should have the same size")
if offset is not None and (offset.size != endog.size):
raise ValueError("'offset and 'endog' should have the same size")
if exposure is not None and (exposure.size != endog.size):
raise ValueError("'exposure' and 'endog' should have the same size")
class GEE(GLM):
__doc__ = (
" Marginal Regression Model using Generalized Estimating "
"Equations.\n" + _gee_init_doc %
{'extra_params': base._missing_param_doc,
'family_doc': _gee_family_doc,
'example': _gee_example})
cached_means = None
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
exposure=None, dep_data=None, constraint=None,
update_dep=True, weights=None, **kwargs):
if family is not None:
if not isinstance(family.link, tuple(family.safe_links)):
import warnings
msg = ("The {0} link function does not respect the "
"domain of the {1} family.")
warnings.warn(msg.format(family.link.__class__.__name__,
family.__class__.__name__),
DomainWarning)
groups = np.asarray(groups) # in case groups is pandas
if "missing_idx" in kwargs and kwargs["missing_idx"] is not None:
# If here, we are entering from super.from_formula; missing
# has already been dropped from endog and exog, but not from
# the other variables.
ii = ~kwargs["missing_idx"]
groups = groups[ii]
if time is not None:
time = time[ii]
if offset is not None:
offset = offset[ii]
if exposure is not None:
exposure = exposure[ii]
del kwargs["missing_idx"]
_check_args(endog, exog, groups, time, offset, exposure)
self.missing = missing
self.dep_data = dep_data
self.constraint = constraint
self.update_dep = update_dep
self._fit_history = defaultdict(list)
# Pass groups, time, offset, and dep_data so they are
# processed for missing data along with endog and exog.
# Calling super creates self.exog, self.endog, etc. as
# ndarrays and the original exog, endog, etc. are
# self.data.endog, etc.
super(GEE, self).__init__(endog, exog, groups=groups,
time=time, offset=offset,
exposure=exposure, weights=weights,
dep_data=dep_data, missing=missing,
family=family, **kwargs)
self._init_keys.extend(["update_dep", "constraint", "family",
"cov_struct"])
# Handle the family argument
if family is None:
family = families.Gaussian()
else:
if not issubclass(family.__class__, families.Family):
raise ValueError("GEE: `family` must be a genmod "
"family instance")
self.family = family
# Handle the cov_struct argument
if cov_struct is None:
cov_struct = cov_structs.Independence()
else:
if not issubclass(cov_struct.__class__, cov_structs.CovStruct):
raise ValueError("GEE: `cov_struct` must be a genmod "
"cov_struct instance")
self.cov_struct = cov_struct
# Handle the constraint
self.constraint = None
if constraint is not None:
if len(constraint) != 2:
raise ValueError("GEE: `constraint` must be a 2-tuple.")
if constraint[0].shape[1] != self.exog.shape[1]:
raise ValueError(
"GEE: the left hand side of the constraint must have "
"the same number of columns as the exog matrix.")
self.constraint = ParameterConstraint(constraint[0],
constraint[1],
self.exog)
if self._offset_exposure is not None:
self._offset_exposure += self.constraint.offset_increment()
else:
self._offset_exposure = (
self.constraint.offset_increment().copy())
self.exog = self.constraint.reduced_exog()
# Create list of row indices for each group
group_labels, ix = np.unique(self.groups, return_inverse=True)
se = pd.Series(index=np.arange(len(ix)), dtype="int")
gb = se.groupby(ix).groups
dk = [(lb, np.asarray(gb[k])) for k, lb in enumerate(group_labels)]
self.group_indices = dict(dk)
self.group_labels = group_labels
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
self.endog_li = self.cluster_list(self.endog)
self.exog_li = self.cluster_list(self.exog)
if self.weights is not None:
self.weights_li = self.cluster_list(self.weights)
self.weights_li = [x[0] for x in self.weights_li]
self.weights_li = np.asarray(self.weights_li)
self.num_group = len(self.endog_li)
# Time defaults to a 1d grid with equal spacing
if self.time is not None:
self.time = np.asarray(self.time, np.float64)
if self.time.ndim == 1:
self.time = self.time[:, None]
self.time_li = self.cluster_list(self.time)
else:
self.time_li = \
[np.arange(len(y), dtype=np.float64)[:, None]
for y in self.endog_li]
self.time = np.concatenate(self.time_li)
if (self._offset_exposure is None or
(np.isscalar(self._offset_exposure) and
self._offset_exposure == 0.)):
self.offset_li = None
else:
self.offset_li = self.cluster_list(self._offset_exposure)
if constraint is not None:
self.constraint.exog_fulltrans_li = \
self.cluster_list(self.constraint.exog_fulltrans)
self.family = family
self.cov_struct.initialize(self)
# Total sample size
group_ns = [len(y) for y in self.endog_li]
self.nobs = sum(group_ns)
# The following are column based, not on rank see #1928
self.df_model = self.exog.shape[1] - 1 # assumes constant
self.df_resid = self.nobs - self.exog.shape[1]
# Skip the covariance updates if all groups have a single
# observation (reduces to fitting a GLM).
maxgroup = max([len(x) for x in self.endog_li])
if maxgroup == 1:
self.update_dep = False
# Override to allow groups and time to be passed as variable
# names.
@classmethod
def from_formula(cls, formula, groups, data, subset=None,
time=None, offset=None, exposure=None,
*args, **kwargs):
"""
Create a GEE model instance from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
groups : array_like or string
Array of grouping labels. If a string, this is the name
of a variable in `data` that contains the grouping labels.
data : array_like
The data for the model.
subset : array_like
An array-like object of booleans, integers, or index
values that indicate the subset of the data to used when
fitting the model.
time : array_like or string
The time values, used for dependence structures involving
distances between observations. If a string, this is the
name of a variable in `data` that contains the time
values.
offset : array_like or string
The offset values, added to the linear predictor. If a
string, this is the name of a variable in `data` that
contains the offset values.
exposure : array_like or string
The exposure values, only used if the link function is the
logarithm function, in which case the log of `exposure`
is added to the offset (if any). If a string, this is the
name of a variable in `data` that contains the offset
values.
%(missing_param_doc)s
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with two exceptions. `dep_data`
is processed as described below. The ``eval_env`` keyword is
passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace.
If you wish to use a "clean" environment set ``eval_env=-1``.
Optional arguments
------------------
dep_data : str or array_like
Data used for estimating the dependence structure. See
specific dependence structure classes (e.g. Nested) for
details. If `dep_data` is a string, it is interpreted as
a formula that is applied to `data`. If it is an array, it
must be an array of strings corresponding to column names in
`data`. Otherwise it must be an array-like with the same
number of rows as data.
Returns
-------
model : GEE model instance
Notes
-----
`data` must define __getitem__ with the keys in the formula
terms args and kwargs are passed on to the model
instantiation. E.g., a numpy structured or rec array, a
dictionary, or a pandas DataFrame.
""" % {'missing_param_doc': base._missing_param_doc}
groups_name = "Groups"
if isinstance(groups, str):
groups_name = groups
groups = data[groups]
if isinstance(time, str):
time = data[time]
if isinstance(offset, str):
offset = data[offset]
if isinstance(exposure, str):
exposure = data[exposure]
dep_data = kwargs.get("dep_data")
dep_data_names = None
if dep_data is not None:
if isinstance(dep_data, str):
dep_data = patsy.dmatrix(dep_data, data,
return_type='dataframe')
dep_data_names = dep_data.columns.tolist()
else:
dep_data_names = list(dep_data)
dep_data = data[dep_data]
kwargs["dep_data"] = np.asarray(dep_data)
family = None
if "family" in kwargs:
family = kwargs["family"]
del kwargs["family"]
model = super(GEE, cls).from_formula(formula, data=data, subset=subset,
groups=groups, time=time,
offset=offset,
exposure=exposure,
family=family,
*args, **kwargs)
if dep_data_names is not None:
model._dep_data_names = dep_data_names
model._groups_name = groups_name
return model
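# Usage sketch for from_formula with dep_data (hedged; the column names below
# are made up for illustration). Passing dep_data as a formula string lets a
# dependence structure such as Nested pull its grouping variables from `data`.
#
# >>> import statsmodels.api as sm
# >>> ne = sm.cov_struct.Nested()
# >>> model = sm.GEE.from_formula("y ~ age + trt", groups="subject",
# ...                             data=data, cov_struct=ne,
# ...                             dep_data="0 + clinic")   # hypothetical column
# >>> result = model.fit()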
def cluster_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
cluster structure.
"""
if array.ndim == 1:
return [np.array(array[self.group_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.group_indices[k], :])
for k in self.group_labels]
def compare_score_test(self, submodel):
"""
Perform a score test for the given submodel against this model.
Parameters
----------
submodel : GEEResults instance
A fitted GEE model that is a submodel of this model.
Returns
-------
A dictionary with keys "statistic", "p-value", and "df",
containing the score test statistic, its chi^2 p-value,
and the degrees of freedom used to compute the p-value.
Notes
-----
The score test can be performed without calling 'fit' on the
larger model. The provided submodel must be obtained from a
fitted GEE.
This method performs the same score test as can be obtained by
fitting the GEE with a linear constraint and calling `score_test`
on the results.
References
----------
Xu Guo and Wei Pan (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
# Since the model has not been fit, its scaletype has not been
# set. So give it the scaletype of the submodel.
self.scaletype = submodel.model.scaletype
# Check consistency between model and submodel (not a comprehensive
# check)
submod = submodel.model
if self.exog.shape[0] != submod.exog.shape[0]:
msg = "Model and submodel have different numbers of cases."
raise ValueError(msg)
if self.exog.shape[1] == submod.exog.shape[1]:
msg = "Model and submodel have the same number of variables"
warnings.warn(msg)
if not isinstance(self.family, type(submod.family)):
msg = "Model and submodel have different GLM families."
warnings.warn(msg)
if not isinstance(self.cov_struct, type(submod.cov_struct)):
warnings.warn("Model and submodel have different GEE covariance "
"structures.")
if not np.equal(self.weights, submod.weights).all():
msg = "Model and submodel should have the same weights."
warnings.warn(msg)
# Get the positions of the submodel variables in the
# parent model
qm, qc = _score_test_submodel(self, submodel.model)
if qm is None:
msg = "The provided model is not a submodel."
raise ValueError(msg)
# Embed the submodel params into a params vector for the
# parent model
params_ex = np.dot(qm, submodel.params)
# Attempt to preserve the state of the parent model
cov_struct_save = self.cov_struct
import copy
cached_means_save = copy.deepcopy(self.cached_means)
# Get the score vector of the submodel params in
# the parent model
self.cov_struct = submodel.cov_struct
self.update_cached_means(params_ex)
_, score = self._update_mean_params()
if score is None:
msg = "Singular matrix encountered in GEE score test"
warnings.warn(msg, ConvergenceWarning)
return None
if not hasattr(self, "ddof_scale"):
self.ddof_scale = self.exog.shape[1]
if not hasattr(self, "scaling_factor"):
self.scaling_factor = 1
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = np.dot(qc.T, score) / scale
amat = np.linalg.inv(ncov1)
bmat_11 = np.dot(qm.T, np.dot(cmat, qm))
bmat_22 = np.dot(qc.T, np.dot(cmat, qc))
bmat_12 = np.dot(qm.T, np.dot(cmat, qc))
amat_11 = np.dot(qm.T, np.dot(amat, qm))
amat_12 = np.dot(qm.T, np.dot(amat, qc))
score_cov = bmat_22 - np.dot(amat_12.T,
np.linalg.solve(amat_11, bmat_12))
score_cov -= np.dot(bmat_12.T,
np.linalg.solve(amat_11, amat_12))
score_cov += np.dot(amat_12.T,
np.dot(np.linalg.solve(amat_11, bmat_11),
np.linalg.solve(amat_11, amat_12)))
# Attempt to restore state
self.cov_struct = cov_struct_save
self.cached_means = cached_means_save
from scipy.stats.distributions import chi2
score_statistic = np.dot(score2,
np.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
return {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
def estimate_scale(self):
"""
Estimate the dispersion/scale.
"""
if self.scaletype is None:
if isinstance(self.family, (families.Binomial, families.Poisson,
families.NegativeBinomial,
_Multinomial)):
return 1.
elif isinstance(self.scaletype, float):
return np.array(self.scaletype)
endog = self.endog_li
cached_means = self.cached_means
nobs = self.nobs
varfunc = self.family.variance
scale = 0.
fsum = 0.
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
expval, _ = cached_means[i]
f = self.weights_li[i] if self.weights is not None else 1.
sdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / sdev
scale += f * np.sum(resid ** 2)
fsum += f * len(endog[i])
scale /= (fsum * (nobs - self.ddof_scale) / float(nobs))
return scale
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array_like
The exogenous data at which the derivative is computed.
lin_pred : array_like
The values of the linear predictor.
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
Notes
-----
If there is an offset or exposure, it should be added to
`lin_pred` prior to calling this function.
"""
idl = self.family.link.inverse_deriv(lin_pred)
dmat = exog * idl[:, None]
return dmat
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog.
Parameters
----------
exog : array_like
Values of the independent variables at which the derivative
is calculated.
params : array_like
Parameter values at which the derivative is calculated.
offset_exposure : array_like, optional
Combined offset and exposure.
Returns
-------
The derivative of the expected endog with respect to exog.
"""
lin_pred = np.dot(exog, params)
if offset_exposure is not None:
lin_pred += offset_exposure
idl = self.family.link.inverse_deriv(lin_pred)
dmat = np.outer(idl, params)
return dmat
def _update_mean_params(self):
"""
Returns
-------
update : array_like
The update vector such that params + update is the next
iterate when solving the score equations.
score : array_like
The current value of the score equations, not
incorporating the scale parameter. If desired,
multiply this vector by the scale parameter to
incorporate the scale.
"""
endog = self.endog_li
exog = self.exog_li
cached_means = self.cached_means
varfunc = self.family.variance
bmat, score = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(expval, i,
sdev, (dmat, resid))
if rslt is None:
return None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
score += f * np.dot(dmat.T, vinv_resid)
update = np.linalg.solve(bmat, score)
self._fit_history["cov_adjust"].append(
self.cov_struct.cov_adjust)
return update, score
def update_cached_means(self, mean_params):
"""
cached_means should always contain the most recent calculation
of the group-wise mean vectors. This function should be
called every time the regression parameters are changed, to
keep the cached means up to date.
"""
endog = self.endog_li
exog = self.exog_li
offset = self.offset_li
linkinv = self.family.link.inverse
self.cached_means = []
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
lpr = np.dot(exog[i], mean_params)
if offset is not None:
lpr += offset[i]
expval = linkinv(lpr)
self.cached_means.append((expval, lpr))
def _covmat(self):
"""
Returns the sampling covariance matrix of the regression
parameters and related quantities.
Returns
-------
cov_robust : array_like
The robust, or sandwich estimate of the covariance, which
is meaningful even if the working covariance structure is
incorrectly specified.
cov_naive : array_like
The model-based estimate of the covariance, which is
meaningful if the covariance structure is correctly
specified.
cmat : array_like
The center matrix of the sandwich expression, used in
obtaining score test results.
"""
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
# Calculate the naive (model-based) and robust (sandwich)
# covariances.
bmat, cmat = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat, resid))
if rslt is None:
return None, None, None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
dvinv_resid = f * np.dot(dmat.T, vinv_resid)
cmat += np.outer(dvinv_resid, dvinv_resid)
scale = self.estimate_scale()
bmati = np.linalg.inv(bmat)
cov_naive = bmati * scale
cov_robust = np.dot(bmati, np.dot(cmat, bmati))
cov_naive *= self.scaling_factor
cov_robust *= self.scaling_factor
return cov_robust, cov_naive, cmat
# Calculate the bias-corrected sandwich estimate of Mancl and
# DeRouen.
def _bc_covmat(self, cov_naive):
cov_naive = cov_naive / self.scaling_factor
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
scale = self.estimate_scale()
bcm = 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat,))
if rslt is None:
return None
vinv_d = rslt[0]
vinv_d /= scale
hmat = np.dot(vinv_d, cov_naive)
hmat = np.dot(hmat, dmat.T).T
f = self.weights_li[i] if self.weights is not None else 1.
aresid = np.linalg.solve(np.eye(len(resid)) - hmat, resid)
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (aresid,))
if rslt is None:
return None
srt = rslt[0]
srt = f * np.dot(dmat.T, srt) / scale
bcm += np.outer(srt, srt)
cov_robust_bc = np.dot(cov_naive, np.dot(bcm, cov_naive))
cov_robust_bc *= self.scaling_factor
return cov_robust_bc
def _starting_params(self):
if np.isscalar(self._offset_exposure):
offset = None
else:
offset = self._offset_exposure
model = GLM(self.endog, self.exog, family=self.family,
offset=offset, freq_weights=self.weights)
result = model.fit()
return result.params
@Appender(_gee_fit_doc)
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust', ddof_scale=None, scaling_factor=1.,
scale=None):
self.scaletype = scale
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
self.scaling_factor = scaling_factor
self._fit_history = defaultdict(list)
if self.weights is not None and cov_type == 'naive':
raise ValueError("when using weights, cov_type may not be naive")
if start_params is None:
mean_params = self._starting_params()
else:
start_params = np.asarray(start_params)
mean_params = start_params.copy()
self.update_cached_means(mean_params)
del_params = -1.
num_assoc_updates = 0
for itr in range(maxiter):
update, score = self._update_mean_params()
if update is None:
warnings.warn("Singular matrix encountered in GEE update",
ConvergenceWarning)
break
mean_params += update
self.update_cached_means(mean_params)
# L2 norm of the change in mean structure parameters at
# this iteration.
del_params = np.sqrt(np.sum(score ** 2))
self._fit_history['params'].append(mean_params.copy())
self._fit_history['score'].append(score)
self._fit_history['dep_params'].append(
self.cov_struct.dep_params)
# Do not exit until the association parameters have been
# updated at least once.
if (del_params < ctol and
(num_assoc_updates > 0 or self.update_dep is False)):
break
# Update the dependence structure
if (self.update_dep and (itr % params_niter) == 0
and (itr >= first_dep_update)):
self._update_assoc(mean_params)
num_assoc_updates += 1
if del_params >= ctol:
warnings.warn("Iteration limit reached prior to convergence",
IterationLimitWarning)
if mean_params is None:
warnings.warn("Unable to estimate GEE parameters.",
ConvergenceWarning)
return None
bcov, ncov, _ = self._covmat()
if bcov is None:
warnings.warn("Estimated covariance structure for GEE "
"estimates is singular", ConvergenceWarning)
return None
bc_cov = None
if cov_type == "bias_reduced":
bc_cov = self._bc_covmat(ncov)
if self.constraint is not None:
x = mean_params.copy()
mean_params, bcov = self._handle_constraint(mean_params, bcov)
if mean_params is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
y, ncov = self._handle_constraint(x, ncov)
if y is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
if bc_cov is not None:
y, bc_cov = self._handle_constraint(x, bc_cov)
if x is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
scale = self.estimate_scale()
# kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type=cov_type,
cov_robust=bcov,
cov_naive=ncov,
cov_robust_bc=bc_cov)
# The superclass constructor will multiply the covariance
# matrix argument bcov by scale, which we do not want, so we
# divide bcov by the scale parameter here
results = GEEResults(self, mean_params, bcov / scale, scale,
cov_type=cov_type, use_t=False,
attr_kwds=res_kwds)
# attributes not needed during results__init__
results.fit_history = self._fit_history
self.fit_history = defaultdict(list)
results.score_norm = del_params
results.converged = (del_params < ctol)
results.cov_struct = self.cov_struct
results.params_niter = params_niter
results.first_dep_update = first_dep_update
results.ctol = ctol
results.maxiter = maxiter
# These will be copied over to subclasses when upgrading.
results._props = ["cov_type", "use_t",
"cov_params_default", "cov_robust",
"cov_naive", "cov_robust_bc",
"fit_history",
"score_norm", "converged", "cov_struct",
"params_niter", "first_dep_update", "ctol",
"maxiter"]
return GEEResultsWrapper(results)
def _update_regularized(self, params, pen_wt, scad_param, eps):
sn, hm = 0, 0
for i in range(self.num_group):
expval, _ = self.cached_means[i]
resid = self.endog_li[i] - expval
sdev = np.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid, ex))
sn0 = rslt[0]
sn += np.dot(ex.T, sn0)
hm0 = rslt[1]
hm += np.dot(ex.T, hm0)
# Wang et al. divide sn here by num_group, but that
# seems to be incorrect
ap = np.abs(params)
clipped = np.clip(scad_param * pen_wt - ap, 0, np.inf)
en = pen_wt * clipped * (ap > pen_wt)
en /= (scad_param - 1) * pen_wt
en += pen_wt * (ap <= pen_wt)
en /= eps + ap
hm.flat[::hm.shape[0] + 1] += self.num_group * en
sn -= self.num_group * en * params
update = np.linalg.solve(hm, sn)
hm *= self.estimate_scale()
return update, hm
def _regularized_covmat(self, mean_params):
self.update_cached_means(mean_params)
ma = 0
for i in range(self.num_group):
expval, _ = self.cached_means[i]
resid = self.endog_li[i] - expval
sdev = np.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid,))
ma0 = np.dot(ex.T, rslt[0])
ma += np.outer(ma0, ma0)
return ma
def fit_regularized(self, pen_wt, scad_param=3.7, maxiter=100,
ddof_scale=None, update_assoc=5,
ctol=1e-5, ztol=1e-3, eps=1e-6, scale=None):
"""
Regularized estimation for GEE.
Parameters
----------
pen_wt : float
The penalty weight (a non-negative scalar).
scad_param : float
Non-negative scalar determining the shape of the Scad
penalty.
maxiter : int
The maximum number of iterations.
ddof_scale : int
Value to subtract from `nobs` when calculating the
denominator degrees of freedom for t-statistics, defaults
to the number of columns in `exog`.
update_assoc : int
The dependence parameters are updated every `update_assoc`
iterations of the mean structure parameter updates.
ctol : float
Convergence criterion, default is one order of magnitude
smaller than proposed in section 3.1 of Wang et al.
ztol : float
Coefficients smaller than this value are treated as
being zero, default is based on section 5 of Wang et al.
eps : non-negative scalar
Numerical constant, see section 3.2 of Wang et al.
scale : float or string
If a float, this value is used as the scale parameter.
If "X2", the scale parameter is always estimated using
Pearson's chi-square method (e.g. as in a quasi-Poisson
analysis). If None, the default approach for the family
is used to estimate the scale parameter.
Returns
-------
GEEResults instance. Note that not all methods of the results
class make sense when the model has been fit with regularization.
Notes
-----
This implementation assumes that the link is canonical.
References
----------
Wang L, Zhou J, Qu A. (2012). Penalized generalized estimating
equations for high-dimensional longitudinal data analysis.
Biometrics. 2012 Jun;68(2):353-60.
doi: 10.1111/j.1541-0420.2011.01678.x.
https://www.ncbi.nlm.nih.gov/pubmed/21955051
http://users.stat.umn.edu/~wangx346/research/GEE_selection.pdf
"""
self.scaletype = scale
mean_params = np.zeros(self.exog.shape[1])
self.update_cached_means(mean_params)
converged = False
fit_history = defaultdict(list)
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
# Keep this private for now. In some cases the early steps are
# very small so it seems necessary to ensure a certain minimum
# number of iterations before testing for convergence.
miniter = 20
for itr in range(maxiter):
update, hm = self._update_regularized(
mean_params, pen_wt, scad_param, eps)
if update is None:
msg = "Singular matrix encountered in regularized GEE update",
warnings.warn(msg, ConvergenceWarning)
break
if itr > miniter and np.sqrt(np.sum(update**2)) < ctol:
converged = True
break
mean_params += update
fit_history['params'].append(mean_params.copy())
self.update_cached_means(mean_params)
if itr != 0 and (itr % update_assoc == 0):
self._update_assoc(mean_params)
if not converged:
msg = "GEE.fit_regularized did not converge"
warnings.warn(msg)
mean_params[np.abs(mean_params) < ztol] = 0
self._update_assoc(mean_params)
ma = self._regularized_covmat(mean_params)
cov = np.linalg.solve(hm, ma)
cov = np.linalg.solve(hm, cov.T)
# kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type="robust", cov_robust=cov)
scale = self.estimate_scale()
rslt = GEEResults(self, mean_params, cov, scale,
regularized=True, attr_kwds=res_kwds)
rslt.fit_history = fit_history
return GEEResultsWrapper(rslt)
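# Usage sketch (hypothetical data): penalized GEE with the SCAD penalty as
# described in Wang et al. (2012). The penalty weight pen_wt is chosen by the
# user (e.g. via cross-validation); 0.1 below is only a placeholder.
#
# >>> import statsmodels.api as sm
# >>> model = sm.GEE(endog, exog, groups, family=sm.families.Gaussian())
# >>> result = model.fit_regularized(pen_wt=0.1)
# >>> result.params        # small coefficients are set exactly to zero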
def _handle_constraint(self, mean_params, bcov):
"""
Expand the parameter estimate `mean_params` and covariance matrix
`bcov` to the coordinate system of the unconstrained model.
Parameters
----------
mean_params : array_like
A parameter vector estimate for the reduced model.
bcov : array_like
The covariance matrix of mean_params.
Returns
-------
mean_params : array_like
The input parameter vector mean_params, expanded to the
coordinate system of the full model
bcov : array_like
The input covariance matrix bcov, expanded to the
coordinate system of the full model
"""
# The number of variables in the full model
red_p = len(mean_params)
full_p = self.constraint.lhs.shape[1]
mean_params0 = np.r_[mean_params, np.zeros(full_p - red_p)]
# Get the score vector under the full model.
save_exog_li = self.exog_li
self.exog_li = self.constraint.exog_fulltrans_li
import copy
save_cached_means = copy.deepcopy(self.cached_means)
self.update_cached_means(mean_params0)
_, score = self._update_mean_params()
if score is None:
warnings.warn("Singular matrix encountered in GEE score test",
ConvergenceWarning)
return None, None
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = score[red_p:] / scale
amat = np.linalg.inv(ncov1)
bmat_11 = cmat[0:red_p, 0:red_p]
bmat_22 = cmat[red_p:, red_p:]
bmat_12 = cmat[0:red_p, red_p:]
amat_11 = amat[0:red_p, 0:red_p]
amat_12 = amat[0:red_p, red_p:]
score_cov = bmat_22 - np.dot(amat_12.T,
np.linalg.solve(amat_11, bmat_12))
score_cov -= np.dot(bmat_12.T,
np.linalg.solve(amat_11, amat_12))
score_cov += np.dot(amat_12.T,
np.dot(np.linalg.solve(amat_11, bmat_11),
np.linalg.solve(amat_11, amat_12)))
from scipy.stats.distributions import chi2
score_statistic = np.dot(score2,
np.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
self.score_test_results = {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
mean_params = self.constraint.unpack_param(mean_params)
bcov = self.constraint.unpack_cov(bcov)
self.exog_li = save_exog_li
self.cached_means = save_cached_means
self.exog = self.constraint.restore_exog()
return mean_params, bcov
def _update_assoc(self, params):
"""
Update the association parameters
"""
self.cov_struct.update(params)
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects, returns dF(XB) / dX where F(.)
is the fitted mean.
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
# This form should be appropriate for group 1 probit, logit,
# logistic, cloglog, heckprob, xtprobit.
offset_exposure = None
if exog is None:
exog = self.exog
offset_exposure = self._offset_exposure
margeff = self.mean_deriv_exog(exog, params, offset_exposure)
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:, None]
if count_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_count_effects)
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_dummy_effects)
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
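# Sketch of how these derivatives feed into marginal effects (hedged: this
# assumes the fitted results object exposes a get_margeff method, as other
# statsmodels results classes do).
#
# >>> result = model.fit()
# >>> marg = result.get_margeff()          # average dF(XB)/dX over the sample
# >>> print(marg.summary())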
def qic(self, params, scale, cov_params):
"""
Returns quasi-information criteria and quasi-likelihood values.
Parameters
----------
params : array_like
The GEE estimates of the regression parameters.
scale : scalar
Estimated scale parameter
cov_params : array_like
An estimate of the covariance matrix for the
model parameters. Conventionally this is the robust
covariance matrix.
Returns
-------
ql : scalar
The quasi-likelihood value
qic : scalar
A QIC that can be used to compare the mean and covariance
structures of the model.
qicu : scalar
A simplified QIC that can be used to compare mean structures
but not covariance structures
Notes
-----
The quasi-likelihood used here is obtained by numerically evaluating
Wedderburn's integral representation of the quasi-likelihood function.
This approach is valid for all families and links. Many other
packages use analytical expressions for quasi-likelihoods that are
valid in special cases where the link function is canonical. These
analytical expressions may omit additive constants that only depend
on the data. Therefore, the numerical values of our QL and QIC values
will differ from the values reported by other packages. However only
the differences between two QIC values calculated for different models
using the same data are meaningful. Our QIC should produce the same
QIC differences as other software.
When using the QIC for models with unknown scale parameter, use a
common estimate of the scale parameter for all models being compared.
References
----------
.. [*] W. Pan (2001). Akaike's information criterion in generalized
estimating equations. Biometrics (57) 1.
"""
varfunc = self.family.variance
means = []
omega = 0.0
# omega^-1 is the model-based covariance assuming independence
for i in range(self.num_group):
expval, lpr = self.cached_means[i]
means.append(expval)
dmat = self.mean_deriv(self.exog_li[i], lpr)
omega += np.dot(dmat.T, dmat) / scale
means = np.concatenate(means)
# The quasi-likelihood, use change of variables so the integration is
# from -1 to 1.
du = means - self.endog
nstep = 10000
qv = np.empty(nstep)
xv = np.linspace(-0.99999, 1, nstep)
for i, g in enumerate(xv):
u = self.endog + (g + 1) * du / 2.0
vu = varfunc(u)
qv[i] = -np.sum(du**2 * (g + 1) / vu)
qv /= (4 * scale)
from scipy.integrate import trapz
ql = trapz(qv, dx=xv[1] - xv[0])
qicu = -2 * ql + 2 * self.exog.shape[1]
qic = -2 * ql + 2 * np.trace(np.dot(omega, cov_params))
return ql, qic, qicu
class GEEResults(GLMResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model "
"using GEE.\n" + _gee_results_doc)
def __init__(self, model, params, cov_params, scale,
cov_type='robust', use_t=False, regularized=False,
**kwds):
super(GEEResults, self).__init__(
model, params, normalized_cov_params=cov_params,
scale=scale)
# not added by super
self.df_resid = model.df_resid
self.df_model = model.df_model
self.family = model.family
attr_kwds = kwds.pop('attr_kwds', {})
self.__dict__.update(attr_kwds)
# we do not do this if the cov_type has already been set
# subclasses can set it through attr_kwds
if not (hasattr(self, 'cov_type') and
hasattr(self, 'cov_params_default')):
self.cov_type = cov_type # keep alias
covariance_type = self.cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = ("GEE: `cov_type` must be one of " +
", ".join(allowed_covariances))
raise ValueError(msg)
if cov_type == "robust":
cov = self.cov_robust
elif cov_type == "naive":
cov = self.cov_naive
elif cov_type == "bias_reduced":
cov = self.cov_robust_bc
self.cov_params_default = cov
else:
if self.cov_type != cov_type:
raise ValueError('cov_type in argument is different from '
'already attached cov_type')
@cache_readonly
def resid(self):
"""
The response residuals.
"""
return self.resid_response
def standard_errors(self, cov_type="robust"):
"""
This is a convenience function that returns the standard
errors for any covariance type. The value of `bse` is the
standard errors for whichever covariance type is specified as
an argument to `fit` (defaults to "robust").
Parameters
----------
cov_type : str
One of "robust", "naive", or "bias_reduced". Determines
the covariance used to compute standard errors. Defaults
to "robust".
"""
# Check covariance_type
covariance_type = cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = ("GEE: `covariance_type` must be one of " +
", ".join(allowed_covariances))
raise ValueError(msg)
if covariance_type == "robust":
return np.sqrt(np.diag(self.cov_robust))
elif covariance_type == "naive":
return np.sqrt(np.diag(self.cov_naive))
elif covariance_type == "bias_reduced":
if self.cov_robust_bc is None:
raise ValueError(
"GEE: `bias_reduced` covariance not available")
return np.sqrt(np.diag(self.cov_robust_bc))
# Need to override to allow for different covariance types.
@cache_readonly
def bse(self):
return self.standard_errors(self.cov_type)
def score_test(self):
"""
Return the results of a score test for a linear constraint.
Returns
-------
        A dictionary containing the p-value, the test statistic,
and the degrees of freedom for the score test.
Notes
-----
See also GEE.compare_score_test for an alternative way to perform
a score test. GEEResults.score_test is more general, in that it
supports testing arbitrary linear equality constraints. However
GEE.compare_score_test might be easier to use when comparing
two explicit models.
References
----------
Xu Guo and Wei Pan (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
if not hasattr(self.model, "score_test_results"):
msg = "score_test on results instance only available when "
msg += " model was fit with constraints"
raise ValueError(msg)
return self.model.score_test_results
@cache_readonly
def resid_split(self):
"""
Returns the residuals, the endogeneous data minus the fitted
values from the model. The residuals are returned as a list
of arrays containing the residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.resid[ii])
return sresid
@cache_readonly
def resid_centered(self):
"""
Returns the residuals centered within each group.
"""
cresid = self.resid.copy()
for v in self.model.group_labels:
ii = self.model.group_indices[v]
cresid[ii] -= cresid[ii].mean()
return cresid
@cache_readonly
def resid_centered_split(self):
"""
Returns the residuals centered within each group. The
residuals are returned as a list of arrays containing the
centered residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.centered_resid[ii])
return sresid
def qic(self, scale=None):
"""
Returns the QIC and QICu information criteria.
For families with a scale parameter (e.g. Gaussian), provide
as the scale argument the estimated scale from the largest
model under consideration.
If the scale parameter is not provided, the estimated scale
parameter is used. Doing this does not allow comparisons of
QIC values between models.
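        Examples
        --------
        A hypothetical comparison sketch; ``res1`` and ``res2`` are assumed
        to be fitted GEE results for two candidate models on the same data:

        >>> scale = max(res1.scale, res2.scale)
        >>> qic1, qicu1 = res1.qic(scale=scale)
        >>> qic2, qicu2 = res2.qic(scale=scale)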
"""
# It is easy to forget to set the scale parameter. Sometimes
# this is intentional, so we warn.
if scale is None:
warnings.warn("QIC values obtained using scale=None are not "
"appropriate for comparing models")
if scale is None:
scale = self.scale
_, qic, qicu = self.model.qic(self.params, scale,
self.cov_params())
return qic, qicu
# FIXME: alias to be removed, temporary backwards compatibility
split_resid = resid_split
centered_resid = resid_centered
split_centered_resid = resid_centered_split
@Appender(_plot_added_variable_doc % {'extra_params_doc': ''})
def plot_added_variable(self, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None,
ax=None):
from statsmodels.graphics.regressionplots import plot_added_variable
fig = plot_added_variable(self, focus_exog,
resid_type=resid_type,
use_glm_weights=use_glm_weights,
fit_kwargs=fit_kwargs, ax=ax)
return fig
@Appender(_plot_partial_residuals_doc % {'extra_params_doc': ''})
def plot_partial_residuals(self, focus_exog, ax=None):
from statsmodels.graphics.regressionplots import plot_partial_residuals
return plot_partial_residuals(self, focus_exog, ax=ax)
@Appender(_plot_ceres_residuals_doc % {'extra_params_doc': ''})
def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,
ax=None):
from statsmodels.graphics.regressionplots import plot_ceres_residuals
return plot_ceres_residuals(self, focus_exog, frac,
cond_means=cond_means, ax=ax)
def conf_int(self, alpha=.05, cols=None, cov_type=None):
"""
Returns confidence intervals for the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval. i.e., The
default `alpha` = .05 returns a 95% confidence interval.
cols : array_like, optional
`cols` specifies which confidence intervals to return
cov_type : str
The covariance type used for computing standard errors;
must be one of 'robust', 'naive', and 'bias reduced'.
See `GEE` for details.
Notes
-----
The confidence interval is based on the Gaussian distribution.
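        Examples
        --------
        A hypothetical sketch; ``result`` is assumed to be a fitted
        GEEResults instance:

        >>> ci = result.conf_int(alpha=0.1, cov_type="naive")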
"""
# super does not allow to specify cov_type and method is not
# implemented,
# FIXME: remove this method here
if cov_type is None:
bse = self.bse
else:
bse = self.standard_errors(cov_type=cov_type)
params = self.params
dist = stats.norm
q = dist.ppf(1 - alpha / 2)
if cols is None:
lower = self.params - q * bse
upper = self.params + q * bse
else:
cols = np.asarray(cols)
lower = params[cols] - q * bse[cols]
upper = params[cols] + q * bse[cols]
return np.asarray(lzip(lower, upper))
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the GEE regression results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
            Names for the exogenous variables; the default is `var_##`, where
            ## indexes the regressors. Must match the number of parameters in
the model
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
cov_type : str
The covariance type used to compute the standard errors;
one of 'robust' (the usual robust sandwich-type covariance
estimate), 'naive' (ignores dependence), and 'bias
reduced' (the Mancl/DeRouen estimate).
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
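        Examples
        --------
        A hypothetical sketch; ``result`` is assumed to be a fitted
        GEEResults instance:

        >>> print(result.summary(yname="y", alpha=0.05))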
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Generalized']),
('', ['Estimating Equations']),
('Family:', [self.model.family.__class__.__name__]),
('Dependence structure:',
[self.model.cov_struct.__class__.__name__]),
('Date:', None),
('Covariance type: ', [self.cov_type, ])
]
NY = [len(y) for y in self.model.endog_li]
top_right = [('No. Observations:', [sum(NY)]),
('No. clusters:', [len(self.model.endog_li)]),
('Min. cluster size:', [min(NY)]),
('Max. cluster size:', [max(NY)]),
('Mean cluster size:', ["%.1f" % np.mean(NY)]),
('Num. iterations:', ['%d' %
len(self.fit_history['params'])]),
('Scale:', ["%.3f" % self.scale]),
('Time:', None),
]
# The skew of the residuals
skew1 = stats.skew(self.resid)
kurt1 = stats.kurtosis(self.resid)
skew2 = stats.skew(self.centered_resid)
kurt2 = stats.kurtosis(self.centered_resid)
diagn_left = [('Skew:', ["%12.4f" % skew1]),
('Centered skew:', ["%12.4f" % skew2])]
diagn_right = [('Kurtosis:', ["%12.4f" % kurt1]),
('Centered kurtosis:', ["%12.4f" % kurt2])
]
if title is None:
title = self.model.__class__.__name__ + ' ' +\
"Regression Results"
# Override the exog variable names if xname is provided as an
# argument.
if xname is None:
xname = self.model.exog_names
if yname is None:
yname = self.model.endog_names
# Create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname,
title=title)
smry.add_table_params(self, yname=yname, xname=xname,
alpha=alpha, use_t=False)
smry.add_table_2cols(self, gleft=diagn_left,
gright=diagn_right, yname=yname,
xname=xname, title="")
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is 'all'
only margeff will be available.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semi-elasticity -- dy/d(lnx)
- 'eydx' - estimate semi-elasticity -- d(lny)/dx
            Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
atexog : array_like, optional
Optionally, you can provide the exogenous variables over which to
            get the marginal effects. This should be a dictionary with the
            zero-indexed column number as the key and the value at which to
            hold that variable as the dictionary value.
Default is None for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
effects : ndarray
the marginal effect corresponding to the input options
Notes
-----
When using after Poisson, returns the expected number of events
per period, assuming that the model is loglinear.
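        Examples
        --------
        A hypothetical sketch; ``result`` is assumed to be a fitted
        GEEResults instance:

        >>> marg = result.get_margeff(at='overall', method='dydx')
        >>> print(marg.summary())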
"""
if self.model.constraint is not None:
warnings.warn("marginal effects ignore constraints",
ValueWarning)
return GEEMargins(self, (at, method, atexog, dummy, count))
def plot_isotropic_dependence(self, ax=None, xpoints=10,
min_n=50):
"""
Create a plot of the pairwise products of within-group
residuals against the corresponding time differences. This
plot can be used to assess the possible form of an isotropic
covariance structure.
Parameters
----------
ax : AxesSubplot
An axes on which to draw the graph. If None, new
figure and axes objects are created
xpoints : scalar or array_like
If scalar, the number of points equally spaced points on
the time difference axis used to define bins for
calculating local means. If an array, the specific points
that define the bins.
min_n : int
The minimum sample size in a bin for the mean residual
product to be included on the plot.
"""
from statsmodels.graphics import utils as gutils
resid = self.model.cluster_list(self.resid)
time = self.model.cluster_list(self.model.time)
# All within-group pairwise time distances (xdt) and the
# corresponding products of scaled residuals (xre).
xre, xdt = [], []
for re, ti in zip(resid, time):
ix = np.tril_indices(re.shape[0], 0)
re = re[ix[0]] * re[ix[1]] / self.scale ** 2
xre.append(re)
dists = np.sqrt(((ti[ix[0], :] - ti[ix[1], :]) ** 2).sum(1))
xdt.append(dists)
xre = np.concatenate(xre)
xdt = np.concatenate(xdt)
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# Convert to a correlation
ii = np.flatnonzero(xdt == 0)
v0 = np.mean(xre[ii])
xre /= v0
# Use the simple average to smooth, since fancier smoothers
# that trim and downweight outliers give biased results (we
# need the actual mean of a skewed distribution).
if np.isscalar(xpoints):
xpoints = np.linspace(0, max(xdt), xpoints)
dg = np.digitize(xdt, xpoints)
dgu = np.unique(dg)
hist = np.asarray([np.sum(dg == k) for k in dgu])
ii = np.flatnonzero(hist >= min_n)
dgu = dgu[ii]
dgy = np.asarray([np.mean(xre[dg == k]) for k in dgu])
dgx = np.asarray([np.mean(xdt[dg == k]) for k in dgu])
ax.plot(dgx, dgy, '-', color='orange', lw=5)
ax.set_xlabel("Time difference")
ax.set_ylabel("Product of scaled residuals")
return fig
def sensitivity_params(self, dep_params_first,
dep_params_last, num_steps):
"""
Refits the GEE model using a sequence of values for the
dependence parameters.
Parameters
----------
dep_params_first : array_like
The first dep_params in the sequence
dep_params_last : array_like
The last dep_params in the sequence
num_steps : int
The number of dep_params in the sequence
Returns
-------
results : array_like
The GEEResults objects resulting from the fits.
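        Examples
        --------
        A hypothetical sketch; ``result`` is assumed to be a fitted GEE
        result with an exchangeable working correlation, and the endpoint
        values 0.0 and 0.5 are illustrative assumptions:

        >>> fits = result.sensitivity_params(0.0, 0.5, 5)
        >>> dep_params = [f.model.cov_struct.dep_params for f in fits]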
"""
model = self.model
import copy
cov_struct = copy.deepcopy(self.model.cov_struct)
# We are fixing the dependence structure in each run.
update_dep = model.update_dep
model.update_dep = False
dep_params = []
results = []
for x in np.linspace(0, 1, num_steps):
dp = x * dep_params_last + (1 - x) * dep_params_first
dep_params.append(dp)
model.cov_struct = copy.deepcopy(cov_struct)
model.cov_struct.dep_params = dp
rslt = model.fit(start_params=self.params,
ctol=self.ctol,
params_niter=self.params_niter,
first_dep_update=self.first_dep_update,
cov_type=self.cov_type)
results.append(rslt)
model.update_dep = update_dep
return results
# FIXME: alias to be removed, temporary backwards compatibility
params_sensitivity = sensitivity_params
class GEEResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {
'centered_resid': 'rows',
}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(GEEResultsWrapper, GEEResults) # noqa:E305
class OrdinalGEE(GEE):
__doc__ = (
" Ordinal Response Marginal Regression Model using GEE\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_ordinal_family_doc,
'example': _gee_ordinal_example})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
if family is None:
family = families.Binomial()
else:
if not isinstance(family, families.Binomial):
raise ValueError("ordinal GEE must use a Binomial family")
if cov_struct is None:
cov_struct = cov_structs.OrdinalIndependence()
endog, exog, groups, time, offset = self.setup_ordinal(
endog, exog, groups, time, offset)
super(OrdinalGEE, self).__init__(endog, exog, groups, time,
family, cov_struct, missing,
offset, dep_data, constraint)
def setup_ordinal(self, endog, exog, groups, time, offset):
"""
Restructure ordinal data as binary indicators so that they can
be analyzed using Generalized Estimating Equations.
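        Notes
        -----
        As an illustration, an ordinal response taking the values 1, 2 and 3
        is expanded into two binary rows per observation, I(y > 1) and
        I(y > 2), each with its own intercept column, so that the expanded
        data can be fit as a binary GEE.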
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
nrows = ncut * len(endog)
exog_out = np.zeros((nrows, exog.shape[1]),
dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
intercepts = np.zeros((nrows, ncut), dtype=np.float64)
groups_out = np.zeros(nrows, dtype=groups.dtype)
time_out = np.zeros((nrows, time.shape[1]),
dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for (exog_row, endog_value, group_value, time_value,
offset_value) in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
exog_out[jrow, :] = exog_row
endog_out[jrow] = (int(endog_value > thresh))
intercepts[jrow, thresh_ix] = 1
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
exog_out = np.concatenate((intercepts, exog_out), axis=1)
# exog column names, including intercepts
xnames = ["I(y>%.1f)" % v for v in endog_cuts]
if type(self.exog_orig) == pd.DataFrame:
xnames.extend(self.exog_orig.columns)
else:
xnames.extend(["x%d" % k for k in range(1, exog.shape[1] + 1)])
exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve the endog name if there is one
if type(self.endog_orig) == pd.Series:
endog_out = pd.Series(endog_out, name=self.endog_orig.name)
return endog_out, exog_out, groups_out, time_out, offset_out
def _starting_params(self):
exposure = getattr(self, "exposure", None)
model = GEE(self.endog, self.exog, self.groups,
time=self.time, family=families.Binomial(),
offset=self.offset, exposure=exposure)
result = model.fit()
return result.params
@Appender(_gee_fit_doc)
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust'):
rslt = super(OrdinalGEE, self).fit(maxiter, ctol, start_params,
params_niter, first_dep_update,
cov_type=cov_type)
rslt = rslt._results # use unwrapped instance
res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))
# Convert the GEEResults to an OrdinalGEEResults
ord_rslt = OrdinalGEEResults(self, rslt.params,
rslt.cov_params() / rslt.scale,
rslt.scale,
cov_type=cov_type,
attr_kwds=res_kwds)
# for k in rslt._props:
# setattr(ord_rslt, k, getattr(rslt, k))
# TODO: document or delete
return OrdinalGEEResultsWrapper(ord_rslt)
class OrdinalGEEResults(GEEResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model"
"for an ordinal response using GEE.\n"
+ _gee_results_doc)
def plot_distribution(self, ax=None, exog_values=None):
"""
Plot the fitted probabilities of endog in an ordinal model,
for specified values of the predictors.
Parameters
----------
ax : AxesSubplot
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array_like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
        Examples
        --------
We have a model with covariates 'age' and 'sex', and wish to
plot the probabilities P(endog=y | exog) for males (sex=0) and
for females (sex=1), as separate paths on the plot. Since
'age' is not included below in the map, it is held fixed at
its mean value.
>>> ev = [{"sex": 1}, {"sex": 0}]
        >>> rslt.plot_distribution(exog_values=ev)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [{}, ]
exog_means = self.model.exog.mean(0)
ix_icept = [i for i, x in enumerate(self.model.exog_names) if
x.startswith("I(")]
for ev in exog_values:
for k in ev.keys():
if k not in self.model.exog_names:
raise ValueError("%s is not a variable in the model"
% k)
# Get the fitted probability for each level, at the given
# covariate values.
pr = []
for j in ix_icept:
xp = np.zeros_like(self.params)
xp[j] = 1.
for i, vn in enumerate(self.model.exog_names):
if i in ix_icept:
continue
# User-specified value
if vn in ev:
xp[i] = ev[vn]
# Mean value
else:
xp[i] = exog_means[i]
p = 1 / (1 + np.exp(-np.dot(xp, self.params)))
pr.append(p)
pr.insert(0, 1)
pr.append(0)
pr = np.asarray(pr)
prd = -np.diff(pr)
ax.plot(self.model.endog_values, prd, 'o-')
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_ylim(0, 1)
return fig
def _score_test_submodel(par, sub):
"""
Return transformation matrices for design matrices.
Parameters
----------
par : instance
The parent model
sub : instance
The sub-model
Returns
-------
qm : array_like
Matrix mapping the design matrix of the parent to the design matrix
for the sub-model.
qc : array_like
Matrix mapping the design matrix of the parent to the orthogonal
complement of the columnspace of the submodel in the columnspace
of the parent.
Notes
-----
Returns None, None if the provided submodel is not actually a submodel.
"""
x1 = par.exog
x2 = sub.exog
u, s, vt = np.linalg.svd(x1, 0)
# Get the orthogonal complement of col(x2) in col(x1).
a, _, _ = np.linalg.svd(x2, 0)
a = u - np.dot(a, np.dot(a.T, u))
x2c, sb, _ = np.linalg.svd(a, 0)
x2c = x2c[:, sb > 1e-12]
# x1 * qm = x2
qm = np.dot(vt.T, np.dot(u.T, x2) / s[:, None])
e = np.max(np.abs(x2 - np.dot(x1, qm)))
if e > 1e-8:
return None, None
# x1 * qc = x2c
qc = np.dot(vt.T, np.dot(u.T, x2c) / s[:, None])
return qm, qc
class OrdinalGEEResultsWrapper(GEEResultsWrapper):
pass
wrap.populate_wrapper(OrdinalGEEResultsWrapper, OrdinalGEEResults) # noqa:E305
class NominalGEE(GEE):
__doc__ = (
" Nominal Response Marginal Regression Model using GEE.\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_nominal_family_doc,
'example': _gee_nominal_example})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
endog, exog, groups, time, offset = self.setup_nominal(
endog, exog, groups, time, offset)
if family is None:
family = _Multinomial(self.ncut + 1)
if cov_struct is None:
cov_struct = cov_structs.NominalIndependence()
super(NominalGEE, self).__init__(
endog, exog, groups, time, family, cov_struct, missing,
offset, dep_data, constraint)
def _starting_params(self):
exposure = getattr(self, "exposure", None)
model = GEE(self.endog, self.exog, self.groups,
time=self.time, family=families.Binomial(),
offset=self.offset, exposure=exposure)
result = model.fit()
return result.params
def setup_nominal(self, endog, exog, groups, time, offset):
"""
Restructure nominal data as binary indicators so that they can
be analyzed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
self.ncut = ncut
nrows = len(endog_cuts) * exog.shape[0]
ncols = len(endog_cuts) * exog.shape[1]
exog_out = np.zeros((nrows, ncols), dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
groups_out = np.zeros(nrows, dtype=np.float64)
time_out = np.zeros((nrows, time.shape[1]),
dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for (exog_row, endog_value, group_value, time_value,
offset_value) in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
u = np.zeros(len(endog_cuts), dtype=np.float64)
u[thresh_ix] = 1
exog_out[jrow, :] = np.kron(u, exog_row)
endog_out[jrow] = (int(endog_value == thresh))
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
# exog names
if isinstance(self.exog_orig, pd.DataFrame):
xnames_in = self.exog_orig.columns
else:
xnames_in = ["x%d" % k for k in range(1, exog.shape[1] + 1)]
xnames = []
for tr in endog_cuts:
xnames.extend(["%s[%.1f]" % (v, tr) for v in xnames_in])
        exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve endog name if there is one
if isinstance(self.endog_orig, pd.Series):
endog_out = pd.Series(endog_out, name=self.endog_orig.name)
return endog_out, exog_out, groups_out, time_out, offset_out
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array_like
The exogeneous data at which the derivative is computed,
number of rows must be a multiple of `ncut`.
lin_pred : array_like
The values of the linear predictor, length must be multiple
of `ncut`.
Returns
-------
The derivative of the expected endog with respect to the
parameters.
"""
expval = np.exp(lin_pred)
# Reshape so that each row contains all the indicators
# corresponding to one multinomial observation.
expval_m = np.reshape(expval, (len(expval) // self.ncut,
self.ncut))
# The normalizing constant for the multinomial probabilities.
denom = 1 + expval_m.sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
# The multinomial probabilities
mprob = expval / denom
# First term of the derivative: denom * expval' / denom^2 =
# expval' / denom.
dmat = mprob[:, None] * exog
# Second term of the derivative: -expval * denom' / denom^2
ddenom = expval[:, None] * exog
dmat -= mprob[:, None] * ddenom / denom[:, None]
return dmat
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog for the
multinomial model, used in analyzing marginal effects.
Parameters
----------
exog : array_like
The exogeneous data at which the derivative is computed,
number of rows must be a multiple of `ncut`.
lpr : array_like
The linear predictor values, length must be multiple of
`ncut`.
Returns
-------
The value of the derivative of the expected endog with respect
to exog.
Notes
-----
offset_exposure must be set at None for the multinomial family.
"""
if offset_exposure is not None:
warnings.warn("Offset/exposure ignored for the multinomial family",
ValueWarning)
lpr = np.dot(exog, params)
expval = np.exp(lpr)
expval_m = np.reshape(expval, (len(expval) // self.ncut,
self.ncut))
denom = 1 + expval_m.sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
bmat0 = np.outer(np.ones(exog.shape[0]), params)
# Masking matrix
qmat = []
for j in range(self.ncut):
ee = np.zeros(self.ncut, dtype=np.float64)
ee[j] = 1
qmat.append(np.kron(ee, np.ones(len(params) // self.ncut)))
qmat = np.array(qmat)
qmat = np.kron(np.ones((exog.shape[0] // self.ncut, 1)), qmat)
bmat = bmat0 * qmat
dmat = expval[:, None] * bmat / denom[:, None]
expval_mb = np.kron(expval_m, np.ones((self.ncut, 1)))
expval_mb = np.kron(expval_mb, np.ones((1, self.ncut)))
dmat -= expval[:, None] * (bmat * expval_mb) / denom[:, None] ** 2
return dmat
@Appender(_gee_fit_doc)
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust'):
rslt = super(NominalGEE, self).fit(maxiter, ctol, start_params,
params_niter, first_dep_update,
cov_type=cov_type)
if rslt is None:
warnings.warn("GEE updates did not converge",
ConvergenceWarning)
return None
rslt = rslt._results # use unwrapped instance
res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))
# Convert the GEEResults to a NominalGEEResults
nom_rslt = NominalGEEResults(self, rslt.params,
rslt.cov_params() / rslt.scale,
rslt.scale,
cov_type=cov_type,
attr_kwds=res_kwds)
# TODO: document or delete
# for k in rslt._props:
# setattr(nom_rslt, k, getattr(rslt, k))
return NominalGEEResultsWrapper(nom_rslt)
class NominalGEEResults(GEEResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model"
"for a nominal response using GEE.\n"
+ _gee_results_doc)
def plot_distribution(self, ax=None, exog_values=None):
"""
        Plot the fitted probabilities of endog in a nominal model,
for specified values of the predictors.
Parameters
----------
ax : AxesSubplot
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array_like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
        Examples
        --------
We have a model with covariates 'age' and 'sex', and wish to
plot the probabilities P(endog=y | exog) for males (sex=0) and
for females (sex=1), as separate paths on the plot. Since
'age' is not included below in the map, it is held fixed at
its mean value.
>>> ex = [{"sex": 1}, {"sex": 0}]
        >>> rslt.plot_distribution(exog_values=ex)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [{}, ]
link = self.model.family.link.inverse
ncut = self.model.family.ncut
k = int(self.model.exog.shape[1] / ncut)
exog_means = self.model.exog.mean(0)[0:k]
exog_names = self.model.exog_names[0:k]
exog_names = [x.split("[")[0] for x in exog_names]
params = np.reshape(self.params,
(ncut, len(self.params) // ncut))
for ev in exog_values:
exog = exog_means.copy()
for k in ev.keys():
if k not in exog_names:
raise ValueError("%s is not a variable in the model"
% k)
ii = exog_names.index(k)
exog[ii] = ev[k]
lpr = np.dot(params, exog)
pr = link(lpr)
pr = np.r_[pr, 1 - pr.sum()]
ax.plot(self.model.endog_values, pr, 'o-')
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_xticks(self.model.endog_values)
ax.set_xticklabels(self.model.endog_values)
ax.set_ylim(0, 1)
return fig
class NominalGEEResultsWrapper(GEEResultsWrapper):
pass
wrap.populate_wrapper(NominalGEEResultsWrapper, NominalGEEResults) # noqa:E305
class _MultinomialLogit(Link):
"""
The multinomial logit transform, only for use with GEE.
Notes
-----
The data are assumed coded as binary indicators, where each
observed multinomial value y is coded as I(y == S[0]), ..., I(y ==
S[-1]), where S is the set of possible response labels, excluding
    the largest one. Therefore functions in this class should only
    be called using vector arguments whose length is a multiple of |S|
= ncut, which is an argument to be provided when initializing the
class.
call and derivative use a private method _clean to trim p by 1e-10
so that p is in (0, 1)
"""
def __init__(self, ncut):
self.ncut = ncut
def inverse(self, lpr):
"""
Inverse of the multinomial logit transform, which gives the
expected values of the data as a function of the linear
predictors.
Parameters
----------
lpr : array_like (length must be divisible by `ncut`)
The linear predictors
Returns
-------
prob : ndarray
Probabilities, or expected values
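        Notes
        -----
        For the block of linear predictors lpr_1, ..., lpr_ncut belonging to
        a single multinomial observation, the j-th expected value is
        exp(lpr_j) / (1 + sum_k exp(lpr_k)), which the code below computes
        block-wise.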
"""
expval = np.exp(lpr)
denom = 1 + np.reshape(expval, (len(expval) // self.ncut,
self.ncut)).sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
prob = expval / denom
return prob
class _Multinomial(families.Family):
"""
Pseudo-link function for fitting nominal multinomial models with
GEE. Not for use outside the GEE class.
"""
links = [_MultinomialLogit, ]
variance = varfuncs.binary
safe_links = [_MultinomialLogit, ]
def __init__(self, nlevels):
"""
Parameters
----------
nlevels : int
The number of distinct categories for the multinomial
distribution.
"""
self.initialize(nlevels)
def initialize(self, nlevels):
self.ncut = nlevels - 1
self.link = _MultinomialLogit(self.ncut)
class GEEMargins(object):
"""
Estimated marginal effects for a regression model fit with GEE.
Parameters
----------
results : GEEResults instance
The results instance of a fitted discrete choice model
args : tuple
Args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
kwargs : dict
Keyword args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
"""
def __init__(self, results, args, kwargs={}):
self._cache = {}
self.results = results
self.get_margeff(*args, **kwargs)
def _reset(self):
self._cache = {}
@cache_readonly
def tvalues(self):
_check_at_is_all(self.margeff_options)
return self.margeff / self.margeff_se
def summary_frame(self, alpha=.05):
"""
Returns a DataFrame summarizing the marginal effects.
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
        frame : DataFrame
A DataFrame summarizing the marginal effects.
"""
_check_at_is_all(self.margeff_options)
from pandas import DataFrame
names = [_transform_names[self.margeff_options['method']],
'Std. Err.', 'z', 'Pr(>|z|)',
                 'Conf. Int. Low', 'Conf. Int. Hi.']
ind = self.results.model.exog.var(0) != 0 # True if not a constant
exog_names = self.results.model.exog_names
var_names = [name for i, name in enumerate(exog_names) if ind[i]]
table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,
self.pvalues, self.conf_int(alpha)))
return DataFrame(table, columns=names, index=var_names)
@cache_readonly
def pvalues(self):
_check_at_is_all(self.margeff_options)
return stats.norm.sf(np.abs(self.tvalues)) * 2
def conf_int(self, alpha=.05):
"""
Returns the confidence intervals of the marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
conf_int : ndarray
An array with lower, upper confidence intervals for the marginal
effects.
"""
_check_at_is_all(self.margeff_options)
me_se = self.margeff_se
q = stats.norm.ppf(1 - alpha / 2)
lower = self.margeff - q * me_se
upper = self.margeff + q * me_se
return np.asarray(lzip(lower, upper))
def summary(self, alpha=.05):
"""
Returns a summary table for marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
Summary : SummaryTable
A SummaryTable instance
"""
_check_at_is_all(self.margeff_options)
results = self.results
model = results.model
title = model.__class__.__name__ + " Marginal Effects"
method = self.margeff_options['method']
top_left = [('Dep. Variable:', [model.endog_names]),
('Method:', [method]),
('At:', [self.margeff_options['at']]), ]
from statsmodels.iolib.summary import (Summary, summary_params,
table_extend)
exog_names = model.exog_names[:] # copy
smry = Summary()
const_idx = model.data.const_idx
if const_idx is not None:
exog_names.pop(const_idx)
J = int(getattr(model, "J", 1))
if J > 1:
yname, yname_list = results._get_endog_name(model.endog_names,
None, all=True)
else:
yname = model.endog_names
yname_list = [yname]
smry.add_table_2cols(self, gleft=top_left, gright=[],
yname=yname, xname=exog_names, title=title)
# NOTE: add_table_params is not general enough yet for margeff
# could use a refactor with getattr instead of hard-coded params
# tvalues etc.
table = []
conf_int = self.conf_int(alpha)
margeff = self.margeff
margeff_se = self.margeff_se
tvalues = self.tvalues
pvalues = self.pvalues
if J > 1:
for eq in range(J):
restup = (results, margeff[:, eq], margeff_se[:, eq],
tvalues[:, eq], pvalues[:, eq], conf_int[:, :, eq])
tble = summary_params(restup, yname=yname_list[eq],
xname=exog_names, alpha=alpha,
use_t=False,
skip_header=True)
tble.title = yname_list[eq]
# overwrite coef with method name
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|',
'[%3.1f%% Conf. Int.]' % (100 - alpha * 100)]
tble.insert_header_row(0, header)
# from IPython.core.debugger import Pdb; Pdb().set_trace()
table.append(tble)
table = table_extend(table, keep_headers=True)
else:
restup = (results, margeff, margeff_se, tvalues, pvalues, conf_int)
table = summary_params(restup, yname=yname, xname=exog_names,
alpha=alpha, use_t=False, skip_header=True)
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|', '[%3.1f%% Conf. Int.]' % (100 - alpha * 100)]
table.insert_header_row(0, header)
smry.tables.append(table)
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
self._reset() # always reset the cache when this is called
# TODO: if at is not all or overall, we can also put atexog values
# in summary table head
method = method.lower()
at = at.lower()
_check_margeff_args(at, method)
self.margeff_options = dict(method=method, at=at)
results = self.results
model = results.model
params = results.params
exog = model.exog.copy() # copy because values are changed
effects_idx = exog.var(0) != 0
const_idx = model.data.const_idx
if dummy:
_check_discrete_args(at, method)
dummy_idx, dummy = _get_dummy_index(exog, const_idx)
else:
dummy_idx = None
if count:
_check_discrete_args(at, method)
count_idx, count = _get_count_index(exog, const_idx)
else:
count_idx = None
# get the exogenous variables
exog = _get_margeff_exog(exog, at, atexog, effects_idx)
# get base marginal effects, handled by sub-classes
effects = model._derivative_exog(params, exog, method,
dummy_idx, count_idx)
effects = _effects_at(effects, at)
if at == 'all':
self.margeff = effects[:, effects_idx]
else:
# Set standard error of the marginal effects by Delta method.
margeff_cov, margeff_se = margeff_cov_with_se(
model, params, exog, results.cov_params(), at,
model._derivative_exog, dummy_idx, count_idx,
method, 1)
# do not care about at constant
self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
self.margeff_se = margeff_se[effects_idx]
self.margeff = effects[effects_idx]
|
the-stack_0_24456 | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
import unittest
import dns.immutable
import dns._immutable_attr
try:
import dns._immutable_ctx as immutable_ctx
_have_contextvars = True
except ImportError:
_have_contextvars = False
class immutable_ctx:
pass
class ImmutableTestCase(unittest.TestCase):
def test_immutable_dict_hash(self):
d1 = dns.immutable.Dict({'a': 1, 'b': 2})
d2 = dns.immutable.Dict({'b': 2, 'a': 1})
d3 = {'b': 2, 'a': 1}
self.assertEqual(d1, d2)
self.assertEqual(d2, d3)
self.assertEqual(hash(d1), hash(d2))
def test_immutable_dict_hash_cache(self):
d = dns.immutable.Dict({'a': 1, 'b': 2})
self.assertEqual(d._hash, None)
h1 = hash(d)
self.assertEqual(d._hash, h1)
h2 = hash(d)
self.assertEqual(h1, h2)
def test_constify(self):
items = (
(bytearray([1, 2, 3]), b'\x01\x02\x03'),
((1, 2, 3), (1, 2, 3)),
((1, [2], 3), (1, (2,), 3)),
([1, 2, 3], (1, 2, 3)),
([1, {'a': [1, 2]}],
(1, dns.immutable.Dict({'a': (1, 2)}))),
('hi', 'hi'),
(b'hi', b'hi'),
)
for input, expected in items:
self.assertEqual(dns.immutable.constify(input), expected)
self.assertIsInstance(dns.immutable.constify({'a': 1}),
dns.immutable.Dict)
class DecoratorTestCase(unittest.TestCase):
immutable_module = dns._immutable_attr
def make_classes(self):
class A:
def __init__(self, a, akw=10):
self.a = a
self.akw = akw
class B(A):
def __init__(self, a, b):
super().__init__(a, akw=20)
self.b = b
B = self.immutable_module.immutable(B)
# note C is immutable by inheritance
class C(B):
def __init__(self, a, b, c):
super().__init__(a, b)
self.c = c
C = self.immutable_module.immutable(C)
class SA:
__slots__ = ('a', 'akw')
def __init__(self, a, akw=10):
self.a = a
self.akw = akw
class SB(A):
            __slots__ = ('b',)
def __init__(self, a, b):
super().__init__(a, akw=20)
self.b = b
SB = self.immutable_module.immutable(SB)
# note SC is immutable by inheritance and has no slots of its own
class SC(SB):
def __init__(self, a, b, c):
super().__init__(a, b)
self.c = c
SC = self.immutable_module.immutable(SC)
return ((A, B, C), (SA, SB, SC))
def test_basic(self):
for A, B, C in self.make_classes():
a = A(1)
self.assertEqual(a.a, 1)
self.assertEqual(a.akw, 10)
b = B(11, 21)
self.assertEqual(b.a, 11)
self.assertEqual(b.akw, 20)
self.assertEqual(b.b, 21)
c = C(111, 211, 311)
self.assertEqual(c.a, 111)
self.assertEqual(c.akw, 20)
self.assertEqual(c.b, 211)
self.assertEqual(c.c, 311)
# changing A is ok!
a.a = 11
self.assertEqual(a.a, 11)
# changing B is not!
with self.assertRaises(TypeError):
b.a = 11
with self.assertRaises(TypeError):
del b.a
def test_constructor_deletes_attribute(self):
class A:
def __init__(self, a):
self.a = a
self.b = a
del self.b
A = self.immutable_module.immutable(A)
a = A(10)
self.assertEqual(a.a, 10)
self.assertFalse(hasattr(a, 'b'))
def test_no_collateral_damage(self):
# A and B are immutable but not related. The magic that lets
# us write to immutable things while initializing B should not let
# B mess with A.
class A:
def __init__(self, a):
self.a = a
A = self.immutable_module.immutable(A)
class B:
def __init__(self, a, b):
self.b = a.a + b
# rudely attempt to mutate innocent immutable bystander 'a'
a.a = 1000
B = self.immutable_module.immutable(B)
a = A(10)
self.assertEqual(a.a, 10)
with self.assertRaises(TypeError):
B(a, 20)
self.assertEqual(a.a, 10)
@unittest.skipIf(not _have_contextvars, "contextvars not available")
class CtxDecoratorTestCase(DecoratorTestCase):
immutable_module = immutable_ctx
|
the-stack_0_24457 | """Support for Prometheus metrics export."""
import logging
import string
from aiohttp import web
import prometheus_client
import voluptuous as vol
from homeassistant import core as hacore
from homeassistant.components.climate.const import (
ATTR_CURRENT_TEMPERATURE,
ATTR_HVAC_ACTION,
CURRENT_HVAC_ACTIONS,
)
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.humidifier.const import (
ATTR_AVAILABLE_MODES,
ATTR_HUMIDITY,
ATTR_MODE,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_TEMPERATURE,
ATTR_UNIT_OF_MEASUREMENT,
CONTENT_TYPE_TEXT_PLAIN,
EVENT_STATE_CHANGED,
STATE_ON,
STATE_UNAVAILABLE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
UNIT_PERCENTAGE,
)
from homeassistant.helpers import entityfilter, state as state_helper
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_values import EntityValues
from homeassistant.util.temperature import fahrenheit_to_celsius
_LOGGER = logging.getLogger(__name__)
API_ENDPOINT = "/api/prometheus"
DOMAIN = "prometheus"
CONF_FILTER = "filter"
CONF_PROM_NAMESPACE = "namespace"
CONF_COMPONENT_CONFIG = "component_config"
CONF_COMPONENT_CONFIG_GLOB = "component_config_glob"
CONF_COMPONENT_CONFIG_DOMAIN = "component_config_domain"
CONF_DEFAULT_METRIC = "default_metric"
CONF_OVERRIDE_METRIC = "override_metric"
COMPONENT_CONFIG_SCHEMA_ENTRY = vol.Schema(
{vol.Optional(CONF_OVERRIDE_METRIC): cv.string}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
{
vol.Optional(CONF_FILTER, default={}): entityfilter.FILTER_SCHEMA,
vol.Optional(CONF_PROM_NAMESPACE): cv.string,
vol.Optional(CONF_DEFAULT_METRIC): cv.string,
vol.Optional(CONF_OVERRIDE_METRIC): cv.string,
vol.Optional(CONF_COMPONENT_CONFIG, default={}): vol.Schema(
{cv.entity_id: COMPONENT_CONFIG_SCHEMA_ENTRY}
),
vol.Optional(CONF_COMPONENT_CONFIG_GLOB, default={}): vol.Schema(
{cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY}
),
vol.Optional(CONF_COMPONENT_CONFIG_DOMAIN, default={}): vol.Schema(
{cv.string: COMPONENT_CONFIG_SCHEMA_ENTRY}
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
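# A minimal, hypothetical configuration.yaml sketch for this integration; the
# namespace value, included domains, entity id and metric name below are
# illustrative assumptions, not defaults:
#
# prometheus:
#   namespace: hass
#   filter:
#     include_domains:
#       - sensor
#       - climate
#   component_config:
#     sensor.outside_temperature:
#       override_metric: outside_temperature_c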
def setup(hass, config):
"""Activate Prometheus component."""
hass.http.register_view(PrometheusView(prometheus_client))
conf = config[DOMAIN]
entity_filter = conf[CONF_FILTER]
namespace = conf.get(CONF_PROM_NAMESPACE)
climate_units = hass.config.units.temperature_unit
override_metric = conf.get(CONF_OVERRIDE_METRIC)
default_metric = conf.get(CONF_DEFAULT_METRIC)
component_config = EntityValues(
conf[CONF_COMPONENT_CONFIG],
conf[CONF_COMPONENT_CONFIG_DOMAIN],
conf[CONF_COMPONENT_CONFIG_GLOB],
)
metrics = PrometheusMetrics(
prometheus_client,
entity_filter,
namespace,
climate_units,
component_config,
override_metric,
default_metric,
)
hass.bus.listen(EVENT_STATE_CHANGED, metrics.handle_event)
return True
class PrometheusMetrics:
"""Model all of the metrics which should be exposed to Prometheus."""
def __init__(
self,
prometheus_cli,
entity_filter,
namespace,
climate_units,
component_config,
override_metric,
default_metric,
):
"""Initialize Prometheus Metrics."""
self.prometheus_cli = prometheus_cli
self._component_config = component_config
self._override_metric = override_metric
self._default_metric = default_metric
self._filter = entity_filter
self._sensor_metric_handlers = [
self._sensor_override_component_metric,
self._sensor_override_metric,
self._sensor_attribute_metric,
self._sensor_default_metric,
self._sensor_fallback_metric,
]
if namespace:
self.metrics_prefix = f"{namespace}_"
else:
self.metrics_prefix = ""
self._metrics = {}
self._climate_units = climate_units
def handle_event(self, event):
"""Listen for new messages on the bus, and add them to Prometheus."""
state = event.data.get("new_state")
if state is None:
return
entity_id = state.entity_id
_LOGGER.debug("Handling state update for %s", entity_id)
domain, _ = hacore.split_entity_id(entity_id)
if not self._filter(state.entity_id):
return
handler = f"_handle_{domain}"
if hasattr(self, handler) and state.state != STATE_UNAVAILABLE:
getattr(self, handler)(state)
labels = self._labels(state)
state_change = self._metric(
"state_change", self.prometheus_cli.Counter, "The number of state changes"
)
state_change.labels(**labels).inc()
entity_available = self._metric(
"entity_available",
self.prometheus_cli.Gauge,
"Entity is available (not in the unavailable state)",
)
entity_available.labels(**labels).set(float(state.state != STATE_UNAVAILABLE))
last_updated_time_seconds = self._metric(
"last_updated_time_seconds",
self.prometheus_cli.Gauge,
"The last_updated timestamp",
)
last_updated_time_seconds.labels(**labels).set(state.last_updated.timestamp())
def _handle_attributes(self, state):
for key, value in state.attributes.items():
metric = self._metric(
f"{state.domain}_attr_{key.lower()}",
self.prometheus_cli.Gauge,
f"{key} attribute of {state.domain} entity",
)
try:
value = float(value)
metric.labels(**self._labels(state)).set(value)
except (ValueError, TypeError):
pass
def _metric(self, metric, factory, documentation, extra_labels=None):
labels = ["entity", "friendly_name", "domain"]
if extra_labels is not None:
labels.extend(extra_labels)
try:
return self._metrics[metric]
except KeyError:
full_metric_name = self._sanitize_metric_name(
f"{self.metrics_prefix}{metric}"
)
self._metrics[metric] = factory(full_metric_name, documentation, labels)
return self._metrics[metric]
@staticmethod
def _sanitize_metric_name(metric: str) -> str:
return "".join(
[
c
if c in string.ascii_letters
or c in string.digits
or c == "_"
or c == ":"
else f"u{hex(ord(c))}"
for c in metric
]
)
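    # Illustrative (hypothetical) example: any character outside
    # [A-Za-z0-9_:] is replaced by "u" plus its hex codepoint, so a metric
    # named "temperature_°c" would be sanitized to "temperature_u0xb0c".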
@staticmethod
def state_as_number(state):
"""Return a state casted to a float."""
try:
value = state_helper.state_as_number(state)
except ValueError:
_LOGGER.debug("Could not convert %s to float", state)
value = 0
return value
@staticmethod
def _labels(state):
return {
"entity": state.entity_id,
"domain": state.domain,
"friendly_name": state.attributes.get("friendly_name"),
}
def _battery(self, state):
if "battery_level" in state.attributes:
metric = self._metric(
"battery_level_percent",
self.prometheus_cli.Gauge,
"Battery level as a percentage of its capacity",
)
try:
value = float(state.attributes["battery_level"])
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
def _handle_binary_sensor(self, state):
metric = self._metric(
"binary_sensor_state",
self.prometheus_cli.Gauge,
"State of the binary sensor (0/1)",
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_input_boolean(self, state):
metric = self._metric(
"input_boolean_state",
self.prometheus_cli.Gauge,
"State of the input boolean (0/1)",
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_device_tracker(self, state):
metric = self._metric(
"device_tracker_state",
self.prometheus_cli.Gauge,
"State of the device tracker (0/1)",
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_person(self, state):
metric = self._metric(
"person_state", self.prometheus_cli.Gauge, "State of the person (0/1)"
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_light(self, state):
metric = self._metric(
"light_state", self.prometheus_cli.Gauge, "Load level of a light (0..1)"
)
try:
if "brightness" in state.attributes and state.state == STATE_ON:
value = state.attributes["brightness"] / 255.0
else:
value = self.state_as_number(state)
value = value * 100
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
def _handle_lock(self, state):
metric = self._metric(
"lock_state", self.prometheus_cli.Gauge, "State of the lock (0/1)"
)
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
def _handle_climate(self, state):
temp = state.attributes.get(ATTR_TEMPERATURE)
if temp:
if self._climate_units == TEMP_FAHRENHEIT:
temp = fahrenheit_to_celsius(temp)
metric = self._metric(
"temperature_c",
self.prometheus_cli.Gauge,
"Temperature in degrees Celsius",
)
metric.labels(**self._labels(state)).set(temp)
current_temp = state.attributes.get(ATTR_CURRENT_TEMPERATURE)
if current_temp:
if self._climate_units == TEMP_FAHRENHEIT:
current_temp = fahrenheit_to_celsius(current_temp)
metric = self._metric(
"current_temperature_c",
self.prometheus_cli.Gauge,
"Current Temperature in degrees Celsius",
)
metric.labels(**self._labels(state)).set(current_temp)
current_action = state.attributes.get(ATTR_HVAC_ACTION)
if current_action:
metric = self._metric(
"climate_action", self.prometheus_cli.Gauge, "HVAC action", ["action"],
)
for action in CURRENT_HVAC_ACTIONS:
metric.labels(**dict(self._labels(state), action=action)).set(
float(action == current_action)
)
def _handle_humidifier(self, state):
humidifier_target_humidity_percent = state.attributes.get(ATTR_HUMIDITY)
if humidifier_target_humidity_percent:
metric = self._metric(
"humidifier_target_humidity_percent",
self.prometheus_cli.Gauge,
"Target Relative Humidity",
)
metric.labels(**self._labels(state)).set(humidifier_target_humidity_percent)
metric = self._metric(
"humidifier_state",
self.prometheus_cli.Gauge,
"State of the humidifier (0/1)",
)
try:
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
current_mode = state.attributes.get(ATTR_MODE)
available_modes = state.attributes.get(ATTR_AVAILABLE_MODES)
if current_mode and available_modes:
metric = self._metric(
"humidifier_mode",
self.prometheus_cli.Gauge,
"Humidifier Mode",
["mode"],
)
for mode in available_modes:
metric.labels(**dict(self._labels(state), mode=mode)).set(
float(mode == current_mode)
)
def _handle_sensor(self, state):
unit = self._unit_string(state.attributes.get(ATTR_UNIT_OF_MEASUREMENT))
for metric_handler in self._sensor_metric_handlers:
metric = metric_handler(state, unit)
if metric is not None:
break
if metric is not None:
_metric = self._metric(
metric, self.prometheus_cli.Gauge, f"Sensor data measured in {unit}"
)
try:
value = self.state_as_number(state)
if unit == TEMP_FAHRENHEIT:
value = fahrenheit_to_celsius(value)
_metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
self._battery(state)
def _sensor_default_metric(self, state, unit):
"""Get default metric."""
return self._default_metric
@staticmethod
def _sensor_attribute_metric(state, unit):
"""Get metric based on device class attribute."""
metric = state.attributes.get(ATTR_DEVICE_CLASS)
if metric is not None:
return f"{metric}_{unit}"
return None
def _sensor_override_metric(self, state, unit):
"""Get metric from override in configuration."""
if self._override_metric:
return self._override_metric
return None
def _sensor_override_component_metric(self, state, unit):
"""Get metric from override in component confioguration."""
return self._component_config.get(state.entity_id).get(CONF_OVERRIDE_METRIC)
@staticmethod
def _sensor_fallback_metric(state, unit):
"""Get metric from fallback logic for compatibility."""
if unit in (None, ""):
_LOGGER.debug("Unsupported sensor: %s", state.entity_id)
return None
return f"sensor_unit_{unit}"
@staticmethod
def _unit_string(unit):
"""Get a formatted string of the unit."""
if unit is None:
return
units = {
TEMP_CELSIUS: "c",
TEMP_FAHRENHEIT: "c", # F should go into C metric
UNIT_PERCENTAGE: "percent",
}
default = unit.replace("/", "_per_")
default = default.lower()
return units.get(unit, default)
def _handle_switch(self, state):
metric = self._metric(
"switch_state", self.prometheus_cli.Gauge, "State of the switch (0/1)"
)
try:
value = self.state_as_number(state)
metric.labels(**self._labels(state)).set(value)
except ValueError:
pass
self._handle_attributes(state)
def _handle_zwave(self, state):
self._battery(state)
def _handle_automation(self, state):
metric = self._metric(
"automation_triggered_count",
self.prometheus_cli.Counter,
"Count of times an automation has been triggered",
)
metric.labels(**self._labels(state)).inc()
class PrometheusView(HomeAssistantView):
"""Handle Prometheus requests."""
url = API_ENDPOINT
name = "api:prometheus"
def __init__(self, prometheus_cli):
"""Initialize Prometheus view."""
self.prometheus_cli = prometheus_cli
async def get(self, request):
"""Handle request for Prometheus metrics."""
_LOGGER.debug("Received Prometheus metrics request")
return web.Response(
body=self.prometheus_cli.generate_latest(),
content_type=CONTENT_TYPE_TEXT_PLAIN,
)
|
the-stack_0_24459 | # Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import pwd
import subprocess
import platform
import uuid
from itertools import chain
from base64 import b64decode
from copy import deepcopy
from subprocess import (
check_call,
check_output,
CalledProcessError
)
from charmhelpers.fetch import (
apt_update,
apt_upgrade,
apt_install,
apt_purge,
apt_autoremove,
apt_mark,
filter_missing_packages,
filter_installed_packages,
)
from charmhelpers.core.fstab import Fstab
from charmhelpers.core.host import (
mkdir,
service_restart,
lsb_release,
rsync,
CompareHostReleases,
mount,
fstab_add,
)
from charmhelpers.core.hookenv import (
charm_dir,
config,
log,
related_units,
relation_ids,
relation_get,
status_set,
DEBUG,
INFO,
WARNING,
storage_list,
storage_get,
hook_name,
)
from charmhelpers.core.decorators import retry_on_exception
from charmhelpers.contrib.openstack import templating, context
from charmhelpers.contrib.openstack.alternatives import install_alternative
from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
get_os_codename_install_source,
os_release,
reset_os_release,
is_unit_paused_set,
make_assess_status_func,
pause_unit,
resume_unit,
os_application_version_set,
CompareOpenStackReleases,
)
from charmhelpers.core.hugepage import hugepage_support
from nova_compute_context import (
nova_metadata_requirement,
CloudComputeContext,
CloudComputeVendorJSONContext,
LxdContext,
MetadataServiceContext,
NovaComputeLibvirtContext,
NovaComputeLibvirtOverrideContext,
NovaComputeCephContext,
NeutronComputeContext,
InstanceConsoleContext,
IronicAPIContext,
CEPH_CONF,
ceph_config_file,
HostIPContext,
NovaComputeVirtContext,
NOVA_API_AA_PROFILE,
NOVA_COMPUTE_AA_PROFILE,
NOVA_NETWORK_AA_PROFILE,
NovaAPIAppArmorContext,
NovaComputeAppArmorContext,
NovaNetworkAppArmorContext,
SerialConsoleContext,
NovaComputeAvailabilityZoneContext,
NeutronPluginSubordinateConfigContext,
NovaComputePlacementContext,
)
import charmhelpers.contrib.openstack.vaultlocker as vaultlocker
from charmhelpers.core.unitdata import kv
from charmhelpers.contrib.storage.linux.utils import (
is_block_device,
is_device_mounted,
mkfs_xfs,
)
from charmhelpers.core.templating import render
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
TEMPLATES = 'templates/'
BASE_PACKAGES = [
'nova-compute',
'genisoimage', # was missing as a package dependency until raring.
'librbd1', # bug 1440953
'python-six',
'python-psutil',
'xfsprogs',
'nfs-common',
'open-iscsi',
'python3-novaclient', # lib required by juju actions
'python3-neutronclient', # lib required by juju actions
'python3-keystoneauth1', # lib required by juju actions
'ovmf', # required for uefi based instances
]
PY3_PACKAGES = [
'python3-nova',
'python3-memcache',
'python3-rados',
'python3-rbd',
]
PURGE_PACKAGES = [
'python-ceilometer',
'python-neutron',
'python-neutron-fwaas',
'python-nova',
'python-nova-lxd',
]
MULTIPATH_PACKAGES = [
'multipath-tools',
'sysfsutils',
]
HELD_PACKAGES = [
'python-memcache',
'python-six',
'python-psutil',
]
VERSION_PACKAGE = 'nova-common'
DEFAULT_INSTANCE_PATH = '/var/lib/nova/instances'
NOVA_CONF_DIR = "/etc/nova"
QEMU_CONF = '/etc/libvirt/qemu.conf'
LIBVIRTD_CONF = '/etc/libvirt/libvirtd.conf'
LIBVIRT_BIN = '/etc/default/libvirt-bin'
LIBVIRT_BIN_OVERRIDES = '/etc/init/libvirt-bin.override'
NOVA_CONF = '%s/nova.conf' % NOVA_CONF_DIR
NOVA_COMPUTE_CONF = '%s/nova-compute.conf' % NOVA_CONF_DIR
VENDORDATA_FILE = '%s/vendor_data.json' % NOVA_CONF_DIR
QEMU_KVM = '/etc/default/qemu-kvm'
NOVA_API_AA_PROFILE_PATH = ('/etc/apparmor.d/{}'.format(NOVA_API_AA_PROFILE))
NOVA_COMPUTE_AA_PROFILE_PATH = ('/etc/apparmor.d/{}'
''.format(NOVA_COMPUTE_AA_PROFILE))
NOVA_NETWORK_AA_PROFILE_PATH = ('/etc/apparmor.d/{}'
''.format(NOVA_NETWORK_AA_PROFILE))
NOVA_COMPUTE_OVERRIDE_DIR = '/etc/systemd/system/nova-compute.service.d'
MOUNT_DEPENDENCY_OVERRIDE = '99-mount.conf'
LIBVIRT_TYPES = ['kvm', 'qemu', 'lxc']
USE_FQDN_KEY = 'nova-compute-charm-use-fqdn'
def use_fqdn_hint():
"""Hint for whether FQDN should be used for agent registration
:returns: True or False
:rtype: bool
"""
db = kv()
return db.get(USE_FQDN_KEY, False)
BASE_RESOURCE_MAP = {
NOVA_CONF: {
'services': ['nova-compute'],
'contexts': [context.AMQPContext(ssl_dir=NOVA_CONF_DIR),
context.SharedDBContext(
relation_prefix='nova', ssl_dir=NOVA_CONF_DIR),
context.ImageServiceContext(),
context.OSConfigFlagContext(),
CloudComputeContext(),
LxdContext(),
IronicAPIContext(),
NovaComputeLibvirtContext(),
NovaComputeCephContext(),
context.SyslogContext(),
NeutronPluginSubordinateConfigContext(
interface=['neutron-plugin'],
service=['nova-compute', 'nova'],
config_file=NOVA_CONF),
context.SubordinateConfigContext(
interface=['nova-ceilometer',
'ephemeral-backend'],
service=['nova-compute', 'nova'],
config_file=NOVA_CONF),
InstanceConsoleContext(),
context.ZeroMQContext(),
context.NotificationDriverContext(),
MetadataServiceContext(),
HostIPContext(),
NovaComputeVirtContext(),
context.LogLevelContext(),
context.InternalEndpointContext(),
context.VolumeAPIContext('nova-common'),
SerialConsoleContext(),
NovaComputeAvailabilityZoneContext(),
NovaComputePlacementContext(),
context.WorkerConfigContext(),
vaultlocker.VaultKVContext(
vaultlocker.VAULTLOCKER_BACKEND),
context.IdentityCredentialsContext(
rel_name='cloud-credentials'),
context.HostInfoContext(use_fqdn_hint_cb=use_fqdn_hint),
],
},
VENDORDATA_FILE: {
'services': [],
'contexts': [CloudComputeVendorJSONContext()],
},
NOVA_API_AA_PROFILE_PATH: {
'services': ['nova-api'],
'contexts': [NovaAPIAppArmorContext()],
},
NOVA_COMPUTE_AA_PROFILE_PATH: {
'services': ['nova-compute'],
'contexts': [NovaComputeAppArmorContext()],
},
NOVA_NETWORK_AA_PROFILE_PATH: {
'services': ['nova-network'],
'contexts': [NovaNetworkAppArmorContext()],
},
}
LIBVIRTD_DAEMON = 'libvirtd'
LIBVIRT_BIN_DAEMON = 'libvirt-bin'
LIBVIRT_RESOURCE_MAP = {
QEMU_CONF: {
'services': [LIBVIRT_BIN_DAEMON],
'contexts': [NovaComputeLibvirtContext()],
},
QEMU_KVM: {
'services': ['qemu-kvm'],
'contexts': [NovaComputeLibvirtContext()],
},
LIBVIRTD_CONF: {
'services': [LIBVIRT_BIN_DAEMON],
'contexts': [NovaComputeLibvirtContext()],
},
LIBVIRT_BIN: {
'services': [LIBVIRT_BIN_DAEMON],
'contexts': [NovaComputeLibvirtContext()],
},
LIBVIRT_BIN_OVERRIDES: {
'services': [LIBVIRT_BIN_DAEMON],
'contexts': [NovaComputeLibvirtOverrideContext()],
},
}
LIBVIRT_RESOURCE_MAP.update(BASE_RESOURCE_MAP)
CEPH_SECRET = '/etc/ceph/secret.xml'
CEPH_BACKEND_SECRET = '/etc/ceph/secret-{}.xml'
CEPH_RESOURCES = {
CEPH_SECRET: {
'contexts': [NovaComputeCephContext()],
'services': [],
}
}
# Maps virt-type config to a compute package(s).
VIRT_TYPES = {
'kvm': ['nova-compute-kvm'],
'qemu': ['nova-compute-qemu'],
'uml': ['nova-compute-uml'],
'lxc': ['nova-compute-lxc'],
'lxd': ['nova-compute-lxd'],
'ironic': ['nova-compute-ironic'],
}
# Maps virt-type config to a libvirt URI.
LIBVIRT_URIS = {
'kvm': 'qemu:///system',
'qemu': 'qemu:///system',
'uml': 'uml:///system',
'lxc': 'lxc:///',
}
# The interface is said to be satisfied if any one of the interfaces in the
# list has a complete context.
REQUIRED_INTERFACES = {
'messaging': ['amqp'],
'image': ['image-service'],
'compute': ['cloud-compute'],
}
def libvirt_daemon():
'''Resolve the correct name of the libvirt daemon service'''
distro_codename = lsb_release()['DISTRIB_CODENAME'].lower()
if (CompareHostReleases(distro_codename) >= 'yakkety' or
CompareOpenStackReleases(os_release('nova-common')) >= 'ocata'):
return LIBVIRTD_DAEMON
else:
return LIBVIRT_BIN_DAEMON
def vaultlocker_installed():
return len(filter_installed_packages(['vaultlocker'])) == 0
def resource_map():
'''
Dynamically generate a map of resources that will be managed for a single
hook execution.
'''
# TODO: Cache this on first call?
virt_type = config('virt-type').lower()
if virt_type in ('lxd', 'ironic'):
resource_map = deepcopy(BASE_RESOURCE_MAP)
else:
resource_map = deepcopy(LIBVIRT_RESOURCE_MAP)
# if vault deps are not installed it is not yet possible to check the vault
# context status since it requires the hvac dependency.
if not vaultlocker_installed():
to_delete = []
for item in resource_map[NOVA_CONF]['contexts']:
if isinstance(item, type(vaultlocker.VaultKVContext())):
to_delete.append(item)
for item in to_delete:
resource_map[NOVA_CONF]['contexts'].remove(item)
net_manager = network_manager()
# Network manager gets set late by the cloud-compute interface.
# FlatDHCPManager only requires some extra packages.
cmp_os_release = CompareOpenStackReleases(os_release('nova-common'))
if (net_manager in ['flatmanager', 'flatdhcpmanager'] and
config('multi-host').lower() == 'yes' and
cmp_os_release < 'ocata'):
resource_map[NOVA_CONF]['services'].extend(
['nova-api', 'nova-network']
)
else:
resource_map.pop(NOVA_API_AA_PROFILE_PATH)
resource_map.pop(NOVA_NETWORK_AA_PROFILE_PATH)
if virt_type == 'ironic':
# NOTE(gsamfira): OpenStack versions prior to Victoria do not have a
# dedicated nova-compute-ironic package which provides a suitable
# nova-compute.conf file. We use a template to compensate for that.
if cmp_os_release < 'victoria':
resource_map[NOVA_COMPUTE_CONF] = {
"services": ["nova-compute"],
"contexts": [],
}
cmp_distro_codename = CompareHostReleases(
lsb_release()['DISTRIB_CODENAME'].lower())
if (cmp_distro_codename >= 'yakkety' or cmp_os_release >= 'ocata'):
for data in resource_map.values():
if LIBVIRT_BIN_DAEMON in data['services']:
data['services'].remove(LIBVIRT_BIN_DAEMON)
data['services'].append(LIBVIRTD_DAEMON)
# Neutron/quantum requires additional contexts, as well as new resources
# depending on the plugin used.
# NOTE(james-page): only required for ovs plugin right now
if net_manager in ['neutron', 'quantum']:
resource_map[NOVA_CONF]['contexts'].append(NeutronComputeContext())
if relation_ids('ceph'):
CEPH_RESOURCES[ceph_config_file()] = {
'contexts': [NovaComputeCephContext()],
'services': ['nova-compute']
}
resource_map.update(CEPH_RESOURCES)
enable_nova_metadata, _ = nova_metadata_requirement()
if enable_nova_metadata:
resource_map[NOVA_CONF]['services'].append('nova-api-metadata')
    # NOTE(james-page): If not on an upstart based system, don't write
    # an override file for libvirt-bin.
if not os.path.exists('/etc/init'):
if LIBVIRT_BIN_OVERRIDES in resource_map:
del resource_map[LIBVIRT_BIN_OVERRIDES]
return resource_map
def restart_map():
'''
Constructs a restart map based on charm config settings and relation
state.
'''
return {k: v['services'] for k, v in resource_map().items()}
def services():
''' Returns a list of services associated with this charm '''
return list(set(chain(*restart_map().values())))
def register_configs():
'''
Returns an OSTemplateRenderer object with all required configs registered.
'''
release = os_release('nova-common')
configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
openstack_release=release)
if relation_ids('ceph'):
# Add charm ceph configuration to resources and
# ensure directory actually exists
mkdir(os.path.dirname(ceph_config_file()))
mkdir(os.path.dirname(CEPH_CONF))
        # Install ceph config as an alternative for co-location with
        # ceph and ceph-osd charms - nova-compute ceph.conf will be
        # lower priority than both of these but that's OK
if not os.path.exists(ceph_config_file()):
# touch file for pre-templated generation
open(ceph_config_file(), 'w').close()
install_alternative(os.path.basename(CEPH_CONF),
CEPH_CONF, ceph_config_file())
for cfg, d in resource_map().items():
configs.register(cfg, d['contexts'])
return configs
def determine_packages_arch():
'''Generate list of architecture-specific packages'''
packages = []
distro_codename = lsb_release()['DISTRIB_CODENAME'].lower()
if (platform.machine() == 'aarch64' and
CompareHostReleases(distro_codename) >= 'wily'):
        packages.extend(['qemu-efi'])  # AArch64 cloud images require UEFI fw
return packages
def determine_packages():
release = os_release('nova-common')
cmp_release = CompareOpenStackReleases(release)
packages = [] + BASE_PACKAGES
net_manager = network_manager()
if (net_manager in ['flatmanager', 'flatdhcpmanager'] and
config('multi-host').lower() == 'yes' and
CompareOpenStackReleases(os_release('nova-common')) < 'ocata'):
packages.extend(['nova-api', 'nova-network'])
if relation_ids('ceph'):
packages.append('ceph-common')
virt_type = config('virt-type')
    if virt_type == 'ironic' and cmp_release < 'victoria':
        # ironic compute driver is part of nova and
        # gets installed along with python3-nova
# The nova-compute-ironic metapackage that satisfies
# nova-compute-hypervisor does not exist for versions of
# OpenStack prior to Victoria. Use nova-compute-vmware,
# as that package has the least amount of dependencies.
# We also add python3-ironicclient here. This is a dependency
# which gets installed by nova-compute-ironic in Victoria and later.
VIRT_TYPES[virt_type] = [
'nova-compute-vmware',
'python3-ironicclient']
try:
packages.extend(VIRT_TYPES[virt_type])
except KeyError:
log('Unsupported virt-type configured: %s' % virt_type)
raise
enable_nova_metadata, _ = nova_metadata_requirement()
if enable_nova_metadata:
packages.append('nova-api-metadata')
packages.extend(determine_packages_arch())
# LP#1806830 - ensure that multipath packages are installed when
# use-multipath option is enabled.
if config('use-multipath'):
packages.extend(MULTIPATH_PACKAGES)
if cmp_release >= 'rocky':
packages = [p for p in packages if not p.startswith('python-')]
packages.extend(PY3_PACKAGES)
if filter_missing_packages(['python-ceilometer']):
packages.append('python3-ceilometer')
if filter_missing_packages(['python-neutron']):
packages.append('python3-neutron')
if filter_missing_packages(['python-neutron-fwaas']):
packages.append('python3-neutron-fwaas')
if virt_type == 'lxd':
packages.append('python3-nova-lxd')
return packages
def determine_purge_packages():
'''Return a list of packages to purge for the current OS release'''
cmp_os_source = CompareOpenStackReleases(os_release('nova-common'))
if cmp_os_source >= 'rocky':
return PURGE_PACKAGES
return []
def remove_old_packages():
    '''Purge any packages that need to be removed.
:returns: bool Whether packages were removed.
'''
installed_packages = filter_missing_packages(
determine_purge_packages()
)
if installed_packages:
apt_mark(filter_missing_packages(determine_held_packages()),
'auto')
apt_purge(installed_packages, fatal=True)
apt_autoremove(purge=True, fatal=True)
return bool(installed_packages)
def determine_held_packages():
'''Return a list of packages to mark as candidates for removal
for the current OS release'''
cmp_os_source = CompareOpenStackReleases(os_release('nova-common'))
if cmp_os_source >= 'rocky':
return HELD_PACKAGES
return []
def migration_enabled():
# XXX: confirm juju-core bool behavior is the same.
return config('enable-live-migration')
def _network_config():
'''
Obtain all relevant network configuration settings from nova-c-c via
cloud-compute interface.
'''
settings = ['network_manager', 'neutron_plugin', 'quantum_plugin']
net_config = {}
for rid in relation_ids('cloud-compute'):
for unit in related_units(rid):
for setting in settings:
value = relation_get(setting, rid=rid, unit=unit)
if value:
net_config[setting] = value
return net_config
def neutron_plugin():
return (_network_config().get('neutron_plugin') or
_network_config().get('quantum_plugin'))
def network_manager():
'''
Obtain the network manager advertised by nova-c-c, renaming to Quantum
if required
'''
manager = _network_config().get('network_manager')
if manager:
manager = manager.lower()
if manager != 'neutron':
return manager
else:
return 'neutron'
return manager
def public_ssh_key(user='root'):
home = pwd.getpwnam(user).pw_dir
try:
with open(os.path.join(home, '.ssh', 'id_rsa.pub')) as key:
return key.read().strip()
except OSError:
return None
def initialize_ssh_keys(user='root'):
home_dir = pwd.getpwnam(user).pw_dir
ssh_dir = os.path.join(home_dir, '.ssh')
if not os.path.isdir(ssh_dir):
os.mkdir(ssh_dir)
priv_key = os.path.join(ssh_dir, 'id_rsa')
if not os.path.isfile(priv_key):
log('Generating new ssh key for user %s.' % user)
cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048',
'-f', priv_key]
check_output(cmd)
pub_key = '%s.pub' % priv_key
if not os.path.isfile(pub_key):
log('Generating missing ssh public key @ %s.' % pub_key)
cmd = ['ssh-keygen', '-y', '-f', priv_key]
p = check_output(cmd).decode('UTF-8').strip()
with open(pub_key, 'wt') as out:
out.write(p)
check_output(['chown', '-R', user, ssh_dir])
def set_ppc64_cpu_smt_state(smt_state):
"""Set ppc64_cpu smt state."""
current_smt_state = check_output(['ppc64_cpu', '--smt']).decode('UTF-8')
# Possible smt state values are integer or 'off'
# Ex. common ppc64_cpu query command output values:
# SMT=8
# -or-
# SMT is off
if 'SMT={}'.format(smt_state) in current_smt_state:
log('Not changing ppc64_cpu smt state ({})'.format(smt_state))
elif smt_state == 'off' and 'SMT is off' in current_smt_state:
log('Not changing ppc64_cpu smt state (already off)')
else:
log('Setting ppc64_cpu smt state: {}'.format(smt_state))
cmd = ['ppc64_cpu', '--smt={}'.format(smt_state)]
try:
check_output(cmd)
except CalledProcessError as e:
# Known to fail in a container (host must pre-configure smt)
msg = 'Failed to set ppc64_cpu smt state: {}'.format(smt_state)
log(msg, level=WARNING)
status_set('blocked', msg)
raise e
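# Hedged examples of the branch logic above: if `ppc64_cpu --smt` reports
# 'SMT=8' and smt_state is '8', or reports 'SMT is off' and smt_state is
# 'off', the call is a no-op; any other combination runs
# `ppc64_cpu --smt=<state>` and surfaces a blocked status on failure.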
def import_authorized_keys(user='root', prefix=None):
"""Import SSH authorized_keys + known_hosts from a cloud-compute relation.
Store known_hosts in user's $HOME/.ssh and authorized_keys in a path
specified using authorized-keys-path config option.
The relation_get data is a series of key values of the form:
[prefix_]known_hosts_max_index: <int>
[prefix_]authorized_keys_max_index: <int>
[prefix_]known_hosts_[n]: <str>
[prefix_]authorized_keys_[n]: <str>
    :param user: the user to write the known hosts and keys for (default 'root')
:type user: str
:param prefix: A prefix to add to the relation data keys (default None)
:type prefix: Option[str, None]
"""
_prefix = "{}_".format(prefix) if prefix else ""
# get all the data at once with one relation_get call
rdata = relation_get() or {}
known_hosts_index = int(
rdata.get('{}known_hosts_max_index'.format(_prefix), '0'))
authorized_keys_index = int(
rdata.get('{}authorized_keys_max_index'.format(_prefix), '0'))
if known_hosts_index == 0 or authorized_keys_index == 0:
return
homedir = pwd.getpwnam(user).pw_dir
dest_auth_keys = config('authorized-keys-path').format(
homedir=homedir, username=user)
dest_known_hosts = os.path.join(homedir, '.ssh/known_hosts')
log('Saving new known_hosts file to %s and authorized_keys file to: %s.' %
(dest_known_hosts, dest_auth_keys))
# write known hosts using data from relation_get
with open(dest_known_hosts, 'wt') as f:
for index in range(known_hosts_index):
f.write("{}\n".format(
rdata.get("{}known_hosts_{}".format(_prefix, index))))
# write authorized keys using data from relation_get
with open(dest_auth_keys, 'wt') as f:
for index in range(authorized_keys_index):
f.write("{}\n".format(
rdata.get('{}authorized_keys_{}'.format(_prefix, index))))
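# Hedged illustration of the relation data layout consumed above (values are
# hypothetical, not taken from a real deployment):
#
#   {
#       'known_hosts_max_index': '2',
#       'known_hosts_0': '10.0.0.1 ssh-rsa AAAA...',
#       'known_hosts_1': '10.0.0.2 ssh-rsa AAAA...',
#       'authorized_keys_max_index': '1',
#       'authorized_keys_0': 'ssh-rsa AAAA... root@compute-1',
#   }
#
# With prefix='admin' the same entries would be read as 'admin_known_hosts_0',
# 'admin_authorized_keys_max_index', and so on.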
def do_openstack_upgrade(configs):
# NOTE(jamespage) horrible hack to make utils forget a cached value
import charmhelpers.contrib.openstack.utils as utils
utils.os_rel = None
new_src = config('openstack-origin')
new_os_rel = get_os_codename_install_source(new_src)
log('Performing OpenStack upgrade to %s.' % (new_os_rel))
configure_installation_source(new_src)
apt_update(fatal=True)
dpkg_opts = [
'--option', 'Dpkg::Options::=--force-confnew',
'--option', 'Dpkg::Options::=--force-confdef',
]
apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
reset_os_release()
apt_install(determine_packages(), fatal=True)
remove_old_packages()
configs.set_release(openstack_release=new_os_rel)
configs.write_all()
if not is_unit_paused_set():
for s in services():
service_restart(s)
def import_keystone_ca_cert():
"""If provided, import the Keystone CA cert that gets forwarded
to compute nodes via the cloud-compute interface
"""
ca_cert = relation_get('ca_cert')
if not ca_cert:
return
log('Writing Keystone CA certificate to %s' % CA_CERT_PATH)
with open(CA_CERT_PATH, 'wb') as out:
out.write(b64decode(ca_cert))
check_call(['update-ca-certificates'])
def create_libvirt_secret(secret_file, secret_uuid, key):
uri = LIBVIRT_URIS[config('virt-type')]
cmd = ['virsh', '-c', uri, 'secret-list']
if secret_uuid in check_output(cmd).decode('UTF-8'):
old_key = check_output(['virsh', '-c', uri, 'secret-get-value',
secret_uuid]).decode('UTF-8')
old_key = old_key.strip()
if old_key == key:
log('Libvirt secret already exists for uuid %s.' % secret_uuid,
level=DEBUG)
return
else:
log('Libvirt secret changed for uuid %s.' % secret_uuid,
level=INFO)
log('Defining new libvirt secret for uuid %s.' % secret_uuid)
cmd = ['virsh', '-c', uri, 'secret-define', '--file', secret_file]
check_call(cmd)
cmd = ['virsh', '-c', uri, 'secret-set-value', '--secret', secret_uuid,
'--base64', key]
check_call(cmd)
def _libvirt_network_exec(netname, action):
"""Run action on libvirt network"""
try:
cmd = ['virsh', 'net-list', '--all']
out = check_output(cmd).decode('UTF-8').splitlines()
if len(out) < 3:
return
for line in out[2:]:
res = re.search(r"^\s+{} ".format(netname), line)
if res:
check_call(['virsh', 'net-{}'.format(action), netname])
return
except CalledProcessError:
log("Failed to {} libvirt network '{}'".format(action, netname),
level=WARNING)
except OSError as e:
if e.errno == 2:
log("virsh is unavailable. Virt Type is '{}'. Not attempting to "
"{} libvirt network '{}'"
"".format(config('virt-type'), action, netname), level=DEBUG)
else:
raise e
def remove_libvirt_network(netname):
_libvirt_network_exec(netname, 'destroy')
_libvirt_network_exec(netname, 'undefine')
def configure_lxd(user='nova'):
''' Configure lxd use for nova user '''
_release = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(_release) < "vivid":
raise Exception("LXD is not supported for Ubuntu "
"versions less than 15.04 (vivid)")
configure_subuid(user)
lxc_list(user)
@retry_on_exception(5, base_delay=2, exc_type=CalledProcessError)
def lxc_list(user):
cmd = ['sudo', '-u', user, 'lxc', 'list']
check_call(cmd)
def configure_subuid(user):
cmd = ['usermod', '-v', '100000-200000', '-w', '100000-200000', user]
check_call(cmd)
def enable_shell(user):
cmd = ['usermod', '-s', '/bin/bash', user]
check_call(cmd)
def disable_shell(user):
cmd = ['usermod', '-s', '/bin/false', user]
check_call(cmd)
def fix_path_ownership(path, user='nova'):
cmd = ['chown', user, path]
check_call(cmd)
def assert_charm_supports_ipv6():
"""Check whether we are able to support charms ipv6."""
_release = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(_release) < "trusty":
raise Exception("IPv6 is not supported in the charms for Ubuntu "
"versions less than Trusty 14.04")
def get_hugepage_number():
# TODO: defaults to 2M - this should probably be configurable
# and support multiple pool sizes - e.g. 2M and 1G.
# NOTE(jamespage): 2M in bytes
hugepage_size = 2048 * 1024
hugepage_config = config('hugepages')
hugepages = None
if hugepage_config:
if hugepage_config.endswith('%'):
            # NOTE(jamespage): the values returned by virtual_memory() are
            # in bytes
import psutil
mem = psutil.virtual_memory()
hugepage_config_pct = hugepage_config.strip('%')
hugepage_multiplier = float(hugepage_config_pct) / 100
hugepages = int((mem.total * hugepage_multiplier) / hugepage_size)
else:
hugepages = int(hugepage_config)
return hugepages
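# Worked example for the percentage branch above (numbers are illustrative):
# with 16 GiB of RAM and hugepages='10%', the pool size is
# int((17179869184 * 0.10) / (2048 * 1024)) = 819 hugepages of 2M each.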
def install_hugepages():
""" Configure hugepages """
hugepage_config = config('hugepages')
if hugepage_config:
mnt_point = '/run/hugepages/kvm'
hugepage_support(
'nova',
mnt_point=mnt_point,
group='root',
nr_hugepages=get_hugepage_number(),
mount=False,
set_shmmax=True,
)
# Remove hugepages entry if present due to Bug #1518771
Fstab.remove_by_mountpoint(mnt_point)
if subprocess.call(['mountpoint', mnt_point]):
service_restart('qemu-kvm')
rsync(
charm_dir() + '/files/qemu-hugefsdir',
'/etc/init.d/qemu-hugefsdir'
)
subprocess.check_call('/etc/init.d/qemu-hugefsdir')
subprocess.check_call(['update-rc.d', 'qemu-hugefsdir', 'defaults'])
def get_optional_relations():
"""Return a dictionary of optional relations.
@returns {relation: relation_name}
"""
optional_interfaces = {}
if relation_ids('ceph'):
optional_interfaces['storage-backend'] = ['ceph']
if relation_ids('neutron-plugin'):
optional_interfaces['neutron-plugin'] = ['neutron-plugin']
if config('encrypt'):
optional_interfaces['vault'] = ['secrets-storage']
if config('virt-type').lower() == 'ironic':
optional_interfaces['baremetal'] = ['ironic-api']
return optional_interfaces
def assess_status(configs):
"""Assess status of current unit
Decides what the state of the unit should be based on the current
configuration.
SIDE EFFECT: calls set_os_workload_status(...) which sets the workload
status of the unit.
Also calls status_set(...) directly if paused state isn't complete.
@param configs: a templating.OSConfigRenderer() object
@returns None - this function is executed for its side-effect
"""
if is_unit_paused_set():
services_to_check = services_to_pause_or_resume()
else:
services_to_check = services()
assess_status_func(configs, services_to_check)()
os_application_version_set(VERSION_PACKAGE)
def check_optional_config_and_relations(configs):
"""Validate optional configuration and relations when present.
This function is called from assess_status/set_os_workload_status as the
charm_func and needs to return either None, None if there is no problem or
the status, message if there is a problem.
:param configs: an OSConfigRender() instance.
:return 2-tuple: (string, string) = (status, message)
"""
if relation_ids('ceph'):
        # Check that provided Ceph BlueStore configuration is valid.
try:
bluestore_compression = context.CephBlueStoreCompressionContext()
bluestore_compression.validate()
except AttributeError:
            # The charm does late installation of the `ceph-common` package and
            # the class initializer above will throw an exception until it is
            # installed.
pass
except ValueError as e:
return ('blocked', 'Invalid configuration: {}'.format(str(e)))
# return 'unknown' as the lowest priority to not clobber an existing
# status.
return "unknown", ""
def assess_status_func(configs, services_=None):
"""Helper function to create the function that will assess_status() for
the unit.
Uses charmhelpers.contrib.openstack.utils.make_assess_status_func() to
create the appropriate status function and then returns it.
Used directly by assess_status() and also for pausing and resuming
the unit.
NOTE(ajkavanagh) ports are not checked due to race hazards with services
    that don't behave synchronously w.r.t their service scripts. e.g.
apache2.
@param configs: a templating.OSConfigRenderer() object
@return f() -> None : a function that assesses the unit's workload status
"""
required_interfaces = REQUIRED_INTERFACES.copy()
optional_relations = get_optional_relations()
if 'vault' in optional_relations:
# skip check if hvac dependency not installed yet
if not vaultlocker_installed():
log("Vault dependencies not yet met so removing from status check")
del optional_relations['vault']
else:
log("Vault dependencies met so including in status check")
required_interfaces.update(optional_relations)
return make_assess_status_func(
configs, required_interfaces,
charm_func=check_optional_config_and_relations,
services=services_ or services(), ports=None)
def pause_unit_helper(configs):
"""Helper function to pause a unit, and then call assess_status(...) in
effect, so that the status is correctly updated.
Uses charmhelpers.contrib.openstack.utils.pause_unit() to do the work.
@param configs: a templating.OSConfigRenderer() object
@returns None - this function is executed for its side-effect
"""
_pause_resume_helper(pause_unit, configs)
def resume_unit_helper(configs):
"""Helper function to resume a unit, and then call assess_status(...) in
effect, so that the status is correctly updated.
Uses charmhelpers.contrib.openstack.utils.resume_unit() to do the work.
@param configs: a templating.OSConfigRenderer() object
@returns None - this function is executed for its side-effect
"""
_pause_resume_helper(resume_unit, configs)
def services_to_pause_or_resume():
if "post-series-upgrade" in hook_name():
return services()
else:
return list(set(services()) - {libvirt_daemon()})
def _pause_resume_helper(f, configs):
"""Helper function that uses the make_assess_status_func(...) from
charmhelpers.contrib.openstack.utils to create an assess_status(...)
function that can be used with the pause/resume of the unit
@param f: the function to be used with the assess_status(...) function
@returns None - this function is executed for its side-effect
"""
# TODO(ajkavanagh) - ports= has been left off because of the race hazard
# that exists due to service_start()
f(assess_status_func(configs, services_to_pause_or_resume()),
services=services_to_pause_or_resume(),
ports=None)
def determine_block_device():
"""Determine the block device to use for ephemeral storage
:returns: Block device to use for storage
:rtype: str or None if not configured"""
config_dev = config('ephemeral-device')
if config_dev and os.path.exists(config_dev):
return config_dev
storage_ids = storage_list('ephemeral-device')
storage_devs = [storage_get('location', s) for s in storage_ids]
if storage_devs:
return storage_devs[0]
return None
def configure_local_ephemeral_storage():
"""Configure local block device for use as ephemeral instance storage"""
# Preflight check vault relation if encryption is enabled
encrypt = config('encrypt')
if encrypt:
if not vaultlocker_installed():
log("Encryption requested but vaultlocker not yet installed",
level=DEBUG)
return
vault_kv = vaultlocker.VaultKVContext(
secret_backend=vaultlocker.VAULTLOCKER_BACKEND
)
context = vault_kv()
if vault_kv.complete:
# NOTE: only write vaultlocker configuration once relation is
# complete otherwise we run the chance of an empty
# configuration file being installed on a machine with other
# vaultlocker based services
vaultlocker.write_vaultlocker_conf(context, priority=80)
else:
log("Encryption requested but vault relation not complete",
level=DEBUG)
return
mountpoint = config('instances-path') or '/var/lib/nova/instances'
db = kv()
storage_configured = db.get('storage-configured', False)
if storage_configured:
log("Ephemeral storage already configured, skipping",
level=DEBUG)
# NOTE(jamespage):
# Install mountpoint override to ensure that upgrades
# to the charm version which supports this change
# also start exhibiting the correct behaviour
install_mount_override(mountpoint)
return
dev = determine_block_device()
if not dev:
log('No block device configuration found, skipping',
level=DEBUG)
return
if not is_block_device(dev):
log("Device '{}' is not a block device, "
"unable to configure storage".format(dev),
level=DEBUG)
return
# NOTE: this deals with a dm-crypt'ed block device already in
# use
if is_device_mounted(dev):
log("Device '{}' is already mounted, "
"unable to configure storage".format(dev),
level=DEBUG)
return
options = None
if encrypt:
dev_uuid = str(uuid.uuid4())
check_call(['vaultlocker', 'encrypt',
'--uuid', dev_uuid,
dev])
dev = '/dev/mapper/crypt-{}'.format(dev_uuid)
options = ','.join([
"defaults",
"nofail",
("x-systemd.requires="
"vaultlocker-decrypt@{uuid}.service".format(uuid=dev_uuid)),
"comment=vaultlocker",
])
# If not cleaned and in use, mkfs should fail.
mkfs_xfs(dev, force=True)
filesystem = "xfs"
mount(dev, mountpoint, filesystem=filesystem)
fstab_add(dev, mountpoint, filesystem, options=options)
install_mount_override(mountpoint)
check_call(['chown', '-R', 'nova:nova', mountpoint])
check_call(['chmod', '-R', '0755', mountpoint])
# NOTE: record preparation of device - this ensures that ephemeral
# storage is never reconfigured by mistake, losing instance disks
db.set('storage-configured', True)
db.flush()
def install_mount_override(mountpoint):
"""Install override for nova-compute for configured mountpoint"""
render(
MOUNT_DEPENDENCY_OVERRIDE,
os.path.join(NOVA_COMPUTE_OVERRIDE_DIR, MOUNT_DEPENDENCY_OVERRIDE),
{'mount_point': mountpoint.replace('/', '-')[1:]},
perms=0o644,
)
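# Hedged example of the mount_point substitution above: for the default
# mountpoint '/var/lib/nova/instances' the template receives
# 'var-lib-nova-instances', matching systemd's mount unit naming
# (var-lib-nova-instances.mount).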
def get_availability_zone():
use_juju_az = config('customize-failure-domain')
juju_az = os.environ.get('JUJU_AVAILABILITY_ZONE')
return (juju_az if use_juju_az and juju_az
else config('default-availability-zone'))
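# Hedged example of the availability-zone selection above (values are
# hypothetical): with customize-failure-domain=True and
# JUJU_AVAILABILITY_ZONE='zone-a' the function returns 'zone-a'; otherwise it
# falls back to the charm's default-availability-zone config option.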
|
the-stack_0_24460 | """
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from extensions.ops.activation_ops import SoftPlus
from mo.front.common.replacement import FrontReplacementSubgraph
from mo.front.subgraph_matcher import SubgraphMatch
from mo.graph.graph import Graph, rename_nodes
class SoftplusFusion(FrontReplacementSubgraph):
"""
The transformation looks for the pattern for the Softplus function: Softplus(x) = ln(1 + e^x)
"""
enabled = True
def pattern(self):
return dict(
nodes=[
('exp', dict(op='Exp')),
('add', dict(op='Add')),
('const_1', dict(op='Const', value=lambda v: v is not None and np.isclose(v, 1.0, atol=1e-6))),
('ln', dict(op='Log')),
],
edges=[
('exp', 'add', {}),
('const_1', 'add', {}),
('add', 'ln', {}),
])
def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]):
ln = match['ln']
exp = match['exp']
ln_name = ln.soft_get('name', ln.id)
softplus = SoftPlus(graph, {}).create_node()
softplus.in_port(0).connect(exp.in_port(0).get_source())
ln.out_port(0).get_connection().set_source(softplus.out_port(0))
rename_nodes([(ln, ln_name + '/TBR'), (softplus, ln_name)])
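# Hedged note: the pattern above corresponds to ln(1 + e^x), i.e. the chain
# Exp(x) -> Add(Exp(x), 1.0) -> Log(...), which is rewritten into a single
# SoftPlus node; the atol=1e-6 check on the constant tolerates framework
# serialisation noise around the literal 1.0.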
|
the-stack_0_24461 | """
Plot training reward/success rate
"""
import argparse
import os
import numpy as np
import seaborn
from matplotlib import pyplot as plt
from stable_baselines3.common.monitor import LoadMonitorResultsError, load_results
from stable_baselines3.common.results_plotter import X_EPISODES, X_TIMESTEPS, X_WALLTIME, ts2xy, window_func
os.environ.pop('QT_QPA_PLATFORM_PLUGIN_PATH', None)  # avoid KeyError when the variable is unset
# Activate seaborn
seaborn.set()
parser = argparse.ArgumentParser("Gather results, plot training reward/success")
parser.add_argument("-a", "--algo", help="Algorithm to include", type=str, required=True)
parser.add_argument("-e", "--env", help="Environment to include", type=str, required=True)
parser.add_argument("-f", "--exp-folder", help="Folders to include", type=str, required=True)
parser.add_argument("--figsize", help="Figure size, width, height in inches.", nargs=2, type=int, default=[6.4, 4.8])
parser.add_argument("--fontsize", help="Font size", type=int, default=14)
parser.add_argument("-max", "--max-timesteps", help="Max number of timesteps to display", type=int)
parser.add_argument("-x", "--x-axis", help="X-axis", choices=["steps", "episodes", "time"], type=str, default="steps")
parser.add_argument("-y", "--y-axis", help="Y-axis", choices=["success", "reward"], type=str, default="reward")
parser.add_argument("-w", "--episode-window", help="Rolling window size", type=int, default=100)
args = parser.parse_args()
algo = args.algo
env = args.env
log_path = os.path.join(args.exp_folder, algo)
x_axis = {"steps": X_TIMESTEPS, "episodes": X_EPISODES, "time": X_WALLTIME}[args.x_axis]
x_label = {"steps": "Timesteps (in Million)", "episodes": "Episodes", "time": "Walltime (in hours)"}[args.x_axis]
y_axis = {"success": "is_success", "reward": "r"}[args.y_axis]
y_label = {"success": "Training Success Rate", "reward": "Training Episodic Reward"}[args.y_axis]
dirs = [
os.path.join(log_path, folder)
for folder in os.listdir(log_path)
if (env in folder and os.path.isdir(os.path.join(log_path, folder)))
]
for folder in dirs:
try:
data_frame = load_results(folder)
except LoadMonitorResultsError:
continue
if args.max_timesteps is not None:
data_frame = data_frame[data_frame.l.cumsum() <= args.max_timesteps]
try:
y = np.array(data_frame[y_axis])
except KeyError:
print(f"No data available for {folder}")
continue
x, _ = ts2xy(data_frame, x_axis)
# Do not plot the smoothed curve at all if the timeseries is shorter than window size.
if x.shape[0] >= args.episode_window:
name = folder.split('/')[-1]
# Compute and plot rolling mean with window of size args.episode_window
x, y_mean = window_func(x, y, args.episode_window, np.mean)
almost_there = np.where(y_mean >= 0.95*y_mean.max())[0][0]
print(name, '– 5% Deviation of maximum is first reached at timestep', x[almost_there])
plt.figure(y_label, figsize=args.figsize)
plt.title(y_label, fontsize=args.fontsize)
plt.xlabel(f"{x_label}", fontsize=args.fontsize)
plt.ylabel(y_label, fontsize=args.fontsize)
plt.ylim(0, 40)
plt.plot(x / 1e6, y_mean, linewidth=2)
#plt.legend()
plt.tight_layout()
#plt.show()
plt.savefig(name)
plt.show()
#plt.legend()
#plt.tight_layout()
#plt.show()
|
the-stack_0_24463 | #BSD 3-Clause License
#
# Copyright (c) 2018, Joseph deBlaquiere <[email protected]>
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of ecpy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from FSPKE import CHKPKE, Element
from binascii import hexlify, unhexlify
class TestCHKPKE(unittest.TestCase):
def setUp(self):
self.pke = CHKPKE(128,100,4,8)
def test_pubkey_export(self):
self.assertIsNotNone(self.pke.pubkey())
def test_privkey_export(self):
self.assertIsNotNone(self.pke.privkey())
self.assertIsNotNone(self.pke.privkey(0))
self.assertEqual(self.pke.privkey(0),self.pke.privkey(0))
self.assertIsNotNone(self.pke.privkey(7))
self.assertNotEqual(self.pke.privkey(0),self.pke.privkey(1))
with self.assertRaises(ValueError):
b = self.pke.privkey(-1)
b = self.pke.privkey(4096)
b = self.pke.privkey(1,4096)
b = self.pke.privkey(11,10)
def test_pubkey_export_import(self):
pub = self.pke.pubkey()
copy = CHKPKE(pubkey=pub)
self.assertEqual(pub, copy.pubkey())
with self.assertRaises(ValueError):
cpriv = copy.privkey()
self.assertIsNotNone(self.pke.privkey())
def test_privkey_export_import(self):
priv = self.pke.privkey()
copy = CHKPKE(privkey=priv)
self.assertEqual(priv, copy.privkey())
subkey = self.pke.privkey(3,4092)
copy = CHKPKE(privkey=subkey)
self.assertEqual(subkey, copy.privkey(3,4092))
with self.assertRaises(ValueError):
b = copy.privkey()
b = copy.privkey(3)
b = copy.privkey(0,4092)
b = copy.privkey(2,4093)
subkey = copy.privkey(5,4090)
copy = CHKPKE(privkey=subkey)
self.assertEqual(subkey, copy.privkey(5,4090))
with self.assertRaises(ValueError):
b = copy.privkey(4,4091)
class TestElement(unittest.TestCase):
def setUp(self):
self.pke = CHKPKE(128,100,4,8)
def test_element_export(self):
self.e = Element(self.pke)
self.assertIsNotNone(self.e.to_bytes())
self.ee = Element(self.pke, self.e.to_bytes())
self.assertIsNotNone(self.ee.to_bytes())
with self.assertRaises(TypeError):
f = Element(0)
f = Element(self.e)
class TestEncryptDecrypt(unittest.TestCase):
def setUp(self):
self.pke = CHKPKE(128,100,4,8)
def test_encrypt_decrypt(self):
pubpke = CHKPKE(pubkey=self.pke.pubkey())
self.assertIsNotNone(self.pke.privkey(12,12))
with self.assertRaises(ValueError):
cpriv = pubpke.privkey(12,12)
e = Element(self.pke).random()
m = pubpke.encrypt(e, 12);
f = self.pke.decrypt(m, 12);
self.assertEqual(e.to_bytes(), f.to_bytes())
for i in range(0,4096):
if i != 12:
g = self.pke.decrypt(m, i);
self.assertNotEqual(e.to_bytes(), g.to_bytes())
with self.assertRaises(ValueError):
h = pubpke.decrypt(m, 12)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_24464 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import is_image
from frappe.model.document import Document
class LetterHead(Document):
def before_insert(self):
# for better UX, let user set from attachment
self.source = 'Image'
def validate(self):
self.set_image()
if not self.is_default:
if not frappe.db.sql("""select count(*) from `tabLetter Head` where ifnull(is_default,0)=1"""):
self.is_default = 1
def set_image(self):
if self.source=='Image':
if self.image and is_image(self.image):
self.content = '<img src="{}" style="width: 100%;">'.format(self.image)
frappe.msgprint(frappe._('Header HTML set from attachment {0}').format(self.image), alert = True)
else:
frappe.msgprint(frappe._('Please attach an image file to set HTML'), alert = True, indicator = 'orange')
def on_update(self):
self.set_as_default()
# clear the cache so that the new letter head is uploaded
frappe.clear_cache()
def set_as_default(self):
from frappe.utils import set_default
if self.is_default:
frappe.db.sql("update `tabLetter Head` set is_default=0 where name != %s",
self.name)
set_default('letter_head', self.name)
# update control panel - so it loads new letter directly
frappe.db.set_default("default_letter_head_content", self.content)
|
the-stack_0_24467 | from service import Service
import config
import os
import sys
import re
import log
import time
import select
from subprocess import Popen
from subprocess import PIPE
from service_ovpn import ServiceOvpn
import services
import pathlib
ON_POSIX = 'posix' in sys.builtin_module_names
class ServiceOvpnClient(ServiceOvpn):
"""
Openvpn service client class
"""
OPTS = dict(
outbound_proxy_host=None, outbound_proxy_port=3128,
crt = None, key = None, crtkey = None,
paymentid='authid1',
tundev = "tun1",
mgmtport = "11193",
reneg = 600,
enabled = True
)
OPTS_HELP = dict(
http_proxy_host = "HTTP proxy used for connection to ovpn",
reneg = "Renegotiation interval"
)
def connect(self, sdp):
while True:
services.SERVICES.sleep(10)
def orchestrate(self):
ret = super().orchestrate()
return True
def createConfig(self):
if (not os.path.exists(self.dir)):
os.mkdir(self.dir)
os.chdir(self.dir)
tfile = config.Config.PREFIX + "/etc/openvpn_client.tmpl"
try:
tf = open(tfile, "rb")
tmpl = tf.read()
except (IOError, OSError):
log.L.error("Cannot open openvpn template file %s" % (tfile))
sys.exit(1)
ca = services.SERVICES.sdp.getCertificates()
cafile = self.dir + "ca.crt"
try:
caf = open(cafile, "wb")
caf.write(ca.encode())
caf.close()
except (IOError, OSError):
log.L.error("Cannot write ca file %s" % (cafile))
sys.exit(1)
if (config.Config.CAP.servicePort):
self.cfg['port'] = config.Config.CAP.servicePort
elif ('port' not in self.cfg):
self.cfg['port'] = self.json['vpn'][0]['port'].split('/')[0]
if (config.Config.CAP.serviceProto):
self.cfg['proto'] = config.Config.CAP.serviceProto
elif ('proto' not in self.cfg):
self.cfg['proto'] = self.json['vpn'][0]['port'].split('/')[1]
if (config.Config.CAP.serviceFqdn):
self.cfg['endpoint'] = config.Config.CAP.serviceFqdn
elif ('endpoint' not in self.cfg):
self.cfg['endpoint'] = self.json['vpn'][0]['endpoint']
if (config.CONFIG.CAP.vpncStandalone):
mgmt_comment = '#'
authfile=str(pathlib.Path(self.dir + 'vpnc.auth')).replace('\\','\\\\')
try:
af = open(authfile, "w")
af.write(self.cfg["paymentid"].upper() + "\n")
af.write(self.cfg["paymentid"].upper() + "\n")
af.close()
except (IOError, OSError):
log.L.error("Cannot write auth file %s" % (authfile))
sys.exit(1)
else:
mgmt_comment = ''
authfile=''
if (config.CONFIG.CAP.httpsProxyHost):
proxy_comment = ''
http_proxy = config.CONFIG.CAP.httpsProxyHost
http_proxy_port = config.CONFIG.CAP.httpsProxyPort
elif 'outbound_proxy_host' in self.cfg:
proxy_comment = ''
http_proxy = self.cfg['outbound_proxy_host']
http_proxy_port = self.cfg['outbound_proxy_port']
else:
proxy_comment = '#'
http_proxy = ''
http_proxy_port = ''
if self.cfg['proto']=='UDP' and proxy_comment!='#':
log.L.error("Cannot use outbound HTTP proxy to proxy UDP connection to OpenVPN!. Exiting.")
sys.exit(14)
if (config.CONFIG.CAP.vpncBlockDns):
bdns_comment='#'
log.L.warning("block-outside-dns not supported yet.")
else:
bdns_comment='#'
if (config.CONFIG.CAP.vpncBlockRoute):
rgw_comment='#'
else:
rgw_comment=''
pull_filter=""
if (config.CONFIG.CAP.vpncBlockDns):
pull_filter += "pull-filter ignore dhcp-option\n"
if (config.CONFIG.CAP.vpncBlockRoute):
pull_filter += "pull-filter ignore route\n"
pull_filter += "pull-filter ignore route-gateway\n"
self.cfg["tundev"] = config.Config.CAP.vpncTun
self.cfg["mgmtport"] = config.Config.CAP.vpncMgmtPort
if (config.CONFIG.isWindows()):
wc='#'
else:
wc=''
out = tmpl.decode("utf-8").format(
port=self.cfg['port'],
proto=self.cfg['proto'].lower(),
ip=self.cfg['endpoint'],
f_ca=ca,
tundev=self.cfg["tundev"],
tunnode=str(pathlib.Path(config.Config.PREFIX + '/dev/net/tun')).replace('\\','\\\\'),
reneg=60,
mtu=1400,
mssfix=1300,
hproxy_comment=proxy_comment,
http_proxy=http_proxy,
http_proxy_port=http_proxy_port,
payment_header=config.Config.CAP.authidHeader,
mgmt_header=config.Config.CAP.mgmtHeader,
mgmt_sock="127.0.0.1 %s" % self.cfg["mgmtport"],
rgw_comment=rgw_comment,
bdns_comment=bdns_comment,
auth_file=authfile,
pull_filters=pull_filter,
mgmt_comment=mgmt_comment,
comment_dn=wc,
comment_syslog=wc
)
try:
cf = open(self.cfgfile, "wb")
cf.write(out.encode())
log.L.warning("Configuration files created at %s" % (self.dir))
except (IOError, OSError):
log.L.error("Cannot write haproxy config file %s" % (self.cfgfile))
|
the-stack_0_24468 | # coding=utf-8
import torch
import fairseq
import os
#
# List available models
torch.hub.list('pytorch/fairseq', force_reload=True) # [..., 'transformer.wmt16.en-de', ... ]
# Load a transformer trained on WMT'16 En-De
# Note: WMT'19 models use fastBPE instead of subword_nmt, see instructions below
en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt16.en-de',
tokenizer='moses', bpe='subword_nmt')
en2de.eval() # disable dropout
en2de.translate('Hello world!')
# print(en2de.translate('Hello world!'))
# The underlying model is available under the *models* attribute
assert isinstance(en2de.models[0], fairseq.models.transformer.TransformerModel)
# Move model to GPU for faster translation
# en2de.cuda()
print(en2de.translate('I do not know. How are you feeling? Is it better?'))
# Batched translation
# print(en2de.translate(['Hello world!', 'The cat sat on the mat.']))
# ['Hallo Welt!', 'Die Katze saß auf der Matte.']
|
the-stack_0_24469 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test function extraction"""
import tvm
from tvm import relay
from tvm.relay.testing.resnet import get_workload
def get_conv_net():
"""This gets the net for a case described in fuse_ops.cc:
conv2d
/ | \
/ | \
op op op
\ | /
\ | /
elemwise add
|
"""
dshape = (1, 1, 5, 1)
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"),
kernel_size=(3, 3),
padding=(1, 1),
channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"),
kernel_size=(3, 3),
padding=(1, 1),
channels=1)
x2 = relay.nn.conv2d(y, relay.var("w3"),
kernel_size=(3, 3),
padding=(1, 1),
channels=1)
x3 = relay.nn.conv2d(y, relay.var("w4"),
kernel_size=(3, 3),
padding=(1, 1),
channels=1)
z = relay.add(x1, x2)
z = relay.add(x3, z)
return tvm.IRModule.from_expr(z)
def get_conv2d():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var('weight1', shape=(3, 3, 64, 32))
y = relay.nn.conv2d(x, weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout='NHWC',
kernel_layout='HWIO')
return tvm.IRModule.from_expr(y)
def test_extract_identity():
mod = get_conv2d()
items = relay.analysis.extract_fused_functions(mod)
assert len(items) == 1
mod["main"] = mod["main"].with_attr(
"Primitive", tvm.tir.IntImm("int32", 1))
relay.analysis.assert_graph_equal(list(items.values())[0], mod["main"])
def test_extract_conv_net():
mod = get_conv_net()
items = relay.analysis.extract_fused_functions(mod)
functions = list(items.values())
assert len(functions) == 2
x = functions[0]
y = functions[1]
def is_conv(func):
conv2d = relay.op.op.get("nn.conv2d")
call_node = func.body
return call_node.op == conv2d
def is_conv_add(func):
add = relay.op.op.get("add")
call_node = func.body
maybe_conv_module = tvm.IRModule.from_expr(call_node.args[0])
return call_node.op == add and is_conv(maybe_conv_module["main"])
# Function traversal order isn't obvious, so checking both orders is more consistent
assert (is_conv(x) and is_conv_add(y)) or (is_conv_add(x) and is_conv(y))
def test_extract_resnet():
mod, _params = get_workload()
items = relay.analysis.extract_fused_functions(mod)
assert len(items) == 34
if __name__ == '__main__':
test_extract_identity()
test_extract_conv_net()
test_extract_resnet()
|
the-stack_0_24470 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for StartMigrationWorkflow
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-bigquery-migration
# [START bigquerymigration_v2_generated_MigrationService_StartMigrationWorkflow_async]
from google.cloud import bigquery_migration_v2
async def sample_start_migration_workflow():
# Create a client
client = bigquery_migration_v2.MigrationServiceAsyncClient()
# Initialize request argument(s)
request = bigquery_migration_v2.StartMigrationWorkflowRequest(
name="name_value",
)
# Make the request
await client.start_migration_workflow(request=request)
# [END bigquerymigration_v2_generated_MigrationService_StartMigrationWorkflow_async]
|
the-stack_0_24471 | #! /usr/bin/env python
'''Both the regular Job and RecurringJob classes'''
import os
import time
import types
import traceback
import simplejson as json
from six.moves import reload_module
# Internal imports
from qless import logger
from qless.exceptions import LostLockException, QlessException
class BaseJob(object):
'''This is a dictionary of all the classes that we've seen, and
the last load time for each of them. We'll use this either for
the debug mode or the general mechanism'''
_loaded = {}
def __init__(self, client, **kwargs):
self.client = client
for att in ['jid', 'priority']:
object.__setattr__(self, att, kwargs[att])
object.__setattr__(self, 'klass_name', kwargs['klass'])
object.__setattr__(self, 'queue_name', kwargs['queue'])
# Because of how Lua parses JSON, empty tags comes through as {}
object.__setattr__(self, 'tags', kwargs['tags'] or [])
object.__setattr__(self, 'data', json.loads(kwargs['data']))
def __setattr__(self, key, value):
if key == 'priority':
return self.client('priority', self.jid,
value) and object.__setattr__(self, key, value)
else:
return object.__setattr__(self, key, value)
def __getattr__(self, key):
if key == 'queue':
# An actual queue instance
object.__setattr__(self, 'queue',
self.client.queues[self.queue_name])
return self.queue
elif key == 'klass':
# Get a reference to the provided klass
object.__setattr__(self, 'klass', self._import(self.klass_name))
return self.klass
raise AttributeError('%s has no attribute %s' % (
self.__class__.__module__ + '.' + self.__class__.__name__, key))
@staticmethod
def reload(klass):
'''Force a reload of this klass on next import'''
BaseJob._loaded[klass] = 0
@staticmethod
def _import(klass):
'''1) Get a reference to the module
2) Check the file that module's imported from
        3) If that file's been updated, force a reload of that module
        and return it'''
mod = __import__(klass.rpartition('.')[0])
for segment in klass.split('.')[1:-1]:
mod = getattr(mod, segment)
        # Alright, now check the file associated with it. Note that classes
        # defined in __main__ don't have a __file__ attribute
if klass not in BaseJob._loaded:
BaseJob._loaded[klass] = time.time()
if hasattr(mod, '__file__'):
try:
mtime = os.stat(mod.__file__).st_mtime
if BaseJob._loaded[klass] < mtime:
mod = reload_module(mod)
except OSError:
logger.warn('Could not check modification time of %s',
mod.__file__)
return getattr(mod, klass.rpartition('.')[2])
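    # Hedged usage sketch for _import (the dotted path is hypothetical):
    #   cls = BaseJob._import('myapp.jobs.ResizeImage')
    # returns the ResizeImage class, reloading myapp.jobs first if its source
    # file changed since the last time it was imported here.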
def cancel(self):
'''Cancel a job. It will be deleted from the system, the thinking
being that if you don't want to do any work on it, it shouldn't be in
the queuing system.'''
return self.client('cancel', self.jid)
def tag(self, *tags):
'''Tag a job with additional tags'''
return self.client('tag', 'add', self.jid, *tags)
def untag(self, *tags):
'''Remove tags from a job'''
return self.client('tag', 'remove', self.jid, *tags)
class Job(BaseJob):
'''The Job class'''
def __init__(self, client, **kwargs):
BaseJob.__init__(self, client, **kwargs)
self.client = client
for att in ['state', 'tracked', 'failure',
'history', 'dependents', 'dependencies']:
object.__setattr__(self, att, kwargs[att])
# The reason we're using object.__setattr__ directly is because
# we have __setattr__ defined for this class, and we're actually
# just interested in setting these members directly
object.__setattr__(self, 'expires_at', kwargs['expires'])
object.__setattr__(self, 'original_retries', kwargs['retries'])
object.__setattr__(self, 'retries_left', kwargs['remaining'])
object.__setattr__(self, 'worker_name', kwargs['worker'])
# Because of how Lua parses JSON, empty lists come through as {}
object.__setattr__(self, 'dependents', kwargs['dependents'] or [])
object.__setattr__(self, 'dependencies', kwargs['dependencies'] or [])
def __getattr__(self, key):
if key == 'ttl':
# How long until this expires, in seconds
return self.expires_at - time.time()
return BaseJob.__getattr__(self, key)
def __getitem__(self, key):
return self.data.get(key)
def __setitem__(self, key, value):
self.data[key] = value
def __repr__(self):
return '<%s %s>' % (self.klass_name, self.jid)
def process(self):
'''Load the module containing your class, and run the appropriate
method. For example, if this job was popped from the queue
``testing``, then this would invoke the ``testing`` staticmethod of
your class.'''
try:
method = getattr(self.klass, self.queue_name,
getattr(self.klass, 'process', None))
except Exception as exc:
# We failed to import the module containing this class
logger.exception('Failed to import %s', self.klass_name)
return self.fail(self.queue_name + '-' + exc.__class__.__name__,
'Failed to import %s' % self.klass_name)
if method:
if isinstance(method, types.FunctionType):
try:
logger.info('Processing %s in %s',
self.jid, self.queue_name)
method(self)
logger.info('Completed %s in %s',
self.jid, self.queue_name)
except Exception as exc:
# Make error type based on exception type
logger.exception('Failed %s in %s: %s',
self.jid, self.queue_name, repr(method))
self.fail(self.queue_name + '-' + exc.__class__.__name__,
traceback.format_exc())
else:
# Or fail with a message to that effect
logger.error('Failed %s in %s : %s is not static',
self.jid, self.queue_name, repr(method))
self.fail(self.queue_name + '-method-type',
repr(method) + ' is not static')
else:
# Or fail with a message to that effect
logger.error(
'Failed %s : %s is missing a method "%s" or "process"',
self.jid, self.klass_name, self.queue_name)
self.fail(self.queue_name + '-method-missing', self.klass_name +
' is missing a method "' + self.queue_name + '" or "process"')
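    # Example (a minimal sketch, not part of qless): a user-defined job class.
    # ``testing`` is a hypothetical queue name; a ``process`` staticmethod acts
    # as the fallback when no queue-specific staticmethod exists.
    #
    #   class MyJobClass(object):
    #       @staticmethod
    #       def testing(job):
    #           # job.data is available via job['key']
    #           job.complete()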
def move(self, queue, delay=0, depends=None):
'''Move this job out of its existing state and into another queue. If
a worker has been given this job, then that worker's attempts to
heartbeat that job will fail. Like ``Queue.put``, this accepts a
delay, and dependencies'''
logger.info('Moving %s to %s from %s',
self.jid, queue, self.queue_name)
return self.client('put', self.worker_name,
queue, self.jid, self.klass_name,
json.dumps(self.data), delay, 'depends', json.dumps(depends or [])
)
def complete(self, nextq=None, delay=None, depends=None):
'''Turn this job in as complete, optionally advancing it to another
queue. Like ``Queue.put`` and ``move``, it accepts a delay, and
dependencies'''
if nextq:
logger.info('Advancing %s to %s from %s',
self.jid, nextq, self.queue_name)
return self.client('complete', self.jid, self.client.worker_name,
self.queue_name, json.dumps(self.data), 'next', nextq,
'delay', delay or 0, 'depends',
json.dumps(depends or [])) or False
else:
logger.info('Completing %s', self.jid)
return self.client('complete', self.jid, self.client.worker_name,
self.queue_name, json.dumps(self.data)) or False
def heartbeat(self):
'''Renew the heartbeat, if possible, and optionally update the job's
user data.'''
logger.debug('Heartbeating %s (ttl = %s)', self.jid, self.ttl)
try:
self.expires_at = float(self.client('heartbeat', self.jid,
self.client.worker_name, json.dumps(self.data)) or 0)
except QlessException:
raise LostLockException(self.jid)
logger.debug('Heartbeated %s (ttl = %s)', self.jid, self.ttl)
return self.expires_at
def fail(self, group, message):
        '''Mark the particular job as failed, with the provided group, and a
        more specific message. By `group`, we mean some phrase that might be
        one of several categorical modes of failure. The `message` is
something more job-specific, like perhaps a traceback.
This method should __not__ be used to note that a job has been dropped
or has failed in a transient way. This method __should__ be used to
note that a job has something really wrong with it that must be
remedied.
        The motivation behind the `group` is so that similar errors can be
grouped together. Optionally, updated data can be provided for the job.
A job in any state can be marked as failed. If it has been given to a
worker as a job, then its subsequent requests to heartbeat or complete
that job will fail. Failed jobs are kept until they are canceled or
completed. __Returns__ the id of the failed job if successful, or
`False` on failure.'''
        logger.warning('Failing %s (%s): %s', self.jid, group, message)
return self.client('fail', self.jid, self.client.worker_name, group,
message, json.dumps(self.data)) or False
def track(self):
'''Begin tracking this job'''
return self.client('track', 'track', self.jid)
def untrack(self):
'''Stop tracking this job'''
return self.client('track', 'untrack', self.jid)
def retry(self, delay=0, group=None, message=None):
'''Retry this job in a little bit, in the same queue. This is meant
for the times when you detect a transient failure yourself'''
args = ['retry', self.jid, self.queue_name, self.worker_name, delay]
if group is not None and message is not None:
args.append(group)
args.append(message)
return self.client(*args)
def depend(self, *args):
'''If and only if a job already has other dependencies, this will add
more jids to the list of this job's dependencies.'''
return self.client('depends', self.jid, 'on', *args) or False
def undepend(self, *args, **kwargs):
'''Remove specific (or all) job dependencies from this job:
job.remove(jid1, jid2)
job.remove(all=True)'''
if kwargs.get('all', False):
return self.client('depends', self.jid, 'off', 'all') or False
else:
return self.client('depends', self.jid, 'off', *args) or False
def timeout(self):
'''Time out this job'''
self.client('timeout', self.jid)
class RecurringJob(BaseJob):
'''Recurring Job object'''
def __init__(self, client, **kwargs):
BaseJob.__init__(self, client, **kwargs)
for att in ['jid', 'priority', 'tags',
'retries', 'interval', 'count']:
object.__setattr__(self, att, kwargs[att])
object.__setattr__(self, 'client', client)
object.__setattr__(self, 'klass_name', kwargs['klass'])
object.__setattr__(self, 'queue_name', kwargs['queue'])
object.__setattr__(self, 'tags', self.tags or [])
object.__setattr__(self, 'data', json.loads(kwargs['data']))
def __setattr__(self, key, value):
if key in ('priority', 'retries', 'interval'):
return self.client('recur.update', self.jid, key,
value) and object.__setattr__(self, key, value)
if key == 'data':
return self.client('recur.update', self.jid, key,
json.dumps(value)) and object.__setattr__(self, 'data', value)
if key == 'klass':
name = value.__module__ + '.' + value.__name__
return self.client('recur.update', self.jid, 'klass',
name) and object.__setattr__(self, 'klass_name',
name) and object.__setattr__(self, 'klass', value)
return object.__setattr__(self, key, value)
def __getattr__(self, key):
if key == 'next':
# The time (seconds since epoch) until the next time it's run
return self.client.redis.zscore(
'ql:q:' + self.queue_name + '-recur', self.jid)
return BaseJob.__getattr__(self, key)
def move(self, queue):
'''Make this recurring job attached to another queue'''
return self.client('recur.update', self.jid, 'queue', queue)
def cancel(self):
'''Cancel all future recurring jobs'''
self.client('unrecur', self.jid)
def tag(self, *tags):
'''Add tags to this recurring job'''
return self.client('recur.tag', self.jid, *tags)
def untag(self, *tags):
'''Remove tags from this job'''
return self.client('recur.untag', self.jid, *tags)
|
the-stack_0_24472 | #!/usr/bin/env python
# encoding: utf-8
# Hans-Martin von Gaudecker, 2012-14
"""
Run a R script in the directory specified by **ctx.bldnode**.
Strings supplied to the **prepend** and **append** keywords will be
added to the command line.
Usage::
ctx(
features='run_r_script',
source='some_script.r',
target=['some_table.tex', 'some_figure.eps'],
deps='some_data.csv',
append='',
prepend=''
)
"""
from waflib import Task, TaskGen, Logs
R_COMMANDS = ['RScript', 'Rscript']
def configure(ctx):
ctx.find_program(
R_COMMANDS,
var='RCMD',
errmsg="""\n
No R executable found!\n\n
If R is needed:\n
1) Check the settings of your system path.
2) Note we are looking for R executables called: %s
If yours has a different name, please report to hmgaudecker [at] gmail\n
Else:\n
Do not load the 'run_r_script' tool in the main wscript.\n\n"""
% R_COMMANDS
)
ctx.env.RFLAGS = ''
class run_r_script(Task.Task):
"""Run a R script."""
run_str = '${PREPEND} "${RCMD}" ${RFLAGS} "${SRC[0].abspath()}" ${APPEND}'
shell = True
def exec_command(self, cmd, **kw):
bld = self.generator.bld
try:
if not kw.get('cwd', None):
kw['cwd'] = bld.cwd
except AttributeError:
bld.cwd = kw['cwd'] = bld.variant_dir
if not self.buffer_output:
kw["stdout"] = kw["stderr"] = None
return bld.exec_command(cmd, **kw)
def keyword(self):
"""
Override the 'Compiling' default.
"""
return 'Running'
def __str__(self):
"""
More useful output.
"""
return "{prepend} [R] {rflags} {fn} {append}".format(
prepend=self.env.PREPEND,
rflags=self.env.RFLAGS,
fn=self.inputs[0].path_from(self.inputs[0].ctx.launch_node()),
append=self.env.APPEND
)
@TaskGen.feature('run_r_script')
@TaskGen.before_method('process_source')
def apply_run_r_script(tg):
"""Task generator customising the options etc. to call R in batch
mode for running a R script.
"""
# Convert sources and targets to nodes
src_node = tg.path.find_resource(tg.source)
tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)]
tsk = tg.create_task('run_r_script', src=src_node, tgt=tgt_nodes)
tsk.env.APPEND = getattr(tg, 'append', '')
tsk.env.PREPEND = getattr(tg, 'prepend', '')
tsk.buffer_output = getattr(tg, 'buffer_output', True)
# dependencies (if the attribute 'deps' changes, trigger a recompilation)
for x in tg.to_list(getattr(tg, 'deps', [])):
node = tg.path.find_resource(x)
if not node:
tg.bld.fatal(
'Could not find dependency %r for running %r'
% (x, src_node.relpath())
)
else:
tsk.dep_nodes.append(node)
Logs.debug(
'deps: found dependencies %r for running %r' % (
tsk.dep_nodes, src_node.relpath())
)
# Bypass the execution of process_source by setting the source to an empty
# list
tg.source = []
|
the-stack_0_24474 | # Copyright 2021 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytation import time
import argparse
import importlib
import os
_LOG_PATH_DEFAULT = '{base_path}/{station}/log/{station_timestr}_{process_id}.log'
_OUTPUT_PATH_DEFAULT = '{base_path}/{station}/data/{suite_timestr}.zip'
_PROGRESS_PATH_DEFAULT = '{base_path}/{station}/progress.csv'
_DEVICE_LIFECYCLE = ['station', 'suite', 'test', 'manual'] # defaults to 'station'
SETUP_TEARDOWN_FN = [
'station_setup', 'station_teardown',
'suite_setup', 'suite_teardown',
'test_setup', 'test_teardown',
]
ENV_EXCLUDE = ['error_count', 'suite_timestamp', 'suite_timestr', 'suite_isostr']
ENV_DEFAULTS = {
'error_count_to_halt': 1,
}
def parser_config(p: argparse.ArgumentParser, station=None):
"""Add station definition to an argparse.
:param p: The argument parser instance, which will be modified
in place.
:param station: If provided, use the default station value with
an optional '--station' override. None (default) adds the station
as a fixed position argument.
"""
if station is not None:
p.add_argument('--station',
default=station,
help='The fully-qualified station definition')
else:
p.add_argument('station',
help='The fully-qualified station definition')
p.add_argument('--exclude',
help='The comma-separated list of tests to exlude. Defaults to "".')
p.add_argument('--include',
help='The comma-separated list of tests to include. Defaults to all available tests.')
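# Example usage (a minimal sketch; 'my_pkg.stations.STATION' is a hypothetical
# fully-qualified station definition, not something provided by this module):
#
#   parser = argparse.ArgumentParser()
#   parser_config(parser, station='my_pkg.stations.STATION')
#   args = parser.parse_args(['--include', 'test_a,test_b'])
#   station = load(args)   # validated station containing only test_a and test_b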
def _states_validate(states):
# Shallow copy and set name field
d = {}
for name, s in states.items():
s = dict(s)
s['name'] = name
d[name] = s
return d
def _test_validate(test):
if test is None:
return None
t = dict(test)
fn = t['fn']
if isinstance(fn, str):
parts = fn.split('.')
fn_name = parts[-1]
module_name = '.'.join(parts[:-1])
module = importlib.import_module(module_name)
fn = getattr(module, fn_name)
t['fn'] = fn
t.setdefault('name', getattr(fn, 'NAME', getattr(fn, '__name__', '__unknown__')))
t.setdefault('config', {})
if 'devices' not in t:
t['devices'] = getattr(fn, 'DEVICES', [])
return t
def _tests_validate(test_list):
d = []
names = {}
for t in test_list:
t = _test_validate(t)
name = t['name']
if name in names:
raise ValueError(f'Duplicate test name: {name}')
d.append(t)
names[name] = t
return d
def _devices_validate(devices_list):
"""Convert self._station['devices'] from list of defs to dict name:def."""
devices_map = {}
for d in devices_list:
d = dict(d)
clz = d['clz']
if 'name' in d:
name = d['name']
elif hasattr(clz, 'NAME'):
name = clz.NAME
else:
name = clz.__name__
d['name'] = name
d.setdefault('lifecycle', 'station')
d.setdefault('config', {})
if d['lifecycle'] not in _DEVICE_LIFECYCLE:
raise ValueError(f'invalid device lifecycle {d["lifecycle"]} for {name}')
if name in devices_map:
raise ValueError('Duplicate device name: %s', name)
devices_map[name] = d
return devices_map
def validate(station):
"""Validate the station and fully populate optional fields.
:param station: The station data structure.
:return: The station modified in place.
"""
s = {}
s['name'] = station['name']
s['full_name'] = station.get('full_name', station['name'])
# Construct the environment
station_start_time = time.now()
env = {}
env_no_override = {
'station': station['name'],
'process_id': os.getpid(),
'error_count': 0,
'station_timestamp': station_start_time,
'station_timestr': time.time_to_filename(station_start_time),
'station_isostr': time.time_to_isostr(station_start_time),
# updated at the start of each suite
'suite_timestamp': 0,
'suite_timestr': time.time_to_filename(0),
'suite_isostr': time.time_to_isostr(0),
}
env.update(station.get('env', {}))
env.update(env_no_override)
for key, value in ENV_DEFAULTS.items():
env.setdefault(key, value)
s['env'] = env
# Construct the station
paths = station.get('paths', {})
paths.setdefault('base_path', os.path.join(os.path.expanduser('~'), 'pytation'))
paths.setdefault('log', _LOG_PATH_DEFAULT)
paths.setdefault('output', _OUTPUT_PATH_DEFAULT)
paths.setdefault('progress', _PROGRESS_PATH_DEFAULT)
s['paths'] = paths
s['states'] = _states_validate(station.get('states', {}))
s['tests'] = _tests_validate(station['tests'])
s['devices'] = _devices_validate(station['devices'])
for k in SETUP_TEARDOWN_FN:
s[k] = _test_validate(station.get(k, None))
s['gui_resources'] = station.get('gui_resources', [])
return s
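# Example (a minimal sketch of a station definition accepted by validate();
# 'my_pkg.tests.power_on' is a hypothetical test function path):
#
#   STATION = {
#       'name': 'demo',
#       'tests': [{'fn': 'my_pkg.tests.power_on'}],
#       'devices': [],
#   }
#   station = validate(STATION)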
def load(args):
"""Load a station from the command-line arguments.
:param args: The command-line arguments.
:return: The station, which is also fully validated.
:see: parser_config()
:see: validate()
"""
parts = args.station.split('.')
def_name = parts[-1]
module_name = '.'.join(parts[:-1])
module = importlib.import_module(module_name)
station = getattr(module, def_name)
station = validate(station)
if args.exclude is not None:
exclude = args.exclude.split(',')
tests = []
for test in station['tests']:
if test['name'] in exclude:
exclude.remove(test['name'])
else:
tests.append(test)
if len(exclude):
raise ValueError(f'Excluded tests not found: {exclude}')
station['tests'] = tests
if args.include is not None:
include = args.include.split(',')
tests = []
for test in station['tests']:
if test['name'] in include:
include.remove(test['name'])
tests.append(test)
if len(include):
raise ValueError(f'Include tests not found: {include}')
station['tests'] = tests
return station
|
the-stack_0_24476 | import json
import parser
userInput = ""
searchTreeRoot = None
def getExistingFileNameFromUser(prompt: str="Enter file name: "):
while True:
file = input(prompt)
try:
with open(file, "r") as inFile:
pass
break
except FileNotFoundError as e:
print(e)
print("Try again")
return file
while True:
userInput = input("Enter search mode(h for help): ")
if userInput == "h":
print("lj: load pre computed json format file")
print("lf: load strings from text file")
print("s: search mode")
print("wj: write computed json file")
print("q: quit")
elif userInput == "lj":
assert "Not yet implemented"
elif userInput == "lf":
fileName = getExistingFileNameFromUser()
with open(fileName, "r") as inFile:
searchTreeRoot = parser.makeSearchTreeFromLines(inFile.readlines())
elif userInput == "s":
searchString = input("Enter string to be searched: ")
result = parser.searchTreeForString(searchTreeRoot, searchString)
for i in result:
print(i)
elif userInput == "wj":
fileName = input("Enter file name to write out to: ")
print(f"Writing {fileName}")
with open(fileName, "w") as outFile:
outFile.write(json.dumps(searchTreeRoot.getDict()))
print("Done")
elif userInput == "q":
print("Bye")
break
else:
print("Unknown command")
|
the-stack_0_24477 | from hydroDL import pathSMAP, master
import os
from hydroDL.data import dbCsv
optData = master.default.update(
master.default.optDataSMAP,
rootDB=pathSMAP['DB_L3_Global'],
subset='Globalv4f1_NorthAmerica',
tRange=[20150401, 20160401],
varT=dbCsv.varForcingGlobal)
optModel = master.default.optLstm
optLoss = master.default.optLossSigma
optTrain = master.default.optTrainSMAP
out = os.path.join(pathSMAP['Out_L3_Global'], 'test')
masterDict = master.wrapMaster(out, optData, optModel, optLoss, optTrain)
master.train(masterDict)
|
the-stack_0_24478 | from typing import List, Dict, Union
import torch
from collections import Counter
from collections import defaultdict
from segtok.segmenter import split_single
from segtok.tokenizer import split_contractions
from segtok.tokenizer import word_tokenizer
class Dictionary:
"""
This class holds a dictionary that maps strings to IDs, used to generate one-hot encodings of strings.
"""
def __init__(self, add_unk=True):
# init dictionaries
self.item2idx: Dict[str, int] = {}
self.idx2item: List[str] = []
# in order to deal with unknown tokens, add <unk>
if add_unk:
self.add_item('<unk>')
def add_item(self, item: str) -> int:
"""
        Add a string to the dictionary. If it is already present, return its ID; otherwise assign and return a new ID.
:param item: a string for which to assign an id
:return: ID of string
"""
item = item.encode('utf-8')
if item not in self.item2idx:
self.idx2item.append(item)
self.item2idx[item] = len(self.idx2item) - 1
return self.item2idx[item]
def get_idx_for_item(self, item: str) -> int:
"""
returns the ID of the string, otherwise 0
:param item: string for which ID is requested
:return: ID of string, otherwise 0
"""
item = item.encode('utf-8')
if item in self.item2idx.keys():
return self.item2idx[item]
else:
return 0
def get_items(self) -> List[str]:
items = []
for item in self.idx2item:
items.append(item.decode('UTF-8'))
return items
def __len__(self) -> int:
return len(self.idx2item)
def get_item_for_index(self, idx):
return self.idx2item[idx].decode('UTF-8')
def save(self, savefile):
import pickle
with open(savefile, 'wb') as f:
mappings = {
'idx2item': self.idx2item,
'item2idx': self.item2idx
}
pickle.dump(mappings, f)
@classmethod
def load_from_file(cls, filename: str):
import pickle
dictionary: Dictionary = Dictionary()
with open(filename, 'rb') as f:
mappings = pickle.load(f, encoding='latin1')
idx2item = mappings['idx2item']
item2idx = mappings['item2idx']
dictionary.item2idx = item2idx
dictionary.idx2item = idx2item
return dictionary
@classmethod
def load(cls, name: str):
from flairrelex.file_utils import cached_path
if name == 'chars' or name == 'common-chars':
base_path = 'https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/models/common_characters'
char_dict = cached_path(base_path, cache_dir='datasets')
return Dictionary.load_from_file(char_dict)
return Dictionary.load_from_file(name)
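# Example (a minimal sketch): build a Dictionary and look items up.
#
#   vocab = Dictionary(add_unk=True)
#   vocab.add_item('hello')            # -> 1 ('<unk>' already occupies index 0)
#   vocab.get_idx_for_item('hello')    # -> 1
#   vocab.get_idx_for_item('unseen')   # -> 0, i.e. the '<unk>' index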
class Label:
"""
    This class represents a label of a sentence. Each label has a name and, optionally, a confidence value. The confidence
value needs to be between 0.0 and 1.0. Default value for the confidence is 1.0.
"""
def __init__(self, name: str, confidence: float = 1.0):
self.name = name
self.confidence = confidence
@property
def name(self):
return self._name
@name.setter
def name(self, name):
if not name:
raise ValueError('Incorrect label name provided. Label name needs to be set.')
else:
self._name = name
@property
def confidence(self):
return self._confidence
@confidence.setter
def confidence(self, confidence):
if 0.0 <= confidence <= 1.0:
self._confidence = confidence
else:
self._confidence = 0.0
def __str__(self):
return "{} ({})".format(self._name, self._confidence)
def __repr__(self):
return "{} ({})".format(self._name, self._confidence)
class Token:
"""
This class represents one word in a tokenized sentence. Each token may have any number of tags. It may also point
to its head in a dependency tree.
"""
def __init__(self,
text: str,
idx: int = None,
head_id: int = None,
whitespace_after: bool = True,
):
self.text: str = text
self.idx: int = idx
self.head_id: int = head_id
self.whitespace_after: bool = whitespace_after
self.sentence: Sentence = None
self._embeddings: Dict = {}
self.tags: Dict[str, str] = {}
def add_tag(self, tag_type: str, tag_value: str):
self.tags[tag_type] = tag_value
def get_tag(self, tag_type: str) -> str:
if tag_type in self.tags: return self.tags[tag_type]
return ''
def get_head(self):
return self.sentence.get_token(self.head_id)
def __str__(self) -> str:
return 'Token: %d %s' % (self.idx, self.text)
def __repr__(self) -> str:
return 'Token: %d %s' % (self.idx, self.text)
def set_embedding(self, name: str, vector: torch.autograd.Variable):
self._embeddings[name] = vector.cpu()
def clear_embeddings(self):
self._embeddings: Dict = {}
def get_embedding(self) -> torch.autograd.Variable:
embeddings = []
for embed in sorted(self._embeddings.keys()):
embeddings.append(self._embeddings[embed])
if embeddings:
return torch.cat(embeddings, dim=0)
return torch.FloatTensor()
@property
def embedding(self):
return self.get_embedding()
class Sentence:
def __init__(self, text: str = None, use_tokenizer: bool = False, labels: Union[List[Label], List[str]] = None):
super(Sentence, self).__init__()
self.tokens: List[Token] = []
self.labels: List[Label] = []
if labels is not None: self.add_labels(labels)
self._embeddings: Dict = {}
# if text is passed, instantiate sentence with tokens (words)
if text is not None:
# tokenize the text first if option selected
if use_tokenizer:
# use segtok for tokenization
tokens = []
sentences = split_single(text)
for sentence in sentences:
contractions = split_contractions(word_tokenizer(sentence))
tokens.extend(contractions)
# determine offsets for whitespace_after field
index = text.index
running_offset = 0
last_word_offset = -1
last_token = None
for word in tokens:
token = Token(word)
self.add_token(token)
try:
word_offset = index(word, running_offset)
                    except ValueError:
word_offset = last_word_offset + 1
if word_offset - 1 == last_word_offset and last_token is not None:
last_token.whitespace_after = False
word_len = len(word)
running_offset = word_offset + word_len
last_word_offset = running_offset - 1
last_token = token
# otherwise assumes whitespace tokenized text
else:
# add each word in tokenized string as Token object to Sentence
for word in text.split(' '):
if word:
token = Token(word)
self.add_token(token)
def _infer_space_after(self):
"""
Heuristics in case you wish to infer whitespace_after values for tokenized text. This is useful for some old NLP
        tasks (such as CoNLL-03 and CoNLL-2000) that provide only tokenized data with no information about the original whitespace.
:return:
"""
last_token = None
quote_count: int = 0
# infer whitespace after field
for token in self.tokens:
if token.text == '"':
quote_count += 1
if quote_count % 2 != 0:
token.whitespace_after = False
elif last_token is not None:
last_token.whitespace_after = False
if last_token is not None:
if token.text in ['.', ':', ',', ';', ')', 'n\'t', '!', '?']:
last_token.whitespace_after = False
if token.text.startswith('\''):
last_token.whitespace_after = False
if token.text in ['(']:
token.whitespace_after = False
last_token = token
return self
def __getitem__(self, idx: int) -> Token:
return self.tokens[idx]
def __iter__(self):
return iter(self.tokens)
def add_label(self, label: Union[Label, str]):
if type(label) is Label:
self.labels.append(label)
elif type(label) is str:
self.labels.append(Label(label))
def add_labels(self, labels: Union[List[Label], List[str]]):
for label in labels:
self.add_label(label)
def get_label_names(self) -> List[str]:
return [label.name for label in self.labels]
def get_token(self, token_id: int) -> Token:
for token in self.tokens:
if token.idx == token_id:
return token
def add_token(self, token: Token):
self.tokens.append(token)
# set token idx if not set
token.sentence = self
if token.idx is None:
token.idx = len(self.tokens)
def set_embedding(self, name: str, vector):
self._embeddings[name] = vector.cpu()
def clear_embeddings(self, also_clear_word_embeddings: bool = True):
self._embeddings: Dict = {}
if also_clear_word_embeddings:
for token in self:
token.clear_embeddings()
def cpu_embeddings(self):
for name, vector in self._embeddings.items():
self._embeddings[name] = vector.cpu()
def get_embedding(self) -> torch.autograd.Variable:
embeddings = []
for embed in sorted(self._embeddings.keys()):
embedding = self._embeddings[embed]
embeddings.append(embedding)
if embeddings:
return torch.cat(embeddings, dim=0)
return torch.FloatTensor()
@property
def embedding(self):
return self.get_embedding()
def to_tagged_string(self) -> str:
list = []
for token in self.tokens:
list.append(token.text)
tags = []
for tag_type in token.tags.keys():
if token.get_tag(tag_type) == '' or token.get_tag(tag_type) == 'O': continue
tags.append(token.get_tag(tag_type))
all_tags = '<' + '/'.join(tags) + '>'
if all_tags != '<>':
list.append(all_tags)
return ' '.join(list)
def convert_tag_scheme(self, tag_type: str = 'ner', target_scheme: str = 'iob'):
tags: List[str] = []
for token in self.tokens:
token: Token = token
tags.append(token.get_tag(tag_type))
if target_scheme == 'iob':
iob2(tags)
if target_scheme == 'iobes':
iob2(tags)
tags = iob_iobes(tags)
for index, tag in enumerate(tags):
self.tokens[index].add_tag(tag_type, tag)
def __repr__(self):
return 'Sentence: "' + ' '.join([t.text for t in self.tokens]) + '" - %d Tokens' % len(self)
def __copy__(self):
s = Sentence()
for token in self.tokens:
nt = Token(token.text)
for tag_type in token.tags:
nt.add_tag(tag_type, token.get_tag(tag_type))
s.add_token(nt)
return s
def __str__(self) -> str:
return 'Sentence: "' + ' '.join([t.text for t in self.tokens]) + '" - %d Tokens' % len(self)
def __len__(self) -> int:
return len(self.tokens)
def to_tokenized_string(self) -> str:
return ' '.join([t.text for t in self.tokens])
def to_plain_string(self):
plain = ''
for token in self.tokens:
plain += token.text
if token.whitespace_after: plain += ' '
return plain.rstrip()
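# Example (a minimal sketch): tokenize a sentence and tag one token.
#
#   sentence = Sentence('The grass is green .', use_tokenizer=True)
#   sentence[3].add_tag('ner', 'B-COLOR')
#   sentence.to_tagged_string()   # 'The grass is green <B-COLOR> .'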
class TaggedCorpus:
def __init__(self, train: List[Sentence], dev: List[Sentence], test: List[Sentence]):
self.train: List[Sentence] = train
self.dev: List[Sentence] = dev
self.test: List[Sentence] = test
def downsample(self, percentage: float = 0.1, only_downsample_train=False):
self.train = self._downsample_to_proportion(self.train, percentage)
if not only_downsample_train:
self.dev = self._downsample_to_proportion(self.dev, percentage)
self.test = self._downsample_to_proportion(self.test, percentage)
return self
def clear_embeddings(self):
for sentence in self.get_all_sentences():
for token in sentence.tokens:
token.clear_embeddings()
def get_all_sentences(self) -> List[Sentence]:
all_sentences: List[Sentence] = []
all_sentences.extend(self.train)
all_sentences.extend(self.dev)
all_sentences.extend(self.test)
return all_sentences
def make_tag_dictionary(self, tag_type: str) -> Dictionary:
# Make the tag dictionary
tag_dictionary: Dictionary = Dictionary()
tag_dictionary.add_item('O')
for sentence in self.get_all_sentences():
for token in sentence.tokens:
token: Token = token
tag_dictionary.add_item(token.get_tag(tag_type))
tag_dictionary.add_item('<START>')
tag_dictionary.add_item('<STOP>')
return tag_dictionary
def make_label_dictionary(self) -> Dictionary:
"""
Creates a dictionary of all labels assigned to the sentences in the corpus.
:return: dictionary of labels
"""
labels = set(self._get_all_label_names())
label_dictionary: Dictionary = Dictionary(add_unk=False)
for label in labels:
label_dictionary.add_item(label)
return label_dictionary
def make_vocab_dictionary(self, max_tokens=-1, min_freq=1) -> Dictionary:
"""
Creates a dictionary of all tokens contained in the corpus.
By defining `max_tokens` you can set the maximum number of tokens that should be contained in the dictionary.
If there are more than `max_tokens` tokens in the corpus, the most frequent tokens are added first.
        If `min_freq` is set to a value greater than 1, only tokens occurring at least `min_freq` times are considered
        for addition to the dictionary.
:param max_tokens: the maximum number of tokens that should be added to the dictionary (-1 = take all tokens)
:param min_freq: a token needs to occur at least `min_freq` times to be added to the dictionary (-1 = there is no limitation)
:return: dictionary of tokens
"""
tokens = self._get_most_common_tokens(max_tokens, min_freq)
vocab_dictionary: Dictionary = Dictionary()
for token in tokens:
vocab_dictionary.add_item(token)
return vocab_dictionary
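    # Example (a minimal sketch): keep at most the 10 most frequent tokens that
    # occur at least twice.
    #
    #   vocab = corpus.make_vocab_dictionary(max_tokens=10, min_freq=2)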
def _get_most_common_tokens(self, max_tokens, min_freq) -> List[str]:
tokens_and_frequencies = Counter(self._get_all_tokens())
tokens_and_frequencies = tokens_and_frequencies.most_common()
tokens = []
for token, freq in tokens_and_frequencies:
if (min_freq != -1 and freq < min_freq) or (max_tokens != -1 and len(tokens) == max_tokens):
break
tokens.append(token)
return tokens
def _get_all_label_names(self) -> List[str]:
return [label.name for sent in self.train for label in sent.labels]
def _get_all_tokens(self) -> List[str]:
tokens = list(map((lambda s: s.tokens), self.train))
tokens = [token for sublist in tokens for token in sublist]
return list(map((lambda t: t.text), tokens))
def _downsample_to_proportion(self, list: List, proportion: float):
counter = 0.0
last_counter = None
downsampled: List = []
for item in list:
counter += proportion
if int(counter) != last_counter:
downsampled.append(item)
last_counter = int(counter)
return downsampled
def print_statistics(self):
"""
Print statistics about the class distribution (only labels of sentences are taken into account) and sentence
sizes.
"""
self._print_statistics_for(self.train, "TRAIN")
self._print_statistics_for(self.test, "TEST")
self._print_statistics_for(self.dev, "DEV")
@staticmethod
def _print_statistics_for(sentences, name):
if len(sentences) == 0:
return
classes_to_count = TaggedCorpus._get_classes_to_count(sentences)
tokens_per_sentence = TaggedCorpus._get_tokens_per_sentence(sentences)
print(name)
print("total size: " + str(len(sentences)))
for l, c in classes_to_count.items():
print("size of class {}: {}".format(l, c))
print("total # of tokens: " + str(sum(tokens_per_sentence)))
print("min # of tokens: " + str(min(tokens_per_sentence)))
print("max # of tokens: " + str(max(tokens_per_sentence)))
print("avg # of tokens: " + str(sum(tokens_per_sentence) / len(sentences)))
@staticmethod
def _get_tokens_per_sentence(sentences):
return list(map(lambda x: len(x.tokens), sentences))
@staticmethod
def _get_classes_to_count(sentences):
classes_to_count = defaultdict(lambda: 0)
for sent in sentences:
for label in sent.labels:
classes_to_count[label.name] += 1
return classes_to_count
def __str__(self) -> str:
return 'TaggedCorpus: %d train + %d dev + %d test sentences' % (len(self.train), len(self.dev), len(self.test))
def iob2(tags):
"""
Check that tags have a valid IOB format.
Tags in IOB1 format are converted to IOB2.
"""
for i, tag in enumerate(tags):
if tag == 'O':
continue
split = tag.split('-')
if len(split) != 2 or split[0] not in ['I', 'B']:
return False
if split[0] == 'B':
continue
elif i == 0 or tags[i - 1] == 'O': # conversion IOB1 to IOB2
tags[i] = 'B' + tag[1:]
elif tags[i - 1][1:] == tag[1:]:
continue
else: # conversion IOB1 to IOB2
tags[i] = 'B' + tag[1:]
return True
def iob_iobes(tags):
"""
IOB -> IOBES
"""
new_tags = []
for i, tag in enumerate(tags):
if tag == 'O':
new_tags.append(tag)
elif tag.split('-')[0] == 'B':
if i + 1 != len(tags) and \
tags[i + 1].split('-')[0] == 'I':
new_tags.append(tag)
else:
new_tags.append(tag.replace('B-', 'S-'))
elif tag.split('-')[0] == 'I':
if i + 1 < len(tags) and \
tags[i + 1].split('-')[0] == 'I':
new_tags.append(tag)
else:
new_tags.append(tag.replace('I-', 'E-'))
else:
raise Exception('Invalid IOB format!')
return new_tags
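# Example (a minimal sketch): converting IOB1-style tags to IOB2 and then IOBES.
#
#   tags = ['I-PER', 'I-PER', 'O', 'I-LOC']
#   iob2(tags)        # tags becomes ['B-PER', 'I-PER', 'O', 'B-LOC']
#   iob_iobes(tags)   # -> ['B-PER', 'E-PER', 'O', 'S-LOC']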
|
the-stack_0_24479 | """ SEC Model """
__docformat__ = "numpy"
import logging
from datetime import datetime
import pandas as pd
import requests
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_fails_to_deliver(
ticker: str,
start: datetime,
end: datetime,
num: int,
):
"""Display fails-to-deliver data for a given ticker. [Source: SEC]
Parameters
----------
ticker : str
Stock ticker
start : datetime
Start of data
end : datetime
End of data
num : int
        Number of most recent fails-to-deliver entries to return; if 0 or less,
        the data is filtered by the start and end dates instead

    Returns
    -------
    pd.DataFrame
        Fails-to-deliver data with settlement dates and quantities
    """
ftds_data = pd.DataFrame()
# Filter by number of last FTD
if num > 0:
url_ftds = "https://www.sec.gov/data/foiadocsfailsdatahtm"
text_soup_ftds = BeautifulSoup(
requests.get(url_ftds, headers={"User-Agent": get_user_agent()}).text,
"lxml",
)
table = text_soup_ftds.find("table", {"class": "list"})
links = table.findAll("a")
link_idx = 0
while len(ftds_data) < num:
            if link_idx >= len(links):
break
link = links[link_idx]
url = "https://www.sec.gov" + link["href"]
all_ftds = pd.read_csv(
url,
compression="zip",
sep="|",
engine="python",
skipfooter=2,
usecols=[0, 2, 3, 5],
dtype={"QUANTITY (FAILS)": "int"},
encoding="iso8859",
)
tmp_ftds = all_ftds[all_ftds["SYMBOL"] == ticker]
del tmp_ftds["PRICE"]
del tmp_ftds["SYMBOL"]
# merge the data from this archive
ftds_data = pd.concat([ftds_data, tmp_ftds], ignore_index=True)
link_idx += 1
# clip away extra rows
ftds_data = ftds_data.sort_values("SETTLEMENT DATE")[-num:]
ftds_data["SETTLEMENT DATE"] = ftds_data["SETTLEMENT DATE"].apply(
lambda x: datetime.strptime(str(x), "%Y%m%d")
)
# Filter by start and end dates for FTD
else:
base_url = "https://www.sec.gov/files/data/fails-deliver-data/cnsfails"
ftd_dates = []
for y in range(start.year, end.year + 1):
if y < end.year:
for a_month in range(start.month, 13):
formatted_month = f"{a_month:02d}"
if a_month == start.month and y == start.year:
if start.day < 16:
ftd_dates.append(str(y) + formatted_month + "a")
ftd_dates.append(str(y) + formatted_month + "b")
else:
ftd_dates.append(str(y) + formatted_month + "a")
ftd_dates.append(str(y) + formatted_month + "b")
else:
for a_month in range(1, end.month):
formatted_month = f"{a_month:02d}"
if a_month == end.month - 1:
ftd_dates.append(str(y) + formatted_month + "a")
if end.day > 15:
ftd_dates.append(str(y) + formatted_month + "b")
else:
ftd_dates.append(str(y) + formatted_month + "a")
ftd_dates.append(str(y) + formatted_month + "b")
ftd_urls = [base_url + ftd_date + ".zip" for ftd_date in ftd_dates]
for ftd_link in ftd_urls:
all_ftds = pd.read_csv(
ftd_link,
compression="zip",
sep="|",
engine="python",
skipfooter=2,
usecols=[0, 2, 3, 5],
dtype={"QUANTITY (FAILS)": "int"},
encoding="iso8859",
)
tmp_ftds = all_ftds[all_ftds["SYMBOL"] == ticker]
del tmp_ftds["PRICE"]
del tmp_ftds["SYMBOL"]
# merge the data from this archive
ftds_data = pd.concat([ftds_data, tmp_ftds], ignore_index=True)
ftds_data["SETTLEMENT DATE"] = ftds_data["SETTLEMENT DATE"].apply(
lambda x: datetime.strptime(str(x), "%Y%m%d")
)
ftds_data = ftds_data[ftds_data["SETTLEMENT DATE"] > start]
ftds_data = ftds_data[ftds_data["SETTLEMENT DATE"] < end]
return ftds_data
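# Example usage (a minimal sketch; the ticker and dates are illustrative and the
# call performs live requests against sec.gov):
#
#   from datetime import datetime
#   ftds = get_fails_to_deliver("AAPL", datetime(2021, 1, 1), datetime(2021, 6, 30), 0)
#   ftds.tail()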
|
the-stack_0_24480 | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import torch
import torch.nn as nn
from skrobot import coordinates
from torch.nn import Module
def two_vectors_angle(v1, v2):
cos = torch.dot(v1, v2) / (torch.norm(v1) * torch.norm(v2))
return torch.acos(cos)
def quaternion2matrix(q):
q0 = q[0]
q1 = q[1]
q2 = q[2]
q3 = q[3]
m = torch.zeros((3, 3)).to('cuda')
m[0, 0] = q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3
m[0, 1] = 2 * (q1 * q2 - q0 * q3)
m[0, 2] = 2 * (q1 * q3 + q0 * q2)
m[1, 0] = 2 * (q1 * q2 + q0 * q3)
m[1, 1] = q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3
m[1, 2] = 2 * (q2 * q3 - q0 * q1)
m[2, 0] = 2 * (q1 * q3 - q0 * q2)
m[2, 1] = 2 * (q2 * q3 + q0 * q1)
m[2, 2] = q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3
return m
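# Example (a minimal sketch; needs a CUDA device because the matrix is built on
# 'cuda'): the identity quaternion (w, x, y, z) = (1, 0, 0, 0) maps to the
# 3x3 identity matrix.
#
#   q = torch.tensor([1., 0., 0., 0.]).to('cuda')
#   quaternion2matrix(q)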
class HPNETLoss(Module):
def __init__(self, use_coords):
super(HPNETLoss, self).__init__()
self.Ry = torch.tensor(
[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
dtype=torch.float32).to('cuda')
self.vx = torch.tensor([1., 0, 0], dtype=torch.float32).to('cuda')
self.use_coords = use_coords
def forward(self, confidence, confidence_gt,
weight, depth, rotation, annotated_rois):
sigma = 1.0 # huber
confidence_diff = confidence[:, 0, ...] - confidence_gt[:, 0, ...]
confidence_loss = torch.sum(
weight * torch.where(
torch.abs(confidence_diff) <= sigma,
0.5 * (confidence_diff ** 2),
sigma * torch.abs(confidence_diff) - 0.5 * (sigma ** 2)
)) / (256 ** 2)
# confidence_loss = torch.sum((weight * confidence_diff) ** 2)
depth_loss, rotation_loss = torch.tensor(
0.).to('cuda'), torch.tensor(0.).to('cuda')
for i, ar in enumerate(annotated_rois):
if ar[2]:
# print(depth[i])
depth_diff = depth[i][0] - ar[1][0]
# depth_diff = depth[i][0] - torch.tensor(0.7)
# depth_diff = depth[i]- ar[1][0]
# print(depth_diff)
sigma = 0.1 # huber
# import ipdb
# ipdb.set_trace()
depth_loss += 10 * torch.where(
torch.abs(depth_diff) <= sigma,
0.5 * (depth_diff ** 2),
sigma * torch.abs(depth_diff) - 0.5 * (sigma ** 2))
# depth_loss = depth_loss + torch.abs(depth_diff)
# print('depth ', depth[i], depth_loss)
# 1 dof
if self.use_coords:
                    # NOTE: assuming rotation[i] carries the quaternion here;
                    # the original referenced an undefined `depth_and_rotation`.
                    q = rotation[i]
q = q / torch.norm(q)
m_pred = quaternion2matrix(q)
v_pred = torch.matmul(m_pred, self.vx)
else:
# v_pred = depth_and_rotation[i, 1:4]
v_pred = rotation[i]
v_pred = v_pred / torch.norm(v_pred)
                # Equality with NaN is always False, so use isinf/isnan instead.
                if torch.any(torch.isinf(v_pred)) or torch.any(torch.isnan(v_pred)):
continue
m_gt = quaternion2matrix(ar[1][1:])
v_gt = torch.matmul(m_gt, self.vx)
rotation_loss += 0.1 * torch.min(
two_vectors_angle(v_pred, v_gt),
two_vectors_angle(v_pred, -v_gt))
# 3 dof
# m_gt = quaternion2matrix(ar[1][1:])
# q = depth_and_rotation[i, 1:]
# q = q / torch.norm(q)
# m_pred = quaternion2matrix(q)
# rotation_loss += torch.min(
# torch.norm(m_gt - m_pred),
# torch.norm(m_gt - m_pred.mm(self.Ry)))
if len(annotated_rois) > 0:
depth_loss /= len(annotated_rois)
rotation_loss /= len(annotated_rois)
# depth_loss *= 10000
# rotation_loss *= 10000
print('confidence_diff', float(torch.sum(confidence_diff)))
print('confidence_loss', float(confidence_loss))
print('depth_loss', float(depth_loss))
print('rotation_loss', float(rotation_loss))
return confidence_loss, depth_loss, rotation_loss
|
the-stack_0_24481 | """
This module mainly implements special orthogonal polynomials.
See also functions.combinatorial.numbers which contains some
combinatorial polynomials.
"""
from __future__ import print_function, division
from sympy.core import Rational
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.functions.combinatorial.factorials import binomial, factorial, RisingFactorial
from sympy.functions.elementary.complexes import re
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import cos, sec
from sympy.functions.special.gamma_functions import gamma
from sympy.functions.special.hyper import hyper
from sympy.polys.orthopolys import (
jacobi_poly,
gegenbauer_poly,
chebyshevt_poly,
chebyshevu_poly,
laguerre_poly,
hermite_poly,
legendre_poly
)
_x = Dummy('x')
class OrthogonalPolynomial(Function):
"""Base class for orthogonal polynomials.
"""
@classmethod
def _eval_at_order(cls, n, x):
if n.is_integer and n >= 0:
return cls._ortho_poly(int(n), _x).subs(_x, x)
def _eval_conjugate(self):
return self.func(self.args[0], self.args[1].conjugate())
#----------------------------------------------------------------------------
# Jacobi polynomials
#
class jacobi(OrthogonalPolynomial):
r"""
Jacobi polynomial :math:`P_n^{\left(\alpha, \beta\right)}(x)`
jacobi(n, alpha, beta, x) gives the nth Jacobi polynomial
in x, :math:`P_n^{\left(\alpha, \beta\right)}(x)`.
The Jacobi polynomials are orthogonal on :math:`[-1, 1]` with respect
to the weight :math:`\left(1-x\right)^\alpha \left(1+x\right)^\beta`.
Examples
========
>>> from sympy import jacobi, S, conjugate, diff
>>> from sympy.abc import n,a,b,x
>>> jacobi(0, a, b, x)
1
>>> jacobi(1, a, b, x)
a/2 - b/2 + x*(a/2 + b/2 + 1)
>>> jacobi(2, a, b, x) # doctest:+SKIP
(a**2/8 - a*b/4 - a/8 + b**2/8 - b/8 + x**2*(a**2/8 + a*b/4 + 7*a/8 +
b**2/8 + 7*b/8 + 3/2) + x*(a**2/4 + 3*a/4 - b**2/4 - 3*b/4) - 1/2)
>>> jacobi(n, a, b, x)
jacobi(n, a, b, x)
>>> jacobi(n, a, a, x)
RisingFactorial(a + 1, n)*gegenbauer(n,
a + 1/2, x)/RisingFactorial(2*a + 1, n)
>>> jacobi(n, 0, 0, x)
legendre(n, x)
>>> jacobi(n, S(1)/2, S(1)/2, x)
RisingFactorial(3/2, n)*chebyshevu(n, x)/factorial(n + 1)
>>> jacobi(n, -S(1)/2, -S(1)/2, x)
RisingFactorial(1/2, n)*chebyshevt(n, x)/factorial(n)
>>> jacobi(n, a, b, -x)
(-1)**n*jacobi(n, b, a, x)
>>> jacobi(n, a, b, 0)
2**(-n)*gamma(a + n + 1)*hyper((-b - n, -n), (a + 1,), -1)/(factorial(n)*gamma(a + 1))
>>> jacobi(n, a, b, 1)
RisingFactorial(a + 1, n)/factorial(n)
>>> conjugate(jacobi(n, a, b, x))
jacobi(n, conjugate(a), conjugate(b), conjugate(x))
>>> diff(jacobi(n,a,b,x), x)
(a/2 + b/2 + n/2 + 1/2)*jacobi(n - 1, a + 1, b + 1, x)
See Also
========
gegenbauer,
chebyshevt_root, chebyshevu, chebyshevu_root,
legendre, assoc_legendre,
hermite,
laguerre, assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly,
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
References
==========
.. [1] https://en.wikipedia.org/wiki/Jacobi_polynomials
.. [2] http://mathworld.wolfram.com/JacobiPolynomial.html
.. [3] http://functions.wolfram.com/Polynomials/JacobiP/
"""
@classmethod
def eval(cls, n, a, b, x):
# Simplify to other polynomials
# P^{a, a}_n(x)
if a == b:
if a == -S.Half:
return RisingFactorial(S.Half, n) / factorial(n) * chebyshevt(n, x)
elif a == S.Zero:
return legendre(n, x)
elif a == S.Half:
return RisingFactorial(3*S.Half, n) / factorial(n + 1) * chebyshevu(n, x)
else:
return RisingFactorial(a + 1, n) / RisingFactorial(2*a + 1, n) * gegenbauer(n, a + S.Half, x)
elif b == -a:
# P^{a, -a}_n(x)
return gamma(n + a + 1) / gamma(n + 1) * (1 + x)**(a/2) / (1 - x)**(a/2) * assoc_legendre(n, -a, x)
if not n.is_Number:
# Symbolic result P^{a,b}_n(x)
# P^{a,b}_n(-x) ---> (-1)**n * P^{b,a}_n(-x)
if x.could_extract_minus_sign():
return S.NegativeOne**n * jacobi(n, b, a, -x)
# We can evaluate for some special values of x
if x == S.Zero:
return (2**(-n) * gamma(a + n + 1) / (gamma(a + 1) * factorial(n)) *
hyper([-b - n, -n], [a + 1], -1))
if x == S.One:
return RisingFactorial(a + 1, n) / factorial(n)
elif x == S.Infinity:
if n.is_positive:
# Make sure a+b+2*n \notin Z
if (a + b + 2*n).is_integer:
raise ValueError("Error. a + b + 2*n should not be an integer.")
return RisingFactorial(a + b + n + 1, n) * S.Infinity
else:
# n is a given fixed integer, evaluate into polynomial
return jacobi_poly(n, a, b, x)
def fdiff(self, argindex=4):
from sympy import Sum
if argindex == 1:
# Diff wrt n
raise ArgumentIndexError(self, argindex)
elif argindex == 2:
# Diff wrt a
n, a, b, x = self.args
k = Dummy("k")
f1 = 1 / (a + b + n + k + 1)
f2 = ((a + b + 2*k + 1) * RisingFactorial(b + k + 1, n - k) /
((n - k) * RisingFactorial(a + b + k + 1, n - k)))
return Sum(f1 * (jacobi(n, a, b, x) + f2*jacobi(k, a, b, x)), (k, 0, n - 1))
elif argindex == 3:
# Diff wrt b
n, a, b, x = self.args
k = Dummy("k")
f1 = 1 / (a + b + n + k + 1)
f2 = (-1)**(n - k) * ((a + b + 2*k + 1) * RisingFactorial(a + k + 1, n - k) /
((n - k) * RisingFactorial(a + b + k + 1, n - k)))
return Sum(f1 * (jacobi(n, a, b, x) + f2*jacobi(k, a, b, x)), (k, 0, n - 1))
elif argindex == 4:
# Diff wrt x
n, a, b, x = self.args
return S.Half * (a + b + n + 1) * jacobi(n - 1, a + 1, b + 1, x)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_polynomial(self, n, a, b, x, **kwargs):
from sympy import Sum
# Make sure n \in N
if n.is_negative or n.is_integer is False:
raise ValueError("Error: n should be a non-negative integer.")
k = Dummy("k")
kern = (RisingFactorial(-n, k) * RisingFactorial(a + b + n + 1, k) * RisingFactorial(a + k + 1, n - k) /
factorial(k) * ((1 - x)/2)**k)
return 1 / factorial(n) * Sum(kern, (k, 0, n))
def _eval_conjugate(self):
n, a, b, x = self.args
return self.func(n, a.conjugate(), b.conjugate(), x.conjugate())
def jacobi_normalized(n, a, b, x):
r"""
Jacobi polynomial :math:`P_n^{\left(\alpha, \beta\right)}(x)`
jacobi_normalized(n, alpha, beta, x) gives the nth Jacobi polynomial
in x, :math:`P_n^{\left(\alpha, \beta\right)}(x)`.
The Jacobi polynomials are orthogonal on :math:`[-1, 1]` with respect
to the weight :math:`\left(1-x\right)^\alpha \left(1+x\right)^\beta`.
    This function returns the polynomial normalized:
.. math::
\int_{-1}^{1}
P_m^{\left(\alpha, \beta\right)}(x)
P_n^{\left(\alpha, \beta\right)}(x)
(1-x)^{\alpha} (1+x)^{\beta} \mathrm{d}x
= \delta_{m,n}
Examples
========
>>> from sympy import jacobi_normalized
>>> from sympy.abc import n,a,b,x
>>> jacobi_normalized(n, a, b, x)
jacobi(n, a, b, x)/sqrt(2**(a + b + 1)*gamma(a + n + 1)*gamma(b + n + 1)/((a + b + 2*n + 1)*factorial(n)*gamma(a + b + n + 1)))
See Also
========
gegenbauer,
chebyshevt_root, chebyshevu, chebyshevu_root,
legendre, assoc_legendre,
hermite,
laguerre, assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly,
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
References
==========
.. [1] https://en.wikipedia.org/wiki/Jacobi_polynomials
.. [2] http://mathworld.wolfram.com/JacobiPolynomial.html
.. [3] http://functions.wolfram.com/Polynomials/JacobiP/
"""
nfactor = (S(2)**(a + b + 1) * (gamma(n + a + 1) * gamma(n + b + 1))
/ (2*n + a + b + 1) / (factorial(n) * gamma(n + a + b + 1)))
return jacobi(n, a, b, x) / sqrt(nfactor)
#----------------------------------------------------------------------------
# Gegenbauer polynomials
#
class gegenbauer(OrthogonalPolynomial):
r"""
Gegenbauer polynomial :math:`C_n^{\left(\alpha\right)}(x)`
gegenbauer(n, alpha, x) gives the nth Gegenbauer polynomial
in x, :math:`C_n^{\left(\alpha\right)}(x)`.
The Gegenbauer polynomials are orthogonal on :math:`[-1, 1]` with
respect to the weight :math:`\left(1-x^2\right)^{\alpha-\frac{1}{2}}`.
Examples
========
>>> from sympy import gegenbauer, conjugate, diff
>>> from sympy.abc import n,a,x
>>> gegenbauer(0, a, x)
1
>>> gegenbauer(1, a, x)
2*a*x
>>> gegenbauer(2, a, x)
-a + x**2*(2*a**2 + 2*a)
>>> gegenbauer(3, a, x)
x**3*(4*a**3/3 + 4*a**2 + 8*a/3) + x*(-2*a**2 - 2*a)
>>> gegenbauer(n, a, x)
gegenbauer(n, a, x)
>>> gegenbauer(n, a, -x)
(-1)**n*gegenbauer(n, a, x)
>>> gegenbauer(n, a, 0)
2**n*sqrt(pi)*gamma(a + n/2)/(gamma(a)*gamma(1/2 - n/2)*gamma(n + 1))
>>> gegenbauer(n, a, 1)
gamma(2*a + n)/(gamma(2*a)*gamma(n + 1))
>>> conjugate(gegenbauer(n, a, x))
gegenbauer(n, conjugate(a), conjugate(x))
>>> diff(gegenbauer(n, a, x), x)
2*a*gegenbauer(n - 1, a + 1, x)
See Also
========
jacobi,
chebyshevt_root, chebyshevu, chebyshevu_root,
legendre, assoc_legendre,
hermite,
laguerre, assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
References
==========
.. [1] https://en.wikipedia.org/wiki/Gegenbauer_polynomials
.. [2] http://mathworld.wolfram.com/GegenbauerPolynomial.html
.. [3] http://functions.wolfram.com/Polynomials/GegenbauerC3/
"""
@classmethod
def eval(cls, n, a, x):
# For negative n the polynomials vanish
# See http://functions.wolfram.com/Polynomials/GegenbauerC3/03/01/03/0012/
if n.is_negative:
return S.Zero
# Some special values for fixed a
if a == S.Half:
return legendre(n, x)
elif a == S.One:
return chebyshevu(n, x)
elif a == S.NegativeOne:
return S.Zero
if not n.is_Number:
# Handle this before the general sign extraction rule
if x == S.NegativeOne:
if (re(a) > S.Half) == True:
return S.ComplexInfinity
else:
return (cos(S.Pi*(a+n)) * sec(S.Pi*a) * gamma(2*a+n) /
(gamma(2*a) * gamma(n+1)))
# Symbolic result C^a_n(x)
# C^a_n(-x) ---> (-1)**n * C^a_n(x)
if x.could_extract_minus_sign():
return S.NegativeOne**n * gegenbauer(n, a, -x)
# We can evaluate for some special values of x
if x == S.Zero:
return (2**n * sqrt(S.Pi) * gamma(a + S.Half*n) /
(gamma((1 - n)/2) * gamma(n + 1) * gamma(a)) )
if x == S.One:
return gamma(2*a + n) / (gamma(2*a) * gamma(n + 1))
elif x == S.Infinity:
if n.is_positive:
return RisingFactorial(a, n) * S.Infinity
else:
# n is a given fixed integer, evaluate into polynomial
return gegenbauer_poly(n, a, x)
def fdiff(self, argindex=3):
from sympy import Sum
if argindex == 1:
# Diff wrt n
raise ArgumentIndexError(self, argindex)
elif argindex == 2:
# Diff wrt a
n, a, x = self.args
k = Dummy("k")
factor1 = 2 * (1 + (-1)**(n - k)) * (k + a) / ((k +
n + 2*a) * (n - k))
factor2 = 2*(k + 1) / ((k + 2*a) * (2*k + 2*a + 1)) + \
2 / (k + n + 2*a)
kern = factor1*gegenbauer(k, a, x) + factor2*gegenbauer(n, a, x)
return Sum(kern, (k, 0, n - 1))
elif argindex == 3:
# Diff wrt x
n, a, x = self.args
return 2*a*gegenbauer(n - 1, a + 1, x)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_polynomial(self, n, a, x, **kwargs):
from sympy import Sum
k = Dummy("k")
kern = ((-1)**k * RisingFactorial(a, n - k) * (2*x)**(n - 2*k) /
(factorial(k) * factorial(n - 2*k)))
return Sum(kern, (k, 0, floor(n/2)))
def _eval_conjugate(self):
n, a, x = self.args
return self.func(n, a.conjugate(), x.conjugate())
#----------------------------------------------------------------------------
# Chebyshev polynomials of first and second kind
#
class chebyshevt(OrthogonalPolynomial):
r"""
Chebyshev polynomial of the first kind, :math:`T_n(x)`
chebyshevt(n, x) gives the nth Chebyshev polynomial (of the first
kind) in x, :math:`T_n(x)`.
The Chebyshev polynomials of the first kind are orthogonal on
:math:`[-1, 1]` with respect to the weight :math:`\frac{1}{\sqrt{1-x^2}}`.
Examples
========
>>> from sympy import chebyshevt, chebyshevu, diff
>>> from sympy.abc import n,x
>>> chebyshevt(0, x)
1
>>> chebyshevt(1, x)
x
>>> chebyshevt(2, x)
2*x**2 - 1
>>> chebyshevt(n, x)
chebyshevt(n, x)
>>> chebyshevt(n, -x)
(-1)**n*chebyshevt(n, x)
>>> chebyshevt(-n, x)
chebyshevt(n, x)
>>> chebyshevt(n, 0)
cos(pi*n/2)
>>> chebyshevt(n, -1)
(-1)**n
>>> diff(chebyshevt(n, x), x)
n*chebyshevu(n - 1, x)
See Also
========
jacobi, gegenbauer,
chebyshevt_root, chebyshevu, chebyshevu_root,
legendre, assoc_legendre,
hermite,
laguerre, assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
References
==========
.. [1] https://en.wikipedia.org/wiki/Chebyshev_polynomial
.. [2] http://mathworld.wolfram.com/ChebyshevPolynomialoftheFirstKind.html
.. [3] http://mathworld.wolfram.com/ChebyshevPolynomialoftheSecondKind.html
.. [4] http://functions.wolfram.com/Polynomials/ChebyshevT/
.. [5] http://functions.wolfram.com/Polynomials/ChebyshevU/
"""
_ortho_poly = staticmethod(chebyshevt_poly)
@classmethod
def eval(cls, n, x):
if not n.is_Number:
# Symbolic result T_n(x)
# T_n(-x) ---> (-1)**n * T_n(x)
if x.could_extract_minus_sign():
return S.NegativeOne**n * chebyshevt(n, -x)
# T_{-n}(x) ---> T_n(x)
if n.could_extract_minus_sign():
return chebyshevt(-n, x)
# We can evaluate for some special values of x
if x == S.Zero:
return cos(S.Half * S.Pi * n)
if x == S.One:
return S.One
elif x == S.Infinity:
return S.Infinity
else:
# n is a given fixed integer, evaluate into polynomial
if n.is_negative:
# T_{-n}(x) == T_n(x)
return cls._eval_at_order(-n, x)
else:
return cls._eval_at_order(n, x)
def fdiff(self, argindex=2):
if argindex == 1:
# Diff wrt n
raise ArgumentIndexError(self, argindex)
elif argindex == 2:
# Diff wrt x
n, x = self.args
return n * chebyshevu(n - 1, x)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_polynomial(self, n, x, **kwargs):
from sympy import Sum
k = Dummy("k")
kern = binomial(n, 2*k) * (x**2 - 1)**k * x**(n - 2*k)
return Sum(kern, (k, 0, floor(n/2)))
class chebyshevu(OrthogonalPolynomial):
r"""
Chebyshev polynomial of the second kind, :math:`U_n(x)`
chebyshevu(n, x) gives the nth Chebyshev polynomial of the second
kind in x, :math:`U_n(x)`.
The Chebyshev polynomials of the second kind are orthogonal on
:math:`[-1, 1]` with respect to the weight :math:`\sqrt{1-x^2}`.
Examples
========
>>> from sympy import chebyshevt, chebyshevu, diff
>>> from sympy.abc import n,x
>>> chebyshevu(0, x)
1
>>> chebyshevu(1, x)
2*x
>>> chebyshevu(2, x)
4*x**2 - 1
>>> chebyshevu(n, x)
chebyshevu(n, x)
>>> chebyshevu(n, -x)
(-1)**n*chebyshevu(n, x)
>>> chebyshevu(-n, x)
-chebyshevu(n - 2, x)
>>> chebyshevu(n, 0)
cos(pi*n/2)
>>> chebyshevu(n, 1)
n + 1
>>> diff(chebyshevu(n, x), x)
(-x*chebyshevu(n, x) + (n + 1)*chebyshevt(n + 1, x))/(x**2 - 1)
See Also
========
jacobi, gegenbauer,
chebyshevt, chebyshevt_root, chebyshevu_root,
legendre, assoc_legendre,
hermite,
laguerre, assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
References
==========
.. [1] https://en.wikipedia.org/wiki/Chebyshev_polynomial
.. [2] http://mathworld.wolfram.com/ChebyshevPolynomialoftheFirstKind.html
.. [3] http://mathworld.wolfram.com/ChebyshevPolynomialoftheSecondKind.html
.. [4] http://functions.wolfram.com/Polynomials/ChebyshevT/
.. [5] http://functions.wolfram.com/Polynomials/ChebyshevU/
"""
_ortho_poly = staticmethod(chebyshevu_poly)
@classmethod
def eval(cls, n, x):
if not n.is_Number:
# Symbolic result U_n(x)
# U_n(-x) ---> (-1)**n * U_n(x)
if x.could_extract_minus_sign():
return S.NegativeOne**n * chebyshevu(n, -x)
# U_{-n}(x) ---> -U_{n-2}(x)
if n.could_extract_minus_sign():
if n == S.NegativeOne:
# n can not be -1 here
return S.Zero
elif not (-n - 2).could_extract_minus_sign():
return -chebyshevu(-n - 2, x)
# We can evaluate for some special values of x
if x == S.Zero:
return cos(S.Half * S.Pi * n)
if x == S.One:
return S.One + n
elif x == S.Infinity:
return S.Infinity
else:
# n is a given fixed integer, evaluate into polynomial
if n.is_negative:
# U_{-n}(x) ---> -U_{n-2}(x)
if n == S.NegativeOne:
return S.Zero
else:
return -cls._eval_at_order(-n - 2, x)
else:
return cls._eval_at_order(n, x)
def fdiff(self, argindex=2):
if argindex == 1:
# Diff wrt n
raise ArgumentIndexError(self, argindex)
elif argindex == 2:
# Diff wrt x
n, x = self.args
return ((n + 1) * chebyshevt(n + 1, x) - x * chebyshevu(n, x)) / (x**2 - 1)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_polynomial(self, n, x, **kwargs):
from sympy import Sum
k = Dummy("k")
kern = S.NegativeOne**k * factorial(
n - k) * (2*x)**(n - 2*k) / (factorial(k) * factorial(n - 2*k))
return Sum(kern, (k, 0, floor(n/2)))
class chebyshevt_root(Function):
r"""
chebyshev_root(n, k) returns the kth root (indexed from zero) of
the nth Chebyshev polynomial of the first kind; that is, if
0 <= k < n, chebyshevt(n, chebyshevt_root(n, k)) == 0.
Examples
========
>>> from sympy import chebyshevt, chebyshevt_root
>>> chebyshevt_root(3, 2)
-sqrt(3)/2
>>> chebyshevt(3, chebyshevt_root(3, 2))
0
See Also
========
jacobi, gegenbauer,
chebyshevt, chebyshevu, chebyshevu_root,
legendre, assoc_legendre,
hermite,
laguerre, assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
"""
@classmethod
def eval(cls, n, k):
if not ((0 <= k) and (k < n)):
raise ValueError("must have 0 <= k < n, "
"got k = %s and n = %s" % (k, n))
return cos(S.Pi*(2*k + 1)/(2*n))
class chebyshevu_root(Function):
r"""
chebyshevu_root(n, k) returns the kth root (indexed from zero) of the
nth Chebyshev polynomial of the second kind; that is, if 0 <= k < n,
chebyshevu(n, chebyshevu_root(n, k)) == 0.
Examples
========
>>> from sympy import chebyshevu, chebyshevu_root
>>> chebyshevu_root(3, 2)
-sqrt(2)/2
>>> chebyshevu(3, chebyshevu_root(3, 2))
0
See Also
========
chebyshevt, chebyshevt_root, chebyshevu,
legendre, assoc_legendre,
hermite,
laguerre, assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
"""
@classmethod
def eval(cls, n, k):
if not ((0 <= k) and (k < n)):
raise ValueError("must have 0 <= k < n, "
"got k = %s and n = %s" % (k, n))
return cos(S.Pi*(k + 1)/(n + 1))
#----------------------------------------------------------------------------
# Legendre polynomials and Associated Legendre polynomials
#
class legendre(OrthogonalPolynomial):
r"""
legendre(n, x) gives the nth Legendre polynomial of x, :math:`P_n(x)`
The Legendre polynomials are orthogonal on [-1, 1] with respect to
the constant weight 1. They satisfy :math:`P_n(1) = 1` for all n; further,
:math:`P_n` is odd for odd n and even for even n.
Examples
========
>>> from sympy import legendre, diff
>>> from sympy.abc import x, n
>>> legendre(0, x)
1
>>> legendre(1, x)
x
>>> legendre(2, x)
3*x**2/2 - 1/2
>>> legendre(n, x)
legendre(n, x)
>>> diff(legendre(n,x), x)
n*(x*legendre(n, x) - legendre(n - 1, x))/(x**2 - 1)
See Also
========
jacobi, gegenbauer,
chebyshevt, chebyshevt_root, chebyshevu, chebyshevu_root,
assoc_legendre,
hermite,
laguerre, assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
References
==========
.. [1] https://en.wikipedia.org/wiki/Legendre_polynomial
.. [2] http://mathworld.wolfram.com/LegendrePolynomial.html
.. [3] http://functions.wolfram.com/Polynomials/LegendreP/
.. [4] http://functions.wolfram.com/Polynomials/LegendreP2/
"""
_ortho_poly = staticmethod(legendre_poly)
@classmethod
def eval(cls, n, x):
if not n.is_Number:
# Symbolic result L_n(x)
# L_n(-x) ---> (-1)**n * L_n(x)
if x.could_extract_minus_sign():
return S.NegativeOne**n * legendre(n, -x)
# L_{-n}(x) ---> L_{n-1}(x)
if n.could_extract_minus_sign() and not(-n - 1).could_extract_minus_sign():
return legendre(-n - S.One, x)
# We can evaluate for some special values of x
if x == S.Zero:
return sqrt(S.Pi)/(gamma(S.Half - n/2)*gamma(S.One + n/2))
elif x == S.One:
return S.One
elif x == S.Infinity:
return S.Infinity
else:
# n is a given fixed integer, evaluate into polynomial;
# L_{-n}(x) ---> L_{n-1}(x)
if n.is_negative:
n = -n - S.One
return cls._eval_at_order(n, x)
def fdiff(self, argindex=2):
if argindex == 1:
# Diff wrt n
raise ArgumentIndexError(self, argindex)
elif argindex == 2:
# Diff wrt x
# Find better formula, this is unsuitable for x = +/-1
# http://www.autodiff.org/ad16/Oral/Buecker_Legendre.pdf says
# at x = 1:
# n*(n + 1)/2 , m = 0
# oo , m = 1
# -(n-1)*n*(n+1)*(n+2)/4 , m = 2
# 0 , m = 3, 4, ..., n
#
# at x = -1
# (-1)**(n+1)*n*(n + 1)/2 , m = 0
# (-1)**n*oo , m = 1
# (-1)**n*(n-1)*n*(n+1)*(n+2)/4 , m = 2
# 0 , m = 3, 4, ..., n
n, x = self.args
return n/(x**2 - 1)*(x*legendre(n, x) - legendre(n - 1, x))
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_polynomial(self, n, x, **kwargs):
from sympy import Sum
k = Dummy("k")
kern = (-1)**k*binomial(n, k)**2*((1 + x)/2)**(n - k)*((1 - x)/2)**k
return Sum(kern, (k, 0, n))
class assoc_legendre(Function):
r"""
    assoc_legendre(n, m, x) gives :math:`P_n^m(x)`, where n and m are
    the degree and order; it is related to the nth-order Legendre
    polynomial :math:`P_n(x)` in the following manner:
.. math::
P_n^m(x) = (-1)^m (1 - x^2)^{\frac{m}{2}}
\frac{\mathrm{d}^m P_n(x)}{\mathrm{d} x^m}
    Associated Legendre polynomials are orthogonal on [-1, 1] with:
- weight = 1 for the same m, and different n.
- weight = 1/(1-x**2) for the same n, and different m.
Examples
========
>>> from sympy import assoc_legendre
>>> from sympy.abc import x, m, n
>>> assoc_legendre(0,0, x)
1
>>> assoc_legendre(1,0, x)
x
>>> assoc_legendre(1,1, x)
-sqrt(1 - x**2)
>>> assoc_legendre(n,m,x)
assoc_legendre(n, m, x)
See Also
========
jacobi, gegenbauer,
chebyshevt, chebyshevt_root, chebyshevu, chebyshevu_root,
legendre,
hermite,
laguerre, assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
References
==========
.. [1] https://en.wikipedia.org/wiki/Associated_Legendre_polynomials
.. [2] http://mathworld.wolfram.com/LegendrePolynomial.html
.. [3] http://functions.wolfram.com/Polynomials/LegendreP/
.. [4] http://functions.wolfram.com/Polynomials/LegendreP2/
"""
@classmethod
def _eval_at_order(cls, n, m):
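        # Apply the definition from the class docstring: differentiate P_n(x)
        # m times, then multiply by (-1)**m * (1 - x**2)**(m/2).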
P = legendre_poly(n, _x, polys=True).diff((_x, m))
return (-1)**m * (1 - _x**2)**Rational(m, 2) * P.as_expr()
@classmethod
def eval(cls, n, m, x):
if m.could_extract_minus_sign():
# P^{-m}_n ---> F * P^m_n
return S.NegativeOne**(-m) * (factorial(m + n)/factorial(n - m)) * assoc_legendre(n, -m, x)
if m == 0:
# P^0_n ---> L_n
return legendre(n, x)
if x == 0:
return 2**m*sqrt(S.Pi) / (gamma((1 - m - n)/2)*gamma(1 - (m - n)/2))
if n.is_Number and m.is_Number and n.is_integer and m.is_integer:
if n.is_negative:
raise ValueError("%s : 1st index must be nonnegative integer (got %r)" % (cls, n))
if abs(m) > n:
raise ValueError("%s : abs('2nd index') must be <= '1st index' (got %r, %r)" % (cls, n, m))
return cls._eval_at_order(int(n), abs(int(m))).subs(_x, x)
def fdiff(self, argindex=3):
if argindex == 1:
# Diff wrt n
raise ArgumentIndexError(self, argindex)
elif argindex == 2:
# Diff wrt m
raise ArgumentIndexError(self, argindex)
elif argindex == 3:
# Diff wrt x
# Find better formula, this is unsuitable for x = 1
n, m, x = self.args
return 1/(x**2 - 1)*(x*n*assoc_legendre(n, m, x) - (m + n)*assoc_legendre(n - 1, m, x))
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_polynomial(self, n, m, x, **kwargs):
from sympy import Sum
k = Dummy("k")
kern = factorial(2*n - 2*k)/(2**n*factorial(n - k)*factorial(
k)*factorial(n - 2*k - m))*(-1)**k*x**(n - m - 2*k)
return (1 - x**2)**(m/2) * Sum(kern, (k, 0, floor((n - m)*S.Half)))
def _eval_conjugate(self):
n, m, x = self.args
return self.func(n, m.conjugate(), x.conjugate())
#----------------------------------------------------------------------------
# Hermite polynomials
#
class hermite(OrthogonalPolynomial):
r"""
hermite(n, x) gives the nth Hermite polynomial in x, :math:`H_n(x)`
The Hermite polynomials are orthogonal on :math:`(-\infty, \infty)`
with respect to the weight :math:`\exp\left(-x^2\right)`.
Examples
========
>>> from sympy import hermite, diff
>>> from sympy.abc import x, n
>>> hermite(0, x)
1
>>> hermite(1, x)
2*x
>>> hermite(2, x)
4*x**2 - 2
>>> hermite(n, x)
hermite(n, x)
>>> diff(hermite(n,x), x)
2*n*hermite(n - 1, x)
>>> hermite(n, -x)
(-1)**n*hermite(n, x)
See Also
========
jacobi, gegenbauer,
chebyshevt, chebyshevt_root, chebyshevu, chebyshevu_root,
legendre, assoc_legendre,
laguerre, assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
References
==========
.. [1] https://en.wikipedia.org/wiki/Hermite_polynomial
.. [2] http://mathworld.wolfram.com/HermitePolynomial.html
.. [3] http://functions.wolfram.com/Polynomials/HermiteH/
"""
_ortho_poly = staticmethod(hermite_poly)
@classmethod
def eval(cls, n, x):
if not n.is_Number:
# Symbolic result H_n(x)
# H_n(-x) ---> (-1)**n * H_n(x)
if x.could_extract_minus_sign():
return S.NegativeOne**n * hermite(n, -x)
# We can evaluate for some special values of x
if x == S.Zero:
return 2**n * sqrt(S.Pi) / gamma((S.One - n)/2)
elif x == S.Infinity:
return S.Infinity
else:
# n is a given fixed integer, evaluate into polynomial
if n.is_negative:
raise ValueError(
"The index n must be nonnegative integer (got %r)" % n)
else:
return cls._eval_at_order(n, x)
def fdiff(self, argindex=2):
if argindex == 1:
# Diff wrt n
raise ArgumentIndexError(self, argindex)
elif argindex == 2:
# Diff wrt x
n, x = self.args
return 2*n*hermite(n - 1, x)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_polynomial(self, n, x, **kwargs):
from sympy import Sum
k = Dummy("k")
kern = (-1)**k / (factorial(k)*factorial(n - 2*k)) * (2*x)**(n - 2*k)
return factorial(n)*Sum(kern, (k, 0, floor(n/2)))
#----------------------------------------------------------------------------
# Laguerre polynomials
#
class laguerre(OrthogonalPolynomial):
r"""
Returns the nth Laguerre polynomial in x, :math:`L_n(x)`.
Parameters
==========
n : int
Degree of Laguerre polynomial. Must be ``n >= 0``.
Examples
========
>>> from sympy import laguerre, diff
>>> from sympy.abc import x, n
>>> laguerre(0, x)
1
>>> laguerre(1, x)
1 - x
>>> laguerre(2, x)
x**2/2 - 2*x + 1
>>> laguerre(3, x)
-x**3/6 + 3*x**2/2 - 3*x + 1
>>> laguerre(n, x)
laguerre(n, x)
>>> diff(laguerre(n, x), x)
-assoc_laguerre(n - 1, 1, x)
See Also
========
jacobi, gegenbauer,
chebyshevt, chebyshevt_root, chebyshevu, chebyshevu_root,
legendre, assoc_legendre,
hermite,
assoc_laguerre,
sympy.polys.orthopolys.jacobi_poly
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
References
==========
.. [1] https://en.wikipedia.org/wiki/Laguerre_polynomial
.. [2] http://mathworld.wolfram.com/LaguerrePolynomial.html
.. [3] http://functions.wolfram.com/Polynomials/LaguerreL/
.. [4] http://functions.wolfram.com/Polynomials/LaguerreL3/
"""
_ortho_poly = staticmethod(laguerre_poly)
@classmethod
def eval(cls, n, x):
if n.is_integer is False:
raise ValueError("Error: n should be an integer.")
if not n.is_Number:
# Symbolic result L_n(x)
# L_{n}(-x) ---> exp(-x) * L_{-n-1}(x)
# L_{-n}(x) ---> exp(x) * L_{n-1}(-x)
if n.could_extract_minus_sign() and not(-n - 1).could_extract_minus_sign():
return exp(x)*laguerre(-n - 1, -x)
# We can evaluate for some special values of x
if x == S.Zero:
return S.One
elif x == S.NegativeInfinity:
return S.Infinity
elif x == S.Infinity:
return S.NegativeOne**n * S.Infinity
else:
if n.is_negative:
return exp(x)*laguerre(-n - 1, -x)
else:
return cls._eval_at_order(n, x)
def fdiff(self, argindex=2):
if argindex == 1:
# Diff wrt n
raise ArgumentIndexError(self, argindex)
elif argindex == 2:
# Diff wrt x
n, x = self.args
return -assoc_laguerre(n - 1, 1, x)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_polynomial(self, n, x, **kwargs):
from sympy import Sum
# Make sure n \in N_0
if n.is_negative:
return exp(x) * self._eval_rewrite_as_polynomial(-n - 1, -x, **kwargs)
if n.is_integer is False:
raise ValueError("Error: n should be an integer.")
k = Dummy("k")
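        # RisingFactorial(-n, k) / factorial(k) == (-1)**k * binomial(n, k), so the
        # summand below is the standard series term (-1)**k * binomial(n, k) * x**k / k!.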
kern = RisingFactorial(-n, k) / factorial(k)**2 * x**k
return Sum(kern, (k, 0, n))
class assoc_laguerre(OrthogonalPolynomial):
r"""
    Returns the nth generalized Laguerre polynomial in x, :math:`L_n^{\alpha}(x)`.
Parameters
==========
n : int
Degree of Laguerre polynomial. Must be ``n >= 0``.
alpha : Expr
Arbitrary expression. For ``alpha=0`` regular Laguerre
polynomials will be generated.
Examples
========
>>> from sympy import laguerre, assoc_laguerre, diff
>>> from sympy.abc import x, n, a
>>> assoc_laguerre(0, a, x)
1
>>> assoc_laguerre(1, a, x)
a - x + 1
>>> assoc_laguerre(2, a, x)
a**2/2 + 3*a/2 + x**2/2 + x*(-a - 2) + 1
>>> assoc_laguerre(3, a, x)
a**3/6 + a**2 + 11*a/6 - x**3/6 + x**2*(a/2 + 3/2) +
x*(-a**2/2 - 5*a/2 - 3) + 1
>>> assoc_laguerre(n, a, 0)
binomial(a + n, a)
>>> assoc_laguerre(n, a, x)
assoc_laguerre(n, a, x)
>>> assoc_laguerre(n, 0, x)
laguerre(n, x)
>>> diff(assoc_laguerre(n, a, x), x)
-assoc_laguerre(n - 1, a + 1, x)
>>> diff(assoc_laguerre(n, a, x), a)
Sum(assoc_laguerre(_k, a, x)/(-a + n), (_k, 0, n - 1))
See Also
========
jacobi, gegenbauer,
chebyshevt, chebyshevt_root, chebyshevu, chebyshevu_root,
legendre, assoc_legendre,
hermite,
laguerre,
sympy.polys.orthopolys.jacobi_poly
sympy.polys.orthopolys.gegenbauer_poly
sympy.polys.orthopolys.chebyshevt_poly
sympy.polys.orthopolys.chebyshevu_poly
sympy.polys.orthopolys.hermite_poly
sympy.polys.orthopolys.legendre_poly
sympy.polys.orthopolys.laguerre_poly
References
==========
.. [1] https://en.wikipedia.org/wiki/Laguerre_polynomial#Generalized_Laguerre_polynomials
.. [2] http://mathworld.wolfram.com/AssociatedLaguerrePolynomial.html
.. [3] http://functions.wolfram.com/Polynomials/LaguerreL/
.. [4] http://functions.wolfram.com/Polynomials/LaguerreL3/
"""
@classmethod
def eval(cls, n, alpha, x):
# L_{n}^{0}(x) ---> L_{n}(x)
if alpha == S.Zero:
return laguerre(n, x)
if not n.is_Number:
# We can evaluate for some special values of x
if x == S.Zero:
return binomial(n + alpha, alpha)
elif x == S.Infinity and n > S.Zero:
return S.NegativeOne**n * S.Infinity
elif x == S.NegativeInfinity and n > S.Zero:
return S.Infinity
else:
# n is a given fixed integer, evaluate into polynomial
if n.is_negative:
raise ValueError(
"The index n must be nonnegative integer (got %r)" % n)
else:
return laguerre_poly(n, x, alpha)
def fdiff(self, argindex=3):
from sympy import Sum
if argindex == 1:
# Diff wrt n
raise ArgumentIndexError(self, argindex)
elif argindex == 2:
# Diff wrt alpha
n, alpha, x = self.args
k = Dummy("k")
return Sum(assoc_laguerre(k, alpha, x) / (n - alpha), (k, 0, n - 1))
elif argindex == 3:
# Diff wrt x
n, alpha, x = self.args
return -assoc_laguerre(n - 1, alpha + 1, x)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_polynomial(self, n, alpha, x, **kwargs):
from sympy import Sum
# Make sure n \in N_0
if n.is_negative or n.is_integer is False:
raise ValueError("Error: n should be a non-negative integer.")
k = Dummy("k")
kern = RisingFactorial(
-n, k) / (gamma(k + alpha + 1) * factorial(k)) * x**k
return gamma(n + alpha + 1) / factorial(n) * Sum(kern, (k, 0, n))
def _eval_conjugate(self):
n, alpha, x = self.args
return self.func(n, alpha.conjugate(), x.conjugate())
|
the-stack_0_24482 | #!/usr/bin/env python
#
# (c) Copyright 2015-2016 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import glob
from ardana_packager.ansible import AnsibleModule
import os
import os.path
import yaml
def main():
module = AnsibleModule(
argument_spec=dict(
in_files=dict(required=True),
key=dict(default="symlinks"),
config_path=dict(required=True),
target_path=dict(required=True),
source_path=dict(required=True),
),
supports_check_mode=False
)
params = module.params
in_files = params['in_files']
if ',' in params['key']:
keys = [k.strip() for k in params['key'].split(',')]
else:
keys = [params['key']]
config_dir = params['config_path']
target_dir = params['target_path']
source_dir = params['source_path']
changed = False
for fn in glob.glob(in_files):
with open(fn) as f:
temp = yaml.safe_load(f)
for key in keys:
for src, dest in temp.get(key, {}).items():
d = os.path.join(config_dir, src)
p_d = os.path.dirname(d)
# The logic here to reproduce the j2 declaration
# "{{ deployer_symlink_target_path |
# joinpath(item.value) |
# relpath(deployer_symlink_source_path |
# joinpath(item.key) |
# dirname)
# }}"
# in python.
s_1 = os.path.join(target_dir, dest)
s = os.path.dirname(os.path.join(source_dir, src))
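                # Example with hypothetical paths: config_path=/etc/cfg,
                # target_path=/opt/target, source_path=/opt/source,
                # src='group_vars/all' and dest='shared/all.yml' give
                #   s_1 = /opt/target/shared/all.yml
                #   s   = /opt/source/group_vars
                # so the link /etc/cfg/group_vars/all is created pointing at
                # os.path.relpath(s_1, s) == '../../target/shared/all.yml'.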
if not os.path.exists(p_d):
os.makedirs(p_d)
if not os.path.islink(d):
os.symlink(os.path.relpath(s_1, s), d)
changed = True
module.exit_json(in_files=in_files,
key=','.join(keys),
config_path=config_dir,
target_path=target_dir,
source_path=source_dir,
changed=changed)
if __name__ == '__main__':
main()
|
the-stack_0_24486 | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
from typing import Tuple, Optional, Callable, Dict, Sequence
import torch
from torch.utils.data import Dataset, Sampler, RandomSampler
from dlrm.data.datasets import CriteoBinDataset, SyntheticDataset, SplitCriteoDataset
from dlrm.data.samplers import RandomDistributedSampler
from dlrm.data.utils import collate_array, write_dataset_to_disk, get_categorical_feature_sizes, \
collate_split_tensors
from dlrm.utils.distributed import is_distributed, is_main_process, get_rank
def create_synthetic_datasets(flags, device_mapping: Optional[Dict] = None):
dataset_train = SyntheticDataset(num_entries=flags.synthetic_dataset_num_entries,
batch_size=flags.batch_size,
numerical_features=flags.num_numerical_features,
categorical_feature_sizes=get_categorical_feature_sizes(flags),
device_mapping=device_mapping)
dataset_test = SyntheticDataset(num_entries=flags.synthetic_dataset_num_entries,
batch_size=flags.test_batch_size,
numerical_features=flags.num_numerical_features,
categorical_feature_sizes=get_categorical_feature_sizes(flags),
device_mapping=device_mapping)
return dataset_train, dataset_test
def create_real_datasets(
flags,
path,
dataset_class: type = SplitCriteoDataset,
train_dataset_path="train",
test_dataset_path="test",
**kwargs
):
train_dataset = os.path.join(path, train_dataset_path)
test_dataset = os.path.join(path, test_dataset_path)
categorical_sizes = get_categorical_feature_sizes(flags)
dataset_train = dataset_class(
data_path=train_dataset,
batch_size=flags.batch_size,
numerical_features=flags.num_numerical_features,
categorical_features=range(len(categorical_sizes)),
categorical_feature_sizes=categorical_sizes,
**kwargs
)
dataset_test = dataset_class(
data_path=test_dataset,
batch_size=flags.test_batch_size,
numerical_features=flags.num_numerical_features,
categorical_features=range(len(categorical_sizes)),
categorical_feature_sizes=categorical_sizes,
**kwargs
)
return dataset_train, dataset_test
class DatasetFactory:
def __init__(self, flags, device_mapping: Optional[Dict] = None):
self._flags = flags
self._device_mapping = device_mapping
def create_collate_fn(self) -> Optional[Callable]:
if self._device_mapping is not None:
# selection of categorical features assigned to this device
device_cat_features = torch.tensor(
self._device_mapping["embedding"][get_rank()], device=self._flags.base_device,
dtype=torch.long)
else:
device_cat_features = None
orig_stream = torch.cuda.current_stream() if self._flags.base_device == 'cuda' else None
return functools.partial(
collate_array,
device=self._flags.base_device,
orig_stream=orig_stream,
num_numerical_features=self._flags.num_numerical_features,
selected_categorical_features=device_cat_features
)
def create_sampler(self, dataset: Dataset) -> Optional[Sampler]:
return RandomDistributedSampler(dataset) if is_distributed() else RandomSampler(dataset)
def create_datasets(self) -> Tuple[Dataset, Dataset]:
raise NotImplementedError()
def create_data_loader(
self,
dataset,
collate_fn: Optional[Callable] = None,
sampler: Optional[Sampler] = None
):
return torch.utils.data.DataLoader(
dataset, collate_fn=collate_fn, sampler=sampler, batch_size=None,
num_workers=0, pin_memory=False
)
class SyntheticDiskDatasetFactory(DatasetFactory):
def create_sampler(self, dataset: Dataset) -> Optional[Sampler]:
return None
def create_datasets(self) -> Tuple[Dataset, Dataset]:
synthetic_train, synthetic_test = create_synthetic_datasets(self._flags)
if is_distributed():
self._synchronized_write(synthetic_train, synthetic_test)
else:
self._write(synthetic_train, synthetic_test)
return create_real_datasets(
self._flags, self._flags.synthetic_dataset_dir,
SplitCriteoDataset, "train", "test",
prefetch_depth=10
)
def _synchronized_write(self, train_dataset: Dataset, test_dataset: Dataset):
if is_main_process():
self._write(train_dataset, test_dataset)
torch.distributed.barrier()
def _write(self, train_dataset: Dataset, test_dataset: Dataset):
write_dataset_to_disk(self._flags.synthetic_dataset_dir, train_dataset, test_dataset,
self._flags.synthetic_dataset_table_sizes)
class SyntheticGpuDatasetFactory(DatasetFactory):
def create_collate_fn(self) -> Optional[Callable]:
return None
def create_sampler(self, dataset) -> Optional[Sampler]:
return None
def create_datasets(self) -> Tuple[Dataset, Dataset]:
return create_synthetic_datasets(self._flags, self._device_mapping)
class BinaryDatasetFactory(DatasetFactory):
def create_datasets(self) -> Tuple[Dataset, Dataset]:
return create_real_datasets(
self._flags,
self._flags.dataset,
dataset_class=CriteoBinDataset,
train_dataset_path="train_data.bin",
test_dataset_path="test_data.bin"
)
class SplitBinaryDatasetFactory(DatasetFactory):
def __init__(self, flags, numerical_features: bool,
categorical_features: Sequence[int]):
super().__init__(flags)
self._numerical_features = numerical_features
self._categorical_features = categorical_features
def create_collate_fn(self):
orig_stream = torch.cuda.current_stream() if self._flags.base_device == 'cuda' else None
return functools.partial(
collate_split_tensors,
device=self._flags.base_device,
orig_stream=orig_stream,
numerical_type=torch.float16 if self._flags.amp else torch.float32
)
def create_datasets(self) -> Tuple[Dataset, Dataset]:
train_dataset_path = os.path.join(self._flags.dataset, "train")
test_dataset_path = os.path.join(self._flags.dataset, "test")
categorical_sizes = get_categorical_feature_sizes(self._flags)
# prefetching is currently unsupported if using the batch-wise shuffle
prefetch_depth = 0 if self._flags.shuffle_batch_order else 10
dataset_train = SplitCriteoDataset(
data_path=train_dataset_path,
batch_size=self._flags.batch_size,
numerical_features=self._numerical_features,
categorical_features=self._categorical_features,
categorical_feature_sizes=categorical_sizes,
prefetch_depth=prefetch_depth
)
dataset_test = SplitCriteoDataset(
data_path=test_dataset_path,
batch_size=self._flags.test_batch_size,
numerical_features=self._numerical_features,
categorical_features=self._categorical_features,
categorical_feature_sizes=categorical_sizes,
prefetch_depth=prefetch_depth
)
return dataset_train, dataset_test
def create_dataset_factory(flags, device_mapping: Optional[dict] = None) -> DatasetFactory:
"""
    By default each dataset can be used in a single-GPU or a distributed setting - please keep that in mind when
    adding new datasets. The distributed case requires a selection of categorical features provided in
    `device_mapping` (see `DatasetFactory#create_collate_fn`).
    :param flags: parsed command-line flags describing the dataset, paths and batch sizes
    :param device_mapping: dict, information about model bottom mlp and embeddings devices assignment
    :return: a DatasetFactory subclass instance matching ``flags.dataset_type``
"""
dataset_type = flags.dataset_type
if dataset_type == "binary":
return BinaryDatasetFactory(flags, device_mapping)
if dataset_type == "split":
if is_distributed() or device_mapping:
assert device_mapping is not None, "Distributed dataset requires information about model device mapping."
rank = get_rank()
return SplitBinaryDatasetFactory(
flags=flags,
numerical_features=device_mapping["bottom_mlp"] == rank,
categorical_features=device_mapping["embedding"][rank]
)
return SplitBinaryDatasetFactory(
flags=flags,
numerical_features=True,
categorical_features=range(len(get_categorical_feature_sizes(flags)))
)
if dataset_type == "synthetic_gpu":
return SyntheticGpuDatasetFactory(flags, device_mapping)
if dataset_type == "synthetic_disk":
return SyntheticDiskDatasetFactory(flags, device_mapping)
raise NotImplementedError(f"unknown dataset type: {dataset_type}")
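# Minimal wiring sketch (assumes a parsed `flags` namespace as used above; not
# part of the original module):
#
#     factory = create_dataset_factory(flags, device_mapping)
#     dataset_train, dataset_test = factory.create_datasets()
#     data_loader_train = factory.create_data_loader(
#         dataset_train,
#         collate_fn=factory.create_collate_fn(),
#         sampler=factory.create_sampler(dataset_train),
#     )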
|
the-stack_0_24487 | import os
import sys
from polygfinder import findAllPolysInStrain
from Bio import SeqIO
from natsort import natsorted
import re
import logging
import settings
def main() :
    # Smoke test: crawlGenomes also requires filter, minLengths and plugins,
    # so pass explicit placeholders here.
    db = crawlGenomes('c.jejuni', None, None, None)
for strain in db :
print(strain['name'], len(strain['tracts']))
def crawlGenomes(path, filter, minLengths, plugins) :
db =[]
fn = lambda x : addGenomeToDatabase(db, x, filter, minLengths)
pathCrawl(path, ['gb', 'gbk', 'gbf', 'embl'], fn)
if plugins :
for plugin in plugins :
if hasattr(plugin, 'annotateDb') :
plugin.annotateDb(db)
db = reorderDatabase(db)
return db
def reorderDatabase(db) :
precedence = settings.firstStrains #['NCTC 11168','81-176', '81-176 - (pTet)', '81-176 - (pVir)', 'PT14']
speciesPrecedence = settings.firstSpecies #['C. jejuni', 'C. coli']
def sortFunc(strain) :
strainName = strain['name']
species = strain['species']
if species in speciesPrecedence :
key = str(speciesPrecedence.index(species))
else :
key = str(len(speciesPrecedence))
if strainName in precedence :
key = key + '/'+str(precedence.index(strainName))
else :
key = key + '/'+str(len(precedence))
return key+species+strainName
return natsorted(db, key=sortFunc)
def pathCrawl(target, types, function) :
print('Crawling', end='')
sys.stdout.flush()
for root, dirs, files in os.walk(target) :
for file in files :
ext = os.path.splitext(file)[1][1:]
if ext not in types :
continue
path = os.path.join(root, file)
function(path)
print('.', end='')
sys.stdout.flush()
print('')
def createRawGenomeName(genomeRecord, path) :
qualifiers = genomeRecord.features[0].qualifiers
if 'strain' in qualifiers:
name = qualifiers['strain'][0]
elif 'isolate' in qualifiers :
name = qualifiers['isolate'][0]
else :
name = os.path.basename(path)
if 'plasmid' in qualifiers :
name = name + " - (" + qualifiers['plasmid'][0] + ")"
return name
def createGenomeName(genomeRecord, path, db) :
name = createRawGenomeName(genomeRecord, path)
duplicates = 0
for strain in db :
if strain['rawName'] == name :
duplicates += 1
if not duplicates :
return name
name = name + ' (' + str(duplicates+1) + ')'
return name
class GenomeNamer :
def __init__(self,baseName) :
self.baseName = baseName
self.count = 0
def nextName(self) :
name = self.baseName + ' [' + str(self.count) + ']'
self.count += 1
return name
def addGenomeToDatabase(db, path, filter, minLengths) :
contigs = []
# The namer comes into play for files without clear annotation
ext = os.path.splitext(path)[-1]
format = 'embl' if ext.lower() == '.embl' else 'gb'
for genomeRecord in SeqIO.parse(path, format) :
# Skip plasmids
if 'plasmid' in genomeRecord.features[0].qualifiers :
return
if not len(genomeRecord.description) :
genomeRecord.description = genomeRecord.id
sortAndMapFeatures(genomeRecord)
contigs.append({'record' : genomeRecord, 'inGenome' : path} )
strain = {
'name' : createGenomeName(genomeRecord, path, db),
'rawName' : createRawGenomeName(genomeRecord, path),
'path' : path,
'contigs' : contigs }
strain['species'] = getSpeciesName(strain)
makeFastaVersion(strain)
namer = GenomeNamer(strain['name'])
polys = findAllPolysInStrain(strain, filter, minLengths, namer)
for n, found in enumerate(polys) :
strain['contigs'][n]['tracts'] = found
db.append(strain)
logging.info('File {0} added to database.'.format(path))
def makeFastaVersion(strain) :
# Create .fasta versions of each Genbank file so we can then make blast databases from them and use bossref
gbpath = strain['path']
fapath = os.path.splitext(gbpath)[0] + ".fasta"
if os.path.exists(fapath) :
# check if the fasta file is up to date
gbdate = os.stat(gbpath).st_mtime
fadate = os.stat(fapath).st_mtime
if fadate > gbdate :
return
SeqIO.write([contig['record'] for contig in strain['contigs']], fapath, "fasta")
def getSpeciesName(strain) :
features = strain['contigs'][0]['record'].features
if not features or len(features) < 1 :
return 'Unknown sp.'
qualifiers = features[0].qualifiers
organism = None
if 'organism' in qualifiers :
organism = qualifiers['organism'][0]
elif 'source' in qualifiers :
organism = qualifiers['source'][0]
species = 'Unknown sp.'
if organism :
        match = re.match(r'(\S*) (\S*)', organism)
if match :
species = match.group(1)[0]+". "+match.group(2)
return species
def sortAndMapFeatures(record) :
# Sort the features to ensure they are ordered
# But keep the source record first
record.features = sorted(record.features, key=lambda x : (x.type != 'source', x.type, x.location.start))
record.lookup = {}
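    # lookup maps each feature type to a (start, end) index range into
    # record.features, so all features of one type can be sliced out quickly.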
lastType = record.features[0].type
first = 0
for i, feature in enumerate(record.features) :
if feature.type != lastType :
record.lookup[lastType] = (first, i)
first = i
lastType = feature.type
record.lookup[lastType] = (first, len(record.features))
record.featureStarts = [f.location.start for f in record.features]
if __name__ == "__main__":
main()
|
the-stack_0_24490 | import argparse
import enum
import os.path
import math
import skyfield.almanac
import skyfield.api
import sys
def adjacent_filter(it, pred):
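    """Yield each item for which pred(item, next_item) holds for its successor.
    Note (derived from the code): the final item is never yielded, since it has
    no successor to compare against."""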
it = iter(it)
try:
last = next(it)
for current in it:
if pred(last, current):
yield last
last = current
except StopIteration:
pass
def lookahead(it, scope=1):
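    """Yield sliding windows of scope + 1 consecutive items as tuples.
    Example (derived from the code, not an original doctest): with the default
    scope=1, lookahead([1, 2, 3, 4]) yields (1, 2), (2, 3), (3, 4)."""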
it = iter(it)
view = []
try:
for i in range(scope):
view.append(next(it))
for item in it:
view.append(item)
yield tuple(view)
del view[0]
except StopIteration:
pass
# Julian date = days since 1 Jan -4713 noon
# Time in Julian date is UT (=UT1), which is successor to GMT, so it's centred on Greenwhich.
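# For reference (standard value, not taken from these notes): JD 2451545.0
# corresponds to 2000 January 1, 12:00 TT - the J2000.0 epoch.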
script_dir = sys.path[0]
timescale = skyfield.api.load.timescale(builtin=True)
sf_load = skyfield.api.Loader(os.path.join(script_dir, "skyfield_data", "loaded"))
planets = sf_load("de406.bsp") # goes from -3000 to +3000
class Season(enum.IntEnum):
Spring = 0
Summer = 1
Autumn = 2
Winter = 3
def find_epoch(options):
print("Started")
seasons = skyfield.almanac.seasons(planets)
def isWinter(time):
return seasons(time) == Season.Winter
def isSafe(time):
fract = math.modf(time.tt + 0.5)[0]
return epsilon < fract < 1-epsilon
isWinter.rough_period = 365.0
epsilon = 1/24/2 # Half-hour precision is good enough
midpoint = 2_100_000 # Roughly +1000
period = 500 * 365
t, s = skyfield.almanac.find_discrete(
# timescale.tt_jd(1_000_000) # Roughly -2000
# , timescale.tt_jd(2_500_000) # Roughly +2000
timescale.tt_jd(midpoint - period/2)
, timescale.tt_jd(midpoint + period/2)
, isWinter
, epsilon = epsilon
)
solstices = (p[0] for p in zip(t, s) if p[1])
candidates = (p for p in lookahead(solstices) if
isSafe(p[0]) and isSafe(p[1])
and math.floor(p[1].tt + 0.5) - math.floor(p[0].tt + 0.5) > 365
)
# last = next(solstices)
# candidates = []
# for s in solstices:
# if math.floor(s.tt + 0.5) - math.floor(last.tt + 0.5) > 365:
# candidates.append((last, s))
# last = s
# candidates = filter(lambda c: isSafe(c[0]) and isSafe(c[1]), candidates)
print("\n".join(f"{p.tt} ({p.utc_jpl()}) - ({s.utc_jpl()})" for p, s in candidates))
def play(options):
for i in lookahead([1, 2, 3, 4, 5, 6]):
print(i)
def main(args):
parser = argparse.ArgumentParser(
prog=os.path.basename(args[0]),
description="Diary processing tool",
)
parser.set_defaults(process=lambda _: parser.parse_args(['-h']))
subcommands = parser.add_subparsers()
command_find_epoch = subcommands.add_parser(
"find-epoch"
)
command_find_epoch.set_defaults(process=find_epoch)
command_play = subcommands.add_parser(
"play"
)
command_play.set_defaults(process=play)
options = parser.parse_args(args[1:])
options.process(options)
if __name__ == "__main__":
main(sys.argv)
|
the-stack_0_24491 | from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views
app_name = 'accounts'
urlpatterns = [
url(r"login/$", auth_views.LoginView.as_view(template_name="accounts/login.html"),name='login'),
url(r"logout/$", auth_views.LogoutView.as_view(), name="logout"),
url(r"signup/$", views.SignUp.as_view(), name="signup"),
url(r"forget/$", views.Forget.as_view(), name="forget"),
]
|
the-stack_0_24493 | # Copyright 2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the mp_seq tool.
"""
# Futures
from __future__ import absolute_import, print_function
# Built-in modules
import os
import shutil
from tempfile import SpooledTemporaryFile, mkdtemp
from unittest import TestCase, main, skipIf
# Third party modules
import six
from six.moves import range
# Own modules
import microprobe
if six.PY2:
import subprocess32 as subprocess # @UnresolvedImport @UnusedImport
else:
import subprocess # @Reimport
# Constants
BASEPATH = os.path.join(os.path.dirname(microprobe.__file__), "..", "..")
MP_TESTING_ARCH = os.environ.get("MP_TESTING_ARCH", None)
# Classes
class seq(TestCase): # pylint: disable=invalid-name
"""
seq Test Class.
"""
_multiprocess_can_split_ = True
name = "mp_seq"
description = "mp_seq tool tests"
cmd = [os.path.join(BASEPATH, "targets", "generic", "tools", "mp_seq.py")]
target = os.path.join(BASEPATH, "targets")
trials = 3
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self.tdirname = mkdtemp(prefix="microprobe_%s_" % self.name,
suffix=".seq")
def tearDown(self):
shutil.rmtree(self.tdirname, True)
@skipIf(MP_TESTING_ARCH not in [None, "RISCV"], "Long testing")
def test_001(self):
"""
mp_seq_test001 generic
"""
self._wrapper("none", "-h")
def _wrapper(self, target, extra=None):
"""
Common execution wrapper
"""
test_cmd = self.cmd[:]
if extra is not None:
extra = extra.strip()
test_cmd.extend(extra.split(' '))
test_cmd.extend(["-T", target])
test_cmd.extend(["-P", self.target])
test_cmd.extend(["-D", self.tdirname])
print(" ".join(test_cmd))
for trial in range(0, self.trials):
print("Trial %s" % trial)
tfile = SpooledTemporaryFile()
error_code = subprocess.call(
test_cmd,
stdout=tfile,
stderr=subprocess.STDOUT
)
if error_code == 0:
break
if error_code != 0:
tfile.seek(0)
print(tfile.read())
self.assertEqual(error_code, 0)
TEST_CLASSES = [seq]
if __name__ == '__main__':
main()
|
the-stack_0_24498 | import streamlit as st
import SessionState
import pandas as pd
# Dictionary of sample data
pesso = {
'Nome': ['Guilherme', 'Fulana', 'Ciclano', 'Deltrana'],
'Idade': [22, 14, 50, 30],
'Sexo': ['M', 'F', 'M', 'F'],
'Salario': [2000, 5000, 10000, 7300]
}
# Conversion to a DataFrame
df = pd.DataFrame(data=pesso)
# Per-session storage
session_state = SessionState.get(pessoas=df)
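# SessionState.get keeps the `pessoas` DataFrame alive across Streamlit reruns
# for this browser session (SessionState here is the community session-state helper module).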
# Main function
def main():
st.title('Dataframe Filter')
st.header('Programa que filtra valores de um Dataframe Pandas')
    # Filter parameter selection
parametros = st.selectbox(
"Restringir: ",
('Nenhum', 'Idade', 'Sexo', 'Salario'))
    # final conversion to a pandas DataFrame
pessoas = pd.DataFrame(session_state.pessoas)
    # Apply the selected filter
if parametros == 'Idade':
radio_idade = st.radio('Escolha o intervalo', ['Maior que', 'Menor que', 'Intervalo'])
if radio_idade == 'Maior que':
input_idade = st.number_input('Digite o intervalo entre a idade', step=1)
pessoas = pessoas[pessoas["Idade"] > input_idade]
elif radio_idade == 'Menor que':
input_idade = st.number_input('Digite o intervalo entre a idade', step=1, value=100)
pessoas = pessoas[pessoas["Idade"] < input_idade]
else:
col1, col2 = st.beta_columns(2)
menor = col1.number_input('Menor que', step=1)
maior = col2.number_input('Maior que', step=1, value=100)
pessoas = pessoas[(pessoas['Idade'] < maior) & (pessoas['Idade'] > menor)]
elif parametros == 'Sexo':
sexo_select = st.selectbox(
"Escolha o sexo desejado: ",
('M', 'F'))
if sexo_select == 'M':
pessoas = pessoas[pessoas["Sexo"] == 'M']
else:
pessoas = pessoas[pessoas["Sexo"] == 'F']
elif parametros == 'Salario':
radio_salario = st.radio('Escolha o intervalo', ['Maior que', 'Menor que', 'Intervalo'])
if radio_salario == 'Maior que':
input_salario = st.number_input('Digite o intervalo entre o salário', step=1)
pessoas = pessoas[pessoas["Salario"] > input_salario]
elif radio_salario == 'Menor que':
input_salario = st.number_input('Digite o intervalo entre o salário', step=1, value=100)
pessoas = pessoas[pessoas["Salario"] < input_salario]
else:
col1, col2 = st.beta_columns(2)
menor = col1.number_input('Menor que', step=1)
maior = col2.number_input('Maior que', step=1, value=12000)
pessoas = pessoas[(pessoas['Salario'] < maior) & (pessoas['Salario'] > menor)]
    # Show the table
st.table(pessoas)
if __name__ == '__main__':
main()
|
the-stack_0_24500 | """Derivation of variable `rsnt`."""
from iris import Constraint
from ._baseclass import DerivedVariableBase
class DerivedVariable(DerivedVariableBase):
"""Derivation of variable `rsnt`."""
# Required variables
required = [
{
'short_name': 'rsdt'
},
{
'short_name': 'rsut'
},
]
@staticmethod
def calculate(cubes):
"""Compute toa net downward shortwave radiation."""
rsdt_cube = cubes.extract_strict(
Constraint(name='toa_incoming_shortwave_flux'))
rsut_cube = cubes.extract_strict(
Constraint(name='toa_outgoing_shortwave_flux'))
rsnt_cube = rsdt_cube - rsut_cube
return rsnt_cube
|
the-stack_0_24501 | import sys
import time
import pygame
import colors
pygame.init()
# Height and width of the game screen
width, height = 500, 600
screen = pygame.display.set_mode((width, height))
# caption and icon
pygame.display.set_caption("Tic Tac Toe")
icon = pygame.image.load("images/icon.png")
pygame.display.set_icon(icon)
# defining Fonts
very_small_font = pygame.font.SysFont('corbel', 20)
small_font = pygame.font.SysFont('corbel', 35)
medium_font = pygame.font.SysFont('corbel', 50)
# Text with above fonts
text = small_font.render('Start', True, colors.white)
player1 = small_font.render('Player 1', True, colors.white)
player2 = small_font.render('Player 2', True, colors.white)
score = medium_font.render("0:0", True, colors.red)
# Images
tic_tac_toe_board = pygame.image.load("images/board.png") # Grid image
tic_tac_toe_board = pygame.transform.scale(tic_tac_toe_board, (300, 300))
x = pygame.image.load("images/x.png") # X image
x = pygame.transform.scale(x, (60, 60))
o = pygame.image.load("images/o.png") # O image
o = pygame.transform.scale(o, (60, 60))
red_line = pygame.image.load("images/red_line.png") # red line image
black_line = pygame.image.load("images/black_line.png") # black line image
# board to record the positions of choice
board = dict()
# initial score
player1_score = 0
player2_score = 0
turn = "player1"
player1_with_x = True
x_o = []
# Numbers for 1 to 9 with the font stored in x_o list
for i in range(9):
x_o.append(very_small_font.render(str(i + 1), True, colors.red))
# Update the score after a win and reset the board
def winner(win):
global board
global score
global player1_score
global player2_score
if win == "player1":
player1_score += 1
else:
player2_score += 1
# Score is changed
score = medium_font.render(f"{player1_score}:{player2_score}", True, colors.red)
board = dict()
# Winner line
def cross_line(black_line_position, red_line_position, black_line, red_line):
if turn == "player1":
if player1_with_x:
screen.blit(black_line, black_line_position)
else:
screen.blit(red_line, red_line_position)
else:
if player1_with_x:
screen.blit(red_line, red_line_position)
else:
screen.blit(black_line, black_line_position)
pygame.display.update()
time.sleep(1)
# Function to check winner
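# Board cells are keyed '1'..'9'. From the blit coordinates used below and in
# game_selection(), the implied on-screen layout is (derived, not documented
# in the original):
#   7 8 9
#   4 5 6
#   1 2 3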
def check_winner():
global board
global red_line
global black_line
# Column Check
for i in range(3):
if board.get(str(i + 1)) == board.get(str(i + 4)) and board.get(str(i + 4)) == board.get(
str(i + 7)) and board.get(str(i + 7)):
winner(board.get(str(i + 1)))
            black_line_position = (100 + (i % 3) * 90, -50) # horizontal step per column = 90
red_line_position = (60 + (i % 3) * 90, 60)
# Transforming and rotating the lines accordingly
temp_red_line = pygame.transform.scale(red_line, (260, 100))
temp_red_line = pygame.transform.rotate(temp_red_line, -70)
temp_black_line = pygame.transform.scale(black_line, (510, 100))
temp_black_line = pygame.transform.rotate(temp_black_line, - 90)
cross_line(black_line_position, red_line_position, temp_black_line, temp_red_line)
return
# Row check
for i in range(0, 7, 3):
if board.get(str(i + 1)) == board.get(str(i + 2)) and board.get(str(i + 2)) == board.get(
str(i + 3)) and board.get(str(i + 3)):
winner(board.get(str(i + 1)))
            black_line_position = (-20, 245 - (i // 3) * 100) # vertical step per row = 100
red_line_position = (95, 205 - (i // 3) * 100)
# Transforming and rotating the lines accordingly
temp_red_line = pygame.transform.scale(red_line, (260, 100))
temp_red_line = pygame.transform.rotate(temp_red_line, 20)
temp_black_line = pygame.transform.scale(black_line, (510, 100))
cross_line(black_line_position, red_line_position, temp_black_line, temp_red_line)
return
# Diagonal
if board.get('1') == board.get('5') and board.get('5') == board.get('9') and board.get('9'):
winner(board.get('1'))
black_line_position = (-20, -40)
red_line_position = (110, 50)
# Transforming and rotating the lines accordingly
temp_red_line = pygame.transform.scale(red_line, (320, 80))
temp_red_line = pygame.transform.rotate(temp_red_line, 20 + 38)
temp_black_line = pygame.transform.scale(black_line, (610, 100))
temp_black_line = pygame.transform.rotate(temp_black_line, 45)
cross_line(black_line_position, red_line_position, temp_black_line, temp_red_line)
return
# Diagonal
if board.get('3') == board.get('5') and board.get('5') == board.get('7') and board.get('7'):
winner(board.get('3'))
black_line_position = (-10, -60)
red_line_position = (85, 75)
# Transforming and rotating the lines accordingly
temp_red_line = pygame.transform.scale(red_line, (320, 80))
temp_red_line = pygame.transform.rotate(temp_red_line, 20 + 38 - 90)
temp_black_line = pygame.transform.scale(black_line, (610, 100))
temp_black_line = pygame.transform.rotate(temp_black_line, 45 - 90)
cross_line(black_line_position, red_line_position, temp_black_line, temp_red_line)
return
# Its a draw
if len(board) == 9:
board = dict()
# Swapping between players
def swap():
global turn
# print(turn)
if turn == "player1":
turn = "player2"
else:
turn = "player1"
# X, O positions in the grid
def x_o_position(num, co):
if board[num] == "player1":
if player1_with_x:
screen.blit(x, co)
else:
screen.blit(o, co)
else:
if player1_with_x:
screen.blit(o, co)
else:
screen.blit(x, co)
def game_selection():
global board
length = len(board)
while True:
for event in pygame.event.get():
# Quit option
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
            # Keyboard input handling; the generic loop below is commented out because it does not work for keypad numbers
# if event.type == pygame.KEYDOWN:
# for i in range(9):
# if event.key == ord(str(i + 1)) and not board.get(str(i + 1)):
# board[str(i + 1)] = turn
# print("in swap")
# swap()
if event.type == pygame.KEYDOWN:
if (event.key == ord("1") or event.key == pygame.K_KP1) and not board.get("1"):
board["1"] = turn
swap()
elif (event.key == ord("2") or event.key == pygame.K_KP2) and not board.get("2"):
board["2"] = turn
swap()
elif (event.key == ord("3") or event.key == pygame.K_KP3) and not board.get("3"):
board["3"] = turn
swap()
elif (event.key == ord("4") or event.key == pygame.K_KP4) and not board.get("4"):
board["4"] = turn
swap()
elif (event.key == ord("5") or event.key == pygame.K_KP5) and not board.get("5"):
board["5"] = turn
swap()
elif (event.key == ord("6") or event.key == pygame.K_KP6) and not board.get("6"):
board["6"] = turn
swap()
elif (event.key == ord("7") or event.key == pygame.K_KP7) and not board.get("7"):
board["7"] = turn
swap()
elif (event.key == ord("8") or event.key == pygame.K_KP8) and not board.get("8"):
board["8"] = turn
swap()
elif (event.key == ord("9") or event.key == pygame.K_KP9) and not board.get("9"):
board["9"] = turn
swap()
# Tic Tac Toe Grid
screen.fill((100, 100, 100))
screen.blit(tic_tac_toe_board, (90, 50))
# Players and Score
screen.blit(player1, (80, 420))
screen.blit(player2, (310, 420))
screen.blit(score, (215, 390))
        # Draw the reference numbers on empty slots; otherwise draw the placed X or O
for i in range(9):
if not board.get(str(i + 1)):
screen.blit(x_o[i], (90 + (i % 3) * 105, 320 - (i // 3) * 100))
else:
coordinates = (110 + (i % 3) * 100, 270 - (i // 3) * 100)
x_o_position(str(i + 1), coordinates)
        # Show whose turn it is, using that player's X or O
if turn == "player1":
if player1_with_x:
screen.blit(x, (90, 490))
else:
screen.blit(o, (90, 490))
else:
if player1_with_x:
screen.blit(o, (340, 490))
else:
screen.blit(x, (340, 490))
pygame.display.update()
        # If the board length changed, check for a winner
if len(board) != length:
length = len(board)
check_winner()
print(board)
# Start Button in Home page
# On clicking the button, goes to game() function
def home_page():
clicked = False
while not clicked:
# Mouse position
mouse = pygame.mouse.get_pos()
for event in pygame.event.get():
# Quit option
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
# Check if a mouse is clicked
if event.type == pygame.MOUSEBUTTONDOWN:
# If the mouse is clicked on the button
if 190 <= mouse[0] <= 300 and 270 <= mouse[1] <= 310:
clicked = True
# Check if a button is pressed on keyboard
if event.type == pygame.KEYDOWN:
# If enter is pressed
if event.key == pygame.K_RETURN or event.key == pygame.K_KP_ENTER:
clicked = True
# Background color
screen.fill((60, 25, 60))
# if mouse is hovered on a button it changes to lighter shade
if 190 <= mouse[0] <= 300 and 270 <= mouse[1] <= 310:
pygame.draw.rect(screen, colors.color_light, [190, 270, 110, 50])
else:
pygame.draw.rect(screen, colors.color_dark, [190, 270, 110, 50])
# Start text
screen.blit(text, (210, 280))
pygame.display.update()
game_selection()
# Starting the game
home_page()
|
the-stack_0_24503 | import html
from django.utils.module_loading import import_string
from jinja2 import Markup, contextfunction
from jinja2.ext import Extension
from core.feature_flags import environment_is
from v1.jinja2tags.datetimes import DatetimesExtension
from v1.jinja2tags.fragment_cache import FragmentCacheExtension
from v1.models.images import CFGOVRendition
from v1.templatetags.app_urls import app_page_url, app_url
from v1.templatetags.banners import (
collect_outage_banner, complaint_issue_banner,
complaint_maintenance_banner, omwi_salesforce_outage_banner
)
from v1.templatetags.email_popup import email_popup
from v1.util import ref
from v1.util.util import get_unique_id
def get_model(model_name):
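    """Import and return a class given its dotted path, for example
    get_model('v1.models.images.CFGOVRendition') (illustrative path)."""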
model_class = import_string(model_name)
return model_class
def image_alt_value(image):
"""Given an ImageBasic block or a CFGOVImage rendition as `image`,
return the appropriate alt text.
Return the CFGOVImage rendition's alt field, if present.
Returns the alt field from the block, if it is set.
Otherwise, returns the alt field from the CFGOVImage object, if it is set.
Otherwise, returns an empty string.
"""
# Check to see if the passed value is a CFGOVRendition
if isinstance(image, CFGOVRendition):
return image.alt
# Otherwise, if it is a block
if image and hasattr(image, 'get'):
block_alt = image.get('alt')
upload = image.get('upload')
if block_alt:
return block_alt
elif upload and upload.alt:
return upload.alt
return ''
def is_filter_selected(context, fieldname, value):
"""Check URL query parameters to see if a filter option should be selected
Returns True if fieldname=value is found in the GET data in order to output
the `checked` attribute on a checkbox or radio button in the
_filter_selectable macro (see: filterable-list-controls.html).
"""
request_get = context['request'].GET
query_string_values = [
k for k in
request_get.getlist(fieldname) +
request_get.getlist('filter_' + fieldname)
if k
]
# Dirty hack to check the default option for the `archived` filter
if fieldname == 'archived' and value == 'include':
return True
return value in query_string_values
def render_stream_child(context, stream_child):
# Use the django_jinja to get the template content based on its name
try:
template = context.environment.get_template(
stream_child.block.meta.template)
except Exception:
return stream_child
# Create a new context based on the current one as we can't edit it
# directly
new_context = context.get_all()
# Add the value on the context (value is the keyword chosen by
# wagtail for the blocks context)
try:
new_context['value'] = stream_child.value
except AttributeError:
new_context['value'] = stream_child
# Render the template with the context
html_result = template.render(new_context)
unescaped = html.unescape(html_result)
# Return the rendered template as safe html
return Markup(unescaped)
def unique_id_in_context(context):
"""Return an ID that is unique within the given context
For a given request, return a unique ID each time this method is
called. The goal is to generate IDs to uniquely identify elements
in a template that are consistent between page loads.
If the context has a request object, the generated id will increment:
>>> context = {'request': request}
>>> unique_id_in_context(context) # returns 1
>>> unique_id_in_context(context) # returns 2
>>> unique_id_in_context(context) # returns 3
If the context lacks a request, this function will return a 14-character
unique alphanumeric string.
"""
request = context.get('request')
if request:
attribute_name = '__last_unique_id'
if not hasattr(request, attribute_name):
setattr(request, attribute_name, 0)
id = getattr(request, attribute_name) + 1
setattr(request, attribute_name, id)
return id
else:
return get_unique_id()
def search_gov_affiliate(context):
"""Given a request, return the appropriate affiliate for Search.gov.
Our default affiliate code is "cfpb". We have a separate Spanish-language
index named "cfpb_es". We then have two additional indexes, "cfpb_beta"
and "cfpb_beta_es", for use on beta.consumerfinance.gov.
"""
affiliate = 'cfpb'
if environment_is('beta'):
affiliate += '_beta'
language = context.get('language')
if language == 'es':
affiliate += '_es'
return affiliate
class V1Extension(Extension):
def __init__(self, environment):
super(V1Extension, self).__init__(environment)
self.environment.globals.update({
'category_label': ref.category_label,
'choices_for_page_type': ref.choices_for_page_type,
'email_popup': email_popup,
'collect_outage_banner': collect_outage_banner,
'complaint_issue_banner': complaint_issue_banner,
'complaint_maintenance_banner': complaint_maintenance_banner,
'omwi_salesforce_outage_banner': omwi_salesforce_outage_banner,
'get_model': get_model,
'get_unique_id': get_unique_id,
'image_alt_value': image_alt_value,
'is_blog': ref.is_blog,
'is_event': ref.is_event,
'is_report': ref.is_report,
'is_filter_selected': contextfunction(is_filter_selected),
'render_stream_child': contextfunction(render_stream_child),
'unique_id_in_context': contextfunction(unique_id_in_context),
'app_url': app_url,
'app_page_url': app_page_url,
'search_gov_affiliate': contextfunction(search_gov_affiliate),
})
# Nicer import names
datetimes_extension = DatetimesExtension
fragment_cache_extension = FragmentCacheExtension
v1_extension = V1Extension
|