TheAlgorithms/Python | machine_learning/similarity_search.py
"""
Similarity Search : https://en.wikipedia.org/wiki/Similarity_search
Similarity search is an algorithm for finding the vector in a set of vectors
that is nearest to a query vector; it is widely used in natural language processing.
This implementation measures distance with the Euclidean distance and
returns, for each query vector, a list containing two items:
1. the nearest vector
2. the distance between the query vector and the nearest vector (float)
"""
import math
from typing import List, Union
import numpy as np
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
"""
    Calculates the Euclidean distance between two vectors.
:param input_a: ndarray of first vector.
:param input_b: ndarray of second vector.
    :return: Euclidean distance of input_a and input_b. Because math.sqrt() is
    used, the result will be a float.
>>> euclidean(np.array([0]), np.array([1]))
1.0
>>> euclidean(np.array([0, 1]), np.array([1, 1]))
1.0
>>> euclidean(np.array([0, 0, 0]), np.array([0, 0, 1]))
1.0
"""
return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
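# Editorial note: for ndarray inputs an equivalent (and typically faster)
# formulation is float(np.linalg.norm(input_a - input_b)); the explicit
# generator expression above is kept for readability.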
def similarity_search(
dataset: np.ndarray, value_array: np.ndarray
) -> List[List[Union[List[float], float]]]:
"""
:param dataset: Set containing the vectors. Should be ndarray.
    :param value_array: vector(s) for which we want the nearest vector in dataset.
    :return: Result will be a list containing, for each input vector,
    1. the nearest vector
    2. the distance to that vector
>>> dataset = np.array([[0], [1], [2]])
>>> value_array = np.array([[0]])
>>> similarity_search(dataset, value_array)
[[[0], 0.0]]
>>> dataset = np.array([[0, 0], [1, 1], [2, 2]])
>>> value_array = np.array([[0, 1]])
>>> similarity_search(dataset, value_array)
[[[0, 0], 1.0]]
>>> dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
>>> value_array = np.array([[0, 0, 1]])
>>> similarity_search(dataset, value_array)
[[[0, 0, 0], 1.0]]
>>> dataset = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
>>> value_array = np.array([[0, 0, 0], [0, 0, 1]])
>>> similarity_search(dataset, value_array)
[[[0, 0, 0], 0.0], [[0, 0, 0], 1.0]]
These are the errors that might occur:
    1. If the numbers of dimensions differ.
    For example, dataset is a 2d array and value_array is a 1d array:
>>> dataset = np.array([[1]])
>>> value_array = np.array([1])
>>> similarity_search(dataset, value_array)
Traceback (most recent call last):
...
ValueError: Wrong input data's dimensions... dataset : 2, value_array : 1
    2. If the shapes of the data differ.
    For example, dataset has shape (3, 2) and value_array has shape (2, 3).
    Both arrays must have the same vector length, so this raises an error.
>>> dataset = np.array([[0, 0], [1, 1], [2, 2]])
>>> value_array = np.array([[0, 0, 0], [0, 0, 1]])
>>> similarity_search(dataset, value_array)
Traceback (most recent call last):
...
ValueError: Wrong input data's shape... dataset : 2, value_array : 3
    3. If the data types are different.
    Both arrays must have the same dtype for the comparison;
    otherwise a TypeError is raised.
>>> dataset = np.array([[0, 0], [1, 1], [2, 2]], dtype=np.float32)
>>> value_array = np.array([[0, 0], [0, 1]], dtype=np.int32)
>>> similarity_search(dataset, value_array) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
TypeError: Input data have different datatype...
dataset : float32, value_array : int32
"""
if dataset.ndim != value_array.ndim:
raise ValueError(
f"Wrong input data's dimensions... dataset : {dataset.ndim}, "
f"value_array : {value_array.ndim}"
)
try:
if dataset.shape[1] != value_array.shape[1]:
raise ValueError(
f"Wrong input data's shape... dataset : {dataset.shape[1]}, "
f"value_array : {value_array.shape[1]}"
)
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape")
if dataset.dtype != value_array.dtype:
raise TypeError(
f"Input data have different datatype... dataset : {dataset.dtype}, "
f"value_array : {value_array.dtype}"
)
answer = []
for value in value_array:
dist = euclidean(value, dataset[0])
vector = dataset[0].tolist()
for dataset_value in dataset[1:]:
temp_dist = euclidean(value, dataset_value)
if dist > temp_dist:
dist = temp_dist
vector = dataset_value.tolist()
answer.append([vector, dist])
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
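# Editorial sketch (not part of the original module): the same nearest-vector
# lookup expressed with NumPy broadcasting; the function name is hypothetical.
def similarity_search_vectorized(
    dataset: np.ndarray, value_array: np.ndarray
) -> List[List[Union[List[float], float]]]:
    answer = []
    for value in value_array:
        # Distance from this query vector to every dataset vector at once.
        distances = np.linalg.norm(dataset - value, axis=1)
        nearest = int(np.argmin(distances))
        answer.append([dataset[nearest].tolist(), float(distances[nearest])])
    return answer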
license: mit
pfalcon/picotui | picotui/widgets.py
from .basewidget import *
from .editorext import *
from .defs import *
__all__ = (
"ACTION_OK",
"ACTION_CANCEL",
"ACTION_NEXT",
"ACTION_PREV",
"EditableWidget",
"Dialog",
"WLabel",
"WFrame",
"WButton",
"WCheckbox",
"WRadioButton",
"WListBox",
"WPopupList",
"WDropDown",
"WTextEntry",
"WMultiEntry",
"WComboBox",
"WCompletionList",
"WAutoComplete",
)
class Dialog(Widget):
finish_on_esc = True
def __init__(self, x, y, w=0, h=0, title=""):
super().__init__()
self.x = x
self.y = y
self.w = w
self.h = h
self.title = ""
if title:
self.title = " %s " % title
self.childs = []
# On both sides
self.border_w = 2
self.border_h = 2
self.focus_w = None
self.focus_idx = -1
def add(self, x, y, widget):
if isinstance(widget, str):
# Convert raw string to WLabel
widget = WLabel(widget)
widget.set_xy(self.x + x, self.y + y)
self.childs.append(widget)
widget.owner = self
def autosize(self, extra_w=0, extra_h=0):
w = 0
h = 0
for wid in self.childs:
w = max(w, wid.x - self.x + wid.w)
h = max(h, wid.y - self.y + wid.h)
self.w = max(self.w, w + self.border_w - 1) + extra_w
self.h = max(self.h, h + self.border_h - 1) + extra_h
def redraw(self):
# Init some state on first redraw
if self.focus_idx == -1:
self.autosize()
self.focus_idx, self.focus_w = self.find_focusable_by_idx(0, 1)
if self.focus_w:
self.focus_w.focus = True
# Redraw widgets with cursor off
self.cursor(False)
self.dialog_box(self.x, self.y, self.w, self.h, self.title)
for w in self.childs:
w.redraw()
# Then give widget in focus a chance to enable cursor
if self.focus_w:
self.focus_w.set_cursor()
def find_focusable_by_idx(self, from_idx, direction):
sz = len(self.childs)
while 0 <= from_idx < sz:
if isinstance(self.childs[from_idx], FocusableWidget):
return from_idx, self.childs[from_idx]
from_idx = (from_idx + direction) % sz
return None, None
def find_focusable_by_xy(self, x, y):
i = 0
for w in self.childs:
if isinstance(w, FocusableWidget) and w.inside(x, y):
return i, w
i += 1
return None, None
def change_focus(self, widget):
if widget is self.focus_w:
return
if self.focus_w:
self.focus_w.focus = False
self.focus_w.redraw()
self.focus_w = widget
widget.focus = True
widget.redraw()
widget.set_cursor()
def move_focus(self, direction):
prev_idx = (self.focus_idx + direction) % len(self.childs)
self.focus_idx, new_w = self.find_focusable_by_idx(prev_idx, direction)
self.change_focus(new_w)
def handle_key(self, key):
if key == KEY_QUIT:
return key
if key == KEY_ESC and self.finish_on_esc:
return ACTION_CANCEL
if key == KEY_TAB:
self.move_focus(1)
elif key == KEY_SHIFT_TAB:
self.move_focus(-1)
elif self.focus_w:
if key == KEY_ENTER:
if self.focus_w.finish_dialog is not False:
return self.focus_w.finish_dialog
res = self.focus_w.handle_key(key)
if res == ACTION_PREV:
self.move_focus(-1)
elif res == ACTION_NEXT:
self.move_focus(1)
else:
return res
def handle_mouse(self, x, y):
# Work in absolute coordinates
if self.inside(x, y):
self.focus_idx, w = self.find_focusable_by_xy(x, y)
# print(w)
if w:
self.change_focus(w)
return w.handle_mouse(x, y)
class WLabel(Widget):
def __init__(self, text, w=0):
self.t = text
self.h = 1
self.w = w
if not w:
self.w = len(text)
def redraw(self):
self.goto(self.x, self.y)
self.wr_fixedw(self.t, self.w)
class WFrame(Widget):
def __init__(self, w, h, title=""):
self.w = w
self.h = h
self.t = title
def redraw(self):
self.draw_box(self.x, self.y, self.w, self.h)
if self.t:
pos = 1
self.goto(self.x + pos, self.y)
self.wr(" %s " % self.t)
class WButton(FocusableWidget):
def __init__(self, w, text):
Widget.__init__(self)
self.t = text
self.h = 1
self.w = w or len(text) + 2
self.disabled = False
self.focus = False
self.finish_dialog = False
def redraw(self):
self.goto(self.x, self.y)
if self.disabled:
self.attr_color(C_WHITE, C_GRAY)
else:
if self.focus:
self.attr_color(C_B_WHITE, C_GREEN)
else:
self.attr_color(C_BLACK, C_GREEN)
self.wr(self.t.center(self.w))
self.attr_reset()
def handle_mouse(self, x, y):
if not self.disabled:
if self.finish_dialog is not False:
return self.finish_dialog
else:
self.signal("click")
def handle_key(self, key):
if key == KEY_UP or key == KEY_LEFT:
return ACTION_PREV
if key == KEY_DOWN or key == KEY_RIGHT:
return ACTION_NEXT
# For dialog buttons (.finish_dialog=True), KEY_ENTER won't
# reach here.
if key == KEY_ENTER:
self.signal("click")
def on_click(self):
pass
class WCheckbox(ChoiceWidget):
def __init__(self, title, choice=False):
super().__init__(choice)
self.t = title
self.h = 1
self.w = 4 + len(title)
self.focus = False
def redraw(self):
self.goto(self.x, self.y)
if self.focus:
self.attr_color(C_B_BLUE, None)
self.wr("[x] " if self.choice else "[ ] ")
self.wr(self.t)
self.attr_reset()
def flip(self):
self.choice = not self.choice
self.redraw()
self.signal("changed")
def handle_mouse(self, x, y):
self.flip()
def handle_key(self, key):
if key == KEY_UP:
return ACTION_PREV
if key == KEY_DOWN:
return ACTION_NEXT
if key == b" ":
self.flip()
class WRadioButton(ItemSelWidget):
def __init__(self, items):
super().__init__(items)
self.h = len(items)
self.w = 4 + self.longest(items)
self.focus = False
def redraw(self):
i = 0
if self.focus:
self.attr_color(C_B_BLUE, None)
for t in self.items:
self.goto(self.x, self.y + i)
self.wr("(*) " if self.choice == i else "( ) ")
self.wr(t)
i += 1
self.attr_reset()
def handle_mouse(self, x, y):
self.choice = y - self.y
self.redraw()
self.signal("changed")
def handle_key(self, key):
if key == KEY_UP:
self.move_sel(-1)
elif key == KEY_DOWN:
self.move_sel(1)
class WListBox(EditorExt, ChoiceWidget):
def __init__(self, w, h, items):
EditorExt.__init__(self)
ChoiceWidget.__init__(self, 0)
self.width = w
self.w = w
self.height = h
self.h = h
self.set_items(items)
self.focus = False
def set_items(self, items):
self.items = items
self.set_lines(items)
def render_line(self, l):
# Default identity implementation is suitable for
# items being list of strings.
return l
def show_line(self, l, i):
hlite = self.cur_line == i
if hlite:
if self.focus:
self.attr_color(C_B_WHITE, C_GREEN)
else:
self.attr_color(C_BLACK, C_GREEN)
if i != -1:
l = self.render_line(l)[:self.width]
self.wr(l)
self.clear_num_pos(self.width - len(l))
if hlite:
self.attr_reset()
def handle_mouse(self, x, y):
res = super().handle_mouse(x, y)
self.choice = self.cur_line
self.redraw()
self.signal("changed")
return res
def handle_key(self, key):
res = super().handle_key(key)
self.choice = self.cur_line
self.redraw()
self.signal("changed")
return res
def handle_edit_key(self, key):
pass
def set_cursor(self):
Widget.set_cursor(self)
def cursor(self, state):
# Force off
super().cursor(False)
class WPopupList(Dialog):
class OneShotList(WListBox):
def handle_key(self, key):
if key == KEY_ENTER:
return ACTION_OK
if key == KEY_ESC:
return ACTION_CANCEL
return super().handle_key(key)
def handle_mouse(self, x, y):
if super().handle_mouse(x, y) == True:
# (Processed) mouse click finishes selection
return ACTION_OK
def __init__(self, x, y, w, h, items, sel_item=0):
super().__init__(x, y, w, h)
self.list = self.OneShotList(w - 2, h - 2, items)
self.list.cur_line = sel_item
self.add(1, 1, self.list)
def handle_mouse(self, x, y):
if not self.inside(x, y):
return ACTION_CANCEL
return super().handle_mouse(x, y)
def get_choice(self):
return self.list.cur_line
def get_selected_value(self):
if not self.list.content:
return None
return self.list.content[self.list.cur_line]
class WDropDown(ChoiceWidget):
def __init__(self, w, items, *, dropdown_h=5):
super().__init__(0)
self.items = items
self.h = 1
self.w = w
self.dropdown_h = dropdown_h
self.focus = False
def redraw(self):
self.goto(self.x, self.y)
if self.focus:
self.attr_color(C_B_WHITE, C_CYAN)
else:
self.attr_color(C_BLACK, C_CYAN)
self.wr_fixedw(self.items[self.choice], self.w - 1)
self.attr_reset()
self.wr(DOWN_ARROW)
def handle_mouse(self, x, y):
popup = WPopupList(self.x, self.y + 1, self.w, self.dropdown_h, self.items, self.choice)
res = popup.loop()
if res == ACTION_OK:
self.choice = popup.get_choice()
self.signal("changed")
self.owner.redraw()
def handle_key(self, key):
self.handle_mouse(0, 0)
class WTextEntry(EditorExt, EditableWidget):
def __init__(self, w, text):
EditorExt.__init__(self, width=w, height=1)
self.t = text
self.h = 1
self.w = w
self.focus = False
self.set(text)
self.col = len(text)
self.adjust_cursor_eol()
self.just_started = True
def get(self):
return self.get_cur_line()
def set(self, text):
self.set_lines([text])
def handle_cursor_keys(self, key):
if super().handle_cursor_keys(key):
if self.just_started:
self.just_started = False
self.redraw()
return True
return False
def handle_edit_key(self, key):
if key == KEY_ENTER:
# Don't treat as editing key
return True
if self.just_started:
if key != KEY_BACKSPACE:
# Overwrite initial string with new content
self.set_lines([""])
self.col = 0
self.just_started = False
return super().handle_edit_key(key)
def handle_mouse(self, x, y):
if self.just_started:
self.just_started = False
self.redraw()
super().handle_mouse(x, y)
def show_line(self, l, i):
if self.just_started:
fg = C_WHITE
else:
fg = C_BLACK
self.attr_color(fg, C_CYAN)
super().show_line(l, i)
self.attr_reset()
class WMultiEntry(EditorExt, EditableWidget):
def __init__(self, w, h, lines):
EditorExt.__init__(self, width=w, height=h)
self.h = h
self.w = w
self.focus = False
self.set_lines(lines)
def get(self):
return self.content
def set(self, lines):
self.set_lines(lines)
def show_line(self, l, i):
self.attr_color(C_BLACK, C_CYAN)
super().show_line(l, i)
self.attr_reset()
class WComboBox(WTextEntry):
popup_class = WPopupList
popup_h = 5
def __init__(self, w, text, items):
# w - 1 width goes to Editor widget
super().__init__(w - 1, text)
# We have full requested width, will show arrow symbol as last char
self.w = w
self.items = items
def redraw(self):
self.goto(self.x + self.w - 1, self.y)
self.wr(DOWN_ARROW)
super().redraw()
def get_choices(self, substr):
return self.items
def show_popup(self):
choices = self.get_choices(self.get())
popup = self.popup_class(self.x, self.y + 1, self.longest(choices) + 2, self.popup_h, choices)
popup.main_widget = self
res = popup.loop()
if res == ACTION_OK:
val = popup.get_selected_value()
if val is not None:
self.set_lines([val])
self.margin = 0
self.col = sys.maxsize
self.adjust_cursor_eol()
self.just_started = False
self.owner.redraw()
def handle_key(self, key):
if key == KEY_DOWN:
self.show_popup()
else:
return super().handle_key(key)
def handle_mouse(self, x, y):
if x == self.x + self.w - 1:
self.show_popup()
else:
super().handle_mouse(x, y)
class WCompletionList(WPopupList):
def __init__(self, x, y, w, h, items):
Dialog.__init__(self, x, y, w, h)
self.list = self.OneShotList(w - 2, h - 2, items)
self.add(1, 1, self.list)
chk = WCheckbox("Prefix")
def is_prefix_changed(wid):
main = self.main_widget
choices = main.get_choices(main.get(), wid.choice)
self.list.set_lines(choices)
self.list.top_line = 0
self.list.cur_line = 0
self.list.row = 0
self.list.redraw()
chk.on("changed", is_prefix_changed)
self.add(1, h - 1, chk)
class WAutoComplete(WComboBox):
popup_class = WCompletionList
def get_choices(self, substr, only_prefix=False):
substr = substr.lower()
if only_prefix:
choices = list(filter(lambda x: x.lower().startswith(substr), self.items))
else:
choices = list(filter(lambda x: substr in x.lower(), self.items))
return choices
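# Editorial sketch (not part of upstream picotui): typical assembly of a dialog
# from the widgets above. It assumes Widget.loop() and picotui.context.Context
# provided elsewhere in the package, so treat it as illustrative only.
def _example_dialog():
    from picotui.context import Context
    with Context():
        d = Dialog(5, 5, 40, 8, title="Example")
        d.add(1, 1, "Name:")                    # raw strings become WLabel widgets
        d.add(8, 1, WTextEntry(20, "initial"))
        ok = WButton(8, "OK")
        ok.finish_dialog = ACTION_OK            # pressing Enter here closes the dialog
        d.add(1, 3, ok)
        return d.loop()                         # ACTION_OK or ACTION_CANCEL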
license: mit
allanliebold/data-structures | src/dll.py
"""Implementation of Doubly-Linked list with a head and tail."""
from linked_list import LinkedList
from linked_list import Node
class Dll(object):
"""Doubly-Linked List class object."""
def __init__(self):
"""Doubly-linked list initialization.
        Composes a singly-linked list for its core attributes and adds a tail pointer.
"""
self._linkedlist = LinkedList()
self.head = self._linkedlist.head
self._length = self._linkedlist._length
self.tail = None
def push(self, data):
"""Push node to head of list."""
prev_head = self.head
new_head = self._linkedlist.push(data)
if self.tail is None:
self.tail = new_head
if self.head:
prev_head.prev = new_head
self.head = new_head
self.head.next_node = prev_head
self._length += 1
self.head.prev = None
def pop(self):
"""Remove node at head of list."""
if not self.head:
raise IndexError('List empty')
deleted_node = self.head.data
self._length -= 1
if not self.head.next_node:
self.head = None
self.tail = None
else:
self.head = self.head.next_node
self.head.prev = None
return deleted_node
def append(self, data):
"""Append method for Dll to add to tail."""
prev_tail = self.tail
new_tail = Node(data)
if self._length == 0:
self.tail = new_tail
self.head = new_tail
self.tail.prev = None
self.tail = new_tail
if self._length > 0:
prev_tail.next_node = new_tail
self.tail.prev = prev_tail
self._length += 1
def shift(self):
"""Shift method for Dll to remove from tail end."""
if self._length == 0:
raise IndexError('List empty')
deleted_node = self.tail.data
self._length -= 1
if not self.tail.prev:
self.head = None
self.tail = None
else:
self.tail = self.tail.prev
self.tail.next_node = None
return deleted_node
def remove(self, val):
"""Remove method for Dll to remove specified node."""
if self._length < 1:
raise IndexError('Value not present. List empty.')
if self._length == 1:
self.head = None
self.tail = None
target = self._linkedlist.search(val)
if target.prev:
target.prev.next_node = target.next_node
if target.next_node:
target.next_node.prev = target.prev
return target
def __len__(self):
"""Function uses built-in len function to show length."""
return self._length
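# Editorial usage sketch (not part of the original module). It relies on
# LinkedList.push returning the newly created node, exactly as Dll.push does above.
if __name__ == "__main__":
    dll = Dll()
    dll.push(1)           # list: 1
    dll.push(2)           # list: 2 <-> 1
    dll.append(3)         # list: 2 <-> 1 <-> 3
    print(len(dll))       # 3
    print(dll.pop())      # 2, removed from the head
    print(dll.shift())    # 3, removed from the tail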
license: mit
nuagenetworks/vspk-python | vspk/v5_0/nulocation.py
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NULocation(NURESTObject):
""" Represents a Location in the VSD
Notes:
Gateway location details.
"""
__rest_name__ = "location"
__resource_name__ = "locations"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a Location instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> location = NULocation(id=u'xxxx-xxx-xxx-xxx', name=u'Location')
>>> location = NULocation(data=my_dict)
"""
super(NULocation, self).__init__()
# Read/Write Attributes
self._last_updated_by = None
self._latitude = None
self._address = None
self._ignore_geocode = None
self._time_zone_id = None
self._entity_scope = None
self._locality = None
self._longitude = None
self._country = None
self._associated_entity_name = None
self._associated_entity_type = None
self._state = None
self._external_id = None
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="latitude", remote_name="latitude", attribute_type=float, is_required=False, is_unique=False)
self.expose_attribute(local_name="address", remote_name="address", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ignore_geocode", remote_name="ignoreGeocode", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="time_zone_id", remote_name="timeZoneID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="locality", remote_name="locality", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="longitude", remote_name="longitude", attribute_type=float, is_required=False, is_unique=False)
self.expose_attribute(local_name="country", remote_name="country", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_entity_name", remote_name="associatedEntityName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_entity_type", remote_name="associatedEntityType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="state", remote_name="state", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def latitude(self):
""" Get latitude value.
Notes:
Latitude in decimal format.
"""
return self._latitude
@latitude.setter
def latitude(self, value):
""" Set latitude value.
Notes:
Latitude in decimal format.
"""
self._latitude = value
@property
def address(self):
""" Get address value.
Notes:
Formatted address including property number, street name, suite or office number, ...
"""
return self._address
@address.setter
def address(self, value):
""" Set address value.
Notes:
Formatted address including property number, street name, suite or office number, ...
"""
self._address = value
@property
def ignore_geocode(self):
""" Get ignore_geocode value.
Notes:
Request BSS to perform a geocode on the address - If no value passed, requestGeocode will be set to true
This attribute is named `ignoreGeocode` in VSD API.
"""
return self._ignore_geocode
@ignore_geocode.setter
def ignore_geocode(self, value):
""" Set ignore_geocode value.
Notes:
Request BSS to perform a geocode on the address - If no value passed, requestGeocode will be set to true
This attribute is named `ignoreGeocode` in VSD API.
"""
self._ignore_geocode = value
@property
def time_zone_id(self):
""" Get time_zone_id value.
Notes:
Time zone in which the Gateway is located. This can be in the form of a UTC/GMT offset, continent/city location, or country/region. The available time zones can be found in /usr/share/zoneinfo on a Linux machine or retrieved with TimeZone.getAvailableIDs() in Java. Refer to the IANA (Internet Assigned Numbers Authority) for a list of time zones. URL : http://www.iana.org/time-zones Default value is UTC (translating to Etc/Zulu)
This attribute is named `timeZoneID` in VSD API.
"""
return self._time_zone_id
@time_zone_id.setter
def time_zone_id(self, value):
""" Set time_zone_id value.
Notes:
Time zone in which the Gateway is located. This can be in the form of a UTC/GMT offset, continent/city location, or country/region. The available time zones can be found in /usr/share/zoneinfo on a Linux machine or retrieved with TimeZone.getAvailableIDs() in Java. Refer to the IANA (Internet Assigned Numbers Authority) for a list of time zones. URL : http://www.iana.org/time-zones Default value is UTC (translating to Etc/Zulu)
This attribute is named `timeZoneID` in VSD API.
"""
self._time_zone_id = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def locality(self):
""" Get locality value.
Notes:
Locality/City/County
"""
return self._locality
@locality.setter
def locality(self, value):
""" Set locality value.
Notes:
Locality/City/County
"""
self._locality = value
@property
def longitude(self):
""" Get longitude value.
Notes:
Longitude in decimal format.
"""
return self._longitude
@longitude.setter
def longitude(self, value):
""" Set longitude value.
Notes:
Longitude in decimal format.
"""
self._longitude = value
@property
def country(self):
""" Get country value.
Notes:
Country
"""
return self._country
@country.setter
def country(self, value):
""" Set country value.
Notes:
Country
"""
self._country = value
@property
def associated_entity_name(self):
""" Get associated_entity_name value.
Notes:
Name of the associated entity.
This attribute is named `associatedEntityName` in VSD API.
"""
return self._associated_entity_name
@associated_entity_name.setter
def associated_entity_name(self, value):
""" Set associated_entity_name value.
Notes:
Name of the associated entity.
This attribute is named `associatedEntityName` in VSD API.
"""
self._associated_entity_name = value
@property
def associated_entity_type(self):
""" Get associated_entity_type value.
Notes:
Object type of the associated entity.
This attribute is named `associatedEntityType` in VSD API.
"""
return self._associated_entity_type
@associated_entity_type.setter
def associated_entity_type(self, value):
""" Set associated_entity_type value.
Notes:
Object type of the associated entity.
This attribute is named `associatedEntityType` in VSD API.
"""
self._associated_entity_type = value
@property
def state(self):
""" Get state value.
Notes:
State/Province/Region
"""
return self._state
@state.setter
def state(self, value):
""" Set state value.
Notes:
State/Province/Region
"""
self._state = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
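# Editorial sketch (not part of the generated VSPK code): building an instance
# locally and setting the attributes exposed above; no VSD session is involved.
def _example_location():
    location = NULocation()
    location.address = "123 Main Street"
    location.locality = "Antwerp"
    location.country = "Belgium"
    location.latitude = 51.2194
    location.longitude = 4.4025
    return location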
license: bsd-3-clause
albireox/marvin | python/marvin/utils/datamodel/dap/MPL6.py
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-09-13 16:05:56
# @Last modified by: José Sánchez-Gallego ([email protected])
# @Last modified time: 2018-08-06 11:45:33
from __future__ import absolute_import, division, print_function
from astropy import units as u
from marvin.utils.datamodel.maskbit import get_maskbits
from .base import Bintype, Channel, DAPDataModel, Model, MultiChannelProperty, Property
from .base import spaxel as spaxel_unit
from .MPL5 import ALL, GAU_MILESHC, NRE, SPX, VOR10
HYB10 = Bintype('HYB10', description='Binning and stellar continuum fitting as VOR10, '
'but emission lines are fitted per spaxel.')
# The two lines in the OII doublet are fitted independently for Gaussian
# measurements; in that case oii_3727 and oii_3729 are populated. For summed
# flux measurements the lines cannot be separated, so oiid_3728 contains
# the summed flux. In that case oii_3729 is null and is only kept to maintain
# the number of channels constant.
oiid_channel = Channel('oiid_3728', formats={'string': 'OIId 3728',
'latex': r'$\forb{O\,IId}\;\lambda\lambda 3728$'}, idx=0)
oii_channel = Channel('oii_3727', formats={'string': 'OII 3727',
'latex': r'$\forb{O\,II}\;\lambda 3727$'}, idx=0)
MPL6_emline_channels = [
Channel('oii_3729', formats={'string': 'OII 3729',
'latex': r'$\forb{O\,II}\;\lambda 3729$'}, idx=1),
Channel('hthe_3798', formats={'string': 'H-theta 3798',
'latex': r'H$\theta\;\lambda 3798$'}, idx=2),
Channel('heta_3836', formats={'string': 'H-eta 3836',
'latex': r'H$\eta\;\lambda 3836$'}, idx=3),
Channel('neiii_3869', formats={'string': 'NeIII 3869',
'latex': r'$\forb{Ne\,III}\;\lambda 3869$'}, idx=4),
Channel('hzet_3890', formats={'string': 'H-zeta 3890',
'latex': r'H$\zeta\;\lambda 3890$'}, idx=5),
Channel('neiii_3968', formats={'string': 'NeIII 3968',
'latex': r'$\forb{Ne\,III}\;\lambda 3968$'}, idx=6),
Channel('heps_3971', formats={'string': 'H-epsilon 3971',
'latex': r'H$\epsilon\;\lambda 3971$'}, idx=7),
Channel('hdel_4102', formats={'string': 'H-delta 4102',
'latex': r'H$\delta\;\lambda 4102$'}, idx=8),
Channel('hgam_4341', formats={'string': 'H-gamma 4341',
'latex': r'H$\gamma\;\lambda 4341$'}, idx=9),
Channel('heii_4687', formats={'string': 'HeII 4681',
'latex': r'He\,II$\;\lambda 4687$'}, idx=10),
Channel('hb_4862', formats={'string': 'H-beta 4862',
'latex': r'H$\beta\;\lambda 4862$'}, idx=11),
Channel('oiii_4960', formats={'string': 'OIII 4960',
'latex': r'$\forb{O\,III}\;\lambda 4960$'}, idx=12),
Channel('oiii_5008', formats={'string': 'OIII 5008',
'latex': r'$\forb{O\,III}\;\lambda 5008$'}, idx=13),
Channel('hei_5877', formats={'string': 'HeI 5877',
'latex': r'He\,I$\;\lambda 5877$'}, idx=14),
Channel('oi_6302', formats={'string': 'OI 6302',
'latex': r'$\forb{O\,I}\;\lambda 6302$'}, idx=15),
Channel('oi_6365', formats={'string': 'OI 6365',
'latex': r'$\forb{O\,I}\;\lambda 6365$'}, idx=16),
Channel('nii_6549', formats={'string': 'NII 6549',
'latex': r'$\forb{N\,II}\;\lambda 6549$'}, idx=17),
Channel('ha_6564', formats={'string': 'H-alpha 6564',
'latex': r'H$\alpha\;\lambda 6564$'}, idx=18),
Channel('nii_6585', formats={'string': 'NII 6585',
'latex': r'$\forb{N\,II}\;\lambda 6585$'}, idx=19),
Channel('sii_6718', formats={'string': 'SII 6718',
'latex': r'$\forb{S\,II}\;\lambda 6718$'}, idx=20),
Channel('sii_6732', formats={'string': 'SII 6732',
                                 'latex': r'$\forb{S\,II}\;\lambda 6732$'}, idx=21)
]
MPL6_specindex_channels = [
Channel('cn1', formats={'string': 'CN1'}, unit=u.mag, idx=0),
Channel('cn2', formats={'string': 'CN2'}, unit=u.mag, idx=1),
Channel('ca4227', formats={'string': 'Ca 4227',
'latex': r'Ca\,\lambda 4227'}, unit=u.Angstrom, idx=2),
Channel('g4300', formats={'string': 'G4300',
'latex': r'G\,\lambda 4300'}, unit=u.Angstrom, idx=3),
Channel('fe4383', formats={'string': 'Fe 4383',
'latex': r'Fe\,\lambda 4383'}, unit=u.Angstrom, idx=4),
Channel('ca4455', formats={'string': 'Ca 4455',
'latex': r'Ca\,\lambda 4455'}, unit=u.Angstrom, idx=5),
Channel('fe4531', formats={'string': 'Fe 4531',
'latex': r'Fe\,\lambda 4531'}, unit=u.Angstrom, idx=6),
Channel('c24668', formats={'string': 'C24668',
'latex': r'C2\,\lambda 4668'}, unit=u.Angstrom, idx=7),
Channel('hb', formats={'string': 'Hb',
'latex': r'H\beta'}, unit=u.Angstrom, idx=8),
Channel('fe5015', formats={'string': 'Fe 5015',
'latex': r'Fe\,\lambda 5015'}, unit=u.Angstrom, idx=9),
Channel('mg1', formats={'string': 'Mg1'}, unit=u.mag, idx=10),
Channel('mg2', formats={'string': 'Mg2'}, unit=u.mag, idx=11),
Channel('mgb', formats={'string': 'Mgb'}, unit=u.Angstrom, idx=12),
Channel('fe5270', formats={'string': 'Fe 5270',
'latex': r'Fe\,\lambda 5270'}, unit=u.Angstrom, idx=13),
Channel('fe5335', formats={'string': 'Fe 5335',
'latex': r'Fe\,\lambda 5335'}, unit=u.Angstrom, idx=14),
Channel('fe5406', formats={'string': 'Fe 5406',
'latex': r'Fe\,\lambda 5406'}, unit=u.Angstrom, idx=15),
Channel('fe5709', formats={'string': 'Fe 5709',
'latex': r'Fe\,\lambda 5709'}, unit=u.Angstrom, idx=16),
Channel('fe5782', formats={'string': 'Fe 5782',
'latex': r'Fe\,\lambda 5782'}, unit=u.Angstrom, idx=17),
Channel('nad', formats={'string': 'NaD'}, unit=u.Angstrom, idx=18),
Channel('tio1', formats={'string': 'TiO1'}, unit=u.mag, idx=19),
Channel('tio2', formats={'string': 'TiO2'}, unit=u.mag, idx=20),
Channel('hdeltaa', formats={'string': 'HDeltaA',
'latex': r'H\delta\,A'}, unit=u.Angstrom, idx=21),
Channel('hgammaa', formats={'string': 'HGammaA',
'latex': r'H\gamma\,F'}, unit=u.Angstrom, idx=22),
Channel('hdeltaf', formats={'string': 'HDeltaA',
'latex': r'H\delta\,F'}, unit=u.Angstrom, idx=23),
Channel('hgammaf', formats={'string': 'HGammaF',
'latex': r'H\gamma\,F'}, unit=u.Angstrom, idx=24),
Channel('cahk', formats={'string': 'CaHK'}, unit=u.Angstrom, idx=25),
Channel('caii1', formats={'string': 'CaII1'}, unit=u.Angstrom, idx=26),
Channel('caii2', formats={'string': 'CaII2'}, unit=u.Angstrom, idx=27),
Channel('caii3', formats={'string': 'CaII3'}, unit=u.Angstrom, idx=28),
Channel('pa17', formats={'string': 'Pa17'}, unit=u.Angstrom, idx=29),
Channel('pa14', formats={'string': 'Pa14'}, unit=u.Angstrom, idx=30),
Channel('pa12', formats={'string': 'Pa12'}, unit=u.Angstrom, idx=31),
Channel('mgicvd', formats={'string': 'MgICvD'}, unit=u.Angstrom, idx=32),
Channel('naicvd', formats={'string': 'NaICvD'}, unit=u.Angstrom, idx=33),
Channel('mgiir', formats={'string': 'MgIIR'}, unit=u.Angstrom, idx=34),
Channel('fehcvd', formats={'string': 'FeHCvD'}, unit=u.Angstrom, idx=35),
Channel('nai', formats={'string': 'NaI'}, unit=u.Angstrom, idx=36),
Channel('btio', formats={'string': 'bTiO'}, unit=u.mag, idx=37),
Channel('atio', formats={'string': 'aTiO'}, unit=u.mag, idx=38),
Channel('cah1', formats={'string': 'CaH1'}, unit=u.mag, idx=39),
Channel('cah2', formats={'string': 'CaH2'}, unit=u.mag, idx=40),
Channel('naisdss', formats={'string': 'NaISDSS'}, unit=u.Angstrom, idx=41),
Channel('tio2sdss', formats={'string': 'TiO2SDSS'}, unit=u.Angstrom, idx=42),
Channel('d4000', formats={'string': 'D4000'}, unit=u.dimensionless_unscaled, idx=43),
Channel('dn4000', formats={'string': 'Dn4000'}, unit=u.dimensionless_unscaled, idx=44),
Channel('tiocvd', formats={'string': 'TiOCvD'}, unit=u.dimensionless_unscaled, idx=45)
]
MPL6_binid_channels = [
Channel('binned_spectra', formats={'string': 'Binned spectra'},
unit=u.dimensionless_unscaled, idx=0),
Channel('stellar_continua', formats={'string': 'Stellar continua'},
unit=u.dimensionless_unscaled, idx=1),
Channel('em_line_moments', formats={'string': 'Emission line moments'},
unit=u.dimensionless_unscaled, idx=2),
Channel('em_line_models', formats={'string': 'Emission line models'},
unit=u.dimensionless_unscaled, idx=3),
Channel('spectral_indices', formats={'string': 'Spectral indices'},
unit=u.dimensionless_unscaled, idx=4)]
binid_properties = MultiChannelProperty('binid', ivar=False, mask=False,
channels=MPL6_binid_channels,
description='Numerical ID for spatial bins.')
MPL6_maps = [
MultiChannelProperty('spx_skycoo', ivar=False, mask=False,
channels=[Channel('on_sky_x', formats={'string': 'On-sky X'}, idx=0),
Channel('on_sky_y', formats={'string': 'On-sky Y'}, idx=1)],
unit=u.arcsec,
formats={'string': 'Sky coordinates'},
description='Offsets of each spaxel from the galaxy center.'),
MultiChannelProperty('spx_ellcoo', ivar=False, mask=False,
channels=[Channel('elliptical_radius',
formats={'string': 'Elliptical radius'},
idx=0, unit=u.arcsec),
Channel('r_re',
formats={'string': 'R/Reff'},
idx=1),
Channel('elliptical_azimuth',
formats={'string': 'Elliptical azimuth'},
idx=2, unit=u.deg)],
formats={'string': 'Elliptical coordinates'},
description='Elliptical polar coordinates of each spaxel from '
'the galaxy center.'),
Property('spx_mflux', ivar=True, mask=False,
unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit, scale=1e-17,
formats={'string': 'r-band mean flux'},
description='Mean flux in r-band (5600.1-6750.0 ang).'),
Property('spx_snr', ivar=False, mask=False,
formats={'string': 'r-band SNR'},
description='r-band signal-to-noise ratio per pixel.'),
binid_properties,
MultiChannelProperty('bin_lwskycoo', ivar=False, mask=False,
channels=[Channel('lum_weighted_on_sky_x',
formats={'string': 'Light-weighted offset X'},
idx=0, unit=u.arcsec),
Channel('lum_weighted_on_sky_y',
formats={'string': 'Light-weighted offset Y'},
idx=1, unit=u.arcsec)],
description='Light-weighted offset of each bin from the galaxy center.'),
MultiChannelProperty('bin_lwellcoo', ivar=False, mask=False,
channels=[Channel('lum_weighted_elliptical_radius',
formats={'string': 'Light-weighted radial offset'},
idx=0, unit=u.arcsec),
Channel('r_re',
formats={'string': 'R/REff'},
idx=1),
Channel('lum_weighted_elliptical_azimuth',
formats={'string': 'Light-weighted azimuthal offset'},
idx=2, unit=u.deg)],
description='Light-weighted elliptical polar coordinates of each bin '
'from the galaxy center.'),
Property('bin_area', ivar=False, mask=False,
unit=u.arcsec ** 2,
formats={'string': 'Bin area'},
description='Area of each bin.'),
Property('bin_farea', ivar=False, mask=False,
formats={'string': 'Bin fractional area'},
description='Fractional area that the bin covers for the expected bin '
'shape (only relevant for radial binning).'),
Property('bin_mflux', ivar=True, mask=True,
unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit, scale=1e-17,
formats={'string': 'r-band binned spectra mean flux'},
description='Mean flux in the r-band for the binned spectra.'),
Property('bin_snr', ivar=False, mask=False,
formats={'string': 'Bin SNR'},
description='r-band signal-to-noise ratio per pixel in the binned spectra.'),
Property('stellar_vel', ivar=True, mask=True,
unit=u.km / u.s,
formats={'string': 'Stellar velocity'},
description='Stellar velocity relative to NSA redshift.'),
Property('stellar_sigma', ivar=True, mask=True,
unit=u.km / u.s,
formats={'string': 'Stellar velocity dispersion', 'latex': r'Stellar $\sigma$'},
description='Stellar velocity dispersion (must be corrected using '
'STELLAR_SIGMACORR)'),
Property('stellar_sigmacorr', ivar=False, mask=False,
unit=u.km / u.s,
formats={'string': 'Stellar sigma correction',
'latex': r'Stellar $\sigma$ correction'},
description='Quadrature correction for STELLAR_SIGMA to obtain the '
'astrophysical velocity dispersion.)'),
MultiChannelProperty('stellar_cont_fresid', ivar=False, mask=False,
channels=[Channel('68th_percentile',
formats={'string': '68th percentile',
'latex': r'68^{th} percentile'}, idx=0),
Channel('99th_percentile',
formats={'string': '99th percentile',
'latex': r'99^{th} percentile'}, idx=1)],
formats={'string': 'Fractional residual growth'},
description='68%% and 99%% growth of the fractional residuals between '
'the model and data.'),
Property('stellar_cont_rchi2', ivar=False, mask=False,
formats={'string': 'Stellar continuum reduced chi-square',
'latex': r'Stellar\ continuum\ reduced\ \chi^2'},
description='Reduced chi-square of the stellar continuum fit.'),
MultiChannelProperty('emline_sflux', ivar=True, mask=True,
channels=[oiid_channel] + MPL6_emline_channels,
formats={'string': 'Emission line summed flux'},
unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit, scale=1e-17,
binid=binid_properties[3],
description='Non-parametric summed flux for emission lines.'),
MultiChannelProperty('emline_sew', ivar=True, mask=True,
channels=[oiid_channel] + MPL6_emline_channels,
formats={'string': 'Emission line EW'},
unit=u.Angstrom,
binid=binid_properties[3],
description='Emission line non-parametric equivalent '
'widths measurements.'),
MultiChannelProperty('emline_gflux', ivar=True, mask=True,
channels=[oii_channel] + MPL6_emline_channels,
formats={'string': 'Emission line Gaussian flux'},
unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit, scale=1e-17,
binid=binid_properties[3],
description='Gaussian profile integrated flux for emission lines.'),
MultiChannelProperty('emline_gvel', ivar=True, mask=True,
channels=[oii_channel] + MPL6_emline_channels,
formats={'string': 'Emission line Gaussian velocity'},
unit=u.km / u.s,
binid=binid_properties[3],
description='Gaussian profile velocity for emission lines.'),
MultiChannelProperty('emline_gew', ivar=True, mask=True,
channels=[oii_channel] + MPL6_emline_channels,
formats={'string': 'Emission line Gaussian EW'},
unit=u.Angstrom,
binid=binid_properties[3],
description='Gaussian-fitted equivalent widths measurements '
'(based on EMLINE_GFLUX).'),
MultiChannelProperty('emline_gsigma', ivar=True, mask=True,
channels=[oii_channel] + MPL6_emline_channels,
formats={'string': 'Emission line Gaussian sigma',
'latex': r'Emission line Gaussian $\sigma$'},
unit=u.km / u.s,
binid=binid_properties[3],
description='Gaussian profile velocity dispersion for emission lines; '
'must be corrected using EMLINE_INSTSIGMA.'),
MultiChannelProperty('emline_instsigma', ivar=False, mask=False,
channels=[oii_channel] + MPL6_emline_channels,
formats={'string': 'Emission line instrumental sigma',
'latex': r'Emission line instrumental $\sigma$'},
unit=u.km / u.s,
binid=binid_properties[3],
description='Instrumental dispersion at the fitted line center.'),
MultiChannelProperty('emline_tplsigma', ivar=False, mask=False,
channels=[oii_channel] + MPL6_emline_channels,
formats={'string': 'Emission line template instrumental sigma',
'latex': r'Emission line template instrumental $\sigma$'},
unit=u.km / u.s,
binid=binid_properties[3],
description='The dispersion of each emission line used in '
'the template spectra'),
MultiChannelProperty('specindex', ivar=True, mask=True,
channels=MPL6_specindex_channels,
formats={'string': 'Spectral index'},
description='Measurements of spectral indices.'),
MultiChannelProperty('specindex_corr', ivar=False, mask=False,
channels=MPL6_specindex_channels,
formats={'string': 'Spectral index sigma correction',
'latex': r'Spectral index $\sigma$ correction'},
description='Velocity dispersion corrections for the '
'spectral index measurements '
'(can be ignored for D4000, Dn4000).')
]
MPL6_models = [
Model('binned_flux', 'FLUX', 'WAVE', extension_ivar='IVAR',
extension_mask='MASK', unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit,
scale=1e-17, formats={'string': 'Binned flux'},
description='Flux of the binned spectra',
binid=binid_properties[0]),
Model('full_fit', 'MODEL', 'WAVE', extension_ivar=None,
extension_mask='MASK', unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit,
scale=1e-17, formats={'string': 'Best fitting model'},
description='The best fitting model spectra (sum of the fitted '
'continuum and emission-line models)',
binid=binid_properties[0]),
Model('emline_fit', 'EMLINE', 'WAVE', extension_ivar=None,
extension_mask='EMLINE_MASK',
unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit,
scale=1e-17, formats={'string': 'Emission line model spectrum'},
description='The model spectrum with only the emission lines.',
binid=binid_properties[3]),
Model('emline_base_fit', 'EMLINE_BASE', 'WAVE', extension_ivar=None,
extension_mask='EMLINE_MASK',
unit=u.erg / u.s / (u.cm ** 2) / spaxel_unit,
scale=1e-17, formats={'string': 'Emission line baseline fit'},
description='The model of the constant baseline fitted beneath the '
'emission lines.',
binid=binid_properties[3])
]
# MPL-6 DapDataModel goes here
MPL6 = DAPDataModel('2.1.3', aliases=['MPL-6', 'MPL6'],
bintypes=[SPX, HYB10, VOR10, ALL, NRE],
db_only=[SPX, HYB10],
templates=[GAU_MILESHC],
properties=MPL6_maps,
models=MPL6_models,
bitmasks=get_maskbits('MPL-6'),
default_bintype='SPX',
default_template='GAU-MILESHC',
property_table='SpaxelProp6',
default_binid=binid_properties[0],
default_mapmask=['NOCOV', 'UNRELIABLE', 'DONOTUSE'],
qual_flag='DAPQUAL')
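# Editorial sketch (not part of the original datamodel file): summarising the map
# properties declared above. It assumes Property/MultiChannelProperty expose the
# `name` and `channels` attributes implied by their constructor arguments.
def _summarise_mpl6_maps():
    summary = []
    for prop in MPL6_maps:
        channels = getattr(prop, 'channels', None)
        summary.append((prop.name, len(channels) if channels else 1))
    return summary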
license: bsd-3-clause
Dutchj/pbtweeter | pbtweeter/twitter/tweets.py
import config as cfg
import random
import speedrun
from datetime import datetime
from seconds import seconds_to_time
def post_tweet(api, lb, cat, p, t):
player_name = p
twitter_handle = speedrun.get_twitter_handle(p)
if twitter_handle is None:
return
if not twitter_handle == '':
player_name = twitter_handle
if t < int(lb[cfg.game][cat]['1']['time']):
return post_wr_tweet(api, cat, player_name, t)
elif t == int(lb[cfg.game][cat]['1']['time']):
return post_tie_tweet(api, cat, player_name, t)
else:
return post_pb_tweet(api, cat, player_name, t)
def post_pb_tweet(api, cat, p, t):
try:
if not cfg.debug:
api.update_status(status=random.choice(cfg.pb_messages).format(game=cfg.game, category=cat, player=p,
time=seconds_to_time(t)))
except Exception, e:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), e
else:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), "Tweeted out {player}'s PB ({time}) in {category}".format(
player=p, time=seconds_to_time(t), category=cat)
if cfg.debug:
return False
return True
def post_wr_tweet(api, cat, p, t):
try:
if not cfg.debug:
api.update_status(status=random.choice(cfg.wr_messages).format(game=cfg.game, category=cat, player=p,
time=seconds_to_time(t)))
except Exception, e:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), e
else:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), "Tweeted out {player}'s WR ({time}) in {category}".format(
player=p, time=seconds_to_time(t), category=cat)
if cfg.debug:
return False
return True
def post_tie_tweet(api, cat, p, t):
try:
if not cfg.debug:
api.update_status(status=random.choice(cfg.tie_messages).format(game=cfg.game, category=cat, player=p,
time=seconds_to_time(t)))
except Exception, e:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), e
else:
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'), "Tweeted out {player}'s WR tie ({time}) in {category}"\
.format(player=p, time=seconds_to_time(t), category=cat)
if cfg.debug:
return False
return True
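# Editorial note (not part of the original module): post_tweet() dispatches on the
# record currently held in the leaderboard dict, e.g.
#   lb = {cfg.game: {'Any%': {'1': {'time': 3600}}}}
#   post_tweet(api, lb, 'Any%', 'runner', 3500)   # faster than 3600  -> WR tweet
#   post_tweet(api, lb, 'Any%', 'runner', 3600)   # equals the record -> tie tweet
#   post_tweet(api, lb, 'Any%', 'runner', 3700)   # slower            -> PB tweet
# where `api` is the Twitter client and the player name is resolved through
# speedrun.get_twitter_handle() before any tweet is posted.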
license: gpl-2.0
iSchool-Zambia/django-ischool-oppia | oppia/profile/forms.py
# oppia/profile/forms.py
import hashlib
import urllib
from django import forms
from django.conf import settings
from django.contrib.auth import (authenticate, login, views)
from django.core.urlresolvers import reverse
from django.core.validators import validate_email
from django.contrib.auth.models import User
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Button, Layout, Fieldset, ButtonHolder, Submit, Div, HTML
class LoginForm(forms.Form):
username = forms.CharField(max_length=30,
error_messages={'required': _(u'Please enter a username.')},)
password = forms.CharField(widget=forms.PasswordInput,
error_messages={'required': _(u'Please enter a password.'),},
required=True)
next = forms.CharField(widget=forms.HiddenInput())
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_action = reverse('profile_login')
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-4'
self.helper.layout = Layout(
'username',
'password',
'next',
Div(
Submit('submit', _(u'Login'), css_class='btn btn-default'),
HTML("""<a class="btn btn-default" href="{% url 'profile_reset' %}">"""+_(u'Forgotten password?') + """</a>"""),
css_class='col-lg-offset-2 col-lg-4',
),
)
def clean(self):
cleaned_data = self.cleaned_data
username = cleaned_data.get("username")
password = cleaned_data.get("password")
user = authenticate(username=username, password=password)
if user is None or not user.is_active:
raise forms.ValidationError( _(u"Invalid username or password. Please try again."))
return cleaned_data
class RegisterForm(forms.Form):
username = forms.CharField(max_length=30,
min_length=4,
error_messages={'required': _(u'Please enter a username.')},)
email = forms.CharField(validators=[validate_email],
error_messages={'invalid': _(u'Please enter a valid e-mail address.'),
'required': _(u'Please enter your e-mail address.')},
required=True)
password = forms.CharField(widget=forms.PasswordInput,
error_messages={'required': _(u'Please enter a password.'),
'min_length': _(u'Your password should be at least 6 characters long.')},
min_length=6,
required=True)
password_again = forms.CharField(widget=forms.PasswordInput,
min_length=6,
error_messages={'required': _(u'Please enter your password again.'),
'min_length': _(u'Your password again should be at least 6 characters long.')},
required=True)
first_name = forms.CharField(max_length=100,
error_messages={'required': _(u'Please enter your first name.'),
'min_length': _(u'Your first name should be at least 2 characters long.')},
min_length=2,
required=True)
last_name = forms.CharField(max_length=100,
error_messages={'required': _(u'Please enter your last name.'),
'min_length': _(u'Your last name should be at least 2 characters long.')},
min_length=2,
required=True)
job_title = forms.CharField(max_length=100,required=True)
organisation = forms.CharField(max_length=100,required=True)
profession = forms.CharField(max_length=100,required=True)
service_entry_date = forms.DateField(
required=True,
error_messages={'required': _('Please enter a valid date'),
'invalid':_('Please enter a valid date')},
)
location = forms.ChoiceField(widget=forms.Select, required=False)
def __init__(self, *args, **kwargs):
super(RegisterForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_action = reverse('profile_register')
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-4'
self.helper.layout = Layout(
'username',
'email',
'password',
'password_again',
'first_name',
'last_name',
'job_title',
'organisation',
'profession',
'service_entry_date',
'location',
Div(
Submit('submit', _(u'Register'), css_class='btn btn-default'),
css_class='col-lg-offset-2 col-lg-4',
),
)
def clean(self):
cleaned_data = self.cleaned_data
email = cleaned_data.get("email")
password = cleaned_data.get("password")
password_again = cleaned_data.get("password_again")
username = cleaned_data.get("username")
# check the username not already used
num_rows = User.objects.filter(username=username).count()
if num_rows != 0:
raise forms.ValidationError( _(u"Username has already been registered, please select another."))
# check the email address not already used
num_rows = User.objects.filter(email=email).count()
if num_rows != 0:
raise forms.ValidationError( _(u"Email has already been registered"))
# check the password are the same
if password and password_again:
if password != password_again:
raise forms.ValidationError( _(u"Passwords do not match."))
# Always return the full collection of cleaned data.
return cleaned_data
class RegisterFormAPI(forms.Form):
username = forms.CharField(max_length=30,
min_length=4,
error_messages={'required': _(u'Please enter a username.')},)
email = forms.CharField(validators=[validate_email],
error_messages={'invalid': _(u'Please enter a valid e-mail address.'),
'required': _(u'Please enter your e-mail address.')},
required=True)
password = forms.CharField(widget=forms.PasswordInput,
error_messages={'required': _(u'Please enter a password.'),
'min_length': _(u'Your password should be at least 6 characters long.')},
min_length=6,
required=True)
password_again = forms.CharField(widget=forms.PasswordInput,
min_length=6,
error_messages={'required': _(u'Please enter your password again.'),
'min_length': _(u'Your password again should be at least 6 characters long.')},
required=True)
first_name = forms.CharField(max_length=100,
error_messages={'required': _(u'Please enter your first name.'),
'min_length': _(u'Your first name should be at least 2 characters long.')},
min_length=2,
required=True)
last_name = forms.CharField(max_length=100,
error_messages={'required': _(u'Please enter your last name.'),
'min_length': _(u'Your last name should be at least 2 characters long.')},
min_length=2,
required=True)
def __init__(self, *args, **kwargs):
super(RegisterFormAPI, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_action = reverse('profile_register')
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-4'
self.helper.layout = Layout(
'username',
'email',
'password',
'password_again',
'first_name',
'last_name',
'job_title',
'organisation',
'profession',
'service_entry_date',
'location',
Div(
Submit('submit', _(u'Register'), css_class='btn btn-default'),
css_class='col-lg-offset-2 col-lg-4',
),
)
def clean(self):
cleaned_data = self.cleaned_data
email = cleaned_data.get("email")
password = cleaned_data.get("password")
password_again = cleaned_data.get("password_again")
username = cleaned_data.get("username")
# check the username not already used
num_rows = User.objects.filter(username=username).count()
if num_rows != 0:
raise forms.ValidationError( _(u"Username has already been registered, please select another."))
# check the email address not already used
num_rows = User.objects.filter(email=email).count()
if num_rows != 0:
raise forms.ValidationError( _(u"Email has already been registered"))
# check the password are the same
if password and password_again:
if password != password_again:
raise forms.ValidationError( _(u"Passwords do not match."))
# Always return the full collection of cleaned data.
return cleaned_data
class ResetForm(forms.Form):
username = forms.CharField(max_length=30,
error_messages={'invalid': _(u'Please enter a username or email address.')},
required=True)
def __init__(self, *args, **kwargs):
super(ResetForm, self).__init__(*args, **kwargs)
self.fields['username'].label = "Username or email"
self.helper = FormHelper()
self.helper.form_action = reverse('profile_reset')
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-4'
self.helper.layout = Layout(
'username',
Div(
Submit('submit', _(u'Reset password'), css_class='btn btn-default'),
css_class='col-lg-offset-2 col-lg-4',
),
)
def clean(self):
cleaned_data = self.cleaned_data
username = cleaned_data.get("username")
try:
user = User.objects.get(username__exact=username)
except User.DoesNotExist:
try:
user = User.objects.get(email__exact=username)
except User.DoesNotExist:
raise forms.ValidationError( _(u"Username/email not found"))
return cleaned_data
class ProfileForm(forms.Form):
api_key = forms.CharField(widget = forms.TextInput(attrs={'readonly':'readonly'}),
required=False, help_text=_(u'You cannot edit the API Key.'))
username = forms.CharField(widget = forms.TextInput(attrs={'readonly':'readonly'}),
required=False, help_text=_(u'You cannot edit the username.'))
email = forms.CharField(validators=[validate_email],
error_messages={'invalid': _(u'Please enter a valid e-mail address.')},
required=True)
password = forms.CharField(widget=forms.PasswordInput,
required=False,
min_length=6,
error_messages={'min_length': _(u'The new password should be at least 6 characters long')},)
password_again = forms.CharField(widget=forms.PasswordInput,
required=False,
min_length=6)
first_name = forms.CharField(max_length=100,
min_length=2,
required=True)
last_name = forms.CharField(max_length=100,
min_length=2,
required=True)
job_title = forms.CharField(max_length=100,required=True)
organisation = forms.CharField(max_length=100,required=True)
profession = forms.CharField(max_length=100,required=True)
service_entry_date = forms.DateField(
required=True,
error_messages={'required': _('Please enter a valid date'),
'invalid':_('Please enter a valid date')},
)
location = forms.ChoiceField(widget=forms.Select, required=False)
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
if len(args) == 1:
email = args[0]['email']
username = args[0]['username']
else:
kw = kwargs.pop('initial')
email = kw['email']
username = kw['username']
self.helper = FormHelper()
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-4'
if settings.OPPIA_SHOW_GRAVATARS:
gravatar_url = "https://www.gravatar.com/avatar.php?"
gravatar_url += urllib.urlencode({
'gravatar_id':hashlib.md5(email).hexdigest(),
'size':64
})
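            # Illustrative note (the address below is invented): for "user@example.org"
            # the URL built above looks roughly like
            #   https://www.gravatar.com/avatar.php?gravatar_id=<md5 of email>&size=64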
self.helper.layout = Layout(
Div(
HTML("""<label class="control-label col-lg-2">"""+_(u'Photo') + """</label>"""),
Div(
HTML(mark_safe('<img src="{0}" alt="gravatar for {1}" class="gravatar" width="{2}" height="{2}"/>'.format(gravatar_url, username, 64))),
HTML("""<br/>"""),
HTML("""<a href="https://www.gravatar.com">"""+_(u'Update gravatar')+"""</a>"""),
css_class="col-lg-4",
),
css_class="form-group",
),
'api_key',
'username',
'email',
'first_name',
'last_name',
'job_title',
'organisation',
'profession',
'service_entry_date',
'location',
Div(
HTML("""<h3>"""+_(u'Change password') + """</h3>"""),
),
'password',
'password_again',
Div(
Submit('submit', _(u'Save'), css_class='btn btn-default'),
css_class='col-lg-offset-2 col-lg-4',
),
)
else:
self.helper.layout = Layout(
'api_key',
'username',
'email',
'first_name',
'last_name',
Div(
HTML("""<h3>"""+_(u'Change password') + """</h3>"""),
),
'password',
'password_again',
Div(
Submit('submit', _(u'Save'), css_class='btn btn-default'),
css_class='col-lg-offset-2 col-lg-4',
),
)
def clean(self):
cleaned_data = self.cleaned_data
# check email not used by anyone else
email = cleaned_data.get("email")
username = cleaned_data.get("username")
num_rows = User.objects.exclude(username__exact=username).filter(email=email).count()
if num_rows != 0:
raise forms.ValidationError( _(u"Email address already in use"))
# if password entered then check they are the same
password = cleaned_data.get("password")
password_again = cleaned_data.get("password_again")
if password and password_again:
if password != password_again:
raise forms.ValidationError( _(u"Passwords do not match."))
return cleaned_data
class UploadProfileForm(forms.Form):
upload_file = forms.FileField(
required=True,
error_messages={'required': _('Please select a file to upload')},)
def __init__(self, *args, **kwargs):
super(UploadProfileForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_action = reverse('profile_upload')
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-lg-2'
self.helper.field_class = 'col-lg-4'
self.helper.layout = Layout(
'upload_file',
Div(
Submit('submit', _(u'Upload'), css_class='btn btn-default'),
css_class='col-lg-offset-2 col-lg-4',
),
)
| gpl-3.0 | 3,110,669,798,487,709,000 | 47.369231 | 164 | 0.483168 | false |
Rav3nPL/p2pool-yac | p2pool/networks.py | 1 | 1811 | from p2pool.bitcoin import networks
from p2pool.util import math
# CHAIN_LENGTH = number of shares back client keeps
# REAL_CHAIN_LENGTH = maximum number of shares back client uses to compute payout
# REAL_CHAIN_LENGTH must always be <= CHAIN_LENGTH
# REAL_CHAIN_LENGTH must be changed in sync with all other clients
# changes can be done by changing one, then the other
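# Illustrative arithmetic (not from the original source): with SHARE_PERIOD=10s,
# CHAIN_LENGTH = 12*60*60//10 = 4320 shares covers 12 hours, and keeping
# REAL_CHAIN_LENGTH equal to it trivially satisfies REAL_CHAIN_LENGTH <= CHAIN_LENGTH.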
nets = dict(
yacoin=math.Object(
PARENT=networks.nets['yacoin'],
SHARE_PERIOD=10, # seconds
CHAIN_LENGTH=12*60*60//10, # shares
REAL_CHAIN_LENGTH=12*60*60//10, # shares
TARGET_LOOKBEHIND=30, # shares
SPREAD=10, # blocks
IDENTIFIER='c138eee9e7923514'.decode('hex'),
PREFIX='d206c3aaaee749b4'.decode('hex'),
P2P_PORT=8337,
MIN_TARGET=0,
MAX_TARGET=2**256//2**20 - 1,
PERSIST=True,
WORKER_PORT=8336,
BOOTSTRAP_ADDRS='rav3n.dtdns.net 37.59.119.242 95.138.185.176 213.239.207.114 81.17.30.121 46.163.105.201 88.190.223.101'.split(' '),
ANNOUNCE_CHANNEL='#p2pool-alt',
VERSION_CHECK=lambda v: v >= 60004,
),
yacoin_testnet=math.Object(
PARENT=networks.nets['yacoin_testnet'],
SHARE_PERIOD=3, # seconds
CHAIN_LENGTH=20*60//3, # shares
REAL_CHAIN_LENGTH=20*60//3, # shares
TARGET_LOOKBEHIND=200, # shares
SPREAD=12, # blocks
IDENTIFIER='e037d5b8c7923510'.decode('hex'),
PREFIX='7208c1a54ef649b0'.decode('hex'),
P2P_PORT=19777,
MIN_TARGET=0,
MAX_TARGET=2**256//2**20 - 1,
PERSIST=False,
WORKER_PORT=18336,
BOOTSTRAP_ADDRS=' '.split(' '),
ANNOUNCE_CHANNEL='#p2pool-alt',
VERSION_CHECK=lambda v: v >= 60004,
),
)
for net_name, net in nets.iteritems():
net.NAME = net_name
| gpl-3.0 | 8,911,000,933,144,216,000 | 35.22 | 141 | 0.62286 | false |
praekelt/marathon-sync | marathon_sync/cli.py | 1 | 1733 | import json
import sys
from twisted.internet.task import react
from twisted.python import usage
class Options(usage.Options):
optParameters = [
["marathon", "m", "http://localhost:8080",
"The address for the Marathon HTTP API endpoint."],
["config", "c", None,
"The path to a config file containing a list of paths to Marathon "
"JSON group definitions."],
]
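    # Illustrative sketch (not from the original project docs): the --config file
    # is assumed to be plain text with one group-definition path per line, e.g.
    #     groups/frontend.json
    #     groups/backend.json
    # Blank lines are ignored by read_config() below.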
def postOptions(self):
if self['config'] is None:
raise usage.UsageError("Please specify a config file.")
def load_config_groups(config_path):
group_paths = read_config(config_path)
return [read_group(group_path) for group_path in group_paths]
def read_config(config_path):
group_paths = []
with open(config_path) as config_file:
for line in config_file:
group_path = line.strip()
if group_path:
group_paths.append(group_path)
return group_paths
def read_group(group_path):
with open(group_path) as group_file:
return json.load(group_file)
def main(_reactor, name, *args):
from marathon_sync.main import MarathonSync
from twisted.python import log
log.startLogging(sys.stdout)
try:
options = Options()
options.parseOptions(args)
except usage.UsageError, errortext:
print '%s: %s' % (name, errortext)
print '%s: Try --help for usage details.' % (name,)
sys.exit(1)
marathon_endpoint = options['marathon']
groups = load_config_groups(options['config'])
marathon_sync = MarathonSync(marathon_endpoint, groups)
return marathon_sync.run()
def entry_point():
react(main, sys.argv)
if __name__ == '__main__':
entry_point()
| bsd-3-clause | 2,610,826,351,682,082,000 | 24.865672 | 76 | 0.63416 | false |
davidkeegan/dklrt | Time.py | 1 | 2701 | #!/usr/bin/python
# Time and Date Utilities (dklrt).
# (c) David Keegan 2011-08-06.
import sys, re
from time import *
import datetime
import Misc
ModuleName = __name__
ReDateSep = '[-/]'
ReDate = '\d{4}%s\d{1,2}%s\d{1,2}' % (ReDateSep, ReDateSep)
RePeriod = '(\d+)([ymwdh])'
DateFormat = '%Y-%m-%d'
ReDateTimeSep = "[-/: ]";
DateTimeFormat = '%Y%m%d%H%M%S'
SecPerHour = 60 * 60
SecPerDay = 24 * SecPerHour
def _Throw(Msg): Misc.Throw(Msg, ModuleName)
def DateTimeParse(DateTimeStr):
"""Converts a date(/time) string to seconds since the epoch.
Assumes zeroes for missing time components.
"""
Dts = re.sub(ReDateTimeSep, '', DateTimeStr);
if len(Dts) < 8:
_Throw('Bad Date/Time string: "%s"!' % DateTimeStr)
while len(Dts) < 14: Dts = Dts + "0";
return mktime(strptime(Dts, DateTimeFormat))
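# Doctest-style illustration (hedged; the exact epoch value depends on the local
# timezone, so only the round trip through DateToText below is shown):
#   >>> DateToText(DateTimeParse('2014-01-31'))
#   '2014-01-31'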
def DateToText(Seconds):
# Round seconds to integer first as we're truncating the time
# component.
return strftime(DateFormat, localtime(round(Seconds)))
def DateToday():
return DateTimeParse(DateToText(time()))
def DateAddPeriod(Seconds, Periodstr):
"""Adds the period to the Seconds (a date)."""
Match = re.match(RePeriod, Periodstr)
if not Match: _Throw("Bad Period String: %s!" % Periodstr)
Count = int(Match.group(1))
Unit = Match.group(2)
Rv = Seconds
if Unit == 'y': Rv = DateAddYears(Rv, Count)
elif Unit== 'm': Rv = DateAddMonths(Rv, Count)
elif Unit == 'w': Rv = Rv + (Count * SecPerDay * 7)
elif Unit == 'd': Rv = Rv + (Count * SecPerDay)
elif Unit == 'h': Rv = Rv + (Count * SecPerHour)
else: _Throw('Bad Period Unit: "%s"!' % Unit)
return Rv
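# Illustrative example (dates invented): adding a one-week period string.
#   >>> DateToText(DateAddPeriod(DateTimeParse('2014-01-01'), '1w'))
#   '2014-01-08'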
def DateAddYears(Seconds, Count):
"""Shifts Seconds (a date) forward by Count years.
   If Seconds is Feb 29, shifts to Feb 28, even if shifting to a
leap year.
"""
if not isinstance(Count, (int, long)):
_Throw("Count argument not an int!")
dtd = datetime.date.fromtimestamp(Seconds)
if not Count == 0:
if (dtd.month == 2) and (dtd.day == 29):
dtd = dtd.replace(day=28)
dtd = dtd.replace(year=(dtd.year + Count))
return mktime(dtd.timetuple())
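# Illustrative example of the Feb 29 handling documented above:
#   >>> DateToText(DateAddYears(DateTimeParse('2012-02-29'), 1))
#   '2013-02-28'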
def DateAddMonths(Seconds, Count):
"""Shifts Seconds (a date) forward by Count months.
If the day is >= 29, shifts to 28.
"""
if not isinstance(Count, (int, long)):
_Throw("Count argument not an int!")
dtd = datetime.date.fromtimestamp(Seconds)
if not Count == 0:
if dtd.day >= 29: dtd = dtd.replace(day=28)
Month = (dtd.month + Count) - 1
Years = Month / 12
dtd = dtd.replace(year=(dtd.year + Years))
Month = (Month % 12) + 1
dtd = dtd.replace(month=Month)
return mktime(dtd.timetuple())
| gpl-3.0 | 7,456,639,810,320,415,000 | 29.693182 | 66 | 0.630507 | false |
CKrawczyk/python-reducers-for-caesar | panoptes_aggregation/tests/extractor_tests/test_survey_extractor.py | 1 | 1784 | import unittest
import json
import flask
from panoptes_aggregation import extractors
from panoptes_aggregation.extractors.test_utils import annotation_by_task
classification = {
'annotations': [{
'task': 'T0',
'value': [
{
'choice': 'AGOUTI',
'answers': {'HOWMANY': '1'},
'filters': {}
}, {
'choice': 'PECCARYCOLLARED',
'answers': {'HOWMANY': '3', 'WHATDOING': ['standing', 'sleeping']},
'filters': {}
}, {
'choice': 'NOTHINGHERE',
'answers': {},
'filters': {}
}
]
}]
}
expected = [
{
'choice': 'agouti',
'answers_howmany': {'1': 1}
},
{
'choice': 'peccarycollared',
'answers_howmany': {'3': 1},
'answers_whatdoing': {'standing': 1, 'sleeping': 1}
},
{
'choice': 'nothinghere',
}
]
class TestSurveyExtractor(unittest.TestCase):
def test_extract(self):
result = extractors.survey_extractor(classification)
for i in range(len(result)):
with self.subTest(i=i):
self.assertDictEqual(result[i], expected[i])
def test_request(self):
request_kwargs = {
'data': json.dumps(annotation_by_task(classification)),
'content_type': 'application/json'
}
app = flask.Flask(__name__)
with app.test_request_context(**request_kwargs):
result = extractors.survey_extractor(flask.request)
for i in range(len(result)):
with self.subTest(i=i):
self.assertDictEqual(result[i], expected[i])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,267,468,890,257,697,300 | 26.446154 | 83 | 0.503363 | false |
awni/tensorflow | tensorflow/contrib/skflow/python/skflow/ops/dropout_ops.py | 1 | 1561 | """Dropout operations and handling."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Key to collect dropout probabilities.
DROPOUTS = "dropouts"
def dropout(tensor_in, prob, name=None):
"""Adds dropout node and stores probability tensor into graph collection.
Args:
tensor_in: Input tensor.
    prob: Float or Tensor keep probability.
    name: Optional name for the operation.
  Returns:
    Tensor of the same shape as `tensor_in`.
  Raises:
    ValueError: If `prob` is not in `(0, 1]` (raised by the underlying dropout op).
"""
with tf.op_scope([tensor_in], name, "dropout") as name:
if isinstance(prob, float):
prob = tf.get_variable("prob", [],
initializer=tf.constant_initializer(prob),
trainable=False)
tf.add_to_collection(DROPOUTS, prob)
return tf.nn.dropout(tensor_in, prob)
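# Minimal usage sketch (assumes the TF 0.x-era API this module was written
# against; the placeholder name "features" is invented for illustration):
#   features = tf.placeholder(tf.float32, [None, 128], name="features")
#   dropped = dropout(features, 0.5)
#   keep_probs = tf.get_collection(DROPOUTS)  # the stored probability variable(s)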
| apache-2.0 | 6,417,583,460,618,329,000 | 32.934783 | 77 | 0.667521 | false |
ryanraaum/african-mtdna | popdata_sources/montano2013/process.py | 1 | 1562 | from oldowan.mtconvert import seq2sites, sites2seq, str2sites
from string import translate
import pandas as pd
import numpy as np
import sys
sys.path.append('../../scripts')
from utils import *
## load metadata
metadata = pd.read_csv('metadata.csv', index_col=0)
region = range2region(metadata.ix[0,'SeqRange'])
with open('montano2013.csv', 'rU') as f:
f.readline() # skip past header
data = f.readlines()
counts = np.zeros((len(data), 5), dtype=np.int)
hids = []
sites = []
for i in range(len(data)):
x = data[i].strip().split(',')
hids.append(x[0])
sites.append(x[2])
count = x[4:]
for j in range(5):
if count[j] == '':
count[j] = '0'
counts[i,] = [int(y) for y in count]
## Validate
passed_validation = True
for i in range(len(sites)):
curr_sites = str2sites(sites[i])
seq = sites2seq(curr_sites, region)
mysites = seq2sites(seq)
if not mysites == curr_sites:
myseq = translate(sites2seq(mysites, region), None, '-')
if not seq == myseq:
passed_validation = False
print i, hids[i]
if passed_validation:
counter = [0] * 5
with open('processed.csv', 'w') as f:
for i in range(len(sites)):
hid = hids[i]
curr_sites = str2sites(sites[i])
seq = sites2seq(curr_sites, region)
mysites = ' '.join([str(x) for x in seq2sites(seq)])
for j in range(5):
prefix = metadata.ix[j,'NewPrefix']
for k in range(counts[i,j]):
counter[j] += 1
num = str(counter[j]).zfill(3)
newid = prefix + num
f.write('%s,%s,%s\n' % (newid, hid, mysites)) | cc0-1.0 | -4,966,553,181,960,053,000 | 25.05 | 61 | 0.619718 | false |
jeromecn/caravel_viz_full | caravel/dataframe.py | 1 | 3190 | """ Caravel wrapper around pandas.DataFrame.
TODO(bkyryliuk): add support for the conventions like: *_dim or dim_*
dimensions, *_ts, ts_*, ds_*, *_ds - datetime, etc.
TODO(bkyryliuk): recognize integer encoded enums.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pandas as pd
import numpy as np
INFER_COL_TYPES_THRESHOLD = 95
INFER_COL_TYPES_SAMPLE_SIZE = 100
class CaravelDataFrame(object):
def __init__(self, df):
self.__df = df.where((pd.notnull(df)), None)
@property
def size(self):
return len(self.__df.index)
@property
def data(self):
return self.__df.to_dict(orient='records')
@property
def columns_dict(self):
"""Provides metadata about columns for data visualization.
        :return: list of dicts, each with the fields name, type, is_date, is_dim and, when applicable, agg.
"""
if self.__df.empty:
return None
columns = []
sample_size = min(INFER_COL_TYPES_SAMPLE_SIZE, len(self.__df.index))
sample = self.__df
if sample_size:
sample = self.__df.sample(sample_size)
for col in self.__df.dtypes.keys():
column = {
'name': col,
'type': self.__df.dtypes[col].name,
'is_date': is_date(self.__df.dtypes[col]),
'is_dim': is_dimension(self.__df.dtypes[col], col),
}
agg = agg_func(self.__df.dtypes[col], col)
            if agg:
column['agg'] = agg
if column['type'] == 'object':
# check if encoded datetime
if (datetime_conversion_rate(sample[col]) >
INFER_COL_TYPES_THRESHOLD):
column.update({
'type': 'datetime_string',
'is_date': True,
'is_dim': False,
'agg': None
})
            # 'agg' is optional; drop it when no aggregate applies
            if not column.get('agg'):
                column.pop('agg', None)
columns.append(column)
return columns
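# Illustrative output of columns_dict above (dtypes and values invented): a frame
# with an integer "id" column and a string "name" column yields entries roughly like
#   {'name': 'id', 'type': 'int64', 'is_date': False, 'is_dim': False,
#    'agg': 'count_distinct'}
#   {'name': 'name', 'type': 'object', 'is_date': False, 'is_dim': True}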
# This will give false positives for numbers that are stored as strings.
# It is hard to distinguish integer numbers from timestamps.
def datetime_conversion_rate(data_series):
success = 0
total = 0
for value in data_series:
total = total + 1
try:
pd.to_datetime(value)
success = success + 1
except Exception:
continue
return 100 * success / total
def is_date(dtype):
if dtype.name:
return dtype.name.startswith('datetime')
def is_dimension(dtype, column_name):
if is_id(column_name):
return False
return dtype.name in ('object', 'bool')
def is_id(column_name):
return column_name.startswith('id') or column_name.endswith('id')
def agg_func(dtype, column_name):
# consider checking for key substring too.
if is_id(column_name):
return 'count_distinct'
if np.issubdtype(dtype, np.number):
return 'sum'
return None
| apache-2.0 | -5,963,044,960,317,949,000 | 27.482143 | 76 | 0.558621 | false |
robertu94/autograder | autograder/discover/handin.py | 1 | 4601 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module is part of the Clemson ACM Auto Grader
Copyright (c) 2016, Robert Underwood
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module contains the Clemson Handin interaction
"""
import itertools
import os
import yaml
from autograder.source import clone, update
def clone_metadata(settings):
"""
Clones metadata for the first time
"""
discovery_settings = {
"clone": {
"timeout": None,
"method": "hg"
}
}
clone.clone(discovery_settings, settings['project']['discovery'])
def update_metadata(settings):
"""
    Updates the previously cloned metadata
"""
discovery_settings = {
"update": {
"timeout": None,
"method": "hg"
}
}
update.update(discovery_settings, settings['project']['discovery'])
def discover(settings):
"""
Discovers metadata from a Handin Repository
"""
project_directory = settings['project']['discovery']['directory']
assignment_name = settings['project']['discovery']['assignment']
if not os.path.exists(project_directory):
clone_metadata(settings)
#We are going to unintentionally update all repos when we clone them
#So we need to force an update here.
settings['update']['forced'] = True
else:
update_metadata(settings)
manifest_file = os.path.join(project_directory, "admin/manifest.yaml")
assingment_manifest_file = os.path.join(project_directory, "admin/assignments",
assignment_name + ".yaml")
with open(manifest_file) as infile:
manifest = yaml.load(infile)
students_usernames = set(manifest['students'])
with open(assingment_manifest_file) as infile:
assignment_manifest = yaml.load(infile)
shared_buckets_users = set(itertools.chain(
*[assignment_manifest['buckets'][bucket] for bucket in assignment_manifest['buckets']]))
ungrouped_students = students_usernames - shared_buckets_users
student_objects = []
for student in ungrouped_students:
student_objects.append(student_from_username(settings, student, student))
for bucket in assignment_manifest['buckets']:
needs_grading = True
for student in assignment_manifest['buckets'][bucket]:
            if any(s['username'] == student for s in student_objects):
raise RuntimeError("Students must be uniquely mapped to a bucket")
student_objects.append(
student_from_username(settings, bucket, student, needs_grading))
needs_grading = False
return student_objects
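# Illustrative manifest shapes inferred from the code above (names invented):
#   admin/manifest.yaml                 -> {'students': ['alice', 'bob', 'carol']}
#   admin/assignments/<assignment>.yaml -> {'buckets': {'team1': ['alice', 'bob']}}
# Students absent from every bucket are graded individually; within a bucket only
# the first member is marked as needing grading.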
def student_from_username(settings, bucket_name, username, needs_grading=True):
"""
Format student structures from usernames
"""
directory = settings['project']['discovery']['directory']
assignment = settings['project']['discovery']['assignment']
domain = settings['project']['discovery']['domain']
base_repo = settings['project']['discovery']['repo']
return {
"directory": os.path.join(directory, "assignments", assignment, username),
"email": "{username}@{domain}".format(username=username, domain=domain),
"username": username,
"repo": os.path.join(base_repo, "assignments", assignment, bucket_name),
"needs_grading": needs_grading
}
| bsd-2-clause | 7,051,272,176,042,949,000 | 36.104839 | 96 | 0.690285 | false |
universalcore/springboard | springboard/utils.py | 1 | 6735 | import os
import re
from functools import wraps
from urlparse import urlparse
import math
from elasticutils import S
from elasticgit.search import RepoHelper
default_excluded_paths = ['/health/', '/api/notify/']
def is_excluded_path(path, excluded_paths):
excl_paths = config_list(excluded_paths) + default_excluded_paths
return (
path and
any([p for p in excl_paths if path.startswith(p)]))
def parse_repo_name(repo_url):
pr = urlparse(repo_url)
_, _, repo_name_dot_ext = pr.path.rpartition('/')
if any([
repo_name_dot_ext.endswith('.git'),
repo_name_dot_ext.endswith('.json')]):
repo_name, _, _ = repo_name_dot_ext.partition('.')
return repo_name
return repo_name_dot_ext
def is_remote_repo_url(repo_url):
return any([
repo_url.startswith('http://'),
repo_url.startswith('https://')])
def repo_url(repo_dir, repo_location):
# If repo_location is an http URL we leave it as is and
# assume it specifies a unicore.distribute repo endpoint.
# If repo_location is not an http URL, we assume it specifies
# a local repo in repo_dir.
if is_remote_repo_url(repo_location):
return repo_location
return os.path.abspath(os.path.join(repo_dir, repo_location))
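# Illustrative behaviour (paths invented):
#   repo_url('/repos', 'https://unicore.example/repos/ffl')  # returned unchanged
#   repo_url('/repos', 'ffl-tanzania')                       # -> '/repos/ffl-tanzania'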
def ga_context(context_func):
"""
A decorator for Cornice views that allows one to set extra parameters
for Google Analytics tracking::
@ga_context(lambda context: {'dt': context['category'].title, })
@view_config(route_name='page')
def view(request):
return {
'category': self.workspace.S(Category).filter(title='foo')[0],
}
:param func context_func:
A function which takes one argument, a context dictionary made
available to the template.
:returns:
A dict containing the extra variables for Google Analytics
tracking.
"""
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
context = func(self, *args, **kwargs)
self.request.google_analytics.update(context_func(context))
return context
return wrapper
return decorator
def config_list(data):
"""
A function that takes a string of values separated by newline characters
and returns a list of those values
    :param str data:
        A string of values separated by newline characters.
    :returns:
        A list of the values, with surrounding whitespace stripped and empty
        lines removed.
"""
return filter(None, (x.strip() for x in data.splitlines()))
def config_dict(data):
"""
A function that takes a string of pair values, indicated by '=', separated
by newline characters and returns a dict of those value pairs
    :param str data:
        A string of key=value pairs, one pair per line.
    :returns:
        A dict mapping each key to its value.
"""
lines = config_list(data)
return dict(re.split('\s*=\s*', value) for value in lines)
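# Doctest-style illustration (keys and values invented; key order may differ):
#   >>> config_dict("ENV = qa\nREGION = eu-west-1\n")
#   {'ENV': 'qa', 'REGION': 'eu-west-1'}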
class Paginator(object):
"""
A thing that helps us page through result sets
:param iterable results:
The iterable of objects to paginate.
:param int page:
The page number, zero-based.
:param int results_per_page:
The number of objects in each page.
:param int slider_value:
The number of page numbers to display, excluding the current page.
"""
def __init__(self, results, page, results_per_page=10, slider_value=5):
self.results = results
self.page = page
self.results_per_page = results_per_page
self.slider_value = slider_value
self.buffer_value = self.slider_value / 2
def total_count(self):
if isinstance(self.results, S):
return self.results.count()
return len(self.results)
def get_page(self):
return self.results[self.page * self.results_per_page:
(self.page + 1) * self.results_per_page]
def has_next_page(self):
return ((self.page + 1) * self.results_per_page) < self.total_count()
def has_previous_page(self):
return self.page
def total_pages(self):
return int(
math.ceil(
float(self.total_count()) / float(self.results_per_page)))
def page_numbers(self):
if (self.page - self.buffer_value) < 0:
return [page_number
for page_number in range(
0, min([self.slider_value, self.total_pages()]))]
elif (self.page + self.buffer_value) >= self.total_pages():
return [page_number
for page_number in range(
max((self.total_pages() - self.slider_value), 0),
self.total_pages())
]
else:
return range(self.page - self.buffer_value,
self.page + self.buffer_value + 1)
def page_numbers_left(self):
page_numbers = self.page_numbers()
if not any(page_numbers):
return False
return page_numbers[:page_numbers.index(self.page)]
def page_numbers_right(self):
page_numbers = self.page_numbers()
if not any(page_numbers):
return False
return page_numbers[page_numbers.index(self.page) + 1:]
def needs_start_ellipsis(self):
page_numbers = self.page_numbers()
if not any(page_numbers):
return False
return page_numbers[0] > 1
def needs_end_ellipsis(self):
page_numbers = self.page_numbers()
if not any(page_numbers):
return False
return page_numbers[-1] < (self.total_pages() - 2)
def show_start(self):
page_numbers = self.page_numbers()
if not any(page_numbers):
return False
return page_numbers[0] > 0
def show_end(self):
page_numbers = self.page_numbers()
if not any(page_numbers):
return False
return page_numbers[-1] < self.total_pages() - 1
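# Brief usage sketch (numbers invented, assumes the Python 2 semantics of this module):
#   pager = Paginator(range(95), page=3, results_per_page=10)
#   pager.get_page()       # items 30..39
#   pager.total_pages()    # 10
#   pager.page_numbers()   # [1, 2, 3, 4, 5] - a 5-wide window centred on page 3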
class CachingRepoHelper(RepoHelper):
"""
A subclass of RepoHelper that caches the repo's active
branch name to avoid remote calls to get the repo branch.
"""
def active_branch_name(self):
if not hasattr(self, '_active_branch_name'):
self._active_branch_name = super(
CachingRepoHelper, self).active_branch_name()
return self._active_branch_name
| bsd-2-clause | 7,822,912,496,108,433,000 | 30.325581 | 78 | 0.610987 | false |
8l/beri | cheritest/trunk/tests/cache/test_cache_instruction_data.py | 1 | 3232 | #-
# Copyright (c) 2011 Robert N. M. Watson
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
#
# XXX: our test code saves the CP0 config register in self.MIPS.s1 so that
# we can determine how this test should behave. Our test cases don't
# currently check that, so may return undesired failures. The third check
# below should be conditioned on (DC > 0) || (SC == 1) -- i.e., a cache is
# present, which might cause it not to incorrectly fire for gxemul.
#
class test_cache_instruction_data(BaseBERITestCase):
@attr('cache')
@attr('counterdev')
def test_initial_uncached_read(self):
self.assertRegisterEqual(self.MIPS.a0, 0, "Initial read of count register is incorrect")
@attr('cache')
@attr('counterdev')
def test_initial_cached_read(self):
self.assertRegisterEqual(self.MIPS.a1, 1, "Initial cached read failure")
@attr('cache')
@attr('counterdev')
def test_second_cached_read(self):
self.assertRegisterEqual(self.MIPS.a2, 1, "Second cached read failure")
@attr('cache')
@attr('counterdev')
def test_after_L1_writeback_cached_read(self):
self.assertRegisterEqual(self.MIPS.a3, 1, "Cached read after data L1 writeback is incorrect")
@attr('cache')
@attr('counterdev')
def test_after_L1_writeback_invalidate_cached_read(self):
self.assertRegisterEqual(self.MIPS.a4, 1, "Cached read after data L1 writeback/invalidate is incorrect")
@attr('cache')
@attr('counterdev')
def test_after_L1_invalidate_cached_read(self):
self.assertRegisterEqual(self.MIPS.a5, 1, "Cached read after data L1 invalidate is incorrect")
@attr('cache')
@attr('counterdev')
def test_after_L1_and_L2_invalidate_cached_read(self):
self.assertRegisterEqual(self.MIPS.a6, 1, "Cached read after data and L2 invalidate is incorrect")
@attr('cache')
@attr('loadcachetag')
def test_index_load_tag_dcache(self):
self.assertRegisterEqual(self.MIPS.a7, 0x000000004001fe00, "Index load tag dCache unexpected value")
| apache-2.0 | -8,806,127,678,014,033,000 | 39.911392 | 113 | 0.710705 | false |
wbond/oscrypto | oscrypto/_openssl/_libcrypto_cffi.py | 1 | 9503 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import re
from .. import _backend_config
from .._errors import pretty_message
from .._ffi import get_library, register_ffi
from ..errors import LibraryNotFoundError
from cffi import FFI
__all__ = [
'is_libressl',
'libcrypto',
'libressl_version',
'libressl_version_info',
'version',
'version_info',
]
libcrypto_path = _backend_config().get('libcrypto_path')
if libcrypto_path is None:
libcrypto_path = get_library('crypto', 'libcrypto.dylib', '42')
if not libcrypto_path:
raise LibraryNotFoundError('The library libcrypto could not be found')
try:
vffi = FFI()
vffi.cdef("const char *SSLeay_version(int type);")
version_string = vffi.string(vffi.dlopen(libcrypto_path).SSLeay_version(0)).decode('utf-8')
except (AttributeError):
vffi = FFI()
vffi.cdef("const char *OpenSSL_version(int type);")
version_string = vffi.string(vffi.dlopen(libcrypto_path).OpenSSL_version(0)).decode('utf-8')
is_libressl = 'LibreSSL' in version_string
version_match = re.search('\\b(\\d\\.\\d\\.\\d[a-z]*)\\b', version_string)
if not version_match:
version_match = re.search('(?<=LibreSSL )(\\d\\.\\d(\\.\\d)?)\\b', version_string)
if not version_match:
raise LibraryNotFoundError('Error detecting the version of libcrypto')
version = version_match.group(1)
version_parts = re.sub('(\\d)([a-z]+)', '\\1.\\2', version).split('.')
version_info = tuple(int(part) if part.isdigit() else part for part in version_parts)
# LibreSSL is compatible with libcrypto from OpenSSL 1.0.1
libressl_version = ''
libressl_version_info = tuple()
if is_libressl:
libressl_version = version
libressl_version_info = version_info
version = '1.0.1'
version_info = (1, 0, 1)
ffi = FFI()
libcrypto = ffi.dlopen(libcrypto_path)
register_ffi(libcrypto, ffi)
if version_info < (0, 9, 8):
raise LibraryNotFoundError(pretty_message(
'''
OpenSSL versions older than 0.9.8 are not supported - found version %s
''',
version
))
if version_info < (1, 1):
ffi.cdef("""
void ERR_load_crypto_strings(void);
void ERR_free_strings(void);
""")
# The typedef uintptr_t lines here allow us to check for a NULL pointer,
# without having to redefine the structs in our code. This is kind of a hack,
# but it shouldn't cause problems since we treat these as opaque.
ffi.cdef("""
typedef ... EVP_MD;
typedef uintptr_t EVP_CIPHER_CTX;
typedef ... EVP_CIPHER;
typedef ... ENGINE;
typedef uintptr_t EVP_PKEY;
typedef uintptr_t X509;
typedef uintptr_t DH;
typedef uintptr_t RSA;
typedef uintptr_t DSA;
typedef uintptr_t EC_KEY;
typedef ... EVP_MD_CTX;
typedef ... EVP_PKEY_CTX;
typedef ... BN_GENCB;
typedef ... BIGNUM;
unsigned long ERR_get_error(void);
char *ERR_error_string(unsigned long e, char *buf);
unsigned long ERR_peek_error(void);
void OPENSSL_config(const char *config_name);
EVP_CIPHER_CTX *EVP_CIPHER_CTX_new(void);
void EVP_CIPHER_CTX_free(EVP_CIPHER_CTX *ctx);
int EVP_CIPHER_CTX_set_key_length(EVP_CIPHER_CTX *x, int keylen);
int EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *x, int padding);
int EVP_CIPHER_CTX_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void *ptr);
const EVP_CIPHER *EVP_aes_128_cbc(void);
const EVP_CIPHER *EVP_aes_192_cbc(void);
const EVP_CIPHER *EVP_aes_256_cbc(void);
const EVP_CIPHER *EVP_des_cbc(void);
const EVP_CIPHER *EVP_des_ede_cbc(void);
const EVP_CIPHER *EVP_des_ede3_cbc(void);
const EVP_CIPHER *EVP_rc4(void);
const EVP_CIPHER *EVP_rc2_cbc(void);
int EVP_EncryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
ENGINE *impl, const char *key,
const char *iv);
int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, char *out, int *outl,
const char *in, int inl);
int EVP_EncryptFinal_ex(EVP_CIPHER_CTX *ctx, char *out, int *outl);
int EVP_DecryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
ENGINE *impl, const char *key,
const char *iv);
int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, char *out, int *outl,
const char *in, int inl);
int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, char *out, int *outl);
EVP_PKEY *d2i_AutoPrivateKey(EVP_PKEY **a, const char **pp,
long length);
EVP_PKEY *d2i_PUBKEY(EVP_PKEY **a, const char **pp, long length);
int i2d_PUBKEY(EVP_PKEY *a, char **pp);
void EVP_PKEY_free(EVP_PKEY *key);
X509 *d2i_X509(X509 **px, const char **in, int len);
int i2d_X509(X509 *x, char **out);
EVP_PKEY *X509_get_pubkey(X509 *x);
void X509_free(X509 *a);
int EVP_PKEY_size(EVP_PKEY *pkey);
RSA *EVP_PKEY_get1_RSA(EVP_PKEY *pkey);
void RSA_free(RSA *r);
int RSA_public_encrypt(int flen, const char *from,
char *to, RSA *rsa, int padding);
int RSA_private_encrypt(int flen, const char *from,
char *to, RSA *rsa, int padding);
int RSA_public_decrypt(int flen, const char *from,
char *to, RSA *rsa, int padding);
int RSA_private_decrypt(int flen, const char *from,
char *to, RSA *rsa, int padding);
int EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *d, unsigned int cnt);
const EVP_MD *EVP_md5(void);
const EVP_MD *EVP_sha1(void);
const EVP_MD *EVP_sha224(void);
const EVP_MD *EVP_sha256(void);
const EVP_MD *EVP_sha384(void);
const EVP_MD *EVP_sha512(void);
int PKCS12_key_gen_uni(char *pass, int passlen, char *salt,
int saltlen, int id, int iter, int n,
char *out, const EVP_MD *md_type);
void BN_free(BIGNUM *a);
int BN_dec2bn(BIGNUM **a, const char *str);
DH *DH_new(void);
int DH_generate_parameters_ex(DH *dh, int prime_len, int generator, BN_GENCB *cb);
int i2d_DHparams(const DH *a, char **pp);
void DH_free(DH *dh);
RSA *RSA_new(void);
int RSA_generate_key_ex(RSA *rsa, int bits, BIGNUM *e, BN_GENCB *cb);
int i2d_RSAPublicKey(RSA *a, char **pp);
int i2d_RSAPrivateKey(RSA *a, char **pp);
DSA *DSA_new(void);
int DSA_generate_parameters_ex(DSA *dsa, int bits,
const char *seed, int seed_len, int *counter_ret,
unsigned long *h_ret, BN_GENCB *cb);
int DSA_generate_key(DSA *a);
int i2d_DSA_PUBKEY(const DSA *a, char **pp);
int i2d_DSAPrivateKey(const DSA *a, char **pp);
void DSA_free(DSA *dsa);
EC_KEY *EC_KEY_new_by_curve_name(int nid);
int EC_KEY_generate_key(EC_KEY *key);
void EC_KEY_set_asn1_flag(EC_KEY *, int);
int i2d_ECPrivateKey(EC_KEY *key, char **out);
int i2o_ECPublicKey(EC_KEY *key, char **out);
void EC_KEY_free(EC_KEY *key);
""")
if version_info < (1, 1):
ffi.cdef("""
EVP_MD_CTX *EVP_MD_CTX_create(void);
void EVP_MD_CTX_destroy(EVP_MD_CTX *ctx);
""")
else:
ffi.cdef("""
EVP_MD_CTX *EVP_MD_CTX_new(void);
void EVP_MD_CTX_free(EVP_MD_CTX *ctx);
""")
if version_info < (1,):
ffi.cdef("""
typedef ... *DSA_SIG;
typedef ... *ECDSA_SIG;
DSA_SIG *DSA_do_sign(const char *dgst, int dlen, DSA *dsa);
ECDSA_SIG *ECDSA_do_sign(const char *dgst, int dgst_len, EC_KEY *eckey);
DSA_SIG *d2i_DSA_SIG(DSA_SIG **v, const char **pp, long length);
ECDSA_SIG *d2i_ECDSA_SIG(ECDSA_SIG **v, const char **pp, long len);
int i2d_DSA_SIG(const DSA_SIG *a, char **pp);
int i2d_ECDSA_SIG(const ECDSA_SIG *a, char **pp);
int DSA_do_verify(const char *dgst, int dgst_len, DSA_SIG *sig, DSA *dsa);
int ECDSA_do_verify(const char *dgst, int dgst_len, const ECDSA_SIG *sig, EC_KEY *eckey);
void DSA_SIG_free(DSA_SIG *a);
void ECDSA_SIG_free(ECDSA_SIG *a);
DSA *EVP_PKEY_get1_DSA(EVP_PKEY *pkey);
EC_KEY *EVP_PKEY_get1_EC_KEY(EVP_PKEY *pkey);
int RSA_verify_PKCS1_PSS(RSA *rsa, const char *mHash,
const EVP_MD *Hash, const char *EM,
int sLen);
int RSA_padding_add_PKCS1_PSS(RSA *rsa, char *EM,
const char *mHash, const EVP_MD *Hash,
int sLen);
int EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl);
int EVP_SignFinal(EVP_MD_CTX *ctx, char *sig, unsigned int *s, EVP_PKEY *pkey);
int EVP_VerifyFinal(EVP_MD_CTX *ctx, char *sigbuf, unsigned int siglen, EVP_PKEY *pkey);
void EVP_MD_CTX_set_flags(EVP_MD_CTX *ctx, int flags);
""")
else:
ffi.cdef("""
int PKCS5_PBKDF2_HMAC(const char *pass, int passlen,
const char *salt, int saltlen, int iter,
const EVP_MD *digest,
int keylen, char *out);
int EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey);
int EVP_DigestSignFinal(EVP_MD_CTX *ctx, char *sig, size_t *siglen);
int EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey);
int EVP_DigestVerifyFinal(EVP_MD_CTX *ctx, const char *sig, size_t siglen);
int EVP_PKEY_CTX_ctrl(EVP_PKEY_CTX *ctx, int keytype, int optype, int cmd, int p1, void *p2);
""")
| mit | 1,742,790,982,224,963,300 | 35.55 | 118 | 0.614964 | false |
paris-ci/CloudBot | plugins/remind.py | 1 | 5860 | """
remind.py
Allows users to add reminders for various tasks.
Created By:
- Pangea <https://github.com/PangeaCake>
- Luke Rogers <https://github.com/lukeroge>
License:
GPL v3
"""
from datetime import datetime
import time
import asyncio
from sqlalchemy import Table, Column, String, DateTime, PrimaryKeyConstraint
from cloudbot import hook
from cloudbot.util import database
from cloudbot.util.timeparse import time_parse
from cloudbot.util.timeformat import format_time, time_since
from cloudbot.util import colors
table = Table(
'reminders',
database.metadata,
Column('network', String(50)),
Column('added_user', String(30)),
Column('added_time', DateTime),
Column('added_chan', String(50)),
Column('message', String(512)),
Column('remind_time', DateTime),
PrimaryKeyConstraint('network', 'added_user', 'added_time')
)
@asyncio.coroutine
def delete_reminder(async, db, network, remind_time, user):
query = table.delete() \
.where(table.c.network == network.lower()) \
.where(table.c.remind_time == remind_time) \
.where(table.c.added_user == user.lower())
yield from async(db.execute, query)
yield from async(db.commit)
@asyncio.coroutine
def delete_all(async, db, network, user):
query = table.delete() \
.where(table.c.network == network.lower()) \
.where(table.c.added_user == user.lower())
yield from async(db.execute, query)
yield from async(db.commit)
@asyncio.coroutine
def add_reminder(async, db, network, added_user, added_chan, message, remind_time, added_time):
query = table.insert().values(
network=network.lower(),
added_user=added_user.lower(),
added_time=added_time,
added_chan=added_chan.lower(),
message=message,
remind_time=remind_time
)
yield from async(db.execute, query)
yield from async(db.commit)
@asyncio.coroutine
@hook.on_start()
def load_cache(async, db):
global reminder_cache
reminder_cache = []
for network, remind_time, added_time, user, message in (yield from async(_load_cache_db, db)):
reminder_cache.append((network, remind_time, added_time, user, message))
def _load_cache_db(db):
query = db.execute(table.select())
return [(row["network"], row["remind_time"], row["added_time"], row["added_user"], row["message"]) for row in query]
@asyncio.coroutine
@hook.periodic(30, initial_interval=30)
def check_reminders(bot, async, db):
current_time = datetime.now()
for reminder in reminder_cache:
network, remind_time, added_time, user, message = reminder
if remind_time <= current_time:
if network not in bot.connections:
# connection is invalid
                yield from delete_reminder(async, db, network, remind_time, user)
yield from load_cache(async, db)
continue
conn = bot.connections[network]
if not conn.ready:
return
remind_text = colors.parse(time_since(added_time, count=2))
alert = colors.parse("{}, you have a reminder from $(b){}$(clear) ago!".format(user, remind_text))
conn.message(user, alert)
conn.message(user, '"{}"'.format(message))
            delta = (current_time - remind_time).total_seconds()
if delta > (30 * 60):
late_time = time_since(remind_time, count=2)
late = "(I'm sorry for delivering this message $(b){}$(clear) late," \
" it seems I was unable to deliver it on time)".format(late_time)
conn.message(user, colors.parse(late))
yield from delete_reminder(async, db, network, remind_time, user)
yield from load_cache(async, db)
@asyncio.coroutine
@hook.command('remind', 'reminder')
def remind(text, nick, chan, db, conn, notice, async):
"""<1 minute, 30 seconds>: <do task> -- reminds you to <do task> in <1 minute, 30 seconds>"""
count = len([x for x in reminder_cache if x[0] == conn.name and x[3] == nick.lower()])
if text == "clear":
if count == 0:
return "You have no reminders to delete."
yield from delete_all(async, db, conn.name, nick)
yield from load_cache(async, db)
return "Deleted all ({}) reminders for {}!".format(count, nick)
# split the input on the first ":"
parts = text.split(":", 1)
if len(parts) == 1:
# user didn't add a message, send them help
notice(remind.__doc__)
return
if count > 10:
return "Sorry, you already have too many reminders queued (10), you will need to wait or " \
"clear your reminders to add any more."
time_string = parts[0].strip()
message = colors.strip_all(parts[1].strip())
# get the current time in both DateTime and Unix Epoch
current_epoch = time.time()
current_time = datetime.fromtimestamp(current_epoch)
# parse the time input, return error if invalid
seconds = time_parse(time_string)
if not seconds:
return "Invalid input."
if seconds > 2764800 or seconds < 60:
return "Sorry, remind input must be more then a minute, and less then one month."
# work out the time to remind the user, and check if that time is in the past
remind_time = datetime.fromtimestamp(current_epoch + seconds)
if remind_time < current_time:
return "I can't remind you in the past!"
# finally, add the reminder and send a confirmation message
yield from add_reminder(async, db, conn.name, nick, chan, message, remind_time, current_time)
yield from load_cache(async, db)
remind_text = format_time(seconds, count=2)
output = "Alright, I'll remind you \"{}\" in $(b){}$(clear)!".format(message, remind_text)
return colors.parse(output)
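# Example interaction (illustrative only; the command prefix depends on bot config):
#   <user> .remind 10 minutes: check the oven
#   <bot>  Alright, I'll remind you "check the oven" in 10 minutes!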
| gpl-3.0 | 2,699,591,006,697,351,000 | 32.107345 | 120 | 0.635836 | false |
kovidgoyal/kitty | docs/installer.py | 1 | 7947 | #!/usr/bin/env python3
# vim:fileencoding=utf-8
# License: GPL v3 Copyright: 2018, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import atexit
import json
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
py3 = sys.version_info[0] > 2
is64bit = platform.architecture()[0] == '64bit'
is_macos = 'darwin' in sys.platform.lower()
if is_macos:
mac_ver = tuple(map(int, platform.mac_ver()[0].split('.')))
if mac_ver[:2] < (10, 12):
raise SystemExit('Your version of macOS is too old, at least 10.12 is required')
try:
__file__
from_file = True
except NameError:
from_file = False
if py3:
unicode = str
raw_input = input
import urllib.request as urllib
def encode_for_subprocess(x):
return x
else:
from future_builtins import map
import urllib2 as urllib
def encode_for_subprocess(x):
if isinstance(x, unicode):
x = x.encode('utf-8')
return x
def run(*args):
if len(args) == 1:
args = shlex.split(args[0])
args = list(map(encode_for_subprocess, args))
ret = subprocess.Popen(args).wait()
if ret != 0:
raise SystemExit(ret)
class Reporter: # {{{
def __init__(self, fname):
self.fname = fname
self.last_percent = 0
def __call__(self, blocks, block_size, total_size):
percent = (blocks*block_size)/float(total_size)
report = '\rDownloaded {:.1%} '.format(percent)
if percent - self.last_percent > 0.05:
self.last_percent = percent
print(report, end='')
sys.stdout.flush()
# }}}
def get_latest_release_data():
print('Checking for latest release on GitHub...')
req = urllib.Request('https://api.github.com/repos/kovidgoyal/kitty/releases/latest', headers={'Accept': 'application/vnd.github.v3+json'})
try:
res = urllib.urlopen(req).read().decode('utf-8')
except Exception as err:
raise SystemExit('Failed to contact {} with error: {}'.format(req.get_full_url(), err))
data = json.loads(res)
html_url = data['html_url'].replace('/tag/', '/download/').rstrip('/')
for asset in data.get('assets', ()):
name = asset['name']
if is_macos:
if name.endswith('.dmg'):
return html_url + '/' + name, asset['size']
else:
if name.endswith('.txz'):
if is64bit:
if name.endswith('-x86_64.txz'):
return html_url + '/' + name, asset['size']
else:
if name.endswith('-i686.txz'):
return html_url + '/' + name, asset['size']
raise SystemExit('Failed to find the installer package on github')
def do_download(url, size, dest):
print('Will download and install', os.path.basename(dest))
reporter = Reporter(os.path.basename(dest))
# Get content length and check if range is supported
rq = urllib.urlopen(url)
headers = rq.info()
sent_size = int(headers['content-length'])
if sent_size != size:
raise SystemExit('Failed to download from {} Content-Length ({}) != {}'.format(url, sent_size, size))
with open(dest, 'wb') as f:
while f.tell() < size:
raw = rq.read(8192)
if not raw:
break
f.write(raw)
reporter(f.tell(), 1, size)
rq.close()
if os.path.getsize(dest) < size:
raise SystemExit('Download failed, try again later')
print('\rDownloaded {} bytes'.format(os.path.getsize(dest)))
def clean_cache(cache, fname):
for x in os.listdir(cache):
if fname not in x:
os.remove(os.path.join(cache, x))
def download_installer(url, size):
fname = url.rpartition('/')[-1]
tdir = tempfile.gettempdir()
cache = os.path.join(tdir, 'kitty-installer-cache')
if not os.path.exists(cache):
os.makedirs(cache)
clean_cache(cache, fname)
dest = os.path.join(cache, fname)
if os.path.exists(dest) and os.path.getsize(dest) == size:
print('Using previously downloaded', fname)
return dest
if os.path.exists(dest):
os.remove(dest)
do_download(url, size, dest)
return dest
def macos_install(dmg, dest='/Applications', launch=True):
mp = tempfile.mkdtemp()
atexit.register(shutil.rmtree, mp)
run('hdiutil', 'attach', dmg, '-mountpoint', mp)
try:
os.chdir(mp)
app = 'kitty.app'
d = os.path.join(dest, app)
if os.path.exists(d):
shutil.rmtree(d)
dest = os.path.join(dest, app)
run('ditto', '-v', app, dest)
print('Successfully installed kitty into', dest)
if launch:
run('open', dest)
finally:
os.chdir('/')
run('hdiutil', 'detach', mp)
def linux_install(installer, dest=os.path.expanduser('~/.local'), launch=True):
dest = os.path.join(dest, 'kitty.app')
if os.path.exists(dest):
shutil.rmtree(dest)
os.makedirs(dest)
print('Extracting tarball...')
run('tar', '-C', dest, '-xJof', installer)
print('kitty successfully installed to', dest)
kitty = os.path.join(dest, 'bin', 'kitty')
print('Use', kitty, 'to run kitty')
if launch:
run(kitty, '--detach')
def main(dest=None, launch=True, installer=None):
if not dest:
if is_macos:
dest = '/Applications'
else:
dest = os.path.expanduser('~/.local')
machine = os.uname()[4]
if machine and machine.lower().startswith('arm'):
raise SystemExit(
'You are running on an ARM system. The kitty binaries are only'
' available for x86 systems. You will have to build from'
' source.')
if not installer:
url, size = get_latest_release_data()
installer = download_installer(url, size)
else:
installer = os.path.abspath(installer)
if not os.access(installer, os.R_OK):
raise SystemExit('Could not read from: {}'.format(installer))
if is_macos:
macos_install(installer, dest=dest, launch=launch)
else:
linux_install(installer, dest=dest, launch=launch)
def script_launch():
# To test: python3 -c "import runpy; runpy.run_path('installer.py', run_name='script_launch')"
def path(x):
return os.path.expandvars(os.path.expanduser(x))
def to_bool(x):
return x.lower() in {'y', 'yes', '1', 'true'}
type_map = {x: path for x in 'dest installer'.split()}
type_map['launch'] = to_bool
kwargs = {}
for arg in sys.argv[1:]:
if arg:
m = re.match('([a-z_]+)=(.+)', arg)
if m is None:
raise SystemExit('Unrecognized command line argument: ' + arg)
k = m.group(1)
if k not in type_map:
raise SystemExit('Unrecognized command line argument: ' + arg)
kwargs[k] = type_map[k](m.group(2))
main(**kwargs)
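# Illustrative invocation of the kwarg-style arguments parsed above (path invented):
#   python3 -c "import runpy; runpy.run_path('installer.py', run_name='script_launch')" dest=~/programs launch=n
# which resolves to main(dest=os.path.expanduser('~/programs'), launch=False).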
def update_intaller_wrapper():
# To run: python3 -c "import runpy; runpy.run_path('installer.py', run_name='update_wrapper')" installer.sh
with open(__file__, 'rb') as f:
src = f.read().decode('utf-8')
wrapper = sys.argv[-1]
with open(wrapper, 'r+b') as f:
raw = f.read().decode('utf-8')
nraw = re.sub(r'^# HEREDOC_START.+^# HEREDOC_END', lambda m: '# HEREDOC_START\n{}\n# HEREDOC_END'.format(src), raw, flags=re.MULTILINE | re.DOTALL)
if 'update_intaller_wrapper()' not in nraw:
raise SystemExit('regex substitute of HEREDOC failed')
f.seek(0), f.truncate()
f.write(nraw.encode('utf-8'))
if __name__ == '__main__' and from_file:
main()
elif __name__ == 'update_wrapper':
update_intaller_wrapper()
elif __name__ == 'script_launch':
script_launch()
| gpl-3.0 | -6,564,165,642,314,307,000 | 30.915663 | 155 | 0.587014 | false |
rudatalab/python-objectcube | api/api/__init__.py | 1 | 1147 | from flask import Flask, jsonify, render_template
from flask_restful import Api
from resource.concept import ConceptResource, ConceptResourceByID
from resource.tag import TagResource, TagResourceByID, TagResourceByValue
from resource.object import ObjectResource, ObjectResourceByID
from resource.blob import BlobResourceByURI
from resource.meta import get_all_meta
app = Flask(__name__)
api = Api(app)
# Concept API
api.add_resource(ConceptResource, '/api/concepts')
api.add_resource(ConceptResourceByID, '/api/concepts/<int:id_>')
# Tag API
api.add_resource(TagResource, '/api/tags')
api.add_resource(TagResourceByID, '/api/tags/<int:id_>')
api.add_resource(TagResourceByValue, '/api/tags/values')
# Object API
api.add_resource(ObjectResource, '/api/objects')
api.add_resource(ObjectResourceByID, '/api/objects/<int:id_>')
# Blob API
api.add_resource(BlobResourceByURI, '/api/blobs/uri/<string:digest>')
@app.route('/api/description')
def api_client():
f = get_all_meta()
return jsonify(**f)
@app.route('/api')
def index():
return render_template('api.html')
if __name__ == '__main__':
app.run(debug=True, port=4000)
| bsd-2-clause | -2,096,492,035,668,442,600 | 26.309524 | 73 | 0.744551 | false |
emCOMP/visualization | src/python/uploadLabels.py | 1 | 5630 | #!/usr/bin/env python
import json, re, csv
from pprint import pprint
import mysql.connector
event = -9
subset = 1100
feature = 'JustBot' # JustBot, Cluster, BotLabel, Truthy
# Queries
#query = ("INSERT INTO UserLabel "
# "(Event, Subset, UserID, Screenname, Bot, Botnet, Sample, Cluster, Cluster2, Profile, Status) "
# "VALUES (%(Event)s, %(Subset)s, %(UserID)s, %(Screenname)s, %(Bot)s, %(Botnet)s, %(Sample)s, %(Cluster)s, %(Cluster2)s, %(Profile)s, %(Status)s) "
# "ON DUPLICATE KEY UPDATE Screenname=%(Screenname)s, Bot=%(Bot)s, Botnet=%(Botnet)s, Sample=%(Sample)s, Cluster=%(Cluster)s, Cluster2=%(Cluster2)s, Profile=%(Profile)s, Status=%(Status)s;")
query = ("UPDATE IGNORE UserLabel "
"SET Bot=%(Bot)s "
"WHERE Event = %(Event)s "
" AND Subset = %(Subset)s "
" AND UserID = %(UserID)s ;")
if(feature == 'BotLabel'):
query = ("UPDATE IGNORE UserLabel "
"SET Screenname=%(Screenname)s, Bot=%(Bot)s, Botnet=%(Botnet)s, Sample=%(Sample)s, Cluster=%(Cluster)s, Profile=%(Profile)s, Status=%(Status)s "
"WHERE Event = %(Event)s "
" AND Subset = %(Subset)s "
" AND UserID = %(UserID)s ;")
if(feature == 'Truthy'):
query = ("UPDATE IGNORE UserLabel "
"SET TruthyScore = %(TruthyScore)s, "
" TruthyContent = %(TruthyContent)s, "
" TruthyTemporal = %(TruthyTemporal)s, "
" TruthyNetwork = %(TruthyNetwork)s, "
" TruthyFriend = %(TruthyFriend)s, "
" TruthySentiment = %(TruthySentiment)s, "
" TruthyUser = %(TruthyUser)s "
"WHERE Event = %(Event)s "
" AND Subset = %(Subset)s "
" AND Screenname = %(Screenname)s ")
elif (feature == 'Cluster'):
query = ("UPDATE IGNORE UserLabel "
"SET Screenname = %(Screenname)s, FollowerCluster=%(FollowerCluster)s, FollowingCluster=%(FollowingCluster)s, FollowshipCluster=%(FollowshipCluster)s, FollowerClusterRaw=%(FollowerClusterRaw)s, FollowingClusterRaw=%(FollowingClusterRaw)s, FollowshipClusterRaw=%(FollowshipClusterRaw)s "
"WHERE Event = %(Event)s "
" AND Subset = %(Subset)s "
" AND UserID = %(UserID)s ;")
def main():
database = connectToServer()
cursor = database.cursor()
folder = 'C:\\Users\\anied\\Google Drive\\Grad School\\MisInfo Group\\Twitter Bots\\'
filename = 'ManualAnnotation_20160520_1516.tsv'
if(feature == 'JustBot'):
filename = 'Both Crisis Actors Graph\\SimpleBotLabels.csv'
if(feature == 'Truthy'):
filename = 'bot_scores_all_crisis_actor.csv'
elif (feature == 'Cluster'):
filename = 'Both Crisis Actors Graph\\BothCrisisActors_Clusters.csv'
counted = 0
with open(folder + filename, 'r') as csvfile:
delim = '\t'
if(feature == 'Truthy' or feature == 'Cluster'):
delim = ','
csvrows = csv.reader(csvfile, delimiter=delim)
for row in csvrows:
data = {
'Event': event,
'Subset': subset,
}
if(feature == 'JustBot'):
data['UserID'] = row[0]
data['Screenname'] = row[1][:25]
data['Bot'] = row[2]
if(feature == 'BotLabel'):
botnet = row[5]
bot = 'Bot'
if(botnet == 'Unknowable' or len(botnet) == 0): bot = 'Unlabeled'
if(botnet == 'No'): bot = 'Human'
if('Probably' in botnet or 'Hybrid' == botnet): bot = 'Uncertain'
if('TRS' in botnet): bot = 'TRS'
data['UserID'] = row[0]
data['Screenname'] = row[1][:25]
data['Sample'] = row[2][:25]
data['Cluster'] = row[3][:25]
# data['Cluster2'] = row[4][:25]
data['Bot'] = bot
data['Botnet'] = botnet[:25]
data['Status'] = row[6][:10] or 'Active'
data['Profile'] = row[7][:45]
elif(feature == 'Truthy'):
data['Screenname'] = row[0]
data['TruthyContent'] = row[1]
data['TruthyTemporal'] = row[2]
data['TruthyNetwork'] = row[3]
data['TruthyFriend'] = row[4]
data['TruthySentiment'] = row[5]
data['TruthyUser'] = row[6]
data['TruthyScore'] = row[7]
elif (feature == 'Cluster'):
data['UserID'] = row[0]
data['Screenname'] = row[1][:25]
data['FollowerCluster'] = row[2]
data['FollowingCluster'] = row[3]
data['FollowshipCluster'] = row[4]
data['FollowerClusterRaw'] = row[5]
data['FollowingClusterRaw'] = row[6]
data['FollowshipClusterRaw'] = row[7]
cursor.execute(query, data)
counted += 1
if(counted % 1000 == 0):
print(str(counted))
database.commit()
print(str(counted))
database.commit()
cursor.close()
database.close()
def connectToServer():
with open('../../local.conf') as config_file:
config = json.load(config_file)
# MySQL Storage
return mysql.connector.connect(
user=config["storage"]["user"],
password=config["storage"]["password"],
host=config["storage"]["host"],
database=config["storage"]["database"]
)
if __name__ == "__main__":
main()
| mit | -8,778,681,882,279,005,000 | 40.094891 | 298 | 0.52238 | false |
benjohnston24/scaling-pancake | nnet/tests_unit/test_resources.py | 1 | 8019 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# S.D.G
"""
Test the resources module of the package
"""
# Imports
import unittest
from unittest.mock import mock_open, patch
import nnet.resources as resources
import os
import pandas
import numpy as np
from collections import OrderedDict
import random
__author__ = 'Ben Johnston'
__revision__ = '0.1'
__date__ = '04-Aug-2016 14:34:55 AEST'
__license__ = 'MPL v2.0'
mock_file_open = mock_open()
def assert_data_division(utest_obj, x_train, y_train, x_valid, y_valid, split_ratio, split_ratio_calculated):
# Check equal lengths
utest_obj.assertEqual(len(x_train), len(y_train), 'x and y train dataset lengths not equal: %d != %d' %
(len(x_train), len(y_train)))
utest_obj.assertEqual(len(x_valid), len(y_valid), 'x and y valid dataset lengths not equal: %d != %d' %
(len(x_valid), len(y_valid)))
# Check the correct ratios
utest_obj.assertEqual(split_ratio_calculated, split_ratio,
'incorrect split ratio: %0.2f' % split_ratio_calculated)
class TestResources(unittest.TestCase):
"""Test the resources"""
def setUp(self):
self.train_data_extract_landmarks = pandas.DataFrame({
'left_eye_center_x': pandas.Series([1, 1]),
'left_eye_center_y': pandas.Series([2, 2]),
'left_eye_inner_corner_x': pandas.Series([3, 3]),
'right_eye_center_x': pandas.Series([4, 4]),
'right_eye_center_y': pandas.Series([5, 5]),
'Image': pandas.Series(["255 255 255 255", "255 255 255 255"]),
})
def test_resources_path(self):
"""Test the correct resources path """
# Check the path is correct
self.assertEqual(os.path.relpath(resources.RESOURCE_DIR, __file__),
'../../resources')
def test_training_set_filename(self):
"""Test the training set filename"""
# Check the default training set name
self.assertEqual(os.path.basename(resources.DEFAULT_TRAIN_SET), 'training.csv')
def test_load_training_data(self):
"""Load the training set"""
train_data = resources.load_training_data()
# Check the default number of training samples
self.assertEqual(train_data.shape[0], 7049, 'incorrect number of training samples %d != %d' %
(train_data.shape[0], 7049))
self.assertEqual(train_data.shape[1], 31, 'incorrect number of training features %d != %d' %
(train_data.shape[1], 31))
def test_load_data(self):
"""Load the data set with landmarks extracted and training / validation sets split"""
train_data = resources.load_data()
self.assertEqual(len(train_data), 4)
self.assertEqual(train_data[0].shape[0], train_data[1].shape[0])
self.assertEqual(train_data[2].shape[0], train_data[3].shape[0])
self.assertEqual(train_data[0].shape[1], train_data[2].shape[1])
self.assertEqual(train_data[1].shape[1], train_data[3].shape[1])
def test_load_data_different(self):
"""Test the loaded data is in fact different"""
x_train, y_train, x_valid, y_valid = resources.load_data()
self.assertFalse(np.all(x_train == x_valid))
self.assertFalse(np.all(y_train == y_valid))
def test_load_data_from_different_file(self):
"""Test load_data tries to load from a different file, when not present and exception is raised"""
with self.assertRaises(OSError):
train_data = resources.load_data("new_training_set.csv")
def test_remove_incomplete(self):
"""Remove incomplete data"""
train_data = pandas.DataFrame(np.array([
[1, 2],
[3, 4],
[5, np.NaN]]))
selected_data = resources.remove_incomplete_data(train_data)
self.assertLess(selected_data.shape[0], train_data.shape[0])
self.assertEqual(selected_data.shape[1], 2)
def test_image_landmark_extraction_shape(self):
"""Extract landmarks and images"""
train_data = self.train_data_extract_landmarks
x, y = resources.extract_image_landmarks(train_data)
self.assertEqual(len(x), len(y))
self.assertEqual(x.shape[1], 4)
self.assertEqual(y.shape[1], 5)
def test_image_landmark_extraction_x(self):
"""Test image extraction of extract_image_landmarks"""
train_data = self.train_data_extract_landmarks
x, y = resources.extract_image_landmarks(train_data)
np.testing.assert_allclose(x[0], [0, 0, 0, 0])
def test_image_landmark_extraction_y_0(self):
"""Test landmark extraction of extract_image_landmarks 0"""
train_data = self.train_data_extract_landmarks
x, y = resources.extract_image_landmarks(train_data)
np.testing.assert_approx_equal(y[0, 0], np.float32((1 - 48) / 48))
def test_image_landmark_extraction_y_1(self):
"""Test landmark extraction of extract_image_landmarks 1"""
train_data = self.train_data_extract_landmarks
x, y = resources.extract_image_landmarks(train_data)
np.testing.assert_approx_equal(y[0, 1], np.float32((2 - 48) / 48))
def test_image_landmark_extraction_y_2(self):
"""Test landmark extraction of extract_image_landmarks 2"""
train_data = self.train_data_extract_landmarks
x, y = resources.extract_image_landmarks(train_data)
np.testing.assert_approx_equal(y[0, 2], np.float32((3 - 48) / 48))
def test_image_landmark_extraction_y_3(self):
"""Test landmark extraction of extract_image_landmarks 3"""
train_data = self.train_data_extract_landmarks
x, y = resources.extract_image_landmarks(train_data)
np.testing.assert_approx_equal(y[0, 3], np.float32((4 - 48) / 48))
def test_image_landmark_extraction_y_4(self):
"""Test landmark extraction of extract_image_landmarks 4"""
train_data = self.train_data_extract_landmarks
x, y = resources.extract_image_landmarks(train_data)
np.testing.assert_approx_equal(y[0, 4], np.float32((5 - 48) / 48))
def test_splitting_training_data(self):
"""Test default train / valid set split"""
train_data = pandas.DataFrame({
'left_eye_center_x': pandas.Series(random.sample(range(1000), 10)),
'left_eye_center_y': pandas.Series(random.sample(range(1000), 10)),
'left_eye_inner_corner_x': pandas.Series(random.sample(range(1000), 10)),
'right_eye_center_x': pandas.Series(random.sample(range(1000), 10)),
'right_eye_center_y': pandas.Series(random.sample(range(1000), 10)),
'Image': pandas.Series([
"".join(["%s " % str(x) for x in random.sample(range(255), 4)]).strip()
for i in range(10)
]),
})
# Generate random images
x, y = resources.extract_image_landmarks(train_data)
for split_ratio in [0.5, 0.7]:
x_train, y_train, x_valid, y_valid = \
resources.split_training_data(x, y, split_ratio=split_ratio)
split_ratio_calculated = np.round(len(x_train) / (len(x_train) + len(x_valid)), 1)
with self.subTest(split_ratio=split_ratio):
assert_data_division(self, x_train, y_train, x_valid, y_valid, split_ratio, split_ratio_calculated)
# Check the shape of the features
self.assertEqual(x_train.shape[1], 4)
self.assertEqual(y_train.shape[1], 5)
self.assertEqual(x_train.shape[0], y_train.shape[0])
self.assertEqual(x_valid.shape[1], 4)
self.assertEqual(y_valid.shape[1], 5)
self.assertEqual(x_valid.shape[0], y_valid.shape[0])
# Check the data is not equal
self.assertFalse(np.all(x_train == x_valid))
self.assertFalse(np.all(y_train == y_valid))
| mpl-2.0 | 5,614,275,613,330,028,000 | 43.55 | 115 | 0.611548 | false |
Skolopedrion/Theria | src/animation/animation.py | 1 | 1370 | #!/usr/bin/env python3
# coding: utf-8
import os
import glob
import sfml as sf
class Animation:
"""
An animated texture.
"""
def __init__(self, frames, interval=0):
"""
:param frames: Iterable of sf.Texture objects
:param interval: Time between two frames (default: 0.0s)
"""
self.frames = frames
self.interval = interval
self.index = 0
self.time = 0
@classmethod
def load_from_dir(cls, path, interval=None):
"""
Load an animation from a directory. Directory must contain some image
files named by their index (e.g. "1.png", "2.png", etc...)
:param path: str object, path to the directory to load
:param interval: Time between two frames
:return: Animation
"""
if path[-1] not in (os.sep, '/'):
path += os.sep
frames = list()
for frame_path in glob.iglob(path + '[0-9].png'):
frame = sf.Texture.from_file(frame_path)
frames.append(frame)
if interval is None:
return cls(frames)
else:
return cls(frames, interval)
def get_frame(self, dt):
"""
Returns the texture of the entity.
:param dt: The time between the current and the previous frame.
:return: A sf.Texture instance
"""
self.time += dt
if self.time > self.interval:
self.time = 0
self.index += 1
self.index %= len(self.frames)
return self.frames[self.index]
def reset(self):
self.time = 0
self.index = 0
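# A minimal usage sketch (illustrative only -- 'frames/walk' is a hypothetical
# directory of numbered PNGs, and dt would normally come from an sf.Clock):
if __name__ == '__main__':
    walk_cycle = Animation.load_from_dir('frames/walk', interval=0.1)
    if walk_cycle.frames:
        texture = walk_cycle.get_frame(0.016)  # roughly one 60 FPS frame of elapsed time
        print(texture)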
| mit | -2,537,892,201,827,231,000 | 18.571429 | 71 | 0.656204 | false |
gdanezis/rousseau-chain | rousseau-package/attic/chain.py | 1 | 3538 | # Make a hash chain with O(1) update and O(log(N)) proof of membership
from hashlib import sha256 as H
from struct import pack
# Some constants
# The initial value of any chain
# https://en.wikipedia.org/wiki/Om
initialH = H("Om").digest()
def pointFingers(seqLen):
""" Returns the indexes for a particular sequence ID """
seq = 1
while seq <= seqLen:
yield seqLen - seq
seq = seq * 2
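# For example (a worked illustration, not from the original source):
# list(pointFingers(8)) == [7, 6, 4, 0] -- each finger reaches twice as far back
# as the previous one, which is what gives the O(log N) membership proofs.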
class chain(object):
def __init__(self, entries=None, nodes=None):
""" Create a new chain object """
# This holds the actual log entries
# it is a sequnence of byte arrays
self.entries = []
if entries is not None:
self.entries = entries
# The list of 'nodes' holding hashes of the current entry,
# and a sequence of previous node hashes.
self.nodes = []
if nodes is not None:
self.nodes = nodes
def head(self):
""" Return the head of the chain """
if self.nodes == []:
return initialH
else:
return self.nodes[-1]
def add(self, entry):
""" Add an entry at the end of the chain. Returns the index of the new entry. """
# Create the new node head:
entryH = H(entry).digest()
nodeDigest = H(pack("L", len(self.entries)))
nodeDigest.update(entryH)
# Gather which other nodes are to be included:
for i in pointFingers(len(self.entries)):
nodeDigest.update(self.nodes[i])
nodeH = nodeDigest.digest()
self.entries.append(entryH)
self.nodes.append(nodeH)
return len(self.entries) - 1
def evidence(self, seq):
""" Gather evidence that the entry is at a sequence number in the chain. """
entries = {}
nodes = {}
# Add evidence as required
target = len(self.entries) - 1
while seq not in entries:
# Store the entry for the current target
entries[target] = self.entries[target]
nodes[target] = self.nodes[target]
# Store the nodes on which we depend
for i in pointFingers(target):
nodes[i] = self.nodes[i]
if i >= seq:
target = i
# Return all necessary entries and nodes
return entries, nodes
def check_evidence(head, seq, evidence, entry=None, node=None):
""" Check that a bundle of evidence is correct, and correspond to,
a known head, and optionally a known entry and known node. Returns
True or raises an exception. """
entries, nodes = evidence
head_index = max(entries.keys())
# CHECK 1: the head equals the head
if not (head == nodes[head_index]):
raise Exception("Wrong Head")
# CHECK 2: all the hashes match
target = head_index
while target != seq:
new_target = target
# Make the digest
d = H(pack("L", target))
d.update(entries[target])
for i in pointFingers(target):
d.update(nodes[i])
if i >= seq:
new_target = i
if d.digest() != nodes[target]:
raise Exception("Broken Chain")
target = new_target
# CHECK 3: is the node correct?
if node:
if not (node == nodes[seq]):
raise Exception("Wrong end node")
# CHECK 4: is the actual entry correct?
if entry:
if not (H(entry).digest() == entries[seq]):
raise Exception("Wrong end entry")
return True
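# A minimal usage sketch (illustrative only; the entry strings are hypothetical
# and not part of the original module):
if __name__ == "__main__":
    c = chain()
    for entry in ["alpha", "beta", "gamma", "delta"]:
        c.add(entry)
    # Prove that "beta" sits at sequence number 1 and verify against the head.
    proof = c.evidence(1)
    assert check_evidence(c.head(), 1, proof, entry="beta")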
| bsd-2-clause | 8,261,124,379,964,596,000 | 27.079365 | 89 | 0.574901 | false |
MeadowHillSoftware/Nativity-in-Bits | NiB.py | 1 | 56126 | # Nativity in Bits 0.1.5
# Copyright 2008, 2009 Meadow Hill Software
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from random import randrange
character = {}
def diceRoll(number, die):
rolls = []
num = 0
die += 1
while num < number:
roll = randrange(1, die)
rolls.append(roll)
num += 1
    # Sum the individual rolls to get the total result.
    result = 0
    for roll in rolls:
        result += roll
    return result
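# For example (illustrative), diceRoll(2, 10) rolls 2d10 and returns their sum,
# which is how the age and height modifiers further below are generated.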
def neutrality(morals):
global character
if morals == "Neutral":
character["Alignment"] = "True " + morals
else:
character["Alignment"] = "Neutral " + morals
def humanCommunity():
number = randrange(1, 101)
global character
if number < 6:
character["Community"] = "Small Tribe"
elif number < 11:
character["Community"] = "Religious, Arcane, Monastic, or Military Compound"
elif number < 21:
character["Community"] = "Frontier Homestead"
elif number < 36:
character["Community"] = "Thorp"
elif number < 56:
character["Community"] = "Hamlet"
elif number < 76:
character["Community"] = "Village"
elif number < 81:
character["Community"] = "Small Town"
elif number < 86:
character["Community"] = "Large Town"
elif number < 91:
character["Community"] = "Small City"
elif number < 96:
character["Community"] = "Large City"
else:
character["Community"] = "Metropolis"
def dwarvenCommunity():
number = randrange(1, 91)
global character
if number < 11:
character["Community"] = "Single-Family Redoubt"
elif number < 21:
character["Community"] = "Prospecting Camp"
elif number < 31:
character["Community"] = "Small Mine"
elif number < 46:
character["Community"] = "Large Mine"
elif number < 66:
character["Community"] = "Delve"
else:
character["Community"] = "Large Delve"
def elvenCommunity():
number = randrange(1, 96)
global character
if number < 51:
character["Community"] = "Encampment"
elif number < 86:
character["Community"] = "Village"
else:
character["Community"] = "City"
def ethics(morals):
global character
number = randrange(1, 7)
if number < 3:
character["Alignment"] = "Lawful " + morals
elif number < 5:
neutrality(morals)
else:
character["Alignment"] = "Chaotic " + morals
def nonlawfulEthics(morals):
global character
number = randrange(1, 5)
if number < 3:
character["Alignment"] = "Chaotic " + morals
else:
neutrality(morals)
def dwarvenEthics(morals):
global character
number = randrange(1, 97)
if number < 66:
character["Alignment"] = "Lawful " + morals
elif number < 86:
neutrality(morals)
else:
character["Alignment"] = "Chaotic " + morals
def nonlawfulDwarf(morals):
global character
number = randrange(1, 37)
if number < 26:
neutrality(morals)
else:
character["Alignment"] = "Chaotic " + morals
def elvenEthics(morals):
global character
number = randrange(1, 97)
if number < 66:
character["Alignment"] = "Chaotic " + morals
elif number < 86:
neutrality(morals)
else:
character["Alignment"] = "Lawful " + morals
def nonlawfulElf(morals):
global character
number = randrange(1, 86)
if number < 66:
character["Alignment"] = "Chaotic " + morals
else:
neutrality(morals)
def hinEthics(morals):
global character
number = randrange(1, 101)
if number < 61:
neutrality(morals)
elif number < 81:
character["Alignment"] = "Chaotic " + morals
else:
character["Alignment"] = "Lawful " + morals
def nonlawfulHin(morals):
global character
number = randrange(1, 81)
if number < 61:
neutrality(morals)
else:
character["Alignment"] = "Chaotic " + morals
def specialist():
global character
align = character["Alignment"]
number = randrange(1, 101)
if align == "Lawful Good":
if number < 52:
character["Class"] = "Abjurer"
elif number < 54:
character["Class"] = "Conjurer"
elif number < 69:
character["Class"] = "Diviner"
elif number < 73:
character["Class"] = "Enchanter"
elif number < 85:
character["Class"] = "Evoker"
elif number < 89:
character["Class"] = "Illusionist"
elif number < 97:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
elif align == "Lawful Neutral":
if number < 18:
character["Class"] = "Abjurer"
elif number < 23:
character["Class"] = "Conjurer"
elif number < 71:
character["Class"] = "Diviner"
elif number < 75:
character["Class"] = "Enchanter"
elif number < 89:
character["Class"] = "Evoker"
elif number < 93:
character["Class"] = "Illusionist"
elif number < 97:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
elif align == "Lawful Evil":
if number < 12:
character["Class"] = "Abjurer"
elif number < 18:
character["Class"] = "Conjurer"
elif number < 38:
character["Class"] = "Diviner"
elif number < 43:
character["Class"] = "Enchanter"
elif number < 59:
character["Class"] = "Evoker"
elif number < 64:
character["Class"] = "Illusionist"
elif number < 96:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
elif align == "Neutral Good":
if number < 24:
character["Class"] = "Abjurer"
elif number < 31:
character["Class"] = "Conjurer"
elif number < 38:
character["Class"] = "Diviner"
elif number < 49:
character["Class"] = "Enchanter"
elif number < 67:
character["Class"] = "Evoker"
elif number < 78:
character["Class"] = "Illusionist"
elif number < 90:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
elif align == "True Neutral":
if number < 8:
character["Class"] = "Abjurer"
elif number < 22:
character["Class"] = "Conjurer"
elif number < 42:
character["Class"] = "Diviner"
elif number < 54:
character["Class"] = "Enchanter"
elif number < 73:
character["Class"] = "Evoker"
elif number < 84:
character["Class"] = "Illusionist"
elif number < 90:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
elif align == "Neutral Evil":
if number < 4:
character["Class"] = "Abjurer"
elif number < 16:
character["Class"] = "Conjurer"
elif number < 22:
character["Class"] = "Diviner"
elif number < 32:
character["Class"] = "Enchanter"
elif number < 48:
character["Class"] = "Evoker"
elif number < 58:
character["Class"] = "Illusionist"
elif number < 91:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
elif align == "Chaotic Good":
if number < 8:
character["Class"] = "Abjurer"
elif number < 20:
character["Class"] = "Conjurer"
elif number < 22:
character["Class"] = "Diviner"
elif number < 43:
character["Class"] = "Enchanter"
elif number < 53:
character["Class"] = "Evoker"
elif number < 74:
character["Class"] = "Illusionist"
elif number < 80:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
elif align == "Chaotic Neutral":
if number < 3:
character["Class"] = "Abjurer"
elif number < 26:
character["Class"] = "Conjurer"
elif number < 32:
character["Class"] = "Diviner"
elif number < 51:
character["Class"] = "Enchanter"
elif number < 60:
character["Class"] = "Evoker"
elif number < 79:
character["Class"] = "Illusionist"
elif number < 82:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
else:
if number < 2:
character["Class"] = "Abjurer"
elif number < 23:
character["Class"] = "Conjurer"
elif number < 25:
character["Class"] = "Diviner"
elif number < 42:
character["Class"] = "Enchanter"
elif number < 50:
character["Class"] = "Evoker"
elif number < 67:
character["Class"] = "Illusionist"
elif number < 84:
character["Class"] = "Necromancer"
else:
character["Class"] = "Transmuter"
def write_file():
stats = file("adventurer.txt", "w")
stats.write("Generated by Nativity in Bits 0.1.5\nSee the Hero Builder's Guidebook (pg. 38) and Player's Handbook II (pg. 136) for more information about some of these terms.\n\nAdventurer Statistics\n")
stats.write("-----------------------------------\n")
stats.write("Class = " + character["Class"] + "\n")
stats.write("Race = " + character["Race"] + "\n")
stats.write("Alignment = " + character["Alignment"] + "\n")
stats.write("Age = " + character["Age"] + "\n")
stats.write("Gender = " + character["Gender"] + "\n")
stats.write("Height = " + character["Height"] + "\n")
stats.write("Temperature Zone = " + character["Temperature Zone"] + "\n")
stats.write("Terrain = " + character["Terrain"] + "\n")
stats.write("Community = " + character["Community"] + "\n")
stats.write("Family Economic Status = " + character["Family Economic Status"] + "\n")
stats.write("Family Social Standing = " + character["Family Social Standing"] + "\n")
stats.write("Family Defense Readiness = " + character["Family Defense Readiness"] + "\n")
stats.write("Family Private Ethics = " + character["Family Private Ethics"] + "\n")
stats.write("Family Public Ethics = " + character["Family Public Ethics"] + "\n")
stats.write("Family Religious Commitment = " + character["Family Religious Commitment"] + "\n")
stats.write("Family Reputation = " + character["Family Reputation"] + "\n")
stats.write("Family Political Views = " + character["Family Political Views"] + "\n")
stats.write("Family Power Structure = " + character["Family Power Structure"] + "\n")
stats.write("Ancestors of Note = " + character["Ancestors of Note"] + "\n")
stats.write("Early Childhood Instruction = " + character["Early Childhood Instruction"] + "\n")
stats.write("Formal Education = " + character["Formal Education"] + "\n")
stats.write("Learning a Trade = " + character["Learning a Trade"] + "\n")
stats.write("Early Childhood Events = " + character["Early Childhood Events"] + "\n")
stats.write("Youth Events = " + character["Youth Events"] + "\n")
stats.write("Pivotal Events = " + character["Pivotal Events"] + "\n")
stats.write("Parents = " + character["Parents"] + "\n")
stats.write("Siblings = " + character["Siblings"] + "\n")
stats.write("Grandparents = " + character["Grandparents"] + "\n")
stats.write("Extended Family = " + character["Extended Family"] + "\n")
stats.write("Friends = " + character["Friends"] + "\n")
stats.write("Enemies = " + character["Enemies"] + "\n")
stats.write("Instructors = " + character["Instructors"] + "\n")
stats.write("Personality Archetype = " + character["Archetype"] + "\n")
stats.write("Personality Traits = " + character["Traits"] + "\n")
stats.close()
number = randrange(1, 101)
if number < 51:
character["Alignment"] = "Good"
elif number < 91:
character["Alignment"] = "Neutral"
else:
character["Alignment"] = "Evil"
number = randrange(1, 101)
if character["Alignment"] == "Good":
if number < 6:
character["Class"] = "Barbarian"
elif number < 11:
character["Class"] = "Bard"
elif number < 31:
character["Class"] = "Cleric"
elif number < 36:
character["Class"] = "Druid"
elif number < 46:
character["Class"] = "Fighter"
elif number < 51:
character["Class"] = "Monk"
elif number < 56:
character["Class"] = "Paladin"
elif number < 66:
character["Class"] = "Ranger"
elif number < 76:
character["Class"] = "Rogue"
elif number < 81:
character["Class"] = "Sorcerer"
else:
character["Class"] = "Wizard"
elif character["Alignment"] == "Neutral":
if number < 6:
character["Class"] = "Barbarian"
elif number < 11:
character["Class"] = "Bard"
elif number < 16:
character["Class"] = "Cleric"
elif number < 26:
character["Class"] = "Druid"
elif number < 46:
character["Class"] = "Fighter"
elif number < 51:
character["Class"] = "Monk"
elif number < 56:
character["Class"] = "Ranger"
elif number < 76:
character["Class"] = "Rogue"
elif number < 81:
character["Class"] = "Sorcerer"
else:
character["Class"] = "Wizard"
else:
if number < 11:
character["Class"] = "Barbarian"
elif number < 16:
character["Class"] = "Bard"
elif number < 36:
character["Class"] = "Cleric"
elif number < 41:
character["Class"] = "Druid"
elif number < 51:
character["Class"] = "Fighter"
elif number < 56:
character["Class"] = "Monk"
elif number < 61:
character["Class"] = "Ranger"
elif number < 81:
character["Class"] = "Rogue"
elif number < 86:
character["Class"] = "Sorcerer"
else:
character["Class"] = "Wizard"
if character["Alignment"] == "Good":
if character["Class"] == "Barbarian":
#table figures multiplied by 75. Assuming one-third of 1% of good barbarians are gnomes, this yields 25 good gnome barbarians.
number = randrange(1, 7376)
if number < 151:
character["Race"] = "Dwarf"
elif number < 2551:
character["Race"] = "Elf"
elif number < 2576:
character["Race"] = "Gnome"
elif number < 2651:
character["Race"] = "Half-Elf"
elif number < 2726:
character["Race"] = "Halfling"
elif number < 4601:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Bard":
#table figures multiplied by 3. This yields 18 good gnome bards.
number = randrange(1, 319)
if number < 16:
character["Race"] = "Dwarf"
elif number < 112:
character["Race"] = "Elf"
elif number < 130:
character["Race"] = "Gnome"
elif number < 157:
character["Race"] = "Half-Elf"
elif number < 166:
character["Race"] = "Halfling"
elif number < 169:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Cleric":
#table figures multiplied by 5. This yields 50 good gnome clerics.
number = randrange(1, 471)
if number < 116:
character["Race"] = "Dwarf"
elif number < 201:
character["Race"] = "Elf"
elif number < 251:
character["Race"] = "Gnome"
elif number < 276:
character["Race"] = "Half-Elf"
elif number < 341:
character["Race"] = "Halfling"
elif number < 346:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Druid":
#table figures multiplied by 36. Assuming one-third of 1% of good druids are dwarves, this yields 12 good dwarf druids.
number = randrange(1, 3577)
if number < 13:
character["Race"] = "Dwarf"
elif number < 1129:
character["Race"] = "Elf"
elif number < 1345:
character["Race"] = "Gnome"
elif number < 1669:
character["Race"] = "Half-Elf"
elif number < 1741:
character["Race"] = "Halfling"
elif number < 1777:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Fighter":
#table figures multiplied by 25. This yields 25 good gnome fighters.
number = randrange(1, 2426)
if number < 1026:
character["Race"] = "Dwarf"
elif number < 1176:
character["Race"] = "Elf"
elif number < 1201:
character["Race"] = "Gnome"
elif number < 1251:
character["Race"] = "Half-Elf"
elif number < 1301:
character["Race"] = "Halfling"
elif number < 1426:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Monk":
#table figures multiplied by 75. Assuming one-third of 1% of good monks are gnomes, this yields 25 good gnome monks.
number = randrange(1, 7151)
if number < 76:
character["Race"] = "Dwarf"
elif number < 826:
character["Race"] = "Elf"
elif number < 851:
character["Race"] = "Gnome"
elif number < 1226:
character["Race"] = "Half-Elf"
elif number < 1376:
character["Race"] = "Halfling"
elif number < 1751:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Paladin":
#table figures multiplied by 3. Assuming one-third of 1% of paladins are elves, this yields 1 elf paladin.
number = randrange(1, 263)
if number < 34:
character["Race"] = "Dwarf"
elif number < 35:
character["Race"] = "Elf"
elif number < 38:
character["Race"] = "Gnome"
elif number < 53:
character["Race"] = "Half-Elf"
elif number < 59:
character["Race"] = "Halfling"
elif number < 62:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Ranger":
#table figures multiplied by 9. This yields 45 good dwarf rangers.
number = randrange(1, 874)
if number < 46:
character["Race"] = "Dwarf"
elif number < 325:
character["Race"] = "Elf"
elif number < 379:
character["Race"] = "Gnome"
elif number < 514:
character["Race"] = "Half-Elf"
elif number < 532:
character["Race"] = "Halfling"
elif number < 577:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Rogue":
#table figures multiplied by 5. This yields 30 good gnome rogues.
number = randrange(1, 481)
if number < 31:
character["Race"] = "Dwarf"
elif number < 96:
character["Race"] = "Elf"
elif number < 126:
character["Race"] = "Gnome"
elif number < 176:
character["Race"] = "Half-Elf"
elif number < 361:
character["Race"] = "Halfling"
elif number < 386:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Sorcerer":
#table figures multiplied by 9. This yields 36 good dwarf sorcerers.
number = randrange(1, 838)
if number < 37:
character["Race"] = "Dwarf"
elif number < 316:
character["Race"] = "Elf"
elif number < 343:
character["Race"] = "Gnome"
elif number < 388:
character["Race"] = "Half-Elf"
elif number < 487:
character["Race"] = "Halfling"
elif number < 505:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Wizard":
#table figures multiplied by 12. This yields 12 good dwarf wizards.
number = randrange(1, 1141)
if number < 13:
character["Race"] = "Dwarf"
elif number < 493:
character["Race"] = "Elf"
elif number < 565:
character["Race"] = "Gnome"
elif number < 685:
character["Race"] = "Half-Elf"
elif number < 793:
character["Race"] = "Halfling"
elif number < 805:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Alignment"] == "Neutral":
if character["Class"] == "Barbarian":
#gnomes drop by a factor of 5. This yields 5 neutral gnome barbarians.
number = randrange(1, 6531)
if number < 151:
character["Race"] = "Dwarf"
elif number < 1051:
character["Race"] = "Elf"
elif number < 1056:
character["Race"] = "Gnome"
elif number < 1206:
character["Race"] = "Half-Elf"
elif number < 1431:
character["Race"] = "Halfling"
elif number < 4356:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Bard":
#gnomes drop by a factor of 3. This yields 6 neutral gnome bards.
number = randrange(1, 268)
if number < 10:
character["Race"] = "Dwarf"
elif number < 64:
character["Race"] = "Elf"
elif number < 70:
character["Race"] = "Gnome"
elif number < 100:
character["Race"] = "Half-Elf"
elif number < 115:
character["Race"] = "Halfling"
elif number < 121:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Cleric":
#gnomes drop by a factor of 10. This yields 5 neutral gnome clerics.
number = randrange(1, 451)
if number < 131:
character["Race"] = "Dwarf"
elif number < 191:
character["Race"] = "Elf"
elif number < 196:
character["Race"] = "Gnome"
elif number < 241:
character["Race"] = "Half-Elf"
elif number < 301:
character["Race"] = "Halfling"
elif number < 311:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Druid":
#dwarves drop by one-third. This yields 8 neutral dwarf druids.
number = randrange(1, 3177)
if number < 9:
character["Race"] = "Dwarf"
elif number < 1125:
character["Race"] = "Elf"
elif number < 1161:
character["Race"] = "Gnome"
elif number < 1341:
character["Race"] = "Half-Elf"
elif number < 1413:
character["Race"] = "Halfling"
elif number < 1449:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Fighter":
#gnomes drop by a factor of 5. This yields 5 neutral gnome fighters.
number = randrange(1, 2406)
if number < 851:
character["Race"] = "Dwarf"
elif number < 1026:
character["Race"] = "Elf"
elif number < 1031:
character["Race"] = "Gnome"
elif number < 1156:
character["Race"] = "Half-Elf"
elif number < 1206:
character["Race"] = "Halfling"
elif number < 1456:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Monk":
#gnomes drop by a factor of 5. This yields 5 neutral gnome monks.
number = randrange(1, 7556)
if number < 51:
character["Race"] = "Dwarf"
elif number < 276:
character["Race"] = "Elf"
elif number < 281:
character["Race"] = "Gnome"
elif number < 1031:
character["Race"] = "Half-Elf"
elif number < 1181:
character["Race"] = "Halfling"
elif number < 1931:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Ranger":
#dwarves drop by a factor of 5. This yields 9 neutral dwarf rangers.
number = randrange(1, 865)
if number < 10:
character["Race"] = "Dwarf"
elif number < 325:
character["Race"] = "Elf"
elif number < 343:
character["Race"] = "Gnome"
elif number < 496:
character["Race"] = "Half-Elf"
elif number < 514:
character["Race"] = "Halfling"
elif number < 604:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Rogue":
#gnomes drop by a factor of 6. This yields 5 neutral gnome rogues.
number = randrange(1, 486)
if number < 21:
character["Race"] = "Dwarf"
elif number < 46:
character["Race"] = "Elf"
elif number < 51:
character["Race"] = "Gnome"
elif number < 126:
character["Race"] = "Half-Elf"
elif number < 316:
character["Race"] = "Halfling"
elif number < 366:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Sorcerer":
#dwarves drop by a factor of 4. This yields 9 neutral dwarf sorcerers.
number = randrange(1, 856)
if number < 10:
character["Race"] = "Dwarf"
elif number < 136:
character["Race"] = "Elf"
elif number < 145:
character["Race"] = "Gnome"
elif number < 280:
character["Race"] = "Half-Elf"
elif number < 388:
character["Race"] = "Halfling"
elif number < 433:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Wizard":
#dwarves drop by one-third. This yields 8 neutral dwarf wizards.
number = randrange(1, 1173)
if number < 9:
character["Race"] = "Dwarf"
elif number < 345:
character["Race"] = "Elf"
elif number < 357:
character["Race"] = "Gnome"
elif number < 537:
character["Race"] = "Half-Elf"
elif number < 597:
character["Race"] = "Halfling"
elif number < 609:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
else:
if character["Class"] == "Barbarian":
#gnomes drop by another factor of 5. This yields 1 evil gnome barbarian.
        number = randrange(1, 2944)
if number < 18:
character["Race"] = "Dwarf"
elif number < 243:
character["Race"] = "Elf"
elif number < 244:
character["Race"] = "Gnome"
elif number < 319:
character["Race"] = "Half-Elf"
elif number < 469:
character["Race"] = "Halfling"
elif number < 2194:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Bard":
#gnomes drop by a factor of 5. This yields 1 evil gnome bard.
number = randrange(1, 120)
if number < 2:
character["Race"] = "Dwarf"
elif number < 11:
character["Race"] = "Elf"
elif number < 12:
character["Race"] = "Gnome"
elif number < 15:
character["Race"] = "Half-Elf"
elif number < 21:
character["Race"] = "Halfling"
elif number < 90:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Cleric":
#gnomes drop by a factor of 5. This yields 1 evil gnome cleric.
number = randrange(1, 282)
if number < 16:
character["Race"] = "Dwarf"
elif number < 41:
character["Race"] = "Elf"
elif number < 42:
character["Race"] = "Gnome"
elif number < 92:
character["Race"] = "Half-Elf"
elif number < 112:
character["Race"] = "Halfling"
elif number < 127:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Druid":
#dwarves drop by a factor of 9. This yields 1 evil dwarf druid.
number = randrange(1, 2025)
if number < 2:
character["Race"] = "Dwarf"
elif number < 73:
character["Race"] = "Elf"
elif number < 81:
character["Race"] = "Gnome"
elif number < 117:
character["Race"] = "Half-Elf"
elif number < 153:
character["Race"] = "Halfling"
elif number < 225:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Fighter":
#gnomes drop by another factor of 5. This yields 1 evil gnome fighter.
number = randrange(1, 1327)
if number < 101:
character["Race"] = "Dwarf"
elif number < 176:
character["Race"] = "Elf"
elif number < 177:
character["Race"] = "Gnome"
elif number < 302:
character["Race"] = "Half-Elf"
elif number < 352:
character["Race"] = "Halfling"
elif number < 577:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Monk":
#gnomes drop by another factor of 5. This yields 1 evil gnome monk.
number = randrange(1, 6889)
if number < 7:
character["Race"] = "Dwarf"
elif number < 63:
character["Race"] = "Elf"
elif number < 64:
character["Race"] = "Gnome"
elif number < 814:
character["Race"] = "Half-Elf"
elif number < 889:
character["Race"] = "Halfling"
elif number < 1639:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Ranger":
#dwarves drop by a factor of 9. This yields 1 evil dwarf ranger.
number = randrange(1, 627)
if number < 2:
character["Race"] = "Dwarf"
elif number < 101:
character["Race"] = "Elf"
elif number < 105:
character["Race"] = "Gnome"
elif number < 258:
character["Race"] = "Half-Elf"
elif number < 276:
character["Race"] = "Halfling"
elif number < 357:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Rogue":
#gnomes drop by a factor of 5. This yields 1 evil gnome rogue.
number = randrange(1, 352)
if number < 6:
character["Race"] = "Dwarf"
elif number < 16:
character["Race"] = "Elf"
elif number < 17:
character["Race"] = "Gnome"
elif number < 92:
character["Race"] = "Half-Elf"
elif number < 202:
character["Race"] = "Halfling"
elif number < 252:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Sorcerer":
#dwarves drop by a factor of 9. This yields 1 evil dwarf sorcerer.
number = randrange(1, 616)
if number < 2:
character["Race"] = "Dwarf"
elif number < 11:
character["Race"] = "Elf"
elif number < 13:
character["Race"] = "Gnome"
elif number < 148:
character["Race"] = "Half-Elf"
elif number < 211:
character["Race"] = "Halfling"
elif number < 256:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
elif character["Class"] == "Wizard":
#dwarves drop by a factor of 9. This yields 1 evil dwarf wizard.
number = randrange(1, 944)
if number < 2:
character["Race"] = "Dwarf"
elif number < 134:
character["Race"] = "Elf"
elif number < 136:
character["Race"] = "Gnome"
elif number < 316:
character["Race"] = "Half-Elf"
elif number < 340:
character["Race"] = "Halfling"
elif number < 344:
character["Race"] = "Half-Orc"
else:
character["Race"] = "Human"
job = character["Class"]
morals = character["Alignment"]
race = character["Race"]
if job == "Bard" or job == "Barbarian":
if race == "Dwarf":
nonlawfulDwarf(morals)
elif race == "Halfling":
nonlawfulHin(morals)
elif race == "Gnome" or race == "Human":
nonlawfulEthics(morals)
else:
nonlawfulElf(morals)
elif job == "Druid":
if morals != "Neutral":
character["Alignment"] = "Neutral " + morals
else:
if race == "Dwarf":
dwarvenEthics(morals)
elif race == "Halfling":
hinEthics(morals)
elif race == "Gnome" or race == "Human":
ethics(morals)
else:
elvenEthics(morals)
elif job == "Monk":
character["Alignment"] = "Lawful " + morals
elif job == "Paladin":
character["Alignment"] = "Lawful Good"
else:
if race == "Dwarf":
dwarvenEthics(morals)
elif race == "Halfling":
hinEthics(morals)
elif race == "Gnome" or race == "Human":
ethics(morals)
else:
elvenEthics(morals)
if job == "Wizard":
number = randrange(1, 86)
if race == "Gnome":
if number < 66:
character["Class"] = "Illusionist"
else:
number = randrange(1, 86)
if number > 65:
specialist()
else:
if number > 65:
specialist()
number = randrange(1, 101)
if number < 16:
character["Temperature Zone"] = "Cold"
elif number > 65:
character["Temperature Zone"] = "Warm"
else:
character["Temperature Zone"] = "Temperate"
number = randrange(1, 101)
if number < 11:
character["Terrain"] = "Desert"
elif number < 31:
character["Terrain"] = "Plains"
elif number < 46:
character["Terrain"] = "Forest"
elif number < 61:
character["Terrain"] = "Hills"
elif number < 71:
character["Terrain"] = "Mountains"
elif number < 81:
character["Terrain"] = "Marsh"
elif number < 86:
character["Terrain"] = "Aquatic"
elif number < 91:
character["Terrain"] = "Underground"
else:
character["Terrain"] = "Nomadic"
if character["Race"] == "Dwarf":
number = randrange(1, 101)
if number < 11:
character["Community"] = "Single-Family Redoubt"
elif number < 21:
character["Community"] = "Prospecting Camp"
elif number < 31:
character["Community"] = "Small Mine"
elif number < 46:
character["Community"] = "Large Mine"
elif number < 66:
character["Community"] = "Delve"
elif number < 91:
character["Community"] = "Large Delve"
else:
humanCommunity()
value = character["Community"]
value = "Human Area: " + value
character["Community"] = value
elif character["Race"] == "Elf":
number = randrange(1, 101)
if number < 51:
character["Community"] = "Encampment"
elif number < 86:
character["Community"] = "Village"
elif number < 96:
character["Community"] = "City"
else:
humanCommunity()
value = character["Community"]
value = "Human Area: " + value
character["Community"] = value
elif character["Race"] == "Gnome":
number = randrange(1, 101)
if number < 11:
character["Community"] = "Solitary Family"
elif number < 41:
character["Community"] = "Cluster"
elif number < 71:
character["Community"] = "Gathering"
elif number < 81:
humanCommunity()
value = character["Community"]
value = "Human Area: " + value
character["Community"] = value
elif number < 91:
dwarvenCommunity()
value = character["Community"]
value = "Dwarven Area: " + value
character["Community"] = value
else:
elvenCommunity()
value = character["Community"]
value = "Elven Area: " + value
character["Community"] = value
elif character["Race"] == "Half-Elf":
number = randrange(1, 101)
if number < 21:
character["Community"] = "Fringe Community"
elif number < 86:
humanCommunity()
value = character["Community"]
value = "Human Area: " + value
character["Community"] = value
else:
elvenCommunity()
value = character["Community"]
value = "Elven Area: " + value
character["Community"] = value
elif character["Race"] == "Halfling":
number = randrange(1, 101)
if number < 31:
character["Community"] = "Clan"
elif number < 66:
character["Community"] = "Troupe"
elif number < 81:
character["Community"] = "Shire"
elif number < 91:
character["Community"] = "Town"
elif number < 96:
character["Community"] = "County"
else:
humanCommunity()
value = character["Community"]
value = "Human Area: " + value
character["Community"] = value
elif character["Race"] == "Half-Orc":
number = randrange(1, 101)
if number < 21:
character["Community"] = "Fringe Community"
elif number < 86:
humanCommunity()
value = character["Community"]
value = "Human Area: " + value
character["Community"] = value
else:
character["Community"] = "Orc-Dominated Area"
elif character["Race"] == "Human":
humanCommunity()
number = randrange(1, 101)
if number < 6:
character["Family Economic Status"] = "Orphan"
elif number < 16:
character["Family Economic Status"] = "Refugee"
elif number < 41:
character["Family Economic Status"] = "Poor"
elif number < 61:
character["Family Economic Status"] = "Moderate"
elif number < 76:
character["Family Economic Status"] = "Wealthy"
elif number < 81:
character["Family Economic Status"] = "Religious Order"
elif number < 86:
character["Family Economic Status"] = "Arcane Order"
elif number < 91:
character["Family Economic Status"] = "Monastic Order"
elif number < 96:
character["Family Economic Status"] = "Wealth Unimportant"
else:
character["Family Economic Status"] = "Military Support"
number = randrange(1, 101)
if number < 11:
character["Family Social Standing"] = "Newcomer"
elif number < 16:
character["Family Social Standing"] = "Criminal"
elif number < 21:
character["Family Social Standing"] = "Slave"
elif number < 46:
character["Family Social Standing"] = "Lower Class"
elif number < 66:
character["Family Social Standing"] = "Skilled Trade or Merchant Family"
elif number < 76:
character["Family Social Standing"] = "Positive Religious, Arcane, Monastic, or Military Affiliation"
elif number < 86:
character["Family Social Standing"] = "Negative Religious, Arcane, Monastic, or Military Affiliation"
elif number < 96:
character["Family Social Standing"] = "Upper Class"
else:
character["Family Social Standing"] = "Noble"
number = randrange(1, 101)
if number < 11:
character["Family Defense Readiness"] = "None"
elif number < 21:
character["Family Defense Readiness"] = "Low"
elif number < 41:
character["Family Defense Readiness"] = "Rudimentary"
elif number < 56:
character["Family Defense Readiness"] = "Medium"
elif number < 71:
character["Family Defense Readiness"] = "High"
elif number < 81:
character["Family Defense Readiness"] = "Outstanding"
elif number < 91:
character["Family Defense Readiness"] = "Hired"
elif number < 96:
character["Family Defense Readiness"] = "Magical"
else:
character["Family Defense Readiness"] = "Mixed"
number = randrange(1, 101)
if number < 26:
character["Family Private Ethics"] = "Neutral"
elif number < 51:
character["Family Private Ethics"] = "Fair"
elif number < 76:
character["Family Private Ethics"] = "Good"
elif number < 91:
character["Family Private Ethics"] = "Untrustworthy"
else:
character["Family Private Ethics"] = "Evil"
number = randrange(1, 101)
if number < 61:
character["Family Public Ethics"] = "Normal"
elif number < 76:
character["Family Public Ethics"] = "Undeserved"
elif number < 91:
character["Family Public Ethics"] = "Recent Change"
else:
character["Family Public Ethics"] = "Beyond Reproach/Beyond Contempt"
number = randrange(1, 101)
if number < 21:
character["Family Religious Commitment"] = "Neutral/Uninterested"
elif number < 41:
character["Family Religious Commitment"] = "Strong"
elif number < 61:
character["Family Religious Commitment"] = "Historical"
elif number < 71:
character["Family Religious Commitment"] = "Enmity"
elif number < 81:
character["Family Religious Commitment"] = "Participatory"
elif number < 86:
character["Family Religious Commitment"] = "Open Heretics"
elif number < 91:
character["Family Religious Commitment"] = "Hidden Heretics"
else:
character["Family Religious Commitment"] = "Mixed"
number = randrange(1, 101)
if number < 41:
character["Family Reputation"] = "Unknown"
elif number < 56:
character["Family Reputation"] = "Good"
elif number < 66:
character["Family Reputation"] = "Outstanding"
elif number < 76:
character["Family Reputation"] = "A Black Sheep or Two"
elif number < 91:
character["Family Reputation"] = "Mostly Bad"
else:
character["Family Reputation"] = "Bad"
number = randrange(1, 101)
if number < 16:
character["Family Political Views"] = "Apolitical"
elif number < 31:
character["Family Political Views"] = "Supportive"
elif number < 41:
character["Family Political Views"] = "Enfranchised"
elif number < 46:
character["Family Political Views"] = "Enfranchised Progressive"
elif number < 51:
character["Family Political Views"] = "Enfranchised Radical"
elif number < 66:
character["Family Political Views"] = "Loyal Opposition"
elif number < 76:
character["Family Political Views"] = "Dissatisfied"
elif number < 86:
character["Family Political Views"] = "Dissident"
elif number < 91:
character["Family Political Views"] = "Radical"
else:
character["Family Political Views"] = "Mixed"
number = randrange(1, 101)
if number < 11:
character["Family Power Structure"] = "Unorganized"
elif number < 31:
character["Family Power Structure"] = "Elders"
elif number < 41:
character["Family Power Structure"] = "Patriarchy"
elif number < 51:
character["Family Power Structure"] = "Matriarchy"
elif number < 61:
character["Family Power Structure"] = "Oligarchy"
elif number < 71:
character["Family Power Structure"] = "Meritocracy"
elif number < 91:
character["Family Power Structure"] = "Divided"
elif number < 96:
character["Family Power Structure"] = "External"
else:
character["Family Power Structure"] = "Domination"
number = randrange(1, 101)
if number < 50:
character["Ancestors of Note"] = "None"
elif number < 56:
character["Ancestors of Note"] = "Forgotten"
elif number < 61:
character["Ancestors of Note"] = "Immigrant"
elif number < 64:
character["Ancestors of Note"] = "Master Artisan"
elif number < 67:
character["Ancestors of Note"] = "Successful Merchant"
elif number < 70:
character["Ancestors of Note"] = "Unsuccessful Merchant"
elif number < 73:
character["Ancestors of Note"] = "Cleric"
elif number < 76:
character["Ancestors of Note"] = "Arcanist"
elif number < 78:
character["Ancestors of Note"] = "Magic Item"
elif number == 78:
character["Ancestors of Note"] = "Spell Creator"
elif number == 79:
character["Ancestors of Note"] = "Item Creator"
elif number < 82:
character["Ancestors of Note"] = "Victorious Hero"
elif number < 84:
character["Ancestors of Note"] = "Defeated Hero"
elif number == 84:
character["Ancestors of Note"] = "Successful Founder"
elif number == 85:
character["Ancestors of Note"] = "Unsuccessful Founder"
elif number == 86:
character["Ancestors of Note"] = "Successful Leader"
elif number == 87:
character["Ancestors of Note"] = "Unsuccessful Leader"
elif number < 91:
character["Ancestors of Note"] = "Successful Hero"
elif number == 91:
character["Ancestors of Note"] = "Disbelieved Hero"
elif number == 92:
character["Ancestors of Note"] = "False Hero"
elif number == 93:
character["Ancestors of Note"] = "Exile"
elif number == 94:
character["Ancestors of Note"] = "Failed Rebel"
elif number == 95:
character["Ancestors of Note"] = "Traitor"
elif number == 96:
character["Ancestors of Note"] = "Cultist"
elif number == 97:
character["Ancestors of Note"] = "Villain"
elif number == 98:
character["Ancestors of Note"] = "Prophecy"
elif number == 99:
character["Ancestors of Note"] = "God-Touched"
elif number == 100:
character["Ancestors of Note"] = "Otherworldly"
number = randrange(1, 101)
if number < 21:
character["Early Childhood Instruction"] = "Outdoors"
elif number < 41:
character["Early Childhood Instruction"] = "Book Learning"
elif number < 56:
character["Early Childhood Instruction"] = "Religious"
elif number < 66:
character["Early Childhood Instruction"] = "Language"
elif number < 76:
character["Early Childhood Instruction"] = "Arts"
elif number < 86:
character["Early Childhood Instruction"] = "Multicultural"
elif number < 96:
character["Early Childhood Instruction"] = "Business/Politics"
else:
character["Early Childhood Instruction"] = "Magic"
number = randrange(1, 101)
if number < 26:
character["Formal Education"] = "Agriculture"
elif number < 31:
character["Formal Education"] = "History"
elif number < 36:
character["Formal Education"] = "Politics"
elif number < 41:
character["Formal Education"] = "Religion"
elif number < 46:
character["Formal Education"] = "Natural History"
elif number < 51:
character["Formal Education"] = "Multicultural"
elif number < 56:
character["Formal Education"] = "Arts"
elif number < 61:
character["Formal Education"] = "Literature"
elif number < 66:
character["Formal Education"] = "Math"
elif number < 71:
character["Formal Education"] = "Advanced Math"
elif number < 76:
character["Formal Education"] = "Astronomy"
elif number < 86:
character["Formal Education"] = "Finishing School"
elif number < 96:
character["Formal Education"] = "School of Hard Knocks"
else:
character["Formal Education"] = "Magic"
number = randrange(1, 101)
if number < 21:
character["Learning a Trade"] = "Farmer"
elif number < 31:
character["Learning a Trade"] = "Hunter/Trapper"
elif number < 41:
character["Learning a Trade"] = "Craft"
elif number < 51:
character["Learning a Trade"] = "Religious"
elif number < 61:
character["Learning a Trade"] = "Politics"
elif number < 71:
character["Learning a Trade"] = "Healing"
elif number < 76:
character["Learning a Trade"] = "Specialized"
elif number < 86:
character["Learning a Trade"] = "Military Training"
elif number < 91:
character["Learning a Trade"] = "Special Military Training"
elif number < 96:
character["Learning a Trade"] = "Monastery/Knightly Order"
else:
character["Learning a Trade"] = "Arcanist"
number = randrange(1, 101)
if number < 16:
character["Early Childhood Events"] = "Survived Childhood Danger"
elif number < 31:
character["Early Childhood Events"] = "Survived Major Danger to Community"
elif number < 46:
character["Early Childhood Events"] = "Undertook a Long Journey"
elif number < 56:
character["Early Childhood Events"] = "Witness"
elif number < 61:
character["Early Childhood Events"] = "Astronomical Event"
elif number < 66:
character["Early Childhood Events"] = "Personal Epiphany"
elif number < 76:
character["Early Childhood Events"] = "Became a Refugee"
elif number < 86:
character["Early Childhood Events"] = "Death in the Family"
elif number < 96:
character["Early Childhood Events"] = "Illness"
else:
character["Early Childhood Events"] = "Injury or Physical Defect"
number = randrange(1, 101)
if number < 16:
character["Youth Events"] = "Battle"
elif number < 26:
character["Youth Events"] = "Adventure"
elif number < 36:
character["Youth Events"] = "Politics"
elif number < 51:
character["Youth Events"] = "Great Romance"
elif number < 61:
character["Youth Events"] = "Religion"
elif number < 71:
character["Youth Events"] = "Arcane"
elif number < 81:
character["Youth Events"] = "Healing"
elif number < 96:
character["Youth Events"] = "Crime"
else:
character["Youth Events"] = "Discovery"
number = randrange(1, 101)
if number < 56:
character["Pivotal Events"] = "No Pivotal Events"
elif number < 66:
character["Pivotal Events"] = "Refugee"
elif number < 71:
character["Pivotal Events"] = "Cultural Shift"
elif number < 76:
character["Pivotal Events"] = "Under Siege"
elif number < 81:
character["Pivotal Events"] = "Climactic Battle"
elif number < 86:
character["Pivotal Events"] = "All-Out War"
elif number < 96:
character["Pivotal Events"] = "Community Crisis"
else:
character["Pivotal Events"] = "Religious Awakening"
number = randrange(1, 101)
if number < 56:
character["Parents"] = "Two Living Parents"
elif number < 66:
character["Parents"] = "One Living Parent"
elif number < 71:
character["Parents"] = "Both Parents Dead"
elif number < 81:
character["Parents"] = "One Ill"
elif number < 86:
character["Parents"] = "Both Ill"
elif number < 96:
character["Parents"] = "Parents Lost or Unknown"
else:
character["Parents"] = "Adoptive or Foster Parents"
number = randrange(1, 101)
if number < 26:
character["Siblings"] = "No Siblings"
elif number < 46:
sibs = randrange(1, 5)
character["Siblings"] = "Oldest (Younger Siblings: %d)" % sibs
elif number < 76:
sibs1 = randrange(1, 4)
sibs2 = randrange(1, 4)
character["Siblings"] = "Middle (Younger Siblings: %d, Older Siblings: %d)" % (sibs1, sibs2)
elif number < 96:
sibs = randrange(1, 5)
character["Siblings"] = "Youngest (Older Siblings: %d)" % sibs
else:
character["Siblings"] = "Twin"
number = randrange(1, 101)
if number < 21:
character["Grandparents"] = "No Grandparents"
elif number < 31:
character["Grandparents"] = "Mother's Parents Alive"
elif number < 41:
character["Grandparents"] = "Father's Parents Alive"
elif number < 61:
character["Grandparents"] = "One Grandparent on Each Side"
elif number < 71:
character["Grandparents"] = "Three Grandparents Alive"
elif number < 81:
character["Grandparents"] = "Great-Grandparent Alive"
else:
character["Grandparents"] = "Grandparents Unknown"
number = randrange(1, 101)
if number < 11:
character["Extended Family"] = "None"
elif number < 21:
character["Extended Family"] = "No Known Relatives"
elif number < 56:
relatives = randrange(1, 11)
character["Extended Family"] = "%d Living Relatives" % relatives
elif number < 91:
relatives = randrange(1, 13)
relatives = relatives + randrange(1, 13)
character["Extended Family"] = "%d Living Relatives" % relatives
else:
character["Extended Family"] = "Huge Extended Family"
number = randrange(1, 101)
if number < 16:
character["Friends"] = "No Friends"
elif number < 31:
character["Friends"] = "Lost"
elif number < 51:
character["Friends"] = "Few"
elif number < 81:
character["Friends"] = "Some"
else:
character["Friends"] = "Many"
number = randrange(1, 101)
if number < 16:
character["Enemies"] = "No Enemies. Yet..."
elif number < 26:
character["Enemies"] = "Minor Childhood Enemy"
elif number < 31:
character["Enemies"] = "Jilted Lover"
elif number < 36:
character["Enemies"] = "Jilted Lover's Friend or Relative"
elif number < 41:
character["Enemies"] = "Romantic Rival"
elif number < 51:
character["Enemies"] = "Enemy of the Family"
elif number < 56:
character["Enemies"] = "The Enemy of My Friend Is My Enemy"
elif number < 61:
character["Enemies"] = "Social Rival"
elif number < 66:
character["Enemies"] = "Villain"
elif number < 71:
character["Enemies"] = "Monster"
elif number < 76:
character["Enemies"] = "Alignment Enemy"
elif number < 81:
character["Enemies"] = "Political Enemy"
elif number < 86:
character["Enemies"] = "Arcane Rival"
elif number < 91:
character["Enemies"] = "Diabolic Enemy"
elif number < 96:
character["Enemies"] = "Enemy Within"
else:
character["Enemies"] = "Imaginary Foe"
number = randrange(1, 101)
if number < 16:
character["Instructors"] = "No Instructors of Note"
elif number < 41:
character["Instructors"] = "Basic"
elif number < 51:
character["Instructors"] = "Advanced"
elif number < 56:
character["Instructors"] = "Angry"
elif number < 61:
character["Instructors"] = "Vanished"
elif number < 66:
character["Instructors"] = "Favor"
elif number < 81:
character["Instructors"] = "Unrelated"
elif number < 91:
character["Instructors"] = "Lower Class"
elif number < 96:
character["Instructors"] = "Other Race"
else:
character["Instructors"] = "Exotic"
number = randrange(1, 24)
if number == 1:
character["Archetype"] = "Agent"
elif number == 2:
character["Archetype"] = "Challenger"
elif number == 3:
character["Archetype"] = "Companion"
elif number == 4:
character["Archetype"] = "Crusader"
elif number == 5:
character["Archetype"] = "Daredevil"
elif number == 6:
character["Archetype"] = "Explorer"
elif number == 7:
character["Archetype"] = "Innocent"
elif number == 8:
character["Archetype"] = "Leader"
elif number == 9:
character["Archetype"] = "Martyr"
elif number == 10:
character["Archetype"] = "Mercentary"
elif number == 11:
character["Archetype"] = "Orphan"
elif number == 12:
character["Archetype"] = "Prophet"
elif number == 13:
character["Archetype"] = "Rebel"
elif number == 14:
character["Archetype"] = "Renegade"
elif number == 15:
character["Archetype"] = "Royalty"
elif number == 16:
character["Archetype"] = "Sage"
elif number == 17:
character["Archetype"] = "Savage"
elif number == 18:
character["Archetype"] = "Seeker"
elif number == 19:
character["Archetype"] = "Simple Soul"
elif number == 20:
character["Archetype"] = "Strategist"
elif number == 21:
character["Archetype"] = "Theorist"
elif number == 22:
character["Archetype"] = "Trickster"
else:
character["Archetype"] = "Wanderer"
personalityTraits = []
traitNumber = randrange(2, 5)
traits = ["Ambitious", "Angry", "Boastful", "Bold", "Brutal", "Calm", "Carefree", "Charming", "Connected", "Conservative", "Disciplined", "Driven", "Energetic", "Erudite", "Exotic", "Fatalistic", "Flamboyant", "Funny", "Greedy", "Kind", "Loyal", "Merciful", "Naive", "Patriotic", "Peaceful", "Reformed", "Religious", "Serious", "Skilled", "Vengeful"]
while traitNumber > 0:
number = randrange(0, len(traits))
trait = traits[number]
personalityTraits.append(trait)
traits.remove(trait)
traitNumber -= 1
personalityTraits.sort()
    character["Traits"] = ", ".join(personalityTraits)
number = randrange(1, 5)
if number < 3:
character["Gender"] = "Male"
else:
character["Gender"] = "Female"
age_dic = {"Human": 15, "Dwarf": 40, "Elf": 110, "Gnome": 40, "Half-Elf": 20, "Halfling": 20, "Half-Orc": 14}
if job in ["Barbarian", "Rogue", "Sorcerer"]:
if race in ["Human", "Half-Orc"]:
number = 1
die = 4
elif race == "Dwarf":
number = 3
die = 6
elif race in ["Elf", "Gnome"]:
number = 4
die = 6
elif race == "Half-Elf":
number = 1
die = 6
else:
number = 2
die = 4
elif job in ["Bard", "Fighter", "Paladin", "Ranger"]:
if race in ["Human", "Half-Orc"]:
number = 1
die = 6
elif race == "Dwarf":
number = 5
die = 6
elif race in ["Elf", "Gnome"]:
number = 6
die = 6
elif race == "Half-Elf":
number = 2
die = 6
else:
number = 3
die = 6
else:
if race in ["Human", "Half-Orc"]:
number = 2
die = 6
elif race == "Dwarf":
number = 7
die = 6
elif race == "Elf":
number = 10
die = 6
elif race == "Gnome":
number = 9
die = 6
elif race == "Half-Elf":
number = 3
die = 6
else:
number = 4
die = 6
result = diceRoll(number, die)
age = age_dic[race] + result
character["Age"] = str(age)
gender = character["Gender"]
result = 0
if race == "Human":
if gender == "Male":
base = 58
else:
base = 53
result = diceRoll(2, 10)
elif race == "Dwarf":
if gender == "Male":
base = 45
else:
base = 43
result = diceRoll(2, 4)
elif race == "Elf":
if gender == "Male":
base = 53
else:
base = 53
result = diceRoll(2, 6)
elif race == "Gnome":
if gender == "Male":
base = 36
else:
base = 34
result = diceRoll(2, 4)
elif race == "Half-Elf":
if gender == "Male":
base = 55
else:
base = 53
result = diceRoll(2, 8)
elif race == "Half-Orc":
if gender == "Male":
base = 58
else:
base = 53
result = diceRoll(2, 12)
else:
if gender == "Male":
base = 32
else:
base = 30
result = diceRoll(2, 4)
inches = base + result
    feet, extra_inches = divmod(inches, 12)
    height = "%s ft. %s in." % (feet, extra_inches)
character["Height"] = height
print "Generated by Nativity in Bits 0.1.5\nSee the Hero Builder's Guidebook (pg. 38) for more information about some of these terms.\n\nAdventurer Statistics"
print "-----------------------------------"
print "Class = " + character["Class"] + ""
print "Race = " + character["Race"] + ""
print "Alignment = " + character["Alignment"] + ""
print "Age = " + character["Age"] + ""
print "Gender = " + character["Gender"] + ""
print "Height = " + character["Height"] + ""
print "Temperature Zone = " + character["Temperature Zone"] + ""
print "Terrain = " + character["Terrain"] + ""
print "Community = " + character["Community"] + ""
print "Family Economic Status = " + character["Family Economic Status"] + ""
print "Family Social Standing = " + character["Family Social Standing"] + ""
print "Family Defense Readiness = " + character["Family Defense Readiness"] + ""
print "Family Private Ethics = " + character["Family Private Ethics"] + ""
print "Family Public Ethics = " + character["Family Public Ethics"] + ""
print "Family Religious Commitment = " + character["Family Religious Commitment"] + ""
print "Family Reputation = " + character["Family Reputation"] + ""
print "Family Political Views = " + character["Family Political Views"] + ""
print "Family Power Structure = " + character["Family Power Structure"] + ""
print "Ancestors of Note = " + character["Ancestors of Note"] + ""
print "Early Childhood Instruction = " + character["Early Childhood Instruction"] + ""
print "Formal Education = " + character["Formal Education"] + ""
print "Learning a Trade = " + character["Learning a Trade"] + ""
print "Early Childhood Events = " + character["Early Childhood Events"] + ""
print "Youth Events = " + character["Youth Events"] + ""
print "Pivotal Events = " + character["Pivotal Events"] + ""
print "Parents = " + character["Parents"] + ""
print "Siblings = " + character["Siblings"] + ""
print "Grandparents = " + character["Grandparents"] + ""
print "Extended Family = " + character["Extended Family"] + ""
print "Friends = " + character["Friends"] + ""
print "Enemies = " + character["Enemies"] + ""
print "Instructors = " + character["Instructors"] + ""
print "Personality Archetype = " + character["Archetype"] + ""
print "Personality Traits = " + character["Traits"] + ""
loop = 1
while loop == 1:
print "\n\n\nDo you want to save this data?"
print "\n--Options--"
print "1. Yes"
print "2. No\n"
try:
selection = input("Make a selection: ")
except (NameError, SyntaxError):
print "\nInvalid Selection"
else:
            if selection == 1 or selection == 2:
loop = 0
            if selection == 1:
write_file()
print '\nData saved in file "adventurer.txt"'
print "\nShutting down..."
else:
print "\nInvalid Selection"
| gpl-3.0 | -7,664,956,768,203,536,000 | 29.126677 | 350 | 0.642839 | false |
guardicore/monkey | monkey/infection_monkey/model/host.py | 1 | 1374 | __author__ = "itamar"
class VictimHost(object):
def __init__(self, ip_addr, domain_name=""):
self.ip_addr = ip_addr
self.domain_name = str(domain_name)
self.os = {}
self.services = {}
self.icmp = False
self.monkey_exe = None
self.default_tunnel = None
self.default_server = None
def as_dict(self):
return self.__dict__
def __hash__(self):
return hash(self.ip_addr)
def __eq__(self, other):
if not isinstance(other, VictimHost):
return False
return self.ip_addr.__eq__(other.ip_addr)
def __cmp__(self, other):
if not isinstance(other, VictimHost):
return -1
return self.ip_addr.__cmp__(other.ip_addr)
def __repr__(self):
return "VictimHost({0!r})".format(self.ip_addr)
def __str__(self):
victim = "Victim Host %s: " % self.ip_addr
victim += "OS - ["
for k, v in list(self.os.items()):
victim += "%s-%s " % (k, v)
victim += "] Services - ["
for k, v in list(self.services.items()):
victim += "%s-%s " % (k, v)
victim += "] ICMP: %s " % (self.icmp)
victim += "target monkey: %s" % self.monkey_exe
return victim
def set_default_server(self, default_server):
self.default_server = default_server
| gpl-3.0 | 2,676,835,333,894,688,300 | 27.040816 | 55 | 0.524017 | false |
pyQode/pyqode.core | test/test_panels/test_folding.py | 1 | 4719 | import os
import pytest
from pyqode.core.api import TextHelper, TextBlockHelper
from pyqode.qt import QtCore
from pyqode.qt.QtTest import QTest
from pyqode.core import panels
from test.helpers import editor_open, ensure_visible
def get_panel(editor):
return editor.panels.get(panels.FoldingPanel)
def test_enabled(editor):
panel = get_panel(editor)
assert panel.enabled
panel.enabled = False
panel.enabled = True
# @ensure_visible
# @editor_open('test/test_api/folding_cases/foo.py')
# @pytest.mark.skipif('TRAVIS' not in os.environ,
# reason="tested only on travis")
# @pytest.mark.xfail
# def test_mouse_move(editor):
# panel = get_panel(editor)
# panel.highlight_caret_scope = False
# nb_decos = len(editor.decorations)
# y_pos = TextHelper(editor).line_pos_from_number(8)
# QTest.mouseMove(panel, QtCore.QPoint(3, y_pos + 5))
# QTest.qWait(1000)
# assert len(editor.decorations) >= 2
# y_pos = TextHelper(editor).line_pos_from_number(14)
# QTest.mouseMove(panel, QtCore.QPoint(3, y_pos + 5))
# QTest.qWait(1000)
# assert len(editor.decorations) >= 4
# QTest.mouseMove(panel, QtCore.QPoint(0, 0))
# panel.leaveEvent(None)
# editor.setFocus()
# panel.highlight_caret_scope = True
# @ensure_visible
# def toggle_fold_trigger(editor, line, panel):
# y_pos = TextHelper(editor).line_pos_from_number(line) + 5
# QTest.mouseMove(panel, QtCore.QPoint(3, y_pos))
# QTest.qWait(1000)
# QTest.mousePress(panel, QtCore.Qt.RightButton, QtCore.Qt.NoModifier,
# QtCore.QPoint(3, y_pos))
# QTest.qWait(1000)
# @ensure_visible
# @editor_open('test/test_api/folding_cases/foo.py')
# @pytest.mark.skipif('TRAVIS' not in os.environ,
# reason="tested only on travis")
# @pytest.mark.xfail
# def test_mouse_press(editor):
# panel = get_panel(editor)
# panel.highlight_caret_scope = False
# # fold child block
# toggle_fold_trigger(editor, 15, panel)
# block = editor.document().findBlockByNumber(14)
# assert TextBlockHelper.is_fold_trigger(block) is True
# assert TextBlockHelper.is_collapsed(block) is True
# block = block.next()
# while block.blockNumber() < 21:
# assert block.isVisible() is False
# block = block.next()
# # fold top level block
# toggle_fold_trigger(editor, 9, panel)
# block = editor.document().findBlockByNumber(8)
# assert TextBlockHelper.is_fold_trigger(block)
# block = block.next()
# while block.blockNumber() < 27:
# if block.blockNumber() == 14:
# assert TextBlockHelper.is_fold_trigger(block) is True
# assert TextBlockHelper.is_collapsed(block) is True
# assert block.isVisible() is False
# block = block.next()
# # unfold it top level block
# toggle_fold_trigger(editor, 9, panel)
# block = editor.document().findBlockByNumber(8)
# assert TextBlockHelper.is_fold_trigger(block)
# block = block.next()
# while block.blockNumber() < 27:
# assert block.isVisible() is True
# block = block.next()
# # cleanup
# QTest.mouseMove(panel, QtCore.QPoint(0, 0))
# panel.leaveEvent(None)
# editor.setFocus()
# panel.highlight_caret_scope = True
# @ensure_visible
# @editor_open('test/test_api/folding_cases/foo.py')
# @pytest.mark.skipif('TRAVIS' not in os.environ,
# reason="tested only on travis")
# @pytest.mark.xfail
# def test_collapse_all(editor):
# panel = get_panel(editor)
# QTest.qWait(1000)
# panel.collapse_all()
# QTest.qWait(1000)
# block = editor.document().firstBlock()
# while block.blockNumber() < editor.document().blockCount() - 1:
# blank_line = len(block.text().strip()) == 0
# if TextBlockHelper.get_fold_lvl(block) > 0:
# if not blank_line:
# assert block.isVisible() is False
# else:
# assert block.isVisible() is True
# if TextBlockHelper.is_fold_trigger(block):
# assert TextBlockHelper.is_collapsed(block) is True
# block = block.next()
# @ensure_visible
# @editor_open('test/test_api/folding_cases/foo.py')
# @pytest.mark.xfail
# def test_expand_all(editor):
# panel = get_panel(editor)
# QTest.qWait(1000)
# panel.collapse_all()
# QTest.qWait(1000)
# panel.expand_all()
# block = editor.document().firstBlock()
# while block.blockNumber() < editor.document().blockCount() - 1:
# assert block.isVisible()
# if TextBlockHelper.is_fold_trigger(block):
# assert TextBlockHelper.is_collapsed(block) is False
# block = block.next()
| mit | 1,535,670,619,670,955,300 | 33.955556 | 74 | 0.643357 | false |
cmr/automatafl | old_python_prototype/rl_learn.py | 1 | 4730 | import argparse, random
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, AlphaDropout, Dropout, Flatten
from keras.optimizers import RMSprop, Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
from model import Game, Board, Plebeian
import model
parser = argparse.ArgumentParser(description='Train a learning agent to play Automatafl.')
parser.add_argument('save', help='Save weights to this file')
parser.add_argument('-L', '--load', dest='load', help='Load these weights before training')
parser.add_argument('-s', '--steps', dest='steps', type=int, default=100000, help='Perform this many training steps')
parser.add_argument('--dropout', dest='dropout', type=float, default=0.02, help='Drop this fraction of values betwen the internal layers to prevent overfit')
parser.add_argument('--memory', dest='memory', type=int, default=10000, help='Remember this many past moves for the learner')
parser.add_argument('--against', dest='against', help='Load this file as the adversary (instead of a random agent)')
parser.add_argument('--rand-rate', dest='rand_rate', type=float, default=0.02, help='Have the adversary move randomly at this rate')
parser.add_argument('--learn-rate', dest='learn_rate', type=float, default=0.1, help='Initial learning rate')
parser.add_argument('--layers', dest='layers', type=int, default=8, help='Use this many hidden layers')
parser.add_argument('--width', dest='width', type=int, default=128, help='Each hidden layer has this many neurons')
parser.add_argument('--update', dest='update', type=int, default=32, help='Update the target model with learned data after this many steps')
args = parser.parse_args()
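# Example invocation (the weight file names are placeholders for your own
# files): train for 50,000 steps against a previously saved adversary that is
# forced to move randomly 5% of the time:
#
#   python rl_learn.py primary.h5f --steps 50000 --against rival.h5f --rand-rate 0.05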
plebs = [Plebeian(i) for i in range(1, 3)]
def setup_game():
return Game(*plebs, setup=[
# [2, 0, 0, 2, 0, 0, 2],
# [0, 0, 1, 2, 1, 0, 0],
# [1, 0, 0, 0, 0, 0, 1],
# [2, 0, 0, 3, 0, 0, 2],
# [1, 0, 0, 0, 0, 0, 1],
# [0, 0, 1, 2, 1, 0, 0],
# [2, 0, 0, 2, 0, 0, 2],
# ], goals=[[(0, 0), (0, 6)], [(6, 0), (6, 6)]])
[2, 0, 1, 0, 2],
[0, 0, 0, 0, 0],
[2, 0, 3, 0, 2],
[0, 0, 0, 0, 0],
[2, 0, 1, 0, 2],
], goals=[[(0, 0), (4, 0)], [(0, 4), (4, 4)]])
game = setup_game()
NUM_ACTIONS = game.NumActions()
NUM_STATES = len(game.StateVector(plebs[0]))
#print(NUM_ACTIONS)
#print(NUM_STATES)
#exit()
def make_net(primary):
mdl = Sequential()
mdl.add(Flatten(input_shape=(args.memory, NUM_STATES)))
mdl.add(Dropout(args.dropout))
mdl.add(Dense(args.width, input_shape=(NUM_STATES,), activation='relu'))
mdl.add(Dropout(args.dropout))
if primary:
for i in range(args.layers - 1):
mdl.add(Dense(args.width, activation='relu', kernel_initializer='lecun_uniform'))
mdl.add(Dropout(args.dropout))
mdl.add(Dense(NUM_ACTIONS))
return mdl
def make_agent(prim, load):
nn = make_net(True)
mem = SequentialMemory(limit=args.memory, window_length=args.memory)
pol = BoltzmannQPolicy()
dqn = DQNAgent(model=nn, nb_actions=NUM_ACTIONS, memory=mem, policy=pol, target_model_update=args.update)
dqn.compile(Adam(lr=args.learn_rate), metrics=['mae'])
if load:
dqn.load_weights(load)
return dqn
cur = make_agent(True, args.load)
if args.against:
adv = make_agent(True, args.against)
steps = 0
class GameEnv(object):
def reset(self):
global game, steps
game = setup_game()
steps = 0
print('Game reset')
return game.StateVector(plebs[0])
def render(self, mode='human', close=False):
pass
def close(self):
pass
def step(self, act):
global steps
steps += 1
game.PoseAgentMove(plebs[0], act)
if args.against and random.random() > args.rand_rate:
game.PoseAgentMove(plebs[1], adv.forward(game.StateVector(plebs[1])))
else:
game.PoseAgentMove(plebs[1], random.randrange(0, NUM_ACTIONS))
winner = None
for ev in game.GlobalEvents():
if ev.__class__ is model.TurnOver and ev.winner is not None:
winner = ev.winner
print(f'Game won on step {steps} by {winner}')
if ev.__class__ is model.Conflict:
print(f'Conflict on step {steps}')
for pleb in plebs:
pleb.Events()
retval = (
game.StateVector(plebs[0]),
game.RewardScalar(plebs[0]),
winner is not None,
{},
)
return retval
cur.fit(GameEnv(), nb_steps=args.steps, log_interval=args.update)
cur.save_weights(args.save, overwrite=True)
| apache-2.0 | -586,225,406,380,804,200 | 35.666667 | 157 | 0.625581 | false |
jarshwah/optimising-django-queries | shop/shop/models.py | 1 | 1521 | from django.db import models
from django.utils.functional import cached_property as buffered_property
from django.utils import timezone
class Category(models.Model):
name = models.CharField(max_length=32)
def __str__(self):
return self.name
class Feature(models.Model):
name = models.CharField(max_length=32)
value = models.CharField(max_length=32)
visible = models.BooleanField(default=True)
class Meta:
ordering = ['name']
def __str__(self):
return f'{self.name} = {self.value}'
class Product(models.Model):
name = models.CharField(max_length=32)
category = models.ForeignKey(Category)
features = models.ManyToManyField(Feature)
price = models.DecimalField(max_digits=6, decimal_places=2)
def __str__(self):
return self.name
@buffered_property
def all_features(self):
return list(self.features.all())
@property
def visible_features_python(self):
return [feature for feature in self.all_features if feature.visible]
@property
def invisible_features_python(self):
return [feature for feature in self.all_features if not feature.visible]
@property
def visible_features_database(self):
return self.features.filter(visible=True)
@property
def invisible_features_database(self):
return self.features.filter(visible=False)
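# Note on Product.all_features (illustrative usage, not part of the original
# models): the buffered property is intended to pair with prefetch_related so
# that the *_python feature properties filter in memory instead of issuing one
# query per product:
#
#   products = Product.objects.select_related('category').prefetch_related('features')
#   for product in products:
#       product.visible_features_python    # no extra query per product
#       product.invisible_features_python  # reuses the same cached list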
class Sale(models.Model):
product = models.ForeignKey(Product)
sale_date = models.DateTimeField(default=timezone.now)
| bsd-2-clause | 446,022,595,812,349,600 | 25.684211 | 80 | 0.692308 | false |
shikhir-arora/Giesela | musicbot/cleverbot.py | 1 | 3577 | """
CleverWrap.py
Python wrapper for Cleverbot's API.
http://www.cleverbot.com/api
Copyright 2017 Andrew Edwards
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import requests
class CleverWrap:
""" A simple wrapper class for the www.cleverbot.com api. """
url = "https://www.cleverbot.com/getreply"
def __init__(self, api_key, name="CleverBot"):
""" Initialize the class with an api key and optional name
:type name: string
:type api_key: string
        :type history: dict
:type convo_id: string
:type cs: string
:type count: int
:type time_elapsed: int
:type time_taken: int
:type output: string
"""
self.name = name
self.key = api_key
self.history = {}
self.convo_id = ""
self.cs = ""
self.count = 0
self.time_elapsed = 0
self.time_taken = 0
self.output = ""
def say(self, text):
"""
Say something to www.cleverbot.com
:type text: string
Returns: string
"""
params = {
"input": text,
"key": self.key,
"cs": self.cs,
"conversation_id": self.convo_id,
"wrapper": "CleverWrap.py"
}
reply = self._send(params)
self._process_reply(reply)
return self.output
def _send(self, params):
"""
Make the request to www.cleverbot.com
:type params: dict
Returns: dict
"""
# Get a response
try:
r = requests.get(self.url, params=params)
# catch errors, print then exit.
except requests.exceptions.RequestException as e:
print(e)
return r.json()
def _process_reply(self, reply):
""" take the cleverbot.com response and populate properties. """
self.cs = reply.get("cs", None)
self.count = int(reply.get("interaction_count", None))
self.output = reply.get("output", None).encode(
"latin-1").decode("utf-8")
self.convo_id = reply.get("conversation_id", None)
self.history = {key: value for key,
value in reply.items() if key.startswith("interaction")}
self.time_taken = int(reply.get("time_taken", None))
self.time_elapsed = int(reply.get("time_elapsed", None))
def reset(self):
"""
Drop values for self.cs and self.conversation_id
this will start a new conversation with the bot.
"""
self.cs = ""
self.convo_id = ""
| mit | 1,497,654,019,481,257,000 | 37.880435 | 460 | 0.621471 | false |
sekikn/ambari | ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py | 2 | 7478 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import socket
import time
from alerts.base_alert import BaseAlert
from resource_management.libraries.functions.get_port_from_url import get_port_from_url
from ambari_commons import OSCheck
from ambari_commons.inet_utils import resolve_address, get_host_from_url
logger = logging.getLogger(__name__)
# default timeouts
DEFAULT_WARNING_TIMEOUT = 1.5
DEFAULT_CRITICAL_TIMEOUT = 5.0
class PortAlert(BaseAlert):
def __init__(self, alert_meta, alert_source_meta, config):
super(PortAlert, self).__init__(alert_meta, alert_source_meta, config)
self.uri = None
self.default_port = None
self.socket_command = None
self.socket_command_response = None
self.warning_timeout = DEFAULT_WARNING_TIMEOUT
self.critical_timeout = DEFAULT_CRITICAL_TIMEOUT
if 'uri' in alert_source_meta:
self.uri = alert_source_meta['uri']
# always static
if 'default_port' in alert_source_meta:
self.default_port = alert_source_meta['default_port']
if 'reporting' in alert_source_meta:
reporting = alert_source_meta['reporting']
reporting_state_warning = self.RESULT_WARNING.lower()
reporting_state_critical = self.RESULT_CRITICAL.lower()
if reporting_state_warning in reporting and \
'value' in reporting[reporting_state_warning]:
self.warning_timeout = reporting[reporting_state_warning]['value']
if reporting_state_critical in reporting and \
'value' in reporting[reporting_state_critical]:
self.critical_timeout = reporting[reporting_state_critical]['value']
if 'parameters' in alert_source_meta:
for parameter in alert_source_meta['parameters']:
if 'socket.command' == parameter['name']:
self.socket_command = parameter['value']
if 'socket.command.response' == parameter['name']:
self.socket_command_response = parameter['value']
# check warning threshold for sanity
if self.warning_timeout >= 30:
logger.warn("[Alert][{0}] The warning threshold of {1}s is too large, resetting to {2}s".format(
self.get_name(), str(self.warning_timeout), str(DEFAULT_WARNING_TIMEOUT)))
self.warning_timeout = DEFAULT_WARNING_TIMEOUT
# check critical threshold for sanity
if self.critical_timeout >= 30:
logger.warn("[Alert][{0}] The critical threshold of {1}s is too large, resetting to {2}s".format(
self.get_name(), str(self.critical_timeout), str(DEFAULT_CRITICAL_TIMEOUT)))
self.critical_timeout = DEFAULT_CRITICAL_TIMEOUT
def _collect(self):
configurations = self.configuration_builder.get_configuration(self.cluster_id, None, None)
# can be parameterized or static
# if not parameterized, this will return the static value
uri_value = self._get_configuration_value(configurations, self.uri)
host_not_specified = False
if uri_value is None:
host_not_specified = True
uri_value = self.host_name
logger.debug("[Alert][{0}] Setting the URI to this host since it wasn't specified".format(
self.get_name()))
# in some cases, a single property is a comma-separated list like
# host1:8080,host2:8081,host3:8083
uri_value_array = uri_value.split(',')
if len(uri_value_array) > 1:
for item in uri_value_array:
if self.host_name in item:
uri_value = item
if logger.isEnabledFor(logging.DEBUG):
logger.debug("[Alert][{0}] Extracted {1} as the host name while parsing the CSV URI {2}".format(
self.get_name(), uri_value, str(uri_value_array)))
break
host = get_host_from_url(uri_value)
if host is None or host == "localhost" or host == "0.0.0.0":
host = self.host_name
host_not_specified = True
hosts = [host]
# If host is not specified in the uri, hence we are using current host name
# then also add public host name as a fallback.
if host_not_specified and host.lower() == self.host_name.lower() \
and self.host_name.lower() != self.public_host_name.lower():
hosts.append(self.public_host_name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug("[Alert][{0}] List of hosts = {1}".format(self.get_name(), hosts))
try:
port = int(get_port_from_url(uri_value))
except:
if self.default_port is None:
label = 'Unable to determine port from URI {0}'.format(uri_value)
return (self.RESULT_UNKNOWN, [label])
port = self.default_port
exceptions = []
for host in hosts:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("[Alert][{0}] Checking {1} on port {2}".format(
self.get_name(), host, str(port)))
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.critical_timeout)
if OSCheck.is_windows_family():
# on windows 0.0.0.0 is invalid address to connect but on linux it resolved to 127.0.0.1
host = resolve_address(host)
start_time = time.time()
s.connect((host, port))
if self.socket_command is not None:
s.sendall(self.socket_command)
data = s.recv(1024)
if self.socket_command_response is not None and data != self.socket_command_response:
raise Exception("Expected response {0}, Actual response {1}".format(
self.socket_command_response, data))
end_time = time.time()
        # time.time() returns seconds, so the elapsed time is already in seconds
        seconds = end_time - start_time
# not sure why this happens sometimes, but we don't always get a
# socket exception if the connect() is > than the critical threshold
if seconds >= self.critical_timeout:
return (self.RESULT_CRITICAL, ['Socket Timeout', host, port])
result = self.RESULT_OK
if seconds >= self.warning_timeout:
result = self.RESULT_WARNING
return (result, [seconds, port])
except Exception as e:
exceptions.append(e)
finally:
if s is not None:
try:
s.close()
except:
# no need to log a close failure
pass
if exceptions:
return (self.RESULT_CRITICAL, [str(exceptions[0]), hosts[0], port])
def _get_reporting_text(self, state):
'''
Gets the default reporting text to use when the alert definition does not
contain any.
:param state: the state of the alert in uppercase (such as OK, WARNING, etc)
:return: the parameterized text
'''
if state == self.RESULT_OK or state == self.RESULT_WARNING:
return 'TCP OK - {0:.4f} response on port {1}'
return 'Connection failed: {0} to {1}:{2}'
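  # Formatting sketch: _collect() returns (state, [seconds, port]) on success
  # and (RESULT_CRITICAL, [error, host, port]) on failure, so the templates
  # above render to e.g. "TCP OK - 0.0042 response on port 8080" (the numbers
  # here are illustrative).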
| apache-2.0 | -2,506,284,335,676,159,000 | 36.577889 | 108 | 0.664482 | false |
alunduil/muniments | test_muniments/test_unit/test_scheduler/test_api/test_schedule/collection_head_36de4ea8bd99487d891408ee0b739c3c.py | 1 | 1470 | # Copyright (C) 2015 by Alex Brandt <[email protected]>
#
# muniments is freely distributable under the terms of an MIT-style license.
# See COPYING or http://www.opensource.org/licenses/mit-license.php.
from muniments.scheduler.api import information
from test_muniments.test_fixtures import register_fixture
from test_muniments.test_unit.test_scheduler.test_api.test_schedule import SchedulesRequestFixture
from test_muniments.test_unit.test_scheduler.test_models.test_schedule.list_1fd27e1f92ba4ff5b0bffff979556e92 import f_1fd27e1f92ba4ff5b0bffff979556e92 as SCHEDULES
register_fixture(globals(), ( SchedulesRequestFixture, ), {
'request': {
'method': 'HEAD',
'url': '/{i.API_VERSION}/schedules/'.format(i = information),
},
'response': lambda self: {
'status': 200,
'headers': {
'Content-Type': 'application/json',
},
},
'list_fixture': SCHEDULES,
})
register_fixture(globals(), ( SchedulesRequestFixture, ), {
'description': 'CORS',
'request': {
'method': 'HEAD',
'url': '/{i.API_VERSION}/schedules/'.format(i = information),
'headers': {
'Origin': 'http://127.0.0.1:5000',
},
},
'response': lambda self: {
'status': 200,
'headers': {
'Access-Control-Allow-Origin': 'http://127.0.0.1:5000',
'Content-Type': 'application/json',
},
},
'list_fixture': SCHEDULES,
})
| mit | -3,445,732,559,696,103,400 | 29.625 | 163 | 0.632653 | false |
insertion/opinion-mining | Spider/scrapy/getComment/getComment/spiders/getcomment.py | 1 | 2917 | # -*- coding: utf-8 -*-
import scrapy
import json
from getComment.items import GetcommentItem
import codecs
# Cookies must be included, otherwise some pages cannot be accessed (no permission)
class GetcommentSpider(scrapy.Spider):
name = "getComment"
allowed_domains = ["douban.com"]
cookie={ '__utma':"30149280.901747088.1445074673.1463148044.1463205092.69",
'__utma':"223695111.47263706.1446025707.1463148044.1463205092.27",
'__utmb':"30149280.0.10.1463205092",
'__utmb':"223695111.0.10.1463205092",
'__utmc':"30149280",
'__utmc':"223695111",
'__utmv':"30149280.13938",
'__utmz':"30149280.1463051064.63.51.utmcsr=baidu|utmccn=(organic)|utmcmd=organic",
'__utmz':"223695111.1463035423.19.13.utmcsr=baidu|utmccn=(organic)|utmcmd=organic",
'_pk_id.100001.4cf6':"54f6d2f316960e51.1446025708.27.1463205379.1463148922.",
'_pk_ref.100001.4cf6':'["","",1463204969,"http://www.baidu.com/link?url=YQLEs5QV1zmk47dXRps0dqtoMVwYwRFUN5-N9639eoU21p9BFeaxhNRstgUq9Vvs&wd=&eqid=f68d50f40003ae9a000000035734261a"]',
'_pk_ses.100001.4cf6':"*",
'ap':"1",
'bid':'"8P5Iz4n5Ws8"',
'ck':"8vtY",
'ct':"y",
'dbcl2':'"59034306:TCI0yjpqBT4"',
'gr_user_id':"8121958b-b647-4f44-bc4a-6ce28baf2d5d",
'll':'"118163"',
'ps':"y",
'push_doumail_num':"38",
'push_noty_num':"6",
'ue':'"[email protected]"',
'viewed':'"1756954_1052241_1831698_25952655_1231361_7906768_24703171_3288908_2305237_6510682"',
}
    # cookie is a dict
start_urls = []
def __init__(self):
file=open('film_URL.json')
for line in file.readlines():
js=json.loads(line)
url=js['url'][0]+'comments'
self.start_urls.append(url)
file.close()
def parse(self, response):
filmname=response.xpath('//*[@id="content"]/h1/text()').extract()[0]+'.json'
        # extract() returns a list; its first element is a unicode string
file=codecs.open(filmname,'ab',encoding='utf-8')
next=response.xpath('//*[@id="paginator"]/a[@class="next"]/@href').extract()
item=GetcommentItem()
item['comment']=response.xpath('//*[@id="comments"]/div[@class="comment-item"]/div[2]/p/text()').extract()
item['title'] =response.xpath('//*[@id="comments"]/div[@class="comment-item"]/div[2]/h3/span[2]/span[1]/@title').extract()
commentlines = json.dumps(dict(item),ensure_ascii=False) + "\n"
file.write(commentlines)
if next:
next_url=response.url.split('?')[0]+next[0]
if int(next[0].split('=')[1].split('&')[0]) < 10000:
                # only take the first 10000 comments
return scrapy.Request(next_url,self.parse,cookies=self.cookie)
| mit | 7,418,473,381,262,514,000 | 46.116667 | 195 | 0.576937 | false |
OrbitzWorldwide/droned | droned/lib/droned/responders/events.py | 1 | 2722 | ###############################################################################
# Copyright 2006 to the present, Orbitz Worldwide, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from droned.models.event import Event
from droned.responders import responder, dispatch
@responder(pattern="^subscribe (?P<event>\S+)", form="subscribe <event>|all", help="Subscribe to event notifications")
def subscribe(conversation, event):
subscriptions = set(conversation.context.get('subscriptions', set()))
if event == 'all':
events = Event.objects
else:
events = [ Event(event) ]
for event in events:
conversation.say("Subscribed to %s events" % event.name)
subscriptions.add(event.name)
event.subscribe(conversation.notify)
conversation.context['subscriptions'] = subscriptions
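# Example (illustrative): a chat line such as "subscribe all" matches the
# pattern above and attaches conversation.notify to every known Event, while
# "subscribe someEventName" subscribes to that single event.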
@responder(pattern="^unsubscribe (?P<event>\S+)", form="unsubscribe <event>|all", help="Unsubscribe from event notifications")
def unsubscribe(conversation, event):
subscriptions = set(conversation.context.get('subscriptions', []))
if event == 'all':
eventList = Event.objects
else:
eventList = [ Event(event) ]
for event in eventList:
conversation.say("Unsubscribed from %s events" % event.name)
event.unsubscribe(conversation.notify)
subscriptions.discard(event.name)
conversation.context['subscriptions'] = sorted(subscriptions)
@responder(pattern=".*<b>(?P<event>\S+)</b>.*occurred \((?P<string>.*)\).*")
def notification(conversation, event, string):
#hopefully you know how to parse this string
if Event.exists(event):
context = {
'conversation': conversation,
'message': string,
'event': event,
}
Event(event).fire(**context)
@responder(pattern="<b>Announcement from .*</b> ::: (?P<string>.*)")
def announcement(conversation, string):
return dispatch(conversation, string)
@responder(pattern="Sorry I don't know what you mean by that.*")
def circular_conversation(conversation, *args):
"""Blackhole these circular conversations"""
return
| apache-2.0 | -4,877,406,572,939,446,000 | 39.029412 | 126 | 0.646951 | false |
mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part02-e07_file_extensions/test/test_file_extensions.py | 1 | 1537 | #!/usr/bin/env python3
import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load, get_out
module_name="src.file_extensions"
file_extensions = load(module_name, "file_extensions")
main = load(module_name, "main")
class FileExtensions(unittest.TestCase):
@points('p02-07.1')
def test_first(self):
correct_d = {'txt': ['file1.txt', 'file2.txt'],
'pdf': ['mydocument.pdf'],
'gz': ['archive.tar.gz']}
no_extension, d = file_extensions("src/filenames.txt")
self.assertEqual(no_extension, ["test"],
msg="There should be exactly one filename without an extension!")
self.assertEqual(d, correct_d, msg="The dictionary of files with an extension is incorrect!")
@points('p02-07.1')
def test_calls(self):
with patch('builtins.open', side_effect=open) as o:
file_extensions("src/filenames.txt")
o.assert_called_once()
@points('p02-07.2')
def test_main(self):
with patch('src.file_extensions.file_extensions', side_effect=[([], {})]) as fe:
main()
self.assertEqual(fe.call_count, 1,
msg="You should call function 'file_extensions' from main!")
result = get_out().split('\n')
self.assertEqual(len(result), 1, msg="Expected one line of output!")
self.assertEqual(result[0], "0 files with no extension")
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -542,415,368,093,614,340 | 33.931818 | 101 | 0.595316 | false |
github-borat/cinder | cinder/volume/drivers/san/hp/hp_3par_fc.py | 1 | 15398 | # (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for HP 3PAR Storage array.
This driver requires 3.1.3 firmware on the 3PAR array, using
the 3.x version of the hp3parclient.
You will need to install the python hp3parclient.
sudo pip install --upgrade "hp3parclient>=3.0"
Set the following in the cinder.conf file to enable the
3PAR Fibre Channel Driver along with the required flags:
volume_driver=cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver
"""
try:
from hp3parclient import exceptions as hpexceptions
except ImportError:
hpexceptions = None
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder import utils
import cinder.volume.driver
from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon
from cinder.volume.drivers.san import san
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class HP3PARFCDriver(cinder.volume.driver.FibreChannelDriver):
"""OpenStack Fibre Channel driver to enable 3PAR storage array.
Version history:
1.0 - Initial driver
1.1 - QoS, extend volume, multiple iscsi ports, remove domain,
session changes, faster clone, requires 3.1.2 MU2 firmware,
copy volume <--> Image.
1.2.0 - Updated the use of the hp3parclient to 2.0.0 and refactored
the drivers to use the new APIs.
1.2.1 - Synchronized extend_volume method.
1.2.2 - Added try/finally around client login/logout.
1.2.3 - Added ability to add WWNs to host.
1.2.4 - Added metadata during attach/detach bug #1258033.
1.3.0 - Removed all SSH code. We rely on the hp3parclient now.
2.0.0 - Update hp3parclient API uses 3.0.x
2.0.2 - Add back-end assisted volume migrate
2.0.3 - Added initiator-target map for FC Zone Manager
2.0.4 - Added support for managing/unmanaging of volumes
2.0.5 - Only remove FC Zone on last volume detach
2.0.6 - Added support for volume retype
"""
VERSION = "2.0.6"
def __init__(self, *args, **kwargs):
super(HP3PARFCDriver, self).__init__(*args, **kwargs)
self.common = None
self.configuration.append_config_values(hpcommon.hp3par_opts)
self.configuration.append_config_values(san.san_opts)
def _init_common(self):
return hpcommon.HP3PARCommon(self.configuration)
def _check_flags(self):
"""Sanity check to ensure we have required options set."""
required_flags = ['hp3par_api_url', 'hp3par_username',
'hp3par_password',
'san_ip', 'san_login', 'san_password']
self.common.check_flags(self.configuration, required_flags)
@utils.synchronized('3par', external=True)
def get_volume_stats(self, refresh):
self.common.client_login()
try:
stats = self.common.get_volume_stats(refresh)
stats['storage_protocol'] = 'FC'
stats['driver_version'] = self.VERSION
backend_name = self.configuration.safe_get('volume_backend_name')
stats['volume_backend_name'] = (backend_name or
self.__class__.__name__)
return stats
finally:
self.common.client_logout()
def do_setup(self, context):
self.common = self._init_common()
self._check_flags()
self.common.do_setup(context)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
self._check_flags()
@utils.synchronized('3par', external=True)
def create_volume(self, volume):
self.common.client_login()
try:
metadata = self.common.create_volume(volume)
return {'metadata': metadata}
finally:
self.common.client_logout()
@utils.synchronized('3par', external=True)
def create_cloned_volume(self, volume, src_vref):
self.common.client_login()
try:
new_vol = self.common.create_cloned_volume(volume, src_vref)
return {'metadata': new_vol}
finally:
self.common.client_logout()
@utils.synchronized('3par', external=True)
def delete_volume(self, volume):
self.common.client_login()
try:
self.common.delete_volume(volume)
finally:
self.common.client_logout()
@utils.synchronized('3par', external=True)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot.
TODO: support using the size from the user.
"""
self.common.client_login()
try:
metadata = self.common.create_volume_from_snapshot(volume,
snapshot)
return {'metadata': metadata}
finally:
self.common.client_logout()
@utils.synchronized('3par', external=True)
def create_snapshot(self, snapshot):
self.common.client_login()
try:
self.common.create_snapshot(snapshot)
finally:
self.common.client_logout()
@utils.synchronized('3par', external=True)
def delete_snapshot(self, snapshot):
self.common.client_login()
try:
self.common.delete_snapshot(snapshot)
finally:
self.common.client_logout()
@fczm_utils.AddFCZone
@utils.synchronized('3par', external=True)
def initialize_connection(self, volume, connector):
"""Assigns the volume to a server.
Assign any created volume to a compute node/host so that it can be
used from that host.
The driver returns a driver_volume_type of 'fibre_channel'.
The target_wwn can be a single entry or a list of wwns that
correspond to the list of remote wwn(s) that will export the volume.
Example return values:
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
}
}
or
{
'driver_volume_type': 'fibre_channel'
'data': {
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
}
}
Steps to export a volume on 3PAR
* Create a host on the 3par with the target wwn
* Create a VLUN for that HOST with the volume we want to export.
"""
self.common.client_login()
try:
# we have to make sure we have a host
host = self._create_host(volume, connector)
# now that we have a host, create the VLUN
vlun = self.common.create_vlun(volume, host)
target_wwns, init_targ_map = self._build_initiator_target_map(
connector)
info = {'driver_volume_type': 'fibre_channel',
'data': {'target_lun': vlun['lun'],
'target_discovered': True,
'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}}
return info
finally:
self.common.client_logout()
@fczm_utils.RemoveFCZone
@utils.synchronized('3par', external=True)
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to unattach a volume from an instance."""
self.common.client_login()
try:
hostname = self.common._safe_hostname(connector['host'])
self.common.terminate_connection(volume, hostname,
wwn=connector['wwpns'])
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
try:
self.common.client.getHostVLUNs(hostname)
except hpexceptions.HTTPNotFound:
# No more exports for this host.
LOG.info(_("Need to remove FC Zone, building initiator "
"target map"))
target_wwns, init_targ_map = self._build_initiator_target_map(
connector)
info['data'] = {'target_wwn': target_wwns,
'initiator_target_map': init_targ_map}
return info
finally:
self.common.client_logout()
def _build_initiator_target_map(self, connector):
"""Build the target_wwns and the initiator target map."""
fc_ports = self.common.get_active_fc_target_ports()
target_wwns = []
for port in fc_ports:
target_wwns.append(port['portWWN'])
initiator_wwns = connector['wwpns']
init_targ_map = {}
for initiator in initiator_wwns:
init_targ_map[initiator] = target_wwns
return target_wwns, init_targ_map
def _create_3par_fibrechan_host(self, hostname, wwns, domain, persona_id):
"""Create a 3PAR host.
Create a 3PAR host, if there is already a host on the 3par using
the same wwn but with a different hostname, return the hostname
used by 3PAR.
"""
# first search for an existing host
host_found = None
for wwn in wwns:
host_found = self.common.client.findHost(wwn=wwn)
if host_found is not None:
break
if host_found is not None:
self.common.hosts_naming_dict[hostname] = host_found
return host_found
else:
persona_id = int(persona_id)
self.common.client.createHost(hostname, FCWwns=wwns,
optional={'domain': domain,
'persona': persona_id})
return hostname
def _modify_3par_fibrechan_host(self, hostname, wwn):
mod_request = {'pathOperation': self.common.client.HOST_EDIT_ADD,
'FCWWNs': wwn}
self.common.client.modifyHost(hostname, mod_request)
def _create_host(self, volume, connector):
"""Creates or modifies existing 3PAR host."""
host = None
hostname = self.common._safe_hostname(connector['host'])
cpg = self.common.get_cpg(volume, allowSnap=True)
domain = self.common.get_domain(cpg)
try:
host = self.common._get_3par_host(hostname)
except hpexceptions.HTTPNotFound:
# get persona from the volume type extra specs
persona_id = self.common.get_persona_type(volume)
# host doesn't exist, we have to create it
hostname = self._create_3par_fibrechan_host(hostname,
connector['wwpns'],
domain,
persona_id)
host = self.common._get_3par_host(hostname)
return self._add_new_wwn_to_host(host, connector['wwpns'])
def _add_new_wwn_to_host(self, host, wwns):
"""Add wwns to a host if one or more don't exist.
Identify if argument wwns contains any world wide names
not configured in the 3PAR host path. If any are found,
add them to the 3PAR host.
"""
# get the currently configured wwns
# from the host's FC paths
host_wwns = []
if 'FCPaths' in host:
for path in host['FCPaths']:
wwn = path.get('wwn', None)
if wwn is not None:
host_wwns.append(wwn.lower())
# lower case all wwns in the compare list
compare_wwns = [x.lower() for x in wwns]
# calculate wwns in compare list, but not in host_wwns list
new_wwns = list(set(compare_wwns).difference(host_wwns))
# if any wwns found that were not in host list,
# add them to the host
if (len(new_wwns) > 0):
self._modify_3par_fibrechan_host(host['name'], new_wwns)
host = self.common._get_3par_host(host['name'])
return host
@utils.synchronized('3par', external=True)
def create_export(self, context, volume):
pass
@utils.synchronized('3par', external=True)
def ensure_export(self, context, volume):
pass
@utils.synchronized('3par', external=True)
def remove_export(self, context, volume):
pass
@utils.synchronized('3par', external=True)
def extend_volume(self, volume, new_size):
self.common.client_login()
try:
self.common.extend_volume(volume, new_size)
finally:
self.common.client_logout()
@utils.synchronized('3par', external=True)
def manage_existing(self, volume, existing_ref):
self.common.client_login()
try:
return self.common.manage_existing(volume, existing_ref)
finally:
self.common.client_logout()
@utils.synchronized('3par', external=True)
def manage_existing_get_size(self, volume, existing_ref):
self.common.client_login()
try:
size = self.common.manage_existing_get_size(volume, existing_ref)
finally:
self.common.client_logout()
return size
@utils.synchronized('3par', external=True)
def unmanage(self, volume):
self.common.client_login()
try:
self.common.unmanage(volume)
finally:
self.common.client_logout()
@utils.synchronized('3par', external=True)
def attach_volume(self, context, volume, instance_uuid, host_name,
mountpoint):
self.common.attach_volume(volume, instance_uuid)
@utils.synchronized('3par', external=True)
def detach_volume(self, context, volume):
self.common.detach_volume(volume)
@utils.synchronized('3par', external=True)
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
self.common.client_login()
try:
return self.common.retype(volume, new_type, diff, host)
finally:
self.common.client_logout()
@utils.synchronized('3par', external=True)
def migrate_volume(self, context, volume, host):
self.common.client_login()
try:
return self.common.migrate_volume(volume, host)
finally:
self.common.client_logout()
| apache-2.0 | -8,664,282,981,225,181,000 | 35.401891 | 78 | 0.58657 | false |
brchiu/tensorflow | tensorflow/python/ops/metrics_impl.py | 1 | 162465 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of tf.metrics module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import sets
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
def metric_variable(shape, dtype, validate_shape=True, name=None):
"""Create variable in `GraphKeys.(LOCAL|METRIC_VARIABLES)` collections.
If running in a `DistributionStrategy` context, the variable will be
"replica local". This means:
* The returned object will be a container with separate variables
per replica of the model.
* When writing to the variable, e.g. using `assign_add` in a metric
update, the update will be applied to the variable local to the
replica.
* To get a metric's result value, we need to sum the variable values
across the replicas before computing the final answer. Furthermore,
the final answer should be computed once instead of in every
replica. Both of these are accomplished by running the computation
of the final result value inside
`distribution_strategy_context.get_replica_context().merge_call(fn)`.
Inside the `merge_call()`, ops are only added to the graph once
and access to a replica-local variable in a computation returns
the sum across all replicas.
Args:
shape: Shape of the created variable.
dtype: Type of the created variable.
validate_shape: (Optional) Whether shape validation is enabled for
the created variable.
name: (Optional) String name of the created variable.
Returns:
A (non-trainable) variable initialized to zero, or if inside a
`DistributionStrategy` scope a replica-local variable container.
"""
# Note that synchronization "ON_READ" implies trainable=False.
return variable_scope.variable(
lambda: array_ops.zeros(shape, dtype),
collections=[
ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES
],
validate_shape=validate_shape,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM,
name=name)
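# Usage sketch (variable names here are illustrative): the metric ops below
# keep simple accumulators built with this helper and combine them when the
# metric value is read, e.g.
#
#   total = metric_variable([], dtypes.float32, name='total')
#   count = metric_variable([], dtypes.float32, name='count')
#   update_total_op = state_ops.assign_add(total, math_ops.reduce_sum(values))
#   update_count_op = state_ops.assign_add(count, math_ops.reduce_sum(weights))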
def _remove_squeezable_dimensions(predictions, labels, weights):
"""Squeeze or expand last dim if needed.
Squeezes last dim of `predictions` or `labels` if their rank differs by 1
(using confusion_matrix.remove_squeezable_dimensions).
Squeezes or expands last dim of `weights` if its rank differs by 1 from the
new rank of `predictions`.
If `weights` is scalar, it is kept scalar.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
labels: Optional label `Tensor` whose dimensions match `predictions`.
weights: Optional weight scalar or `Tensor` whose dimensions match
`predictions`.
Returns:
Tuple of `predictions`, `labels` and `weights`. Each of them possibly has
the last dimension squeezed, `weights` could be extended by one dimension.
"""
predictions = ops.convert_to_tensor(predictions)
if labels is not None:
labels, predictions = confusion_matrix.remove_squeezable_dimensions(
labels, predictions)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
if weights is None:
return predictions, labels, None
weights = ops.convert_to_tensor(weights)
weights_shape = weights.get_shape()
weights_rank = weights_shape.ndims
if weights_rank == 0:
return predictions, labels, weights
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
if (predictions_rank is not None) and (weights_rank is not None):
# Use static rank.
if weights_rank - predictions_rank == 1:
weights = array_ops.squeeze(weights, [-1])
elif predictions_rank - weights_rank == 1:
weights = array_ops.expand_dims(weights, [-1])
else:
# Use dynamic rank.
weights_rank_tensor = array_ops.rank(weights)
rank_diff = weights_rank_tensor - array_ops.rank(predictions)
def _maybe_expand_weights():
return control_flow_ops.cond(
math_ops.equal(rank_diff, -1),
lambda: array_ops.expand_dims(weights, [-1]), lambda: weights)
# Don't attempt squeeze if it will fail based on static check.
if ((weights_rank is not None) and
(not weights_shape.dims[-1].is_compatible_with(1))):
maybe_squeeze_weights = lambda: weights
else:
maybe_squeeze_weights = lambda: array_ops.squeeze(weights, [-1])
def _maybe_adjust_weights():
return control_flow_ops.cond(
math_ops.equal(rank_diff, 1), maybe_squeeze_weights,
_maybe_expand_weights)
# If weights are scalar, do nothing. Otherwise, try to add or remove a
# dimension to match predictions.
weights = control_flow_ops.cond(
math_ops.equal(weights_rank_tensor, 0), lambda: weights,
_maybe_adjust_weights)
return predictions, labels, weights
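# Shape sketch (illustrative): predictions of shape [batch, 1] paired with
# labels of shape [batch] come back as two [batch] tensors, while a scalar
# `weights` value is passed through unchanged.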
def _maybe_expand_labels(labels, predictions):
"""If necessary, expand `labels` along last dimension to match `predictions`.
Args:
labels: `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN]. The latter implies
num_labels=1, in which case the result is an expanded `labels` with shape
[D1, ... DN, 1].
predictions: `Tensor` with shape [D1, ... DN, num_classes].
Returns:
`labels` with the same rank as `predictions`.
Raises:
ValueError: if `labels` has invalid shape.
"""
with ops.name_scope(None, 'expand_labels', (labels, predictions)) as scope:
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
# If sparse, expand sparse shape.
if isinstance(labels, sparse_tensor.SparseTensor):
return control_flow_ops.cond(
math_ops.equal(
array_ops.rank(predictions),
array_ops.size(labels.dense_shape) + 1),
lambda: sparse_ops.sparse_reshape( # pylint: disable=g-long-lambda
labels,
shape=array_ops.concat((labels.dense_shape, (1,)), 0),
name=scope),
lambda: labels)
# Otherwise, try to use static shape.
labels_rank = labels.get_shape().ndims
if labels_rank is not None:
predictions_rank = predictions.get_shape().ndims
if predictions_rank is not None:
if predictions_rank == labels_rank:
return labels
if predictions_rank == labels_rank + 1:
return array_ops.expand_dims(labels, -1, name=scope)
raise ValueError(
'Unexpected labels shape %s for predictions shape %s.' %
(labels.get_shape(), predictions.get_shape()))
# Otherwise, use dynamic shape.
return control_flow_ops.cond(
math_ops.equal(array_ops.rank(predictions),
array_ops.rank(labels) + 1),
lambda: array_ops.expand_dims(labels, -1, name=scope), lambda: labels)
def _safe_div(numerator, denominator, name):
"""Divides two tensors element-wise, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
name: Name for the returned op.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
if compat.forward_compatible(2018, 11, 1):
return math_ops.div_no_nan(numerator, denominator, name=name)
t = math_ops.truediv(numerator, denominator)
zero = array_ops.zeros_like(t, dtype=denominator.dtype)
condition = math_ops.greater(denominator, zero)
zero = math_ops.cast(zero, t.dtype)
return array_ops.where(condition, t, zero, name=name)
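# Behaviour sketch: with numerator [1., 2.] and denominator [2., 0.] the result
# is [0.5, 0.] -- the zero denominator yields 0 instead of inf/nan.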
def _safe_scalar_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is 0.
Args:
numerator: A scalar `float64` `Tensor`.
denominator: A scalar `float64` `Tensor`.
name: Name for the returned op.
Returns:
0 if `denominator` == 0, else `numerator` / `denominator`
"""
numerator.get_shape().with_rank_at_most(1)
denominator.get_shape().with_rank_at_most(1)
return _safe_div(numerator, denominator, name=name)
def _streaming_confusion_matrix(labels, predictions, num_classes, weights=None):
"""Calculate a streaming confusion matrix.
Calculates a confusion matrix. For estimation over a stream of data,
the function creates an `update_op` operation.
Args:
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened if its rank > 1.
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
Returns:
total_cm: A `Tensor` representing the confusion matrix.
update_op: An operation that increments the confusion matrix.
"""
# Local variable to accumulate the predictions in the confusion matrix.
total_cm = metric_variable(
[num_classes, num_classes], dtypes.float64, name='total_confusion_matrix')
# Cast the type to int64 required by confusion_matrix_ops.
predictions = math_ops.to_int64(predictions)
labels = math_ops.to_int64(labels)
num_classes = math_ops.to_int64(num_classes)
# Flatten the input if its rank > 1.
if predictions.get_shape().ndims > 1:
predictions = array_ops.reshape(predictions, [-1])
if labels.get_shape().ndims > 1:
labels = array_ops.reshape(labels, [-1])
if (weights is not None) and (weights.get_shape().ndims > 1):
weights = array_ops.reshape(weights, [-1])
# Accumulate the prediction to current confusion matrix.
current_cm = confusion_matrix.confusion_matrix(
labels, predictions, num_classes, weights=weights, dtype=dtypes.float64)
update_op = state_ops.assign_add(total_cm, current_cm)
return total_cm, update_op
def _aggregate_across_replicas(metrics_collections, metric_value_fn, *args):
"""Aggregate metric value across replicas."""
def fn(distribution, *a):
"""Call `metric_value_fn` in the correct control flow context."""
if hasattr(distribution.extended, '_outer_control_flow_context'):
# If there was an outer context captured before this method was called,
# then we enter that context to create the metric value op. If the
      # captured context is `None`, ops.control_dependencies(None) gives the
# desired behavior. Else we use `Enter` and `Exit` to enter and exit the
# captured context.
# This special handling is needed because sometimes the metric is created
# inside a while_loop (and perhaps a TPU rewrite context). But we don't
# want the value op to be evaluated every step or on the TPU. So we
# create it outside so that it can be evaluated at the end on the host,
      # once the update ops have been evaluated.
# pylint: disable=protected-access
if distribution.extended._outer_control_flow_context is None:
with ops.control_dependencies(None):
metric_value = metric_value_fn(distribution, *a)
else:
distribution.extended._outer_control_flow_context.Enter()
metric_value = metric_value_fn(distribution, *a)
distribution.extended._outer_control_flow_context.Exit()
# pylint: enable=protected-access
else:
metric_value = metric_value_fn(distribution, *a)
if metrics_collections:
ops.add_to_collections(metrics_collections, metric_value)
return metric_value
return distribution_strategy_context.get_replica_context().merge_call(
fn, args=args)
@tf_export('metrics.mean')
def mean(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the (weighted) mean of the given values.
The `mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_value`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean is not supported when eager execution '
'is enabled.')
with variable_scope.variable_scope(name, 'mean', (values, weights)):
values = math_ops.to_float(values)
total = metric_variable([], dtypes.float32, name='total')
count = metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
else:
values, _, weights = _remove_squeezable_dimensions(
predictions=values, labels=None, weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
values = math_ops.multiply(values, weights)
num_values = math_ops.reduce_sum(weights)
update_total_op = state_ops.assign_add(total, math_ops.reduce_sum(values))
with ops.control_dependencies([values]):
update_count_op = state_ops.assign_add(count, num_values)
def compute_mean(_, t, c):
return _safe_div(t, math_ops.maximum(c, 0), name='value')
mean_t = _aggregate_across_replicas(
metrics_collections, compute_mean, total, count)
update_op = _safe_div(update_total_op,
math_ops.maximum(update_count_op, 0),
name='update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_t, update_op
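# --- Illustrative usage sketch (editor's addition, not part of the original
# module). Demonstrates the value/update_op pattern shared by the metrics in
# this file: `update_op` accumulates the local `total`/`count` variables and
# the value tensor reads back the running mean. Assumes the public TF 1.x
# `tf.metrics.mean` endpoint; the helper name is hypothetical.
def _example_streaming_mean():
  """Minimal sketch: accumulate `mean` over two batches in graph mode."""
  import tensorflow as tf  # assumed TF 1.x
  values = tf.placeholder(tf.float32, shape=[None])
  mean_t, update_op = tf.metrics.mean(values)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # metric variables are local
    sess.run(update_op, feed_dict={values: [1.0, 2.0, 3.0]})
    sess.run(update_op, feed_dict={values: [5.0]})
    return sess.run(mean_t)  # (1 + 2 + 3 + 5) / 4 = 2.75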
@tf_export('metrics.accuracy')
def accuracy(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates how often `predictions` matches `labels`.
The `accuracy` function creates two local variables, `total` and
`count` that are used to compute the frequency with which `predictions`
matches `labels`. This frequency is ultimately returned as `accuracy`: an
idempotent operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `accuracy`.
Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
where the corresponding elements of `predictions` and `labels` match and 0.0
otherwise. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `is_correct`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose shape matches
`predictions`.
predictions: The predicted values, a `Tensor` of any shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `accuracy` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
accuracy: A `Tensor` representing the accuracy, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `accuracy`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.accuracy is not supported when eager '
'execution is enabled.')
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
if labels.dtype != predictions.dtype:
predictions = math_ops.cast(predictions, labels.dtype)
is_correct = math_ops.to_float(math_ops.equal(predictions, labels))
return mean(is_correct, weights, metrics_collections, updates_collections,
name or 'accuracy')
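# --- Illustrative usage sketch (editor's addition). Shows how zero weights
# mask examples out of the streaming `accuracy`, as described in the docstring
# above. Assumes the public TF 1.x `tf.metrics.accuracy` endpoint; the helper
# name is hypothetical.
def _example_weighted_accuracy():
  """Minimal sketch: accuracy with the last example masked out by weight 0."""
  import tensorflow as tf  # assumed TF 1.x
  labels = tf.constant([1, 0, 1, 1])
  predictions = tf.constant([1, 0, 0, 0])
  weights = tf.constant([1.0, 1.0, 1.0, 0.0])  # ignore the fourth example
  acc, update_op = tf.metrics.accuracy(labels, predictions, weights=weights)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(acc)  # 2 correct of 3 weighted examples -> ~0.6667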
def _confusion_matrix_at_thresholds(labels,
predictions,
thresholds,
weights=None,
includes=None):
"""Computes true_positives, false_negatives, true_negatives, false_positives.
This function creates up to four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives`.
`true_positive[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `True`.
`false_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
`true_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
`false_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `False`.
For estimation of these metrics over a stream of data, for each metric the
function respectively creates an `update_op` operation that updates the
variable and returns its value.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
    includes: Tuple of keys to return, from 'tp', 'fn', 'tn', 'fp'. If `None`,
default to all four.
Returns:
values: Dict of variables of shape `[len(thresholds)]`. Keys are from
`includes`.
update_ops: Dict of operations that increments the `values`. Keys are from
`includes`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
all_includes = ('tp', 'fn', 'tn', 'fp')
if includes is None:
includes = all_includes
else:
for include in includes:
if include not in all_includes:
raise ValueError('Invalid key: %s.' % include)
with ops.control_dependencies([
check_ops.assert_greater_equal(
predictions,
math_ops.cast(0.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]'),
check_ops.assert_less_equal(
predictions,
math_ops.cast(1.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]')
]):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.to_float(predictions),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
num_thresholds = len(thresholds)
# Reshape predictions and labels.
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
# Use static shape if known.
num_predictions = predictions_2d.get_shape().as_list()[0]
# Otherwise use dynamic shape.
if num_predictions is None:
num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
array_ops.expand_dims(array_ops.constant(thresholds), [1]),
array_ops.stack([1, num_predictions]))
  # Compare the tiled predictions against each threshold, producing a boolean
  # [num_thresholds, num_predictions] matrix.
pred_is_pos = math_ops.greater(
array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
thresh_tiled)
if ('fn' in includes) or ('tn' in includes):
pred_is_neg = math_ops.logical_not(pred_is_pos)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
if ('fp' in includes) or ('tn' in includes):
label_is_neg = math_ops.logical_not(label_is_pos)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), predictions)
weights_tiled = array_ops.tile(
array_ops.reshape(weights, [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
weights_tiled.get_shape())
else:
weights_tiled = None
values = {}
update_ops = {}
if 'tp' in includes:
true_p = metric_variable(
[num_thresholds], dtypes.float32, name='true_positives')
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(true_p,
math_ops.reduce_sum(
is_true_positive, 1))
values['tp'] = true_p
if 'fn' in includes:
false_n = metric_variable(
[num_thresholds], dtypes.float32, name='false_negatives')
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(false_n,
math_ops.reduce_sum(
is_false_negative, 1))
values['fn'] = false_n
if 'tn' in includes:
true_n = metric_variable(
[num_thresholds], dtypes.float32, name='true_negatives')
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(true_n,
math_ops.reduce_sum(
is_true_negative, 1))
values['tn'] = true_n
if 'fp' in includes:
false_p = metric_variable(
[num_thresholds], dtypes.float32, name='false_positives')
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(false_p,
math_ops.reduce_sum(
is_false_positive, 1))
values['fp'] = false_p
return values, update_ops
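# --- Illustrative sketch (editor's addition) of the tiling logic above,
# rewritten with NumPy for readability. Under simplifying assumptions (no
# weights), each prediction is compared against every threshold to produce a
# boolean [num_thresholds, num_predictions] matrix, from which the four
# per-threshold counts are reduced. The helper name is hypothetical.
def _example_confusion_counts_at_thresholds():
  """Recomputes unweighted tp/fn/tn/fp per threshold for a toy batch."""
  import numpy as np
  predictions = np.array([0.1, 0.4, 0.35, 0.8])
  labels = np.array([False, False, True, True])
  thresholds = np.array([0.0, 0.5, 1.0])
  # Row i holds `predictions > thresholds[i]`, mirroring `pred_is_pos`.
  pred_is_pos = predictions[None, :] > thresholds[:, None]
  label_is_pos = np.tile(labels[None, :], (len(thresholds), 1))
  tp = np.sum(label_is_pos & pred_is_pos, axis=1)    # -> [2, 1, 0]
  fn = np.sum(label_is_pos & ~pred_is_pos, axis=1)   # -> [0, 1, 2]
  tn = np.sum(~label_is_pos & ~pred_is_pos, axis=1)  # -> [0, 2, 2]
  fp = np.sum(~label_is_pos & pred_is_pos, axis=1)   # -> [2, 0, 0]
  return tp, fn, tn, fp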
def _aggregate_variable(v, collections):
f = lambda distribution, value: distribution.read_var(value)
return _aggregate_across_replicas(collections, f, v)
@tf_export('metrics.auc')
def auc(labels,
predictions,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None,
summation_method='trapezoidal'):
"""Computes the approximate AUC via a Riemann sum.
The `auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
  values by the false positive rate, while the area under the PR-curve is
  computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case. Setting `summation_method`
to 'minoring' or 'majoring' can help quantify the error in the approximation
  by providing a lower or upper bound estimate of the AUC.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `auc`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
summation_method: Specifies the Riemann summation method used
(https://en.wikipedia.org/wiki/Riemann_sum): 'trapezoidal' [default] that
applies the trapezoidal rule; 'careful_interpolation', a variant of it
differing only by a more correct interpolation scheme for PR-AUC -
interpolating (true/false) positives but not the ratio that is precision;
'minoring' that applies left summation for increasing intervals and right
summation for decreasing intervals; 'majoring' that does the opposite.
Note that 'careful_interpolation' is strictly preferred to 'trapezoidal'
(to be deprecated soon) as it applies the same method for ROC, and a
better one (see Davis & Goadrich 2006 for details) for the PR curve.
Returns:
auc: A scalar `Tensor` representing the current area-under-curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `auc`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.auc is not supported when eager execution '
'is enabled.')
with variable_scope.variable_scope(name, 'auc',
(labels, predictions, weights)):
if curve != 'ROC' and curve != 'PR':
raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
def interpolate_pr_auc(tp, fp, fn):
"""Interpolation formula inspired by section 4 of Davis & Goadrich 2006.
Note here we derive & use a closed formula not present in the paper
- as follows:
Modeling all of TP (true positive weight),
FP (false positive weight) and their sum P = TP + FP (positive weight)
as varying linearly within each interval [A, B] between successive
thresholds, we get
Precision = (TP_A + slope * (P - P_A)) / P
with slope = dTP / dP = (TP_B - TP_A) / (P_B - P_A).
The area within the interval is thus (slope / total_pos_weight) times
int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}
int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}
where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in
int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)
Bringing back the factor (slope / total_pos_weight) we'd put aside, we get
slope * [dTP + intercept * log(P_B / P_A)] / total_pos_weight
where dTP == TP_B - TP_A.
Note that when P_A == 0 the above calculation simplifies into
int_A^B{Precision.dTP} = int_A^B{slope * dTP} = slope * (TP_B - TP_A)
which is really equivalent to imputing constant precision throughout the
first bucket having >0 true positives.
Args:
tp: true positive counts
fp: false positive counts
fn: false negative counts
Returns:
pr_auc: an approximation of the area under the P-R curve.
"""
dtp = tp[:num_thresholds - 1] - tp[1:]
p = tp + fp
prec_slope = _safe_div(
dtp,
math_ops.maximum(p[:num_thresholds - 1] - p[1:], 0),
name='prec_slope')
intercept = tp[1:] - math_ops.multiply(prec_slope, p[1:])
safe_p_ratio = array_ops.where(
math_ops.logical_and(p[:num_thresholds - 1] > 0, p[1:] > 0),
_safe_div(p[:num_thresholds - 1],
math_ops.maximum(p[1:], 0),
name='recall_relative_ratio'),
array_ops.ones_like(p[1:]))
return math_ops.reduce_sum(
_safe_div(
prec_slope * (dtp + intercept * math_ops.log(safe_p_ratio)),
math_ops.maximum(tp[1:] + fn[1:], 0),
name='pr_auc_increment'),
name='interpolate_pr_auc')
def compute_auc(tp, fn, tn, fp, name):
"""Computes the roc-auc or pr-auc based on confusion counts."""
if curve == 'PR':
if summation_method == 'trapezoidal':
logging.warning(
'Trapezoidal rule is known to produce incorrect PR-AUCs; '
'please switch to "careful_interpolation" instead.')
elif summation_method == 'careful_interpolation':
# This one is a bit tricky and is handled separately.
return interpolate_pr_auc(tp, fp, fn)
rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
x = fp_rate
y = rec
else: # curve == 'PR'.
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
x = rec
y = prec
if summation_method in ('trapezoidal', 'careful_interpolation'):
# Note that the case ('PR', 'careful_interpolation') has been handled
# above.
return math_ops.reduce_sum(
math_ops.multiply(x[:num_thresholds - 1] - x[1:],
(y[:num_thresholds - 1] + y[1:]) / 2.),
name=name)
elif summation_method == 'minoring':
return math_ops.reduce_sum(
math_ops.multiply(x[:num_thresholds - 1] - x[1:],
math_ops.minimum(y[:num_thresholds - 1], y[1:])),
name=name)
elif summation_method == 'majoring':
return math_ops.reduce_sum(
math_ops.multiply(x[:num_thresholds - 1] - x[1:],
math_ops.maximum(y[:num_thresholds - 1], y[1:])),
name=name)
else:
raise ValueError('Invalid summation_method: %s' % summation_method)
# sum up the areas of all the trapeziums
def compute_auc_value(_, values):
return compute_auc(values['tp'], values['fn'], values['tn'], values['fp'],
'value')
auc_value = _aggregate_across_replicas(
metrics_collections, compute_auc_value, values)
update_op = compute_auc(update_ops['tp'], update_ops['fn'],
update_ops['tn'], update_ops['fp'], 'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return auc_value, update_op
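# --- Illustrative usage sketch (editor's addition). Computes a streaming
# PR-AUC with the 'careful_interpolation' summation method recommended in the
# docstring above. Assumes the public TF 1.x `tf.metrics.auc` endpoint; the
# helper name is hypothetical and the exact value depends on `num_thresholds`.
def _example_pr_auc():
  """Minimal sketch: streaming PR-AUC on a single toy batch."""
  import tensorflow as tf  # assumed TF 1.x
  labels = tf.constant([0, 0, 1, 1])
  predictions = tf.constant([0.1, 0.4, 0.35, 0.8])
  auc_t, update_op = tf.metrics.auc(
      labels, predictions, num_thresholds=200, curve='PR',
      summation_method='careful_interpolation')
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)   # accumulate tp/fn/tn/fp before reading the value
    return sess.run(auc_t)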
@tf_export('metrics.mean_absolute_error')
def mean_absolute_error(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean absolute error between the labels and predictions.
The `mean_absolute_error` function creates two local variables,
`total` and `count` that are used to compute the mean absolute error. This
average is weighted by `weights`, and it is ultimately returned as
`mean_absolute_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_absolute_error`. Internally, an `absolute_errors` operation computes the
absolute value of the differences between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `absolute_errors`, and it increments `count` with the reduced
  sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_absolute_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_absolute_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_absolute_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_absolute_error is not supported '
'when eager execution is enabled.')
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
absolute_errors = math_ops.abs(predictions - labels)
return mean(absolute_errors, weights, metrics_collections,
updates_collections, name or 'mean_absolute_error')
@tf_export('metrics.mean_cosine_distance')
def mean_cosine_distance(labels,
predictions,
dim,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the cosine distance between the labels and predictions.
The `mean_cosine_distance` function creates two local variables,
`total` and `count` that are used to compute the average cosine distance
between `predictions` and `labels`. This average is weighted by `weights`,
and it is ultimately returned as `mean_distance`, which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_distance`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of arbitrary shape.
predictions: A `Tensor` of the same shape as `labels`.
dim: The dimension along which the cosine distance is computed.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension). Also,
dimension `dim` must be `1`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
mean_distance: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_cosine_distance is not supported when '
'eager execution is enabled.')
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
radial_diffs = math_ops.multiply(predictions, labels)
radial_diffs = math_ops.reduce_sum(
radial_diffs, reduction_indices=[
dim,
], keepdims=True)
mean_distance, update_op = mean(radial_diffs, weights, None, None, name or
'mean_cosine_distance')
mean_distance = math_ops.subtract(1.0, mean_distance)
update_op = math_ops.subtract(1.0, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
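# --- Illustrative usage sketch (editor's addition). The implementation above
# only reduces `predictions * labels` along `dim`, so callers are expected to
# pass unit-normalized vectors. Assumes the public TF 1.x endpoint; the helper
# name is hypothetical.
def _example_mean_cosine_distance():
  """Minimal sketch: cosine distance between two batches of unit vectors."""
  import tensorflow as tf  # assumed TF 1.x
  labels = tf.nn.l2_normalize(tf.constant([[1.0, 0.0], [0.0, 1.0]]), axis=1)
  predictions = tf.nn.l2_normalize(
      tf.constant([[1.0, 0.0], [1.0, 1.0]]), axis=1)
  dist, update_op = tf.metrics.mean_cosine_distance(labels, predictions, dim=1)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(dist)  # mean of (1 - 1) and (1 - 1/sqrt(2)) ~= 0.146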
@tf_export('metrics.mean_per_class_accuracy')
def mean_per_class_accuracy(labels,
predictions,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates the mean of the per-class accuracies.
Calculates the accuracy for each class, then takes the mean of that.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates the accuracy of each class and returns
them.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened if its rank > 1.
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since two variables with shape =
[num_classes] will be allocated.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
    metrics_collections: An optional list of collections that
      `mean_per_class_accuracy` should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
name: An optional variable_scope name.
Returns:
mean_accuracy: A `Tensor` representing the mean per class accuracy.
update_op: An operation that updates the accuracy tensor.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_per_class_accuracy is not supported '
'when eager execution is enabled.')
with variable_scope.variable_scope(name, 'mean_accuracy',
(predictions, labels, weights)):
labels = math_ops.to_int64(labels)
# Flatten the input if its rank > 1.
if labels.get_shape().ndims > 1:
labels = array_ops.reshape(labels, [-1])
if predictions.get_shape().ndims > 1:
predictions = array_ops.reshape(predictions, [-1])
# Check if shape is compatible.
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
total = metric_variable([num_classes], dtypes.float32, name='total')
count = metric_variable([num_classes], dtypes.float32, name='count')
ones = array_ops.ones([array_ops.size(labels)], dtypes.float32)
if labels.dtype != predictions.dtype:
predictions = math_ops.cast(predictions, labels.dtype)
is_correct = math_ops.to_float(math_ops.equal(predictions, labels))
if weights is not None:
if weights.get_shape().ndims > 1:
weights = array_ops.reshape(weights, [-1])
weights = math_ops.to_float(weights)
is_correct *= weights
ones *= weights
update_total_op = state_ops.scatter_add(total, labels, ones)
update_count_op = state_ops.scatter_add(count, labels, is_correct)
def compute_mean_accuracy(_, count, total):
per_class_accuracy = _safe_div(
count, math_ops.maximum(total, 0), name=None)
mean_accuracy_v = math_ops.reduce_mean(
per_class_accuracy, name='mean_accuracy')
return mean_accuracy_v
mean_accuracy_v = _aggregate_across_replicas(
metrics_collections, compute_mean_accuracy, count, total)
update_op = _safe_div(update_count_op,
math_ops.maximum(update_total_op, 0),
name='update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_accuracy_v, update_op
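# --- Illustrative usage sketch (editor's addition). The per-class totals are
# accumulated with `scatter_add` above; this shows the resulting mean on a toy
# batch. Assumes the public TF 1.x endpoint; the helper name is hypothetical.
def _example_mean_per_class_accuracy():
  """Minimal sketch: mean of per-class accuracies for two classes."""
  import tensorflow as tf  # assumed TF 1.x
  labels = tf.constant([0, 0, 0, 1])
  predictions = tf.constant([0, 1, 1, 1])
  mpca, update_op = tf.metrics.mean_per_class_accuracy(
      labels, predictions, num_classes=2)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(mpca)  # class 0: 1/3, class 1: 1/1 -> mean = 2/3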
@tf_export('metrics.mean_iou')
def mean_iou(labels,
predictions,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculate per-step mean Intersection-Over-Union (mIOU).
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by `weights`,
and mIOU is then calculated from it.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean_iou`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened if its rank > 1.
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `mean_iou`
should be added to.
    updates_collections: An optional list of collections that `update_op`
      should be added to.
name: An optional variable_scope name.
Returns:
mean_iou: A `Tensor` representing the mean intersection-over-union.
update_op: An operation that increments the confusion matrix.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_iou is not supported when '
'eager execution is enabled.')
with variable_scope.variable_scope(name, 'mean_iou',
(predictions, labels, weights)):
# Check if shape is compatible.
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
total_cm, update_op = _streaming_confusion_matrix(labels, predictions,
num_classes, weights)
def compute_mean_iou(_, total_cm):
"""Compute the mean intersection-over-union via the confusion matrix."""
sum_over_row = math_ops.to_float(math_ops.reduce_sum(total_cm, 0))
sum_over_col = math_ops.to_float(math_ops.reduce_sum(total_cm, 1))
cm_diag = math_ops.to_float(array_ops.diag_part(total_cm))
denominator = sum_over_row + sum_over_col - cm_diag
# The mean is only computed over classes that appear in the
# label or prediction tensor. If the denominator is 0, we need to
# ignore the class.
num_valid_entries = math_ops.reduce_sum(
math_ops.cast(
math_ops.not_equal(denominator, 0), dtype=dtypes.float32))
# If the value of the denominator is 0, set it to 1 to avoid
# zero division.
denominator = array_ops.where(
math_ops.greater(denominator, 0), denominator,
array_ops.ones_like(denominator))
iou = math_ops.div(cm_diag, denominator)
# If the number of valid entries is 0 (no classes) we return 0.
result = array_ops.where(
math_ops.greater(num_valid_entries, 0),
math_ops.reduce_sum(iou, name='mean_iou') / num_valid_entries, 0)
return result
# TODO(priyag): Use outside_compilation if in TPU context.
mean_iou_v = _aggregate_across_replicas(
metrics_collections, compute_mean_iou, total_cm)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_iou_v, update_op
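# --- Illustrative usage sketch (editor's addition). Mean IOU is derived from
# the accumulated confusion matrix, so `update_op` must run before the value
# is read. Assumes the public TF 1.x endpoint; the helper name is hypothetical.
def _example_mean_iou():
  """Minimal sketch: mean IOU for two classes over four "pixels"."""
  import tensorflow as tf  # assumed TF 1.x
  labels = tf.constant([0, 0, 1, 1])
  predictions = tf.constant([0, 1, 1, 1])
  miou, update_op = tf.metrics.mean_iou(labels, predictions, num_classes=2)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)    # accumulates the confusion matrix
    return sess.run(miou)  # class 0: 1/2, class 1: 2/3 -> mean = 7/12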
@tf_export('metrics.mean_relative_error')
def mean_relative_error(labels,
predictions,
normalizer,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean relative error by normalizing with the given values.
The `mean_relative_error` function creates two local variables,
`total` and `count` that are used to compute the mean relative absolute error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_relative_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
  `mean_relative_error`. Internally, a `relative_errors` operation divides the
absolute value of the differences between `predictions` and `labels` by the
`normalizer`. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `relative_errors`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
normalizer: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_relative_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_relative_error is not supported when '
'eager execution is enabled.')
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
predictions, normalizer = confusion_matrix.remove_squeezable_dimensions(
predictions, normalizer)
predictions.get_shape().assert_is_compatible_with(normalizer.get_shape())
relative_errors = array_ops.where(
math_ops.equal(normalizer, 0.0), array_ops.zeros_like(labels),
math_ops.div(math_ops.abs(labels - predictions), normalizer))
return mean(relative_errors, weights, metrics_collections,
updates_collections, name or 'mean_relative_error')
@tf_export('metrics.mean_squared_error')
def mean_squared_error(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean squared error between the labels and predictions.
The `mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_squared_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_squared_error`. Internally, a `squared_error` operation computes the
element-wise square of the difference between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_squared_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_squared_error is not supported when '
'eager execution is enabled.')
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
squared_error = math_ops.square(labels - predictions)
return mean(squared_error, weights, metrics_collections, updates_collections,
name or 'mean_squared_error')
@tf_export('metrics.mean_tensor')
def mean_tensor(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the element-wise (weighted) mean of the given tensors.
In contrast to the `mean` function which returns a scalar with the
mean, this function returns an average tensor with the same shape as the
input tensors.
The `mean_tensor` function creates two local variables,
`total_tensor` and `count_tensor` that are used to compute the average of
`values`. This average is ultimately returned as `mean` which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A float `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_value`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.mean_tensor is not supported when '
'eager execution is enabled.')
with variable_scope.variable_scope(name, 'mean', (values, weights)):
values = math_ops.to_float(values)
total = metric_variable(
values.get_shape(), dtypes.float32, name='total_tensor')
count = metric_variable(
values.get_shape(), dtypes.float32, name='count_tensor')
num_values = array_ops.ones_like(values)
if weights is not None:
values, _, weights = _remove_squeezable_dimensions(
predictions=values, labels=None, weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
values = math_ops.multiply(values, weights)
num_values = math_ops.multiply(num_values, weights)
update_total_op = state_ops.assign_add(total, values)
with ops.control_dependencies([values]):
update_count_op = state_ops.assign_add(count, num_values)
compute_mean = lambda _, t, c: _safe_div(
t, math_ops.maximum(c, 0), name='value')
mean_t = _aggregate_across_replicas(
metrics_collections, compute_mean, total, count)
update_op = _safe_div(update_total_op,
math_ops.maximum(update_count_op, 0),
name='update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_t, update_op
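# --- Illustrative usage sketch (editor's addition). Unlike `mean`, the result
# keeps the shape of `values`, with each element averaged independently across
# batches. Assumes the public TF 1.x endpoint; the helper name is hypothetical.
def _example_mean_tensor():
  """Minimal sketch: element-wise streaming mean over two batches."""
  import tensorflow as tf  # assumed TF 1.x
  values = tf.placeholder(tf.float32, shape=[2])
  mean_t, update_op = tf.metrics.mean_tensor(values)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op, feed_dict={values: [1.0, 4.0]})
    sess.run(update_op, feed_dict={values: [3.0, 0.0]})
    return sess.run(mean_t)  # -> [2.0, 2.0], same shape as `values`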
@tf_export('metrics.percentage_below')
def percentage_below(values,
threshold,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `percentage_below` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.percentage_below is not supported when '
'eager execution is enabled.')
is_below_threshold = math_ops.to_float(math_ops.less(values, threshold))
return mean(is_below_threshold, weights, metrics_collections,
updates_collections, name or 'percentage_below_threshold')
def _count_condition(values,
weights=None,
metrics_collections=None,
updates_collections=None):
"""Sums the weights of cases where the given values are True.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `bool` `Tensor` of arbitrary size.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions must
be either `1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
check_ops.assert_type(values, dtypes.bool)
count = metric_variable([], dtypes.float32, name='count')
values = math_ops.to_float(values)
if weights is not None:
with ops.control_dependencies((check_ops.assert_rank_in(
weights, (0, array_ops.rank(values))),)):
weights = math_ops.to_float(weights)
values = math_ops.multiply(values, weights)
value_tensor = _aggregate_variable(count, metrics_collections)
update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return value_tensor, update_op
@tf_export('metrics.false_negatives')
def false_negatives(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the total number of false negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.false_negatives is not supported when '
'eager execution is enabled.')
with variable_scope.variable_scope(name, 'false_negatives',
(predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
is_false_negative = math_ops.logical_and(
math_ops.equal(labels, True), math_ops.equal(predictions, False))
return _count_condition(is_false_negative, weights, metrics_collections,
updates_collections)
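# --- Illustrative usage sketch (editor's addition). The counting metrics
# (`false_negatives`, `false_positives`, `true_negatives`, `true_positives`)
# all follow the same pattern shown here. Assumes the public TF 1.x endpoint;
# the helper name is hypothetical.
def _example_false_negatives():
  """Minimal sketch: streaming count of false negatives."""
  import tensorflow as tf  # assumed TF 1.x
  labels = tf.constant([True, True, False, True])
  predictions = tf.constant([True, False, False, False])
  fn, update_op = tf.metrics.false_negatives(labels, predictions)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    return sess.run(fn)  # label True but predicted False at indices 1, 3 -> 2.0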
@tf_export('metrics.false_negatives_at_thresholds')
def false_negatives_at_thresholds(labels,
predictions,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes false negatives at provided threshold values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `false_negatives`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negatives: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that updates the `false_negatives` variable and
returns its current value.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.false_negatives_at_thresholds is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'false_negatives',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=weights, includes=('fn',))
fn_value = _aggregate_variable(values['fn'], metrics_collections)
if updates_collections:
ops.add_to_collections(updates_collections, update_ops['fn'])
return fn_value, update_ops['fn']
@tf_export('metrics.false_positives')
def false_positives(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.false_positives is not supported when '
'eager execution is enabled.')
with variable_scope.variable_scope(name, 'false_positives',
(predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
is_false_positive = math_ops.logical_and(
math_ops.equal(labels, False), math_ops.equal(predictions, True))
return _count_condition(is_false_positive, weights, metrics_collections,
updates_collections)
@tf_export('metrics.false_positives_at_thresholds')
def false_positives_at_thresholds(labels,
predictions,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes false positives at provided threshold values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `false_positives`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positives: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that updates the `false_positives` variable and
returns its current value.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.false_positives_at_thresholds is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'false_positives',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=weights, includes=('fp',))
fp_value = _aggregate_variable(values['fp'], metrics_collections)
if updates_collections:
ops.add_to_collections(updates_collections, update_ops['fp'])
return fp_value, update_ops['fp']
@tf_export('metrics.true_negatives')
def true_negatives(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.true_negatives is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'true_negatives',
(predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
is_true_negative = math_ops.logical_and(
math_ops.equal(labels, False), math_ops.equal(predictions, False))
return _count_condition(is_true_negative, weights, metrics_collections,
updates_collections)
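# Illustrative usage sketch (hypothetical helper, not part of this module): it
# shows the value/update_op pattern shared by the true_negatives,
# true_positives and false_* counters, assuming TensorFlow 1.x graph mode and
# the public tf.metrics wrappers exported above.
def _example_true_negatives_usage_sketch():
  """Hypothetical usage sketch; returns the accumulated true-negative count."""
  import tensorflow as tf  # assumption: TF 1.x with graph execution
  labels = tf.constant([True, False, False, True])
  predictions = tf.constant([False, False, True, True])
  tn_value, tn_update = tf.metrics.true_negatives(labels, predictions)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tn_update)        # accumulates the single true negative (index 1)
    return sess.run(tn_value)  # -> 1.0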
@tf_export('metrics.true_negatives_at_thresholds')
def true_negatives_at_thresholds(labels,
predictions,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes true negatives at provided threshold values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `true_negatives`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
true_negatives: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that updates the `true_negatives` variable and
returns its current value.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.true_negatives_at_thresholds is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'true_negatives',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=weights, includes=('tn',))
tn_value = _aggregate_variable(values['tn'], metrics_collections)
if updates_collections:
ops.add_to_collections(updates_collections, update_ops['tn'])
return tn_value, update_ops['tn']
@tf_export('metrics.true_positives')
def true_positives(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.true_positives is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'true_positives',
(predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
is_true_positive = math_ops.logical_and(
math_ops.equal(labels, True), math_ops.equal(predictions, True))
return _count_condition(is_true_positive, weights, metrics_collections,
updates_collections)
@tf_export('metrics.true_positives_at_thresholds')
def true_positives_at_thresholds(labels,
predictions,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes true positives at provided threshold values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `true_positives`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
true_positives: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that updates the `true_positives` variable and
returns its current value.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.true_positives_at_thresholds is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'true_positives',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights=weights, includes=('tp',))
tp_value = _aggregate_variable(values['tp'], metrics_collections)
if updates_collections:
ops.add_to_collections(updates_collections, update_ops['tp'])
return tp_value, update_ops['tp']
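# Illustrative sketch (hypothetical helper, not part of this module) of the
# *_at_thresholds counters, which return one count per threshold. Assumes
# TF 1.x graph mode; a score strictly above a threshold counts as a positive
# prediction at that threshold.
def _example_true_positives_at_thresholds_sketch():
  """Hypothetical usage sketch for the per-threshold counters."""
  import tensorflow as tf  # assumption: TF 1.x with graph execution
  labels = tf.constant([True, True, False, True])
  scores = tf.constant([0.9, 0.4, 0.8, 0.1])
  tp, tp_update = tf.metrics.true_positives_at_thresholds(
      labels, scores, thresholds=[0.3, 0.5, 0.7])
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tp_update)
    return sess.run(tp)  # -> [2., 1., 1.]: positives scoring above each threshold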
@tf_export('metrics.precision')
def precision(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision of the predictions with respect to the labels.
The `precision` function creates two local variables,
`true_positives` and `false_positives`, that are used to compute the
precision. This value is ultimately returned as `precision`, an idempotent
operation that simply divides `true_positives` by the sum of `true_positives`
and `false_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`. `update_op` weights each prediction by the corresponding value in
`weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: Scalar float `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately and whose value matches
`precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.precision is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'precision',
(predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
true_p, true_positives_update_op = true_positives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
false_p, false_positives_update_op = false_positives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_precision(tp, fp, name):
return array_ops.where(
math_ops.greater(tp + fp, 0), math_ops.div(tp, tp + fp), 0, name)
def once_across_replicas(_, true_p, false_p):
return compute_precision(true_p, false_p, 'value')
p = _aggregate_across_replicas(metrics_collections, once_across_replicas,
true_p, false_p)
update_op = compute_precision(true_positives_update_op,
false_positives_update_op, 'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return p, update_op
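# Illustrative streaming-precision sketch (hypothetical helper, not part of
# this module), assuming TF 1.x graph mode: update_op accumulates true/false
# positives across batches, while the value tensor reads the current ratio.
def _example_streaming_precision_sketch():
  """Hypothetical usage sketch for streaming precision over two batches."""
  import tensorflow as tf  # assumption: TF 1.x with graph execution
  labels_ph = tf.placeholder(tf.bool, [None])
  preds_ph = tf.placeholder(tf.bool, [None])
  prec, prec_update = tf.metrics.precision(labels_ph, preds_ph)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(prec_update, {labels_ph: [True, False, True],
                           preds_ph: [True, True, True]})   # 2 TP, 1 FP
    sess.run(prec_update, {labels_ph: [False, True],
                           preds_ph: [False, True]})         # +1 TP
    return sess.run(prec)  # -> 3 / (3 + 1) = 0.75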
@tf_export('metrics.precision_at_thresholds')
def precision_at_thresholds(labels,
predictions,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision values for different `thresholds` on `predictions`.
The `precision_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `precision[i]` is defined as the total
weight of values in `predictions` above `thresholds[i]` whose corresponding
entry in `labels` is `True`, divided by the total weight of values in
`predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
false_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.precision_at_thresholds is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'precision_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights, includes=('tp', 'fp'))
# Avoid division by zero.
epsilon = 1e-7
def compute_precision(tp, fp, name):
return math_ops.div(tp, epsilon + tp + fp, name='precision_' + name)
def precision_across_replicas(_, values):
return compute_precision(values['tp'], values['fp'], 'value')
prec = _aggregate_across_replicas(
metrics_collections, precision_across_replicas, values)
update_op = compute_precision(update_ops['tp'], update_ops['fp'],
'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return prec, update_op
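# Plain-NumPy mirror (hypothetical helper, not part of this module) of the
# per-threshold precision computed above, including the epsilon guard against
# division by zero.
def _example_precision_at_thresholds_numpy_sketch():
  """Hypothetical NumPy check of the documented per-threshold formula."""
  import numpy as np
  labels = np.array([1, 1, 0, 1], dtype=bool)
  scores = np.array([0.9, 0.4, 0.8, 0.1])
  thresholds = np.array([0.3, 0.5, 0.7])
  pred_pos = scores[None, :] > thresholds[:, None]   # [num_thresholds, batch]
  tp = np.sum(pred_pos & labels[None, :], axis=1).astype(np.float64)
  fp = np.sum(pred_pos & ~labels[None, :], axis=1).astype(np.float64)
  return tp / (1e-7 + tp + fp)  # -> approx. [0.667, 0.5, 0.5]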
@tf_export('metrics.recall')
def recall(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall of the predictions with respect to the labels.
The `recall` function creates two local variables, `true_positives`
and `false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `recall`. `update_op`
weights each prediction by the corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: Scalar float `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately and whose value matches
`recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
    raise RuntimeError('tf.metrics.recall is not '
                       'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'recall',
(predictions, labels, weights)):
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
true_p, true_positives_update_op = true_positives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
false_n, false_negatives_update_op = false_negatives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_recall(true_p, false_n, name):
return array_ops.where(
math_ops.greater(true_p + false_n, 0),
math_ops.div(true_p, true_p + false_n), 0, name)
def once_across_replicas(_, true_p, false_n):
return compute_recall(true_p, false_n, 'value')
rec = _aggregate_across_replicas(
metrics_collections, once_across_replicas, true_p, false_n)
update_op = compute_recall(true_positives_update_op,
false_negatives_update_op, 'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return rec, update_op
def _at_k_name(name, k=None, class_id=None):
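  """Returns a default op name such as 'recall_at_5' or 'recall_at_5_class3'."""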
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
def _select_class_id(ids, selected_id):
"""Filter all but `selected_id` out of `ids`.
Args:
ids: `int64` `Tensor` or `SparseTensor` of IDs.
selected_id: Int id to select.
Returns:
`SparseTensor` of same dimensions as `ids`. This contains only the entries
equal to `selected_id`.
"""
ids = sparse_tensor.convert_to_tensor_or_sparse_tensor(ids)
if isinstance(ids, sparse_tensor.SparseTensor):
return sparse_ops.sparse_retain(ids, math_ops.equal(ids.values,
selected_id))
# TODO(ptucker): Make this more efficient, maybe add a sparse version of
# tf.equal and tf.reduce_any?
# Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
ids_last_dim = array_ops.size(ids_shape) - 1
filled_selected_id_shape = math_ops.reduced_shape(ids_shape,
array_ops.reshape(
ids_last_dim, [1]))
# Intersect `ids` with the selected ID.
filled_selected_id = array_ops.fill(filled_selected_id_shape,
math_ops.to_int64(selected_id))
result = sets.set_intersection(filled_selected_id, ids)
return sparse_tensor.SparseTensor(
indices=result.indices, values=result.values, dense_shape=ids_shape)
def _maybe_select_class_id(labels, predictions_idx, selected_id=None):
"""If class ID is specified, filter all other classes.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]
where N >= 1. Commonly, N=1 and `predictions_idx` has shape
[batch size, k].
selected_id: Int id to select.
Returns:
Tuple of `labels` and `predictions_idx`, possibly with classes removed.
"""
if selected_id is None:
return labels, predictions_idx
return (_select_class_id(labels, selected_id),
_select_class_id(predictions_idx, selected_id))
def _sparse_true_positive_at_k(labels,
predictions_idx,
class_id=None,
weights=None,
name=None):
"""Calculates true positives for recall@k and precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of operation.
Returns:
A [D1, ... DN] `Tensor` of true positive counts.
"""
with ops.name_scope(name, 'true_positives',
(predictions_idx, labels, weights)):
labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,
class_id)
tp = sets.set_size(sets.set_intersection(predictions_idx, labels))
tp = math_ops.to_double(tp)
if weights is not None:
with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(
weights, tp),)):
weights = math_ops.to_double(weights)
tp = math_ops.multiply(tp, weights)
return tp
def _streaming_sparse_true_positive_at_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step true positives for recall@k and precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(name, _at_k_name('true_positive', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
tp = _sparse_true_positive_at_k(
predictions_idx=predictions_idx,
labels=labels,
class_id=class_id,
weights=weights)
batch_total_tp = math_ops.to_double(math_ops.reduce_sum(tp))
var = metric_variable([], dtypes.float64, name=scope)
return var, state_ops.assign_add(var, batch_total_tp, name='update')
def _sparse_false_negative_at_k(labels,
predictions_idx,
class_id=None,
weights=None):
"""Calculates false negatives for recall@k.
  If `class_id` is specified, calculate binary false negatives for `class_id`
  only.
  If `class_id` is not specified, calculate metrics for `k` predicted vs
  `n` label classes, where `n` is the 2nd dimension of `labels`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
Returns:
A [D1, ... DN] `Tensor` of false negative counts.
"""
with ops.name_scope(None, 'false_negatives',
(predictions_idx, labels, weights)):
labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,
class_id)
fn = sets.set_size(
sets.set_difference(predictions_idx, labels, aminusb=False))
fn = math_ops.to_double(fn)
if weights is not None:
with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(
weights, fn),)):
weights = math_ops.to_double(weights)
fn = math_ops.multiply(fn, weights)
return fn
def _streaming_sparse_false_negative_at_k(labels,
predictions_idx,
k,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step false negatives for recall@k.
  If `class_id` is specified, calculate binary false negatives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(name, _at_k_name('false_negative', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
fn = _sparse_false_negative_at_k(
predictions_idx=predictions_idx,
labels=labels,
class_id=class_id,
weights=weights)
batch_total_fn = math_ops.to_double(math_ops.reduce_sum(fn))
var = metric_variable([], dtypes.float64, name=scope)
return var, state_ops.assign_add(var, batch_total_fn, name='update')
@tf_export('metrics.recall_at_k')
def recall_at_k(labels,
predictions,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of the predictions with respect to sparse labels.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing
the fraction of them for which `class_id` is in the top-k `predictions`.
If `class_id` is not specified, we'll calculate recall as how often on
average a class among the labels of a batch entry is in the top-k
`predictions`.
`sparse_recall_at_k` creates two local variables,
`true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
the recall_at_k frequency. This frequency is ultimately returned as
`recall_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false negatives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_negative_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.recall_at_k is not '
'supported when eager execution is enabled.')
with ops.name_scope(name, _at_k_name('recall', k, class_id=class_id),
(predictions, labels, weights)) as scope:
_, top_k_idx = nn.top_k(predictions, k)
return recall_at_top_k(
labels=labels,
predictions_idx=top_k_idx,
k=k,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=scope)
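# Illustrative recall@k sketch (hypothetical helper, not part of this module),
# assuming TF 1.x graph mode. Labels are sparse class ids; a row counts as a
# true positive when its label id appears among the top-k logits.
def _example_recall_at_k_sketch():
  """Hypothetical usage sketch for recall@k with sparse labels."""
  import tensorflow as tf  # assumption: TF 1.x with graph execution
  logits = tf.constant([[0.1, 0.9, 0.0],
                        [0.8, 0.15, 0.05]])
  labels = tf.constant([[1], [2]], dtype=tf.int64)
  rec, rec_update = tf.metrics.recall_at_k(labels, logits, k=2)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(rec_update)
    return sess.run(rec)  # -> 0.5: class 1 is in row 0's top-2, class 2 misses row 1's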
@tf_export('metrics.recall_at_top_k')
def recall_at_top_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of top-k predictions with respect to sparse labels.
Differs from `recall_at_k` in that predictions must be in the form of top `k`
class indices, whereas `recall_at_k` expects logits. Refer to `recall_at_k`
for more details.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and predictions has shape [batch size, k]. The final
dimension contains the top `k` predicted class indices. [D1, ... DN] must
match `labels`.
k: Integer, k for @k metric. Only used for the default op name.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
with ops.name_scope(name, _at_k_name('recall', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
labels = _maybe_expand_labels(labels, predictions_idx)
top_k_idx = math_ops.to_int64(predictions_idx)
tp, tp_update = _streaming_sparse_true_positive_at_k(
predictions_idx=top_k_idx,
labels=labels,
k=k,
class_id=class_id,
weights=weights)
fn, fn_update = _streaming_sparse_false_negative_at_k(
predictions_idx=top_k_idx,
labels=labels,
k=k,
class_id=class_id,
weights=weights)
def compute_recall(_, tp, fn):
return math_ops.div(tp, math_ops.add(tp, fn), name=scope)
metric = _aggregate_across_replicas(
metrics_collections, compute_recall, tp, fn)
update = math_ops.div(
tp_update, math_ops.add(tp_update, fn_update), name='update')
if updates_collections:
ops.add_to_collections(updates_collections, update)
return metric, update
@tf_export('metrics.recall_at_thresholds')
def recall_at_thresholds(labels,
predictions,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `recall[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `True`, divided by the total weight of `True` values in `labels`
(`true_positives[i] / (true_positives[i] + false_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `recall`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.recall_at_thresholds is not '
'supported when eager execution is enabled.')
with variable_scope.variable_scope(name, 'recall_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights, includes=('tp', 'fn'))
# Avoid division by zero.
epsilon = 1e-7
def compute_recall(tp, fn, name):
return math_ops.div(tp, epsilon + tp + fn, name='recall_' + name)
def recall_across_replicas(_, values):
return compute_recall(values['tp'], values['fn'], 'value')
rec = _aggregate_across_replicas(
metrics_collections, recall_across_replicas, values)
update_op = compute_recall(update_ops['tp'], update_ops['fn'], 'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return rec, update_op
@tf_export('metrics.root_mean_squared_error')
def root_mean_squared_error(labels,
predictions,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the root mean squared error between the labels and predictions.
The `root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` of the same shape as `predictions`.
predictions: A `Tensor` of arbitrary shape.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.root_mean_squared_error is not '
'supported when eager execution is enabled.')
predictions, labels, weights = _remove_squeezable_dimensions(
predictions=predictions, labels=labels, weights=weights)
mse, update_mse_op = mean_squared_error(labels, predictions, weights, None,
None, name or
'root_mean_squared_error')
once_across_replicas = lambda _, mse: math_ops.sqrt(mse)
rmse = _aggregate_across_replicas(
metrics_collections, once_across_replicas, mse)
update_rmse_op = math_ops.sqrt(update_mse_op)
if updates_collections:
ops.add_to_collections(updates_collections, update_rmse_op)
return rmse, update_rmse_op
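# Illustrative RMSE sketch (hypothetical helper, not part of this module),
# assuming TF 1.x graph mode: the metric is the square root of the streaming
# mean squared error.
def _example_root_mean_squared_error_sketch():
  """Hypothetical usage sketch for the streaming RMSE metric."""
  import tensorflow as tf  # assumption: TF 1.x with graph execution
  labels = tf.constant([1.0, 2.0, 3.0])
  predictions = tf.constant([1.0, 2.0, 5.0])
  rmse, rmse_update = tf.metrics.root_mean_squared_error(labels, predictions)
  with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(rmse_update)
    return sess.run(rmse)  # sqrt((0 + 0 + 4) / 3) ~= 1.1547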
@tf_export('metrics.sensitivity_at_specificity')
def sensitivity_at_specificity(labels,
predictions,
specificity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the specificity at a given sensitivity.
The `sensitivity_at_specificity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the sensitivity at the given
specificity value. The threshold for the given specificity value is computed
and used to evaluate the corresponding sensitivity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
specificity: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
specificity.
metrics_collections: An optional list of collections that `sensitivity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
sensitivity: A scalar `Tensor` representing the sensitivity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `sensitivity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`specificity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.sensitivity_at_specificity is not '
'supported when eager execution is enabled.')
if specificity < 0 or specificity > 1:
raise ValueError('`specificity` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'sensitivity_at_specificity',
(predictions, labels, weights)):
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
def compute_sensitivity_at_specificity(tp, tn, fp, fn, name):
specificities = math_ops.div(tn, tn + fp + kepsilon)
tf_index = math_ops.argmin(math_ops.abs(specificities - specificity), 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
# Now, we have the implicit threshold, so compute the sensitivity:
return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + kepsilon,
name)
def sensitivity_across_replicas(_, values):
return compute_sensitivity_at_specificity(
values['tp'], values['tn'], values['fp'], values['fn'], 'value')
sensitivity = _aggregate_across_replicas(
metrics_collections, sensitivity_across_replicas, values)
update_op = compute_sensitivity_at_specificity(
update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],
'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return sensitivity, update_op
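# Plain-NumPy mirror (hypothetical helper, not part of this module) of
# compute_sensitivity_at_specificity above: pick the threshold whose
# specificity is closest to the target, then read off the sensitivity at that
# threshold.
def _example_sensitivity_at_specificity_numpy_sketch():
  """Hypothetical NumPy check of the threshold-selection logic."""
  import numpy as np
  kepsilon = 1e-7
  # Hypothetical per-threshold confusion counts, as produced by
  # _confusion_matrix_at_thresholds.
  tp = np.array([8., 6., 3.])
  fn = np.array([2., 4., 7.])
  tn = np.array([5., 8., 9.])
  fp = np.array([5., 2., 1.])
  target_specificity = 0.75
  specificities = tn / (tn + fp + kepsilon)                  # [0.5, 0.8, 0.9]
  idx = np.argmin(np.abs(specificities - target_specificity))
  return tp[idx] / (tp[idx] + fn[idx] + kepsilon)            # -> 0.6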
def _expand_and_tile(tensor, multiple, dim=0, name=None):
"""Slice `tensor` shape in 2, then tile along the sliced dimension.
A new dimension is inserted in shape of `tensor` before `dim`, then values are
tiled `multiple` times along the new dimension.
Args:
tensor: Input `Tensor` or `SparseTensor`.
multiple: Integer, number of times to tile.
dim: Integer, dimension along which to tile.
name: Name of operation.
Returns:
`Tensor` result of expanding and tiling `tensor`.
Raises:
ValueError: if `multiple` is less than 1, or `dim` is not in
`[-rank(tensor), rank(tensor)]`.
"""
if multiple < 1:
raise ValueError('Invalid multiple %s, must be > 0.' % multiple)
with ops.name_scope(name, 'expand_and_tile',
(tensor, multiple, dim)) as scope:
# Sparse.
tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
if isinstance(tensor, sparse_tensor.SparseTensor):
if dim < 0:
expand_dims = array_ops.reshape(
array_ops.size(tensor.dense_shape) + dim, [1])
else:
expand_dims = [dim]
expanded_shape = array_ops.concat(
(array_ops.slice(tensor.dense_shape, [0], expand_dims), [1],
array_ops.slice(tensor.dense_shape, expand_dims, [-1])),
0,
name='expanded_shape')
expanded = sparse_ops.sparse_reshape(
tensor, shape=expanded_shape, name='expand')
if multiple == 1:
return expanded
return sparse_ops.sparse_concat(
dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)
# Dense.
expanded = array_ops.expand_dims(
tensor, dim if (dim >= 0) else (dim - 1), name='expand')
if multiple == 1:
return expanded
ones = array_ops.ones_like(array_ops.shape(tensor))
tile_multiples = array_ops.concat(
(ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
return array_ops.tile(expanded, tile_multiples, name=scope)
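# NumPy sketch (hypothetical helper, not part of this module) of the dense path
# of _expand_and_tile with dim=-1, the way it is used below to build
# `labels_per_k`: a new axis is inserted before the last one and tiled k times.
def _example_expand_and_tile_numpy_sketch():
  """Hypothetical shape check for the dense expand-and-tile behaviour."""
  import numpy as np
  labels = np.array([[3, 7],
                     [1, 4]])                        # [batch=2, num_labels=2]
  k = 3
  labels_per_k = np.tile(labels[:, None, :], (1, k, 1))
  return labels_per_k.shape                          # -> (2, 3, 2)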
def _num_relevant(labels, k):
"""Computes number of relevant values for each row in labels.
For labels with shape [D1, ... DN, num_labels], this is the minimum of
`num_labels` and `k`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels].
k: Integer, k for @k metric.
Returns:
Integer `Tensor` of shape [D1, ... DN], where each value is the number of
relevant values for that row.
Raises:
ValueError: if inputs have invalid dtypes or values.
"""
if k < 1:
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
# For SparseTensor, calculate separate count for each row.
labels = sparse_tensor.convert_to_tensor_or_sparse_tensor(labels)
if isinstance(labels, sparse_tensor.SparseTensor):
return math_ops.minimum(sets.set_size(labels), k, name=scope)
# For dense Tensor, calculate scalar count based on last dimension, and
# tile across labels shape.
labels_shape = array_ops.shape(labels)
labels_size = labels_shape[-1]
num_relevant_scalar = math_ops.minimum(labels_size, k)
return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
def _sparse_average_precision_at_top_k(labels, predictions_idx):
"""Computes average precision@k of predictions with respect to sparse labels.
From en.wikipedia.org/wiki/Information_retrieval#Average_precision, formula
for each row is:
AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items
A "row" is the elements in dimension [D1, ... DN] of `predictions_idx`,
`labels`, and the result `Tensors`. In the common case, this is [batch_size].
Each row of the results contains the average precision for that row.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`.
Values should be in range [0, num_classes).
predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final
dimension must be set and contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`. Values should be in range
[0, num_classes).
Returns:
`float64` `Tensor` of shape [D1, ... DN], where each value is the average
precision for that row.
Raises:
ValueError: if the last dimension of predictions_idx is not set.
"""
with ops.name_scope(None, 'average_precision',
(predictions_idx, labels)) as scope:
predictions_idx = math_ops.to_int64(predictions_idx, name='predictions_idx')
if predictions_idx.get_shape().ndims == 0:
raise ValueError('The rank of predictions_idx must be at least 1.')
k = predictions_idx.get_shape().as_list()[-1]
if k is None:
raise ValueError('The last dimension of predictions_idx must be set.')
labels = _maybe_expand_labels(labels, predictions_idx)
# Expand dims to produce [D1, ... DN, k, 1] tensor. This gives us a separate
# prediction for each k, so we can calculate separate true positive values
# for each k.
predictions_idx_per_k = array_ops.expand_dims(
predictions_idx, -1, name='predictions_idx_per_k')
# Replicate labels k times to produce [D1, ... DN, k, num_labels] tensor.
labels_per_k = _expand_and_tile(
labels, multiple=k, dim=-1, name='labels_per_k')
# The following tensors are all of shape [D1, ... DN, k], containing values
# per row, per k value.
# `relevant_per_k` (int32) - Relevance indicator, 1 if the prediction at
# that k value is correct, 0 otherwise. This is the "rel_{i}" term from
# the formula above.
# `tp_per_k` (int32) - True positive counts.
# `retrieved_per_k` (int32) - Number of predicted values at each k. This is
# the precision denominator.
# `precision_per_k` (float64) - Precision at each k. This is the "P_{i}"
# term from the formula above.
# `relevant_precision_per_k` (float64) - Relevant precisions; i.e.,
# precisions at all k for which relevance indicator is true.
relevant_per_k = _sparse_true_positive_at_k(
labels_per_k, predictions_idx_per_k, name='relevant_per_k')
tp_per_k = math_ops.cumsum(relevant_per_k, axis=-1, name='tp_per_k')
retrieved_per_k = math_ops.cumsum(
array_ops.ones_like(relevant_per_k), axis=-1, name='retrieved_per_k')
precision_per_k = math_ops.div(
math_ops.to_double(tp_per_k),
math_ops.to_double(retrieved_per_k),
name='precision_per_k')
relevant_precision_per_k = math_ops.multiply(
precision_per_k,
math_ops.to_double(relevant_per_k),
name='relevant_precision_per_k')
# Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor.
precision_sum = math_ops.reduce_sum(
relevant_precision_per_k, reduction_indices=(-1,), name='precision_sum')
# Divide by number of relevant items to get average precision. These are
# the "num_relevant_items" and "AveP" terms from the formula above.
num_relevant_items = math_ops.to_double(_num_relevant(labels, k))
return math_ops.div(precision_sum, num_relevant_items, name=scope)
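# Plain-Python check (hypothetical helper, not part of this module) of the AveP
# formula documented above, worked for a single row of top-k predictions.
def _example_average_precision_formula_sketch():
  """Hypothetical worked example of AveP = sum_i P_i * rel_i / num_relevant."""
  predictions_idx = [4, 1, 7]   # top-3 predicted class ids for one row
  label_ids = {1, 7, 9}         # relevant class ids for that row
  k = len(predictions_idx)
  num_relevant = min(len(label_ids), k)
  tp = 0
  avep = 0.0
  for i, cls in enumerate(predictions_idx, start=1):
    rel = 1 if cls in label_ids else 0   # rel_{i}
    tp += rel
    avep += (tp / float(i)) * rel        # P_{i} * rel_{i}
  return avep / num_relevant             # -> (1/2 + 2/3) / 3 ~= 0.389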
def _streaming_sparse_average_precision_at_top_k(labels,
predictions_idx,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
`sparse_average_precision_at_top_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate
the true positives and false positives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_positive_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions_idx`.
Values should be in range [0, num_classes).
predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and `predictions_idx` has shape [batch size, k]. The final
dimension contains the top `k` predicted class indices. [D1, ... DN] must
match `labels`. Values should be in range [0, num_classes).
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
"""
with ops.name_scope(name, 'average_precision_at_top_k',
(predictions_idx, labels, weights)) as scope:
# Calculate per-example average precision, and apply weights.
average_precision = _sparse_average_precision_at_top_k(
predictions_idx=predictions_idx, labels=labels)
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_double(weights), average_precision)
average_precision = math_ops.multiply(average_precision, weights)
# Create accumulation variables and update ops for max average precision and
# total average precision.
with ops.name_scope(None, 'max', (average_precision,)) as max_scope:
# `max` is the max possible precision. Since max for any row is 1.0:
# - For the unweighted case, this is just the number of rows.
# - For the weighted case, it's the sum of the weights broadcast across
# `average_precision` rows.
max_var = metric_variable([], dtypes.float64, name=max_scope)
if weights is None:
batch_max = math_ops.to_double(
array_ops.size(average_precision, name='batch_max'))
else:
batch_max = math_ops.reduce_sum(weights, name='batch_max')
max_update = state_ops.assign_add(max_var, batch_max, name='update')
with ops.name_scope(None, 'total', (average_precision,)) as total_scope:
total_var = metric_variable([], dtypes.float64, name=total_scope)
batch_total = math_ops.reduce_sum(average_precision, name='batch_total')
total_update = state_ops.assign_add(total_var, batch_total, name='update')
# Divide total by max to get mean, for both vars and the update ops.
def precision_across_replicas(_, total_var, max_var):
return _safe_scalar_div(total_var, max_var, name='mean')
mean_average_precision = _aggregate_across_replicas(
metrics_collections, precision_across_replicas, total_var, max_var)
update = _safe_scalar_div(total_update, max_update, name=scope)
if updates_collections:
ops.add_to_collections(updates_collections, update)
return mean_average_precision, update
@tf_export('metrics.sparse_average_precision_at_k')
@deprecated(None, 'Use average_precision_at_k instead')
def sparse_average_precision_at_k(labels,
predictions,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Renamed to `average_precision_at_k`, please use that method instead."""
return average_precision_at_k(
labels=labels,
predictions=predictions,
k=k,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@tf_export('metrics.average_precision_at_k')
def average_precision_at_k(labels,
predictions,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
`average_precision_at_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
Raises:
ValueError: if k is invalid.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
    raise RuntimeError('tf.metrics.average_precision_at_k is not '
                       'supported when eager execution is enabled.')
if k < 1:
raise ValueError('Invalid k=%s.' % k)
with ops.name_scope(name, _at_k_name('average_precision', k),
(predictions, labels, weights)) as scope:
# Calculate top k indices to produce [D1, ... DN, k] tensor.
_, predictions_idx = nn.top_k(predictions, k)
return _streaming_sparse_average_precision_at_top_k(
labels=labels,
predictions_idx=predictions_idx,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=scope)
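# Illustrative usage of the streaming pattern above (a sketch, not part of the
# original source; `labels_t`, `logits`, `sess`, and `num_batches` are
# hypothetical names):
#   metric, update_op = tf.metrics.average_precision_at_k(labels_t, logits, k=5)
#   sess.run(tf.local_variables_initializer())
#   for _ in range(num_batches):
#     sess.run(update_op)
#   mean_ap = sess.run(metric)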
def _sparse_false_positive_at_k(labels,
predictions_idx,
class_id=None,
weights=None):
"""Calculates false positives for precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels_sparse`.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
Returns:
A [D1, ... DN] `Tensor` of false positive counts.
"""
with ops.name_scope(None, 'false_positives',
(predictions_idx, labels, weights)):
labels, predictions_idx = _maybe_select_class_id(labels, predictions_idx,
class_id)
fp = sets.set_size(
sets.set_difference(predictions_idx, labels, aminusb=True))
fp = math_ops.to_double(fp)
if weights is not None:
with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(
weights, fp),)):
weights = math_ops.to_double(weights)
fp = math_ops.multiply(fp, weights)
return fp
def _streaming_sparse_false_positive_at_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
name=None):
"""Calculates weighted per step false positives for precision@k.
If `class_id` is specified, calculate binary true positives for `class_id`
only.
If `class_id` is not specified, calculate metrics for `k` predicted vs
`n` label classes, where `n` is the 2nd dimension of `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
name: Name of new variable, and namespace for other dependent ops.
Returns:
A tuple of `Variable` and update `Operation`.
Raises:
ValueError: If `weights` is not `None` and has an incompatible shape.
"""
with ops.name_scope(name, _at_k_name('false_positive', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
fp = _sparse_false_positive_at_k(
predictions_idx=predictions_idx,
labels=labels,
class_id=class_id,
weights=weights)
batch_total_fp = math_ops.to_double(math_ops.reduce_sum(fp))
var = metric_variable([], dtypes.float64, name=scope)
return var, state_ops.assign_add(var, batch_total_fp, name='update')
@tf_export('metrics.precision_at_top_k')
def precision_at_top_k(labels,
predictions_idx,
k=None,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
Differs from `sparse_precision_at_k` in that predictions must be in the form
of top `k` class indices, whereas `sparse_precision_at_k` expects logits.
Refer to `sparse_precision_at_k` for more details.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions_idx: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, k].
The final dimension contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`.
k: Integer, k for @k metric. Only used for the default op name.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes], where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.precision_at_top_k is not '
'supported when eager execution is enabled.')
with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
(predictions_idx, labels, weights)) as scope:
labels = _maybe_expand_labels(labels, predictions_idx)
top_k_idx = math_ops.to_int64(predictions_idx)
tp, tp_update = _streaming_sparse_true_positive_at_k(
predictions_idx=top_k_idx,
labels=labels,
k=k,
class_id=class_id,
weights=weights)
fp, fp_update = _streaming_sparse_false_positive_at_k(
predictions_idx=top_k_idx,
labels=labels,
k=k,
class_id=class_id,
weights=weights)
def precision_across_replicas(_, tp, fp):
return math_ops.div(tp, math_ops.add(tp, fp), name=scope)
metric = _aggregate_across_replicas(
metrics_collections, precision_across_replicas, tp, fp)
update = math_ops.div(
tp_update, math_ops.add(tp_update, fp_update), name='update')
if updates_collections:
ops.add_to_collections(updates_collections, update)
return metric, update
@tf_export('metrics.sparse_precision_at_k')
@deprecated(None, 'Use precision_at_k instead')
def sparse_precision_at_k(labels,
predictions,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Renamed to `precision_at_k`, please use that method instead."""
return precision_at_k(
labels=labels,
predictions=predictions,
k=k,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@tf_export('metrics.precision_at_k')
def precision_at_k(labels,
predictions,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
If `class_id` is specified, we calculate precision by considering only the
entries in the batch for which `class_id` is in the top-k highest
`predictions`, and computing the fraction of them for which `class_id` is
indeed a correct label.
If `class_id` is not specified, we'll calculate precision as how often on
average a class among the top-k classes with the highest predicted values
of a batch entry is correct and can be found in the label for that entry.
`precision_at_k` creates two local variables,
`true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_positive_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `predictions`. Values
should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range are ignored.
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes], where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.sparse_precision_at_k is not '
'supported when eager execution is enabled.')
with ops.name_scope(name, _at_k_name('precision', k, class_id=class_id),
(predictions, labels, weights)) as scope:
_, top_k_idx = nn.top_k(predictions, k)
return precision_at_top_k(
labels=labels,
predictions_idx=top_k_idx,
k=k,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=scope)
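# Worked example (illustrative, not part of the original source): if the top-3
# predicted class indices for an example are {7, 2, 9} and its label set is
# {2}, then true_positives = 1 and false_positives = 2, so precision@3 = 1/3.
# With `class_id` set, only entries whose top-k contains that class are counted.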
@tf_export('metrics.specificity_at_sensitivity')
def specificity_at_sensitivity(labels,
predictions,
sensitivity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the specificity at a given sensitivity.
The `specificity_at_sensitivity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the specificity at the given
sensitivity value. The threshold for the given sensitivity value is computed
and used to evaluate the corresponding specificity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`specificity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
sensitivity: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
specificity: A scalar `Tensor` representing the specificity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `specificity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`sensitivity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
RuntimeError: If eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('tf.metrics.specificity_at_sensitivity is not '
'supported when eager execution is enabled.')
if sensitivity < 0 or sensitivity > 1:
raise ValueError('`sensitivity` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'specificity_at_sensitivity',
(predictions, labels, weights)):
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [
(i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)
]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 - kepsilon]
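    # For example (illustrative), num_thresholds=5 yields thresholds of
    # [-1e-07, 0.25, 0.5, 0.75, 0.9999999]: evenly spaced interior points with
    # the end points nudged by kepsilon.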
values, update_ops = _confusion_matrix_at_thresholds(
labels, predictions, thresholds, weights)
def compute_specificity_at_sensitivity(tp, tn, fp, fn, name):
"""Computes the specificity at the given sensitivity.
Args:
tp: True positives.
tn: True negatives.
fp: False positives.
fn: False negatives.
name: The name of the operation.
Returns:
The specificity using the aggregated values.
"""
sensitivities = math_ops.div(tp, tp + fn + kepsilon)
# We'll need to use this trick until tf.argmax allows us to specify
# whether we should use the first or last index in case of ties.
min_val = math_ops.reduce_min(math_ops.abs(sensitivities - sensitivity))
indices_at_minval = math_ops.equal(
math_ops.abs(sensitivities - sensitivity), min_val)
indices_at_minval = math_ops.to_int64(indices_at_minval)
indices_at_minval = math_ops.cumsum(indices_at_minval)
tf_index = math_ops.argmax(indices_at_minval, 0)
tf_index = math_ops.cast(tf_index, dtypes.int32)
# Now, we have the implicit threshold, so compute the specificity:
return math_ops.div(tn[tf_index], tn[tf_index] + fp[tf_index] + kepsilon,
name)
def specificity_across_replicas(_, values):
return compute_specificity_at_sensitivity(
values['tp'], values['tn'], values['fp'], values['fn'], 'value')
specificity = _aggregate_across_replicas(
metrics_collections, specificity_across_replicas, values)
update_op = compute_specificity_at_sensitivity(
update_ops['tp'], update_ops['tn'], update_ops['fp'], update_ops['fn'],
'update_op')
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return specificity, update_op
| apache-2.0 | -4,886,929,974,480,028,000 | 43.016527 | 80 | 0.658819 | false |
marzig76/blexplor | opcode.py | 1 | 3840 | """A simple module housing Bitcoin Script OP Code values."""
class opcode(object):
"""Define Bitcoin Script OP Code values."""
opcodes = {}
# Constants
opcodes['OP_FALSE'] = 0x00
opcodes['OP_1NEGATE'] = 0x4f
opcodes['OP_TRUE'] = 0x51
opcodes['OP_2'] = 0x52
opcodes['OP_3'] = 0x53
opcodes['OP_4'] = 0x54
opcodes['OP_5'] = 0x55
opcodes['OP_6'] = 0x56
opcodes['OP_7'] = 0x57
opcodes['OP_8'] = 0x58
opcodes['OP_9'] = 0x59
opcodes['OP_10'] = 0x5a
opcodes['OP_11'] = 0x5b
opcodes['OP_12'] = 0x5c
opcodes['OP_13'] = 0x5d
opcodes['OP_14'] = 0x5e
opcodes['OP_15'] = 0x5f
opcodes['OP_16'] = 0x60
# Flow Control
opcodes['OP_NOP'] = 0x61
opcodes['OP_IF'] = 0x63
opcodes['OP_NOTIF'] = 0x64
opcodes['OP_ELSE'] = 0x67
opcodes['OP_ENDIF'] = 0x68
opcodes['OP_VERIFY'] = 0x69
opcodes['OP_RETURN'] = 0x6a
# Stack
opcodes['OP_TOALTSTACK'] = 0x6b
opcodes['OP_FROMALTSTACK'] = 0x6c
opcodes['OP_IFDUP'] = 0x73
opcodes['OP_DEPTH'] = 0x74
opcodes['OP_DROP'] = 0x75
opcodes['OP_DUP'] = 0x76
opcodes['OP_NIP'] = 0x77
opcodes['OP_OVER'] = 0x78
opcodes['OP_PICK'] = 0x79
opcodes['OP_ROLL'] = 0x7a
opcodes['OP_ROT'] = 0x7b
opcodes['OP_SWAP'] = 0x7c
opcodes['OP_TUCK'] = 0x7d
opcodes['OP_2DROP'] = 0x6d
opcodes['OP_2DUP'] = 0x6e
opcodes['OP_3DUP'] = 0x6f
opcodes['OP_2OVER'] = 0x70
opcodes['OP_2ROT'] = 0x71
opcodes['OP_2SWAP'] = 0x72
# Splice
opcodes['OP_CAT'] = 0x7e
opcodes['OP_SUBSTR'] = 0x7f
opcodes['OP_LEFT'] = 0x80
opcodes['OP_RIGHT'] = 0x81
opcodes['OP_SIZE'] = 0x82
# Bitwise logic
opcodes['OP_INVERT'] = 0x83
opcodes['OP_AND'] = 0x84
opcodes['OP_OR'] = 0x85
opcodes['OP_XOR'] = 0x86
opcodes['OP_EQUAL'] = 0x87
opcodes['OP_EQUALVERIFY'] = 0x88
# Arithmetic
opcodes['OP_1ADD'] = 0x8b
opcodes['OP_1SUB'] = 0x8c
opcodes['OP_2MUL'] = 0x8d
opcodes['OP_2DIV'] = 0x8e
opcodes['OP_NEGATE'] = 0x8f
opcodes['OP_ABS'] = 0x90
opcodes['OP_NOT'] = 0x91
opcodes['OP_0NOTEQUAL'] = 0x92
opcodes['OP_ADD'] = 0x93
opcodes['OP_SUB'] = 0x94
opcodes['OP_MUL'] = 0x95
opcodes['OP_DIV'] = 0x96
opcodes['OP_MOD'] = 0x97
opcodes['OP_LSHIFT'] = 0x98
opcodes['OP_RSHIFT'] = 0x99
opcodes['OP_BOOLAND'] = 0x9a
opcodes['OP_BOOLOR'] = 0x9b
opcodes['OP_NUMEQUAL'] = 0x9c
opcodes['OP_NUMEQUALVERIFY'] = 0x9d
opcodes['OP_NUMNOTEQUAL'] = 0x9e
opcodes['OP_LESSTHAN'] = 0x9f
opcodes['OP_GREATERTHAN'] = 0xa0
opcodes['OP_LESSTHANOREQUAL'] = 0xa1
opcodes['OP_GREATERTHANOREQUAL'] = 0xa2
opcodes['OP_MIN'] = 0xa3
opcodes['OP_MAX'] = 0xa4
opcodes['OP_WITHIN'] = 0xa5
# Crypto
opcodes['OP_RIPEMD160'] = 0xa6
opcodes['OP_SHA1'] = 0xa7
opcodes['OP_SHA256'] = 0xa8
opcodes['OP_HASH160'] = 0xa9
opcodes['OP_HASH256'] = 0xaa
opcodes['opcodeSEPARATOR'] = 0xab
opcodes['OP_CHECKSIG'] = 0xac
opcodes['OP_CHECKSIGVERIFY'] = 0xad
opcodes['OP_CHECKMULTISIG'] = 0xae
opcodes['OP_CHECKMULTISIGVERIFY'] = 0xaf
# Locktime
opcodes['OP_CHECKLOCKTIMEVERIFY'] = 0xb1
opcodes['OP_CHECKSEQUENCEVERIFY'] = 0xb2
# Pseudo-words
opcodes['OP_PUBKEYHASH'] = 0xfd
opcodes['OP_PUBKEY'] = 0xfe
opcodes['OP_INVALIDOPCODE'] = 0xff
# Reserved words
opcodes['OP_RESERVED'] = 0x50
opcodes['OP_VER'] = 0x62
opcodes['OP_VERIF'] = 0x65
opcodes['OP_VERNOTIF'] = 0x66
opcodes['OP_RESERVED1'] = 0x89
opcodes['OP_RESERVED2'] = 0x8a
opcodes['OP_NOP1'] = 0xb0
opcodes['OP_NOP4'] = 0xb3
opcodes['OP_NOP5'] = 0xb4
opcodes['OP_NOP6'] = 0xb5
opcodes['OP_NOP7'] = 0xb6
opcodes['OP_NOP8'] = 0xb7
opcodes['OP_NOP9'] = 0xb8
opcodes['OP_NOP10'] = 0xb9
| gpl-3.0 | 5,800,116,986,678,497,000 | 26.826087 | 60 | 0.589323 | false |
shadycuz/cloudatcost-ansible-module | cac_inv.py | 1 | 6017 | #!/usr/bin/env python
"""
CloudAtCost external inventory script. Automatically finds hosts and
returns them under the host group 'cloudatcost'
Some code borrowed from linode.py inventory script by Dan Slimmon
"""
import os.path
# import re
import sys
import argparse
# from time import time
from cacpy import CACPy
# import ConfigParser
try:
import json
except ImportError:
import simplejson as json
_group = 'cloudatcost' # a default group
_prepend = 'cloud_' # Prepend all CloudAtCost data, to avoid conflicts
class CloudAtCostInventory(object):
def __init__(self):
"""Main execution path."""
self.api_key = None
self.api_user = None
self.args = self.parse_cli_args()
self.inventory = {}
self.groups = []
# CloudAtCost API Object
self.setupAPI()
self.update_inventory()
# Data to print
if self.args.host:
data_to_print = self.get_host_info(self.args.host)
elif self.args.list:
# Generates output
groups = {}
for group in self.groups:
groups[group]= [server['label'] for server
in self.inventory if server['label'] and server['group_label'] == group]
meta = {
'_meta': {
'hostvars': dict((server['label'],
self.get_host_info(label=server['label']))
for server in self.inventory)
}
}
data_to_print = groups.copy()
data_to_print.update(meta)
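            # Illustrative shape of the --list output (group and host names
            # are examples, not real inventory):
            # {"webservers": ["web01"],
            #  "_meta": {"hostvars": {"web01": {"ansible_host": "1.2.3.4"}}}}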
else:
data_to_print = "Error: Invalid options"
print(json_format_dict(data_to_print, True))
def update_inventory(self):
"""Makes a CloudAtCost API call to get the list of servers."""
res = self.api.get_server_info()
if res['status'] == 'ok':
self.inventory = res['data']
for server in self.inventory:
server['isnew'] = False
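                # Label convention implied below: a label of the form
                # "<group> <hostname>" is split into an Ansible group and a
                # host name, unlabeled servers go to the 'New' group, and
                # plain labels fall back to the default 'cloudatcost' group.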
if not server['label']:
server['label'] = server['servername']
server['group_label'] = 'New'
if 'New' not in self.groups:
self.groups.append('New')
server['isnew'] = True
else:
if ' ' in server['label']:
split = (server['label']).split()
server['label'] = split[1]
if split[0] not in self.groups:
self.groups.append(split[0])
server['group_label'] = split[0]
else:
if _group not in self.groups:
self.groups.append(_group)
server['group_label'] = _group
else:
print("Looks like CloudAtCost's API is down:")
print("")
print(res)
sys.exit(1)
def get_server(self, server_id=None, label=None):
"""Gets details about a specific server."""
for server in self.inventory:
if (server_id and server['id'] == server_id) or \
(label and server['label'] == label):
return server
return None
def get_host_info(self, label):
"""Get variables about a specific host."""
server = self.get_server(label=label)
if not server:
return json_format_dict({}, True)
retval = {}
for (key, value) in server.iteritems():
retval["{}{}".format(_prepend, key)] = value
# Set the SSH host information, so these inventory items can be used if
# their labels aren't FQDNs
retval['ansible_ssh_host'] = server["ip"]
retval['ansible_host'] = server["ip"]
if server['isnew'] or 'New' in server['group_label']:
retval['ansible_ssh_pass'] = server["rootpass"]
retval['ansible_pass'] = server["rootpass"]
return retval
def setupAPI(self):
# Setup the api_key
if not self.api_key:
try:
self.api_key = os.environ['CAC_API_KEY']
except KeyError, e:
print "Please provide API Key."
sys.exit(1)
# Setup the api_user
if not self.api_user:
try:
self.api_user = os.environ['CAC_API_USER']
except KeyError, e:
print "Please provide API User."
sys.exit(1)
# setup the auth
try:
self.api = CACPy(self.api_user, self.api_key)
self.api.get_resources()
except Exception, e:
print "Failed to contact CloudAtCost API."
print ""
print e
sys.exit(1)
@staticmethod
def parse_cli_args():
"""Command line argument processing"""
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on CloudAtCost')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true', default=True,
help='List servers (default: True)')
group.add_argument('--host', action='store',
help='Get all the variables about a specific server')
parser.add_argument('--refresh-cache', action='store_true',
default=False,
help='Force refresh of cache by making API requests to CloudAtCost (default: False - use cache files)')
return parser.parse_args()
def json_format_dict(data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string.
:param data: string
"""
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
CloudAtCostInventory()
| mit | -8,786,081,238,855,413,000 | 32.243094 | 131 | 0.525179 | false |
joferkington/tutorials | 1412_Tuning_and_AVO/tuning_wedge_v2.py | 1 | 10772 | """
Python script to generate a zero-offset synthetic from a 3-layer wedge model.
Created by: Wes Hamlyn
Create Date: 19-Aug-2014
Last Mod: 5-Feb-2015
-addition of bandpass wavelet
This script is provided without warranty of any kind.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
###########################################################
#
# DEFINE MODELING PARAMETERS HERE
#
# 3-Layer Model Parameters [Layer1, Layer2, Layer 3]
vp_mod = [2500.0, 2600.0, 2550.0] # P-wave velocity (m/s)
vs_mod = [1200.0, 1300.0, 1200.0] # S-wave velocity (m/s)
rho_mod= [1.95, 2.0, 1.98] # Density (g/cc)
dz_min = 0.0 # Minimum thickness of Layer 2 (m)
dz_max = 60.0 # Maximum thickness of Layer 2 (m)
dz_step= 1.0 # Thickness step from trace-to-trace (normally 1.0 m)
# Wavelet Parameters
wvlt_type = 'bandpass' # Valid values: 'ricker' or 'bandpass'
wvlt_length= 0.128 # Wavelet length in seconds
wvlt_phase = 0.0 # Wavelet phase in degrees
wvlt_scalar = 1.0 # Multiplier to scale wavelet amplitude (default = 1.0)
wvlt_cfreq = 30.0 # Ricker wavelet central frequency
f1 = 5.0 # Bandpass wavelet low truncation frequency
f2 = 10.0 # Bandpass wavelet low cut frequency
f3 = 50.0 # Bandpass wavelet high cut frequency
f4 = 65.0 # Bandpass wavelet high truncation frequency
# Trace Parameters
tmin = 0.0
tmax = 0.5
dt = 0.0001 # changing this from 0.0001 can affect the display quality
# Plot Parameters
min_plot_time = 0.15
max_plot_time = 0.3
excursion = 2
###########################################################
#
# FUNCTIONS DEFINITIONS
#
def plot_vawig(axhdl, data, t, excursion, highlight=None):
import numpy as np
import matplotlib.pyplot as plt
[ntrc, nsamp] = data.shape
t = np.hstack([0, t, t.max()])
for i in range(0, ntrc):
tbuf = excursion * data[i] / np.max(np.abs(data)) + i
tbuf = np.hstack([i, tbuf, i])
if i==highlight:
lw = 2
else:
lw = 0.5
axhdl.plot(tbuf, t, color='black', linewidth=lw)
plt.fill_betweenx(t, tbuf, i, where=tbuf>i, facecolor=[0.6,0.6,1.0], linewidth=0)
plt.fill_betweenx(t, tbuf, i, where=tbuf<i, facecolor=[1.0,0.7,0.7], linewidth=0)
axhdl.set_xlim((-excursion, ntrc+excursion))
axhdl.xaxis.tick_top()
axhdl.xaxis.set_label_position('top')
axhdl.invert_yaxis()
def ricker(cfreq, phase, dt, wvlt_length):
'''
Calculate a ricker wavelet
Usage:
------
    t, wvlt = ricker(cfreq, phase, dt, wvlt_length)
cfreq: central frequency of wavelet in Hz
phase: wavelet phase in degrees
dt: sample rate in seconds
wvlt_length: length of wavelet in seconds
'''
import numpy as np
import scipy.signal as signal
    nsamp = int(wvlt_length/dt + 1)
    t_max = wvlt_length*0.5
    t_min = -t_max
    t = np.linspace(t_min, t_max, nsamp)
wvlt = (1.0 - 2.0*(np.pi**2)*(cfreq**2)*(t**2)) * np.exp(-(np.pi**2)*(cfreq**2)*(t**2))
if phase != 0:
phase = phase*np.pi/180.0
wvlth = signal.hilbert(wvlt)
wvlth = np.imag(wvlth)
wvlt = np.cos(phase)*wvlt - np.sin(phase)*wvlth
return t, wvlt
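# Example call (illustrative; mirrors the wavelet parameters defined above):
#   wvlt_t, wvlt_amp = ricker(30.0, 0.0, 0.0001, 0.128)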
def wvlt_bpass(f1, f2, f3, f4, phase, dt, wvlt_length):
'''
Calculate a trapezoidal bandpass wavelet
Usage:
------
    t, wvlt = wvlt_bpass(f1, f2, f3, f4, phase, dt, wvlt_length)
f1: Low truncation frequency of wavelet in Hz
f2: Low cut frequency of wavelet in Hz
f3: High cut frequency of wavelet in Hz
f4: High truncation frequency of wavelet in Hz
phase: wavelet phase in degrees
dt: sample rate in seconds
wvlt_length: length of wavelet in seconds
'''
    from numpy.fft import fft, ifft, fftfreq, fftshift, ifftshift
    import scipy.signal as signal
nsamp = int(wvlt_length/dt + 1)
freq = fftfreq(nsamp, dt)
freq = fftshift(freq)
aspec = freq*0.0
pspec = freq*0.0
# Calculate slope and y-int for low frequency ramp
M1 = 1/(f2-f1)
b1 = -M1*f1
# Calculate slop and y-int for high frequency ramp
M2 = -1/(f4-f3)
b2 = -M2*f4
# Build initial frequency and filter arrays
freq = fftfreq(nsamp, dt)
freq = fftshift(freq)
filt = np.zeros(nsamp)
# Build LF ramp
idx = np.nonzero((np.abs(freq)>=f1) & (np.abs(freq)<f2))
filt[idx] = M1*np.abs(freq)[idx]+b1
# Build central filter flat
idx = np.nonzero((np.abs(freq)>=f2) & (np.abs(freq)<=f3))
filt[idx] = 1.0
# Build HF ramp
idx = np.nonzero((np.abs(freq)>f3) & (np.abs(freq)<=f4))
filt[idx] = M2*np.abs(freq)[idx]+b2
# Unshift the frequencies and convert filter to fourier coefficients
filt2 = ifftshift(filt)
Af = filt2*np.exp(np.zeros(filt2.shape)*1j)
# Convert filter to time-domain wavelet
wvlt = fftshift(ifft(Af))
wvlt = np.real(wvlt)
wvlt = wvlt/np.max(np.abs(wvlt)) # normalize wavelet by peak amplitude
# Generate array of wavelet times
t = np.linspace(-wvlt_length*0.5, wvlt_length*0.5, nsamp)
# Apply phase rotation if desired
if phase != 0:
phase = phase*np.pi/180.0
wvlth = signal.hilbert(wvlt)
wvlth = np.imag(wvlth)
wvlt = np.cos(phase)*wvlt - np.sin(phase)*wvlth
return t, wvlt
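# Example call (illustrative; mirrors the corner frequencies defined above):
# amplitude ramps up between f1 and f2, is flat from f2 to f3, and ramps down
# between f3 and f4.
#   wvlt_t, wvlt_amp = wvlt_bpass(5.0, 10.0, 50.0, 65.0, 0.0, 0.0001, 0.128)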
def calc_rc(vp_mod, rho_mod):
'''
rc_int = calc_rc(vp_mod, rho_mod)
'''
nlayers = len(vp_mod)
nint = nlayers - 1
rc_int = []
for i in range(0, nint):
buf1 = vp_mod[i+1]*rho_mod[i+1]-vp_mod[i]*rho_mod[i]
buf2 = vp_mod[i+1]*rho_mod[i+1]+vp_mod[i]*rho_mod[i]
buf3 = buf1/buf2
rc_int.append(buf3)
return rc_int
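# Worked example (illustrative) using the model parameters defined above:
#   interface 1: (2600*2.0 - 2500*1.95) / (2600*2.0 + 2500*1.95) = 325/10075 ~ +0.032
#   interface 2: (2550*1.98 - 2600*2.0) / (2550*1.98 + 2600*2.0) = -151/10249 ~ -0.015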
def calc_times(z_int, vp_mod):
'''
t_int = calc_times(z_int, vp_mod)
'''
nlayers = len(vp_mod)
nint = nlayers - 1
t_int = []
for i in range(0, nint):
if i == 0:
tbuf = z_int[i]/vp_mod[i]
t_int.append(tbuf)
else:
zdiff = z_int[i]-z_int[i-1]
tbuf = 2*zdiff/vp_mod[i] + t_int[i-1]
t_int.append(tbuf)
return t_int
def digitize_model(rc_int, t_int, t):
'''
rc = digitize_model(rc, t_int, t)
rc = reflection coefficients corresponding to interface times
t_int = interface times
t = regularly sampled time series defining model sampling
'''
import numpy as np
nlayers = len(rc_int)
nint = nlayers - 1
nsamp = len(t)
rc = list(np.zeros(nsamp,dtype='float'))
lyr = 0
for i in range(0, nsamp):
if t[i] >= t_int[lyr]:
rc[i] = rc_int[lyr]
lyr = lyr + 1
if lyr > nint:
break
return rc
##########################################################
#
# COMPUTATIONS BELOW HERE...
#
# Some handy constants
nlayers = len(vp_mod)
nint = nlayers - 1
nmodel = int((dz_max-dz_min)/dz_step+1)
# Generate wavelet
if wvlt_type == 'ricker':
wvlt_t, wvlt_amp = ricker(wvlt_cfreq, wvlt_phase, dt, wvlt_length)
elif wvlt_type == 'bandpass':
wvlt_t, wvlt_amp = wvlt_bpass(f1, f2, f3, f4, wvlt_phase, dt, wvlt_length)
# Apply amplitude scale factor to wavelet (to match seismic amplitude values)
wvlt_amp = wvlt_scalar * wvlt_amp
# Calculate reflectivities from model parameters
rc_int = calc_rc(vp_mod, rho_mod)
syn_zo = []
rc_zo = []
lyr_times = []
for model in range(0, nmodel):
# Calculate interface depths
z_int = [500.0]
z_int.append(z_int[0]+dz_min+dz_step*model)
# Calculate interface times
t_int = calc_times(z_int, vp_mod)
lyr_times.append(t_int)
# Digitize 3-layer model
nsamp = int((tmax-tmin)/dt) + 1
t = []
for i in range(0,nsamp):
t.append(i*dt)
rc = digitize_model(rc_int, t_int, t)
rc_zo.append(rc)
# Convolve wavelet with reflectivities
syn_buf = np.convolve(rc, wvlt_amp, mode='same')
syn_buf = list(syn_buf)
syn_zo.append(syn_buf)
print "finished step %i" % (model)
syn_zo = np.array(syn_zo)
t = np.array(t)
lyr_times = np.array(lyr_times)
lyr_indx = np.array(np.round(lyr_times/dt), dtype='int16')
# Transpose so that columns are traces; the index of the peak absolute
# amplitude in the flattened array, modulo the number of columns, gives the
# trace (thickness step) at which tuning occurs.
tuning_trace = np.argmax(np.abs(syn_zo.T)) % syn_zo.T.shape[1]
tuning_thickness = tuning_trace * dz_step
# Plotting Code
[ntrc, nsamp] = syn_zo.shape
fig = plt.figure(figsize=(12, 14))
fig.set_facecolor('white')
gs = gridspec.GridSpec(3, 1, height_ratios=[1, 1, 1])
ax0 = fig.add_subplot(gs[0])
ax0.plot(lyr_times[:,0], color='blue', lw=1.5)
ax0.plot(lyr_times[:,1], color='red', lw=1.5)
ax0.set_ylim((min_plot_time,max_plot_time))
ax0.invert_yaxis()
ax0.set_xlabel('Thickness (m)')
ax0.set_ylabel('Time (s)')
plt.text(2,
min_plot_time + (lyr_times[0,0] - min_plot_time)/2.,
'Layer 1',
fontsize=16)
plt.text(dz_max/dz_step - 2,
lyr_times[-1,0] + (lyr_times[-1,1] - lyr_times[-1,0])/2.,
'Layer 2',
fontsize=16,
horizontalalignment='right')
plt.text(2,
lyr_times[0,0] + (max_plot_time - lyr_times[0,0])/2.,
'Layer 3',
fontsize=16)
plt.gca().xaxis.tick_top()
plt.gca().xaxis.set_label_position('top')
ax0.set_xlim((-excursion, ntrc+excursion))
ax1 = fig.add_subplot(gs[1])
plot_vawig(ax1, syn_zo, t, excursion, highlight=tuning_trace)
ax1.plot(lyr_times[:,0], color='blue', lw=1.5)
ax1.plot(lyr_times[:,1], color='red', lw=1.5)
ax1.set_ylim((min_plot_time,max_plot_time))
ax1.invert_yaxis()
ax1.set_xlabel('Thickness (m)')
ax1.set_ylabel('Time (s)')
ax2 = fig.add_subplot(gs[2])
ax2.plot(syn_zo[:,lyr_indx[:,0]], color='blue')
ax2.set_xlim((-excursion, ntrc+excursion))
ax2.axvline(tuning_trace, color='k', lw=2)
ax2.grid()
ax2.set_title('Upper interface amplitude')
ax2.set_xlabel('Thickness (m)')
ax2.set_ylabel('Amplitude')
plt.text(tuning_trace + 2,
plt.ylim()[0] * 1.1,
'tuning thickness = {0} m'.format(str(tuning_thickness)),
fontsize=16)
plt.savefig('figure_1.png')
plt.show()
| apache-2.0 | -3,679,153,622,030,970,000 | 24.79602 | 91 | 0.562755 | false |
d-Rickyy-b/Python-BlackJackBot | blackjack/game/card.py | 1 | 1732 | # -*- coding: utf-8 -*-
from enum import Enum
class Card(object):
class Type(Enum):
NUMBER = "card_number"
JACK = "card_jack"
QUEEN = "card_queen"
KING = "card_king"
ACE = "card_ace"
symbols = ["♥", "♦", "♣", "♠"]
value_str = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "Jack", "Queen", "King", "Ace"]
def __init__(self, card_id):
self.card_id = card_id
def is_ace(self):
return self.value == 11
@property
def symbol(self):
return self.symbols[self.card_id // 13]
@property
def value(self):
values = [2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11]
return values[self.card_id % 13]
@property
def face(self):
return self.value_str[self.card_id % 13]
@property
def type(self):
if (self.card_id % 13) in range(0, 9):
return Card.Type.NUMBER
elif (self.card_id % 13) == 9:
return Card.Type.JACK
elif (self.card_id % 13) == 10:
return Card.Type.QUEEN
elif (self.card_id % 13) == 11:
return Card.Type.KING
elif (self.card_id % 13) == 12:
return Card.Type.ACE
else:
raise ValueError("card_id '{}' can't be mapped to card type!".format(self.card_id))
@property
def str_id(self):
str_ids = ["card_2", "card_3", "card_4", "card_5", "card_6",
"card_7", "card_8", "card_9", "card_10",
"card_jack", "card_queen", "card_king", "card_ace"]
return str_ids[self.card_id % 13]
def __str__(self):
return "{} {}".format(self.symbol, self.face)
def __repr__(self):
return self.__str__()
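# Illustrative mapping (not part of the original module): card_id 0 -> "♥ 2",
# card_id 12 -> "♥ Ace", card_id 51 -> "♠ Ace"; value() returns 11 for aces
# and 10 for jacks, queens and kings.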
| gpl-3.0 | -4,021,522,245,757,826,600 | 26.806452 | 95 | 0.493039 | false |
luboslenco/cyclesgame | blender/arm/handlers.py | 1 | 5807 | import os
import sys
import bpy
import importlib
from bpy.app.handlers import persistent
import arm.utils
import arm.props as props
import arm.make as make
import arm.make_state as state
import arm.api
@persistent
def on_depsgraph_update_post(self):
if state.proc_build != None:
return
# Recache
if hasattr(bpy.context, 'evaluated_depsgraph_get'):
depsgraph = bpy.context.evaluated_depsgraph_get()
else: # TODO: deprecated
depsgraph = bpy.context.depsgraph
for update in depsgraph.updates:
uid = update.id
if hasattr(uid, 'arm_cached'):
# uid.arm_cached = False # TODO: does not trigger update
if isinstance(uid, bpy.types.Mesh) and uid.name in bpy.data.meshes:
bpy.data.meshes[uid.name].arm_cached = False
elif isinstance(uid, bpy.types.Curve) and uid.name in bpy.data.curves:
bpy.data.curves[uid.name].arm_cached = False
elif isinstance(uid, bpy.types.MetaBall) and uid.name in bpy.data.metaballs:
bpy.data.metaballs[uid.name].arm_cached = False
elif isinstance(uid, bpy.types.Armature) and uid.name in bpy.data.armatures:
bpy.data.armatures[uid.name].arm_cached = False
elif isinstance(uid, bpy.types.NodeTree) and uid.name in bpy.data.node_groups:
bpy.data.node_groups[uid.name].arm_cached = False
elif isinstance(uid, bpy.types.Material) and uid.name in bpy.data.materials:
bpy.data.materials[uid.name].arm_cached = False
# Send last operator to Krom
wrd = bpy.data.worlds['Arm']
if state.proc_play != None and \
state.target == 'krom' and \
wrd.arm_live_patch:
ops = bpy.context.window_manager.operators
if len(ops) > 0 and ops[-1] != None:
send_operator(ops[-1])
def send_operator(op):
if hasattr(bpy.context, 'object') and bpy.context.object != None:
obj = bpy.context.object.name
if op.name == 'Move':
vec = bpy.context.object.location
js = 'var o = iron.Scene.active.getChild("' + obj + '"); o.transform.loc.set(' + str(vec[0]) + ', ' + str(vec[1]) + ', ' + str(vec[2]) + '); o.transform.dirty = true;'
make.write_patch(js)
elif op.name == 'Resize':
vec = bpy.context.object.scale
js = 'var o = iron.Scene.active.getChild("' + obj + '"); o.transform.scale.set(' + str(vec[0]) + ', ' + str(vec[1]) + ', ' + str(vec[2]) + '); o.transform.dirty = true;'
make.write_patch(js)
elif op.name == 'Rotate':
vec = bpy.context.object.rotation_euler.to_quaternion()
js = 'var o = iron.Scene.active.getChild("' + obj + '"); o.transform.rot.set(' + str(vec[1]) + ', ' + str(vec[2]) + ', ' + str(vec[3]) + ' ,' + str(vec[0]) + '); o.transform.dirty = true;'
make.write_patch(js)
else: # Rebuild
make.patch()
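# Example of a live-patch string produced by send_operator() for a Move of an
# object named "Cube" (object name and coordinates are illustrative):
#   var o = iron.Scene.active.getChild("Cube");
#   o.transform.loc.set(1.0, 2.0, 3.0); o.transform.dirty = true;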
def always():
# Force ui redraw
if state.redraw_ui and context_screen != None:
for area in context_screen.areas:
if area.type == 'VIEW_3D' or area.type == 'PROPERTIES':
area.tag_redraw()
state.redraw_ui = False
# TODO: depsgraph.updates only triggers material trees
space = arm.utils.logic_editor_space(context_screen)
if space != None:
space.node_tree.arm_cached = False
return 0.5
appended_py_paths = []
context_screen = None
@persistent
def on_load_post(context):
global appended_py_paths
global context_screen
context_screen = bpy.context.screen
props.init_properties_on_load()
reload_blend_data()
bpy.ops.arm.sync_proxy()
wrd = bpy.data.worlds['Arm']
wrd.arm_recompile = True
arm.api.drivers = dict()
# Load libraries
if os.path.exists(arm.utils.get_fp() + '/Libraries'):
libs = os.listdir(arm.utils.get_fp() + '/Libraries')
for lib in libs:
if os.path.isdir(arm.utils.get_fp() + '/Libraries/' + lib):
fp = arm.utils.get_fp() + '/Libraries/' + lib
if fp not in appended_py_paths and os.path.exists(fp + '/blender.py'):
appended_py_paths.append(fp)
sys.path.append(fp)
import blender
importlib.reload(blender)
blender.register()
sys.path.remove(fp)
# Show trait users as collections
arm.utils.update_trait_collections()
def reload_blend_data():
armory_pbr = bpy.data.node_groups.get('Armory PBR')
if armory_pbr == None:
load_library('Armory PBR')
def load_library(asset_name):
if bpy.data.filepath.endswith('arm_data.blend'): # Prevent load in library itself
return
sdk_path = arm.utils.get_sdk_path()
data_path = sdk_path + '/armory/blender/data/arm_data.blend'
data_names = [asset_name]
# Import
data_refs = data_names.copy()
with bpy.data.libraries.load(data_path, link=False) as (data_from, data_to):
data_to.node_groups = data_refs
for ref in data_refs:
ref.use_fake_user = True
def register():
bpy.app.handlers.load_post.append(on_load_post)
bpy.app.handlers.depsgraph_update_post.append(on_depsgraph_update_post)
# bpy.app.handlers.undo_post.append(on_undo_post)
bpy.app.timers.register(always, persistent=True)
# TODO: On windows, on_load_post is not called when opening .blend file from explorer
if arm.utils.get_os() == 'win' and arm.utils.get_fp() != '':
on_load_post(None)
reload_blend_data()
def unregister():
bpy.app.handlers.load_post.remove(on_load_post)
bpy.app.handlers.depsgraph_update_post.remove(on_depsgraph_update_post)
# bpy.app.handlers.undo_post.remove(on_undo_post)
| lgpl-3.0 | -1,077,355,546,783,990,800 | 37.713333 | 200 | 0.610126 | false |
pdamodaran/yellowbrick | tests/test_pipeline.py | 1 | 7223 | # tests.test_pipeline
# Tests to ensure that the visual pipeline works as expected.
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Fri Oct 07 22:10:50 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: test_pipeline.py [1efae1f] [email protected] $
"""
Tests to ensure that the visual pipeline works as expected.
"""
##########################################################################
## Imports
##########################################################################
import os
import unittest
from unittest import mock
from yellowbrick.base import Visualizer
from yellowbrick.pipeline import VisualPipeline
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator, TransformerMixin
##########################################################################
## Mock Objects
##########################################################################
class Thing(object):
pass
class MockEstimator(BaseEstimator):
def fit(self, X, y=None, **kwargs):
return self
class MockVisualEstimator(Visualizer):
def fit(self, X, y=None, **kwargs):
self.draw(**kwargs)
return self
def draw(self, **kwargs):
pass
class MockTransformer(BaseEstimator, TransformerMixin):
def fit(self, X, y=None, **kwargs):
return self
def transform(self, X, **kwargs):
return X
class MockVisualTransformer(Visualizer, TransformerMixin):
def fit(self, X, y=None, **kwargs):
self.draw(**kwargs)
return self
def transform(self, X, **kwargs):
return X
def draw(self, **kwargs):
pass
##########################################################################
## VisualPipeline Tests
##########################################################################
class VisualPipelineTests(unittest.TestCase):
def test_validate_steps(self):
"""
Assert that visual transformers can be added to pipelines
"""
# Pipeline objects have a _validate_steps method that raises an
# TypeError if the steps don't match transforms --> estimator.
# validate a bad intermediate transformer on the Pipeline
with self.assertRaises(TypeError):
Pipeline([
('real', MockTransformer()),
('bad', Thing()),
('model', MockEstimator()),
])
# validate a bad intermediate transformer on the VisualPipeline
with self.assertRaises(TypeError):
VisualPipeline([
('real', MockTransformer()),
('bad', Thing()),
('model', MockEstimator()),
])
# validate a bad final estimator on the Pipeline
with self.assertRaises(TypeError):
Pipeline([
('real', MockTransformer()),
('bad', Thing()),
])
# validate a bad final estimator on the VisualPipeline
with self.assertRaises(TypeError):
VisualPipeline([
('real', MockTransformer()),
('bad', Thing()),
])
# validate visual transformers on a Pipeline
try:
Pipeline([
('real', MockTransformer()),
('visual', MockVisualTransformer()),
('model', MockEstimator()),
])
except TypeError:
self.fail("could not add a visual transformer to a Pipeline!")
# validate visual transformers on a VisualPipeline
try:
VisualPipeline([
('real', MockTransformer()),
('visual', MockVisualTransformer()),
('model', MockEstimator()),
])
except TypeError:
self.fail("could not add a visual transformer to a VisualPipeline!")
def test_visual_steps_property(self):
"""
Test the visual steps property to filter visualizers
"""
pipeline = VisualPipeline([
('a', MockTransformer()),
('b', MockVisualTransformer()),
('c', MockTransformer()),
('d', MockVisualTransformer()),
('e', MockEstimator()),
])
self.assertNotIn('a', pipeline.visual_steps)
self.assertIn('b', pipeline.visual_steps)
self.assertNotIn('c', pipeline.visual_steps)
self.assertIn('d', pipeline.visual_steps)
self.assertNotIn('e', pipeline.visual_steps)
def test_pipeline_poof(self):
"""
Test the poof call against the VisualPipeline
"""
pipeline = VisualPipeline([
('a', mock.MagicMock(MockTransformer())),
('b', mock.MagicMock(MockVisualTransformer())),
('c', mock.MagicMock(MockTransformer())),
('d', mock.MagicMock(MockVisualTransformer())),
('e', mock.MagicMock(MockEstimator()),)
])
pipeline.poof()
pipeline.steps[1][1].poof.assert_called_once_with(outpath=None)
pipeline.steps[3][1].poof.assert_called_once_with(outpath=None)
def test_pipeline_savefig_poof(self):
"""
Test the poof call with an outdir to save all the figures
"""
pipeline = VisualPipeline([
('a', mock.MagicMock(MockTransformer())),
('b', mock.MagicMock(MockVisualTransformer())),
('c', mock.MagicMock(MockTransformer())),
('d', mock.MagicMock(MockVisualTransformer())),
('e', mock.MagicMock(MockVisualEstimator()),)
])
# Must use path joining for Windows compatibility
tmpdir = os.path.join("tmp", "figures")
pipeline.poof(outdir=tmpdir)
pipeline.steps[1][1].poof.assert_called_once_with(outpath=os.path.join(tmpdir, "b.pdf"))
pipeline.steps[3][1].poof.assert_called_once_with(outpath=os.path.join(tmpdir, "d.pdf"))
pipeline.steps[4][1].poof.assert_called_once_with(outpath=os.path.join(tmpdir, "e.pdf"))
@unittest.skip("need to find a way for fit to return self in mocks")
def test_fit_transform_poof_and_draw_calls(self):
"""
Test calling fit, transform, and poof on the pipeline
"""
pipeline = VisualPipeline([
('a', mock.MagicMock(MockTransformer())),
('b', mock.MagicMock(MockVisualTransformer())),
('c', mock.MagicMock(MockTransformer())),
('d', mock.MagicMock(MockVisualTransformer())),
('e', mock.MagicMock(MockEstimator()),)
])
X = [[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3]]
y = [1, 2, 3, 4, 5]
pipeline.fit(X, y)
for name, step in pipeline.named_steps.items():
step.fit.assert_called_once_with(X, y)
pipeline.transform(X)
for name, step in pipeline.named_steps.items():
if name == 'e': continue
step.transform.assert_called_once_with(X)
pipeline.poof()
for name, step in pipeline.named_steps.items():
if name in {'a', 'c', 'e'}: continue
step.poof.assert_called_once_with(outpath=None)
| apache-2.0 | 7,521,119,648,828,526,000 | 30.819383 | 96 | 0.543957 | false |
cloudify-cosmo/softlayer-python | SoftLayer/CLI/loadbal/service_add.py | 1 | 1663 | """Adds a new load balancer service."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import loadbal
import click
@click.command()
@click.argument('identifier')
@click.option('--enabled / --disabled',
required=True,
help="Create the service as enable or disabled")
@click.option('--port',
required=True,
help="The port number for the service",
type=click.INT)
@click.option('--weight',
required=True,
type=click.INT,
help="The weight of the service")
@click.option('--healthcheck-type',
required=True,
help="The health check type")
@click.option('--ip-address', '--ip',
required=True,
help="The IP of the service")
@environment.pass_env
def cli(env, identifier, enabled, port, weight, healthcheck_type, ip_address):
"""Adds a new load balancer service."""
mgr = SoftLayer.LoadBalancerManager(env.client)
loadbal_id, group_id = loadbal.parse_id(identifier)
# check if the IP is valid
ip_address_id = None
if ip_address:
ip_service = env.client['Network_Subnet_IpAddress']
ip_record = ip_service.getByIpAddress(ip_address)
ip_address_id = ip_record['id']
mgr.add_service(loadbal_id,
group_id,
ip_address_id=ip_address_id,
enabled=enabled,
port=port,
weight=weight,
hc_type=healthcheck_type)
return 'Load balancer service is being added!'
| mit | 6,626,000,125,855,541,000 | 30.980769 | 78 | 0.591702 | false |
ppyordanov/HCI_4_Future_Cities | Server/src/virtualenv/Lib/encodings/koi8_r.py | 1 | 13245 | """ Python Character Mapping Codec koi8_r generated from 'MAPPINGS/VENDORS/MISC/KOI8-R.TXT' with gencodec.py.
""" # "
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='koi8-r',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
u'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u2580' # 0x8B -> UPPER HALF BLOCK
u'\u2584' # 0x8C -> LOWER HALF BLOCK
u'\u2588' # 0x8D -> FULL BLOCK
u'\u258c' # 0x8E -> LEFT HALF BLOCK
u'\u2590' # 0x8F -> RIGHT HALF BLOCK
u'\u2591' # 0x90 -> LIGHT SHADE
u'\u2592' # 0x91 -> MEDIUM SHADE
u'\u2593' # 0x92 -> DARK SHADE
u'\u2320' # 0x93 -> TOP HALF INTEGRAL
u'\u25a0' # 0x94 -> BLACK SQUARE
u'\u2219' # 0x95 -> BULLET OPERATOR
u'\u221a' # 0x96 -> SQUARE ROOT
u'\u2248' # 0x97 -> ALMOST EQUAL TO
u'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
u'\xa0' # 0x9A -> NO-BREAK SPACE
u'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
u'\xb0' # 0x9C -> DEGREE SIGN
u'\xb2' # 0x9D -> SUPERSCRIPT TWO
u'\xb7' # 0x9E -> MIDDLE DOT
u'\xf7' # 0x9F -> DIVISION SIGN
u'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
u'\u2553' # 0xA4 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2555' # 0xA6 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2556' # 0xA7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u255c' # 0xAD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
u'\u2562' # 0xB4 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2564' # 0xB6 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u256b' # 0xBD -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa9' # 0xBF -> COPYRIGHT SIGN
u'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
u'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
u'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
u'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
u'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
u'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
u'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
u'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
u'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
u'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
u'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
u'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
u'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
u'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
u'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
u'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
u'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
u'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
u'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
u'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
u'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
u'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
u'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
u'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
u'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
u'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
u'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
u'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
u'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
u'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
u'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
u'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
u'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
| mit | 2,934,191,392,544,551,000 | 41.588424 | 109 | 0.595092 | false |
LamCiuLoeng/fd | rpac/controllers/pdf.py | 1 | 2302 | # -*- coding: utf-8 -*-
import os
import json
# turbogears imports
from tg import expose, redirect, validate, flash, session, request, config
from tg.decorators import *
# third party imports
from repoze.what import authorize
from repoze.what.predicates import not_anonymous, in_group, has_permission
from sqlalchemy.sql.expression import and_
# project specific imports
from rpac.lib.base import BaseController
from rpac.model import *
from rpac.util.common import *
from rpac.util.layout_pdf import gen_pdf
__all__ = ['PdfController', 'PdfLayoutController']
class PdfController( BaseController ):
# Uncomment this line if your controller requires an authenticated user
# allow_only = authorize.in_group( 'Admin' )
allow_only = authorize.not_anonymous()
@expose()
def index(self, **kw):
header = None
# details = None
hid = kw.get('id', None)
if hid:
header = qry( OrderHeader ).filter( and_( OrderHeader.active == 0 , OrderHeader.id == hid ) ).first()
if header and header.dtls:
details = [(d.id, d.itemCode) for d in header.dtls]
# print details
pdf_zip_file = gen_pdf(header.no, details)
return serveFile(unicode(pdf_zip_file))
class PdfLayoutController( BaseController ):
# Uncomment this line if your controller requires an authenticated user
# allow_only = authorize.in_group( 'Admin' )
@expose('rpac.templates.pdf.index')
def index(self, **kw):
detail = None
data = None
detail_id = kw.get('id', None)
if detail_id:
detail = qry( OrderDetail ).filter( and_( OrderDetail.active == 0 , OrderDetail.id == detail_id ) ).first()
if detail:
item_code = detail.itemCode
template_dir = config.get('pdf_template_dir')
pdf_template = os.path.join(template_dir, '%s.mak' % item_code)
# print item_code, pdf_template
if os.path.exists(pdf_template):
# print os.path.exists(pdf_template)
data = detail.layoutValue
data = json.loads(data) if data else None
override_template(self.index, 'mako:rpac.templates.pdf.%s' % item_code)
# print data
return dict(data=data)
| mit | -5,420,445,439,386,341,000 | 31.422535 | 119 | 0.632493 | false |
appleseedhq/cortex | test/IECore/TransformationMatrixData.py | 2 | 12802 | ##########################################################################
#
# Copyright (c) 2007-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
"""Unit test for TransformationMatrix and TransformationMatrixData bindings"""
import os, os.path
import math
import unittest
import imath
import IECore
import random
class TransformationMatrixfTest(unittest.TestCase):
def testConstructors(self):
"""Test TransformationMatrixf constructors"""
a = IECore.TransformationMatrixf()
self.assertEqual( a.transform, imath.M44f() )
a = IECore.TransformationMatrixf( imath.V3f( 2, 2, 2 ), imath.Eulerf(), imath.V3f( 1, 0, 0 ) )
self.assert_( a.transform.equalWithAbsError( imath.M44f().scale( imath.V3f(2,2,2) ) * imath.M44f().translate( imath.V3f(1,0,0) ), 0.01) )
b = IECore.TransformationMatrixf( a )
self.assertEqual( a.transform, b.transform )
def testAttributes(self):
"""Test TransformationMatrixf attributes"""
a = IECore.TransformationMatrixf()
self.assertEqual( a.scalePivot, imath.V3f( 0, 0, 0 ) )
self.assertEqual( a.scale, imath.V3f( 1, 1, 1 ) )
self.assertEqual( a.shear, imath.V3f( 0, 0, 0 ) )
self.assertEqual( a.scalePivotTranslation, imath.V3f( 0, 0, 0 ) )
self.assertEqual( a.rotatePivot, imath.V3f( 0, 0, 0 ) )
self.assertEqual( a.rotationOrientation, imath.Quatf() )
self.assertEqual( a.rotate, imath.Eulerf() )
self.assertEqual( a.rotatePivotTranslation, imath.V3f( 0, 0, 0 ) )
self.assertEqual( a.translate, imath.V3f( 0, 0, 0 ) )
try:
a.transform = 1
except:
pass
else:
raise Exception, "Should not be able to set transform."
def testTransform(self):
"""Test TransformationMatrixf transform"""
a = IECore.TransformationMatrixf()
a.scale = imath.V3f( 2, 2, 2 )
self.assertEqual( a.transform, imath.M44f().scale( imath.V3f( 2, 2, 2 ) ) )
a.rotate = imath.Eulerf( 0.2, 0.2, 0.2 )
self.assert_( a.transform.equalWithAbsError( imath.M44f().scale( imath.V3f( 2, 2, 2 ) ) * imath.Eulerf( 0.2, 0.2, 0.2 ).toMatrix44(), 0.01 ) )
def testComparison(self):
"""Test TransformationMatrixf comparison"""
a = IECore.TransformationMatrixf()
b = IECore.TransformationMatrixf()
self.assertEqual( a, b )
b.scalePivot = imath.V3f( 0.00001, 0, 0 )
self.assertNotEqual( a, b )
a.scalePivot = imath.V3f( 0.00001, 0, 0 )
self.assertEqual( a, b )
class TransformationMatrixdTest(unittest.TestCase):
def testConstructors(self):
"""Test TransformationMatrixd constructors"""
a = IECore.TransformationMatrixd()
self.assertEqual( a.transform, imath.M44d() )
a = IECore.TransformationMatrixd( imath.V3d( 2, 2, 2 ), imath.Eulerd(), imath.V3d( 1, 0, 0 ) )
self.assert_( a.transform.equalWithAbsError( imath.M44d().scale( imath.V3d(2,2,2) ) * imath.M44d().translate( imath.V3d(1,0,0) ), 0.01 ) )
b = IECore.TransformationMatrixd( a )
self.assertEqual( a.transform, b.transform )
def testAttributes(self):
"""Test TransformationMatrixd attributes"""
a = IECore.TransformationMatrixd()
self.assertEqual( a.scalePivot, imath.V3d( 0, 0, 0 ) )
self.assertEqual( a.scale, imath.V3d( 1, 1, 1 ) )
self.assertEqual( a.shear, imath.V3d( 0, 0, 0 ) )
self.assertEqual( a.scalePivotTranslation, imath.V3d( 0, 0, 0 ) )
self.assertEqual( a.rotatePivot, imath.V3d( 0, 0, 0 ) )
self.assertEqual( a.rotationOrientation, imath.Quatd() )
self.assertEqual( a.rotate, imath.Eulerd() )
self.assertEqual( a.rotatePivotTranslation, imath.V3d( 0, 0, 0 ) )
self.assertEqual( a.translate, imath.V3d( 0, 0, 0 ) )
try:
a.transform = 1
except:
pass
else:
raise Exception, "Should not be able to set transform."
def testTransform(self):
"""Test TransformationMatrixd transform"""
a = IECore.TransformationMatrixd()
a.scale = imath.V3d( 2, 2, 2 )
self.assertEqual( a.transform, imath.M44d().scale( imath.V3d( 2, 2, 2 ) ) )
a.rotate = imath.Eulerd( 0.2, 0.2, 0.2 )
self.assert_( a.transform.equalWithAbsError( imath.M44d().scale( imath.V3d( 2, 2, 2 ) ) * imath.Eulerd( 0.2, 0.2, 0.2 ).toMatrix44(), 0.01 ) )
def testComparison(self):
"""Test TransformationMatrixd comparison"""
a = IECore.TransformationMatrixd()
b = IECore.TransformationMatrixd()
self.assertEqual( a, b )
b.scalePivot = imath.V3d( 0.00001, 0, 0 )
self.assertNotEqual( a, b )
a.scalePivot = imath.V3d( 0.00001, 0, 0 )
self.assertEqual( a, b )
class TransformationMatrixDatafTest(unittest.TestCase):
testFile = "test/transform.fio"
def testConstructors(self):
"""Test TransformationMatrixfData constructors"""
a = IECore.TransformationMatrixfData()
self.assertEqual( a.value, IECore.TransformationMatrixf() )
a = IECore.TransformationMatrixfData( IECore.TransformationMatrixf( imath.V3f( 2, 2, 2 ), imath.Eulerf(), imath.V3f( 1, 0, 0 ) ) )
self.assertEqual( a.value.scale, imath.V3f( 2, 2, 2 ) )
def testCopy(self):
"""Test TransformationMatrixfData copy"""
a = IECore.TransformationMatrixfData( IECore.TransformationMatrixf( imath.V3f( 2, 2, 2 ), imath.Eulerf(), imath.V3f( 1, 0, 0 ) ) )
self.assertEqual( a.value.scale, imath.V3f( 2, 2, 2 ) )
b = a.copy()
a.value = IECore.TransformationMatrixf()
self.assertEqual( b.value.scale, imath.V3f( 2, 2, 2 ) )
self.assertEqual( a.value.scale, imath.V3f( 1, 1, 1 ) )
def testIO(self):
"""Test TransformationMatrixfData IO"""
a = IECore.TransformationMatrixfData( IECore.TransformationMatrixf( imath.V3f(2,3,4), imath.Eulerf(), imath.V3f(1,2,3) ) )
w = IECore.ObjectWriter( a, self.testFile )
w.write()
r = IECore.ObjectReader( self.testFile )
b = r.read()
self.assertEqual( a, b )
def testInterpolation(self):
"""Test TranformationMatrixfData interpolation"""
a = IECore.TransformationMatrixfData()
b = IECore.TransformationMatrixfData( IECore.TransformationMatrixf( imath.V3f(2,3,4), imath.Eulerf(), imath.V3f(1,2,3) ) )
c = IECore.linearObjectInterpolation( a, b, 0.5 )
self.assertEqual( type(c), IECore.TransformationMatrixfData )
self.assert_( c.value.scale.equalWithAbsError( imath.V3f( 1.5, 2, 2.5 ), 0.01 ) )
self.assert_( c.value.translate.equalWithAbsError( imath.V3f( 0.5, 1, 1.5 ), 0.01 ) )
def testComparison(self):
"""Test TransformationMatrixfData comparison"""
a = IECore.TransformationMatrixfData()
b = IECore.TransformationMatrixfData()
self.assertEqual( a, b )
b.value = IECore.TransformationMatrixf( imath.V3f( 0.00001, 0, 0 ), imath.Eulerf(), imath.V3f(0,0,0) )
self.assertNotEqual( a, b )
a.value = IECore.TransformationMatrixf( imath.V3f( 0.00001, 0, 0 ), imath.Eulerf(), imath.V3f(0,0,0) )
self.assertEqual( a, b )
def testHash( self ) :
def modifyAndTest( data, field, index ) :
h = data.hash()
v = data.value
f = getattr( v, field )
if isinstance( f, imath.Quatf ) :
# imath omits the indexing operator
# from its bindings, so we have to do
# it all manually
if index == 0 :
f.setR( f.r() + 1 )
else :
fv = f.v()
fv[index-1] += 1
f.setV( fv )
else :
f[index] += 1
setattr( v, field, f )
data.value = v
self.assertNotEqual( data.hash(), h )
d = IECore.TransformationMatrixfData()
modifyAndTest( d, "scalePivot", 0 )
modifyAndTest( d, "scalePivot", 1 )
modifyAndTest( d, "scalePivot", 2 )
modifyAndTest( d, "scale", 0 )
modifyAndTest( d, "scale", 1 )
modifyAndTest( d, "scale", 2 )
modifyAndTest( d, "shear", 0 )
modifyAndTest( d, "shear", 1 )
modifyAndTest( d, "shear", 2 )
modifyAndTest( d, "scalePivotTranslation", 0 )
modifyAndTest( d, "scalePivotTranslation", 1 )
modifyAndTest( d, "scalePivotTranslation", 2 )
modifyAndTest( d, "rotatePivot", 0 )
modifyAndTest( d, "rotatePivot", 1 )
modifyAndTest( d, "rotatePivot", 2 )
modifyAndTest( d, "rotationOrientation", 0 )
modifyAndTest( d, "rotationOrientation", 1 )
modifyAndTest( d, "rotationOrientation", 2 )
modifyAndTest( d, "rotationOrientation", 3 )
modifyAndTest( d, "rotate", 0 )
modifyAndTest( d, "rotate", 1 )
modifyAndTest( d, "rotate", 2 )
modifyAndTest( d, "rotatePivotTranslation", 0 )
modifyAndTest( d, "rotatePivotTranslation", 1 )
modifyAndTest( d, "rotatePivotTranslation", 2 )
modifyAndTest( d, "translate", 0 )
modifyAndTest( d, "translate", 1 )
modifyAndTest( d, "translate", 2 )
h = d.hash()
v = d.value
r = v.rotate
r.setOrder( imath.Eulerf.Order.ZYX )
v.rotate = r
d.value = v
self.assertNotEqual( d.hash(), h )
def tearDown(self):
if os.path.exists( self.testFile ):
os.remove( self.testFile )
class TransformationMatrixDatadTest(unittest.TestCase):
testFile = "test/transform.fio"
def testConstructors(self):
"""Test TransformationMatrixdData constructors"""
a = IECore.TransformationMatrixdData()
self.assertEqual( a.value, IECore.TransformationMatrixd() )
a = IECore.TransformationMatrixdData( IECore.TransformationMatrixd( imath.V3d( 2, 2, 2 ), imath.Eulerd(), imath.V3d( 1, 0, 0 ) ) )
self.assertEqual( a.value.scale, imath.V3d( 2, 2, 2 ) )
def testCopy(self):
"""Test TransformationMatrixdData copy"""
a = IECore.TransformationMatrixdData( IECore.TransformationMatrixd( imath.V3d( 2, 2, 2 ), imath.Eulerd(), imath.V3d( 1, 0, 0 ) ) )
self.assertEqual( a.value.scale, imath.V3d( 2, 2, 2 ) )
b = a.copy()
a.value = IECore.TransformationMatrixd()
self.assertEqual( b.value.scale, imath.V3d( 2, 2, 2 ) )
self.assertEqual( a.value.scale, imath.V3d( 1, 1, 1 ) )
def testIO(self):
"""Test TransformationMatrixdData IO"""
a = IECore.TransformationMatrixdData( IECore.TransformationMatrixd( imath.V3d(2,3,4), imath.Eulerd(), imath.V3d(1,2,3) ) )
w = IECore.ObjectWriter( a, self.testFile )
w.write()
r = IECore.ObjectReader( self.testFile )
b = r.read()
self.assertEqual( a, b )
def testInterpolation(self):
"""Test TranformationMatrixdData interpolation"""
a = IECore.TransformationMatrixdData()
b = IECore.TransformationMatrixdData( IECore.TransformationMatrixd( imath.V3d(2,3,4), imath.Eulerd(), imath.V3d(1,2,3) ) )
c = IECore.linearObjectInterpolation( a, b, 0.5 )
self.assertEqual( type(c), IECore.TransformationMatrixdData )
self.assert_( c.value.scale.equalWithAbsError( imath.V3d( 1.5, 2, 2.5 ), 0.01 ) )
self.assert_( c.value.translate.equalWithAbsError( imath.V3d( 0.5, 1, 1.5 ), 0.01 ) )
# try rotation interpolation...
d = IECore.TransformationMatrixdData( IECore.TransformationMatrixd( imath.V3d(2,3,4), imath.Eulerd( 1., 2., 3. ), imath.V3d(1,2,3) ) )
e = IECore.linearObjectInterpolation( b, d, 0.2 )
self.assert_( e.value.rotate.equalWithAbsError( imath.V3d( -0.341406, 0.189475, 0.191253 ), 0.001 ) )
def testComparison(self):
"""Test TransformationMatrixdData comparison"""
a = IECore.TransformationMatrixdData()
b = IECore.TransformationMatrixdData()
self.assertEqual( a, b )
b.value = IECore.TransformationMatrixd( imath.V3d( 0.00001, 0, 0 ), imath.Eulerd(), imath.V3d(0,0,0) )
self.assertNotEqual( a, b )
a.value = IECore.TransformationMatrixd( imath.V3d( 0.00001, 0, 0 ), imath.Eulerd(), imath.V3d(0,0,0) )
self.assertEqual( a, b )
def tearDown(self):
if os.path.exists( self.testFile ):
os.remove( self.testFile )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 4,432,858,389,025,517,000 | 38.390769 | 144 | 0.687705 | false |
Distrotech/mailutils | python/mailutils/__init__.py | 1 | 1932 | # GNU Mailutils -- a suite of utilities for electronic mail
# Copyright (C) 2009-2012 Free Software Foundation, Inc.
#
# GNU Mailutils is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Mailutils is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Mailutils. If not, see <http://www.gnu.org/licenses/>.
"""
A Python interface to Mailutils framework.
GNU Mailutils is a rich and powerful protocol-independent mail
framework. It contains a series of useful mail libraries, clients,
and servers. These are the primary mail utilities for the GNU system.
The central library is capable of handling electronic mail in various
mailbox formats and protocols, both local and remote. Specifically,
this project contains a POP3 server, an IMAP4 server, and a Sieve mail
filter. It also provides a POSIX `mailx' client, and a collection of
other handy tools.
This software is part of the GNU Project and belongs to the Free
Software Foundation. All libraries are licensed using the GNU LGPL.
The documentation is licensed under the GNU FDL, and everything else
is licensed using the GNU GPL.
See http://www.gnu.org/software/mailutils/ for more information about
GNU Mailutils.
"""
__all__ = (
"error",
"address",
"attribute",
"auth",
"body",
"envelope",
"filter",
"folder",
"header",
"mailer",
"mailbox",
"mailcap",
"message",
"mime",
"nls",
"registrar",
"secret",
"sieve",
"stream",
"url",
"util",
)
| gpl-3.0 | 8,856,906,246,606,288,000 | 31.2 | 74 | 0.717391 | false |
anyonedev/anyonedev-monitor-agent | monitor/metrics/log/Nginx.py | 1 | 2320 | '''
Created on 2014-11-18
@author: hongye
'''
import re
import time
from metrics.log.AgentParser import detect
class NginxAccessLogLineParser(object):
ipP = r"?P<ip>[\d.]*"
timeP = r"""?P<time>\[[^\[\]]*\]"""
requestP = r"""?P<request>\"[^\"]*\""""
statusP = r"?P<status>\d+"
bodyBytesSentP = r"?P<bodyByteSent>\d+"
referP = r"""?P<refer>\"[^\"]*\""""
userAgentP = r"""?P<userAgent>\"[^\"]*\""""
userOperatorSystems = re.compile(r'\([^\(\)]*\)')
userBrowers = re.compile(r'[^\)]*\"')
nginxLogPattern = re.compile(r"(%s)\ -\ -\ (%s)\ (%s)\ (%s)\ (%s)\ (%s)\ (%s)" % (ipP, timeP, requestP, statusP, bodyBytesSentP, referP, userAgentP), re.VERBOSE)
def parse(self, line):
matchs = self.nginxLogPattern.match(line)
if matchs != None:
values = dict()
groups = matchs.groups()
values["ip"] = groups[0]
values["request"] = groups[2]
values["status"] = groups[3]
values["body_bytes_sent"] = groups[4]
values["refer"] = groups[5]
userAgent = groups[6]
values["user_agent"] = userAgent
t = groups[1]
if t != None:
values["time"] = int(time.mktime(time.strptime(t, '[%d/%b/%Y:%H:%M:%S %z]')))
if len(userAgent) > 20:
agent = detect(userAgent)
os = agent.get("os")
if os != None:
values["os_name"] = os.get("name")
values["os_version"] = os.get("version")
if agent.get("bot") != None:
values["is_bot"] = agent.get("bot")
browser = agent.get("browser")
if browser != None:
values["browser_name"] =browser.get("name")
values["browser_version"] = browser.get("version")
platform = agent.get("platform")
if platform != None:
values["platform_name"] = platform.get("name")
values["platform_version"] = platform.get("version")
return values
return None
def nginx_access_log_parser():
return NginxAccessLogLineParser()
| gpl-2.0 | 4,690,549,868,023,794,000 | 34.151515 | 166 | 0.471983 | false |
jcollado/pic2map | tests/test_cli.py | 1 | 6128 | # -*- coding: utf-8 -*-
"""Command Line Interface test cases."""
import argparse
import logging
import os
import tempfile
import unittest
from StringIO import StringIO
from mock import (
MagicMock as Mock,
patch,
)
from pic2map.cli import (
add,
count,
main,
parse_arguments,
remove,
serve,
valid_directory,
)
class MainTests(unittest.TestCase):
"""Main function test cases."""
def setUp(self):
"""Patch parse_arguments function."""
self.parse_arguments_patcher = patch('pic2map.cli.parse_arguments')
self.parse_arguments = self.parse_arguments_patcher.start()
self.logging_patcher = patch('pic2map.cli.logging')
self.logging_patcher.start()
def test_func_called(self):
"""Command function is called."""
argv = Mock()
function = Mock()
args = argparse.Namespace(
log_level=logging.WARNING,
func=function,
)
self.parse_arguments.return_value = args
main(argv)
function.assert_called_once_with(args)
def tearDown(self):
"""Undo the patching."""
self.parse_arguments_patcher.stop()
self.logging_patcher.stop()
class CommandFunctionTests(unittest.TestCase):
"""Command function test cases."""
def setUp(self):
"""Patch dependencies."""
self.tree_explorer_patcher = patch('pic2map.cli.TreeExplorer')
self.tree_explorer_cls = self.tree_explorer_patcher.start()
self.filter_gps_metadata_patcher = (
patch('pic2map.cli.filter_gps_metadata'))
self.filter_gps_metadata = self.filter_gps_metadata_patcher.start()
self.transform_metadata_to_row_patcher = (
patch('pic2map.cli.transform_metadata_to_row'))
self.transform_metadata_to_row = (
self.transform_metadata_to_row_patcher.start())
self.location_db_patcher = patch('pic2map.cli.LocationDB')
self.location_cls = self.location_db_patcher.start()
def tearDown(self):
"""Undo the patching."""
self.tree_explorer_patcher.stop()
self.filter_gps_metadata_patcher.stop()
self.transform_metadata_to_row_patcher.stop()
self.location_db_patcher.stop()
def test_add(self):
"""Add command function."""
tree_explorer = self.tree_explorer_cls()
paths = Mock()
tree_explorer.paths.return_value = paths
metadata_record = Mock()
metadata_records = [metadata_record]
self.filter_gps_metadata.return_value = metadata_records
row = Mock()
self.transform_metadata_to_row.return_value = row
database = self.location_cls().__enter__()
directory = 'some directory'
args = argparse.Namespace(directory=directory)
add(args)
self.tree_explorer_cls.assert_called_with(directory)
self.filter_gps_metadata.assert_called_once_with(paths)
self.transform_metadata_to_row.assert_called_once_with(metadata_record)
database.insert.assert_called_with([row])
def test_remove(self):
"""Remove command function."""
directory = 'some directory'
args = argparse.Namespace(directory=directory)
remove(args)
database = self.location_cls().__enter__()
database.delete.assert_called_once_with(directory)
def test_count(self):
"""Count command function."""
file_count = 10
database = self.location_cls().__enter__()
database.count.return_value = file_count
args = argparse.Namespace()
with patch('sys.stdout', new_callable=StringIO) as stdout:
count(args)
self.assertEqual(stdout.getvalue(), '{}\n'.format(file_count))
def test_serve(self):
"""Serve command function."""
args = argparse.Namespace()
with patch('pic2map.cli.app') as app:
serve(args)
app.run.assert_called_once_with(debug=True)
class ValidDirectoryTest(unittest.TestCase):
"""Valid directory test cases."""
def test_valid_directory(self):
"""Valid directory path."""
temp_directory = tempfile.mkdtemp()
try:
self.assertTrue(
valid_directory(temp_directory),
temp_directory,
)
finally:
os.rmdir(temp_directory)
def test_invalid_directory(self):
"""Invalid directory."""
with tempfile.NamedTemporaryFile() as temp_file:
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_file.name)
def test_unreadable_directory(self):
"""Unreadable diretory."""
temp_directory = tempfile.mkdtemp()
try:
os.chmod(temp_directory, 0)
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_directory)
finally:
os.rmdir(temp_directory)
class ParseArgumentsTest(unittest.TestCase):
"""Parse arguments test case."""
def test_add_command(self):
"""Add command."""
directory = 'some directory'
with patch('pic2map.cli.valid_directory') as valid_directory_func:
valid_directory_func.return_value = directory
args = parse_arguments(['add', directory])
self.assertEqual(args.directory, directory)
self.assertEqual(args.func, add)
def test_remove(self):
"""Remove command."""
directory = 'some directory'
with patch('pic2map.cli.valid_directory') as valid_directory_func:
valid_directory_func.return_value = directory
args = parse_arguments(['remove', directory])
self.assertEqual(args.directory, directory)
self.assertEqual(args.func, remove)
def test_count(self):
"""Count command."""
args = parse_arguments(['count'])
self.assertEqual(args.func, count)
def test_serve_command(self):
"""Serve command."""
args = parse_arguments(['serve'])
self.assertEqual(args.func, serve)
| mit | -5,578,770,176,831,160,000 | 29.79397 | 79 | 0.615862 | false |
endlessm/chromium-browser | native_client/tools/scons_to_gn.py | 2 | 2130 | #!/usr/bin/python
# Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import fnmatch
import optparse
import os
import sys
import scons_to_gn
TOOLS_DIR = os.path.dirname(__file__)
"""Convert from Scons to GN.
Takes a set of SCons input files and generates an output file per input, as
well as a global alias file.
"""
def RunTests():
test_path = os.path.abspath(os.path.join(TOOLS_DIR, 'scons_to_gn'))
fail_count = 0
for filename in os.listdir(test_path):
if fnmatch.fnmatch(filename, '*_test.py'):
print('Testing: ' + filename)
filepath = os.path.join(test_path, filename)
val = os.system(sys.executable + ' ' + filepath)
if val:
print('FAILED: ' + filename)
fail_count += 1
print('\n')
def main(argv):
usage = 'usage: %prog [options] <scons files> ...'
parser = optparse.OptionParser(usage)
parser.add_option('-v', '--verbose', dest='verbose',
action='store_true', default=False)
parser.add_option('-t', '--test', dest='test',
action='store_true', default=False)
parser.add_option('-w', '--write', dest='write',
action='store_true', default=False)
options, args = parser.parse_args(argv)
if options.test:
RunTests()
return 0
if len(args) == 0:
parser.error('Expecting list of sources.')
trusted = scons_to_gn.TrustedConditions()
untrusted = scons_to_gn.UntrustedConditions()
for name in args:
if name.endswith('nacl.scons'):
tracker = scons_to_gn.ObjectTracker(name, untrusted)
elif name.endswith('build.scons'):
tracker = scons_to_gn.ObjectTracker(name, trusted)
else:
parser.error('Expecting build.scons and nacl.scons sources.')
if options.write:
outname = os.path.join(os.path.dirname(name), 'BUILD.gn')
with open(outname, 'wt') as outfile:
tracker.Dump(outfile)
else:
tracker.Dump(sys.stdout)
return 0
if __name__ == '__main__':
retval = main(sys.argv[1:])
sys.exit(retval)
| bsd-3-clause | -4,111,002,075,645,395,500 | 26.662338 | 74 | 0.655399 | false |
USF-COT/trdi_adcp_readers | trdi_adcp_readers/readers.py | 1 | 28613 | import numpy as np
import dask.array as darr
from dask import compute, delayed
from dask.bag import from_delayed, from_sequence
from pandas import Timedelta
from xarray import Variable, IndexVariable, DataArray, Dataset
from trdi_adcp_readers.pd0.pd0_parser_sentinelV import (ChecksumError,
ReadChecksumError,
ReadHeaderError)
from trdi_adcp_readers.pd15.pd0_converters import (
PD15_file_to_PD0,
PD15_string_to_PD0
)
from trdi_adcp_readers.pd0.pd0_parser import parse_pd0_bytearray
from trdi_adcp_readers.pd0.pd0_parser_sentinelV import parse_sentinelVpd0_bytearray
from IPython import embed
def read_PD15_file(path, header_lines=0, return_pd0=False):
pd0_bytes = PD15_file_to_PD0(path, header_lines)
data = parse_pd0_bytearray(pd0_bytes)
if return_pd0:
return data, pd0_bytes
else:
return data
def read_PD15_hex(hex_string, return_pd0=False):
pd15_byte_string = hex_string.decode("hex")
pd0_bytes = PD15_string_to_PD0(pd15_byte_string)
data = parse_pd0_bytearray(pd0_bytes)
if return_pd0:
return data, pd0_bytes
else:
return data
def read_PD15_string(string, return_pd0=False):
pd0_bytes = PD15_string_to_PD0(string)
data = parse_pd0_bytearray(pd0_bytes)
if return_pd0:
return data, pd0_bytes
else:
return data
def _alloc_timestamp(item):
if type(item)==dict:
return item['timestamp']
else:
return item # NaNs marking bad ensembles.
def _alloc_timestamp_parts(part): # Each partition is an array of dicts.
return np.array([ens['timestamp'] for ens in part if type(ens)==dict]) # Exclude NaNs marking bad ensembles.
@delayed
def _addtarr(t, dt):
return darr.array([tn + dt for tn in t])
def _alloc_hpr(ensblk, group, varname):
phisc = 0.01 # Scale heading, pitch and roll by 0.01. Sentinel V manual, p. 259.
return darr.array([ensarr[group][varname]*phisc for ensarr in ensblk
if type(ensarr)==dict])
def _alloc_beam5(ensblk, group):
return np.array([ensarr[group]['data'] for ensarr in ensblk
if type(ensarr)==dict])
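# Note: ensembles2dataset_dask() below maps a `_alloc_janus` helper over the
# ensemble bag, but no such function is defined in this module as written.
# The sketch below is an assumed analogue of _alloc_beam5 for the four-beam
# (Janus) data groups; the name and behaviour are inferred, not original code.
def _alloc_janus(ensblk, group):
    return np.array([ensarr[group]['data'] for ensarr in ensblk
                     if type(ensarr)==dict])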
# def _alloc_beam5vel(ensblk):
# arrs = darr.array([])
# for ensarr in ensblk:
# if type(ensarr)==dict:
# arr = darr.from_array(np.array(ensarr['velocity_beam5']['data']), chunks=1)
# arrs = darr.concatenate((arrs, arr), axis=1)
# else:
# continue
#     return arrs
# def alloc_2dvars(ensarr):
# vjanus = ensarr['velocity_janus']['data']
# b1[:, n] = vjanus[:, 0]
# b2[:, n] = vjanus[:, 1]
# b3[:, n] = vjanus[:, 2]
# b4[:, n] = vjanus[:, 3]
# # b5[:, n] = ensarr['velocity_beam5']['data'].squeeze()
# # corjanus = ensarr['correlation_janus']['data']
# # cor1[:, n] = corjanus[:, 0]
# # cor2[:, n] = corjanus[:, 1]
# # cor3[:, n] = corjanus[:, 2]
# # cor4[:, n] = corjanus[:, 3]
# # cor5[:, n] = ensarr['correlation_beam5']['data'].squeeze()
# # intjanus = ensarr['echo_intensity_janus']['data']
# # int1[:, n] = intjanus[:, 0]
# # int2[:, n] = intjanus[:, 1]
# # int3[:, n] = intjanus[:, 2]
# # int4[:, n] = intjanus[:, 3]
# # int5[:, n] = ensarr['echo_intensity_beam5']['data'].squeeze()
def _bag2DataArray(bg, chunks, **kwargs):
    return DataArray(darr.from_array(np.array(bg.compute()), chunks=chunks),
                     **kwargs)
def ensembles2dataset_dask(ensdict, ncfpath, dsattrs={}, chunks=10,
verbose=True, print_every=1000):
"""
Convert a dictionary of ensembles into an xarray Dataset object
using dask.delayed to keep memory usage feasible.
"""
mms2ms = 1e-3
n=0
# fbadens = np.array(ensdict_aux)==None
# nt = len(ensdict) - np.sum(fbadens)
# embed()
ensdict0 = None
while ensdict0 is None:
ensdict0 = ensdict[n].compute()
n+=1
nz = ensdict0['fixed_leader_janus']['number_of_cells']
fixj = ensdict0['fixed_leader_janus'].compute()
fix5 = ensdict0['fixed_leader_beam5'].compute()
# Add ping offset to get beam 5's timestamps.
dt5 = fix5['ping_offset_time'] # In milliseconds.
dt5 = np.array(Timedelta(dt5, unit='ms'))
th = fixj['beam_angle']
assert th==25 # Always 25 degrees.
th = th*np.pi/180.
Cth = np.cos(th)
# Construct along-beam/vertical axes.
cm2m = 1e-2
r1janus = fixj['bin_1_distance']*cm2m
r1b5 = fix5['bin_1_distance']*cm2m
ncj = fixj['number_of_cells']
nc5 = fix5['number_of_cells']
lcj = fixj['depth_cell_length']*cm2m
lc5 = fix5['depth_cell_length']*cm2m
Lj = ncj*lcj # Distance from center of bin 1 to the center of last bin (Janus).
L5 = nc5*lc5 # Distance from center of bin 1 to the center of last bin (beam 5).
rb = r1janus + np.arange(0, Lj, lcj) # Distance from xducer head
# (Janus).
zab = Cth*rb # Vertical distance from xducer head
# (Janus).
zab5 = r1b5 + np.arange(0, L5, lc5) # Distance from xducer head, also
# depth for the vertical beam.
rb = IndexVariable('z', rb, attrs={'units':'meters', 'long_name':"along-beam distance from the xducer's face to the center of the bins, for beams 1-4 (Janus)"})
zab = IndexVariable('z', zab, attrs={'units':'meters', 'long_name':"vertical distance from the instrument's head to the center of the bins, for beams 1-4 (Janus)"})
zab5 = IndexVariable('z5', zab5, attrs={'units':'meters', 'long_name':"vertical distance from xducer face to the center of the bins, for beam 5 (vertical)"})
ensdict = from_sequence(ensdict)
tjanus = ensdict.map_partitions(_alloc_timestamp_parts)
t5 = _addtarr(tjanus, dt5)
if verbose: print("Unpacking timestamps.")
time = IndexVariable('time', tjanus.compute(), attrs={'long_name':'timestamps for beams 1-4 (Janus)'})
time5 = IndexVariable('time5', t5.compute(), attrs={'long_name':'timestamps for beam 5 (vertical)'})
if verbose: print("Done unpacking timestamps.")
coords0 = dict(time=time)
coords = dict(z=zab, time=time, rb=rb)
coords5 = dict(z5=zab5, time5=time5)
dims = ['z', 'time']
dims5 = ['z5', 'time5']
dims0 = ['time']
coordsdict = coords0
if verbose: print("Allocating heading, pitch, roll.")
    svars = ['heading', 'pitch', 'roll']
    long_names = svars
    units = ['degrees']*3
    grp = 'variable_leader_janus'
    vars1d = dict()
    for vname,lname,unit in zip(svars,long_names,units):
        if verbose: print(vname)
        # Attributes depend on the loop variables, so build them for each variable.
        kwda = dict(coords=coordsdict, dims=dims0, attrs=dict(units=unit, long_name=lname))
        wrk = ensdict.map_partitions(_alloc_hpr, grp, vname)
        # wrk = darr.from_array(np.array(wrk.compute()), chunks=chunks)
        wrk2 = delayed(_bag2DataArray)(wrk, chunks, **kwda)
vars1d.update({vname:wrk2})
del(wrk, wrk2)
ds2hpr = Dataset(data_vars=vars1d, coords=coordsdict)
ds2hpr = ds2hpr.to_netcdf(ncfpath, compute=False, mode='w')
if verbose: print("Saving heading, pitch, roll.")
ds2hpr.compute()
if verbose: print("Done saving heading, pitch, roll.")
del(ds2hpr)
coordsdict = coords5
# Load beam 5 variables into memory to
# be able to put them in a chunked DataArray.
if verbose: print("Allocating beam 5 variables.")
grps = ['velocity_beam5', 'correlation_beam5', 'echo_intensity_beam5']
long_names = ['Beam 5 velocity', 'Beam 5 correlation', 'Beam 5 echo amplitude']
units = ['mm/s, positive toward xducer face', 'unitless', 'dB']
vars5 = dict()
for grp,lname,unit in zip(grps,long_names,units):
if verbose: print(grp)
wrk = ensdict.map_partitions(_alloc_beam5, grp)
wrk = darr.from_array(np.array(wrk.compute()).T, chunks=(1, chunks))
wrk = DataArray(wrk, coords=coordsdict, dims=dims5, attrs=dict(units=unit, long_name=lname))
vars5.update({grp:wrk})
del(wrk)
ds5 = Dataset(data_vars=vars5, coords=coordsdict)
ds5 = ds5.to_netcdf(ncfpath, compute=False, mode='a')
if verbose: print("Saving beam 5 variables.")
ds5.compute()
if verbose: print("Done saving beam 5 variables.")
del(ds5)
embed()
coordsdict = coords
# Load beams 1-4 variables into memory to
# be able to put them in a chunked DataArray.
if verbose: print("Allocating Janus variables.")
grps = ['velocity_janus', 'correlation_janus', 'echo_intensity_janus']
long_names = ['Janus velocity', 'Janus correlation', 'Janus echo amplitude']
units = ['mm/s, positive toward xducer face', 'unitless', 'dB']
    varsj = dict()
    for grp,lname,unit in zip(grps,long_names,units):
        if verbose: print(grp)
        wrk = ensdict.map_partitions(_alloc_janus, grp)
        wrk = darr.from_array(np.array(wrk.compute()).T, chunks=(1, chunks))
        wrk = DataArray(wrk, coords=coordsdict, dims=dims, attrs=dict(units=unit, long_name=lname))
        varsj.update({grp:wrk})
del(wrk)
dsj = Dataset(data_vars=varsj, coords=coordsdict)
dsj = dsj.to_netcdf(ncfpath, compute=False, mode='a')
if verbose: print("Saving Janus variables.")
dsj.compute()
if verbose: print("Done saving Janus variables.")
del(dsj)
long_names = ('Beam 1 velocity', 'Beam 2 velocity',
'Beam 3 velocity', 'Beam 4 velocity',
'Beam 5 velocity',
'Beam 1 correlation', 'Beam 2 correlation',
'Beam 3 correlation', 'Beam 4 correlation',
'Beam 5 correlation',
'Beam 1 echo amplitude', 'Beam 2 echo amplitude',
'Beam 3 echo amplitude', 'Beam 4 echo amplitude',
'Beam 5 echo amplitude',
'heading', 'pitch', 'roll')
units = ('m/s, positive toward xducer face',
'm/s, positive toward xducer face',
'm/s, positive toward xducer face',
'm/s, positive toward xducer face',
'm/s, positive toward xducer face',
'no units', 'no units', 'no units', 'no units',
'no units',
'dB', 'dB', 'dB', 'dB',
'dB',
'degrees', 'degrees', 'degrees')
names = ('b1', 'b2', 'b3', 'b4', 'b5',
'cor1', 'cor2', 'cor3', 'cor4', 'cor5',
'int1', 'int2', 'int3', 'int4', 'int5',
'phi1', 'phi2', 'phi3')
# data_vars = {}
#
# sk = darr.zeros((nz, nt), chunks=chunks)*np.nan # Beam vels stored in mm/s
# # as int64 to save memory.
# b1, b2, b3, b4 = sk.copy(), sk.copy(), sk.copy(), sk.copy()
# # embed()
# sk0 = darr.zeros(nt, chunks=chunks)*np.nan
# cor1, cor2, cor3, cor4 = sk.copy(), sk.copy(), sk.copy(), sk.copy()
# int1, int2, int3, int4 = sk.copy(), sk.copy(), sk.copy(), sk.copy()
# b5, cor5, int5 = sk.copy(), sk.copy(), sk.copy()
# heading, pitch, roll = sk0.copy(), sk0.copy(), sk0.copy()
# tjanus = []
# ensdict = np.array(ensdict)[~fbadens]
# ensdict = ensdict.tolist()
arrs = (b1, b2, b3, b4, b5,
cor1, cor2, cor3, cor4, cor5,
int1, int2, int3, int4, int5,
heading, pitch, roll)
# pressure, temperature, salinity, soundspeed)
for arr,name,long_name,unit in zip(arrs,names,long_names,units):
if 'Beam5' in long_name:
coordsn = coords5
dimsn = dims
elif 'phi' in name:
coordsn = coords0
dimsn = dims0
else:
coordsn = coords
dimsn = dims
da = DataArray(arr, coords=coordsn, dims=dimsn, attrs=dict(units=unit, long_name=long_name))
data_vars.update({name:da})
allcoords.update(coords)
allcoords.update(coords5)
ds = Dataset(data_vars=data_vars, coords=allcoords, attrs=dsattrs)
return ds
def ensembles2dataset(ensdict, dsattrs={}, verbose=False, print_every=1000):
"""
Convert a dictionary of ensembles into an xarray Dataset object.
"""
mms2ms = 1e-3
fbadens = np.array([not isinstance(ens, dict) for ens in ensdict])
nt = len(ensdict) - np.sum(fbadens)
n=0
ensdict0 = np.nan
while not isinstance(ensdict0, dict):
ensdict0 = ensdict[n]
n+=1
nz = ensdict0['fixed_leader_janus']['number_of_cells']
sk = np.ma.zeros((nz, nt))*np.nan # Beam vels stored in mm/s
# as int64 to save memory.
b1, b2, b3, b4 = sk.copy(), sk.copy(), sk.copy(), sk.copy()
sk0 = np.ma.zeros(nt)*np.nan
cor1, cor2, cor3, cor4 = sk.copy(), sk.copy(), sk.copy(), sk.copy()
int1, int2, int3, int4 = sk.copy(), sk.copy(), sk.copy(), sk.copy()
b5, cor5, int5 = sk.copy(), sk.copy(), sk.copy()
heading, pitch, roll = sk0.copy(), sk0.copy(), sk0.copy()
tjanus = []
ensdict = np.array(ensdict)[~fbadens]
ensdict = ensdict.tolist()
n=0
for ensarr in ensdict:
tjanus.append(ensarr['timestamp'])
heading[n] = ensarr['variable_leader_janus']['heading']
pitch[n] = ensarr['variable_leader_janus']['pitch']
roll[n] = ensarr['variable_leader_janus']['roll']
vjanus = ensarr['velocity_janus']['data']
b1[:, n] = vjanus[:, 0]
b2[:, n] = vjanus[:, 1]
b3[:, n] = vjanus[:, 2]
b4[:, n] = vjanus[:, 3]
b5[:, n] = ensarr['velocity_beam5']['data'].squeeze()
corjanus = ensarr['correlation_janus']['data']
cor1[:, n] = corjanus[:, 0]
cor2[:, n] = corjanus[:, 1]
cor3[:, n] = corjanus[:, 2]
cor4[:, n] = corjanus[:, 3]
cor5[:, n] = ensarr['correlation_beam5']['data'].squeeze()
intjanus = ensarr['echo_intensity_janus']['data']
int1[:, n] = intjanus[:, 0]
int2[:, n] = intjanus[:, 1]
int3[:, n] = intjanus[:, 2]
int4[:, n] = intjanus[:, 3]
int5[:, n] = ensarr['echo_intensity_beam5']['data'].squeeze()
n+=1
if verbose and not n%print_every: print(n)
fixj = ensdict0['fixed_leader_janus']
fix5 = ensdict0['fixed_leader_beam5']
# Add ping offset to get beam 5's timestamps.
dt5 = fix5['ping_offset_time'] # In milliseconds.
dt5 = np.array(Timedelta(dt5, unit='ms'))
t5 = tjanus + dt5
th = fixj['beam_angle']
assert th==25 # Always 25 degrees.
th = th*np.pi/180.
Cth = np.cos(th)
# Construct along-beam/vertical axes.
cm2m = 1e-2
r1janus = fixj['bin_1_distance']*cm2m
r1b5 = fix5['bin_1_distance']*cm2m
ncj = fixj['number_of_cells']
nc5 = fix5['number_of_cells']
lcj = fixj['depth_cell_length']*cm2m
lc5 = fix5['depth_cell_length']*cm2m
Lj = ncj*lcj # Distance from center of bin 1 to the center of last bin (Janus).
L5 = nc5*lc5 # Distance from center of bin 1 to the center of last bin (beam 5).
rb = r1janus + np.arange(0, Lj, lcj) # Distance from xducer head
# (Janus).
zab = Cth*rb # Vertical distance from xducer head
# (Janus).
zab5 = r1b5 + np.arange(0, L5, lc5) # Distance from xducer head, also
# depth for the vertical beam.
rb = IndexVariable('z', rb, attrs={'units':'meters', 'long_name':"along-beam distance from the xducer's face to the center of the bins, for beams 1-4 (Janus)"})
zab = IndexVariable('z', zab, attrs={'units':'meters', 'long_name':"vertical distance from the instrument's head to the center of the bins, for beams 1-4 (Janus)"})
zab5 = IndexVariable('z', zab5, attrs={'units':'meters', 'long_name':"vertical distance from xducer face to the center of the bins, for beam 5 (vertical)"})
time = IndexVariable('time', tjanus, attrs={'long_name':'timestamp for beams 1-4 (Janus)'})
time5 = IndexVariable('time', t5, attrs={'long_name':'timestamp for beam 5 (vertical)'})
coords0 = [('time', time)]
coords = [('z', zab), ('time', time)]
coords5 = [('z5', zab5), ('time5', time5)]
dims = ['z', 'time']
dims0 = ['time']
# Convert velocities to m/s.
b1, b2, b3, b4, b5 = b1*mms2ms, b2*mms2ms, b3*mms2ms, b4*mms2ms, b5*mms2ms
# Scale heading, pitch and roll. Sentinel V manual, p. 259.
phisc = 0.01
heading *= phisc
pitch *= phisc
roll *= phisc
arrs = (b1, b2, b3, b4, b5,
cor1, cor2, cor3, cor4, cor5,
int1, int2, int3, int4, int5,
heading, pitch, roll)
# pressure, temperature, salinity, soundspeed)
long_names = ('Beam 1 velocity', 'Beam 2 velocity',
'Beam 3 velocity', 'Beam 4 velocity',
'Beam 5 velocity',
'Beam 1 correlation', 'Beam 2 correlation',
'Beam 3 correlation', 'Beam 4 correlation',
'Beam 5 correlation',
'Beam 1 echo amplitude', 'Beam 2 echo amplitude',
'Beam 3 echo amplitude', 'Beam 4 echo amplitude',
'Beam 5 echo amplitude',
'heading', 'pitch', 'roll')
units = ('m/s, positive toward xducer face',
'm/s, positive toward xducer face',
'm/s, positive toward xducer face',
'm/s, positive toward xducer face',
'm/s, positive toward xducer face',
'no units', 'no units', 'no units', 'no units',
'no units',
'dB', 'dB', 'dB', 'dB',
'dB',
'degrees', 'degrees', 'degrees')
names = ('b1', 'b2', 'b3', 'b4', 'b5',
'cor1', 'cor2', 'cor3', 'cor4', 'cor5',
'int1', 'int2', 'int3', 'int4', 'int5',
'phi1', 'phi2', 'phi3')
data_vars = {}
for arr,name,long_name,unit in zip(arrs,names,long_names,units):
if 'Beam5' in long_name:
coordsn = coords5
dimsn = dims
elif 'phi' in name:
coordsn = coords0
dimsn = dims0
else:
coordsn = coords
dimsn = dims
if 'int' in name:
arr *= 0.45 # Scale factor for echo intensity, see Sentinel V manual
# Sentinel V manual p. 264.
da = DataArray(arr, coords=coordsn, dims=dimsn, attrs=dict(units=unit, long_name=long_name))
data_vars.update({name:da})
allcoords = {'rb':rb} # Along-beam distance for slanted beams.
allcoords.update(coords)
allcoords.update(coords5)
ds = Dataset(data_vars=data_vars, coords=allcoords, attrs=dsattrs)
return ds
def read_PD0_file(path, header_lines=0, return_pd0=False, all_ensembles=True,
format='sentinel', use_dask=True, chunks=100,
debug=False, verbose=True, print_every=1e3):
"""Read a TRDI Workhorse or Sentinel V *.pd0 file."""
pd0_bytes = bytearray()
with open(path, 'rb') as f:
pd0_bytes = bytearray(f.read())
f.close()
if all_ensembles:
pd0reader = read_PD0_bytes_ensembles
kwread = dict(verbose=verbose, print_every=print_every,
use_dask=use_dask, chunks=chunks)
else:
pd0reader = read_PD0_bytes
kwread = dict()
ret = pd0reader(pd0_bytes, return_pd0=return_pd0, format=format, **kwread)
if return_pd0:
data, t, fixed_attrs, BAD_ENS, fbad_ens, errortype_count, pd0_bytes = ret
else:
data, t, fixed_attrs, BAD_ENS, fbad_ens, errortype_count = ret
if verbose:
nens = len(t)
nbadens = len(fbad_ens)
ngoodens = nens - nbadens
pbadens = 100.*nbadens/nens
print("")
print("Skipped %d/%d bad ensembles (%.2f%%)."%(nbadens, nens, pbadens))
print("---Breakdown of dud ensembles---")
print("*Bad checksums: %d"%errortype_count['bad_checksum'])
print("*Could not read ensemble's checksum: %d"%errortype_count['read_checksum'])
print("*Could not read ensemble's header: %d"%errortype_count['read_header'])
if debug:
if return_pd0:
ret = data, t, fixed_attrs, BAD_ENS, fbad_ens, errortype_count, pd0_bytes
else:
ret = data, t, fixed_attrs, BAD_ENS, fbad_ens, errortype_count
else:
if return_pd0:
ret = data, t, fixed_attrs, pd0_bytes
else:
ret = data, t, fixed_attrs
return ret
def read_PD0_bytes_ensembles(PD0_BYTES, return_pd0=False, headerid='\x7f\x7f',
format='sentinel', use_dask=True, chunks=1e4,
verbose=True, print_every=1000):
"""
Finds the hex positions in the bytearray that identify the header of each
    ensemble. Then reads each ensemble into a dictionary and accumulates them
in a list.
"""
chunks = int(chunks)
if format=='workhorse':
parsepd0 = parse_pd0_bytearray
elif format=='sentinel':
parsepd0 = parse_sentinelVpd0_bytearray
else:
print('Unknown *.pd0 format')
# Split segments of the byte array per ensemble.
ensbytes = PD0_BYTES.split(headerid)
ensbytes = [headerid + ens for ens in ensbytes] # Prepend header id back.
ensbytes = ensbytes[1:] # First entry is empty, cap it off.
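    # A leading separator produces an empty first element, e.g.
    # '\x7f\x7fAAA\x7f\x7fBBB'.split('\x7f\x7f') -> ['', 'AAA', 'BBB'],
    # which is why the first entry is dropped above.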
nens = len(ensbytes)
nensm = nens - 1
fbad_ens = []
BAD_ENS = []
# embed()
# Get timestamps for all ensembles.
# Note that these timestamps indicate the Janus' (i.e., beams 1-4) pings,
# which will not necessarily be the same as the vertical beam's timestamp.
t = np.empty(nens, dtype=object)
if use_dask:
DATA = darr.from_array(np.array([], dtype=object, ndmin=1), chunks=chunks)
ntotalchunks = nens//chunks
rem_ens = nens%chunks
has_tail=rem_ens>0
if has_tail: ntotalchunks+=1 # Last chunk takes remaining ensembles.
DATAbuffskel = np.empty(chunks, dtype=object)
DATAbuff = DATAbuffskel.copy()
daNaN = darr.from_array(np.array(np.nan, ndmin=1), chunks=1)
cont_inchnk=0
else:
DATA = np.empty(nens, dtype=object)
nChecksumError, nReadChecksumError, nReadHeaderError = 0, 0, 0
cont=0
cont_inchnk=0
for ensb in ensbytes:
try:
if use_dask:
dd = delayed(parsepd0)(ensb)
else:
dd = parsepd0(ensb)
# embed()
t[cont] = dd['timestamp']
except (ChecksumError, ReadChecksumError, ReadHeaderError) as E:
t[cont] = np.nan
fbad_ens.append(cont) # Store index of bad ensemble.
# BAD_ENS.append(ens) # Store bytes of the bad ensemble.
# Which type of error was it?
if isinstance(E, ChecksumError):
nChecksumError += 1
elif isinstance(E, ReadChecksumError):
nReadChecksumError += 1
elif isinstance(E, ReadHeaderError):
nReadHeaderError += 1
if use_dask:
if cont_inchnk==chunks:
DATA = darr.concatenate((DATA, daNaN.copy()))
DATAbuff = DATAbuffskel.copy()
cont_inchnk=0
else:
DATAbuff[cont_inchnk] = np.nan
cont_inchnk+=1
if has_tail and cont==nensm: # Save the last chunk.
DATA = darr.concatenate((DATA, daNaN.copy()))
else:
DATA[cont] = np.nan
cont+=1
continue
if use_dask:
if cont_inchnk==chunks:
DATA = darr.concatenate((DATA, darr.from_array(DATAbuff, chunks=chunks)))
DATAbuff = DATAbuffskel.copy()
cont_inchnk=0
# embed()
else:
DATAbuff[cont_inchnk] = dd
cont_inchnk+=1
if has_tail and cont==nensm: # Save the last chunk.
DATA = darr.concatenate((DATA, darr.from_array(DATAbuff, chunks=chunks)))
else:
DATA[cont] = dd
cont+=1
if verbose and not cont%print_every: print("Ensemble %d"%cont)
errortype_count = dict(bad_checksum=nChecksumError,
read_checksum=nReadChecksumError,
read_header=nReadHeaderError)
# Extract ensemble-independent fields (store in xr.Dataset attributes).
# fixed_attrs = _pick_misc(DATA) # FIXME
fixed_attrs = []
# embed()
if return_pd0:
ret = (DATA, t, fixed_attrs, BAD_ENS, fbad_ens, errortype_count, PD0_BYTES)
else:
ret = (DATA, t, fixed_attrs, BAD_ENS, fbad_ens, errortype_count)
return ret
def read_PD0_bytes(pd0_bytes, return_pd0=False, format='sentinel'):
if format=='workhorse':
data = parse_pd0_bytearray(pd0_bytes)
elif format=='sentinel':
data = parse_sentinelVpd0_bytearray(pd0_bytes)
else:
print('Unknown *.pd0 format')
if return_pd0:
ret = data, pd0_bytes
else:
ret = data
return ret
def inspect_PD0_file(path, format='sentinelV'):
"""
Fetches and organizes metadata on instrument setup
and organizes them in a table.
"""
raise NotImplementedError()
confparams = ['data_source', # START Header.
'number_of_bytes',
'address_offsets',
'number_of_data_types', # END Header.
'system_power', # START fixed_leader_janus.
'system_configuration_MSB',
'sensor_source',
'system_configuration_LSB',
'system_bandwidth',
'number_of_cells',
'pings_per_ensemble',
'false_target_threshold',
'serial_number',
'lag_length',
'sensor_available',
'depth_cell_length',
'beam_angle',
'error_velocity_threshold',
'coordinate_transformation_process',
'heading_bias',
'transmit_pulse_length',
'heading_alignment',
'starting_depth_cell',
'number_of_beams',
'low_correlation_threshold',
'simulation_data_flag',
'cpu_firmware_version',
'transmit_lag_distance',
'ending_depth_cell',
'minimum_percentage_water_profile_pings',
'signal_processing_mode',
'blank_after_transmit',
'bin_1_distance', # END fixed_leader_janus.
'depth_cell_length', # START fixed_leader_beam5.
'vertical_mode',
'ping_offset_time',
'vertical_lag_length',
'transmit_pulse_length',
'number_of_cells',
'bin_1_distance',
'transmit_code_elements',
'pings_per_ensemble', # END fixed_leader_beam5.
'roll_standard_deviation', # START variable_leader_janus.
'error_status_word',
'attitude',
'contamination_sensor',
'attitude_temperature',
'temperature',
'speed_of_sound',
'pitch_standard_deviation',
'pressure_variance',
'heading_standard_deviation',
'pressure',
'transmit_current',
'ensemble_roll_over',
'depth_of_transducer',
'bit_result',
'ambient_temperature',
'salinity',
'pressure_positive',
'pressure_negative',
'transmit_voltage', # END variable_leader_janus.
]
def _pick_misc(d, confparams=confparams):
"""
Check whether the configuration parameters change over ensembles.
If not, replace them with a single value.
"""
dconfparams = dict()
d.reverse()
while d:
dn = d.pop()
for group in dn.keys():
for param in dn[group].keys():
if param in confparams:
                    dconfparams.setdefault(param, []).append(dn[group][param])
ddesc = np.unique([dnn['descriptors'] for dnn in d if dnn is not None]) # Array of lists.
if ddesc.size==1: # If all the lists store the exact same strings.
dconfparams['descriptors'] = ddesc
else:
# print("Warning: Some setup parameters changed during deployment.")
pass
return dconfparams
| mit | -928,729,607,118,611,300 | 36.648684 | 168 | 0.567924 | false |
stratton-oakcoin/oakcoin | contrib/devtools/security-check.py | 1 | 8216 | #!/usr/bin/env python
# Copyright (c) 2015-2016 The Oakcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
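# Example run (hypothetical binary paths; nothing is printed when all checks pass):
#
#   READELF=/usr/bin/readelf OBJDUMP=/usr/bin/objdump \
#       contrib/devtools/security-check.py src/oakcoind src/qt/oakcoin-qt
#   echo $?    # 0 on success, 1 if any executable failed a fatal check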
from __future__ import division,print_function,unicode_literals
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
NONFATAL = {'HIGH_ENTROPY_VA'} # checks which are non-fatal for now but only generate a warning
def check_ELF_PIE(executable):
'''
Check for position independent executable (PIE), allowing for address space randomization.
'''
p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split(b'\n'):
line = line.split()
if len(line)>=2 and line[0] == b'Type:' and line[1] == b'DYN':
ok = True
return ok
def get_ELF_program_headers(executable):
'''Return type and flags for ELF program headers'''
p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
in_headers = False
count = 0
headers = []
for line in stdout.split(b'\n'):
if line.startswith(b'Program Headers:'):
in_headers = True
if line == b'':
in_headers = False
if in_headers:
if count == 1: # header line
ofs_typ = line.find(b'Type')
ofs_offset = line.find(b'Offset')
ofs_flags = line.find(b'Flg')
ofs_align = line.find(b'Align')
if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
raise ValueError('Cannot parse elfread -lW output')
elif count > 1:
typ = line[ofs_typ:ofs_offset].rstrip()
flags = line[ofs_flags:ofs_align].rstrip()
headers.append((typ, flags))
count += 1
return headers
def check_ELF_NX(executable):
'''
Check that no sections are writable and executable (including the stack)
'''
have_wx = False
have_gnu_stack = False
for (typ, flags) in get_ELF_program_headers(executable):
if typ == b'GNU_STACK':
have_gnu_stack = True
if b'W' in flags and b'E' in flags: # section is both writable and executable
have_wx = True
return have_gnu_stack and not have_wx
def check_ELF_RELRO(executable):
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
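    # A hardened binary typically shows a GNU_RELRO program header in the
    # `readelf -l -W` output and a (BIND_NOW) entry, or BIND_NOW inside the
    # (FLAGS) entry, in the `readelf -d -W` output; both are required below.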
have_gnu_relro = False
for (typ, flags) in get_ELF_program_headers(executable):
# Note: not checking flags == 'R': here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
# However, the dynamic linker need to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347
if typ == b'GNU_RELRO':
have_gnu_relro = True
have_bindnow = False
p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.split(b'\n'):
tokens = line.split()
if len(tokens)>1 and tokens[1] == b'(BIND_NOW)' or (len(tokens)>2 and tokens[1] == b'(FLAGS)' and b'BIND_NOW' in tokens[2]):
have_bindnow = True
return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
'''
Check for use of stack canary
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split(b'\n'):
if b'__stack_chk_fail' in line:
ok = True
return ok
def get_PE_dll_characteristics(executable):
'''
Get PE DllCharacteristics bits.
Returns a tuple (arch,bits) where arch is 'i386:x86-64' or 'i386'
and bits is the DllCharacteristics value.
'''
p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
arch = ''
bits = 0
for line in stdout.split('\n'):
tokens = line.split()
if len(tokens)>=2 and tokens[0] == 'architecture:':
arch = tokens[1].rstrip(',')
if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
bits = int(tokens[1],16)
return (arch,bits)
IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040
IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100
def check_PE_DYNAMIC_BASE(executable):
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
(arch,bits) = get_PE_dll_characteristics(executable)
reqbits = IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
return (bits & reqbits) == reqbits
# On 64 bit, must support high-entropy 64-bit address space layout randomization in addition to DYNAMIC_BASE
# to have secure ASLR.
def check_PE_HIGH_ENTROPY_VA(executable):
'''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
(arch,bits) = get_PE_dll_characteristics(executable)
if arch == 'i386:x86-64':
reqbits = IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA
else: # Unnecessary on 32-bit
assert(arch == 'i386')
reqbits = 0
return (bits & reqbits) == reqbits
def check_PE_NX(executable):
'''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
(arch,bits) = get_PE_dll_characteristics(executable)
return (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
CHECKS = {
'ELF': [
('PIE', check_ELF_PIE),
('NX', check_ELF_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary)
],
'PE': [
('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
('NX', check_PE_NX)
]
}
def identify_executable(executable):
    # Use the function argument rather than the 'filename' global that only
    # happens to be in scope when called from the __main__ loop below.
    with open(executable, 'rb') as f:
        magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('%s: unknown format' % filename)
retval = 1
continue
failed = []
warning = []
for (name, func) in CHECKS[etype]:
if not func(filename):
if name in NONFATAL:
warning.append(name)
else:
failed.append(name)
if failed:
print('%s: failed %s' % (filename, ' '.join(failed)))
retval = 1
if warning:
print('%s: warning %s' % (filename, ' '.join(warning)))
except IOError:
print('%s: cannot open' % filename)
retval = 1
exit(retval)
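# Example invocation (sketch; the script name and binary paths are placeholders):
#   python security-check.py /path/to/binary1 /path/to/binary2
# Checks listed in NONFATAL are reported as warnings; any other failed check is
# printed as "failed" and makes the script exit with a non-zero status.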
| mit | -7,381,608,130,979,772,000 | 37.037037 | 163 | 0.617454 | false |
dims/oslo.utils | oslo/utils/openstack/common/log.py | 1 | 26577 | # Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""OpenStack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from oslo.utils.openstack.common.gettextutils import _
from oslo.utils.openstack.common import jsonutils
from oslo.utils.openstack.common import local
from oslo.utils import importutils
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
r'(<%(key)s>).*?(</%(key)s>)',
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])'
'.*?([\'"])',
r'(%(key)s\s*--?[A-z]+\s*).*?([\s])']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS.append(reg_ex)
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation.'),
cfg.StrOpt('log-format',
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s .'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths.'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and will change in J to honor RFC5424.'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Enables or disables syslog rfc5424 format '
'for logging. If enabled, prefixes the MSG part of the '
'syslog message with APP-NAME (RFC5424). The '
'format without the APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines.')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error.')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context.'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context.'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG.'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
default=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'oslo.messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN'
],
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
help='Enables or disables publication of error events.'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log '
'message. '),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message. '),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
return None
def mask_password(message, secret="***"):
"""Replace password with 'secret' in message.
:param message: The string which includes security information.
:param secret: value with which to replace passwords.
:returns: The unicode value of message with the password fields masked.
For example:
>>> mask_password("'adminPass' : 'aaaaa'")
"'adminPass' : '***'"
>>> mask_password("'admin_pass' : 'aaaaa'")
"'admin_pass' : '***'"
>>> mask_password('"password" : "aaaaa"')
'"password" : "***"'
>>> mask_password("'original_password' : 'aaaaa'")
"'original_password' : '***'"
>>> mask_password("u'original_password' : u'aaaaa'")
"u'original_password' : u'***'"
"""
message = six.text_type(message)
# NOTE(ldbragst): Check to see if anything in message contains any key
# specified in _SANITIZE_KEYS, if not then just return the message since
# we don't have to mask any passwords.
if not any(key in message for key in _SANITIZE_KEYS):
return message
secret = r'\g<1>' + secret + r'\g<2>'
for pattern in _SANITIZE_PATTERNS:
message = re.sub(pattern, secret, message)
return message
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
self._deprecated_messages_sent = dict()
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
"""Call this method when a deprecated feature is used.
If the system is configured for fatal deprecations then the message
is logged at the 'critical' level and :class:`DeprecatedConfig` will
be raised.
Otherwise, the message will be logged (once) at the 'warn' level.
:raises: :class:`DeprecatedConfig` if the system is configured for
fatal deprecations.
"""
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
# Using a list because a tuple with dict can't be stored in a set.
sent_args = self._deprecated_messages_sent.setdefault(msg, list())
if args in sent_args:
# Already logged this message, so don't log it again.
return
sent_args.append(args)
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
# NOTE(mrodden): catch any Message/other object and
# coerce to unicode before they can get
# to the python logging and possibly
# cause string encoding trouble
if not isinstance(msg, six.string_types):
msg = six.text_type(msg)
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_uuid = (extra.get('instance_uuid') or
kwargs.pop('instance_uuid', None))
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
elif instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra['instance'] = instance_extra
extra.setdefault('user_identity', kwargs.pop('user_identity', None))
extra['project'] = self.project
extra['version'] = self.version
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but its still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [moves.filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {'exc_info': (exc_type, value, tb)}
getLogger(product_name).critical(
"".join(traceback.format_exception_only(exc_type, value)),
**extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
except moves.configparser.Error as exc:
raise LogConfigError(log_config_append, six.text_type(exc))
def setup(product_name, version='unknown'):
"""Setup logging."""
if CONF.log_config_append:
_load_log_config(CONF.log_config_append)
else:
_setup_logging_from_conf(product_name, version)
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
def __init__(self, *args, **kwargs):
self.binary_name = _get_binary_name()
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
logging.handlers.SysLogHandler.__init__(self, *args, **kwargs)
def format(self, record):
# Do not use super() unless type(logging.handlers.SysLogHandler)
# is 'type' (Python 2.7).
# Use old style calls, if the type is 'classobj' (Python 2.6)
msg = logging.handlers.SysLogHandler.format(self, record)
msg = self.binary_name + ' ' + msg
return msg
def _setup_logging_from_conf(project, version):
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
# TODO(bogdando) use the format provided by RFCSysLogHandler
# after existing syslog format deprecation in J
if CONF.use_syslog_rfc_format:
syslog = RFCSysLogHandler(address='/dev/log',
facility=facility)
else:
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not logpath:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
handler = importutils.import_object(
"oslo.utils.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(project=project,
version=version,
datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
logger = logging.getLogger(mod)
# NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name
# to integer code.
if sys.version_info < (2, 7):
level = logging.getLevelName(level_name)
logger.setLevel(level)
else:
logger.setLevel(level_name)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""Returns lazy logger.
Creates a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created.
"""
return LazyAdapter(name, version)
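# Illustrative usage (sketch; 'myservice' is a placeholder name):
#   LOG = getLogger('myservice')
#   LOG.info('service started')        # standard levels go through ContextAdapter
#   LOG.audit('privileged action')     # synthesized AUDIT level defined above
#   LAZY = getLazyLogger('myservice')  # real logger is built on first access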
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg.rstrip())
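# Illustrative use (sketch): wrap a logger so code that expects a file-like
# object (anything calling .write()) logs through it instead.
#   sink = WritableLogger(getLogger('wsgi').logger, logging.INFO)
#   sink.write('request handled\n')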
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
If available, uses the context value stored in TLS - local.store.context
"""
def __init__(self, *args, **kwargs):
"""Initialize ContextFormatter instance
Takes additional keyword arguments which can be used in the message
format string.
:keyword project: project name
:type project: string
:keyword version: project version
:type version: string
"""
self.project = kwargs.pop('project', 'unknown')
self.version = kwargs.pop('version', 'unknown')
logging.Formatter.__init__(self, *args, **kwargs)
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
# store project info
record.project = self.project
record.version = self.version
# store request info
context = getattr(local.store, 'context', None)
if context:
d = _dictify_context(context)
for k, v in d.items():
setattr(record, k, v)
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color', 'user_identity'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id'):
fmt = CONF.logging_context_format_string
else:
fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
fmt += " " + CONF.logging_debug_format_suffix
if sys.version_info < (3, 2):
self._fmt = fmt
else:
self._style = logging.PercentStyle(fmt)
self._fmt = self._style._fmt
# Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = moves.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
| apache-2.0 | 858,290,176,509,701,200 | 35.506868 | 79 | 0.588027 | false |
benfitzpatrick/cylc | lib/cylc/gui/combo_logviewer.py | 1 | 3938 | #!/usr/bin/env python
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2016 NIWA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Cylc gui log viewer, with a combo box for log file selection."""
import gtk
import os
from parsec.OrderedDict import OrderedDict
from cylc.gui.logviewer import logviewer
from cylc.gui.tailer import Tailer
from cylc.task_id import TaskID
class ComboLogViewer(logviewer):
"""Implement a viewer for task jobs in the "cylc gui".
    It has a combo box for log file selection.
task_id -- The NAME.POINT of a task proxy.
filenames -- The names of the task job logs.
    cmd_tmpls -- A dict mapping file names to the alternate commands used to
        tail-follow those files.
init_active_index -- The index for selecting the initial log file.
"""
LABEL_TEXT = "Choose Log File: "
def __init__(self, task_id, filenames, cmd_tmpls, init_active_index):
self.filenames = OrderedDict()
name_str, point_str = TaskID.split(task_id)
for filename in filenames:
try:
f_point_str, f_name_str, f_submit_num_str, f_base_name = (
filename.rsplit(os.sep, 4)[1:])
if (f_point_str == point_str and f_name_str == name_str and
int(f_submit_num_str) and f_base_name):
name = f_submit_num_str + os.sep + f_base_name
if ":" in filename:
name += " (%s)" % (filename.split(":", 1)[0])
except ValueError:
name = filename
self.filenames[name] = filename
self.init_active_index = init_active_index
self.cmd_tmpls = cmd_tmpls
logviewer.__init__(
self, task_id, None, filenames[self.init_active_index])
def connect(self):
"""Connect to the selected log file tailer."""
try:
cmd_tmpl = self.cmd_tmpls[self.filename]
except (KeyError, TypeError):
cmd_tmpl = None
self.t = Tailer(self.logview, self.filename, cmd_tmpl=cmd_tmpl)
self.t.start()
def create_gui_panel(self):
"""Create the panel."""
logviewer.create_gui_panel(self)
label = gtk.Label(self.LABEL_TEXT)
combobox = gtk.combo_box_new_text()
for name in self.filenames:
combobox.append_text(name)
combobox.connect("changed", self.switch_log)
if self.init_active_index:
combobox.set_active(self.init_active_index)
else:
combobox.set_active(0)
self.hbox.pack_end(combobox, False)
self.hbox.pack_end(label, False)
def switch_log(self, callback):
"""Switch to another file, if necessary."""
if self.t is None:
return False
model = callback.get_model()
index = callback.get_active()
name = model[index][0]
filename = self.filenames[name]
if filename != self.filename:
self.filename = filename
self.t.stop()
self.t.join()
logbuffer = self.logview.get_buffer()
pos_start, pos_end = logbuffer.get_bounds()
self.reset_logbuffer()
logbuffer.delete(pos_start, pos_end)
self.log_label.set_text(name)
self.connect()
return False
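# Illustrative construction (sketch; the suite run directory is a placeholder):
#   viewer = ComboLogViewer(
#       'foo.20200101T0000Z',
#       ['/home/user/cylc-run/suite/log/job/20200101T0000Z/foo/01/job.out',
#        '/home/user/cylc-run/suite/log/job/20200101T0000Z/foo/01/job.err'],
#       cmd_tmpls={}, init_active_index=0)
# The combo box then offers entries such as "01/job.out" and "01/job.err".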
| gpl-3.0 | -7,878,608,303,728,449,000 | 34.477477 | 79 | 0.611732 | false |
bolkedebruin/airflow | airflow/providers/mysql/hooks/mysql.py | 1 | 8407 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import MySQLdb
import MySQLdb.cursors
from airflow.hooks.dbapi_hook import DbApiHook
class MySqlHook(DbApiHook):
"""
Interact with MySQL.
You can specify charset in the extra field of your connection
as ``{"charset": "utf8"}``. Also you can choose cursor as
``{"cursor": "SSCursor"}``. Refer to the MySQLdb.cursors for more details.
Note: For AWS IAM authentication, use iam in the extra connection parameters
and set it to true. Leave the password field empty. This will use the
"aws_default" connection to get the temporary token unless you override
in extras.
extras example: ``{"iam":true, "aws_conn_id":"my_aws_conn"}``
"""
conn_name_attr = 'mysql_conn_id'
default_conn_name = 'mysql_default'
supports_autocommit = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.schema = kwargs.pop("schema", None)
self.connection = kwargs.pop("connection", None)
def set_autocommit(self, conn, autocommit):
"""
MySql connection sets autocommit in a different way.
"""
conn.autocommit(autocommit)
def get_autocommit(self, conn):
"""
MySql connection gets autocommit in a different way.
:param conn: connection to get autocommit setting from.
:type conn: connection object.
:return: connection autocommit setting
:rtype: bool
"""
return conn.get_autocommit()
def get_conn(self):
"""
Returns a mysql connection object
"""
conn = self.connection or self.get_connection(self.mysql_conn_id)
conn_config = {
"user": conn.login,
"passwd": conn.password or '',
"host": conn.host or 'localhost',
"db": self.schema or conn.schema or ''
}
# check for authentication via AWS IAM
if conn.extra_dejson.get('iam', False):
conn_config['passwd'], conn.port = self.get_iam_token(conn)
conn_config["read_default_group"] = 'enable-cleartext-plugin'
if not conn.port:
conn_config["port"] = 3306
else:
conn_config["port"] = int(conn.port)
if conn.extra_dejson.get('charset', False):
conn_config["charset"] = conn.extra_dejson["charset"]
if (conn_config["charset"]).lower() == 'utf8' or\
(conn_config["charset"]).lower() == 'utf-8':
conn_config["use_unicode"] = True
if conn.extra_dejson.get('cursor', False):
if (conn.extra_dejson["cursor"]).lower() == 'sscursor':
conn_config["cursorclass"] = MySQLdb.cursors.SSCursor
elif (conn.extra_dejson["cursor"]).lower() == 'dictcursor':
conn_config["cursorclass"] = MySQLdb.cursors.DictCursor
elif (conn.extra_dejson["cursor"]).lower() == 'ssdictcursor':
conn_config["cursorclass"] = MySQLdb.cursors.SSDictCursor
local_infile = conn.extra_dejson.get('local_infile', False)
if conn.extra_dejson.get('ssl', False):
# SSL parameter for MySQL has to be a dictionary and in case
# of extra/dejson we can get string if extra is passed via
# URL parameters
dejson_ssl = conn.extra_dejson['ssl']
if isinstance(dejson_ssl, str):
dejson_ssl = json.loads(dejson_ssl)
conn_config['ssl'] = dejson_ssl
if conn.extra_dejson.get('unix_socket'):
conn_config['unix_socket'] = conn.extra_dejson['unix_socket']
if local_infile:
conn_config["local_infile"] = 1
conn = MySQLdb.connect(**conn_config)
return conn
def get_uri(self):
conn = self.get_connection(getattr(self, self.conn_name_attr))
uri = super().get_uri()
if conn.extra_dejson.get('charset', False):
charset = conn.extra_dejson["charset"]
return "{uri}?charset={charset}".format(uri=uri, charset=charset)
return uri
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
"""
conn = self.get_conn()
cur = conn.cursor()
cur.execute("""
LOAD DATA LOCAL INFILE '{tmp_file}'
INTO TABLE {table}
""".format(tmp_file=tmp_file, table=table))
conn.commit()
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
"""
conn = self.get_conn()
cur = conn.cursor()
cur.execute("""
SELECT * INTO OUTFILE '{tmp_file}'
FROM {table}
""".format(tmp_file=tmp_file, table=table))
conn.commit()
@staticmethod
def _serialize_cell(cell, conn):
"""
MySQLdb converts an argument to a literal
when passing those separately to execute. Hence, this method does nothing.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The same cell
:rtype: object
"""
return cell
def get_iam_token(self, conn):
"""
Uses AWSHook to retrieve a temporary password to connect to MySQL
Port is required. If none is provided, default 3306 is used
"""
from airflow.providers.amazon.aws.hooks.aws_hook import AwsHook
aws_conn_id = conn.extra_dejson.get('aws_conn_id', 'aws_default')
aws_hook = AwsHook(aws_conn_id)
if conn.port is None:
port = 3306
else:
port = conn.port
client = aws_hook.get_client_type('rds')
token = client.generate_db_auth_token(conn.host, port, conn.login)
return token, port
def bulk_load_custom(self, table, tmp_file, duplicate_key_handling='IGNORE', extra_options=''):
"""
A more configurable way to load local data from a file into the database.
.. warning:: According to the mysql docs using this function is a
`security risk <https://dev.mysql.com/doc/refman/8.0/en/load-data-local.html>`_.
If you want to use it anyway you can do so by setting a client-side + server-side option.
This depends on the mysql client library used.
:param table: The table were the file will be loaded into.
:type table: str
:param tmp_file: The file (name) that contains the data.
:type tmp_file: str
:param duplicate_key_handling: Specify what should happen to duplicate data.
You can choose either `IGNORE` or `REPLACE`.
.. seealso::
https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-duplicate-key-handling
:type duplicate_key_handling: str
:param extra_options: More sql options to specify exactly how to load the data.
.. seealso:: https://dev.mysql.com/doc/refman/8.0/en/load-data.html
:type extra_options: str
"""
conn = self.get_conn()
cursor = conn.cursor()
cursor.execute("""
LOAD DATA LOCAL INFILE '{tmp_file}'
{duplicate_key_handling}
INTO TABLE {table}
{extra_options}
""".format(
tmp_file=tmp_file,
table=table,
duplicate_key_handling=duplicate_key_handling,
extra_options=extra_options
))
cursor.close()
conn.commit()
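# Illustrative usage (sketch; assumes a configured MySQL Airflow connection and
# an existing target table):
#   hook = MySqlHook(mysql_conn_id='mysql_default', schema='mydb')
#   hook.bulk_load('my_table', '/tmp/data.tsv')               # tab-delimited file
#   rows = hook.get_records('SELECT COUNT(*) FROM my_table')  # via DbApiHook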
| apache-2.0 | -8,715,119,068,911,317,000 | 36.364444 | 103 | 0.603188 | false |
icyflame/batman | scripts/spamremove.py | 1 | 3721 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to remove links that are being or have been spammed.
Usage:
python pwb.py spamremove www.spammedsite.com
It will use Special:Linksearch to find the pages on the wiki that link to
that site, then for each page make a proposed change consisting of removing
all the lines where that url occurs. You can choose to:
* accept the changes as proposed
* edit the page yourself to remove the offending link
* not change the page in question
Command line options:
-always Do not ask, but remove the lines automatically. Be very
careful in using this option!
-namespace: Filters the search to a given namespace. If this is specified
multiple times it will search all given namespaces
"""
#
# (C) Pywikibot team, 2007-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import pywikibot
from pywikibot import i18n
from pywikibot.editor import TextEditor
def main(*args):
"""
Process command line arguments and perform task.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
always = False
namespaces = []
spamSite = ''
for arg in pywikibot.handle_args(args):
if arg == "-always":
always = True
elif arg.startswith('-namespace:'):
try:
namespaces.append(int(arg[len('-namespace:'):]))
except ValueError:
namespaces.append(arg[len('-namespace:'):])
else:
spamSite = arg
if not spamSite:
pywikibot.bot.suggest_help(missing_parameters=['spam site'])
return False
mysite = pywikibot.Site()
pages = mysite.exturlusage(spamSite, namespaces=namespaces, content=True)
summary = i18n.twtranslate(mysite, 'spamremove-remove',
{'url': spamSite})
for i, p in enumerate(pages, 1):
text = p.text
if spamSite not in text:
continue
# Show the title of the page we're working on.
# Highlight the title in purple.
pywikibot.output(u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
% p.title())
lines = text.split('\n')
newpage = []
lastok = ""
for line in lines:
if spamSite in line:
if lastok:
pywikibot.output(lastok)
pywikibot.output('\03{lightred}%s\03{default}' % line)
lastok = None
else:
newpage.append(line)
if line.strip():
if lastok is None:
pywikibot.output(line)
lastok = line
if always:
answer = "y"
else:
answer = pywikibot.input_choice(
u'\nDelete the red lines?',
[('yes', 'y'), ('no', 'n'), ('edit', 'e')],
'n', automatic_quit=False)
if answer == "n":
continue
elif answer == "e":
editor = TextEditor()
newtext = editor.edit(text, highlight=spamSite,
jumpIndex=text.find(spamSite))
else:
newtext = "\n".join(newpage)
if newtext != text:
p.text = newtext
p.save(summary)
else:
if "i" not in locals():
pywikibot.output('No page found.')
elif i == 1:
pywikibot.output('1 pages done.')
else:
pywikibot.output('%d pages done.' % i)
if __name__ == '__main__':
main()
| mit | -2,946,422,371,415,227,400 | 29.008065 | 79 | 0.552808 | false |
pcrews/lbaas-magic | saltmagic.py | 1 | 12306 | # lbaas-magic.py
# setup of nova, salt-master, etc for lbaas shenanigans
# to be run on a prospective salt-master
# This will:
# configure the vm to be an lbaas-salt-master
# run the appropriate salt-cloud mappings
# set up the environment (secgroups, floating_ips, highstate, etc)
# test the environment
###########
# imports
###########
import os
import ast
import sys
import time
import yaml
import logging
import argparse
import commands
##########
# parser
##########
parser = argparse.ArgumentParser(description='saltmagic.py - your gateway to the wonderful world of cloud-based lbaas')
parser.add_argument( '--verbose'
, action = 'count'
, dest = 'verbose'
, default = 0
, help = 'Controls internal output. Utilize multiple times to increase output'
)
parser.add_argument( '--config'
, action = 'store'
, dest ='configfile'
, default = 'saltmagic.cfg'
, help = 'path to a config file containing options. Command line options will supercede any options specified in the config'
)
parser.add_argument( '--create-saltmaster'
, action = 'store_true'
, dest = 'createsaltmaster'
, default = True
, help = 'Flag to signal if you need us to create the saltmaster vm for you. Entails additional checks and setup steps'
)
parser.add_argument( '--delete-saltmaster'
, action = 'store_true'
, dest = 'deletesaltmaster'
, default = False
, help = 'Flag to signal if you need us to delete the saltmaster vm we create for you post-script'
)
parser.add_argument( '--os_username'
, action = 'store'
, dest ='osusername'
, default = None
, help = 'OpenStack username for the account that will own the saltmaster.'
)
parser.add_argument( '--os_tenant'
, action = 'store'
, dest ='ostenant'
, default = None
, help = 'OpenStack tenant name for the account that will own the saltmaster.'
)
parser.add_argument( '--os_password'
, action = 'store'
, dest ='ospassword'
, default = None
, help = 'OpenStack password for the account that will own the saltmaster.'
)
# functions
def report_nova_item(title, nova_item, logging, depth=1):
""" Utility function for reporting info we receive from nova actions in a fancy manner :) """
logging.info(title)
indent = (' '*4)*depth
for item in nova_item:
for key, value in vars(item).items():
if not key.startswith('_'):
if type(value) is list:
logging.info("%s%s:" %(indent,key))
for list_item in value:
logging.info("%s%s" %(indent*2,list_item))
elif type(value) is dict:
report_nova_item(key, value, logging, depth+1)
else:
logging.info("%s%s: %s" %(indent,key, value))
logging.info("")
######
# main
######
# configure logging
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%Y%m%d-%H%M%S %p', level=logging.INFO)
args = parser.parse_args(sys.argv[1:])
if args.verbose:
logging.info("argument values:")
for key, item in vars(args).items():
logging.info("\t%s: %s" %(key, item))
if args.configfile:
# We have a magic config file that we expect to be in key: value format
# get our test input variants (nodes, names, etc)
inputs_file = open(args.configfile,'r')
saltmagic_inputs = yaml.load(inputs_file)
inputs_file.close()
saltmaster_inputs = saltmagic_inputs['saltmaster_inputs']
print "Welcome to lbaas saltmagic, where we usher you into the wonderful world of tomorrow..."
time.sleep(.5)
print "Now on to business."
#sys.exit(0)
##############################
# create salt-master instance
##############################
if args.createsaltmaster:
logging.info("Creating vm instance for salt-master: %s..." %saltmaster_inputs['saltmaster_name'])
cmd = "nova --os-username='%s' --os-tenant-name='%s' --os-password='%s' --os-region-name='%s' --os-auth-url='%s' boot --flavor=%s --image=%s --key_name=%s --security_groups=%s %s" %( saltmaster_inputs['saltmaster_user']
, saltmaster_inputs['saltmaster_tenant']
, saltmaster_inputs['saltmaster_password']
, saltmaster_inputs['saltmaster_region']
, saltmaster_inputs['saltmaster_auth_url']
, saltmaster_inputs['saltmaster_flavor']
, saltmaster_inputs['saltmaster_image']
, saltmaster_inputs['saltmaster_keypair']
, saltmaster_inputs['saltmaster_secgroup']
, saltmaster_inputs['saltmaster_name'])
retcode, result = commands.getstatusoutput(cmd)
logging.info(cmd)
logging.info(retcode)
logging.info("\n%s" %result)
# get info:
saltmaster_info = {}
for line in result.split('\n')[3:-1]:
data = line.split('|')
key = data[1].strip()
value = data[2].strip()
saltmaster_info[key]=value
if args.verbose:
logging.info("preliminary saltmaster_info:")
for key, value in saltmaster_info.items():
logging.info(" %s: %s" %(key, value))
###################################
# wait for salt-master to be ready
###################################
logging.info("Waiting for instance to be in ACTIVE state...")
saltmaster_ready = False
attempts_remain = 120
wait_time = 1
while not saltmaster_ready and attempts_remain:
cmd = "nova --os-username='%s' --os-tenant-name='%s' --os-password='%s' --os-region-name='%s' --os-auth-url='%s' show %s" %( saltmaster_inputs['saltmaster_user']
, saltmaster_inputs['saltmaster_tenant']
, saltmaster_inputs['saltmaster_password']
, saltmaster_inputs['saltmaster_region']
, saltmaster_inputs['saltmaster_auth_url']
, saltmaster_info['id'])
retcode, result = commands.getstatusoutput(cmd)
for line in result.split('\n')[3:-1]:
data = line.split('|')
key = data[1].strip()
value = data[2].strip()
if key == 'status':
if value == "ACTIVE":
saltmaster_ready=True
else:
attempts_remain -= 1
                    logging.info("Node: %s, id: %s not in ACTIVE status. Status: %s." %(saltmaster_inputs['saltmaster_name'], saltmaster_info['id'], value))
logging.info("Waiting %d seconds. %d attempts remain" %(wait_time, attempts_remain))
time.sleep(wait_time)
if not saltmaster_ready:
logging.error("Salt-master vm: %s, id: %s not ACTIVE in %d seconds. Fail!" %(saltmaster_inputs['saltmaster_name'], saltmaster_info['id'], (attempts_remain*wait_time)))
sys.exit(1)
saltmaster_info = {}
for line in result.split('\n')[3:-1]:
data = line.split('|')
key = data[1].strip()
value = data[2].strip()
saltmaster_info[key]=value
if args.verbose:
logging.info("saltmaster_info:")
for key, value in saltmaster_info.items():
logging.info(" %s: %s" %(key, value))
saltmaster_ip = [ipaddr.strip() for ipaddr in saltmaster_info['private network'].split(',') if ipaddr.strip().startswith('15.')][0]
logging.info("Saltmaster ip: %s" %saltmaster_ip)
logging.info("Testing ssh readiness...")
ssh_ready = False
attempts_remain = 300
wait_time = 1
while not ssh_ready and attempts_remain:
cmd = "fab -H %s check_ls" %saltmaster_ip
retcode, result = commands.getstatusoutput(cmd)
if args.verbose:
logging.info(cmd)
logging.info(retcode)
logging.info(result)
if retcode != 0:
attempts_remain -= 1
logging.info("saltmaster not yet ssh ready")
logging.info("Waiting %d seconds. %d attempts remain" %(wait_time, attempts_remain))
time.sleep(wait_time)
else:
ssh_ready=True
if not ssh_ready:
logging.error("Salt-master vm: %s, id: %s not ssh ready in %d seconds. Fail!" %(saltmaster_inputs['saltmaster_name'], saltmaster_info['id'], (attempts_remain*wait_time)))
sys.exit(1)
logging.info("Salt-master ready for action!")
################################
# write a bootstrap pillar file
################################
bootstrap_pillar_file = 'bootstrap_pillar.sls'
logging.info("Writing bootstrap pillar file to %s" %bootstrap_pillar_file)
with open(bootstrap_pillar_file,'w') as outfile:
for key, value in saltmagic_inputs['saltmaster_pillar'].items():
if key == 'saltmaster_ip' and int(value) == 0:
value = saltmaster_ip
elif key == 'lbaas-saltmaster-id-rsa':
value = '|\n ' + value.replace('\n','\n ')
if args.verbose:
logging.info( " %s: %s" %(key, value))
outfile.write("%s: %s\n" %(key, value))
########################
# configure salt-master
########################
logging.info("Starting saltmaster: %s bootstrap..." %saltmaster_inputs['saltmaster_name'])
cmd = "fab -H %s install_salt" %saltmaster_ip
retcode, result = commands.getstatusoutput(cmd)
logging.info(cmd)
logging.info(retcode)
logging.info("\n%s" %result)
###############################
logging.info("Installing salt-cloud...")
cmd = "fab -H %s install_salt_cloud" %saltmaster_ip
retcode, result = commands.getstatusoutput(cmd)
logging.info(cmd)
logging.info(retcode)
logging.info("\n%s" %result)
###############################
"""
# testing salt-cloud
logging.info("Starting test of salt-cloud on our master...")
cmd = "fab -H %s test_salt_cloud" %saltmaster_ip
retcode, result = commands.getstatusoutput(cmd)
logging.info(cmd)
logging.info(retcode)
logging.info("\n%s" %result)
logging.info("Taking a moment to bask in the results of our efforts...")
time.sleep(20)
"""
# call infrastructure mapping for salt-cloud
# this magic is handled in the in-repo salt-cloud deploy script
logging.info("Calling salt-cloud to deploy basic libra environment...")
cmd = "fab -H %s deploy_libra_env:%s,%s,%s,%s,%s,'/etc/salt/cloudconfigs/cloud_az3','/srv/lbaas-staging-salt/cloudmaps/basic_staging_az3.dat'" %(saltmaster_ip, saltmaster_inputs['saltmaster_user']
, saltmaster_inputs['saltmaster_tenant']
, saltmaster_inputs['saltmaster_password']
, saltmaster_inputs['saltmaster_region']
, saltmaster_inputs['saltmaster_auth_url'])
retcode, result = commands.getstatusoutput(cmd)
logging.info(cmd)
logging.info(retcode)
logging.info("\n%s" %result)
# test
################
# fin / cleanup
################
if args.deletesaltmaster:
logging.info("Deleting vm instance for salt-master: %s..." %saltmaster_inputs['saltmaster_name'])
cmd = "nova --os-username='%s' --os-tenant-name='%s' --os-password='%s' --os-region-name='%s' --os-auth-url='%s' delete %s" %( saltmaster_inputs['saltmaster_user']
, saltmaster_inputs['saltmaster_tenant']
, saltmaster_inputs['saltmaster_password']
, saltmaster_inputs['saltmaster_region']
, saltmaster_inputs['saltmaster_auth_url']
, saltmaster_info['id'])
retcode, result = commands.getstatusoutput(cmd)
logging.info(cmd)
logging.info(retcode)
logging.info(result)
logging.info("Yay, we made it!")
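# For reference, the YAML config file consumed above is expected to look
# roughly like this (sketch; every value is a placeholder):
#   saltmaster_inputs:
#     saltmaster_name: lbaas-saltmaster
#     saltmaster_user: <openstack username>
#     saltmaster_tenant: <openstack tenant>
#     saltmaster_password: <openstack password>
#     saltmaster_region: <openstack region>
#     saltmaster_auth_url: <keystone auth url>
#     saltmaster_flavor: <flavor id>
#     saltmaster_image: <image id>
#     saltmaster_keypair: <keypair name>
#     saltmaster_secgroup: <security group>
#   saltmaster_pillar:
#     saltmaster_ip: 0
#     lbaas-saltmaster-id-rsa: <private key contents>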
| apache-2.0 | 5,244,639,250,482,241,000 | 42.793594 | 223 | 0.559321 | false |
mnpiozhang/myblog | blogweb/urls.py | 1 | 1670 | """myblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from views import index,showarticle,searchtag,aboutme,archive,tags,itPopularBooks,novelPopularBooks,toys,searchtitle,flashtime,randomtool,dogfood,douban250books
from feeds import ArticlesFeed
from .apis.booksapi import jdBooksApi
from .apis.commonapi import getAllArticles
urlpatterns = [
url(r'^index/(\d*)', index),
url(r'^show/(\d+)/$',showarticle),
url(r'^tag/(?P<tagname>\w+)/(?P<page>\d*)$',searchtag),
url(r'^about/$',aboutme),
url(r'^archive/$',archive),
url(r'^tag/$',tags),
url(r'^rss/$',ArticlesFeed()),
url(r'^toys/$',toys),
url(r'^toys/itpopularbooks/$',itPopularBooks),
url(r'^toys/novelpopularbooks/$',novelPopularBooks),
url(r'^toys/flashtime/$',flashtime),
url(r'^search',searchtitle),
url(r'^randomtool/',randomtool),
url(r'^dogfood/',dogfood),
url(r'^jdbooks/',jdBooksApi.as_view()),
url(r'^getarticle/',getAllArticles.as_view()),
url(r'^toys/getdouban250/$',douban250books),
]
| mit | 1,069,603,243,270,913,000 | 38.761905 | 160 | 0.684431 | false |
diegojromerolopez/djanban | src/djanban/apps/dev_times/migrations/0009_auto_20170515_1731.py | 1 | 1516 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-15 15:31
from __future__ import unicode_literals
from django.db import migrations
def adjust_spent_time(member, spent_time, date):
spent_time_factors = member.spent_time_factors.all()
for spent_time_factor in spent_time_factors:
if (spent_time_factor.start_date is None and spent_time_factor.end_date is None) or \
(spent_time_factor.start_date <= date and spent_time_factor.end_date is None) or \
(spent_time_factor.start_date <= date <= spent_time_factor.end_date):
adjusted_value = spent_time * spent_time_factor.factor
return adjusted_value
return spent_time
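# Example: a factor of 0.5 whose date interval covers the given date turns a
# recorded spent_time of 8.0 into an adjusted value of 4.0; dates outside every
# factor interval keep the raw spent_time.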
def update_adjusted_spent_time(apps, schema):
DailySpentTime = apps.get_model("dev_times", "DailySpentTime")
for daily_spent_time in DailySpentTime.objects.all():
if daily_spent_time.spent_time is None:
daily_spent_time.adjusted_spent_time = None
else:
daily_spent_time.adjusted_spent_time = adjust_spent_time(
daily_spent_time.member, daily_spent_time.spent_time, daily_spent_time.date
)
DailySpentTime.objects.filter(id=daily_spent_time.id).update(adjusted_spent_time=daily_spent_time.adjusted_spent_time)
class Migration(migrations.Migration):
dependencies = [
('dev_times', '0008_dailyspenttime_adjusted_spent_time'),
]
operations = [
migrations.RunPython(update_adjusted_spent_time)
]
| mit | 604,419,573,935,151,400 | 37.871795 | 126 | 0.668206 | false |
azumimuo/family-xbmc-addon | plugin.video.citerkita/resources/lib/googledocs.py | 1 | 2310 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,json
from resources.lib import client
def resolve(url):
try:
url = url.split('/preview', 1)[0]
url = url.replace('drive.google.com', 'docs.google.com')
result = client.request(url)
result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
u = json.loads(result)
u = [i.split('|')[-1] for i in u.split(',')]
u = sum([tag(i) for i in u], [])
url = []
try: url += [[i for i in u if i['quality'] == '1080p'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'HD'][0]]
except: pass
try: url += [[i for i in u if i['quality'] == 'SD'][0]]
except: pass
if url == []: return
return url
except:
return
def tag(url):
quality = re.compile('itag=(\d*)').findall(url)
quality += re.compile('=m(\d*)$').findall(url)
try: quality = quality[0]
except: return []
if quality in ['37', '137', '299', '96', '248', '303', '46']:
return [{'quality': '1080p', 'url': url}]
elif quality in ['22', '84', '136', '298', '120', '95', '247', '302', '45', '102']:
return [{'quality': 'HD', 'url': url}]
elif quality in ['35', '44', '135', '244', '94']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['18', '34', '43', '82', '100', '101', '134', '243', '93']:
return [{'quality': 'SD', 'url': url}]
elif quality in ['5', '6', '36', '83', '133', '242', '92', '132']:
return [{'quality': 'SD', 'url': url}]
else:
return []
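# Illustrative usage (sketch; the document id is a placeholder):
#   sources = resolve('https://drive.google.com/file/d/<doc-id>/preview')
#   # -> e.g. [{'quality': 'HD', 'url': ...}, {'quality': 'SD', 'url': ...}],
#   #    ordered best quality first, or None if resolution fails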
| gpl-2.0 | -8,715,977,691,981,637,000 | 32 | 87 | 0.554545 | false |
dpineo/gadann | gadann/model.py | 1 | 7347 | #
# GADANN - GPU Accelerated Deep Artificial Neural Network
#
# Copyright (C) 2014 Daniel Pineo ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import numpy
import copy
import logging
import time
import cv2
from .tensor import Tensor
from .updater import SgdUpdater
from . import kernels
logger = logging.getLogger(__name__)
# -------------- NeuralNetworkModel ----------------
class NeuralNetworkModel(object):
def __init__(self, layers, input_shape=None, updater=SgdUpdater()):
self.layers = []
for layer_n, layer in enumerate(layers):
if 'shape' not in layer:
layer['shape'] = input_shape[1:]
if 'input_shape' not in layer:
layer['input_shape'] = input_shape
if 'name' not in layer:
layer['name'] = 'Layer ' + str(layer_n)
if 'updater' not in layer:
layer['updater'] = copy.copy(updater)
self.layers.append(layers[layer_n]['layer'](**layer))
input_shape = self.layers[-1].output_shape
'''
def add(self, layer, **kwargs):
if 'shape' not in kwargs:
layer['shape'] = input_shape[1:]
if 'input_shape' not in kwargs:
layer['input_shape'] = input_shape
if 'name' not in layer:
layer['name'] = 'Layer ' + str(layer_n)
if 'updater' not in layer:
layer['updater'] = copy.copy(updater)
self.layers.append(layer(**kwargs))
input_shape = self.layers[-1].output_shape
'''
def classify(self, features):
probabilities = self.probability(features)
return Tensor(numpy.argmax(probabilities.get(), axis=1))
'''
def probability(self, features):
predictions = self.predict(features)
return tensor.Tensor(numpy.argmax(predictions.get(), axis=1))
confidences = numpy.max(predictions.get(), axis=1)
'''
def evaluate(self, features, labels):
classifications = (self.classify(f) for f in features)
return numpy.fromiter(((l.get().flatten() == c.get().flatten()).mean()
for (l, c) in zip(labels, classifications)), float).mean()
def probability(self, input):
for layer in self.layers:
input = layer.fprop(input)
assert(not numpy.isnan(input.get()).any())
return input
# return reduce(lambda x,l: l.fprop(x), self.layers, input)
def __str__(self):
return self.__class__.__name__ + '\n' + '\n'.join([str(l) for l in self.layers])
def show(self):
for layer_n, layer in enumerate(self.layers):
if not layer.params:
continue
weights = layer.params['w']
for deconv_layer in reversed(self.layers[:layer_n]):
weights = deconv_layer.bprop(weights)
cv2.imshow(layer.name, weights.mosaic().get() + .5)
cv2.waitKey(1)
def train_backprop(self, features, labels, n_epochs):
logger.info("Training (batch gradient descent)")
for epoch in range(n_epochs):
logger.info("Epoch " + str(epoch),)
start_time = time.time()
for n, (batch_features, batch_targets) in enumerate(zip(features, labels)):
# Save the activations during the forward pass, they will be used to
# compute the gradients during the backward pass
layer_activations = [batch_features]
# Forward pass
for layer in self.layers:
layer_activations.append(layer.fprop(layer_activations[-1]))
# Error delta
output_error = layer_activations[-1] - batch_targets
# Backward pass
for layer in reversed(self.layers):
input_error = layer.bprop(output_error, layer_activations.pop())
grads = layer.gradient(layer_activations[-1], output_error)
layer.update(grads)
output_error = input_error
logger.info(' Epoch {} Time={:.3f}'.format(epoch, time.time()-start_time))
self.show()
def train_contrastive_divergence(self, features, n_epochs):
logger.info("Training (contrastive divergence)")
for layer in self.layers[:-2]: # don't train the last linear & softmax layers
logger.info("training " + layer.name)
# skip the layer if it has no parameters to train
if not layer.params:
continue
for epoch in range(n_epochs):
reconstruction_error_avg = 0
start_time = time.time()
for batch_n, v in enumerate(features):
# Gibbs sampling
p_h_given_v = kernels.logistic(layer.fprop(v))
h_sample = kernels.sample(p_h_given_v)
pos_grads = layer.gradient(v, h_sample)
p_v_given_h = kernels.logistic(layer.bprop(h_sample))
v_sample = kernels.sample(p_v_given_h)
ph = kernels.logistic(layer.fprop(v_sample))
h = kernels.sample(ph)
neg_grads = layer.gradient(v, h)
                    # Gradient of the log likelihood wrt the parameters
grads = {k: (neg_grads[k] - pos_grads[k]) / features.batch_size for k in pos_grads.keys()}
# Update parameters wrt the gradients
layer.update(grads)
# Running average of reconstruction error
reconstruction_error = ((v-p_v_given_h)**2).sum()/v.size
reconstruction_error_avg = .1*reconstruction_error + .9*reconstruction_error_avg
self.show()
# print model.updater.status()
logger.info(' Epoch {} Time={:.3f} Error={:.6f}'.format(epoch, time.time()-start_time, reconstruction_error))
self.show()
            # propagate input data through the layer
features = features.apply_batchwise(layer.fprop)
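# --- Illustrative aside (standalone sketch, not used by the class above) ---
# train_contrastive_divergence() follows the usual CD-1 recipe: sample the
# hiddens from the data (positive phase), reconstruct the visibles, resample
# the hiddens (negative phase), and update with the difference of the two
# gradient estimates. The function below shows the same update for a plain
# binary RBM using numpy only; the weight matrix and biases are hypothetical
# stand-ins for this code base's layer parameters.
def _cd1_sketch(v, W, b_vis, b_hid, lr=0.1):
    def sigmoid(x):
        return 1.0 / (1.0 + numpy.exp(-x))
    # Positive phase: hidden activations driven by the data.
    p_h = sigmoid(numpy.dot(v, W) + b_hid)
    h = (numpy.random.rand(*p_h.shape) < p_h).astype(float)
    # Negative phase: one step of Gibbs sampling back to the visibles.
    p_v = sigmoid(numpy.dot(h, W.T) + b_vis)
    v_neg = (numpy.random.rand(*p_v.shape) < p_v).astype(float)
    p_h_neg = sigmoid(numpy.dot(v_neg, W) + b_hid)
    # CD-1 approximation to the log-likelihood gradient, averaged over the batch.
    batch_size = float(v.shape[0])
    W += lr * (numpy.dot(v.T, p_h) - numpy.dot(v_neg.T, p_h_neg)) / batch_size
    b_vis += lr * (v - v_neg).mean(axis=0)
    b_hid += lr * (p_h - p_h_neg).mean(axis=0)
    return ((v - p_v) ** 2).mean()   # reconstruction error, as logged above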
| mit | 8,249,523,668,082,480,000 | 39.275281 | 127 | 0.572887 | false |
vitorfs/cmdbox | cmdbox/snippets/tests/test_views_add.py | 1 | 2461 | from django.core.urlresolvers import reverse as r
from django.contrib.auth.models import User
from django.test import TestCase
from cmdbox.snippets.models import Snippet
from cmdbox.snippets.forms import CreateSnippetForm
class SnippetsAddTests(TestCase):
def setUp(self):
self.user = User.objects.create_user('john', '[email protected]', '123')
self.client.login(username='john', password='123')
self.response = self.client.get(r('add_snippet'))
def test_get(self):
self.assertEqual(self.response.status_code, 200)
def test_template(self):
self.assertTemplateUsed(self.response, 'snippets/add.html')
def test_has_form(self):
form = self.response.context['form']
self.assertIsInstance(form, CreateSnippetForm)
def test_html(self):
self.assertContains(self.response, '<form')
self.assertContains(self.response, 'type="hidden"', 1)
self.assertContains(self.response, 'type="text"', 2)
self.assertContains(self.response, 'type="radio"', 3)
self.assertContains(self.response, 'type="submit"', 1)
def test_csrf(self):
self.assertContains(self.response, 'csrfmiddlewaretoken')
class SnippetsAddValidPostTests(TestCase):
def setUp(self):
self.user = User.objects.create_user('john', '[email protected]', '123')
self.client.login(username='john', password='123')
data = {'slug': 'test', 'description': 'Test snippet', 'visibility': 1}
self.response = self.client.post(r('add_snippet'), data)
def test_post(self):
self.assertEqual(self.response.status_code, 302)
def test_created_snippet(self):
self.assertTrue(Snippet.objects.filter(user__username='john', slug='test').exists())
class SnippetsAddInvalidPostTests(TestCase):
def setUp(self):
self.user = User.objects.create_user('john', '[email protected]', '123')
self.client.login(username='john', password='123')
self.response = self.client.post(r('add_snippet'), dict())
def test_post(self):
self.assertEqual(200, self.response.status_code)
def test_template(self):
self.assertTemplateUsed(self.response, 'snippets/add.html')
def test_has_form(self):
form = self.response.context['form']
self.assertIsInstance(form, CreateSnippetForm)
def test_form_has_errors(self):
form = self.response.context['form']
self.assertTrue(form.errors)
| mit | -1,933,612,880,916,621,300 | 35.191176 | 92 | 0.67371 | false |
Jellby/ASEP-MD | Tests/scripts/gen2gromacs.py | 1 | 9989 | #!/usr/bin/python
# Modify the solute geometry and charges in Gromacs .gro and .top files
# Use with 5 arguments:
# 1 (read): generic system file
# 2 (read): .top file
# 3 (read): .gro file
# 4 (write): modified .top file
# 5 (write): modified .gro file
import sys
import re
import math
import copy
#=============================
# Get input arguments
try:
system_input = sys.argv[1]
except IndexError:
sys.exit("Missing input file")
try:
top_input = sys.argv[2]
except IndexError:
sys.exit("Missing input file")
try:
gro_input = sys.argv[3]
except IndexError:
sys.exit("Missing input file")
try:
top_output = sys.argv[4]
except IndexError:
sys.exit("Missing output file")
try:
gro_output = sys.argv[5]
except IndexError:
sys.exit("Missing output file")
#=============================
# Function to replace a word in a string
# (keeping the alignment if possible)
def rep_word ( words, num, new ):
l = len(words[num])
words[num] = new.rjust(l)
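# Illustrative aside (never called): rep_word keeps fixed-width columns
# aligned by right-justifying the new value to the width of the field it
# replaces.
def _rep_word_example():
  words = ['    1', '  CH4', '   0.100']
  rep_word(words, 2, '0.25')
  return words   # words[2] is now '    0.25', same 8-character width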
#=============================
# Function to displace a molecule, matching an atom with reference
def displace ( mol1, mol2, at ):
disp = {}
disp["x"] = mol1[at]["x"]-mol2[at]["x"]
disp["y"] = mol1[at]["y"]-mol2[at]["y"]
disp["z"] = mol1[at]["z"]-mol2[at]["z"]
old = copy.deepcopy(mol2)
for i in range(len(mol2)):
mol2[i]["x"] = old[i]["x"]+disp["x"]
mol2[i]["y"] = old[i]["y"]+disp["y"]
mol2[i]["z"] = old[i]["z"]+disp["z"]
return
#=============================
# Function to superpose molecules
# see: Acta Crystallogr. Sec. A 61 (2005), 478
# J. Comput. Chem. 31 (2010), 1561
def superpose ( mol1, mol2 ):
center1 = { "x": 0.0, "y": 0.0, "z": 0.0 }
for i in range(len(mol1)):
center1["x"] += mol1[i]["x"]
center1["y"] += mol1[i]["y"]
center1["z"] += mol1[i]["z"]
center1["x"] = center1["x"]/len(mol1)
center1["y"] = center1["y"]/len(mol1)
center1["z"] = center1["z"]/len(mol1)
for i in range(len(mol1)):
mol1[i]["x"] -= center1["x"]
mol1[i]["y"] -= center1["y"]
mol1[i]["z"] -= center1["z"]
G1 = 0
for i in range(len(mol1)):
G1 += mol1[i]["x"]**2+mol1[i]["y"]**2+mol1[i]["z"]**2
# only use first atoms of mol2 to superpose
center2 = { "x": 0.0, "y": 0.0, "z": 0.0 }
for i in range(len(mol1)):
center2["x"] += mol2[i]["x"]
center2["y"] += mol2[i]["y"]
center2["z"] += mol2[i]["z"]
center2["x"] = center2["x"]/len(mol1)
center2["y"] = center2["y"]/len(mol1)
center2["z"] = center2["z"]/len(mol1)
# but move the whole mol2
for i in range(len(mol2)):
mol2[i]["x"] -= center2["x"]
mol2[i]["y"] -= center2["y"]
mol2[i]["z"] -= center2["z"]
G2 = 0
for i in range(len(mol2)):
G2 += mol2[i]["x"]**2+mol2[i]["y"]**2+mol2[i]["z"]**2
M = {}
for i in ["x", "y", "z"]:
for j in ["x", "y", "z"]:
M[i+j] = 0
for k in range(len(mol1)):
M[i+j] += mol1[k][i] * mol2[k][j]
K = []
K.append( [ M["xx"]+M["yy"]+M["zz"], M["yz"]-M["zy"], M["zx"]-M["xz"], M["xy"]-M["yx"] ] )
K.append( [ M["yz"]-M["zy"], M["xx"]-M["yy"]-M["zz"], M["xy"]+M["yx"], M["xz"]+M["zx"] ] )
K.append( [ M["zx"]-M["xz"], M["xy"]+M["yx"], M["yy"]-M["xx"]-M["zz"], M["yz"]+M["zy"] ] )
K.append( [ M["xy"]-M["yx"], M["xz"]+M["zx"], M["yz"]+M["zy"], M["zz"]-M["xx"]-M["yy"] ] )
coef = []
D = (M["xy"]**2+M["xz"]**2-M["yx"]**2-M["zx"]**2)**2
E = (-M["xx"]**2+M["yy"]**2+M["zz"]**2+M["yz"]**2+M["zy"]**2-2*(M["yy"]*M["zz"]-M["yz"]*M["zy"]))*\
(-M["xx"]**2+M["yy"]**2+M["zz"]**2+M["yz"]**2+M["zy"]**2+2*(M["yy"]*M["zz"]-M["yz"]*M["zy"]))
F = (-(M["xz"]+M["zx"])*(M["yz"]-M["zy"])+(M["xy"]-M["yx"])*(M["xx"]-M["yy"]-M["zz"]))*\
(-(M["xz"]-M["zx"])*(M["yz"]+M["zy"])+(M["xy"]-M["yx"])*(M["xx"]-M["yy"]+M["zz"]))
G = (-(M["xz"]+M["zx"])*(M["yz"]+M["zy"])-(M["xy"]+M["yx"])*(M["xx"]+M["yy"]-M["zz"]))*\
(-(M["xz"]-M["zx"])*(M["yz"]-M["zy"])-(M["xy"]+M["yx"])*(M["xx"]+M["yy"]+M["zz"]))
H = ( (M["xy"]+M["yx"])*(M["yz"]+M["zy"])+(M["xz"]+M["zx"])*(M["xx"]-M["yy"]+M["zz"]))*\
(-(M["xy"]-M["yx"])*(M["yz"]-M["zy"])+(M["xz"]+M["zx"])*(M["xx"]+M["yy"]+M["zz"]))
I = ( (M["xy"]+M["yx"])*(M["yz"]-M["zy"])+(M["xz"]-M["zx"])*(M["xx"]-M["yy"]-M["zz"]))*\
(-(M["xy"]-M["yx"])*(M["yz"]+M["zy"])+(M["xz"]-M["zx"])*(M["xx"]+M["yy"]-M["zz"]))
coef.append( D+E+F+G+H+I )
coef.append( -8.0*( M["xx"]*M["yy"]*M["zz"]+M["xy"]*M["yz"]*M["zx"]+M["xz"]*M["yx"]*M["zy"]
-M["xx"]*M["yz"]*M["zy"]-M["xy"]*M["yx"]*M["zz"]-M["xz"]*M["yy"]*M["zx"] ) )
coef.append( -2.0*( M["xx"]**2+M["xy"]**2+M["xz"]**2+M["yx"]**2+M["yy"]**2+M["yz"]**2+M["zx"]**2+M["zy"]**2+M["zz"]**2 ) )
coef.append( 0.0 )
coef.append( 1.0 )
root_old = 0.0
root = 0.5*(G1+G2)
while (math.fabs(root-root_old) > 1.0e-6):
root_old = root
P = root**4+coef[2]*root**2+coef[1]*root+coef[0]
dP = 4*root**3+2*coef[2]*root+coef[1]
root -= P/dP
for i in range(len(K)):
K[i][i] -= root
for i in range(len(K)):
vect = []
for j in range(len(K)):
adj = copy.deepcopy(K)
del adj[i]
for k in range(len(adj)):
del adj[k][j]
det = adj[0][0]*adj[1][1]*adj[2][2]+adj[0][1]*adj[1][2]*adj[2][0]+adj[0][2]*adj[1][0]*adj[2][1] \
-adj[0][0]*adj[1][2]*adj[2][1]-adj[0][1]*adj[1][0]*adj[2][2]-adj[0][2]*adj[1][1]*adj[2][0]
det *= (-1)**(i+j)
vect.append(det)
norm = math.sqrt(vect[0]**2+vect[1]**2+vect[2]**2+vect[3]**2)
if (norm > 1.0e-6):
vect[0] = -vect[0]/norm
vect[1] = vect[1]/norm
vect[2] = vect[2]/norm
vect[3] = vect[3]/norm
break
M["xx"] =vect[0]**2+vect[1]**2-vect[2]**2-vect[3]**2
M["yy"] =vect[0]**2-vect[1]**2+vect[2]**2-vect[3]**2
M["zz"] =vect[0]**2-vect[1]**2-vect[2]**2+vect[3]**2
M["xy"] =2.0*(vect[1]*vect[2]-vect[0]*vect[3])
M["yx"] =2.0*(vect[1]*vect[2]+vect[0]*vect[3])
M["yz"] =2.0*(vect[2]*vect[3]-vect[0]*vect[1])
M["zy"] =2.0*(vect[2]*vect[3]+vect[0]*vect[1])
M["zx"] =2.0*(vect[1]*vect[3]-vect[0]*vect[2])
M["xz"] =2.0*(vect[1]*vect[3]+vect[0]*vect[2])
old = copy.deepcopy(mol2)
for i in range(len(mol2)):
mol2[i]["x"] = M["xx"]*old[i]["x"]+M["xy"]*old[i]["y"]+M["xz"]*old[i]["z"]+center1["x"]
mol2[i]["y"] = M["yx"]*old[i]["x"]+M["yy"]*old[i]["y"]+M["yz"]*old[i]["z"]+center1["y"]
mol2[i]["z"] = M["zx"]*old[i]["x"]+M["zy"]*old[i]["y"]+M["zz"]*old[i]["z"]+center1["z"]
return
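# Illustrative aside (never called): superpose() rigidly fits mol2 onto mol1
# with the quaternion method cited above, so a translated copy of a molecule
# should land back on the original reference (note that mol1 itself is
# re-centered in place). The coordinates below are made up for the example.
def _superpose_example():
  reference = [{"x": 0.0, "y": 0.0, "z": 0.0},
               {"x": 1.5, "y": 0.0, "z": 0.0},
               {"x": 0.0, "y": 1.0, "z": 0.5}]
  original = copy.deepcopy(reference)
  shifted = [{"x": a["x"] + 5.0, "y": a["y"] - 2.0, "z": a["z"] + 1.0}
             for a in reference]
  superpose(reference, shifted)
  # largest residual between the fitted copy and the original reference
  return max(abs(a["x"] - b["x"]) + abs(a["y"] - b["y"]) + abs(a["z"] - b["z"])
             for a, b in zip(original, shifted))   # ~0 for this case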
#=============================
# Read the system file
# Skip the file until the solute is found
file_system = open(system_input, "r")
for line in file_system:
if (re.match("Solute",line)):
break
# Skip name and number of molecules
file_system.next()
file_system.next()
# Read coordinates and charges
mol = []
num = int(file_system.next())
for i in range(num):
tmp = dict(zip(("x","y","z","q"),file_system.next().split()[4:8]))
tmp["x"] = float(tmp["x"])
tmp["y"] = float(tmp["y"])
tmp["z"] = float(tmp["z"])
tmp["q"] = float(tmp["q"])
mol.append(tmp)
file_system.close()
#=============================
# Read the topology file
# and write the modified charges
file_top = open(top_input, "r")
file_top_out = open(top_output, "w")
# Skip to the definition of the first molecule's atoms
for line in file_top:
file_top_out.write(line)
if (re.match("\[\s*atoms\s*\]",line)):
break
# Replace the 7th word (the charge) with the new charge
for i in range(num):
line = file_top.next()
# Skip comment lines
while (re.match("\s*;", line)):
file_top_out.write(line)
line = file_top.next()
words = re.findall("(\s*\S+)",line)
rep_word(words, 6, " "+str(mol[i]["q"]))
file_top_out.write("".join(words)+"\n")
# Copy the rest of the file unchanged
for line in file_top:
file_top_out.write(line)
file_top.close()
file_top_out.close()
#=============================
# Read the coordinates file
# and write the modified coordinates
coord_prec = "11.6"
veloc_prec = "11.7"
format_str = "%%5d%%5s%%5s%%5d%%%sf%%%sf%%%sf%%%sf%%%sf%%%sf\n" % (coord_prec, coord_prec, coord_prec, veloc_prec, veloc_prec, veloc_prec)
file_gro = open(gro_input, "r")
file_gro_out = open(gro_output, "w")
# First read the solute coordinates
file_gro.next()
file_gro.next()
mol_gro = []
for i in range(num):
line = file_gro.next()
dots = [match.start() for match in re.finditer("\.", line[20:])]
width = dots[1]-dots[0]
tmp = dict(zip(("x","y","z"), [line[j:j+width] for j in range(20, len(line), width)]))
tmp["x"] = float(tmp["x"])*10
tmp["y"] = float(tmp["y"])*10
tmp["z"] = float(tmp["z"])*10
mol_gro.append(tmp)
# Modify the input coordinates to fit the original orientation
superpose ( mol_gro, mol )
# Back to the top of the file
file_gro.seek(0)
# Copy title and total number of atoms
file_gro_out.write(file_gro.next())
numtot = int(file_gro.next())
file_gro_out.write("%5d\n" % numtot)
# Read the atom coordinates and velocities
for i in range(numtot):
line = file_gro.next()
dots = [match.start() for match in re.finditer("\.", line[20:])]
width = dots[1]-dots[0]
tmp = dict(zip(("x","y","z","vx","vy","vz"), [line[j:j+width] for j in range(20, len(line), width)]))
tmp["resnum"] = int(line[0:5])
tmp["resname"] = line[5:10]
tmp["atname"] = line[10:15]
tmp["atnum"] = int(line[15:20])
# For the solute, write the new coordinates, in nm
if (i < num):
tmp["x"] = 0.1*mol[i]["x"]
tmp["y"] = 0.1*mol[i]["y"]
tmp["z"] = 0.1*mol[i]["z"]
else:
tmp["x"] = float(tmp["x"])
tmp["y"] = float(tmp["y"])
tmp["z"] = float(tmp["z"])
# Write the velocities if present
if "vx" in tmp:
tmp["vx"] = float(tmp["vx"])
tmp["vy"] = float(tmp["vy"])
tmp["vz"] = float(tmp["vz"])
else:
tmp["vx"] = 0.0
tmp["vy"] = 0.0
tmp["vz"] = 0.0
file_gro_out.write(format_str % \
(tmp["resnum"], tmp["resname"], tmp["atname"], tmp["atnum"], tmp["x"], tmp["y"], tmp["z"], tmp["vx"], tmp["vy"], tmp["vz"]))
# Copy the cell tensor
file_gro_out.write(file_gro.next())
file_gro.close()
file_gro_out.close()
| gpl-3.0 | 6,437,913,414,592,867,000 | 30.11838 | 138 | 0.513565 | false |
gammapy/enrico | enrico/plotting.py | 1 | 25543 | import os
from distutils.version import LooseVersion
import numpy as np
try:
import astropy.io.fits as fits
except ImportError:
import pyfits as fits
import pyLikelihood
import matplotlib
matplotlib.use('Agg')
matplotlib.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 15})
matplotlib.rc('text', usetex=True)
import matplotlib.pyplot as plt
from enrico.constants import MEV_TO_ERG, ERG_TO_MEV
from enrico.config import get_config
from enrico import utils
from enrico import Loggin
from enrico.extern.astropy_bayesian_blocks import bayesian_blocks
class Params:
"""Collection of Plotting parameters like Energy bounds,
colors, file name, etc...."""
def __init__(self, srcname, Emin=100, Emax=3e5,
PlotName="LAT_SED", LineColor=2,
PointColor = 1, N = 2000):
self.Emin = Emin #Energy bounds
self.Emax = Emax
self.N = N #Number of points for the TGraph
self.srcname = srcname # Source of interest
self.PlotName = PlotName #file name
#color options
self.LineColor = LineColor
self.PointColor = PointColor
class Result(Loggin.Message):
"""Helper class to get the results from a (Un)BinnedAnalysis object
and compute the SED and errors"""
def __init__(self, Fit, pars):
super(Result,self).__init__()
Loggin.Message.__init__(self)
self.Fit = Fit
self.Model = Fit[pars.srcname].funcs['Spectrum'].genericName()
self.ptsrc = pyLikelihood.PointSource_cast(Fit[pars.srcname].src)
self.covar = np.array(utils.GetCovar(pars.srcname, self.Fit, False))
self.srcpars = pyLikelihood.StringVector()
Fit[pars.srcname].src.spectrum().getFreeParamNames(self.srcpars)
def GetDecorrelationEnergy(self,par):
self.E, self.SED = self.MakeSED(par)
self.Err = self.MakeSEDError(par)
i=np.argmin(self.Err/self.SED)
self.decE = self.E[i]
self.decFlux = self.SED[i]/self.E[i]**2*ERG_TO_MEV
self.decFluxerr = self.Err[i]/self.E[i]**2*ERG_TO_MEV
self.decSED = self.SED[i]
self.decSEDerr = self.Err[i]
def _DumpSED(self,par):
"""Save the energy, E2.dN/dE, and corresponding error in an ascii file
The count and residuals plot vs E is also made"""
try:
self.decE
except NameError:
self.GetDecorrelationEnergy(par)
self.info("Decorrelation energy : %4.2e MeV"% self.decE)
        self.info("Differential flux at the Decorrelation energy : %2.2e +/- %2.2e ph/cm2/s/MeV" \
%(self.decFlux, self.decFluxerr))
self.info("SED value at the Decorrelation energy : %2.2e +/- %2.2e erg/cm2/s" \
%(self.decSED, self.decSEDerr))
try:
self.CountsPlot(par)
except Exception as e:
print((type(e))) # the exception instance
print((e.args)) # arguments stored in .args
print(e) # __str__ allows args to be printed directly,
#raise
# Save all in ascii file
# log(E) log (E**2*dN/dE) log(E**2*dN/dE_err) is_dot (0,1) is_upper (0,1)
save_file = open(par.PlotName + '.dat', 'w')
save_file.write("# log(E) log (E**2*dN/dE) Error on log(E**2*dN/dE) \n")
for i in range(par.N):
save_file.write("%12.4e %12.4e %12.4e \n" % (self.E[i], self.SED[i], self.Err[i]))
save_file.close()
def MakeFlux(self, params):
"""Compute differential Flux distribution and
corresponding energy and return a numpy array"""
E = np.logspace(np.log10(params.Emin), np.log10(params.Emax), params.N)
Flux = np.zeros(params.N)
for i in range(params.N):
Flux[i] = self.dNde(E[i])
return E, Flux
def MakeSED(self, pars):
"""Compute Spectral energy distribution and corresponding energy
and return a numpy array"""
E = np.logspace(np.log10(pars.Emin), np.log10(pars.Emax), pars.N)
nuFnu = np.zeros(pars.N)
for i in range(pars.N):
nuFnu[i] = MEV_TO_ERG * E[i] ** 2 * self.dNde(E[i]) #Mev to Ergs
return E, nuFnu
def MakeSEDError(self, pars):
"""@todo: document me"""
estep = np.log(pars.Emax / pars.Emin) / (pars.N - 1)
energies = pars.Emin * np.exp(estep * np.arange(np.float(pars.N)))
err = np.zeros(pars.N)
j = 0
for ene in energies:
arg = pyLikelihood.dArg(ene)
partials = np.zeros(len(self.srcpars))
for i in range(len(self.srcpars)):
x = self.srcpars[i]
partials[i] = self.ptsrc.spectrum().derivByParam(arg, x)
err[j] = np.sqrt(np.dot(partials, np.dot(self.covar, partials)))
j += 1
return MEV_TO_ERG * energies ** 2 * err #Mev to Ergs
def dNde(self, energy):
arg = pyLikelihood.dArg(energy)
return self.ptsrc.spectrum()(arg)
def CountsPlot(self, Parameter):
"""@todo: document me"""
imName = "tmp.fits"
filebase = Parameter.PlotName
total = np.array([])
obs = np.array([])
obs_err = np.array([])
emax = np.array([])
emin = np.array([])
src = np.array([])
# Summed Likelihood has no writeCountsSpectra
# but we can do it component by component
for comp in self.Fit.components:
#self.Fit.writeCountsSpectra(imName)
try:
comp.writeCountsSpectra(imName)
image = fits.open(imName)
#loop on the source names to find the good one
j = 0
for ID in image[1].data.names:
if ID == Parameter.srcname:
indice = j
j += 1
for jn in range(len(image[3].data.field(0))):
energymin = image[3].data.field(1)[jn]
energymax = image[3].data.field(0)[jn]
if energymax in emax and energymin in emin:
k = np.where(energymax==emax)
obs[k] = obs[k] + image[1].data.field(0)[jn]
obs_err[k] = np.sqrt(obs[k])
src[k] = src[k] + image[1].data.field(indice)[jn]
for i in range(len(image[1].data.names) - 1):
total[k] = total[k] + image[1].data.field(i + 1)[jn]
else:
emax = np.append(emax, energymax)
emin = np.append(emin, energymin)
obs = np.append(obs,image[1].data.field(0)[jn])
obs_err = np.append(obs_err,\
np.sqrt(image[1].data.field(0)[jn]))
src = np.append(src, image[1].data.field(indice)[jn])
total = np.append(total,0)
for i in range(len(image[1].data.names) - 1):
total[-1] = total[-1] + image[1].data.field(i + 1)[jn]
except RuntimeError as e:
                print("Exception RuntimeError occurred: ")
print((type(e)))
print((e.args))
print(e)
break
            except IndexError as e:
                print("Exception IndexError occurred (component unavailable): ")
print((type(e)))
print((e.args))
print(e)
continue
# Sort by energy
energy_order = np.argsort(emin)
src = src[energy_order]
obs = obs[energy_order]
obs_err = obs_err[energy_order]
total = total[energy_order]
emin = emin[energy_order]
emax = emax[energy_order]
other = np.array(total - src)
Nbin = len(src)
E = np.array((emax + emin) / 2.)
err_E = np.array((emax - emin) / 2.)
total = np.array(total)
residual = np.zeros(Nbin)
Dres = np.zeros(Nbin)
plt.figure()
plt.loglog()
plt.title('Counts plot')
plt.xlabel("E (MeV) ")
plt.ylabel("Counts / bin")
plt.errorbar(E,obs,xerr=err_E,yerr=obs_err,fmt='o',color="red",ls='None',label="Data")
plt.plot(E,src,ls='dashed',color="blue",label=Parameter.srcname.replace("_"," "))
plt.plot(E,other,ls='solid',color="green",label="Other Sources")
plt.plot(E,total,lw=1.5,ls='solid',label="All Sources")
plt.legend()
plt.tight_layout()
plt.savefig(filebase + "_CountsPlot.png", dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
plt.figure()
plt.title('Residuals plot')
plt.semilogx()
for i in range(Nbin):
try:
residual[i] = (obs[i] - total[i]) / total[i]
Dres[i] = (obs_err[i] / total[i])
except:
residual[i] = 0.
Dres[i] = 0.
if residual[i] == -1.:
residual[i] = 0.
ymin = min(residual) - max(Dres)
ymax = max(residual) + max(Dres)
plt.ylim(ymax = ymax, ymin = ymin)
plt.xlim(xmin = min(E)*0.3, xmax = max(E)*2)
plt.xlabel("E (MeV) ")
plt.ylabel("(counts-model)/model")
plt.errorbar(E,residual,xerr=err_E,yerr=Dres,fmt='o',color="red",ls='None',label="Data")
zero = np.zeros(2)
Ezero = np.array([1e-5, 1e10])
plt.plot(Ezero,zero,lw=1.5,ls='solid',color='black')
plt.tight_layout()
plt.savefig(filebase + "ResPlot.png", dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
os.system("rm " + imName)
image.close()
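# --- Illustrative aside (standalone sketch, not used by Result above) ---
# MakeSEDError() is plain first-order error propagation: at each energy the
# error is sqrt(J . C . J), with J the vector of partial derivatives of dN/dE
# with respect to the free spectral parameters and C their covariance matrix.
# The function below reproduces that by hand for a power law
# dN/dE = N0*(E/E0)**(-gamma); all numbers are hypothetical.
def _powerlaw_butterfly_sketch():
    N0, gamma, E0 = 1e-11, 2.2, 1000.0          # ph/cm2/s/MeV, index, pivot (MeV)
    cov = np.array([[(1e-12) ** 2, -2e-14],
                    [-2e-14, 0.05 ** 2]])       # hypothetical cov(N0, gamma)
    energies = np.logspace(2, 5, 50)            # MeV
    dnde = N0 * (energies / E0) ** (-gamma)
    # partial derivatives with respect to N0 and gamma
    jac = np.vstack([dnde / N0, -dnde * np.log(energies / E0)])
    err = np.sqrt(np.einsum('ie,ij,je->e', jac, cov, jac))
    # same MeV-to-erg conversion as in MakeSED/MakeSEDError
    return energies, MEV_TO_ERG * energies ** 2 * dnde, MEV_TO_ERG * energies ** 2 * err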
# def PlotFoldedLC(Time, TimeErr, Flux, FluxErr, tag="Flux (photon cm^{-2} s^{-1})"):
# _, tgraph, arrows = PlotLC(Time, TimeErr, Flux, FluxErr, tag)
# xmin = 0
# xmax = 1
# if max(FluxErr)==0:
# ymin = 0.
# ymax = max(Flux)*1.3
# else:
# ymin = np.min(min(Flux) - max(FluxErr) * 1.3, 0.)
# ymax = (max(Flux) + max(FluxErr)) * 1.3
# gh = ROOT.TH2F("ghflux", "", 80, xmin, xmax, 100, ymin, ymax)
# gh.SetStats(000)
# gh.SetXTitle("Orbital Phase")
# gh.SetYTitle(tag)
# return gh, tgraph, arrows
def GetDataPoints(config,pars,ignore_missing_bins=False):
    """Collect the data points/upper limits for each energy bin and return
    arrays of energies, fluxes, errors and upper-limit flags, all in SED format."""
#Preparation + declaration of arrays
arrows = []
NEbin = int(config['Ebin']['NumEnergyBins'])
lEmax = np.log10(float(config['energy']['emax']))
lEmin = np.log10(float(config['energy']['emin']))
Epoint = np.zeros(NEbin)
EpointErrp = np.zeros(NEbin)
EpointErrm = np.zeros(NEbin)
Fluxpoint = np.zeros(NEbin)
FluxpointErrp = np.zeros(NEbin)
FluxpointErrm = np.zeros(NEbin)
uplim = np.zeros(NEbin,dtype=int)
ener = np.logspace(lEmin, lEmax, NEbin + 1)
mes = Loggin.Message()
mes.info("Save Ebin results in ",pars.PlotName+".Ebin.dat")
dumpfile = open(pars.PlotName+".Ebin.dat",'w')
dumpfile.write("# Energy (MeV)\tEmin (MeV)\tEmax (MeV)\tE**2. dN/dE (erg.cm-2s-1)\tGaussianError\tMinosNegativeError\tMinosPositiveError\n")
from enrico.constants import EbinPath
for i in range(NEbin):#Loop over the energy bins
#E = int(pow(10, (np.log10(ener[i + 1]) + np.log10(ener[i])) / 2))
filename = (config['out'] + '/'+EbinPath+str(NEbin)+'/' + config['target']['name'] +
"_" + str(i) + ".conf")
try:#read the config file of each data points
CurConf = get_config(filename)
mes.info("Reading "+filename)
results = utils.ReadResult(CurConf)
except:
if not ignore_missing_bins:
mes.warning("cannot read the Results of energy bin "+ str(i))
continue
#fill the energy arrays
#Epoint[i] = results.get("Scale")
#if Epoint[i] in [results.get("Emin"),results.get("Emax")]:
#### <---- is this a mistake?? does not make much sense to me
Epoint[i] = 10**((np.log10(results.get("Emin"))+np.log10(results.get("Emax")))/2.)
#Epoint[i] = int(pow(10, (np.log10(ener[i + 1]) + np.log10(ener[i])) / 2))
Epoint[i] = 10**((np.log10(results.get("Emin"))+np.log10(results.get("Emax")))/2.)
EpointErrm[i] = Epoint[i] - results.get("Emin")
EpointErrp[i] = results.get("Emax") - Epoint[i]
dprefactor = 0
#Compute the flux or the UL (in SED format)
if 'Ulvalue' in results:
PrefUl = utils.Prefactor(results.get("Ulvalue"),results.get("Index"),
results.get("Emin"),results.get("Emax"),Epoint[i])
Fluxpoint[i] = MEV_TO_ERG * PrefUl * Epoint[i] ** 2
uplim[i] = 1
else : #Not an UL : compute points + errors
Fluxpoint[i] = MEV_TO_ERG * results.get("Prefactor") * Epoint[i] ** 2
dprefactor = results.get("dPrefactor")
try:
down = abs(results.get("dPrefactor-"))
up = results.get("dPrefactor+")
if down==0 or up ==0 :
mes.error("cannot get Error value")
FluxpointErrp[i] = MEV_TO_ERG * up * Epoint[i] ** 2
FluxpointErrm[i] = MEV_TO_ERG * down * Epoint[i] ** 2
except:
try:
err = MEV_TO_ERG * dprefactor * Epoint[i] ** 2
FluxpointErrp[i] = err
FluxpointErrm[i] = err
except:
pass
mes.info("Energy bins results")
print(("Energy = ",Epoint[i]))
#Save the data point in a ascii file
if 'Ulvalue' in results:
dumpfile.write(str(Epoint[i])+"\t"+str(results.get("Emin"))+"\t"+str( results.get("Emax"))+"\t"+str(Fluxpoint[i])+"\t0\t0\t0\n")
print(("E**2. dN/dE = ",Fluxpoint[i]))
else:
dumpfile.write(str(Epoint[i])+"\t"+str(results.get("Emin"))+"\t"+str( results.get("Emax"))+"\t"+str(Fluxpoint[i])+"\t"+str( MEV_TO_ERG * dprefactor * Epoint[i] ** 2)+"\t"+str(FluxpointErrm[i])+"\t"+str(FluxpointErrp[i])+"\n")
print(("E**2. dN/dE = ",Fluxpoint[i]," + ",FluxpointErrp[i]," - ",FluxpointErrm[i]))
dumpfile.close()
return Epoint, Fluxpoint, EpointErrm, EpointErrp, FluxpointErrm, FluxpointErrp, uplim
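# --- Illustrative aside (standalone, never called) ---
# Each energy-bin result stores the fitted differential flux ("Prefactor",
# in ph/cm2/s/MeV) evaluated at the bin's geometric-mean energy, so turning
# it into a nuFnu point is just E**2 * dN/dE converted from MeV to erg,
# exactly as done in the loop above. Numbers below are hypothetical.
def _ebin_to_sed_point(emin=1e3, emax=3e3, prefactor=2.5e-12):
    energy = 10 ** (0.5 * (np.log10(emin) + np.log10(emax)))   # MeV
    return energy, MEV_TO_ERG * prefactor * energy ** 2        # erg cm-2 s-1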
def plot_errorbar_withuls(x,xerrm,xerrp,y,yerrm,yerrp,uplim,bblocks=False):
""" plot an errorbar plot with upper limits. Optionally compute and draw bayesian blocks (bblocks) """
# plt.errorbar(Epoint, Fluxpoint, xerr=[EpointErrm, EpointErrp], yerr=[FluxpointErrm, FluxpointErrp],fmt='o',color='black',ls='None',uplims=uplim)
uplim = np.asarray(uplim,dtype=bool) # It is an array of 1 and 0s, needs to be a bool array.
# make sure that the arrays are numpy arrays and not lists.
x = np.asarray(x)
xerrm = np.asarray(xerrm)
xerrp = np.asarray(xerrp)
y = np.asarray(y)
yerrm = np.asarray(yerrm)
yerrp = np.asarray(yerrp)
# Get the strict upper limit (best fit value + error, then set the error to 0 and the lower error to 20% of the value)
y[uplim] += yerrp[uplim]
yerrm[uplim] = 0
yerrp[uplim] = 0
optimal_markersize = (0.5+4./(1.+np.log10(len(y))))
optimal_errorlinewidth = (0.2+2./(1.+4.*np.log10(len(y))))
# Plot the significant points
plt.errorbar(x[~uplim], y[~uplim],
xerr=[xerrm[~uplim], xerrp[~uplim]],
yerr=[yerrm[~uplim], yerrp[~uplim]],
lw=optimal_errorlinewidth,
fmt='o',ms=optimal_markersize,capsize=0,zorder=10,
color='black',ls='None',uplims=False,label='LAT data')
# Plot the upper limits. For some reason, matplotlib draws the arrows inverted for uplim and lolim [?]
# This is a known issue fixed in matplotlib 1.4: https://github.com/matplotlib/matplotlib/pull/2452
if LooseVersion(matplotlib.__version__) < LooseVersion("1.4.0"):
plt.errorbar(x[uplim], y[uplim],
xerr=[xerrm[uplim], xerrp[uplim]],
yerr=[yerrm[uplim], yerrp[uplim]],
fmt='o',markersize=0,capsize=0,zorder=-1,
lw=optimal_errorlinewidth,
color='0.50',ls='None',lolims=False)
plt.errorbar(x[uplim], 0.8*y[uplim],
yerr=[0.2*y[uplim], 0.2*y[uplim]],
fmt='o',markersize=0,capsize=optimal_markersize/1.5,zorder=-1,
lw=optimal_errorlinewidth,
color='0.50',ls='None',lolims=True)
else:
plt.errorbar(x[uplim], y[uplim],
xerr=[xerrm[uplim], xerrp[uplim]],
yerr=[yerrm[uplim], yerrp[uplim]],
lw=optimal_errorlinewidth,
fmt='o',markersize=0,capsize=0,zorder=-1,
color='0.50',ls='None',uplims=False)
plt.errorbar(x[uplim], y[uplim],
yerr=[0.2*y[uplim], 0.2*y[uplim]],
lw=optimal_errorlinewidth,
fmt='o',markersize=0,capsize=optimal_markersize/1.5,zorder=-1,
color='0.50',ls='None',uplims=True)
if bblocks and len(x[~uplim])>2:
yerr = 0.5*(yerrm+yerrp)
# Set the value and error for the uls.
yerr[uplim] = y[uplim] #min(y[yerr>0]+yerr[yerr>0])
y[uplim] = 0
edges = bayesian_blocks(x,y,yerr,fitness='measures',p0=0.5)
#edges = bayesian_blocks(x[yerr>0],y[yerr>0],yerr[yerr>0],fitness='measures',p0=0.1)
xvalues = 0.5*(edges[:-1]+edges[1:])
xerrors = 0.5*(edges[1:]-edges[:-1])
yvalues = []
yerrors = []
for k in range(len(edges)-1):
xmin,xmax = edges[k],edges[k+1]
filt = (x>=xmin)*(x<=xmax)*(yerr>0)
sum_inv_square = np.sum(1./yerr[filt]**2)
yvalues.append(np.sum(y[filt]/yerr[filt]**2)/sum_inv_square)
yerrors.append(1./np.sqrt(sum_inv_square))
yvalues = np.asarray(yvalues)
yerrors = np.asarray(yerrors)
# Plot the significant points
ystep = []
ystepmin = []
ystepmax = []
xstep = []
for k in range(len(xvalues)):
for _ in range(2):
ystep.append(yvalues[k]) # 3 values, to mark the minimum and center
ystepmin.append(yvalues[k]-yerrors[k]) # 3 values, to mark the minimum and center
ystepmax.append(yvalues[k]+yerrors[k]) # 3 values, to mark the minimum and center
xstep.append(xvalues[k]-xerrors[k])
xstep.append(xvalues[k]+xerrors[k])
plt.step(xstep, ystep,
color='#d62728',zorder=-10,
ls='solid')
plt.fill_between(xstep, ystepmin, ystepmax,
color='#d62728',zorder=-10, alpha=0.5)
plt.errorbar(xvalues, yvalues,
xerr=xerrors,yerr=yerrors,
marker=None,ms=0,capsize=0,color='#d62728',zorder=-10,
ls='None',label='bayesian blocks')
plt.legend(loc=0,fontsize='small',numpoints=1)
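# --- Illustrative aside (standalone, never called) ---
# Minimal use of plot_errorbar_withuls() with made-up points, two of which
# are flagged as upper limits.
def _plot_errorbar_withuls_demo():
    x = np.arange(10.0)
    xerr = 0.5 * np.ones(10)
    y = 1e-7 * (1.0 + 0.3 * np.sin(x))
    yerr = 2e-8 * np.ones(10)
    uplim = np.zeros(10, dtype=int)
    uplim[[3, 7]] = 1                     # these two bins become upper limits
    plt.figure()
    plot_errorbar_withuls(x, xerr, xerr, y, yerr, yerr, uplim, bblocks=False)
    plt.savefig('demo_errorbar_uls.png')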
def plot_bayesianblocks(xmin, xmax, y, yerrm, yerrp, uplim):
# Set the value and error for the uls.
yerrm[uplim] = y[uplim]
yerrp[uplim] = y[uplim]
y[uplim] = 0.
xvalues = 0.5*(xmax+xmin)
xerrors = 0.5*(xmax-xmin)
# Plot the significant points
ystep = []
ystepmin = []
ystepmax = []
xstep = []
for k in range(len(xvalues)):
for _ in range(2):
ystep.append(y[k]) # 3 values, to mark the minimum and center
ystepmin.append(y[k]-yerrm[k]) # 3 values, to mark the minimum and center
ystepmax.append(y[k]+yerrp[k]) # 3 values, to mark the minimum and center
xstep.append(xmin[k])
xstep.append(xmax[k])
plt.step(xstep, ystep,
color='#d62728',zorder=-10,
ls='solid')
plt.fill_between(xstep, ystepmin, ystepmax,
color='#d62728',zorder=-10, alpha=0.5)
plt.errorbar(xvalues, y,
xerr=xerrors,yerr=[yerrm, yerrp],
marker=None,ms=0,capsize=0,color='#d62728',zorder=-10,
ls='None')
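# --- Illustrative aside (standalone, never called) ---
# plot_bayesianblocks() only needs the pre-computed block edges and per-block
# fluxes; made-up example with the last block treated as an upper limit.
def _plot_bayesianblocks_demo():
    xmin = np.array([0.0, 2.0, 5.0])
    xmax = np.array([2.0, 5.0, 9.0])
    flux = np.array([1.0e-7, 1.8e-7, 0.9e-7])
    err = np.array([2.0e-8, 2.5e-8, 1.5e-8])
    uplim = np.array([False, False, True])
    plt.figure()
    # pass copies because the function modifies its flux/error arrays in place
    plot_bayesianblocks(xmin, xmax, flux.copy(), err.copy(), err.copy(), uplim)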
def PlotSED(config,pars,ignore_missing_bins=False):
"""plot a nice SED with a butterfly and points"""
# Read the ascii file where the butterfly is stored
filebase = utils._SpecFileName(config)
lines = open(filebase + '.dat', 'r').readlines()
SED = []
E = []
Err = []
for i in range(len(lines) - 1):
words = lines[i + 1].split()
if float(words[0])<pars.Emax :
E.append(float(words[0]))
SED.append(float(words[1]))
Err.append(float(words[2]))
ilen = len(SED)
#From dN/dE to SED
Fluxp = np.array(SED)*np.exp(np.array(Err)/np.array(SED))
Fluxm = np.array(SED)*np.exp(-np.array(Err)/np.array(SED))
ErrorFlux = np.zeros(2 * ilen + 1)
ErrorE = np.zeros(2 * ilen + 1)
#Compute the butterfly and close it
for i in range(ilen):
ErrorFlux[i] = Fluxp[i]
ErrorE[i] = E[i]
for i in range(ilen):
ErrorFlux[ilen + i] = Fluxm[ilen - i - 1]
ErrorE[ilen + i] = E[ilen - i - 1]
ErrorFlux[-1] = Fluxp[0]
ErrorE[-1] = E[0]
#Actually make the plot
plt.figure()
plt.title(pars.PlotName.split("/")[-1].replace('_','\_'))
name = pars.PlotName.split("/")[-1]
plt.loglog()
plt.xlabel(r"Energy (MeV)")
plt.ylabel(r"$\mathrm{E^2\ dN/dE}\ \mathrm{(erg\ cm^{-2} s^{-1})}$")
plt.plot(E,SED,"-r",label='LAT model')
plt.plot(ErrorE,ErrorFlux,"-r")
#Plot points
NEbin = int(config['Ebin']['NumEnergyBins'])
if NEbin > 0:
Epoint, Fluxpoint, EpointErrm, EpointErrp, FluxpointErrm, FluxpointErrp, uplim = GetDataPoints(config,pars,ignore_missing_bins) #collect data points
plot_errorbar_withuls(Epoint,EpointErrm,EpointErrp,Fluxpoint,FluxpointErrm,FluxpointErrp,uplim)
#print uplim
#print FluxpointErrm
#print FluxpointErrp
#Set meaningful axes limits
xlim = plt.xlim()
ylim = plt.ylim()
xlim = (max([20,xlim[0]]),min([2e6,xlim[1]]))
ylim = (max([1e-14,ylim[0]]),min([1e-8,ylim[1]]))
plt.xlim(xlim)
plt.ylim(ylim)
# turn them into log10 scale
#xticks = plt.xticks()[0]
#xticklabels = np.array(np.log10(xticks),dtype=int)
#plt.xticks(xticks,xticklabels)
#plt.xlabel('$\mathrm{\log_{10}\mathbf{(Energy)} \\ \\ [MeV]}$')
plt.legend(fontsize='small',ncol=1,\
loc=3,numpoints=1)#,framealpha=0.75)
#Upper horizontal secondary axis with frequency
#Plt2 = plt.twiny()
#Plt2.set_xscale('log')
#Plt2.set_xlim(2.417990504024163e+20 *np.array(xlim))
#Plt2.set_xticklabels(np.array(np.log10(Plt2.get_xticks()),dtype=int))
#Plt2.set_xlabel('$\mathrm{\log_{10}\mathbf{(Frequency)} \\ \\ [Hz]}$')
#save the canvas
#plt.grid()
plt.tight_layout()
plt.savefig("%s.png" %filebase, dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
                    transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
def PlotUL(pars,config,ULFlux,Index):
#Compute the SED
E = np.logspace(np.log10(pars.Emin), np.log10(pars.Emax), pars.N)
SED = MEV_TO_ERG * E ** 2 * (-Index+1)*ULFlux* np.power(E,-Index)/(np.power(pars.Emax,-Index+1)-np.power(pars.Emin,-Index+1))
#Actually make the plot
plt.xlabel(r"E [MeV]")
plt.ylabel(r"$\mathrm{E^2\ dN/dE}\ \mathrm{(erg\ cm^{-2} s^{-1})}$")
plt.loglog()
plt.plot(E,SED,"-",color='black')
# Plot the upper limits. For some reason, matplotlib draws the arrows inverted for uplim and lolim [?]
# This is a known issue fixed in matplotlib 1.4: https://github.com/matplotlib/matplotlib/pull/2452
if LooseVersion(matplotlib.__version__) < LooseVersion("1.4.0"):
plt.errorbar([E[0],E[-1]], [SED[0],SED[-1]], yerr=[SED[0]*0.8,SED[-1]*0.8],fmt='.',color='black',ls='None',lolims=[1,1])
else:
plt.errorbar([E[0],E[-1]], [SED[0],SED[-1]], yerr=[SED[0]*0.8,SED[-1]*0.8],fmt='.',color='black',ls='None',uplims=[1,1])
#save the plot
filebase = utils._SpecFileName(config)
plt.tight_layout()
plt.savefig(filebase + '.png', dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
def plot_sed_fromconfig(config,ignore_missing_bins=False):
config = get_config(config)
utils.mkdir_p(config["out"]+"/Spectrum")
srcname = config['target']['name']
Emin = config['energy']['emin']
Emax = config['energy']['emax']
filename = utils._SpecFileName(config)
Param = Params(srcname, Emin=Emin, Emax=Emax, PlotName=filename)
Result = utils.ReadResult(config)
# if the TS > ts limit plot the butterfly, if not draw UL
if Result["TS"]> config['UpperLimit']['TSlimit'] :
PlotSED(config,Param,ignore_missing_bins)
else :
try :
PlotUL(Param,config,Result['Ulvalue'],config['UpperLimit']['SpectralIndex'])
except :
print("Not able to plot an upper limit in a SED diagram. UL computed?")
| bsd-3-clause | 6,686,314,298,672,276,000 | 39.803514 | 238 | 0.565086 | false |
auth0/auth0-python | auth0/v3/authentication/delegated.py | 1 | 1118 | from .base import AuthenticationBase
class Delegated(AuthenticationBase):
"""Delegated authentication endpoints.
Args:
domain (str): Your auth0 domain (e.g: username.auth0.com)
"""
def get_token(self, client_id, target, api_type, grant_type,
id_token=None, refresh_token=None, scope='openid'):
"""Obtain a delegation token.
"""
if id_token and refresh_token:
raise ValueError('Only one of id_token or refresh_token '
'can be None')
data = {
'client_id': client_id,
'grant_type': grant_type,
'target': target,
'scope': scope,
'api_type': api_type,
}
if id_token:
data.update({'id_token': id_token})
elif refresh_token:
data.update({'refresh_token': refresh_token})
else:
raise ValueError('Either id_token or refresh_token must '
'have a value')
return self.post('{}://{}/delegation'.format(self.protocol, self.domain), data=data)
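# Illustrative aside (never called; all identifiers below are placeholders and
# the constructor is assumed to take the tenant domain, as elsewhere in this
# package):
def _example_delegation_call():
    delegated = Delegated('mytenant.auth0.com')
    return delegated.get_token(
        client_id='CLIENT_ID',
        target='TARGET_CLIENT_ID',
        api_type='app',
        grant_type='urn:ietf:params:oauth:grant-type:jwt-bearer',
        id_token='ID_TOKEN')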
| mit | -5,919,315,672,139,504,000 | 29.216216 | 92 | 0.538462 | false |
FederatedAI/FATE | python/federatedml/feature/test/ohe_alignment_test.py | 1 | 1102 | import unittest
import uuid
from fate_arch.session import computing_session as session
from federatedml.feature.homo_onehot.homo_ohe_arbiter import HomoOneHotArbiter
class TestOHE_alignment(unittest.TestCase):
def setUp(self):
self.job_id = str(uuid.uuid1())
session.init(self.job_id)
def test_instance(self):
ohe_alignment_arbiter = HomoOneHotArbiter()
guest_columns = [
{'race_black': ['0', '1'], 'race_hispanic': ['0'], 'race_asian': ['0', '1'], 'race_other': ['1'],
'electivesurgery': ['0', '1']}]
host_columns = [
{'race_black': ['0', '1'], 'race_hispanic': ['0', '1'], 'race_asian': ['0', '1'], 'race_other': ['0'],
'electivesurgery': ['0', '1']}]
aligned_columns = sorted(
ohe_alignment_arbiter.combine_all_column_headers(guest_columns, host_columns)['race_hispanic'])
self.assertTrue(len(aligned_columns) == 2)
self.assertEqual(['0', '1'], aligned_columns)
def tearDown(self):
session.stop()
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,312,677,503,983,750,700 | 32.393939 | 114 | 0.584392 | false |
bitglue/shinysdr | shinysdr/i/top.py | 1 | 23658 | # Copyright 2013, 2014, 2015, 2016, 2017 Kevin Reid <[email protected]>
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division
import math
import time
from twisted.internet import reactor
from twisted.python import log
from zope.interface import implementer # available via Twisted
from gnuradio import blocks
from gnuradio import gr
from shinysdr.i.audiomux import AudioManager
from shinysdr.i.blocks import MonitorSink, RecursiveLockBlockMixin, Context
from shinysdr.i.poller import the_subscription_context
from shinysdr.i.receiver import Receiver
from shinysdr.math import LazyRateCalculator
from shinysdr.signals import SignalType
from shinysdr.telemetry import TelemetryStore
from shinysdr.types import EnumT, NoticeT, ReferenceT
from shinysdr.values import CellDict, ExportedState, CollectionState, exported_value, setter, IWritableCollection, unserialize_exported_state
_DEBUG_RETUNE = False
@implementer(IWritableCollection)
class ReceiverCollection(CollectionState):
def __init__(self, table, top):
CollectionState.__init__(self, table)
self.__top = top
def state_insert(self, key, desc):
self.__top.add_receiver(mode=desc['mode'], key=key, state=desc)
def create_child(self, desc):
(key, receiver) = self.__top.add_receiver(desc['mode'])
receiver.state_from_json(desc)
return key
def delete_child(self, key):
self.__top.delete_receiver(key)
# TODO: Figure out how to stop having to 'declare' this here and in config.py
_STUB_FEATURES = {'stereo': True}
class Top(gr.top_block, ExportedState, RecursiveLockBlockMixin):
def __init__(self, devices={}, audio_config=None, features=_STUB_FEATURES):
# pylint: disable=dangerous-default-value
if len(devices) <= 0:
raise ValueError('Must have at least one RF device')
gr.top_block.__init__(self, "SDR top block")
self.__running = False # duplicate of GR state we can't reach, see __start_or_stop
self.__has_a_useful_receiver = False
# Configuration
# TODO: device refactoring: Remove vestigial 'accessories'
self._sources = CellDict({k: d for k, d in devices.iteritems() if d.can_receive()})
self._accessories = accessories = {k: d for k, d in devices.iteritems() if not d.can_receive()}
for key in self._sources:
# arbitrary valid initial value
self.source_name = key
break
self.__rx_device_type = EnumT({k: v.get_name() or k for (k, v) in self._sources.iteritems()})
# Audio early setup
self.__audio_manager = AudioManager( # must be before contexts
graph=self,
audio_config=audio_config,
stereo=features['stereo'])
# Blocks etc.
# TODO: device refactoring: remove 'source' concept (which is currently a device)
# TODO: remove legacy no-underscore names, maybe get rid of self.source
self.source = None
self.__monitor_rx_driver = None
self.monitor = MonitorSink(
signal_type=SignalType(sample_rate=10000, kind='IQ'), # dummy value will be updated in _do_connect
context=Context(self))
self.monitor.get_interested_cell().subscribe2(self.__start_or_stop_later, the_subscription_context)
self.__clip_probe = MaxProbe()
# Receiver blocks (multiple, eventually)
self._receivers = CellDict(dynamic=True)
self._receiver_valid = {}
# collections
# TODO: No longer necessary to have these non-underscore names
self.sources = CollectionState(CellDict(self._sources))
self.receivers = ReceiverCollection(self._receivers, self)
self.accessories = CollectionState(CellDict(accessories))
self.__telemetry_store = TelemetryStore()
# Flags, other state
self.__needs_reconnect = [u'initialization']
self.__in_reconnect = False
self.receiver_key_counter = 0
self.receiver_default_state = {}
self.__cpu_calculator = LazyRateCalculator(lambda: time.clock())
# Initialization
def hookup_vfo_callback(k, d): # function so as to not close over loop variable
d.get_vfo_cell().subscribe2(lambda value: self.__device_vfo_callback(k), the_subscription_context)
for k, d in devices.iteritems():
hookup_vfo_callback(k, d)
self._do_connect()
def add_receiver(self, mode, key=None, state=None):
if len(self._receivers) >= 100:
# Prevent storage-usage DoS attack
raise Exception('Refusing to create more than 100 receivers')
if key is not None:
assert key not in self._receivers
else:
while True:
key = base26(self.receiver_key_counter)
self.receiver_key_counter += 1
if key not in self._receivers:
break
if len(self._receivers) > 0:
arbitrary = self._receivers.itervalues().next()
defaults = arbitrary.state_to_json()
else:
defaults = self.receiver_default_state
combined_state = defaults.copy()
for do_not_use_default in ['device_name', 'freq_linked_to_device']:
if do_not_use_default in combined_state:
del combined_state[do_not_use_default]
if state is not None:
combined_state.update(state)
facet = ContextForReceiver(self, key)
receiver = unserialize_exported_state(Receiver, kwargs=dict(
mode=mode,
audio_channels=self.__audio_manager.get_channels(),
device_name=self.source_name,
audio_destination=self.__audio_manager.get_default_destination(), # TODO match others
context=facet,
), state=combined_state)
facet._receiver = receiver
self._receivers[key] = receiver
self._receiver_valid[key] = False
self.__needs_reconnect.append(u'added receiver ' + key)
self._do_connect()
# until _enabled, the facet ignores any reconnect/rebuild-triggering callbacks
facet._enabled = True
return (key, receiver)
def delete_receiver(self, key):
assert key in self._receivers
receiver = self._receivers[key]
# save defaults for use if about to become empty
if len(self._receivers) == 1:
self.receiver_default_state = receiver.state_to_json()
del self._receivers[key]
del self._receiver_valid[key]
self.__needs_reconnect.append(u'removed receiver ' + key)
self._do_connect()
# TODO move these methods to a facet of AudioManager
def add_audio_queue(self, queue, queue_rate):
self.__audio_manager.add_audio_queue(queue, queue_rate)
self.__needs_reconnect.append(u'added audio queue')
self._do_connect()
self.__start_or_stop()
def remove_audio_queue(self, queue):
self.__audio_manager.remove_audio_queue(queue)
self.__start_or_stop()
self.__needs_reconnect.append(u'removed audio queue')
self._do_connect()
def get_audio_queue_channels(self):
"""
Return the number of channels (which will be 1 or 2) in audio queue outputs.
"""
return self.__audio_manager.get_channels()
def _do_connect(self):
"""Do all reconfiguration operations in the proper order."""
if self.__in_reconnect:
raise Exception('reentrant reconnect or _do_connect crashed')
self.__in_reconnect = True
t0 = time.time()
if self.source is not self._sources[self.source_name]:
log.msg('Flow graph: Switching RF device to %s' % (self.source_name))
self.__needs_reconnect.append(u'switched device')
this_source = self._sources[self.source_name]
self.source = this_source
self.state_changed('source')
self.__monitor_rx_driver = this_source.get_rx_driver()
monitor_signal_type = self.__monitor_rx_driver.get_output_type()
self.monitor.set_signal_type(monitor_signal_type)
self.monitor.set_input_center_freq(this_source.get_freq())
self.__clip_probe.set_window_and_reconnect(0.5 * monitor_signal_type.get_sample_rate())
if self.__needs_reconnect:
log.msg(u'Flow graph: Rebuilding connections because: %s' % (', '.join(self.__needs_reconnect),))
self.__needs_reconnect = []
self._recursive_lock()
self.disconnect_all()
self.connect(
self.__monitor_rx_driver,
self.monitor)
self.connect(
self.__monitor_rx_driver,
self.__clip_probe)
# Filter receivers
audio_rs = self.__audio_manager.reconnecting()
n_valid_receivers = 0
has_non_audio_receiver = False
for key, receiver in self._receivers.iteritems():
self._receiver_valid[key] = receiver.get_is_valid()
if not self._receiver_valid[key]:
continue
if not self.__audio_manager.validate_destination(receiver.get_audio_destination()):
log.err('Flow graph: receiver audio destination %r is not available' % (receiver.get_audio_destination(),))
continue
n_valid_receivers += 1
if n_valid_receivers > 6:
# Sanity-check to avoid burning arbitrary resources
# TODO: less arbitrary constant; communicate this restriction to client
log.err('Flow graph: Refusing to connect more than 6 receivers')
break
self.connect(self._sources[receiver.get_device_name()].get_rx_driver(), receiver)
receiver_output_type = receiver.get_output_type()
if receiver_output_type.get_sample_rate() <= 0:
# Demodulator has no output, but receiver has a dummy output, so connect it to something to satisfy flow graph structure.
self.connect(receiver, blocks.null_sink(gr.sizeof_float * self.__audio_manager.get_channels()))
# Note that we have a non-audio receiver which may be useful even if there is no audio output
has_non_audio_receiver = True
else:
assert receiver_output_type.get_kind() == 'STEREO'
audio_rs.input(receiver, receiver_output_type.get_sample_rate(), receiver.get_audio_destination())
self.__has_a_useful_receiver = audio_rs.finish_bus_connections() or \
has_non_audio_receiver
self._recursive_unlock()
# (this is in an if block but it can't not execute if anything else did)
log.msg('Flow graph: ...done reconnecting (%i ms).' % ((time.time() - t0) * 1000,))
self.__start_or_stop_later()
self.__in_reconnect = False
def __device_vfo_callback(self, device_key):
reactor.callLater(
self._sources[device_key].get_rx_driver().get_tune_delay(),
self.__device_vfo_changed,
device_key)
def __device_vfo_changed(self, device_key):
device = self._sources[device_key]
freq = device.get_freq()
if self.source is device:
self.monitor.set_input_center_freq(freq)
for rec_key, receiver in self._receivers.iteritems():
if receiver.get_device_name() == device_key:
receiver.changed_device_freq()
self._update_receiver_validity(rec_key)
# TODO: If multiple receivers change validity we'll do redundant reconnects in this loop; avoid that.
def _update_receiver_validity(self, key):
receiver = self._receivers[key]
if receiver.get_is_valid() != self._receiver_valid[key]:
self.__needs_reconnect.append(u'receiver %s validity changed' % (key,))
self._do_connect()
@exported_value(type=ReferenceT(), changes='never')
def get_monitor(self):
return self.monitor
@exported_value(type=ReferenceT(), persists=False, changes='never')
def get_sources(self):
return self.sources
@exported_value(type=ReferenceT(), persists=False, changes='explicit')
def get_source(self):
return self.source # TODO no need for this now...?
@exported_value(type=ReferenceT(), changes='never')
def get_receivers(self):
return self.receivers
# TODO the concept of 'accessories' is old and needs to go away, but we don't have a flexible enough UI to replace it with just devices since only one device can be looked-at at a time so far.
@exported_value(type=ReferenceT(), persists=False, changes='never')
def get_accessories(self):
return self.accessories
@exported_value(type=ReferenceT(), changes='never', label='Telemetry')
def get_telemetry_store(self):
return self.__telemetry_store
def start(self, **kwargs):
# pylint: disable=arguments-differ
# trigger reconnect/restart notification
self._recursive_lock()
self._recursive_unlock()
super(Top, self).start(**kwargs)
self.__running = True
def stop(self):
super(Top, self).stop()
self.__running = False
def __start_or_stop(self):
# TODO: Improve start/stop conditions:
#
# * run if a client is watching an audio-having receiver's cell-based outputs (e.g. VOR) but not listening to audio
#
# * don't run if no client is watching a pure telemetry receiver
# (maybe a user preference since having a history when you connect is useful)
#
# Both of these refinements require becoming aware of cell subscriptions.
should_run = (
self.__has_a_useful_receiver or
self.monitor.get_interested_cell().get())
if should_run != self.__running:
if should_run:
self.start()
else:
self.stop()
self.wait()
def __start_or_stop_later(self, unused_subscription_value=None):
reactor.callLater(0, self.__start_or_stop)
def close_all_devices(self):
"""Close all devices in preparation for a clean shutdown.
Makes this top block unusable"""
for device in self._sources.itervalues():
device.close()
for device in self._accessories.itervalues():
device.close()
self.stop()
self.wait()
@exported_value(
type_fn=lambda self: self.__rx_device_type,
changes='this_setter',
label='RF source')
def get_source_name(self):
return self.source_name
@setter
def set_source_name(self, value):
if value == self.source_name:
return
if value not in self._sources:
raise ValueError('Source %r does not exist' % (value,))
self.source_name = value
self._do_connect()
@exported_value(type=NoticeT(always_visible=False), changes='continuous')
def get_clip_warning(self):
level = self.__clip_probe.level()
        # We assume that our sample source's absolute limits on I and Q values are the range -1.0 to 1.0. This is a square region; therefore the magnitude observed can be up to sqrt(2) = 1.414 above this, allowing us some opportunity to measure the amount of excess, and also to detect clipping even if the device doesn't produce exactly +-1.0 values.
if level >= 1.0:
return u'Input amplitude too high (%.2f \u2265 1.0). Reduce gain.' % math.sqrt(level)
else:
return u''
# TODO: This becomes useless w/ Session fix
@exported_value(type=float, changes='continuous')
def get_cpu_use(self):
return round(self.__cpu_calculator.get(), 2)
def _get_rx_device_type(self):
"""for ContextForReceiver only"""
return self.__rx_device_type
def _get_audio_destination_type(self):
"""for ContextForReceiver only"""
return self.__audio_manager.get_destination_type()
def _trigger_reconnect(self, reason):
self.__needs_reconnect.append(reason)
self._do_connect()
def _recursive_lock_hook(self):
for source in self._sources.itervalues():
source.notify_reconnecting_or_restarting()
class ContextForReceiver(Context):
def __init__(self, top, key):
Context.__init__(self, top)
self.__top = top
self._key = key
self._enabled = False # assigned outside
self._receiver = None # assigned outside
def get_device(self, device_key):
return self.__top._sources[device_key]
def get_rx_device_type(self):
return self.__top._get_rx_device_type()
def get_audio_destination_type(self):
return self.__top._get_audio_destination_type()
def revalidate(self, tuning):
if not self._enabled: return
# TODO: Lots of the below logic probably ought to replace the current receiver.get_is_valid.
# TODO: Be aware of receiver bandwidth.
receiver = self._receiver
device = self.__top._sources[receiver.get_device_name()]
usable_bandwidth_range = device.get_rx_driver().get_usable_bandwidth()
needed_freq = receiver.get_rec_freq()
current_device_freq = device.get_freq()
def validate_by_range(rec_freq, dev_freq):
rel_freq = rec_freq - dev_freq
return usable_bandwidth_range(rel_freq) == rel_freq
# TODO: can't do this because it horribly breaks drag-tuning
# if tuning and not validate_by_range(needed_freq, current_device_freq):
# we need to check the range as well as receiver.get_is_valid because receiver.get_is_valid uses the tune_delay delayed frequency which may not be up to date
if tuning and not receiver.get_is_valid() and not validate_by_range(needed_freq, current_device_freq):
# TODO need 0Hz-gimmick logic
direction = 1 if needed_freq > current_device_freq else -1
usable_bandwidth_step = usable_bandwidth_range.get_max() - usable_bandwidth_range.get_min()
page_size = usable_bandwidth_step
paged_freq = device.get_freq() + direction * page_size
if validate_by_range(needed_freq, paged_freq):
freq = paged_freq
if _DEBUG_RETUNE:
print '--- page', device.get_freq(), direction * page_size, freq
else:
# One page will not do; jump exactly to center frequency, or as close as avoids DC if needed.
freq = needed_freq - _find_in_usable_bandwidth(
usable_bandwidth_range, receiver)
if _DEBUG_RETUNE:
print '--- jump', receiver.get_mode(), device.get_freq(), 'to', freq, 'for', needed_freq
# TODO write justification here that this won't be dangerously reentrant
device.set_freq(freq)
# TODO: It would also make sense to switch sources here, if the receiver is more-in-range for the other source.
# No need to _update_receiver_validity here because tuning will do that with fewer reconnects.
else:
self.__top._update_receiver_validity(self._key)
def changed_needed_connections(self, reason):
if self._enabled:
self.__top._trigger_reconnect(u'receiver %s: %s' % (self._key, reason))
def output_message(self, message):
self.__top.get_telemetry_store().receive(message)
def _find_in_usable_bandwidth(usable_bandwidth_range, receiver):
"""Given the usable_bandwidth_range of a device and a receiver, find where we can place the receiver in the range (all relative frequencies)."""
shape = receiver.get_demodulator().get_band_shape()
# We assume that the bandwidth range is generally wide-open with a possible gap in the middle.
# TODO: It would make more sense to directly ask RangeT to tell us where an interval will fit, rather than making that assumption.
least_positive = usable_bandwidth_range(0, range_round_direction=+1)
greatest_negative = usable_bandwidth_range(0, range_round_direction=-1)
if least_positive == greatest_negative == 0:
# No DC avoidance. We can just pick the center.
if _DEBUG_RETUNE:
print '--- no offset'
return 0.0
offset_positive = least_positive - shape.stop_low
offset_negative = greatest_negative - shape.stop_high
if _DEBUG_RETUNE:
print '--- offsets', receiver.get_mode(), offset_negative, greatest_negative, least_positive, offset_positive
if offset_positive < 0 or offset_negative > 0:
# Receiver has a larger filter than the DC offset, so line up neatly.
return 0.0
if offset_positive <= -offset_negative:
return offset_positive
else:
return offset_negative
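# Illustrative worked example: with a usable range that excludes +/-10 kHz
# around DC and a demodulator whose band shape stops 5 kHz either side of its
# carrier (stop_low = -5 kHz, stop_high = +5 kHz), the candidate offsets are
# +15 kHz and -15 kHz; the smaller-magnitude one wins and ties go to the
# positive side, so the receiver is placed 15 kHz above the device center.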
class MaxProbe(gr.hier_block2):
"""
A probe whose level is the maximum magnitude-squared occurring within the specified window of samples.
"""
def __init__(self, window=10000):
gr.hier_block2.__init__(
self, type(self).__name__,
gr.io_signature(1, 1, gr.sizeof_gr_complex),
gr.io_signature(0, 0, 0))
self.__sink = None # quiet pylint
self.set_window_and_reconnect(window)
def level(self):
# pylint: disable=method-hidden, no-self-use
# overridden in instances
raise Exception('This placeholder should never get called')
def set_window_and_reconnect(self, window):
"""
Must be called while the flowgraph is locked already.
"""
# Use a power-of-2 window size to satisfy gnuradio allocation alignment without going overboard.
window = int(2 ** math.floor(math.log(window, 2)))
self.disconnect_all()
self.__sink = blocks.probe_signal_f()
self.connect(
self,
blocks.complex_to_mag_squared(),
blocks.stream_to_vector(itemsize=gr.sizeof_float, nitems_per_block=window),
blocks.max_ff(window),
self.__sink)
# shortcut method implementation
self.level = self.__sink.level
def base26(x):
"""not quite base 26, actually, because it has no true zero digit"""
if x < 26:
return 'abcdefghijklmnopqrstuvwxyz'[x]
else:
return base26(x // 26 - 1) + base26(x % 26)
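# Illustrative aside (never called): receiver keys produced by base26() run
# a, b, ..., z, aa, ab, ..., which is how add_receiver() names new receivers.
def _base26_examples():
    return [base26(n) for n in (0, 1, 25, 26, 27, 51, 52)]
    # -> ['a', 'b', 'z', 'aa', 'ab', 'az', 'ba']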
| gpl-3.0 | 2,584,396,331,808,346,000 | 40.798587 | 350 | 0.61624 | false |
maas/maas | src/maasserver/exceptions.py | 1 | 6877 | # Copyright 2012-2017 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Exceptions."""
__all__ = [
"ClusterUnavailable",
"MAASException",
"MAASAPIBadRequest",
"MAASAPIException",
"MAASAPINotFound",
"NodeStateViolation",
"NodeGroupMisconfiguration",
"NoScriptsFound",
"IteratorReusedError",
"PowerProblem",
"StaticIPAddressExhaustion",
"StaticIPAddressTypeClash",
"UnresolvableHost",
]
import http.client
import json
from django.core.exceptions import ValidationError
from django.http import HttpResponse, HttpResponseRedirect
class MAASException(Exception):
"""Base class for MAAS' exceptions."""
class CannotDeleteUserException(Exception):
"""User can't be deleted."""
class MAASAPIException(Exception):
"""Base class for MAAS' API exceptions.
:ivar api_error: The HTTP code that should be returned when this error
is raised in the API (defaults to 500: "Internal Server Error").
"""
api_error = int(http.client.INTERNAL_SERVER_ERROR)
def make_http_response(self):
"""Create an :class:`HttpResponse` representing this exception."""
encoding = "utf-8"
return HttpResponse(
status=self.api_error,
content=str(self).encode(encoding),
content_type="text/plain; charset=%s" % encoding,
)
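# Illustrative aside (not part of MAAS itself): API views are expected to
# catch these exceptions and return the ready-made response, e.g.:
def _example_api_error_response():
    try:
        raise MAASAPIException("Something went wrong.")
    except MAASAPIException as error:
        return error.make_http_response()   # 500, text/plain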
class MAASAPIBadRequest(MAASAPIException):
api_error = int(http.client.BAD_REQUEST)
class MAASAPINotFound(MAASAPIException):
api_error = int(http.client.NOT_FOUND)
class MAASAPIForbidden(MAASAPIException):
api_error = int(http.client.FORBIDDEN)
class MAASAPIValidationError(MAASAPIBadRequest, ValidationError):
"""A validation error raised during a MAAS API request."""
def make_http_response(self):
"""Create an :class:`HttpResponse` representing this exception."""
content_type = b"application/json"
if hasattr(self, "error_dict"):
messages = json.dumps(self.message_dict)
elif len(self.messages) == 1:
messages = self.messages[0]
content_type = b"text/plain"
else:
messages = json.dumps(self.messages)
encoding = b"utf-8"
return HttpResponse(
status=self.api_error,
content=messages,
content_type=b"%s; charset=%s" % (content_type, encoding),
)
class Unauthorized(MAASAPIException):
"""HTTP error 401: Unauthorized. Login required."""
api_error = int(http.client.UNAUTHORIZED)
class NodeStateViolation(MAASAPIException):
"""Operation on node not possible given node's current state."""
api_error = int(http.client.CONFLICT)
class NodesNotAvailable(NodeStateViolation):
"""Requested node(s) are not available to be acquired."""
api_error = int(http.client.CONFLICT)
class Redirect(MAASAPIException):
"""Redirect. The exception message is the target URL."""
api_error = int(http.client.FOUND)
def make_http_response(self):
return HttpResponseRedirect(str(self))
class NodeGroupMisconfiguration(MAASAPIException):
"""Node Groups (aka Cluster Controllers) are misconfigured.
This might mean that more than one controller is marked as managing the
same network
"""
api_error = int(http.client.CONFLICT)
class ClusterUnavailable(MAASAPIException):
"""A Cluster Controller is not available for RPC queries."""
api_error = int(http.client.SERVICE_UNAVAILABLE)
class IteratorReusedError(Exception):
"""Raise when a :class:`UseOnceIterator` gets reused."""
class StaticIPAddressExhaustion(MAASAPIException):
"""Raised when no more static IPs are available during allocation."""
api_error = int(http.client.SERVICE_UNAVAILABLE)
class IPAddressCheckFailed(MAASAPIException):
"""IP address allocation checks failed."""
api_error = int(http.client.SERVICE_UNAVAILABLE)
class StaticIPAddressUnavailable(MAASAPIException):
"""Raised when a requested IP is not available."""
api_error = int(http.client.NOT_FOUND)
class StaticIPAddressOutOfRange(MAASAPIException):
"""Raised when a requested IP is not in an acceptable range."""
api_error = int(http.client.FORBIDDEN)
class StaticIPAddressTypeClash(MAASAPIException):
"""Raised when trying to allocate an IP for a MAC where one of another
type already exists."""
api_error = int(http.client.CONFLICT)
class StaticIPAlreadyExistsForMACAddress(MAASAPIException):
"""Raised when trying to allocate a static IP for a non-node MAC
where a node with that MAC already exists."""
api_error = int(http.client.CONFLICT)
class StaticIPAddressConflict(MAASAPIException):
"""Raised when trying to allocate a static IP that doesn't belong to
the network the MAC address is connected to."""
api_error = int(http.client.CONFLICT)
class StaticIPAddressForbidden(MAASAPIException):
"""Raised when trying to allocate a static IP that belongs to a
dynamic range."""
api_error = int(http.client.CONFLICT)
class NodeActionError(MAASException):
"""Raised when there is an error performing a NodeAction."""
def __init__(self, error):
# Avoid circular imports.
from maasserver.clusterrpc.utils import get_error_message_for_exception
if isinstance(error, Exception):
super().__init__(get_error_message_for_exception(error))
else:
super().__init__(error)
class UnresolvableHost(MAASException):
"""Raised when a hostname can't be resolved to an IP address."""
class MissingBootImage(MAASException):
"""Raised when a boot image is expected to exists."""
class PreseedError(MAASException):
"""Raised when issue generating the preseed."""
class PowerProblem(MAASAPIException):
"""Raised when there's a problem with a power operation.
This could be a problem with parameters, a problem with the power
controller, or something else. The exception text will contain more
information.
"""
api_error = int(http.client.SERVICE_UNAVAILABLE)
class PodProblem(MAASAPIException):
"""Raised when there's a problem with a pod operation.
This could be a problem with parameters, a problem with the pod's
controller, or something else. The exception text will contain more
information.
"""
api_error = int(http.client.SERVICE_UNAVAILABLE)
class NoScriptsFound(MAASException):
"""Raised when no Scripts are found based on user input."""
class StorageClearProblem(MAASAPIException):
"""Raised when an issue occurs that prevents the clearing of a machine's
storage configuration."""
class NetworkingResetProblem(MAASException):
"""Raised when an issue occurs that prevents resetting networking configuration."""
| agpl-3.0 | -2,348,278,307,039,550,500 | 26.842105 | 87 | 0.703359 | false |
mscook/pyParaTools | ParaUtils.py | 1 | 8999 | """Utility methods for paramagnetic observables """
import math
from numpy import *
def ZXZRot(A, B, G, scal=1.0):
"""
Builds the ZXZ rotation matrix given 3 Euler Angles. See:
http://mathworld.wolfram.com/EulerAngles.html
@param A : The (A)lpha angle
@type A : float
@param B : The (B)eta angle
@type B : float
@param G : The (G)amma angle
@type G : float
    @param scal: (OPTIONAL) scale factor applied to the rotation matrix,
     used for the X-tensor frame determination
    @type scal : float
"""
rot = zeros((3,3))
ca = math.cos(math.radians(A))
cb = math.cos(math.radians(B))
cg = math.cos(math.radians(G))
sa = math.sin(math.radians(A))
sb = math.sin(math.radians(B))
sg = math.sin(math.radians(G))
rot[0][0] = (( cg * ca) - (cb * sa * sg))*scal
rot[0][1] = (( cg * sa) + (cb * ca * sg))*scal
rot[0][2] = (( sg * sb))*scal
rot[1][0] = ((-sg * ca) - (cb * sa * cg))*scal
rot[1][1] = ((-sg * sa) + (cb * ca * cg))*scal
rot[1][2] = (( cg * sb))*scal
rot[2][0] = (( sb * sa))*scal
rot[2][1] = ((-sb * ca))*scal
rot[2][2] = (cb)*scal
return rot
def ZYZRot(A, B, G, scal=1.0):
"""
    Builds the ZYZ rotation matrix given 3 Euler Angles. See:
http://mathworld.wolfram.com/EulerAngles.html
@param A: The (A)lpha angle
@type A : float
@param B: The (B)eta angle
@type B : float
@param G: The (G)amma angle
@type G : float
    @param scal: (OPTIONAL) scale factor applied to the rotation matrix,
     used for the X-tensor frame determination
    @type scal : float
"""
rot = zeros((3,3))
ca = math.cos(math.radians(A))
cb = math.cos(math.radians(B))
cg = math.cos(math.radians(G))
sa = math.sin(math.radians(A))
sb = math.sin(math.radians(B))
sg = math.sin(math.radians(G))
rot[0][0] = ((-sg * sa) + (cb * ca * cg))*scal
rot[0][1] = (( sg * ca) + (cb * sa * cg))*scal
rot[0][2] = (( -cg * sb))*scal
rot[1][0] = ((-cg * sa) - (cb * ca * sg))*scal
rot[1][1] = ((cg * ca) - (cb * sa * sg))*scal
rot[1][2] = (( sg * sb))*scal
rot[2][0] = (( sb * ca))*scal
rot[2][1] = ((sb * sa))*scal
rot[2][2] = (cb)*scal
return rot
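# Quick sanity check (illustrative): with zero angles both conventions reduce to
# the identity, e.g. ZYZRot(0, 0, 0) == eye(3), and for scal=1.0 any ZXZRot/ZYZRot
# result R is a proper rotation: dot(R, R.T) ~= eye(3) and det(R) ~= 1.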
def RotX90():
"""
    Builds the rotation matrix for a 90 deg rotation about X:
    [[1, 0, 0], [0, 0, 1], [0, -1, 0]]
"""
rot = zeros((3,3))
rot[0][0] = 1.0
rot[1][2] = 1.0
rot[2][1] = -1.0
return rot
def RotY90():
"""
    Builds the rotation matrix for a 90 deg rotation about Y:
    [[0, 0, -1], [0, 1, 0], [1, 0, 0]]
"""
rot = zeros((3,3))
rot[0][2] = -1.0
rot[1][1] = 1.0
rot[2][0] = 1.0
return rot
def RotZ90():
"""
    Builds the rotation matrix for a 90 deg rotation about Z:
    [[0, 1, 0], [-1, 0, 0], [0, 0, 1]]
"""
rot = zeros((3,3))
rot[0][1] = 1.0
rot[1][0] = -1.0
rot[2][2] = 1.0
return rot
def correctRofAngles(cosv, sinv):
#TODO: Check that this is correct
if (cosv <= math.pi/2.0):
if (sinv < 0.0):
sinv = sinv + 2*math.pi
return sinv
else:
return sinv
else:
if(sinv > 0.0):
return cosv
else:
return -1*(cosv) +2*math.pi
def ABGFromRotMatrixZYZ(rotMat):
#TODO: Check these are correct!
#TODO: Add the corresponding ZXZ method
b_c = math.acos(rotMat[2,2])
a_c = math.acos(rotMat[2,0]/math.sin(b_c))
g_c = math.acos(-1*rotMat[0,2]/math.sin(b_c))
a_s = math.asin(rotMat[2,1]/math.sin(b_c))
g_s = math.asin(rotMat[1,2]/math.sin(b_c))
aE = correctRofAngles(a_c, a_s)
bE = b_c
gE = correctRofAngles(g_c, g_s)
return aE, bE, gE
def FromVVU(AxorRh):
"""
    Convert from van Vleck units (vvu = m^3/3.77e-35)
@param AxorRh: Axial or Rhombic component
@type AxorRh : float
"""
return AxorRh/(1./((12*math.pi))*10000)
def ToVVU(AxorRh):
"""
    Convert to van Vleck units (vvu = m^3/3.77e-35)
@param AxorRh: Axial or Rhombic component
@type AxorRh : float
"""
return AxorRh*(1./((12*math.pi))*10000)
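# ToVVU/FromVVU are exact inverses, so FromVVU(ToVVU(x)) == x up to floating
# point rounding.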
def FixAngle(angle):
"""
    Normalize an angle (in degrees) to [0, 360), as the optimized angles are not bound to that range
@param angle: An Euler angle determined from the optimization
@type angle: float
"""
while angle > 0.0:
angle = angle - 360.0
while angle < 0.0:
angle = angle + 360.0
return angle
def SwapVals(val1, val2):
temp = 0.0
temp = val1
val1 = val2
val2 = temp
return val1, val2
def lookupMGR(spin_type):
"""
Return the gyromagnetic ratios for the coupling.
See: http://nmrwiki.org/wiki/index.php?title=Gyromagnetic_ratio
"""
#TODO: These need to be checked
PI2 = 2*math.pi
H1mgr = (PI2*42.576)*1e6
C13mgr = (PI2*10.705)*1e6
Nmgr = []
N14mgr = (PI2*3.0766)*1e6
N15mgr = (PI2*-4.315)*1e6
Nmgr.append(N14mgr)
Nmgr.append(N15mgr)
O17mgr = (PI2*-5.7716)*1e6
mgr = {'H':H1mgr, 'C':C13mgr, 'N':Nmgr, 'O':O17mgr}
return mgr[spin_type]
def rdcScal(S, g1, g2, B0, temp):
"""
    Scaling constant for RDC calculations
"""
#TODO: These need to be checked
hbar = 1.05457148e-34
kboltz = 1.3806503e-23
scal = -S*g1*g2*hbar*B0*B0 / (8*15*math.pi*math.pi*kboltz*temp)
return scal*0.01
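# Usage sketch (illustrative values only): the RDC scaling for a 1H-15N pair at
# 18.8 T and 298 K with order parameter S = 1.0 would be computed as
#
#     k = rdcScal(1.0, lookupMGR('H'), lookupMGR('N')[1], 18.8, 298.0)
#
# noting that lookupMGR('N') returns [gamma_14N, gamma_15N], so index 1 is 15N.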
def FitSummary(soln,cov,info,mesg,success, p0, y_meas, tof):
scal = 1.0
if tof == 2 or tof == 3:
		# The effective strength of the X-tensor is halved in monomer fits
scal = 2.0
f_type = { \
0:'Standard X-tensor optimization', \
1:'Standard X-tensor optimization (fixed metal position)', \
2:'X-tensor optimization to dimer', \
3:'X-tensor optimization to dimer (fixed metal position)', \
4:'2 X-tensors to a monomer', \
5:'2 X-tensors (1 fixed metal site) to a monomer', \
6:'2 X-tensors (2 fixed metal sites) to a monomer' }
print 80*'-'
print "Fitting Results: ", f_type[tof]
print 80*'-'
if success==1:
print "We have converged to a minima"
else:
print "We have failed to converge"
print "REASON:", mesg
# calculate final chi square
chisq=sum(info["fvec"]*info["fvec"])
dof=len(y_meas)-len(p0)
# chisq, sqrt(chisq/dof) agrees with gnuplot
print "* Converged with chi squared: ",chisq
print "* Degrees of freedom, dof: ", dof
print "* RMS of residuals (i.e. sqrt(chisq/dof)): ", sqrt(chisq/dof)
print "* Reduced chisq (i.e. variance of residuals): ", chisq/dof
print
# uncertainties are calculated as per gnuplot, "fixing" the result
# for non unit values of the reduced chisq.
# values at min match gnuplot
print "Fitted parameters at minimum, with 68% C.I.:"
print "%s%7s%11s%13s" % ("Param", "Init", "Final", "Error")
#NOTE: The confidence intervals may not be correct due to conversion to VVU etc.
if tof == 0 or tof == 2 or tof ==4:
for i,pmin in enumerate(soln):
if i == 3 or i == 4 or i == 11 or i == 12:
#NOTE: The scal factor is dimer specific
print "%3i %7s %13.4f +/- %8f"%(i+1,FromVVU(p0[i]),scal*(FromVVU(pmin)),scal*(FromVVU(sqrt(cov[i,i])*sqrt(chisq/dof))))
elif i == 5 or i == 6 or i ==7 or i == 13 or i == 14 or i == 15:
print "%3i %7s %13.4f +/- %8f"%(i+1,FixAngle(p0[i]),FixAngle(pmin),sqrt(cov[i,i])*sqrt(chisq/dof))
else:
print "%3i %7s %13.4f +/- %8f"%(i+1,p0[i],pmin,sqrt(cov[i,i])*sqrt(chisq/dof))
if tof == 1 or tof == 3 or tof == 5:
for i,pmin in enumerate(soln):
if i == 0 or i == 1 or i == 8 or i == 9:
#NOTE: The scal factor is dimer specific
print "%3i %7s %13.4f +/- %8f"%(i+1,FromVVU(p0[i]),scal*(FromVVU(pmin)),scal*(FromVVU(sqrt(cov[i,i])*sqrt(chisq/dof))))
elif i == 2 or i == 3 or i ==4 or i == 10 or i == 11 or i == 12:
print "%3i %7s %13.4f +/- %8f"%(i+1,FixAngle(p0[i]),FixAngle(pmin),sqrt(cov[i,i])*sqrt(chisq/dof))
else:
print "%3i %7s %13.4f +/- %8f"%(i+1,p0[i],pmin,sqrt(cov[i,i])*sqrt(chisq/dof))
if tof == 6:
for i,pmin in enumerate(soln):
if i == 0 or i == 1 or i == 5 or i == 6:
#NOTE: The scal factor is dimer specific
print "%3i %7s %13.4f +/- %8f"%(i+1,FromVVU(p0[i]),scal*(FromVVU(pmin)),scal*(FromVVU(sqrt(cov[i,i])*sqrt(chisq/dof))))
elif i == 2 or i == 3 or i == 4 or i == 7 or i == 8 or i == 9:
print "%3i %7s %13.4f +/- %8f"%(i+1,FixAngle(p0[i]),FixAngle(pmin),sqrt(cov[i,i])*sqrt(chisq/dof))
else:
print "%3i %7s %13.4f +/- %8f"%(i+1,p0[i],pmin,sqrt(cov[i,i])*sqrt(chisq/dof))
print 80*'-'
print
return chisq/dof
| apache-2.0 | -5,894,408,450,203,394,000 | 32.206642 | 137 | 0.536615 | false |
nathanielvarona/airflow | airflow/utils/log/json_formatter.py | 1 | 2206 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""json_formatter module stores all related to ElasticSearch specific logger classes"""
import json
import logging
from airflow.utils.helpers import merge_dicts
class JSONFormatter(logging.Formatter):
"""JSONFormatter instances are used to convert a log record to json."""
# pylint: disable=too-many-arguments
def __init__(self, fmt=None, datefmt=None, style='%', json_fields=None, extras=None):
super().__init__(fmt, datefmt, style)
if extras is None:
extras = {}
if json_fields is None:
json_fields = []
self.json_fields = json_fields
self.extras = extras
def usesTime(self):
return self.json_fields.count('asctime') > 0
def format(self, record):
super().format(record)
record_dict = {label: getattr(record, label, None) for label in self.json_fields}
if "message" in self.json_fields:
msg = record_dict["message"]
if record.exc_text:
if msg[-1:] != "\n":
msg = msg + "\n"
msg = msg + record.exc_text
if record.stack_info:
if msg[-1:] != "\n":
msg = msg + "\n"
msg = msg + self.formatStack(record.stack_info)
record_dict["message"] = msg
merged_record = merge_dicts(record_dict, self.extras)
return json.dumps(merged_record)
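# Usage sketch (illustrative, not from the Airflow docs): attach the formatter to
# a handler to emit one JSON object per log record, e.g.
#
#     handler = logging.StreamHandler()
#     handler.setFormatter(JSONFormatter(
#         json_fields=["asctime", "levelname", "message"],
#         extras={"component": "scheduler"}))
#     logging.getLogger("example").addHandler(handler)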
| apache-2.0 | 8,733,983,074,481,377,000 | 37.034483 | 89 | 0.644152 | false |
MasonM/django-elect | django_elect/tests/test_models.py | 1 | 13065 | from freezegun import freeze_time
from datetime import datetime
from django.test import TestCase
from django.apps import apps
from django_elect import settings
from django_elect.models import Ballot, Candidate, Election, Vote, \
VotePlurality, VotePreferential, VotingNotAllowedException
@freeze_time("2010-10-10 00:00:00")
class BaseTestCase(TestCase):
def setUp(self):
user_model = apps.get_model(settings.DJANGO_ELECT_USER_MODEL)
self.user1 = user_model.objects.create_user(username="user1",
email="[email protected]")
self.user2 = user_model.objects.create_user(username="user2",
email="[email protected]")
self.election_current = Election.objects.create(
name="current",
introduction="Intro1",
vote_start=datetime(2010, 10, 1),
vote_end=datetime(2010, 10, 11))
def create_candidate(self, ballot, incumbent=True, last_name='bar'):
return ballot.candidates.create(
first_name="foo",
last_name=last_name,
incumbent=incumbent)
def create_current_pl_ballot(self, seats_available=2):
return self.election_current.ballots.create(
type="Pl",
seats_available=seats_available)
def create_current_pr_ballot(self, seats_available=2):
return self.election_current.ballots.create(
type="Pr",
seats_available=seats_available)
class ElectionTestCase(BaseTestCase):
"Tests for the Election model"
def test_unicode(self):
self.assertEqual(unicode(self.election_current), u"current")
def test_voting_allowed_with_current_election(self):
self.assertTrue(self.election_current.voting_allowed())
def test_voting_allowed_with_finished_election(self):
election_finished = Election(
vote_start=datetime(2010, 10, 1),
vote_end=datetime(2010, 10, 9))
self.assertFalse(election_finished.voting_allowed())
def test_voting_allowed_with_future_election(self):
election_future = Election(
vote_start=datetime(2010, 10, 11),
vote_end=datetime(2010, 10, 17))
self.assertFalse(election_future.voting_allowed())
def test_has_voted_with_user_not_allowed_to_vote(self):
self.assertFalse(self.election_current.has_voted(self.user1))
def test_has_voted_with_user_who_already_voted(self):
self.election_current.allowed_voters.add(self.user1)
self.election_current.votes.create(account=self.user1)
self.assertTrue(self.election_current.has_voted(self.user1))
self.assertFalse(self.election_current.has_voted(self.user2))
def test_voting_allowed_for_user_with_empty_allowed_voters_list(self):
self.assertTrue(self.election_current.voting_allowed_for_user(self.user1))
self.assertTrue(self.election_current.voting_allowed_for_user(self.user2))
def test_voting_allowed_for_user_with_nonempty_allowed_voters_list(self):
self.election_current.allowed_voters.add(self.user1)
self.assertTrue(self.election_current.voting_allowed_for_user(self.user1))
self.assertFalse(self.election_current.voting_allowed_for_user(self.user2))
def test_voting_allowed_for_user_who_voted_with_empty_allowed_voters_list(self):
self.assertTrue(self.election_current.voting_allowed_for_user(self.user1))
self.election_current.votes.create(account=self.user1)
self.assertFalse(self.election_current.voting_allowed_for_user(self.user1))
def test_voting_allowed_for_user_who_voted_with_nonempty_allowed_voters_list(self):
self.election_current.allowed_voters.add(self.user1)
self.assertTrue(self.election_current.voting_allowed_for_user(self.user1))
self.election_current.votes.create(account=self.user1)
self.assertFalse(self.election_current.voting_allowed_for_user(self.user1))
def test_voting_allowed_for_user_with_finished_election(self):
election_finished = Election.objects.create(
name="finished",
vote_start=datetime(2010, 10, 1),
vote_end=datetime(2010, 10, 9))
election_finished.allowed_voters.add(self.user1)
self.assertFalse(election_finished.voting_allowed_for_user(self.user1))
def test_create_vote_for_user_not_allowed(self):
self.election_current.allowed_voters.add(self.user2)
create_vote = lambda: self.election_current.create_vote(self.user1)
# shouldn't be allowed to save a vote for someone not in allowed_voters
self.assertRaises(VotingNotAllowedException, create_vote)
def test_create_vote_success(self):
vote = self.election_current.create_vote(self.user1)
self.assertEqual(vote.account, self.user1)
self.assertEqual(vote.election, self.election_current)
def test_disassociate_accounts(self):
self.election_current.votes.create(account=self.user1)
self.election_current.votes.create(account=self.user2)
self.assertEqual(self.election_current.votes.count(), 2)
self.election_current.disassociate_accounts()
self.assertEqual(self.election_current.votes.count(), 2)
self.assertFalse(self.election_current.has_voted(self.user1))
self.assertFalse(self.election_current.has_voted(self.user2))
def test_full_statistics(self):
ballot_plurality = self.create_current_pl_ballot(seats_available=6)
pl_candidate1 = self.create_candidate(ballot_plurality)
pl_candidate2 = self.create_candidate(ballot_plurality)
pl_candidate3 = self.create_candidate(ballot_plurality)
ballot_preferential = self.create_current_pr_ballot(seats_available=2)
pr_candidate1 = self.create_candidate(ballot_preferential)
pr_candidate2 = self.create_candidate(ballot_preferential)
vote1 = self.election_current.votes.create(account=self.user1)
vote1.pluralities.create(candidate=pl_candidate1)
vote1.preferentials.create(candidate=pr_candidate1, point=2)
vote1.preferentials.create(candidate=pr_candidate2, point=3)
stats = self.election_current.get_full_statistics()
expected_candidates = [pl_candidate1, pl_candidate2, pl_candidate3,
pr_candidate1, pr_candidate2]
expected_ballots = [ballot_plurality, ballot_preferential]
expected_votes = { vote1: [1, 0, 0, 2, 3] }
self.assertEqual(expected_candidates, stats['candidates'])
self.assertEqual(expected_ballots, stats['ballots'])
self.assertEqual(expected_votes, stats['votes'])
# Add another vote
vote2 = self.election_current.votes.create(account=self.user1)
vote2.pluralities.create(candidate=pl_candidate3)
vote2.pluralities.create(candidate=pl_candidate2)
vote2.preferentials.create(candidate=pr_candidate1, point=3)
stats = self.election_current.get_full_statistics()
expected_votes[vote2] = [0, 1, 1, 3, 0]
# candidates and ballots should be unchanged and in same order
self.assertEqual(expected_candidates, stats['candidates'])
self.assertEqual(expected_ballots, stats['ballots'])
self.assertEqual(expected_votes, stats['votes'])
class BallotTestCase(BaseTestCase):
"Tests for logic in the Ballot model that's common to both types"
def test_has_incumbents_with_empty_ballot(self):
ballot = self.create_current_pl_ballot()
self.assertFalse(ballot.has_incumbents())
def test_has_incumbents_with_ballot_not_having_incumbent_candidates(self):
ballot = self.create_current_pl_ballot()
candidate = self.create_candidate(ballot, incumbent=False)
self.assertFalse(ballot.has_incumbents())
def test_has_incumbents_with_ballot_having_incumbent_candidates(self):
ballot = self.create_current_pl_ballot()
candidate = self.create_candidate(ballot, incumbent=True)
self.assertTrue(ballot.has_incumbents())
class PluralityBallotTestCase(BaseTestCase):
"Tests for the Ballot model with type='Pl'"
def test_unicode(self):
ballot = Ballot(
description="lorem ipsum",
type="Pl",
election=self.election_current,
)
self.assertEqual(unicode(ballot), "Plurality current: lorem ipsum")
def test_get_candidate_stats(self):
self.election_current.allowed_voters.add(self.user1, self.user2)
ballot = self.create_current_pl_ballot(seats_available=6)
pl_candidate1 = self.create_candidate(ballot)
self.assertEqual(ballot.get_candidate_stats(), [(pl_candidate1, 0)])
vote1 = self.election_current.votes.create(account=self.user1)
vote1.pluralities.create(candidate=pl_candidate1)
self.assertEqual(ballot.get_candidate_stats(), [(pl_candidate1, 1)])
pl_candidate2 = self.create_candidate(ballot)
self.assertEqual(ballot.get_candidate_stats(),
[(pl_candidate1, 1), (pl_candidate2, 0)])
vote2 = self.election_current.votes.create(account=self.user2)
        vote2.pluralities.create(candidate=pl_candidate1)
self.assertEqual(ballot.get_candidate_stats(),
[(pl_candidate1, 2), (pl_candidate2, 0)])
class PreferentialBallotTestCase(BaseTestCase):
"Tests for the Ballot model with type='Pr'"
def test_unicode(self):
ballot = Ballot(
description="dolor sit",
type="Pr",
election=self.election_current,
)
self.assertEqual(unicode(ballot), "Preferential current: dolor sit")
def test_get_candidate_stats(self):
ballot_preferential = self.create_current_pr_ballot(seats_available=2)
pr_candidate1 = self.create_candidate(ballot_preferential)
self.election_current.allowed_voters.add(self.user1, self.user2)
self.assertEqual(ballot_preferential.get_candidate_stats(),
[(pr_candidate1, 0)])
vote1 = self.election_current.votes.create(account=self.user1)
vote1.preferentials.create(point=2, candidate=pr_candidate1)
self.assertEqual(ballot_preferential.get_candidate_stats(),
[(pr_candidate1, 2)])
pr_candidate2 = self.create_candidate(ballot_preferential)
self.assertEqual(ballot_preferential.get_candidate_stats(),
[(pr_candidate1, 2), (pr_candidate2, 0)])
vote2 = self.election_current.votes.create(account=self.user2)
vote2.preferentials.create(point=1, candidate=pr_candidate1)
vote2.preferentials.create(point=2, candidate=pr_candidate2)
self.assertEqual(ballot_preferential.get_candidate_stats(),
[(pr_candidate1, 3), (pr_candidate2, 2)])
class CandidateTestCase(BaseTestCase):
"Tests for the Candidate model"
def test_unicode_with_normal_candidate(self):
candidate = Candidate(
first_name="FOO",
last_name="BAR",
institution="FBAR",
incumbent=True)
self.assertEqual(unicode(candidate), "*FOO BAR (FBAR)")
def test_unicode_with_writein(self):
candidate = Candidate(
first_name="LOREM",
last_name="IPSUM",
write_in=True,
incumbent=False)
self.assertEqual(unicode(candidate), "LOREM IPSUM (write-in)")
def test_get_name(self):
candidate = Candidate(
first_name="FOO",
last_name="BAR",
institution="FBAR",
incumbent=True)
self.assertEqual(candidate.get_name(), "FOO BAR")
class VoteTestCase(BaseTestCase):
"Tests for the Vote model"
def test_unicode(self):
vote1 = self.election_current.votes.create(account=self.user1)
self.user1.first_name = 'foo'
self.user1.last_name = 'bar'
self.assertEqual(unicode(vote1), "%s - current" % unicode(self.user1))
def test_get_details(self):
ballot_plurality = self.create_current_pl_ballot(seats_available=6)
pl_candidate1 = self.create_candidate(ballot_plurality)
self.election_current.allowed_voters.add(self.user1)
vote = self.election_current.votes.create(account=self.user1)
self.assertEqual(repr(vote.get_details()), repr(
[(ballot_plurality, [])]))
ballot_preferential = self.create_current_pr_ballot(seats_available=2)
pr_candidate1 = self.create_candidate(ballot_preferential)
pr_candidate2 = self.create_candidate(ballot_preferential)
vote_pl1 = vote.pluralities.create(candidate=pl_candidate1)
self.assertEqual(repr(vote.get_details()), repr(
[(ballot_plurality, [vote_pl1]),
(ballot_preferential, [])]))
vote_pr1 = vote.preferentials.create(candidate=pr_candidate1, point=2)
vote_pr2 = vote.preferentials.create(candidate=pr_candidate2, point=3)
self.assertEqual(repr(vote.get_details()), repr(
[(ballot_plurality, [vote_pl1]),
(ballot_preferential, [vote_pr1, vote_pr2])]))
| bsd-3-clause | 4,890,007,329,238,700,000 | 42.55 | 87 | 0.677153 | false |
Ledoux/ShareYourSystem | Pythonlogy/ShareYourSystem/Specials/Predicters/Predicter/tests/01_tests_dynamic_rate/05_test_dynamic_rate_multiagents_onesensor_ExampleCell.py | 1 | 2219 | #/###################/#
# Import modules
#
#ImportModules
import ShareYourSystem as SYS
import scipy.stats
import numpy
numpy.random.seed(4)
#/###################/#
# Build the model
#
#Simulation time
SimulationTimeFloat=1000.
#set
AgentUnitsInt = 100
#SimulationTimeFloat=0.2
BrianingDebugVariable=0.1 if SimulationTimeFloat<0.5 else 25.
#A - transition matrix
JacobianTimeFloat = 30. #(ms)
A = (-1./float(JacobianTimeFloat)
)*SYS.numpy.array([[1.]])
#Define
MyPredicter=SYS.PredicterClass(
).mapSet(
{
'NumscipyingSeedVariable':4,
'BrianingStepTimeFloat':0.05,
'-Populations':[
('|Sensor',{
'RecordingLabelVariable':[0],
#'BrianingDebugVariable':BrianingDebugVariable,
'-Interactions':{
'|Encod':{
#'BrianingDebugVariable':BrianingDebugVariable
}
}
}),
('|Agent',{
'RecordingLabelVariable':[0,1,2],
#'BrianingDebugVariable':BrianingDebugVariable,
'-Interactions':{
'|Fast':{
#'BrianingDebugVariable':BrianingDebugVariable
}
},
'-Traces':{
'|U':{
'RecordingInitFloatsArray':scipy.stats.norm(0.,0.01).rvs(size=AgentUnitsInt)
}
}
#'LeakingNoiseStdVariable':0.01
}),
('|Decoder',{
'RecordingLabelVariable':[0],
#'BrianingDebugVariable':BrianingDebugVariable
'-Interactions':{
'|Slow':{
#'BrianingDebugVariable':BrianingDebugVariable,
#'LeakingWeigthVariable':0.
}
}
})
]
}
).predict(
_DynamicBool=True,
_JacobianVariable=A,
_CommandVariable="#custom:#clock:250*ms:(0.5/"+str(
JacobianTimeFloat
)+")*mV*(int(t==250*ms)+int(t==500*ms))",
_AgentTimeFloat = 10.,
_AgentUnitsInt = AgentUnitsInt,
_DecoderVariable = "#array",
_DecoderMeanFloat = 0.,
_DecoderStdFloat = 20./SYS.numpy.sqrt(AgentUnitsInt),
_InteractionStr = "Rate"
).simulate(
SimulationTimeFloat
)
#/###################/#
# View
#
MyPredicter.mapSet(
{
'PyplotingFigureVariable':{
'figsize':(10,8)
},
'PyplotingGridVariable':(30,30),
'-Panels':[
]
}
).view(
).pyplot(
).show(
)
#/###################/#
# Print
#
#Definition the AttestedStr
print('MyPredicter is ')
SYS._print(MyPredicter)
| mit | -1,234,332,505,470,472,200 | 18.12931 | 83 | 0.61379 | false |
pgroudas/pants | src/python/pants/option/option_value_container.py | 1 | 4590 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.option.ranked_value import RankedValue
class OptionValueContainer(object):
"""A container for option values.
Implements the following functionality:
1) Attribute forwarding.
An attribute can be registered as forwarding to another attribute, and attempts
to read the source attribute's value will be read from the target attribute.
This is necessary so we can qualify registered options by the scope that registered them,
to allow re-registration in inner scopes. This is best explained by example:
Say that in global scope we register an option with two names: [-f, --foo], which writes its
value to the attribute foo. Then in the compile scope we re-register --foo but leave -f alone.
The re-registered --foo will also write to attribute foo. So now -f, which in the compile
scope is unrelated to --foo, can still stomp on its value.
With attribute forwarding we can have the global scope option write to _DEFAULT_foo__, and
the re-registered option to _COMPILE_foo__, and then have the 'f' and 'foo' attributes
forward, appropriately.
Note that only reads are forwarded. The target of the forward must be written to directly.
If the source attribute is set directly, this overrides any forwarding.
2) Value ranking.
Attribute values can be ranked, so that a given attribute's value can only be changed if
the new value has at least as high a rank as the old value. This allows an option value in
an outer scope to override that option's value in an inner scope, when the outer scope's
value comes from a higher ranked source (e.g., the outer value comes from an env var and
the inner one from config).
See ranked_value.py for more details.
Note that this container is suitable for passing as the namespace argument to argparse's
parse_args() method.
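  Illustrative sketch (the attribute names below are made up for exposition):
    vals = OptionValueContainer()
    vals.add_forwardings({'foo': '_DEFAULT_foo__'})
    vals.update({'_DEFAULT_foo__': 42})
    vals.foo          # -> 42, read through the forwarding
    vals['foo']       # -> 42, dict-style access behaves the same way
  Writes go to the target attribute directly, and a RankedValue write only takes
  effect if its rank is at least as high as the rank already stored there.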
"""
def __init__(self):
self._forwardings = {} # src attribute name -> target attribute name.
def add_forwardings(self, forwardings):
"""Add attribute forwardings.
Will overwrite existing forwardings with the same source attributes.
:param forwardings: A map of source attribute name -> attribute to read source's value from.
"""
self._forwardings.update(forwardings)
def update(self, attrs):
"""Set attr values on this object from the data in the attrs dict."""
for k, v in attrs.items():
setattr(self, k, v)
def get(self, key, default=None):
# Support dict-like dynamic access. See also __getitem__ below.
if hasattr(self, key):
return getattr(self, key)
else:
return default
def __setattr__(self, key, value):
if key == '_forwardings':
return super(OptionValueContainer, self).__setattr__(key, value)
if hasattr(self, key):
existing_value = getattr(self, key)
if isinstance(existing_value, RankedValue):
existing_rank = existing_value.rank
else:
# Values without rank are assumed to be flag values set by argparse.
existing_rank = RankedValue.FLAG
else:
existing_rank = RankedValue.NONE
if isinstance(value, RankedValue):
new_rank = value.rank
else:
# Values without rank are assumed to be flag values set by argparse.
new_rank = RankedValue.FLAG
if new_rank >= existing_rank:
# We set values from outer scopes before values from inner scopes, so
# in case of equal rank we overwrite. That way that the inner scope value wins.
super(OptionValueContainer, self).__setattr__(key, value)
def __getitem__(self, key):
# Support natural dynamic access, options[key_var] is more idiomatic than
# getattr(option, key_var).
return getattr(self, key)
def __getattr__(self, key):
# Note: Called only if regular attribute lookup fails, so accesses
# to non-forwarded attributes will be handled the normal way.
if key == '_forwardings':
# In case we get called in copy/deepcopy, which don't invoke the ctor.
raise AttributeError
if key not in self._forwardings:
raise AttributeError('No such forwarded attribute: {}'.format(key))
val = getattr(self, self._forwardings[key])
if isinstance(val, RankedValue):
return val.value
else:
return val
| apache-2.0 | 3,330,725,136,526,867,000 | 38.568966 | 99 | 0.698475 | false |
rdo-management/ironic-discoverd | ironic_discoverd_ramdisk/discover.py | 1 | 8663 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
import os
import subprocess
import tarfile
import tempfile
import netifaces
import requests
LOG = logging.getLogger('ironic-discoverd-ramdisk')
def try_call(*cmd, **kwargs):
strip = kwargs.pop('strip', True)
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
try:
p = subprocess.Popen(cmd, **kwargs)
out, err = p.communicate()
except EnvironmentError as exc:
LOG.warn('command %s failed: %s', cmd, exc)
return
if p.returncode:
LOG.warn('command %s returned failure status %d:\n%s', cmd,
p.returncode, err.strip())
else:
return out.strip() if strip else out
def try_shell(sh, **kwargs):
strip = kwargs.pop('strip', True)
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
kwargs['shell'] = True
p = subprocess.Popen([sh], **kwargs)
out, err = p.communicate()
if p.returncode:
LOG.warn('shell script "%s" failed with code %d:\n%s', sh,
p.returncode, err.strip())
else:
return out.strip() if strip else out
class AccumulatedFailure(object):
    """Accumulates failures without raising exceptions."""
def __init__(self):
self._failures = []
def add(self, fail, *fmt):
"""Add failure with optional formatting."""
if fmt:
fail = fail % fmt
LOG.error('%s', fail)
self._failures.append(fail)
def get_error(self):
"""Get error string or None."""
if not self._failures:
return
msg = ('The following errors were encountered during '
'hardware discovery:\n%s'
% '\n'.join('* %s' % item for item in self._failures))
return msg
def __nonzero__(self):
return bool(self._failures)
__bool__ = __nonzero__
def __repr__(self): # pragma: no cover
# This is for tests
if self:
return '<%s: %s>' % (self.__class__.__name__,
', '.join(self._failures))
else:
return '<%s: success>' % self.__class__.__name__
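# Typical use (illustrative): collect problems without aborting the discovery run.
#
#     failures = AccumulatedFailure()
#     failures.add('no block devices found on %s', hostname)
#     if failures:
#         LOG.error(failures.get_error())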
def discover_basic_properties(data, args):
# These properties might not be present, we don't count it as failure
data['boot_interface'] = args.bootif
data['ipmi_address'] = try_shell(
"ipmitool lan print | grep -e 'IP Address [^S]' | awk '{ print $4 }'")
LOG.info('BMC IP address: %s', data['ipmi_address'])
def discover_network_interfaces(data, failures):
data.setdefault('interfaces', {})
for iface in netifaces.interfaces():
if iface.startswith('lo'):
LOG.info('ignoring local network interface %s', iface)
continue
LOG.debug('found network interface %s', iface)
addrs = netifaces.ifaddresses(iface)
try:
mac = addrs[netifaces.AF_LINK][0]['addr']
except (KeyError, IndexError):
LOG.info('no link information for interface %s in %s',
iface, addrs)
continue
try:
ip = addrs[netifaces.AF_INET][0]['addr']
except (KeyError, IndexError):
LOG.info('no IP address for interface %s', iface)
ip = None
data['interfaces'][iface] = {'mac': mac, 'ip': ip}
if data['interfaces']:
LOG.info('network interfaces: %s', data['interfaces'])
else:
failures.add('no network interfaces found')
def discover_scheduling_properties(data, failures):
scripts = [
('cpus', "grep processor /proc/cpuinfo | wc -l"),
('cpu_arch', "lscpu | grep Architecture | awk '{ print $2 }'"),
('local_gb', "fdisk -l | grep Disk | awk '{print $5}' | head -n 1"),
]
for key, script in scripts:
data[key] = try_shell(script)
LOG.info('value for "%s" field is %s', key, data[key])
ram_info = try_shell(
"dmidecode --type memory | grep Size | awk '{ print $2; }'")
if ram_info:
total_ram = 0
for ram_record in ram_info.split('\n'):
try:
total_ram += int(ram_record)
except ValueError:
pass
data['memory_mb'] = total_ram
LOG.info('total RAM: %s MiB', total_ram)
else:
failures.add('failed to get RAM information')
for key in ('cpus', 'local_gb', 'memory_mb'):
try:
data[key] = int(data[key])
except (KeyError, ValueError, TypeError):
LOG.warn('value for %s is missing or malformed: %s',
key, data.get(key))
data[key] = None
# FIXME(dtantsur): -1 is required to give Ironic some spacing for
# partitioning and may be removed later
if data['local_gb']:
data['local_gb'] = data['local_gb'] / 1024 / 1024 / 1024 - 1
if data['local_gb'] < 1:
LOG.warn('local_gb is less than 1 GiB')
data['local_gb'] = None
def discover_additional_properties(args, data, failures):
hw_args = ('--benchmark', 'cpu', 'disk', 'mem') if args.benchmark else ()
hw_json = try_call('hardware-detect', *hw_args)
if hw_json:
try:
data['data'] = json.loads(hw_json)
except ValueError:
LOG.error('JSON value returned from hardware-detect cannot be '
'decoded:\n%s', hw_json)
failures.add('unable to get extended hardware properties')
else:
failures.add('unable to get extended hardware properties')
def discover_block_devices(data):
block_devices = try_shell(
"lsblk -no TYPE,SERIAL | grep disk | awk '{print $2}'")
if not block_devices:
LOG.warn('unable to get block devices')
return
serials = [item for item in block_devices.split('\n') if item.strip()]
data['block_devices'] = {'serials': serials}
def discover_hardware(args, data, failures):
try_call('modprobe', 'ipmi_msghandler')
try_call('modprobe', 'ipmi_devintf')
try_call('modprobe', 'ipmi_si')
discover_basic_properties(data, args)
discover_network_interfaces(data, failures)
discover_scheduling_properties(data, failures)
if args.use_hardware_detect:
discover_additional_properties(args, data, failures)
discover_block_devices(data)
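# After discover_hardware() the ``data`` dict roughly contains: boot_interface,
# ipmi_address, interfaces, cpus, cpu_arch, local_gb, memory_mb, block_devices
# and, when args.use_hardware_detect is set, extended properties under
# data['data']; call_discoverd() then posts the whole dict as JSON.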
def call_discoverd(args, data, failures):
data['error'] = failures.get_error()
LOG.info('posting collected data to %s', args.callback_url)
resp = requests.post(args.callback_url, data=json.dumps(data))
if resp.status_code >= 400:
LOG.error('discoverd error %d: %s',
resp.status_code,
resp.content.decode('utf-8'))
resp.raise_for_status()
return resp.json()
def collect_logs(args):
files = {args.log_file} | set(args.system_log_file or ())
with tempfile.TemporaryFile() as fp:
with tarfile.open(fileobj=fp, mode='w:gz') as tar:
with tempfile.NamedTemporaryFile() as jrnl_fp:
if try_shell("journalctl > '%s'" % jrnl_fp.name) is not None:
tar.add(jrnl_fp.name, arcname='journal')
else:
LOG.warn('failed to get system journal')
for fname in files:
if os.path.exists(fname):
tar.add(fname)
else:
LOG.warn('log file %s does not exist', fname)
fp.seek(0)
return base64.b64encode(fp.read())
def setup_ipmi_credentials(resp):
user, password = resp['ipmi_username'], resp['ipmi_password']
if try_call('ipmitool', 'user', 'set', 'name', '2', user) is None:
raise RuntimeError('failed to set IPMI user name to %s', user)
if try_call('ipmitool', 'user', 'set', 'password', '2', password) is None:
raise RuntimeError('failed to set IPMI password')
try_call('ipmitool', 'user', 'enable', '2')
try_call('ipmitool', 'channel', 'setaccess', '1', '2',
'link=on', 'ipmi=on', 'callin=on', 'privilege=4')
def fork_and_serve_logs(args):
pass # TODO(dtantsur): implement
| apache-2.0 | -1,589,015,766,572,319,500 | 32.191571 | 78 | 0.587441 | false |
0x90/pyroute2 | pyroute2/netlink/nl80211/__init__.py | 2 | 26383 | '''
NL80211 module
================
TODO
'''
from pyroute2.common import map_namespace
from pyroute2.netlink import genlmsg
from pyroute2.netlink.generic import GenericNetlinkSocket
from pyroute2.netlink.nlsocket import Marshal
from pyroute2.netlink import nla
from pyroute2.netlink import nla_base
# import pdb
import struct
from pyroute2.common import hexdump
# nl80211 commands
NL80211_CMD_UNSPEC = 0
NL80211_CMD_GET_WIPHY = 1
NL80211_CMD_SET_WIPHY = 2
NL80211_CMD_NEW_WIPHY = 3
NL80211_CMD_DEL_WIPHY = 4
NL80211_CMD_GET_INTERFACE = 5
NL80211_CMD_SET_INTERFACE = 6
NL80211_CMD_NEW_INTERFACE = 7
NL80211_CMD_DEL_INTERFACE = 8
NL80211_CMD_GET_KEY = 9
NL80211_CMD_SET_KEY = 10
NL80211_CMD_NEW_KEY = 11
NL80211_CMD_DEL_KEY = 12
NL80211_CMD_GET_BEACON = 13
NL80211_CMD_SET_BEACON = 14
NL80211_CMD_START_AP = 15
NL80211_CMD_NEW_BEACON = NL80211_CMD_START_AP
NL80211_CMD_STOP_AP = 16
NL80211_CMD_DEL_BEACON = NL80211_CMD_STOP_AP
NL80211_CMD_GET_STATION = 17
NL80211_CMD_SET_STATION = 18
NL80211_CMD_NEW_STATION = 19
NL80211_CMD_DEL_STATION = 20
NL80211_CMD_GET_MPATH = 21
NL80211_CMD_SET_MPATH = 22
NL80211_CMD_NEW_MPATH = 23
NL80211_CMD_DEL_MPATH = 24
NL80211_CMD_SET_BSS = 25
NL80211_CMD_SET_REG = 26
NL80211_CMD_REQ_SET_REG = 27
NL80211_CMD_GET_MESH_CONFIG = 28
NL80211_CMD_SET_MESH_CONFIG = 29
NL80211_CMD_SET_MGMT_EXTRA_IE = 30
NL80211_CMD_GET_REG = 31
NL80211_CMD_GET_SCAN = 32
NL80211_CMD_TRIGGER_SCAN = 33
NL80211_CMD_NEW_SCAN_RESULTS = 34
NL80211_CMD_SCAN_ABORTED = 35
NL80211_CMD_REG_CHANGE = 36
NL80211_CMD_AUTHENTICATE = 37
NL80211_CMD_ASSOCIATE = 38
NL80211_CMD_DEAUTHENTICATE = 39
NL80211_CMD_DISASSOCIATE = 40
NL80211_CMD_MICHAEL_MIC_FAILURE = 41
NL80211_CMD_REG_BEACON_HINT = 42
NL80211_CMD_JOIN_IBSS = 43
NL80211_CMD_LEAVE_IBSS = 44
NL80211_CMD_TESTMODE = 45
NL80211_CMD_CONNECT = 46
NL80211_CMD_ROAM = 47
NL80211_CMD_DISCONNECT = 48
NL80211_CMD_SET_WIPHY_NETNS = 49
NL80211_CMD_GET_SURVEY = 50
NL80211_CMD_NEW_SURVEY_RESULTS = 51
NL80211_CMD_SET_PMKSA = 52
NL80211_CMD_DEL_PMKSA = 53
NL80211_CMD_FLUSH_PMKSA = 54
NL80211_CMD_REMAIN_ON_CHANNEL = 55
NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL = 56
NL80211_CMD_SET_TX_BITRATE_MASK = 57
NL80211_CMD_REGISTER_FRAME = 58
NL80211_CMD_REGISTER_ACTION = NL80211_CMD_REGISTER_FRAME
NL80211_CMD_FRAME = 59
NL80211_CMD_ACTION = NL80211_CMD_FRAME
NL80211_CMD_FRAME_TX_STATUS = 60
NL80211_CMD_ACTION_TX_STATUS = NL80211_CMD_FRAME_TX_STATUS
NL80211_CMD_SET_POWER_SAVE = 61
NL80211_CMD_GET_POWER_SAVE = 62
NL80211_CMD_SET_CQM = 63
NL80211_CMD_NOTIFY_CQM = 64
NL80211_CMD_SET_CHANNEL = 65
NL80211_CMD_SET_WDS_PEER = 66
NL80211_CMD_FRAME_WAIT_CANCEL = 67
NL80211_CMD_JOIN_MESH = 68
NL80211_CMD_LEAVE_MESH = 69
NL80211_CMD_UNPROT_DEAUTHENTICATE = 70
NL80211_CMD_UNPROT_DISASSOCIATE = 71
NL80211_CMD_NEW_PEER_CANDIDATE = 72
NL80211_CMD_GET_WOWLAN = 73
NL80211_CMD_SET_WOWLAN = 74
NL80211_CMD_START_SCHED_SCAN = 75
NL80211_CMD_STOP_SCHED_SCAN = 76
NL80211_CMD_SCHED_SCAN_RESULTS = 77
NL80211_CMD_SCHED_SCAN_STOPPED = 78
NL80211_CMD_SET_REKEY_OFFLOAD = 79
NL80211_CMD_PMKSA_CANDIDATE = 80
NL80211_CMD_TDLS_OPER = 81
NL80211_CMD_TDLS_MGMT = 82
NL80211_CMD_UNEXPECTED_FRAME = 83
NL80211_CMD_PROBE_CLIENT = 84
NL80211_CMD_REGISTER_BEACONS = 85
NL80211_CMD_UNEXPECTED_4ADDR_FRAME = 86
NL80211_CMD_SET_NOACK_MAP = 87
NL80211_CMD_CH_SWITCH_NOTIFY = 88
NL80211_CMD_START_P2P_DEVICE = 89
NL80211_CMD_STOP_P2P_DEVICE = 90
NL80211_CMD_CONN_FAILED = 91
NL80211_CMD_SET_MCAST_RATE = 92
NL80211_CMD_SET_MAC_ACL = 93
NL80211_CMD_RADAR_DETECT = 94
NL80211_CMD_GET_PROTOCOL_FEATURES = 95
NL80211_CMD_UPDATE_FT_IES = 96
NL80211_CMD_FT_EVENT = 97
NL80211_CMD_CRIT_PROTOCOL_START = 98
NL80211_CMD_CRIT_PROTOCOL_STOP = 99
NL80211_CMD_GET_COALESCE = 100
NL80211_CMD_SET_COALESCE = 101
NL80211_CMD_CHANNEL_SWITCH = 102
NL80211_CMD_VENDOR = 103
NL80211_CMD_SET_QOS_MAP = 104
NL80211_CMD_ADD_TX_TS = 105
NL80211_CMD_DEL_TX_TS = 106
NL80211_CMD_GET_MPP = 107
NL80211_CMD_JOIN_OCB = 108
NL80211_CMD_LEAVE_OCB = 109
NL80211_CMD_CH_SWITCH_STARTED_NOTIFY = 110
NL80211_CMD_TDLS_CHANNEL_SWITCH = 111
NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH = 112
NL80211_CMD_WIPHY_REG_CHANGE = 113
NL80211_CMD_MAX = NL80211_CMD_WIPHY_REG_CHANGE
(NL80211_NAMES, NL80211_VALUES) = map_namespace('NL80211_CMD_', globals())
NL80211_BSS_ELEMENTS_SSID = 0
NL80211_BSS_ELEMENTS_SUPPORTED_RATES = 1
NL80211_BSS_ELEMENTS_CHANNEL = 3
NL80211_BSS_ELEMENTS_VENDOR = 221
BSS_MEMBERSHIP_SELECTOR_HT_PHY = 127
BSS_MEMBERSHIP_SELECTOR_VHT_PHY = 126
class nl80211cmd(genlmsg):
nla_map = (('NL80211_ATTR_UNSPEC', 'none'),
('NL80211_ATTR_WIPHY', 'hex'),
('NL80211_ATTR_WIPHY_NAME', 'asciiz'),
('NL80211_ATTR_IFINDEX', 'uint32'),
('NL80211_ATTR_IFNAME', 'asciiz'),
('NL80211_ATTR_IFTYPE', 'hex'),
('NL80211_ATTR_MAC', 'l2addr'),
('NL80211_ATTR_KEY_DATA', 'hex'),
('NL80211_ATTR_KEY_IDX', 'hex'),
('NL80211_ATTR_KEY_CIPHER', 'uint32'),
('NL80211_ATTR_KEY_SEQ', 'hex'),
('NL80211_ATTR_KEY_DEFAULT', 'hex'),
('NL80211_ATTR_BEACON_INTERVAL', 'hex'),
('NL80211_ATTR_DTIM_PERIOD', 'hex'),
('NL80211_ATTR_BEACON_HEAD', 'hex'),
('NL80211_ATTR_BEACON_TAIL', 'hex'),
('NL80211_ATTR_STA_AID', 'hex'),
('NL80211_ATTR_STA_FLAGS', 'hex'),
('NL80211_ATTR_STA_LISTEN_INTERVAL', 'hex'),
('NL80211_ATTR_STA_SUPPORTED_RATES', 'hex'),
('NL80211_ATTR_STA_VLAN', 'hex'),
('NL80211_ATTR_STA_INFO', 'hex'),
('NL80211_ATTR_WIPHY_BANDS', 'hex'),
('NL80211_ATTR_MNTR_FLAGS', 'hex'),
('NL80211_ATTR_MESH_ID', 'hex'),
('NL80211_ATTR_STA_PLINK_ACTION', 'hex'),
('NL80211_ATTR_MPATH_NEXT_HOP', 'hex'),
('NL80211_ATTR_MPATH_INFO', 'hex'),
('NL80211_ATTR_BSS_CTS_PROT', 'hex'),
('NL80211_ATTR_BSS_SHORT_PREAMBLE', 'hex'),
('NL80211_ATTR_BSS_SHORT_SLOT_TIME', 'hex'),
('NL80211_ATTR_HT_CAPABILITY', 'hex'),
('NL80211_ATTR_SUPPORTED_IFTYPES', 'hex'),
('NL80211_ATTR_REG_ALPHA2', 'hex'),
('NL80211_ATTR_REG_RULES', 'hex'),
('NL80211_ATTR_MESH_CONFIG', 'hex'),
('NL80211_ATTR_BSS_BASIC_RATES', 'hex'),
('NL80211_ATTR_WIPHY_TXQ_PARAMS', 'hex'),
('NL80211_ATTR_WIPHY_FREQ', 'hex'),
('NL80211_ATTR_WIPHY_CHANNEL_TYPE', 'hex'),
('NL80211_ATTR_KEY_DEFAULT_MGMT', 'hex'),
('NL80211_ATTR_MGMT_SUBTYPE', 'hex'),
('NL80211_ATTR_IE', 'hex'),
('NL80211_ATTR_MAX_NUM_SCAN_SSIDS', 'hex'),
('NL80211_ATTR_SCAN_FREQUENCIES', 'hex'),
('NL80211_ATTR_SCAN_SSIDS', 'hex'),
('NL80211_ATTR_GENERATION', 'hex'),
('NL80211_ATTR_BSS', 'bss'),
('NL80211_ATTR_REG_INITIATOR', 'hex'),
('NL80211_ATTR_REG_TYPE', 'hex'),
('NL80211_ATTR_SUPPORTED_COMMANDS', 'hex'),
('NL80211_ATTR_FRAME', 'hex'),
('NL80211_ATTR_SSID', 'hex'),
('NL80211_ATTR_AUTH_TYPE', 'hex'),
('NL80211_ATTR_REASON_CODE', 'hex'),
('NL80211_ATTR_KEY_TYPE', 'hex'),
('NL80211_ATTR_MAX_SCAN_IE_LEN', 'hex'),
('NL80211_ATTR_CIPHER_SUITES', 'hex'),
('NL80211_ATTR_FREQ_BEFORE', 'hex'),
('NL80211_ATTR_FREQ_AFTER', 'hex'),
('NL80211_ATTR_FREQ_FIXED', 'hex'),
('NL80211_ATTR_WIPHY_RETRY_SHORT', 'hex'),
('NL80211_ATTR_WIPHY_RETRY_LONG', 'hex'),
('NL80211_ATTR_WIPHY_FRAG_THRESHOLD', 'hex'),
('NL80211_ATTR_WIPHY_RTS_THRESHOLD', 'hex'),
('NL80211_ATTR_TIMED_OUT', 'hex'),
('NL80211_ATTR_USE_MFP', 'hex'),
('NL80211_ATTR_STA_FLAGS2', 'hex'),
('NL80211_ATTR_CONTROL_PORT', 'hex'),
('NL80211_ATTR_TESTDATA', 'hex'),
('NL80211_ATTR_PRIVACY', 'hex'),
('NL80211_ATTR_DISCONNECTED_BY_AP', 'hex'),
('NL80211_ATTR_STATUS_CODE', 'hex'),
('NL80211_ATTR_CIPHER_SUITES_PAIRWISE', 'hex'),
('NL80211_ATTR_CIPHER_SUITE_GROUP', 'hex'),
('NL80211_ATTR_WPA_VERSIONS', 'hex'),
('NL80211_ATTR_AKM_SUITES', 'hex'),
('NL80211_ATTR_REQ_IE', 'hex'),
('NL80211_ATTR_RESP_IE', 'hex'),
('NL80211_ATTR_PREV_BSSID', 'hex'),
('NL80211_ATTR_KEY', 'hex'),
('NL80211_ATTR_KEYS', 'hex'),
('NL80211_ATTR_PID', 'hex'),
('NL80211_ATTR_4ADDR', 'hex'),
('NL80211_ATTR_SURVEY_INFO', 'hex'),
('NL80211_ATTR_PMKID', 'hex'),
('NL80211_ATTR_MAX_NUM_PMKIDS', 'hex'),
('NL80211_ATTR_DURATION', 'hex'),
('NL80211_ATTR_COOKIE', 'hex'),
('NL80211_ATTR_WIPHY_COVERAGE_CLASS', 'hex'),
('NL80211_ATTR_TX_RATES', 'hex'),
('NL80211_ATTR_FRAME_MATCH', 'hex'),
('NL80211_ATTR_ACK', 'hex'),
('NL80211_ATTR_PS_STATE', 'hex'),
('NL80211_ATTR_CQM', 'hex'),
('NL80211_ATTR_LOCAL_STATE_CHANGE', 'hex'),
('NL80211_ATTR_AP_ISOLATE', 'hex'),
('NL80211_ATTR_WIPHY_TX_POWER_SETTING', 'hex'),
('NL80211_ATTR_WIPHY_TX_POWER_LEVEL', 'hex'),
('NL80211_ATTR_TX_FRAME_TYPES', 'hex'),
('NL80211_ATTR_RX_FRAME_TYPES', 'hex'),
('NL80211_ATTR_FRAME_TYPE', 'hex'),
('NL80211_ATTR_CONTROL_PORT_ETHERTYPE', 'hex'),
('NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT', 'hex'),
('NL80211_ATTR_SUPPORT_IBSS_RSN', 'hex'),
('NL80211_ATTR_WIPHY_ANTENNA_TX', 'hex'),
('NL80211_ATTR_WIPHY_ANTENNA_RX', 'hex'),
('NL80211_ATTR_MCAST_RATE', 'hex'),
('NL80211_ATTR_OFFCHANNEL_TX_OK', 'hex'),
('NL80211_ATTR_BSS_HT_OPMODE', 'hex'),
('NL80211_ATTR_KEY_DEFAULT_TYPES', 'hex'),
('NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION', 'hex'),
('NL80211_ATTR_MESH_SETUP', 'hex'),
('NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX', 'hex'),
('NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX', 'hex'),
('NL80211_ATTR_SUPPORT_MESH_AUTH', 'hex'),
('NL80211_ATTR_STA_PLINK_STATE', 'hex'),
('NL80211_ATTR_WOWLAN_TRIGGERS', 'hex'),
('NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED', 'hex'),
('NL80211_ATTR_SCHED_SCAN_INTERVAL', 'hex'),
('NL80211_ATTR_INTERFACE_COMBINATIONS', 'hex'),
('NL80211_ATTR_SOFTWARE_IFTYPES', 'hex'),
('NL80211_ATTR_REKEY_DATA', 'hex'),
('NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS', 'hex'),
('NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN', 'hex'),
('NL80211_ATTR_SCAN_SUPP_RATES', 'hex'),
('NL80211_ATTR_HIDDEN_SSID', 'hex'),
('NL80211_ATTR_IE_PROBE_RESP', 'hex'),
('NL80211_ATTR_IE_ASSOC_RESP', 'hex'),
('NL80211_ATTR_STA_WME', 'hex'),
('NL80211_ATTR_SUPPORT_AP_UAPSD', 'hex'),
('NL80211_ATTR_ROAM_SUPPORT', 'hex'),
('NL80211_ATTR_SCHED_SCAN_MATCH', 'hex'),
('NL80211_ATTR_MAX_MATCH_SETS', 'hex'),
('NL80211_ATTR_PMKSA_CANDIDATE', 'hex'),
('NL80211_ATTR_TX_NO_CCK_RATE', 'hex'),
('NL80211_ATTR_TDLS_ACTION', 'hex'),
('NL80211_ATTR_TDLS_DIALOG_TOKEN', 'hex'),
('NL80211_ATTR_TDLS_OPERATION', 'hex'),
('NL80211_ATTR_TDLS_SUPPORT', 'hex'),
('NL80211_ATTR_TDLS_EXTERNAL_SETUP', 'hex'),
('NL80211_ATTR_DEVICE_AP_SME', 'hex'),
('NL80211_ATTR_DONT_WAIT_FOR_ACK', 'hex'),
('NL80211_ATTR_FEATURE_FLAGS', 'hex'),
('NL80211_ATTR_PROBE_RESP_OFFLOAD', 'hex'),
('NL80211_ATTR_PROBE_RESP', 'hex'),
('NL80211_ATTR_DFS_REGION', 'hex'),
('NL80211_ATTR_DISABLE_HT', 'hex'),
('NL80211_ATTR_HT_CAPABILITY_MASK', 'hex'),
('NL80211_ATTR_NOACK_MAP', 'hex'),
('NL80211_ATTR_INACTIVITY_TIMEOUT', 'hex'),
('NL80211_ATTR_RX_SIGNAL_DBM', 'hex'),
('NL80211_ATTR_BG_SCAN_PERIOD', 'hex'),
('NL80211_ATTR_WDEV', 'uint32'),
('NL80211_ATTR_USER_REG_HINT_TYPE', 'hex'),
('NL80211_ATTR_CONN_FAILED_REASON', 'hex'),
('NL80211_ATTR_SAE_DATA', 'hex'),
('NL80211_ATTR_VHT_CAPABILITY', 'hex'),
('NL80211_ATTR_SCAN_FLAGS', 'hex'),
('NL80211_ATTR_CHANNEL_WIDTH', 'uint32'),
('NL80211_ATTR_CENTER_FREQ1', 'hex'),
('NL80211_ATTR_CENTER_FREQ2', 'hex'),
('NL80211_ATTR_P2P_CTWINDOW', 'hex'),
('NL80211_ATTR_P2P_OPPPS', 'hex'),
('NL80211_ATTR_LOCAL_MESH_POWER_MODE', 'hex'),
('NL80211_ATTR_ACL_POLICY', 'hex'),
('NL80211_ATTR_MAC_ADDRS', 'hex'),
('NL80211_ATTR_MAC_ACL_MAX', 'hex'),
('NL80211_ATTR_RADAR_EVENT', 'hex'),
('NL80211_ATTR_EXT_CAPA', 'hex'),
('NL80211_ATTR_EXT_CAPA_MASK', 'hex'),
('NL80211_ATTR_STA_CAPABILITY', 'hex'),
('NL80211_ATTR_STA_EXT_CAPABILITY', 'hex'),
('NL80211_ATTR_PROTOCOL_FEATURES', 'hex'),
('NL80211_ATTR_SPLIT_WIPHY_DUMP', 'hex'),
('NL80211_ATTR_DISABLE_VHT', 'hex'),
('NL80211_ATTR_VHT_CAPABILITY_MASK', 'hex'),
('NL80211_ATTR_MDID', 'hex'),
('NL80211_ATTR_IE_RIC', 'hex'),
('NL80211_ATTR_CRIT_PROT_ID', 'hex'),
('NL80211_ATTR_MAX_CRIT_PROT_DURATION', 'hex'),
('NL80211_ATTR_PEER_AID', 'hex'),
('NL80211_ATTR_COALESCE_RULE', 'hex'),
('NL80211_ATTR_CH_SWITCH_COUNT', 'hex'),
('NL80211_ATTR_CH_SWITCH_BLOCK_TX', 'hex'),
('NL80211_ATTR_CSA_IES', 'hex'),
('NL80211_ATTR_CSA_C_OFF_BEACON', 'hex'),
('NL80211_ATTR_CSA_C_OFF_PRESP', 'hex'),
('NL80211_ATTR_RXMGMT_FLAGS', 'hex'),
('NL80211_ATTR_STA_SUPPORTED_CHANNELS', 'hex'),
('NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES', 'hex'),
('NL80211_ATTR_HANDLE_DFS', 'hex'),
('NL80211_ATTR_SUPPORT_5_MHZ', 'hex'),
('NL80211_ATTR_SUPPORT_10_MHZ', 'hex'),
('NL80211_ATTR_OPMODE_NOTIF', 'hex'),
('NL80211_ATTR_VENDOR_ID', 'hex'),
('NL80211_ATTR_VENDOR_SUBCMD', 'hex'),
('NL80211_ATTR_VENDOR_DATA', 'hex'),
('NL80211_ATTR_VENDOR_EVENTS', 'hex'),
('NL80211_ATTR_QOS_MAP', 'hex'),
('NL80211_ATTR_MAC_HINT', 'hex'),
('NL80211_ATTR_WIPHY_FREQ_HINT', 'hex'),
('NL80211_ATTR_MAX_AP_ASSOC_STA', 'hex'),
('NL80211_ATTR_TDLS_PEER_CAPABILITY', 'hex'),
('NL80211_ATTR_SOCKET_OWNER', 'hex'),
('NL80211_ATTR_CSA_C_OFFSETS_TX', 'hex'),
('NL80211_ATTR_MAX_CSA_COUNTERS', 'hex'),
('NL80211_ATTR_TDLS_INITIATOR', 'hex'),
('NL80211_ATTR_USE_RRM', 'hex'),
('NL80211_ATTR_WIPHY_DYN_ACK', 'hex'),
('NL80211_ATTR_TSID', 'hex'),
('NL80211_ATTR_USER_PRIO', 'hex'),
('NL80211_ATTR_ADMITTED_TIME', 'hex'),
('NL80211_ATTR_SMPS_MODE', 'hex'),
('NL80211_ATTR_OPER_CLASS', 'hex'),
('NL80211_ATTR_MAC_MASK', 'hex'),
('NL80211_ATTR_WIPHY_SELF_MANAGED_REG', 'hex'),
('NUM_NL80211_ATTR', 'hex'))
class bss(nla):
class elementsBinary(nla_base):
def binary_supported_rates(self, rawdata):
# pdb.set_trace()
string = ""
for byteRaw in rawdata:
(byte,) = struct.unpack("B", byteRaw)
r = byte & 0x7f
if r == BSS_MEMBERSHIP_SELECTOR_VHT_PHY and byte & 0x80:
string += "VHT"
elif r == BSS_MEMBERSHIP_SELECTOR_HT_PHY and byte & 0x80:
string += "HT"
else:
string += "%d.%d" % (r / 2, 5 * (r & 1))
string += "%s " % ("*" if byte & 0x80 else "")
return string
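            # Example (illustrative): raw byte 0x82 is the basic rate 1 Mbit/s and
            # decodes to "1.0* ", while 0x0b decodes to "5.5 "; rates are stored in
            # units of 500 kbit/s with the high bit flagging basic/selector entries.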
def binary_vendor(self, rawdata):
'''
Extract vendor data
'''
vendor = {}
# pdb.set_trace()
size = len(rawdata)
# if len > 4 and rawdata[0] == ms_oui[0]
# and rawdata[1] == ms_oui[1] and rawdata[2] == ms_oui[2]
if size < 3:
                    vendor["VENDOR_NAME"] = ("Vendor specific: <too short data: "
                                             + hexdump(rawdata))
return vendor
def decode_nlas(self):
return
def decode(self):
nla_base.decode(self)
self.value = {}
init = self.buf.tell()
while (self.buf.tell()-init) < (self.length-4):
(msg_type, length) = struct.unpack('BB', self.buf.read(2))
data = self.buf.read(length)
if msg_type == NL80211_BSS_ELEMENTS_SSID:
self.value["SSID"] = data
if msg_type == NL80211_BSS_ELEMENTS_SUPPORTED_RATES:
supported_rates = self.binary_supported_rates(data)
self.value["SUPPORTED_RATES"] = supported_rates
if msg_type == NL80211_BSS_ELEMENTS_CHANNEL:
(channel,) = struct.unpack("B", data[0])
self.value["CHANNEL"] = channel
if msg_type == NL80211_BSS_ELEMENTS_VENDOR:
self.binary_vendor(data)
# if catch == 0:
# self.value["NL80211_BSS_ELEMENTS_UNKNOWN"+str(msg_type)]=hexdump(data)
self.buf.seek(init)
# self.value["NL80211_BSS_ELEMENTS_HEXDUMP"] =
# hexdump(self.buf.read(self.length))
self.buf.seek(init)
prefix = 'NL80211_BSS_'
nla_map = (('NL80211_BSS_UNSPEC', 'none'),
('NL80211_BSS_BSSID', 'hex'),
('NL80211_BSS_FREQUENCY', 'uint32'),
('NL80211_BSS_TSF', 'uint64'),
('NL80211_BSS_BEACON_INTERVAL', 'uint16'),
('NL80211_BSS_CAPABILITY', 'uint8'),
('NL80211_BSS_INFORMATION_ELEMENTS', 'elementsBinary'),
('NL80211_BSS_SIGNAL_MBM', 'uint32'),
('NL80211_BSS_STATUS', 'uint32'),
('NL80211_BSS_SEEN_MS_AGO', 'uint32'),
('NL80211_BSS_BEACON_IES', 'hex'),
('NL80211_BSS_CHAN_WIDTH', 'uint32'),
('NL80211_BSS_BEACON_TSF', 'uint64')
)
class MarshalNl80211(Marshal):
msg_map = {NL80211_CMD_UNSPEC: nl80211cmd,
NL80211_CMD_GET_WIPHY: nl80211cmd,
NL80211_CMD_SET_WIPHY: nl80211cmd,
NL80211_CMD_NEW_WIPHY: nl80211cmd,
NL80211_CMD_DEL_WIPHY: nl80211cmd,
NL80211_CMD_GET_INTERFACE: nl80211cmd,
NL80211_CMD_SET_INTERFACE: nl80211cmd,
NL80211_CMD_NEW_INTERFACE: nl80211cmd,
NL80211_CMD_DEL_INTERFACE: nl80211cmd,
NL80211_CMD_GET_KEY: nl80211cmd,
NL80211_CMD_SET_KEY: nl80211cmd,
NL80211_CMD_NEW_KEY: nl80211cmd,
NL80211_CMD_DEL_KEY: nl80211cmd,
NL80211_CMD_GET_BEACON: nl80211cmd,
NL80211_CMD_SET_BEACON: nl80211cmd,
NL80211_CMD_START_AP: nl80211cmd,
NL80211_CMD_NEW_BEACON: nl80211cmd,
NL80211_CMD_STOP_AP: nl80211cmd,
NL80211_CMD_DEL_BEACON: nl80211cmd,
NL80211_CMD_GET_STATION: nl80211cmd,
NL80211_CMD_SET_STATION: nl80211cmd,
NL80211_CMD_NEW_STATION: nl80211cmd,
NL80211_CMD_DEL_STATION: nl80211cmd,
NL80211_CMD_GET_MPATH: nl80211cmd,
NL80211_CMD_SET_MPATH: nl80211cmd,
NL80211_CMD_NEW_MPATH: nl80211cmd,
NL80211_CMD_DEL_MPATH: nl80211cmd,
NL80211_CMD_SET_BSS: nl80211cmd,
NL80211_CMD_SET_REG: nl80211cmd,
NL80211_CMD_REQ_SET_REG: nl80211cmd,
NL80211_CMD_GET_MESH_CONFIG: nl80211cmd,
NL80211_CMD_SET_MESH_CONFIG: nl80211cmd,
NL80211_CMD_SET_MGMT_EXTRA_IE: nl80211cmd,
NL80211_CMD_GET_REG: nl80211cmd,
NL80211_CMD_GET_SCAN: nl80211cmd,
NL80211_CMD_TRIGGER_SCAN: nl80211cmd,
NL80211_CMD_NEW_SCAN_RESULTS: nl80211cmd,
NL80211_CMD_SCAN_ABORTED: nl80211cmd,
NL80211_CMD_REG_CHANGE: nl80211cmd,
NL80211_CMD_AUTHENTICATE: nl80211cmd,
NL80211_CMD_ASSOCIATE: nl80211cmd,
NL80211_CMD_DEAUTHENTICATE: nl80211cmd,
NL80211_CMD_DISASSOCIATE: nl80211cmd,
NL80211_CMD_MICHAEL_MIC_FAILURE: nl80211cmd,
NL80211_CMD_REG_BEACON_HINT: nl80211cmd,
NL80211_CMD_JOIN_IBSS: nl80211cmd,
NL80211_CMD_LEAVE_IBSS: nl80211cmd,
NL80211_CMD_TESTMODE: nl80211cmd,
NL80211_CMD_CONNECT: nl80211cmd,
NL80211_CMD_ROAM: nl80211cmd,
NL80211_CMD_DISCONNECT: nl80211cmd,
NL80211_CMD_SET_WIPHY_NETNS: nl80211cmd,
NL80211_CMD_GET_SURVEY: nl80211cmd,
NL80211_CMD_NEW_SURVEY_RESULTS: nl80211cmd,
NL80211_CMD_SET_PMKSA: nl80211cmd,
NL80211_CMD_DEL_PMKSA: nl80211cmd,
NL80211_CMD_FLUSH_PMKSA: nl80211cmd,
NL80211_CMD_REMAIN_ON_CHANNEL: nl80211cmd,
NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL: nl80211cmd,
NL80211_CMD_SET_TX_BITRATE_MASK: nl80211cmd,
NL80211_CMD_REGISTER_FRAME: nl80211cmd,
NL80211_CMD_REGISTER_ACTION: nl80211cmd,
NL80211_CMD_FRAME: nl80211cmd,
NL80211_CMD_ACTION: nl80211cmd,
NL80211_CMD_FRAME_TX_STATUS: nl80211cmd,
NL80211_CMD_ACTION_TX_STATUS: nl80211cmd,
NL80211_CMD_SET_POWER_SAVE: nl80211cmd,
NL80211_CMD_GET_POWER_SAVE: nl80211cmd,
NL80211_CMD_SET_CQM: nl80211cmd,
NL80211_CMD_NOTIFY_CQM: nl80211cmd,
NL80211_CMD_SET_CHANNEL: nl80211cmd,
NL80211_CMD_SET_WDS_PEER: nl80211cmd,
NL80211_CMD_FRAME_WAIT_CANCEL: nl80211cmd,
NL80211_CMD_JOIN_MESH: nl80211cmd,
NL80211_CMD_LEAVE_MESH: nl80211cmd,
NL80211_CMD_UNPROT_DEAUTHENTICATE: nl80211cmd,
NL80211_CMD_UNPROT_DISASSOCIATE: nl80211cmd,
NL80211_CMD_NEW_PEER_CANDIDATE: nl80211cmd,
NL80211_CMD_GET_WOWLAN: nl80211cmd,
NL80211_CMD_SET_WOWLAN: nl80211cmd,
NL80211_CMD_START_SCHED_SCAN: nl80211cmd,
NL80211_CMD_STOP_SCHED_SCAN: nl80211cmd,
NL80211_CMD_SCHED_SCAN_RESULTS: nl80211cmd,
NL80211_CMD_SCHED_SCAN_STOPPED: nl80211cmd,
NL80211_CMD_SET_REKEY_OFFLOAD: nl80211cmd,
NL80211_CMD_PMKSA_CANDIDATE: nl80211cmd,
NL80211_CMD_TDLS_OPER: nl80211cmd,
NL80211_CMD_TDLS_MGMT: nl80211cmd,
NL80211_CMD_UNEXPECTED_FRAME: nl80211cmd,
NL80211_CMD_PROBE_CLIENT: nl80211cmd,
NL80211_CMD_REGISTER_BEACONS: nl80211cmd,
NL80211_CMD_UNEXPECTED_4ADDR_FRAME: nl80211cmd,
NL80211_CMD_SET_NOACK_MAP: nl80211cmd,
NL80211_CMD_CH_SWITCH_NOTIFY: nl80211cmd,
NL80211_CMD_START_P2P_DEVICE: nl80211cmd,
NL80211_CMD_STOP_P2P_DEVICE: nl80211cmd,
NL80211_CMD_CONN_FAILED: nl80211cmd,
NL80211_CMD_SET_MCAST_RATE: nl80211cmd,
NL80211_CMD_SET_MAC_ACL: nl80211cmd,
NL80211_CMD_RADAR_DETECT: nl80211cmd,
NL80211_CMD_GET_PROTOCOL_FEATURES: nl80211cmd,
NL80211_CMD_UPDATE_FT_IES: nl80211cmd,
NL80211_CMD_FT_EVENT: nl80211cmd,
NL80211_CMD_CRIT_PROTOCOL_START: nl80211cmd,
NL80211_CMD_CRIT_PROTOCOL_STOP: nl80211cmd,
NL80211_CMD_GET_COALESCE: nl80211cmd,
NL80211_CMD_SET_COALESCE: nl80211cmd,
NL80211_CMD_CHANNEL_SWITCH: nl80211cmd,
NL80211_CMD_VENDOR: nl80211cmd,
NL80211_CMD_SET_QOS_MAP: nl80211cmd,
NL80211_CMD_ADD_TX_TS: nl80211cmd,
NL80211_CMD_DEL_TX_TS: nl80211cmd,
NL80211_CMD_GET_MPP: nl80211cmd,
NL80211_CMD_JOIN_OCB: nl80211cmd,
NL80211_CMD_LEAVE_OCB: nl80211cmd,
NL80211_CMD_CH_SWITCH_STARTED_NOTIFY: nl80211cmd,
NL80211_CMD_TDLS_CHANNEL_SWITCH: nl80211cmd,
NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH: nl80211cmd,
NL80211_CMD_WIPHY_REG_CHANGE: nl80211cmd}
def fix_message(self, msg):
try:
msg['event'] = NL80211_VALUES[msg['cmd']]
except Exception:
pass
class NL80211(GenericNetlinkSocket):
def __init__(self):
GenericNetlinkSocket.__init__(self)
self.marshal = MarshalNl80211()
    def bind(self, groups=0, async_cache=False):
        # 'async' became a reserved keyword (Python 3.7+); the flag is therefore
        # named 'async_cache' here and passed through positionally.
        GenericNetlinkSocket.bind(self, 'nl80211', nl80211cmd,
                                  groups, None, async_cache)
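# Minimal usage sketch (illustrative only; assumes the GenericNetlinkSocket base
# class referenced above provides the usual connect/receive plumbing):
#
#     nl = NL80211()
#     nl.bind()   # resolves the 'nl80211' generic netlink family
#     # received messages are decoded into nl80211cmd objects via MarshalNl80211.msg_map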
| apache-2.0 | -3,365,363,244,516,462,000 | 43.118729 | 95 | 0.544479 | false |
wakiyamap/electrum-mona | electrum_mona/gui/qt/network_dialog.py | 1 | 20468 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import time
from enum import IntEnum
from typing import Tuple, TYPE_CHECKING
import threading
from PyQt5.QtCore import Qt, pyqtSignal, QThread
from PyQt5.QtWidgets import (QTreeWidget, QTreeWidgetItem, QMenu, QGridLayout, QComboBox,
QLineEdit, QDialog, QVBoxLayout, QHeaderView, QCheckBox,
QTabWidget, QWidget, QLabel)
from PyQt5.QtGui import QFontMetrics
from electrum_mona.i18n import _
from electrum_mona import constants, blockchain, util
from electrum_mona.interface import ServerAddr, PREFERRED_NETWORK_PROTOCOL
from electrum_mona.network import Network
from electrum_mona.logging import get_logger
from .util import (Buttons, CloseButton, HelpButton, read_QIcon, char_width_in_lineedit,
PasswordLineEdit)
if TYPE_CHECKING:
from electrum_mona.simple_config import SimpleConfig
_logger = get_logger(__name__)
protocol_names = ['TCP', 'SSL']
protocol_letters = 'ts'
class NetworkDialog(QDialog):
def __init__(self, *, network: Network, config: 'SimpleConfig', network_updated_signal_obj):
QDialog.__init__(self)
self.setWindowTitle(_('Network'))
self.setMinimumSize(500, 500)
self.nlayout = NetworkChoiceLayout(network, config)
self.network_updated_signal_obj = network_updated_signal_obj
vbox = QVBoxLayout(self)
vbox.addLayout(self.nlayout.layout())
vbox.addLayout(Buttons(CloseButton(self)))
self.network_updated_signal_obj.network_updated_signal.connect(
self.on_update)
util.register_callback(self.on_network, ['network_updated'])
self._cleaned_up = False
def on_network(self, event, *args):
signal_obj = self.network_updated_signal_obj
if signal_obj:
signal_obj.network_updated_signal.emit(event, args)
def on_update(self):
self.nlayout.update()
def clean_up(self):
if self._cleaned_up:
return
self._cleaned_up = True
self.nlayout.clean_up()
self.network_updated_signal_obj.network_updated_signal.disconnect()
self.network_updated_signal_obj = None
class NodesListWidget(QTreeWidget):
"""List of connected servers."""
SERVER_ADDR_ROLE = Qt.UserRole + 100
CHAIN_ID_ROLE = Qt.UserRole + 101
ITEMTYPE_ROLE = Qt.UserRole + 102
class ItemType(IntEnum):
CHAIN = 0
CONNECTED_SERVER = 1
DISCONNECTED_SERVER = 2
TOPLEVEL = 3
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent # type: NetworkChoiceLayout
self.setHeaderLabels([_('Server'), _('Height')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
item_type = item.data(0, self.ITEMTYPE_ROLE)
menu = QMenu()
if item_type == self.ItemType.CONNECTED_SERVER:
server = item.data(0, self.SERVER_ADDR_ROLE) # type: ServerAddr
menu.addAction(_("Use as server"), lambda: self.parent.follow_server(server))
elif item_type == self.ItemType.DISCONNECTED_SERVER:
server = item.data(0, self.SERVER_ADDR_ROLE) # type: ServerAddr
def func():
self.parent.server_e.setText(server.net_addr_str())
self.parent.set_server()
menu.addAction(_("Use as server"), func)
elif item_type == self.ItemType.CHAIN:
chain_id = item.data(0, self.CHAIN_ID_ROLE)
menu.addAction(_("Follow this branch"), lambda: self.parent.follow_branch(chain_id))
else:
return
menu.exec_(self.viewport().mapToGlobal(position))
def keyPressEvent(self, event):
if event.key() in [Qt.Key_F2, Qt.Key_Return, Qt.Key_Enter]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def update(self, *, network: Network, servers: dict, use_tor: bool):
self.clear()
# connected servers
connected_servers_item = QTreeWidgetItem([_("Connected nodes"), ''])
connected_servers_item.setData(0, self.ITEMTYPE_ROLE, self.ItemType.TOPLEVEL)
chains = network.get_blockchains()
n_chains = len(chains)
for chain_id, interfaces in chains.items():
b = blockchain.blockchains.get(chain_id)
if b is None: continue
name = b.get_name()
if n_chains > 1:
x = QTreeWidgetItem([name + '@%d'%b.get_max_forkpoint(), '%d'%b.height()])
x.setData(0, self.ITEMTYPE_ROLE, self.ItemType.CHAIN)
x.setData(0, self.CHAIN_ID_ROLE, b.get_id())
else:
x = connected_servers_item
for i in interfaces:
star = ' *' if i == network.interface else ''
item = QTreeWidgetItem([f"{i.server.to_friendly_name()}" + star, '%d'%i.tip])
item.setData(0, self.ITEMTYPE_ROLE, self.ItemType.CONNECTED_SERVER)
item.setData(0, self.SERVER_ADDR_ROLE, i.server)
item.setToolTip(0, str(i.server))
x.addChild(item)
if n_chains > 1:
connected_servers_item.addChild(x)
# disconnected servers
disconnected_servers_item = QTreeWidgetItem([_("Other known servers"), ""])
disconnected_servers_item.setData(0, self.ITEMTYPE_ROLE, self.ItemType.TOPLEVEL)
connected_hosts = set([iface.host for ifaces in chains.values() for iface in ifaces])
protocol = PREFERRED_NETWORK_PROTOCOL
for _host, d in sorted(servers.items()):
if _host in connected_hosts:
continue
if _host.endswith('.onion') and not use_tor:
continue
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
item = QTreeWidgetItem([server.net_addr_str(), ""])
item.setData(0, self.ITEMTYPE_ROLE, self.ItemType.DISCONNECTED_SERVER)
item.setData(0, self.SERVER_ADDR_ROLE, server)
disconnected_servers_item.addChild(item)
self.addTopLevelItem(connected_servers_item)
self.addTopLevelItem(disconnected_servers_item)
connected_servers_item.setExpanded(True)
for i in range(connected_servers_item.childCount()):
connected_servers_item.child(i).setExpanded(True)
disconnected_servers_item.setExpanded(True)
# headers
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.Stretch)
h.setSectionResizeMode(1, QHeaderView.ResizeToContents)
super().update()
class NetworkChoiceLayout(object):
def __init__(self, network: Network, config: 'SimpleConfig', wizard=False):
self.network = network
self.config = config
self.tor_proxy = None
self.tabs = tabs = QTabWidget()
proxy_tab = QWidget()
blockchain_tab = QWidget()
tabs.addTab(blockchain_tab, _('Overview'))
tabs.addTab(proxy_tab, _('Proxy'))
fixed_width_hostname = 24 * char_width_in_lineedit()
fixed_width_port = 6 * char_width_in_lineedit()
# Proxy tab
grid = QGridLayout(proxy_tab)
grid.setSpacing(8)
# proxy setting
self.proxy_cb = QCheckBox(_('Use proxy'))
self.proxy_cb.clicked.connect(self.check_disable_proxy)
self.proxy_cb.clicked.connect(self.set_proxy)
self.proxy_mode = QComboBox()
self.proxy_mode.addItems(['SOCKS4', 'SOCKS5'])
self.proxy_host = QLineEdit()
self.proxy_host.setFixedWidth(fixed_width_hostname)
self.proxy_port = QLineEdit()
self.proxy_port.setFixedWidth(fixed_width_port)
self.proxy_user = QLineEdit()
self.proxy_user.setPlaceholderText(_("Proxy user"))
self.proxy_password = PasswordLineEdit()
self.proxy_password.setPlaceholderText(_("Password"))
self.proxy_password.setFixedWidth(fixed_width_port)
self.proxy_mode.currentIndexChanged.connect(self.set_proxy)
self.proxy_host.editingFinished.connect(self.set_proxy)
self.proxy_port.editingFinished.connect(self.set_proxy)
self.proxy_user.editingFinished.connect(self.set_proxy)
self.proxy_password.editingFinished.connect(self.set_proxy)
self.proxy_mode.currentIndexChanged.connect(self.proxy_settings_changed)
self.proxy_host.textEdited.connect(self.proxy_settings_changed)
self.proxy_port.textEdited.connect(self.proxy_settings_changed)
self.proxy_user.textEdited.connect(self.proxy_settings_changed)
self.proxy_password.textEdited.connect(self.proxy_settings_changed)
self.tor_cb = QCheckBox(_("Use Tor Proxy"))
self.tor_cb.setIcon(read_QIcon("tor_logo.png"))
self.tor_cb.hide()
self.tor_cb.clicked.connect(self.use_tor_proxy)
grid.addWidget(self.tor_cb, 1, 0, 1, 3)
grid.addWidget(self.proxy_cb, 2, 0, 1, 3)
grid.addWidget(HelpButton(_('Proxy settings apply to all connections: with Electrum servers, but also with third-party services.')), 2, 4)
grid.addWidget(self.proxy_mode, 4, 1)
grid.addWidget(self.proxy_host, 4, 2)
grid.addWidget(self.proxy_port, 4, 3)
grid.addWidget(self.proxy_user, 5, 2)
grid.addWidget(self.proxy_password, 5, 3)
grid.setRowStretch(7, 1)
# Blockchain Tab
grid = QGridLayout(blockchain_tab)
msg = ' '.join([
_("Electrum connects to several nodes in order to download block headers and find out the longest blockchain."),
_("This blockchain is used to verify the transactions sent by your transaction server.")
])
self.status_label = QLabel('')
grid.addWidget(QLabel(_('Status') + ':'), 0, 0)
grid.addWidget(self.status_label, 0, 1, 1, 3)
grid.addWidget(HelpButton(msg), 0, 4)
self.autoconnect_cb = QCheckBox(_('Select server automatically'))
self.autoconnect_cb.setEnabled(self.config.is_modifiable('auto_connect'))
self.autoconnect_cb.clicked.connect(self.set_server)
self.autoconnect_cb.clicked.connect(self.update)
msg = ' '.join([
_("If auto-connect is enabled, Electrum will always use a server that is on the longest blockchain."),
_("If it is disabled, you have to choose a server you want to use. Electrum will warn you if your server is lagging.")
])
grid.addWidget(self.autoconnect_cb, 1, 0, 1, 3)
grid.addWidget(HelpButton(msg), 1, 4)
self.server_e = QLineEdit()
self.server_e.setFixedWidth(fixed_width_hostname + fixed_width_port)
self.server_e.editingFinished.connect(self.set_server)
msg = _("Electrum sends your wallet addresses to a single server, in order to receive your transaction history.")
grid.addWidget(QLabel(_('Server') + ':'), 2, 0)
grid.addWidget(self.server_e, 2, 1, 1, 3)
grid.addWidget(HelpButton(msg), 2, 4)
self.height_label = QLabel('')
msg = _('This is the height of your local copy of the blockchain.')
grid.addWidget(QLabel(_('Blockchain') + ':'), 3, 0)
grid.addWidget(self.height_label, 3, 1)
grid.addWidget(HelpButton(msg), 3, 4)
self.split_label = QLabel('')
grid.addWidget(self.split_label, 4, 0, 1, 3)
self.nodes_list_widget = NodesListWidget(self)
grid.addWidget(self.nodes_list_widget, 6, 0, 1, 5)
vbox = QVBoxLayout()
vbox.addWidget(tabs)
self.layout_ = vbox
# tor detector
self.td = td = TorDetector()
td.found_proxy.connect(self.suggest_proxy)
td.start()
self.fill_in_proxy_settings()
self.update()
def clean_up(self):
if self.td:
self.td.found_proxy.disconnect()
self.td.stop()
self.td = None
def check_disable_proxy(self, b):
if not self.config.is_modifiable('proxy'):
b = False
for w in [self.proxy_mode, self.proxy_host, self.proxy_port, self.proxy_user, self.proxy_password]:
w.setEnabled(b)
def enable_set_server(self):
if self.config.is_modifiable('server'):
enabled = not self.autoconnect_cb.isChecked()
self.server_e.setEnabled(enabled)
else:
for w in [self.autoconnect_cb, self.server_e, self.nodes_list_widget]:
w.setEnabled(False)
def update(self):
net_params = self.network.get_parameters()
server = net_params.server
auto_connect = net_params.auto_connect
if not self.server_e.hasFocus():
self.server_e.setText(server.to_friendly_name())
self.autoconnect_cb.setChecked(auto_connect)
height_str = "%d "%(self.network.get_local_height()) + _('blocks')
self.height_label.setText(height_str)
n = len(self.network.get_interfaces())
status = _("Connected to {0} nodes.").format(n) if n > 1 else _("Connected to {0} node.").format(n) if n == 1 else _("Not connected")
self.status_label.setText(status)
chains = self.network.get_blockchains()
if len(chains) > 1:
chain = self.network.blockchain()
forkpoint = chain.get_max_forkpoint()
name = chain.get_name()
msg = _('Chain split detected at block {0}').format(forkpoint) + '\n'
msg += (_('You are following branch') if auto_connect else _('Your server is on branch'))+ ' ' + name
msg += ' (%d %s)' % (chain.get_branch_size(), _('blocks'))
else:
msg = ''
self.split_label.setText(msg)
self.nodes_list_widget.update(network=self.network,
servers=self.network.get_servers(),
use_tor=self.tor_cb.isChecked())
self.enable_set_server()
def fill_in_proxy_settings(self):
proxy_config = self.network.get_parameters().proxy
if not proxy_config:
proxy_config = {"mode": "none", "host": "localhost", "port": "9050"}
b = proxy_config.get('mode') != "none"
self.check_disable_proxy(b)
if b:
self.proxy_cb.setChecked(True)
self.proxy_mode.setCurrentIndex(
self.proxy_mode.findText(str(proxy_config.get("mode").upper())))
self.proxy_host.setText(proxy_config.get("host"))
self.proxy_port.setText(proxy_config.get("port"))
self.proxy_user.setText(proxy_config.get("user", ""))
self.proxy_password.setText(proxy_config.get("password", ""))
def layout(self):
return self.layout_
def follow_branch(self, chain_id):
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
self.update()
def follow_server(self, server: ServerAddr):
self.network.run_from_another_thread(self.network.follow_chain_given_server(server))
self.update()
def accept(self):
pass
def set_server(self):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(str(self.server_e.text()))
if not server: raise Exception("failed to parse")
except Exception:
return
net_params = net_params._replace(server=server,
auto_connect=self.autoconnect_cb.isChecked())
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def set_proxy(self):
net_params = self.network.get_parameters()
if self.proxy_cb.isChecked():
proxy = {'mode':str(self.proxy_mode.currentText()).lower(),
'host':str(self.proxy_host.text()),
'port':str(self.proxy_port.text()),
'user':str(self.proxy_user.text()),
'password':str(self.proxy_password.text())}
else:
proxy = None
self.tor_cb.setChecked(False)
net_params = net_params._replace(proxy=proxy)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def suggest_proxy(self, found_proxy):
if found_proxy is None:
self.tor_cb.hide()
return
self.tor_proxy = found_proxy
self.tor_cb.setText("Use Tor proxy at port " + str(found_proxy[1]))
if (self.proxy_cb.isChecked()
and self.proxy_mode.currentIndex() == self.proxy_mode.findText('SOCKS5')
and self.proxy_host.text() == "127.0.0.1"
and self.proxy_port.text() == str(found_proxy[1])):
self.tor_cb.setChecked(True)
self.tor_cb.show()
def use_tor_proxy(self, use_it):
if not use_it:
self.proxy_cb.setChecked(False)
else:
socks5_mode_index = self.proxy_mode.findText('SOCKS5')
if socks5_mode_index == -1:
_logger.info("can't find proxy_mode 'SOCKS5'")
return
self.proxy_mode.setCurrentIndex(socks5_mode_index)
self.proxy_host.setText("127.0.0.1")
self.proxy_port.setText(str(self.tor_proxy[1]))
self.proxy_user.setText("")
self.proxy_password.setText("")
self.tor_cb.setChecked(True)
self.proxy_cb.setChecked(True)
self.check_disable_proxy(use_it)
self.set_proxy()
def proxy_settings_changed(self):
self.tor_cb.setChecked(False)
class TorDetector(QThread):
found_proxy = pyqtSignal(object)
def __init__(self):
QThread.__init__(self)
self._stop_event = threading.Event()
def run(self):
# Probable ports for Tor to listen at
ports = [9050, 9150]
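        # Poll the candidate ports every 10 seconds and emit the first one that
        # responds like Tor (or None if neither does) through found_proxy.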
while True:
for p in ports:
net_addr = ("127.0.0.1", p)
if TorDetector.is_tor_port(net_addr):
self.found_proxy.emit(net_addr)
break
else:
self.found_proxy.emit(None)
stopping = self._stop_event.wait(10)
if stopping:
return
def stop(self):
self._stop_event.set()
self.exit()
self.wait()
@staticmethod
def is_tor_port(net_addr: Tuple[str, int]) -> bool:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(0.1)
s.connect(net_addr)
# Tor responds uniquely to HTTP-like requests
s.send(b"GET\n")
if b"Tor is not an HTTP Proxy" in s.recv(1024):
return True
except socket.error:
pass
return False
| mit | 6,902,632,499,620,850,000 | 39.854291 | 146 | 0.61325 | false |
arksu/a2client | etc/blender_exporter/export_a1.py | 1 | 20410 | # coding=utf-8
import struct
import bpy
import bmesh
from bpy_extras.io_utils import ExportHelper
import mathutils
import os.path
import math
#===========================================================================
# Custom exception class
#===========================================================================
class Error( Exception ):
def __init__(self, message):
self.message = message
bind_pose = dict()
def run(fpath, markerFilter, scaleFactor, initFrame, do_mesh, do_skeleton, do_anims):
print ("start a1 export...")
file = open(fpath, 'bw')
# set 0 frame for take skeleton pos
goBack = bpy.context.scene.frame_current
bpy.context.scene.frame_set(initFrame)
# try export by udk exporter
arm, mesh = find_armature_and_mesh()
if (do_mesh):
file.write(struct.pack('<b', 1))
udk_parse_mesh(mesh, file)
else:
file.write(struct.pack('<b', 0))
correctionMatrix = mathutils.Matrix()
if (len(bpy.data.armatures) > 0 and do_skeleton):
armature = bpy.data.armatures[arm.name]
file.write(struct.pack('<b', 1)) # skeleton flag
write_skeleton(file, armature, correctionMatrix)
if (do_anims):
file.write(struct.pack('<b', 1)) # anim flag
write_all_anims(file, markerFilter, fpath, arm)
else:
file.write(struct.pack('<b', 0)) # anim flag
else:
file.write(struct.pack('<b', 0)) # skeleton flag
file.write(struct.pack('<b', 0)) # anim flag
file.close()
print ("a1 export done")
bpy.context.scene.frame_set(goBack)
return {'FINISHED'}
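# Binary layout produced below (all values little-endian; strings are uint16
# length-prefixed ASCII, see write_string):
# - optional "a1mesh" block: uint32 face count, then per triangle vertex the
#   position, normal, weight count + (group name, weight) pairs and UV
# - optional "a1skeleton" block: uint16 bone count, then per bone a skip flag,
#   name, parent name, inverse bind matrix and local matrix
# - "a1anim" blocks: name, frame count, fps, bone names, then per frame and per
#   bone a skip flag followed by the local 4x4 matrix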
def udk_parse_mesh(mesh, file):
option_clamp_uv = True
#bpy.ops.object.mode_set(mode='OBJECT')
#error ? on commands for select object?
print("Mesh object:", mesh.name)
scene = bpy.context.scene
for i in scene.objects: i.select = False # deselect all objects
scene.objects.active = mesh
setmesh = mesh
mesh = triangulate_mesh(mesh)
#bpy.context.scene.objects.unlink(setmesh)
print("FACES----:",len(mesh.data.tessfaces))
discarded_face_count = 0
vertex_groups = mesh.vertex_groups
write_string(file, "a1mesh")
file.write(struct.pack('<I', len(mesh.data.tessfaces)))
if (mesh.parent):
matrix = mesh.parent.matrix_world * mesh.matrix_local
else:
matrix = mesh.matrix_local
for face in mesh.data.tessfaces:
if len(face.vertices) != 3:
raise Error("Non-triangular face (%i)" % len(face.vertices))
if not is_1d_face(face, mesh.data):
face_index = face.index
has_uv = False
face_uv = None
if len(mesh.data.uv_textures) > 0:
has_uv = True
uv_layer = mesh.data.tessface_uv_textures.active
face_uv = uv_layer.data[face_index]
#size(data) is number of texture faces. Each face has UVs
#print("DATA face uv: ",len(faceUV.uv), " >> ",(faceUV.uv[0][0]))
for i in range(3):
vert_index = face.vertices[i]
vert = mesh.data.vertices[vert_index]
#assumes 3 UVs Per face (for now)
if (has_uv):
if len(face_uv.uv) != 3:
print("WARNING: face has more or less than 3 UV coordinates - writing 0,0...")
uv = [0.0, 0.0]
else:
uv = [face_uv.uv[i][0],face_uv.uv[i][1]] #OR bottom works better # 24 for cube
else:
#print ("No UVs?")
uv = [0.0, 0.0]
#flip V coordinate because UEd requires it and DOESN'T flip it on its own like it
#does with the mesh Y coordinates. this is otherwise known as MAGIC-2
uv[1] = 1.0 - uv[1]
# clamp UV coords if udk_option_clamp_uv is True
if option_clamp_uv:
if (uv[0] > 1):
uv[0] = 1
if (uv[0] < 0):
uv[0] = 0
if (uv[1] > 1):
uv[1] = 1
if (uv[1] < 0):
uv[1] = 0
#matrix = mathutils.Matrix()
co = matrix * vert.co
# no = mesh.matrix_local * vert.normal
no = vert.normal
no.normalize()
file.write(struct.pack('<fff', co[0], co[1], co[2]))
file.write(struct.pack('<fff', no[0], no[1], no[2]))
#weight_layer = False
if (len(vert.groups) > 0):
file.write(struct.pack('<H', len(vert.groups)))
for vgroup in vert.groups:
wg = vertex_groups[vgroup.group]
vertex_weight = vgroup.weight
wname = wg.name
write_string(file, wname)
file.write(struct.pack('<f', vertex_weight))
else:
# no weight data
file.write(struct.pack('<H', 0))
file.write(struct.pack('<ff', uv[0], uv[1]))
#END if not is_1d_face(current_face, mesh.data)
else:
discarded_face_count += 1
print ("discarded_face_count ", discarded_face_count)
bpy.ops.object.mode_set(mode='OBJECT') # OBJECT mode
mesh.parent = None # unparent to avoid phantom links
bpy.context.scene.objects.unlink(mesh) # unlink
# arm - armature object
def write_skeleton(file, armature, correctionMatrix):
# global orientationTweak
print("save skeleton...")
armature_obj = bpy.data.objects[armature.name]
arm_mw = armature_obj.matrix_world
bones = armature.bones
if not bones:
print("no bones for skeleton")
return
abandonedBones = [i for i in bones
if i.parent and i.parent not in bones[:]]
if abandonedBones:
boneList = []
for ab in abandonedBones:
boneList.append("- " + str(ab.name))
print ("bones missing parents : ", boneList)
print ("bones count: ", len(bones))
# header
write_string(file, "a1skeleton")
# count
file.write(struct.pack('<H', len(bones)))
# data
for b in bones:
if not b.use_deform:
print ("not deformable bone!: ", b.name)
write_skip(file, True)
continue
write_skip(file, False)
bone_parent = b.parent
while bone_parent:
if bone_parent.use_deform:
break
bone_parent = bone_parent.parent
if bone_parent:
pn = bone_parent.name
else:
pn = ''
        mw = arm_mw * b.matrix_local # exact
if bone_parent:
ml = bone_parent.matrix_local.inverted() * b.matrix_local
else:
ml = mw
# mw = get_mw(b)
# ml = correctionMatrix * b.matrix_local
# print ("m local : ", ml)
# print ("m world : ", mw)
# print ("parent", pn)
# print ("name: ", b.name, "---------")
write_string(file, b.name)
write_string(file, pn)
write_matrix(file, mw.inverted()) # bind
# inverted = boneMatrix.inverted()
write_matrix(file, ml) # frame
bind_pose[b.name] = ml
print("skeleton saved")
def write_all_anims(file, markerFilter, filePath, arm):
ranges = get_ranges(markerFilter)
print ("ranges : ", ranges)
if ranges:
file.write(struct.pack('<H', len(ranges)))
for r in ranges.keys():
# folder = os.path.dirname(filePath)
# animFile = os.path.join(folder, r + ".a1anim")
write_anim(file, r, ranges[r], arm)
else:
file.write(struct.pack('<H', 1))
write_anim(file, None, None, arm)
# baseFilePathEnd = filePath.rfind(".md5mesh")
# if baseFilePathEnd == -1:
# animFilePath = filePath + ".md5anim"
# else:
# animFilePath = filePath[:baseFilePathEnd] + ".md5anim"
# write_md5anim(animFilePath, prerequisites, correctionMatrix, None)
# return {'FINISHED'}
def write_anim(file, Name, frameRange, armature):
# global orientationTweak
print ("save animation... name: ", Name, " range: ", frameRange)
write_string(file, "a1anim")
if frameRange == None:
startFrame = bpy.context.scene.frame_start
endFrame = bpy.context.scene.frame_end
else:
startFrame, endFrame = frameRange
#armature = bpy.context.object.find_armature()
#armature = bpy.data.armatures[0]
bones = armature.data.bones
armObj = [o for o in bpy.data.objects if o.data == bones[0].id_data][0]
pBones = armObj.pose.bones
print ("arm :", armObj , " pbones: ", pBones)
# anim name
if Name:
write_string(file, Name)
else:
write_string(file, '')
# frames count
fcount = endFrame - startFrame + 1
file.write(struct.pack('<H', fcount))
fps = bpy.context.scene.render.fps
file.write(struct.pack('<H', fps))
# bones names
file.write(struct.pack('<H', len(bones)))
for b in bones:
write_string(file, b.name)
# print ("orientationTweak ", orientationTweak)
# frames
print ("process frames...")
for frame in range(startFrame, endFrame + 1):
bpy.context.scene.frame_set(frame)
#print("set frame ", frame)
for b in bones:
if not b.use_deform:
write_skip(file, True)
continue
write_skip(file, False)
pBone = pBones[b.name]
bone_parent = pBone.parent
while bone_parent:
if bone_parent.bone.use_deform:
break
bone_parent = bone_parent.parent
pBoneMatrix = pBone.matrix
if bone_parent:
diffMatrix = bone_parent.matrix.inverted() * (pBoneMatrix)
else:
diffMatrix = armObj.matrix_world * pBoneMatrix
# diffMatrix = orientationTweak * diffMatrix
# print ("bind_pose ", b.name, "=", bind_pose[b.name])
# print ("frame matrix=", diffMatrix)
            # identical matrices: write a flag so this bone can be skipped
# if cmp_matrix(bind_pose[b.name], diffMatrix):
# print("equal matrix ", b.name)
# write_skip(file, True)
# else:
# write_skip(file, False)
write_matrix(file, diffMatrix)
print ("animation saved")
pass
def get_mw(bone):
ml = bone.matrix_local
if (bone.parent):
ml = get_mw(bone.parent) * ml
# else:
# ml = bpy.data.objects['Armature'].matrix_world * ml
return ml
def triangulate(bm):
while True:
nonTris = [f for f in bm.faces if len(f.verts) > 3]
if nonTris:
nt = nonTris[0]
pivotLoop = nt.loops[0]
allVerts = nt.verts
vert1 = pivotLoop.vert
wrongVerts = [vert1,
pivotLoop.link_loop_next.vert,
pivotLoop.link_loop_prev.vert]
bmesh.utils.face_split(nt, vert1, [v for v in allVerts
if v not in wrongVerts][0])
for seq in [bm.verts, bm.faces, bm.edges]: seq.index_update()
else:
break
return bm
def write_string(file, str):
l = len(str)
file.write(struct.pack('<H', l))
file.write(bytearray(str, 'ascii'))
def write_matrix(file, m):
# transpose in converter
file.write(struct.pack('<ffff', m[0][0], m[0][1], m[0][2], m[0][3]))
file.write(struct.pack('<ffff', m[1][0], m[1][1], m[1][2], m[1][3]))
file.write(struct.pack('<ffff', m[2][0], m[2][1], m[2][2], m[2][3]))
file.write(struct.pack('<ffff', m[3][0], m[3][1], m[3][2], m[3][3]))
def eps_num(n1, n2):
    # approximate equality check; abs() so that large negative differences do not pass
    return abs(n1 - n2) < 0.00001
def cmp_matrix(m1, m2):
if \
eps_num(m1[0][0], m2[0][0]) and eps_num(m1[0][1], m2[0][1]) and eps_num(m1[0][2], m2[0][2]) and eps_num(m1[0][3], m2[0][3]) and \
eps_num(m1[1][0], m2[1][0]) and eps_num(m1[1][1], m2[1][1]) and eps_num(m1[1][2], m2[1][2]) and eps_num(m1[1][3], m2[1][3]) and \
eps_num(m1[2][0], m2[2][0]) and eps_num(m1[2][1], m2[2][1]) and eps_num(m1[2][2], m2[2][2]) and eps_num(m1[2][3], m2[2][3]) and \
eps_num(m1[3][0], m2[3][0]) and eps_num(m1[3][1], m2[3][1]) and eps_num(m1[3][2], m2[3][2]) and eps_num(m1[3][3], m2[3][3]):
return True
else:
return False
def write_skip(file, skip):
if skip:
file.write(struct.pack('<b', 1))
else:
file.write(struct.pack('<b', 0))
def get_ranges(markerFilter):
markers = bpy.context.scene.timeline_markers
starts = [m for m in markers if
m.name.startswith(markerFilter)
and m.name.endswith("_start", 2)]
ends = [m for m in markers if
m.name.startswith(markerFilter)
and m.name.endswith("_end", 2)]
if not starts or not ends:
return None
else:
return find_matches(starts, ends)
def find_matches(starts, ends):
pairs = {}
for s in starts:
basename = s.name[:s.name.rfind("_start")]
matches = [e for e in ends if
e.name[:e.name.rfind("_end")] == basename]
if matches:
m = matches[0]
pairs[basename] = (min(s.frame, m.frame), max(s.frame, m.frame))
return pairs
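# Example: timeline markers "idle_start"/"idle_end" and "walk_start"/"walk_end"
# (matching the marker filter prefix) yield {'idle': (f0, f1), 'walk': (f2, f3)},
# i.e. one exported animation per named frame range.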
#===========================================================================
# Locate the target armature and mesh for export
# RETURNS armature, mesh
#===========================================================================
def find_armature_and_mesh():
print ("find_armature_and_mesh")
context = bpy.context
active_object = context.active_object
armature = None
mesh = None
# TODO:
# this could be more intuitive
bpy.ops.object.mode_set(mode='OBJECT')
# try the active object
if active_object and active_object.type == 'ARMATURE':
armature = active_object
# otherwise, try for a single armature in the scene
else:
all_armatures = [obj for obj in context.scene.objects if obj.type == 'ARMATURE']
if len(all_armatures) == 1:
armature = all_armatures[0]
elif len(all_armatures) > 1:
raise Error("Please select an armature in the scene")
else:
raise Error("No armatures in scene")
print ("Found armature: ", armature.name, " ", armature)
meshselected = []
parented_meshes = [obj for obj in armature.children if obj.type == 'MESH']
for obj in armature.children:
#print(dir(obj))
if obj.type == 'MESH' and obj.select == True:
meshselected.append(obj)
# try the active object
if active_object and active_object.type == 'MESH' and len(meshselected) == 0:
if active_object.parent == armature:
mesh = active_object
else:
raise Error("The selected mesh is not parented to the armature")
# otherwise, expect a single mesh parented to the armature (other object types are ignored)
else:
print("Number of meshes:",len(parented_meshes))
print("Number of meshes (selected):",len(meshselected))
if len(parented_meshes) == 1:
mesh = parented_meshes[0]
elif len(parented_meshes) > 1:
if len(meshselected) >= 1:
mesh = sortmesh(meshselected)
else:
raise Error("More than one mesh(s) parented to armature. Select object(s)!")
else:
raise Error("No mesh parented to armature")
print ("Found mesh: " +mesh.name, " ", mesh)
# if len(armature.pose.bones) == len(mesh.vertex_groups):
# print("Armature and Mesh Vertex Groups matches Ok!")
# else:
# raise Error("Armature bones:" + str(len(armature.pose.bones)) + " Mesh Vertex Groups:" + str(len(mesh.vertex_groups)) +" doesn't match!")
return armature, mesh
#copy mesh data and then merge them into one object
def meshmerge(selectedobjects):
bpy.ops.object.mode_set(mode='OBJECT')
cloneobjects = []
if len(selectedobjects) > 1:
print("selectedobjects:",len(selectedobjects))
count = 0 #reset count
for count in range(len( selectedobjects)):
#print("Index:",count)
if selectedobjects[count] != None:
me_da = selectedobjects[count].data.copy() #copy data
me_ob = selectedobjects[count].copy() #copy object
#note two copy two types else it will use the current data or mesh
me_ob.data = me_da
bpy.context.scene.objects.link(me_ob)#link the object to the scene #current object location
print("Index:",count,"clone object",me_ob.name)
cloneobjects.append(me_ob)
#bpy.ops.object.mode_set(mode='OBJECT')
for i in bpy.data.objects: i.select = False #deselect all objects
count = 0 #reset count
#bpy.ops.object.mode_set(mode='OBJECT')
for count in range(len( cloneobjects)):
if count == 0:
bpy.context.scene.objects.active = cloneobjects[count]
print("Set Active Object:",cloneobjects[count].name)
cloneobjects[count].select = True
bpy.ops.object.join()
if len(cloneobjects) > 1:
bpy.types.Scene.udk_copy_merge = True
return cloneobjects[0]
#sort the mesh center top list and not center at the last array. Base on order while select to merge mesh to make them center.
def sortmesh(selectmesh):
print("MESH SORTING...")
centermesh = []
notcentermesh = []
for countm in range(len(selectmesh)):
if selectmesh[countm].location.x == 0 and selectmesh[countm].location.y == 0 and selectmesh[countm].location.z == 0:
centermesh.append(selectmesh[countm])
else:
notcentermesh.append(selectmesh[countm])
selectmesh = []
for countm in range(len(centermesh)):
selectmesh.append(centermesh[countm])
for countm in range(len(notcentermesh)):
selectmesh.append(notcentermesh[countm])
if len(selectmesh) == 1:
return selectmesh[0]
else:
return meshmerge(selectmesh)
#===========================================================================
# http://en.wikibooks.org/wiki/Blender_3D:_Blending_Into_Python/Cookbook#Triangulate_NMesh
# blender 2.50 format using the Operators/command convert the mesh to tri mesh
#===========================================================================
def triangulate_mesh( object ):
print("triangulateNMesh")
#print(type(object))
scene = bpy.context.scene
me_ob = object.copy()
me_ob.data = object.to_mesh(bpy.context.scene, True, 'PREVIEW') #write data object
bpy.context.scene.objects.link(me_ob)
bpy.context.scene.update()
bpy.ops.object.mode_set(mode='OBJECT')
for i in scene.objects:
i.select = False # deselect all objects
me_ob.select = True
scene.objects.active = me_ob
print("Copy and Convert mesh just incase any way...")
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')# select all the face/vertex/edge
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.quads_convert_to_tris()
bpy.context.scene.update()
bpy.ops.object.mode_set(mode='OBJECT')
print("Triangulated mesh")
me_ob.data = me_ob.to_mesh(bpy.context.scene, True, 'PREVIEW') #write data object
bpy.context.scene.update()
return me_ob
#===========================================================================
#RG - check to make sure face isnt a line
#===========================================================================
def is_1d_face( face, mesh ):
#ID Vertex of id point
v0 = face.vertices[0]
v1 = face.vertices[1]
v2 = face.vertices[2]
return (mesh.vertices[v0].co == mesh.vertices[v1].co\
or mesh.vertices[v1].co == mesh.vertices[v2].co\
or mesh.vertices[v2].co == mesh.vertices[v0].co)
    return False
 | gpl-3.0 | -2,436,140,333,795,629,600 | 32.747927 | 146 | 0.549069 | false
stormi/tsunami | src/primaires/salle/editeurs/redit/edt_repos.py | 1 | 5551 | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier définit le contexte-éditeur 'edt_repos'."""
from primaires.interpreteur.editeur import Editeur
from primaires.format.fonctions import format_nb
class EdtRepos(Editeur):
"""Ce contexte permet d'éditer la sous-catégorie 'repos' d'un détail.
"""
def __init__(self, pere, objet=None, attribut=None):
"""Constructeur de l'éditeur."""
Editeur.__init__(self, pere, objet, attribut)
self.ajouter_option("s", self.opt_asseoir)
self.ajouter_option("l", self.opt_allonger)
self.ajouter_option("c", self.opt_connecteur)
def accueil(self):
"""Message d'accueil du contexte"""
detail = self.objet
msg = "| |tit|" + "Edition du détail '{}'".format(detail).ljust(76)
msg += "|ff||\n" + self.opts.separateur + "\n"
msg += self.aide_courte
msg += format_nb(detail.nb_places_assises,
"{nb} place{s} assise{s} ", fem=True)
msg += "(récupération : {}).\n".format(detail.facteur_asseoir)
msg += format_nb(detail.nb_places_allongees,
"{nb} place{s} allongée{s} ", fem=True)
msg += "(récupération : {}).\n".format(detail.facteur_allonger)
msg += "Connecteur : |ent|" + detail.connecteur + "|ff|\n"
return msg
def opt_asseoir(self, arguments):
"""Option asseoir.
Syntaxe : /s <nb> (<facteur>)
"""
detail = self.objet
if not arguments:
self.pere << "|err|Précisez au moins un nombre de places.|ff|"
return
nb_places = facteur = 0
try:
nb_places, facteur = arguments.split(" ")
except ValueError:
try:
nb_places = int(arguments.split(" ")[0])
assert nb_places >= 0
except (ValueError, AssertionError):
self.pere << "|err|Précisez un nombre valide et positif.|ff|"
return
try:
nb_places = int(nb_places)
facteur = float(facteur)
except ValueError:
self.pere << "|err|Précisez des nombres valides.|ff|"
return
if nb_places:
detail.peut_asseoir = True
detail.nb_places_assises = nb_places
else:
detail.peut_asseoir = False
detail.nb_places_assises = 0
if facteur:
detail.facteur_asseoir = facteur
self.actualiser()
def opt_allonger(self, arguments):
"""Option allonger.
Syntaxe : /l <nb> (<facteur>)
"""
detail = self.objet
if not arguments:
self.pere << "|err|Précisez au moins un nombre de places.|ff|"
return
nb_places = facteur = 0
try:
nb_places, facteur = arguments.split(" ")
except ValueError:
try:
nb_places = int(arguments.split(" ")[0])
assert nb_places >= 0
except (ValueError, AssertionError):
self.pere << "|err|Précisez un nombre valide et positif.|ff|"
return
try:
nb_places = int(nb_places)
facteur = float(facteur)
except ValueError:
self.pere << "|err|Précisez des nombres valides.|ff|"
return
if nb_places:
detail.peut_allonger = True
detail.nb_places_allongees = nb_places
else:
detail.peut_allonger = False
detail.nb_places_allongees = 0
if facteur:
detail.facteur_allonger = facteur
self.actualiser()
def opt_connecteur(self, arguments):
"""Option connecteur.
Syntaxe : /c <connecteur>
"""
detail = self.objet
if not arguments:
self.pere << "|err|Précisez un connecteur.|ff|"
return
detail.connecteur = arguments
self.actualiser()
| bsd-3-clause | -8,722,783,594,387,688,000 | 37.151724 | 79 | 0.601952 | false |
MSLNZ/msl-qt | tests/test_utils.py | 1 | 1695 | from msl.qt import utils, QtCore, QtWidgets, Qt, QtGui
def test_screen_geometry():
# just check that these don't raise an exception
assert isinstance(utils.screen_geometry(), QtCore.QRect)
assert isinstance(utils.screen_geometry(QtWidgets.QLabel()), QtCore.QRect)
assert isinstance(utils.screen_geometry(QtWidgets.QLabel(parent=QtWidgets.QLabel())), QtCore.QRect)
def test_drag_enter_paths():
mime = QtCore.QMimeData()
event = QtGui.QDragEnterEvent(QtCore.QPoint(0, 0), Qt.CopyAction, mime, Qt.LeftButton, Qt.NoModifier)
paths = utils.drag_drop_paths(event)
assert len(paths) == 0
url1 = QtCore.QUrl('/path/to/image.jpeg')
url1.setScheme('file')
url2 = QtCore.QUrl('') # does not pass the isValid() and scheme() == 'file' checks
url3 = QtCore.QUrl('/path/to/image.jpeg')
url3.setScheme('ftp') # does not pass the scheme() == 'file' check
url4 = QtCore.QUrl('/path/to/image.png')
url4.setScheme('file')
url5 = QtCore.QUrl('/path/to/image2.jpg')
url5.setScheme('file')
mime.setUrls([url1, url2, url3, url4, url5])
event = QtGui.QDragEnterEvent(QtCore.QPoint(0, 0), Qt.CopyAction, mime, Qt.LeftButton, Qt.NoModifier)
paths = utils.drag_drop_paths(event)
assert len(paths) == 3
assert '/path/to/image.jpeg' in paths
assert '/path/to/image.png' in paths
assert '/path/to/image2.jpg' in paths
paths = utils.drag_drop_paths(event, pattern='*.jp*g')
assert len(paths) == 2
assert '/path/to/image.jpeg' in paths
assert '/path/to/image2.jpg' in paths
paths = utils.drag_drop_paths(event, pattern='*.png')
assert len(paths) == 1
assert '/path/to/image.png' in paths
| mit | -982,922,349,157,547,000 | 34.3125 | 105 | 0.673746 | false |
allenai/document-qa | docqa/elmo/run_on_user_text.py | 1 | 3809 | import argparse
import tensorflow as tf
from docqa.data_processing.qa_training_data import ParagraphAndQuestion, ParagraphAndQuestionSpec
from docqa.data_processing.text_utils import NltkAndPunctTokenizer
from docqa.elmo.lm_qa_models import ElmoQaModel
from docqa.model_dir import ModelDir
"""
Script to run a model on user provided question/context input.
Its main purpose is to be an example of how to use the model on new question/context pairs.
"""
def main():
parser = argparse.ArgumentParser(description="Run an ELMo model on user input")
parser.add_argument("model", help="Model directory")
parser.add_argument("question", help="Question to answer")
parser.add_argument("context", help="Context to answer the question with")
args = parser.parse_args()
    # Tokenize the input; the model expects data tokenized with `NltkAndPunctTokenizer`.
    # Note the model expects case-sensitive input.
tokenizer = NltkAndPunctTokenizer()
question = tokenizer.tokenize_paragraph_flat(args.question)
context = tokenizer.tokenize_paragraph_flat(args.context)
print("Loading model")
model_dir = ModelDir(args.model)
model = model_dir.get_model()
if not isinstance(model, ElmoQaModel):
raise ValueError("This script is build to work for ElmoQaModel models only")
# Important! This tells the language model not to use the pre-computed word vectors,
# which are only applicable for the SQuAD dev/train sets.
# Instead the language model will use its character-level CNN to compute
# the word vectors dynamically.
model.lm_model.embed_weights_file = None
# Tell the model the batch size and vocab to expect, This will load the needed
# word vectors and fix the batch size when building the graph / encoding the input
print("Setting up model")
voc = set(question)
voc.update(context)
model.set_input_spec(ParagraphAndQuestionSpec(batch_size=1), voc)
# Now we build the actual tensorflow graph, `best_span` and `conf` are
# tensors holding the predicted span (inclusive) and confidence scores for each
# element in the input batch
print("Build tf graph")
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
with sess.as_default():
# 17 means to limit the span to size 17 or less
best_spans, conf = model.get_prediction().get_best_span(17)
# Now restore the weights, this is a bit fiddly since we need to avoid restoring the
# bilm weights, and instead load them from the pre-computed data
all_vars = tf.global_variables() + tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS)
lm_var_names = {x.name for x in all_vars if x.name.startswith("bilm")}
vars = [x for x in all_vars if x.name not in lm_var_names]
model_dir.restore_checkpoint(sess, vars)
# Run the initializer of the lm weights, which will load them from the lm directory
sess.run(tf.variables_initializer([x for x in all_vars if x.name in lm_var_names]))
# Now the model is ready to run
# The model takes input in the form of `ContextAndQuestion` objects, for example:
data = [ParagraphAndQuestion(context, question, None, "user-question1")]
print("Starting run")
# The model is run in two steps, first it "encodes" the paragraph/context pairs
# into numpy arrays, then to use `sess` to run the actual model get the predictions
encoded = model.encode(data, is_train=False) # batch of `ContextAndQuestion` -> feed_dict
best_spans, conf = sess.run([best_spans, conf], feed_dict=encoded) # feed_dict -> predictions
print("Best span: " + str(best_spans[0]))
print("Answer text: " + " ".join(context[best_spans[0][0]:best_spans[0][1]+1]))
print("Confidence: " + str(conf[0]))
if __name__ == "__main__":
    main()
 | apache-2.0 | 85,362,426,133,489,180 | 45.463415 | 98 | 0.716461 | false |
woodmd/haloanalysis | extpipe/utils.py | 1 | 7826 | import copy
import re
import glob
import numpy as np
from numpy.core import defchararray
from scipy.interpolate import RegularGridInterpolator
from astropy.io import fits
from astropy.table import Table, Column
def stack_files(files, outfile, new_cols=None):
h = fits.open(files[0])
tables = []
for hdu in h:
if isinstance(hdu,fits.BinTableHDU):
tables += [stack_tables(files,hdu.name,new_cols=new_cols)]
hdus = [fits.PrimaryHDU()]
hdus += [fits.table_to_hdu(t) for t in tables]
hdulist = fits.HDUList(hdus)
hdulist.writeto(outfile,overwrite=True)
def stack_tables(files, hdu=None, new_cols=None):
tables = []
for f in sorted(files):
tables += [Table.read(f,hdu=hdu)]
cols = []
for c in tables[0].colnames:
col = tables[0][c]
cols += [Column(name=col.name, unit=col.unit, shape=col.shape,
dtype=col.dtype)]
tab = Table(cols,meta=tables[0].meta)
for t in tables:
row = [ t[c] for c in tables[0].colnames ]
tab.add_row(row)
if new_cols is not None:
for col in new_cols:
tab.add_column(col)
return tab
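# Example (illustrative file names): stack_files(glob.glob('fit_*.fits'), 'stacked.fits')
# concatenates the rows of every binary-table HDU across the input files into one file.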
def load_source_rows(tab, names, key='assoc'):
"""Load the rows from a table that match a source name.
Parameters
----------
tab : `astropy.table.Table`
Table that will be searched.
names : list
List of source identifiers.
key : str
Name of the table column that will be searched for a source
matching key.
Returns
-------
outtab : `astropy.table.Table`
Table containing the subset of rows with matching source identifiers.
"""
names = [name.lower().replace(' ', '') for name in names]
col = tab[[key]].copy()
col[key] = defchararray.replace(defchararray.lower(col[key]),
' ', '')
mask = create_mask(col, {key: names})
return tab[mask]
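# Example (illustrative): load_source_rows(tab, ['Mkn 421'], key='assoc') returns the
# rows whose 'assoc' entry equals 'mkn421' after lower-casing and stripping spaces.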
def create_mask(tab, target_def):
"""Create a table mask from a target definition."""
m = np.empty(len(tab), dtype=bool); m.fill(True)
for k,v in target_def.items():
if isinstance(v,list):
m0 = np.zeros(len(tab),dtype=bool)
for t in v:
m0 |= (tab[k] == t)
m &= m0
elif isinstance(v,dict):
m0 = np.empty(len(tab),dtype=bool)
m0.fill(True)
if 'min' in v:
m0 &= (tab[k] >= v['min'])
if 'max' in v:
m0 &= (tab[k] <= v['max'])
m &= m0
elif isinstance(v,str):
p = re.compile('([a-zA-Z_2-9][^"<>=&|!()\s*.]+)')
# regular expression should capture all column names
# that consist of a-z, A-Z, '_', and numbers at the end
# it should not capture pure numbers and numbers like '1e10'
replaced = [] # check what already has been replaced
for cname in p.findall(v):
print(cname)
if not cname in replaced:
if tab.columns.has_key(cname):
tab[cname]
v = v.replace(cname, "tab['{0:s}']".format(cname))
#else:
# v = v.replace(cname, "'{0:s}'".format(cname))
replaced.append(cname)
# all of the above in one line but does not work if column name starts with a number
# or if expression is not a number
#print 'Cutting on expression', p.sub(r"tab['\1']",v)
print 'Cutting on expression', v
m &= eval(v)
return m
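# Example target definition (illustrative column names): create_mask(tab,
# {'class': ['bll', 'fsrq'], 'glat': {'min': 10.0}, 'cut': 'ts > 25'}) ANDs together
# a list-membership test, a min/max range cut and a free-form expression cut.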
def interp_map(z, axis0, axis1, idim=0):
    """Resample ``z`` along dimension ``idim`` from ``axis0`` onto ``axis1`` (assumes a 2-D map with ``idim == 0``)."""
    s0 = z.ndim * [None]
    s0[idim] = slice(None)
    # normalize by the bin widths of the input axis
    z = z / axis0.width[tuple(s0)]
    shape = list(z.shape)
    shape[idim] = len(axis1.centers)
    zinterp = np.zeros(shape)
    # interpolate each profile along the first dimension onto the new axis centers
    for i in range(z.shape[1]):
        zinterp[:, i] = np.interp(axis1.centers,
                                  axis0.centers,
                                  z[:, i])
    return zinterp
class MapND(object):
"""Container class representing an n-dimensional map."""
def __init__(self, axes, data, log_interp=False):
"""
Parameters
----------
axes : list
List of `Axis` objects defining the n-dimensional grid.
data : `~numpy.ndarray`
log_interp : bool
Perform interpolation in log-space.
"""
self._axes = axes
self._data = data
self._log_interp = log_interp
points = [ax.centers for ax in axes]
if log_interp:
self._fn = RegularGridInterpolator(points, np.log(data),
bounds_error=False,
fill_value=None)
else:
self._fn = RegularGridInterpolator(points, data,
bounds_error=False,
fill_value=None)
@property
def fn(self):
return self._fn
@property
def axes(self):
return self._axes
@property
def data(self):
return self._data
def marginalize(self, dims):
data = np.squeeze(np.apply_over_axes(np.sum,self.data,axes=dims))
axes = []
for i, axis in enumerate(self.axes):
if i not in dims:
axes += [axis]
return MapND(axes, data, self._log_interp)
def slice(self, dims, vals):
axis_xvals = []
axes = []
for i, axis in enumerate(self.axes):
axis_xvals += [axis.centers]
if not i in dims:
axes += [axis]
for i, v in zip(dims,vals):
axis_xvals[i] = np.array(v,ndmin=1)
interp_xvals = np.meshgrid(*axis_xvals,indexing='ij',sparse=True)
data = np.squeeze(self.interp(tuple(interp_xvals)))
return MapND(axes, data, self._log_interp)
def interp(self, *args):
if self._log_interp:
log_vals = self._fn(*args)
log_vals[~np.isfinite(log_vals)] = -33
return np.exp(log_vals)
else:
return self._fn(*args)
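# Minimal usage sketch for MapND/Axis (illustrative values):
#
#     e_axis = Axis.create_from_centers('energy', np.logspace(2, 5, 16), logscale=True)
#     r_axis = Axis.create_from_centers('radius', np.linspace(0.0, 1.0, 11))
#     m = MapND([e_axis, r_axis], np.ones((16, 11)))
#     m.interp((1.0e3, 0.5))       # value at energy = 1e3, radius = 0.5
#     m.slice([0], [1.0e3])        # 1-D map in radius at fixed energy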
class Axis(object):
def __init__(self, name, edges, centers=None):
self._edges = edges
self._centers = (0.5*(edges[1:] + edges[:-1])
if centers is None else centers)
self._name = name
@staticmethod
def create_from_centers(name, centers, logscale=False):
"""Create a new axis object from a sequence of bin centers."""
if logscale:
delta = np.log(centers[1:])-np.log(centers[:-1])
else:
delta = centers[1:]-centers[:-1]
if len(delta) == 0:
delta = np.array([1.0])
else:
delta = np.insert(delta,0,delta[0])
if logscale:
edges_lo = np.log(centers) - 0.5*delta
edges_hi = np.log(centers) + 0.5*delta
edges = np.exp(np.insert(edges_hi,0,edges_lo[0]))
else:
edges_lo = centers - 0.5*delta
edges_hi = centers + 0.5*delta
edges = np.insert(edges_hi,0,edges_lo[0])
return Axis(name, edges, centers)
@property
def name(self):
return self._name
@property
def edges(self):
return self._edges
@property
def lo(self):
"""Return the lower bin edges."""
return self._edges[:-1]
@property
def hi(self):
"""Return the upper bin edges."""
return self._edges[1:]
@property
def nbin(self):
return len(self._edges)-1
@property
def centers(self):
return self._centers
@property
def width(self):
return self._edges[1:] - self._edges[:-1]
| bsd-3-clause | -7,118,146,222,372,312,000 | 25.80137 | 89 | 0.523256 | false |
eReuse/DeviceHub | ereuse_devicehub/scripts/updates/snapshot_software.py | 1 | 1290 | from contextlib import suppress
from pydash import find
from ereuse_devicehub.resources.device.domain import DeviceDomain
from ereuse_devicehub.resources.event.device import DeviceEventDomain
from ereuse_devicehub.scripts.updates.update import Update
class SnapshotSoftware(Update):
"""
Changes the values of SnapshotSoftware and adds it to the materialized one in devices
"""
def execute(self, database):
SNAPSHOT_SOFTWARE = {
'DDI': 'Workbench',
'Scan': 'AndroidApp',
'DeviceHubClient': 'Web'
}
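        # Normalize legacy snapshotSoftware labels to the new vocabulary, then update
        # both the event itself and its materialized copy inside each device's events.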
for snapshot in DeviceEventDomain.get({'@type': "devices:Snapshot"}):
with suppress(KeyError):
snapshot['snapshotSoftware'] = SNAPSHOT_SOFTWARE[snapshot.get('snapshotSoftware', 'DDI')]
DeviceEventDomain.update_one_raw(snapshot['_id'], {'$set': {'snapshotSoftware': snapshot['snapshotSoftware']}})
for device in DeviceDomain.get({'events._id': snapshot['_id']}):
materialized_snapshot = find(device['events'], lambda event: event['_id'] == snapshot['_id'])
materialized_snapshot['snapshotSoftware'] = snapshot['snapshotSoftware']
DeviceDomain.update_one_raw(device['_id'], {'$set': {'events': device['events']}})
| agpl-3.0 | 4,927,501,792,348,379,000 | 45.071429 | 123 | 0.655039 | false |
noironetworks/neutron | neutron/tests/functional/cmd/test_netns_cleanup.py | 1 | 6514 | # Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from neutron_lib import constants as n_const
from neutron.agent.l3 import namespaces
from neutron.agent.linux import dhcp
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.cmd import netns_cleanup
from neutron.common import utils as common_utils
from neutron.conf.agent import cmd
from neutron.tests import base as basetest
from neutron.tests.common import net_helpers
from neutron.tests.functional import base
from neutron.tests.functional.cmd import process_spawn
GET_NAMESPACES = 'neutron.agent.linux.ip_lib.list_network_namespaces'
TEST_INTERFACE_DRIVER = 'neutron.agent.linux.interface.OVSInterfaceDriver'
NUM_SUBPROCESSES = 6
class NetnsCleanupTest(base.BaseSudoTestCase):
def setUp(self):
super(NetnsCleanupTest, self).setUp()
self.get_namespaces_p = mock.patch(GET_NAMESPACES)
self.get_namespaces = self.get_namespaces_p.start()
def setup_config(self, args=None):
if args is None:
args = []
# force option enabled to make sure non-empty namespaces are
# cleaned up and deleted
args.append('--force')
self.conf = netns_cleanup.setup_conf()
self.conf.set_override('interface_driver', TEST_INTERFACE_DRIVER)
self.config_parse(conf=self.conf, args=args)
def test_cleanup_network_namespaces_cleans_dhcp_and_l3_namespaces(self):
dhcp_namespace = self.useFixture(
net_helpers.NamespaceFixture(dhcp.NS_PREFIX)).name
l3_namespace = self.useFixture(
net_helpers.NamespaceFixture(namespaces.NS_PREFIX)).name
bridge = self.useFixture(
net_helpers.VethPortFixture(namespace=dhcp_namespace)).bridge
self.useFixture(
net_helpers.VethPortFixture(bridge, l3_namespace))
# we scope the get_namespaces to our own ones not to affect other
# tests, as otherwise cleanup will kill them all
self.get_namespaces.return_value = [l3_namespace, dhcp_namespace]
# launch processes in each namespace to make sure they're
# killed during cleanup
procs_launched = self._launch_processes([l3_namespace, dhcp_namespace])
self.assertIsNot(procs_launched, 0)
common_utils.wait_until_true(
lambda: self._get_num_spawned_procs() == procs_launched,
timeout=15,
exception=Exception("Didn't spawn expected number of processes"))
netns_cleanup.cleanup_network_namespaces(self.conf)
self.get_namespaces_p.stop()
namespaces_now = ip_lib.list_network_namespaces()
procs_after = self._get_num_spawned_procs()
self.assertEqual(procs_after, 0)
self.assertNotIn(l3_namespace, namespaces_now)
self.assertNotIn(dhcp_namespace, namespaces_now)
@staticmethod
def _launch_processes(namespaces):
"""Launch processes in the specified namespaces.
This function will spawn processes inside the given namespaces:
        - 1 parent + NUM_SUBPROCESSES children listening on TCP sockets
        - 1 parent + NUM_SUBPROCESSES children listening on unix sockets
        - 1 parent + NUM_SUBPROCESSES children listening on UDP sockets
First two sets of processes will process SIGTERM so when the parent
gets killed, it will kill all spawned children
The last set of processes will ignore SIGTERM. This will allow us
to test the cleanup functionality which will issue a SIGKILL
to all remaining processes after the SIGTERM attempt
"""
commands = [['python', process_spawn.__file__,
'-n', NUM_SUBPROCESSES,
'-f', n_const.IPv4,
'-p', n_const.PROTO_NAME_TCP,
'--noignore_sigterm',
'--parent_listen'],
['python', process_spawn.__file__,
'-n', NUM_SUBPROCESSES,
'-f', process_spawn.UNIX_FAMILY,
'-p', n_const.PROTO_NAME_TCP,
'--noignore_sigterm',
'--noparent_listen'],
['python', process_spawn.__file__,
'-n', NUM_SUBPROCESSES,
'-f', n_const.IPv4,
'-p', n_const.PROTO_NAME_UDP,
'--ignore_sigterm',
'--noparent_listen']]
proc_count = 0
for netns in namespaces:
ip = ip_lib.IPWrapper(namespace=netns)
for command in commands:
                # The total number of processes per command is the parent
                # process itself plus the number of subprocesses it spawns.
proc_count += (1 + NUM_SUBPROCESSES)
                # We need to pass the PATH environment variable so that the
                # python interpreter runs under the same virtual environment.
                # Otherwise, it won't find the necessary packages, such as
                # oslo_config.
ip.netns.execute(command,
addl_env={'PATH':
os.environ.get('PATH')})
return proc_count
@staticmethod
def _get_num_spawned_procs():
cmd = ['ps', '-f', '-u', 'root']
out = utils.execute(cmd, run_as_root=True)
        return sum(1 for line in out.splitlines() if 'process_spawn' in line)
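# For context: netns_cleanup.setup_conf() builds the configuration consumed by
# the neutron-netns-cleanup console script; an illustrative invocation (paths
# are examples only) is:
#   neutron-netns-cleanup --force --config-file /etc/neutron/neutron.conf
# The test below checks the registration and defaults of those CLI options.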
class TestNETNSCLIConfig(basetest.BaseTestCase):
def setup_config(self, args=None):
self.conf = netns_cleanup.setup_conf()
super(TestNETNSCLIConfig, self).setup_config(args=args)
def test_netns_opts_registration(self):
self.assertFalse(self.conf.force)
self.assertIsNone(self.conf.get('agent_type'))
# to unregister opts
self.conf.reset()
self.conf.unregister_opts(cmd.netns_opts)
| apache-2.0 | -7,499,183,389,434,799,000 | 40.490446 | 79 | 0.625576 | false |
Makeystreet/makeystreet | woot/apps/catalog/migrations/0139_auto__add_field_makey_is_staff_pick__add_field_makey_added_time_staff_.py | 1 | 70054 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Makey.is_staff_pick'
db.add_column(u'catalog_makey', 'is_staff_pick',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'Makey.added_time_staff_pick'
db.add_column(u'catalog_makey', 'added_time_staff_pick',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
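    # Per the frozen ORM below (the live models.py may differ), the columns
    # added above correspond to Makey fields along the lines of:
    #   is_staff_pick = models.BooleanField(default=False)
    #   added_time_staff_pick = models.DateTimeField(null=True, blank=True)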
def backwards(self, orm):
# Deleting field 'Makey.is_staff_pick'
db.delete_column(u'catalog_makey', 'is_staff_pick')
# Deleting field 'Makey.added_time_staff_pick'
db.delete_column(u'catalog_makey', 'added_time_staff_pick')
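    # Illustrative usage only: with South installed, this migration would
    # typically be applied with "python manage.py migrate catalog 0139" and
    # rolled back by migrating to the preceding migration (presumably 0138,
    # inferred from this file's numbering).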
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalog.article': {
'Meta': {'object_name': 'Article'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'new_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.NewUser']", 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {}),
'recommendation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.ArticleTag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.articleemail': {
'Meta': {'object_name': 'ArticleEmail'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'email_subscriptions'", 'null': 'True', 'to': "orm['catalog.ArticleTag']"}),
'temp_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.articletag': {
'Meta': {'object_name': 'ArticleTag'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url_snippet': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'catalog.cfistoreitem': {
'Meta': {'object_name': 'CfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.Product']", 'unique': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cfi_store_item_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeCfiStoreItem']", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'catalog.favoritemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'FavoriteMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'full_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_s3': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['auth.User']"})
},
'catalog.instructablestep': {
'Meta': {'ordering': "['-step']", 'object_name': 'InstructableStep'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iid': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'null': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'step': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '200', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'words': ('django.db.models.fields.IntegerField', [], {'default': '-1'})
},
'catalog.inventory': {
'Meta': {'unique_together': "(('part', 'space'),)", 'object_name': 'Inventory'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventory_part'", 'to': "orm['catalog.Product']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'space': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventory_space'", 'to': "orm['catalog.Space']"})
},
'catalog.like': {
'Meta': {'object_name': 'Like'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likearticle': {
'Meta': {'unique_together': "(('user', 'article'),)", 'object_name': 'LikeArticle'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Article']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecfistoreitem': {
'Meta': {'unique_together': "(('user', 'cfi_store_item'),)", 'object_name': 'LikeCfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'cfi_store_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.CfiStoreItem']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likechannel': {
'Meta': {'unique_together': "(('user', 'channel'),)", 'object_name': 'LikeChannel'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ArticleTag']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecomment': {
'Meta': {'unique_together': "(('user', 'comment'),)", 'object_name': 'LikeComment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Comment']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likelisting': {
'Meta': {'unique_together': "(('user', 'listing'),)", 'object_name': 'LikeListing'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'listing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Listing']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'LikeMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'makeylikes'", 'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likenote': {
'Meta': {'unique_together': "(('user', 'note'),)", 'object_name': 'LikeNote'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproduct': {
'Meta': {'unique_together': "(('user', 'product'),)", 'object_name': 'LikeProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductdescription': {
'Meta': {'unique_together': "(('user', 'product_description'),)", 'object_name': 'LikeProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproducttutorial': {
'Meta': {'unique_together': "(('user', 'tutorial', 'product'),)", 'object_name': 'LikeProductTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeshop': {
'Meta': {'unique_together': "(('user', 'shop'),)", 'object_name': 'LikeShop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likevideo': {
'Meta': {'unique_together': "(('user', 'video'),)", 'object_name': 'LikeVideo'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Video']"})
},
'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listing': {
'Meta': {'object_name': 'Listing'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'content': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.location': {
'Meta': {'object_name': 'Location'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'about': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'added_time_staff_pick': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'as_part': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_makey'", 'null': 'True', 'to': "orm['catalog.Product']"}),
'as_part_new': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_makey'", 'null': 'True', 'to': "orm['catalog.NewProduct']"}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'cover_pic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'credits': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'derived_from': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'forked_as'", 'null': 'True', 'to': "orm['catalog.Makey']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff_pick': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'made_in': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'makeys_made_in'", 'null': 'True', 'to': "orm['catalog.Space']"}),
'mentors': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modules_used': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'used_in'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_parts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_parts'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'removed_collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makey_removed'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyvideos'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Video']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'why': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'catalog.makeyimage': {
'Meta': {'object_name': 'MakeyImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey_id': ('django.db.models.fields.IntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.newinventory': {
'Meta': {'unique_together': "(('part', 'space'),)", 'object_name': 'NewInventory'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'new_inventory_part'", 'to': "orm['catalog.NewProduct']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'space': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'new_inventory_space'", 'to': "orm['catalog.Space']"})
},
'catalog.newproduct': {
'Meta': {'object_name': 'NewProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.newuser': {
'Meta': {'object_name': 'NewUser'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.note': {
'Meta': {'ordering': "['order']", 'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'makeys_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'tools_used'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'space_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tools_in_space'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Space']"}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['catalog.Tutorial']"})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.productreview': {
'Meta': {'object_name': 'ProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_reviews'", 'to': "orm['catalog.Product']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.shopreview': {
'Meta': {'object_name': 'ShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shop_reviews'", 'to': "orm['catalog.Shop']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.space': {
'Meta': {'object_name': 'Space'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_admins'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'date_of_founding': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'facebook': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'space_inventory'", 'symmetrical': 'False', 'through': "orm['catalog.Inventory']", 'to': "orm['catalog.Product']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'last_updated_external': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '15', 'decimal_places': '10', 'blank': 'True'}),
'logo': ('django.db.models.fields.URLField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '15', 'decimal_places': '10', 'blank': 'True'}),
'map_zoom_level': ('django.db.models.fields.IntegerField', [], {'default': '13'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_members'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'membership_fee': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_inventory': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'space_new_inventory'", 'symmetrical': 'False', 'through': "orm['catalog.NewInventory']", 'to': "orm['catalog.NewProduct']"}),
'new_members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_new_members'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_new_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'no_of_members': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'twitter': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'catalog.spacereview': {
'Meta': {'object_name': 'SpaceReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'space': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'space_reviews'", 'to': "orm['catalog.Space']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.textdocumentation': {
'Meta': {'object_name': 'TextDocumentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'text_documentations'", 'null': 'True', 'to': "orm['catalog.Makey']"}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.TextField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.upfile': {
'Meta': {'object_name': 'UpFile'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'filetype': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'files'", 'null': 'True', 'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userflags': {
'Meta': {'object_name': 'UserFlags'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_maker_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_makey_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userinteraction': {
'Meta': {'object_name': 'UserInteraction'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'event': ('django.db.models.fields.IntegerField', [], {}),
'event_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'aboutme': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'blog_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'college': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'following': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'followers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.UserProfile']"}),
'github_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructables_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'Bangalore, India'", 'max_length': '255'}),
'membership': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'patent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'profile_pic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'stackoverflow_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'twitter_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'yt_channel_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'})
},
'catalog.video': {
'Meta': {'object_name': 'Video'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.IntegerField', [], {}),
'thumb_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.votemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'VoteMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteproductreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteshopreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ShopReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.votespacereview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteSpaceReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.SpaceReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.votetutorial': {
'Meta': {'unique_together': "(('user', 'tutorial'),)", 'object_name': 'VoteTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['catalog'] | apache-2.0 | -7,416,513,312,088,065,000 | 83.607488 | 229 | 0.54261 | false |
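The frozen 'models' mapping above is South's serialized snapshot of the ORM, not live model code. As an illustration only, the 'catalog.userflags' entry corresponds roughly to the sketch below; field names and defaults are read off the frozen triples, while the class layout itself (South-era Django, so ForeignKey without on_delete) is an assumption.

# Hypothetical reconstruction of the model frozen as 'catalog.userflags' above.
from django.db import models
from django.contrib.auth.models import User

class UserFlags(models.Model):
    added_time = models.DateTimeField()
    is_enabled = models.BooleanField(default=True)
    score = models.IntegerField(default=0)
    show_maker_intro = models.BooleanField(default=True)
    show_makey_intro = models.BooleanField(default=True)
    user = models.ForeignKey(User)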
bhermansyah/DRR-datacenter | covid19/google_sheet.py | 1 | 11962 | import os
import json
import pickle
import pandas as pd
import numpy as np
import datetime
from collections import OrderedDict
from .utils import JSONEncoderCustom
from httplib2 import Http
from django.conf import settings
# google
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
SPREADSHEET_ID = '1F-AMEDtqK78EA6LYME2oOsWQsgJi4CT3V_G4Uo-47Rg'
RANGE_NAME = 'afg_covid19_stats'
def get_google_sheet(spreadsheet_id, range_name):
""" Retrieve sheet data using OAuth credentials and Google Python API. """
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
# Setup the Sheets API
creds = None
if os.path.exists(settings.GOOGLE_OAUTH2_TOKEN):
with open(settings.GOOGLE_OAUTH2_TOKEN, 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(settings.GOOGLE_OAUTH2_CLIENT_SECRETS_JSON, SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(settings.GOOGLE_OAUTH2_TOKEN, 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds, cache_discovery=False)
# Call the Sheets API
sheet = service.spreadsheets()
gsheet = sheet.values().get(spreadsheetId=spreadsheet_id, range=range_name).execute()
return gsheet
def replace_charts(value):
if value == u'\u2013':
return value.replace(u'\u2013', "0")
return value.replace(',', "")
def gsheet2df(gsheet):
header = gsheet.get('values', [])[0]
values = gsheet.get('values', [])[2:]
if not values:
print('No data found.')
else:
all_data = []
for col_id, col_name in enumerate(header):
column_data = []
provinceName = []
for row in values:
if col_name == 'Province':
provinceName.append(row[col_id].replace(' Province',''))
elif col_name == 'Cases' or col_name == 'Deaths' or col_name == 'Recoveries':
column_data.append(replace_charts(row[col_id]))
else:
column_data.append(row[col_id])
combineData = provinceName + column_data
ds = pd.Series(data=combineData, name=col_name.replace(" ", "_"))
all_data.append(ds)
df = pd.concat(all_data, axis=1)
return df
def Common(request, code):
gsheet = get_google_sheet(SPREADSHEET_ID, RANGE_NAME)
df = gsheet2df(gsheet)
Common = {}
Common['latestData'] = getLatestData(df, request, code)
Common['chart'] = Chart(df, request, code)
Common['total'] = getTotalEntireAfg(df, request, code)
return Common
def Chart(df, request, code):
ChartJson = {}
# LineChart
if code:
Cases = pd.to_numeric(df.Cases).where(df.Province==code).groupby([df.Date]).sum()
Deaths = pd.to_numeric(df.Deaths).where(df.Province==code).groupby([df.Date]).sum()
Recoveries = pd.to_numeric(df.Recoveries).where(df.Province==code).groupby([df.Date]).sum()
else:
Cases = pd.to_numeric(df.Cases).groupby([df.Date]).sum()
Deaths = pd.to_numeric(df.Deaths).groupby([df.Date]).sum()
Recoveries = pd.to_numeric(df.Recoveries).groupby([df.Date]).sum()
sortedCases = Cases.to_dict(OrderedDict)
sortedDeaths = Deaths.to_dict(OrderedDict)
sortedRecoveries = Recoveries.to_dict(OrderedDict)
diffCases = Cases.diff().fillna(0).reset_index(drop=True).to_dict(OrderedDict)
diffDeath = Deaths.diff().fillna(0).reset_index(drop=True).to_dict(OrderedDict)
diffRecovery = Recoveries.diff().fillna(0).reset_index(drop=True).to_dict(OrderedDict)
# percentGrowthCase = Cases.pct_change().fillna(0).reset_index(drop=True)
# percentGrowthDeath = Deaths.pct_change().fillna(0).reset_index(drop=True).to_dict(OrderedDict)
# percentGrowthRecovery = Recoveries.pct_change().fillna(0).reset_index(drop=True).to_dict(OrderedDict)
# print(percentGrowthCase)
datelist = []
series = {}
series['date'] = []
series['cases'] = {}
series['cases']['name'] = 'Confirmed'
series['cases']['data'] = []
for i in sortedCases:
series['date'].append(i)
series['cases']['data'].append(sortedCases[i])
series['deaths'] = {}
series['deaths']['name'] = 'Deaths'
series['deaths']['data'] = []
for i in sortedDeaths:
series['deaths']['data'].append(sortedDeaths[i])
series['recoveries'] = {}
series['recoveries']['name'] = 'Recovered'
series['recoveries']['data'] = []
for i in sortedRecoveries:
series['recoveries']['data'].append(sortedRecoveries[i])
series['GrowthCase'] = {}
series['GrowthCase']['name'] = 'New Cases'
series['GrowthCase']['data'] = []
for i in diffCases:
series['GrowthCase']['data'].append(diffCases[i])
series['GrowthDeath'] = {}
series['GrowthDeath']['name'] = 'Death Growth'
series['GrowthDeath']['data'] = []
for i in diffDeath:
series['GrowthDeath']['data'].append(diffDeath[i])
series['GrowthRecovery'] = {}
series['GrowthRecovery']['name'] = 'Recovery Growth'
series['GrowthRecovery']['data'] = []
for i in diffRecovery:
series['GrowthRecovery']['data'].append(diffRecovery[i])
# latest['Active_Cases'] = latest['Cases'] - (latest['Recoveries'] - latest['Deaths'])
ActiveCases = Cases - ( Recoveries - Deaths)
sortedActiveCases = ActiveCases.to_dict(OrderedDict)
series['activecase'] = {}
series['activecase']['name'] = 'Active Cases'
series['activecase']['data'] = []
for i in sortedActiveCases:
series['activecase']['data'].append(sortedActiveCases[i])
ChartJson['LineChart'] = series
# BarChart
barData = {}
latest = df.groupby('Province').nth(0).reset_index()
if code:
latest = latest.where(latest['Province']==code)
latest = latest[latest['Province'].notna()]
latest['Cases'] = latest['Cases'].astype(int)
latest['Deaths'] = latest['Deaths'].astype(int)
latest['Recoveries'] = latest['Recoveries'].astype(int)
latest['Active_Cases'] = latest['Cases'] - (latest['Recoveries'] - latest['Deaths'])
latest['Active_Cases'] = latest['Active_Cases'].astype(int)
barData['NewCasesData'] = {}
barData['NewCasesData']['id'] = 'bar_new_cases_data'
barData['NewCasesData']['name'] = series['date']
barData['NewCasesData']['data'] = series['GrowthCase']['data']
barData['CasesData'] = {}
barData['CasesData']['id'] = 'bar_cases_data'
barData['CasesData']['name'] = [v for k,v in latest['Province'].items()]
barData['CasesData']['data'] = [v for k,v in latest['Cases'].items()]
barData['ActiveCasesData'] = {}
barData['ActiveCasesData']['id'] = 'bar_active_cases_data'
barData['ActiveCasesData']['name'] = [v for k,v in latest['Province'].items()]
barData['ActiveCasesData']['data'] = [v for k,v in latest['Active_Cases'].items()]
ChartJson['BarChart'] = barData
pieData = {}
pieData['PercentageCasesData'] = {}
pieData['PercentageCasesData']['id'] = 'pie_pos_case_percent'
pieData['PercentageCasesData']['title'] = 'Positive Case Percentage'
pieData['PercentageCasesData']['data'] = [["Active Cases", sum(latest['Active_Cases'])], ["Recovered", sum(latest['Recoveries'])], ["Dead", sum(latest['Deaths'])]]
ChartJson['PieChart'] = pieData
barStackData = {}
barStackData['ProvPositiveCasesData'] = {}
barStackData['ProvPositiveCasesData']['id'] = 'bar_prov_pos_cases_data'
barStackData['ProvPositiveCasesData']['labels'] = [v for k,v in latest['Province'].items()]
barStackData['ProvPositiveCasesData']['data_val'] = {}
barStackData['ProvPositiveCasesData']['data_val']['active'] = {}
barStackData['ProvPositiveCasesData']['data_val']['active']['name'] = 'Active Cases'
barStackData['ProvPositiveCasesData']['data_val']['active']['data'] = [v for k,v in latest['Active_Cases'].items()]
barStackData['ProvPositiveCasesData']['data_val']['recovered'] = {}
barStackData['ProvPositiveCasesData']['data_val']['recovered']['name'] = 'Recovered'
barStackData['ProvPositiveCasesData']['data_val']['recovered']['data'] = [v for k,v in latest['Recoveries'].items()]
barStackData['ProvPositiveCasesData']['data_val']['death'] = {}
barStackData['ProvPositiveCasesData']['data_val']['death']['name'] = 'Dead'
barStackData['ProvPositiveCasesData']['data_val']['death']['data'] = [v for k,v in latest['Deaths'].items()]
ChartJson['BarStackChart'] = barStackData
return ChartJson
def getTotalEntireAfg(df, request, code):
GetTotal = {}
latest = df.groupby('Province').nth(0).reset_index()
previous = df.groupby('Province').nth(1).reset_index()
if code:
latest = latest[latest['Province'].isin([code])]
previous = previous[previous['Province'].isin([code])]
latest['Cases'] = latest['Cases'].astype(int)
latest['Deaths'] = latest['Deaths'].astype(int)
latest['Recoveries'] = latest['Recoveries'].astype(int)
latest['Active_Cases'] = latest['Cases'] - (latest['Recoveries'] - latest['Deaths'])
TotalCases = sum(latest['Cases'])
TotalDeaths = sum(latest['Deaths'])
TotalRecoveries = sum(latest['Recoveries'])
GetTotal['Confirmed Cases'] = [sum(latest['Cases'])]
GetTotal['Recovered'] = [sum(latest['Recoveries'])]
GetTotal['Deaths'] = [sum(latest['Deaths'])]
GetTotal['Active Cases'] = [sum(latest['Active_Cases'])]
GrowthCases = sum(latest['Cases']) - sum(previous['Cases'].astype(int))
GrowthDeaths = sum(latest['Deaths']) - sum(previous['Deaths'].astype(int))
GrowthRecoveries = sum(latest['Recoveries']) - sum(previous['Recoveries'].astype(int))
GrowthActiveCase = GrowthCases - (GrowthRecoveries - GrowthDeaths)
GetTotal['Confirmed Cases'].append({'GrowthCases': GrowthCases})
GetTotal['Deaths'].append({'GrowthDeaths': GrowthDeaths})
GetTotal['Recovered'].append({'GrowthRecoveries': GrowthRecoveries})
GetTotal['Active Cases'].append({'GrowthActiveCase': GrowthActiveCase})
return GetTotal
def getLatestData(df, request, code):
latest = df.groupby('Province').nth(0).reset_index()
previous = df.groupby('Province').nth(1).reset_index()
growthData = pd.merge(latest, previous, left_on='Province', right_on='Province', how='left').fillna(0)
latest['GrowthCases'] = growthData['Cases_x'].astype(int) - growthData['Cases_y'].astype(int)
latest['GrowthDeaths'] = growthData['Deaths_x'].astype(int) - growthData['Deaths_y'].astype(int)
latest['GrowthRecoveries'] = growthData['Recoveries_x'].astype(int) - growthData['Recoveries_y'].astype(int)
latest['GrowthActive_Cases'] = latest['GrowthCases'] - (latest['GrowthRecoveries'] - latest['GrowthDeaths'])
latest['Cases'] = latest['Cases'].astype(int)
latest['Deaths'] = latest['Deaths'].astype(int)
latest['Recoveries'] = latest['Recoveries'].astype(int)
latest['Active_Cases'] = latest['Cases'] - (latest['Recoveries'] - latest['Deaths'])
latest['Recovery_Rate'] = (latest['Recoveries'] / latest['Cases']) * 100
latest['Death_Rate'] = (latest['Deaths'] / latest['Cases']) * 100
GetLatestData = latest.to_dict(orient='records')
return GetLatestData
def JsonResponse(request):
response = {}
code = None
if 'code' in request.GET:
code = request.GET['code']
if request.GET['page'] == 'covid19':
response = Common(request, code)
response['googledata'] = json.dumps(response, cls=JSONEncoderCustom)
return response
| gpl-3.0 | 1,334,763,164,641,085,200 | 38.740864 | 167 | 0.643789 | false |
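Chart() above derives the daily "New Cases" series from the cumulative Cases column with a pandas groupby followed by diff(). A minimal standalone sketch of that transformation, using made-up numbers rather than the live sheet:

# Minimal sketch of the cumulative-to-daily transformation used in Chart();
# the rows below are invented for illustration.
import pandas as pd

df = pd.DataFrame({
    "Date": ["2020-03-24", "2020-03-25", "2020-03-26",
             "2020-03-24", "2020-03-25", "2020-03-26"],
    "Province": ["Herat", "Herat", "Herat", "Kabul", "Kabul", "Kabul"],
    "Cases": [40, 46, 53, 7, 9, 14],
})

cumulative = pd.to_numeric(df.Cases).groupby(df.Date).sum()  # country-wide totals per day
daily_new = cumulative.diff().fillna(0)                      # same idea as the 'New Cases' series

print(cumulative.to_dict())  # {'2020-03-24': 47, '2020-03-25': 55, '2020-03-26': 67}
print(daily_new.to_dict())   # {'2020-03-24': 0.0, '2020-03-25': 8.0, '2020-03-26': 12.0}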
stefano-pogliani/salt-extentions | tests/unit/modules/netbeans/fixtures.py | 1 | 1831 | import os
import shutil
import tempfile
import pytest
def mock_install_tree(request, version, script=None):
root = tempfile.mkdtemp()
# When done, delete the entire thing.
def cleanup():
shutil.rmtree(root)
request.addfinalizer(cleanup)
# Create expected structure.
bin = os.path.join(root, "bin")
locale = os.path.join(root, "nb", "core", "locale")
bundle = os.path.join(root, "tmp", "org", "netbeans", "core", "startup")
os.makedirs(bin)
os.makedirs(locale)
os.makedirs(bundle)
# Create structure for version info.
# -> org/netbeans/core/startup/Bundle_nb.properties
info = open(os.path.join(bundle, "Bundle_nb.properties"), 'w')
info.write("LBL_splash_window_title=Starting NetBeans IDE\n")
info.write(
"currentVersion=NetBeans IDE {0} (Build 2)\n".format(version)
)
info.close()
# Create files
# -> root/bin/netbeans*
main = open(os.path.join(bin, "netbeans"), 'w')
if script is None:
main.write("#!/bin/sh\n")
main.write("echo Testing script for Salt state\n")
else:
source = open(script)
main.write(source.read())
main.close()
os.chmod(os.path.join(bin, "netbeans"), 0o755)
# Pack version structure in JAR.
# -> root/nb/core/locale/core_nb.jar
os.system("jar -cf '{target}' -C {tmp} org".format(
target=os.path.join(locale, "core_nb.jar"),
tmp=os.path.join(root, "tmp")
))
# Remove structure for JAR.
shutil.rmtree(os.path.join(root, "tmp"))
return root
@pytest.fixture
def mock_salt_demo(request):
return mock_install_tree(request, "salt-demo")
@pytest.fixture
def mock_salt_test(request):
return mock_install_tree(request, "salt-test")
@pytest.fixture
def mock_salt_plugins(request):
return mock_install_tree(
request, "salt-plugins",
"extentions/tests/unit/modules/netbeans/mock-bin"
)
| bsd-3-clause | 227,841,479,396,325,220 | 24.082192 | 74 | 0.673949 | false |
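A sketch of a test that consumes the fixture above. It exercises only the mock tree built by mock_install_tree (the launcher script and the packed JAR), so no Salt module API is assumed:

# Sketch of a pytest test using the mock_salt_demo fixture defined above.
import os
import subprocess


def test_mock_tree_layout(mock_salt_demo):
    launcher = os.path.join(mock_salt_demo, "bin", "netbeans")
    assert os.access(launcher, os.X_OK)

    # The default launcher simply echoes a fixed line.
    output = subprocess.check_output([launcher])
    assert b"Testing script for Salt state" in output

    # The version bundle was packed into core_nb.jar by mock_install_tree.
    jar = os.path.join(mock_salt_demo, "nb", "core", "locale", "core_nb.jar")
    assert os.path.isfile(jar)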
okolisny/integration_tests | cfme/tests/configure/test_display_settings.py | 1 | 1849 | # -*- coding: utf-8 -*-
import pytest
from cfme import test_requirements
from cfme.configure.settings import visual
from cfme.fixtures import pytest_selenium as sel
from cfme.web_ui import ColorGroup, form_buttons
from cfme.utils.appliance import current_appliance
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils import version
from cfme.configure import settings # NOQA
pytestmark = [pytest.mark.tier(3),
test_requirements.settings]
colors = [
'Orange',
'Yellow',
'Green',
'Blue',
'ManageIQ-Blue',
'Black',
]
@pytest.yield_fixture(scope="module")
def set_timezone():
time_zone = visual.timezone
visual.timezone = "(GMT-10:00) Hawaii"
yield
visual.timezone = time_zone
def set_header_color(name):
cg = ColorGroup('Header Accent Color')
if cg.active != name:
cg.choose(name)
sel.click(form_buttons.save)
def is_header_color_changed(name):
cg = ColorGroup('Header Accent Color')
if cg.active == name:
return cg.status(name)
def reset_default_color(default_color):
cg = ColorGroup('Header Accent Color')
if cg.active != default_color:
cg.choose(default_color)
sel.click(form_buttons.save)
def test_timezone_setting(set_timezone):
""" Tests timezone setting
Metadata:
test_flag: visuals
"""
locator = version.pick({
version.LOWEST: ('//label[contains(@class,"control-label") and contains(., "Started On")]'
'/../div/p[contains(., "{}")]'.format("HST")),
'5.7': ('//label[contains(@class,"control-label") and contains(., "Started On")]'
'/../div/p[contains(., "{}")]'.format("-1000"))
})
navigate_to(current_appliance.server, 'DiagnosticsDetails')
assert sel.is_displayed(locator), "Timezone settings Failed"
| gpl-2.0 | 1,625,218,247,549,686,800 | 25.414286 | 98 | 0.652244 | false |
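set_timezone above follows a save/change/restore pattern around yield. The same pattern in a self-contained form, with a plain dict standing in for the real visual settings object (the names here are illustrative, not part of the cfme API):

# Generic save/change/restore fixture in the style of set_timezone above.
import pytest

settings = {"timezone": "(GMT+00:00) UTC"}


@pytest.fixture(scope="module")
def hawaii_timezone():
    saved = settings["timezone"]
    settings["timezone"] = "(GMT-10:00) Hawaii"
    yield settings
    settings["timezone"] = saved


def test_timezone_is_overridden(hawaii_timezone):
    assert hawaii_timezone["timezone"].endswith("Hawaii")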
suyashphadtare/test | erpnext/manufacturing/doctype/production_planning_tool/production_planning_tool.py | 1 | 15406 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt, cint, nowdate, add_days, comma_and
from frappe import msgprint, _
from frappe.model.document import Document
class ProductionPlanningTool(Document):
def __init__(self, arg1, arg2=None):
super(ProductionPlanningTool, self).__init__(arg1, arg2)
self.item_dict = {}
def get_so_details(self, so):
"""Pull other details from so"""
so = frappe.db.sql("""select transaction_date, customer, grand_total
from tabSales_Order where name = %s""", so, as_dict = 1)
ret = {
'sales_order_date': so and so[0]['transaction_date'] or '',
'customer' : so[0]['customer'] or '',
'grand_total': so[0]['grand_total']
}
return ret
def get_item_details(self, item_code):
""" Pull other item details from item master"""
item = frappe.db.sql("""select description, stock_uom, default_bom
from tabItem where name = %s""", item_code, as_dict =1)
ret = {
'description' : item and item[0]['description'],
'stock_uom' : item and item[0]['stock_uom'],
'bom_no' : item and item[0]['default_bom']
}
return ret
def clear_so_table(self):
self.set('pp_so_details', [])
def clear_item_table(self):
self.set('pp_details', [])
def validate_company(self):
if not self.company:
frappe.throw(_("Please enter Company"))
def get_open_sales_orders(self):
""" Pull sales orders which are pending to deliver based on criteria selected"""
so_filter = item_filter = ""
if self.from_date:
so_filter += ' and so.transaction_date >= "' + self.from_date + '"'
if self.to_date:
so_filter += ' and so.transaction_date <= "' + self.to_date + '"'
if self.customer:
so_filter += ' and so.customer = "' + self.customer + '"'
if self.fg_item:
item_filter += ' and item.name = "' + self.fg_item + '"'
open_so = frappe.db.sql("""
select distinct so.name, so.transaction_date, so.customer, so.grand_total
from tabSales_Order so, tabSales_Order_Item so_item
where so_item.parent = so.name
and so.docstatus = 1 and so.status != "Stopped"
and so.company = %s
and ifnull(so_item.qty, 0) > ifnull(so_item.delivered_qty, 0) %s
and (exists (select name from tabItem item where item.name=so_item.item_code
and (ifnull(item.is_pro_applicable, 'No') = 'Yes'
or ifnull(item.is_sub_contracted_item, 'No') = 'Yes') %s)
or exists (select name from tabPacked_Item pi
where pi.parent = so.name and pi.parent_item = so_item.item_code
and exists (select name from tabItem item where item.name=pi.item_code
and (ifnull(item.is_pro_applicable, 'No') = 'Yes'
or ifnull(item.is_sub_contracted_item, 'No') = 'Yes') %s)))
""" % ('%s', so_filter, item_filter, item_filter), self.company, as_dict=1)
self.add_so_in_table(open_so)
def add_so_in_table(self, open_so):
""" Add sales orders in the table"""
self.clear_so_table()
so_list = [d.sales_order for d in self.get('pp_so_details')]
for r in open_so:
if cstr(r['name']) not in so_list:
pp_so = self.append('pp_so_details', {})
pp_so.sales_order = r['name']
pp_so.sales_order_date = cstr(r['transaction_date'])
pp_so.customer = cstr(r['customer'])
pp_so.grand_total = flt(r['grand_total'])
def get_items_from_so(self):
""" Pull items from Sales Order, only proction item
and subcontracted item will be pulled from Packing item
and add items in the table
"""
items = self.get_items()
self.add_items(items)
def get_items(self):
so_list = filter(None, [d.sales_order for d in self.get('pp_so_details')])
if not so_list:
msgprint(_("Please enter sales order in the above table"))
return []
item_condition = ""
if self.fg_item:
item_condition = ' and so_item.item_code = "' + self.fg_item + '"'
items = frappe.db.sql("""select distinct parent, item_code, warehouse,
(qty - ifnull(delivered_qty, 0)) as pending_qty
from tabSales_Order_Item so_item
where parent in (%s) and docstatus = 1 and ifnull(qty, 0) > ifnull(delivered_qty, 0)
and exists (select * from tabItem item where item.name=so_item.item_code
and (ifnull(item.is_pro_applicable, 'No') = 'Yes'
or ifnull(item.is_sub_contracted_item, 'No') = 'Yes')) %s""" % \
(", ".join(["%s"] * len(so_list)), item_condition), tuple(so_list), as_dict=1)
if self.fg_item:
item_condition = ' and pi.item_code = "' + self.fg_item + '"'
packed_items = frappe.db.sql("""select distinct pi.parent, pi.item_code, pi.warehouse as warehouse,
(((so_item.qty - ifnull(so_item.delivered_qty, 0)) * pi.qty) / so_item.qty)
as pending_qty
from tabSales_Order_Item so_item, tabPacked_Item pi
where so_item.parent = pi.parent and so_item.docstatus = 1
and pi.parent_item = so_item.item_code
and so_item.parent in (%s) and ifnull(so_item.qty, 0) > ifnull(so_item.delivered_qty, 0)
and exists (select * from tabItem item where item.name=pi.item_code
and (ifnull(item.is_pro_applicable, 'No') = 'Yes'
or ifnull(item.is_sub_contracted_item, 'No') = 'Yes')) %s""" % \
(", ".join(["%s"] * len(so_list)), item_condition), tuple(so_list), as_dict=1)
return items + packed_items
def add_items(self, items):
self.clear_item_table()
for p in items:
item_details = frappe.db.sql("""select description, stock_uom, default_bom
from tabItem where name=%s""", p['item_code'])
pi = self.append('pp_details', {})
pi.sales_order = p['parent']
pi.warehouse = p['warehouse']
pi.item_code = p['item_code']
pi.description = item_details and item_details[0][0] or ''
pi.stock_uom = item_details and item_details[0][1] or ''
pi.bom_no = item_details and item_details[0][2] or ''
pi.so_pending_qty = flt(p['pending_qty'])
pi.planned_qty = flt(p['pending_qty'])
def validate_data(self):
self.validate_company()
for d in self.get('pp_details'):
self.validate_bom_no(d)
if not flt(d.planned_qty):
frappe.throw(_("Please enter Planned Qty for Item {0} at row {1}").format(d.item_code, d.idx))
def validate_bom_no(self, d):
if not d.bom_no:
frappe.throw(_("Please enter BOM for Item {0} at row {1}").format(d.item_code, d.idx))
else:
bom = frappe.db.sql("""select name from tabBOM where name = %s and item = %s
and docstatus = 1 and is_active = 1""",
(d.bom_no, d.item_code), as_dict = 1)
if not bom:
frappe.throw(_("Incorrect or Inactive BOM {0} for Item {1} at row {2}").format(d.bom_no, d.item_code, d.idx))
def raise_production_order(self):
"""It will raise production order (Draft) for all distinct FG items"""
self.validate_data()
from erpnext.utilities.transaction_base import validate_uom_is_integer
validate_uom_is_integer(self, "stock_uom", "planned_qty")
items = self.get_distinct_items_and_boms()[1]
pro = self.create_production_order(items)
if pro:
pro = ["""<a href="#Form/Production Order/%s" target="_blank">%s</a>""" % \
(p, p) for p in pro]
msgprint(_("{0} created").format(comma_and(pro)))
else :
msgprint(_("No Production Orders created"))
def get_distinct_items_and_boms(self):
""" Club similar BOM and item for processing
bom_dict {
bom_no: ['sales_order', 'qty']
}
"""
item_dict, bom_dict = {}, {}
for d in self.get("pp_details"):
bom_dict.setdefault(d.bom_no, []).append([d.sales_order, flt(d.planned_qty)])
item_dict[(d.item_code, d.sales_order, d.warehouse)] = {
"production_item" : d.item_code,
"sales_order" : d.sales_order,
"qty" : flt(item_dict.get((d.item_code, d.sales_order, d.warehouse),
{}).get("qty")) + flt(d.planned_qty),
"bom_no" : d.bom_no,
"description" : d.description,
"stock_uom" : d.stock_uom,
"company" : self.company,
"wip_warehouse" : "",
"fg_warehouse" : d.warehouse,
"status" : "Draft",
}
return bom_dict, item_dict
def create_production_order(self, items):
"""Create production order. Called from Production Planning Tool"""
from erpnext.manufacturing.doctype.production_order.production_order import OverProductionError
pro_list = []
for key in items:
pro = frappe.new_doc("Production Order")
pro.update(items[key])
frappe.flags.mute_messages = True
try:
pro.insert()
pro_list.append(pro.name)
except OverProductionError:
pass
frappe.flags.mute_messages = False
return pro_list
def download_raw_materials(self):
""" Create csv data for required raw material to produce finished goods"""
self.validate_data()
bom_dict = self.get_distinct_items_and_boms()[0]
self.get_raw_materials(bom_dict)
return self.get_csv()
def get_raw_materials(self, bom_dict):
""" Get raw materials considering sub-assembly items
{
"item_code": [qty_required, description, stock_uom, min_order_qty]
}
"""
item_list = []
for bom, so_wise_qty in bom_dict.items():
bom_wise_item_details = {}
if self.use_multi_level_bom:
# get all raw materials with sub assembly childs
# Did not use qty_consumed_per_unit in the query, as it leads to rounding loss
for d in frappe.db.sql("""select fb.item_code,
ifnull(sum(ifnull(fb.qty, 0)/ifnull(bom.quantity, 1)), 0) as qty,
fb.description, fb.stock_uom, it.min_order_qty
from tabBOM_Explosion_Item fb, tabBOM bom, tabItem it
where bom.name = fb.parent and it.name = fb.item_code and ifnull(it.is_pro_applicable, 'No') = 'No'
and ifnull(it.is_sub_contracted_item, 'No') = 'No'
and fb.docstatus<2 and bom.name=%s
group by item_code, stock_uom""", bom, as_dict=1):
bom_wise_item_details.setdefault(d.item_code, d)
else:
# Get all raw materials considering SA items as raw materials,
# so no children of SA items
for d in frappe.db.sql("""select bom_item.item_code,
ifnull(sum(ifnull(bom_item.qty, 0)/ifnull(bom.quantity, 1)), 0) as qty,
bom_item.description, bom_item.stock_uom, item.min_order_qty
from tabBOM_Item bom_item, tabBOM bom, tabItem item
where bom.name = bom_item.parent and bom.name = %s and bom_item.docstatus < 2
and bom_item.item_code = item.name
group by item_code""", bom, as_dict=1):
bom_wise_item_details.setdefault(d.item_code, d)
for item, item_details in bom_wise_item_details.items():
for so_qty in so_wise_qty:
item_list.append([item, flt(item_details.qty) * so_qty[1], item_details.description,
item_details.stock_uom, item_details.min_order_qty, so_qty[0]])
self.make_items_dict(item_list)
def make_items_dict(self, item_list):
for i in item_list:
self.item_dict.setdefault(i[0], []).append([flt(i[1]), i[2], i[3], i[4], i[5]])
def get_csv(self):
item_list = [['Item Code', 'Description', 'Stock UOM', 'Required Qty', 'Warehouse',
'Quantity Requested for Purchase', 'Ordered Qty', 'Actual Qty']]
for item in self.item_dict:
total_qty = sum([flt(d[0]) for d in self.item_dict[item]])
item_list.append([item, self.item_dict[item][0][1], self.item_dict[item][0][2], total_qty])
item_qty = frappe.db.sql("""select warehouse, indented_qty, ordered_qty, actual_qty
from tabBin where item_code = %s""", item, as_dict=1)
i_qty, o_qty, a_qty = 0, 0, 0
for w in item_qty:
i_qty, o_qty, a_qty = i_qty + flt(w.indented_qty), o_qty + flt(w.ordered_qty), a_qty + flt(w.actual_qty)
item_list.append(['', '', '', '', w.warehouse, flt(w.indented_qty),
flt(w.ordered_qty), flt(w.actual_qty)])
if item_qty:
item_list.append(['', '', '', '', 'Total', i_qty, o_qty, a_qty])
return item_list
def raise_purchase_request(self):
"""
Raise Material Request if projected qty is less than qty required
Requested qty should be shortage qty considering minimum order qty
"""
self.validate_data()
if not self.purchase_request_for_warehouse:
frappe.throw(_("Please enter Warehouse for which Material Request will be raised"))
bom_dict = self.get_distinct_items_and_boms()[0]
self.get_raw_materials(bom_dict)
if self.item_dict:
self.insert_purchase_request()
def get_requested_items(self):
item_projected_qty = self.get_projected_qty()
items_to_be_requested = frappe._dict()
for item, so_item_qty in self.item_dict.items():
requested_qty = 0
total_qty = sum([flt(d[0]) for d in so_item_qty])
if total_qty > item_projected_qty.get(item, 0):
# shortage
requested_qty = total_qty - flt(item_projected_qty.get(item))
# consider minimum order qty
requested_qty = requested_qty > flt(so_item_qty[0][3]) and \
requested_qty or flt(so_item_qty[0][3])
# distribute requested qty SO wise
for item_details in so_item_qty:
if requested_qty:
sales_order = item_details[4] or "No Sales Order"
if requested_qty <= item_details[0]:
adjusted_qty = requested_qty
else:
adjusted_qty = item_details[0]
items_to_be_requested.setdefault(item, {}).setdefault(sales_order, 0)
items_to_be_requested[item][sales_order] += adjusted_qty
requested_qty -= adjusted_qty
else:
break
# requested qty >= total so qty, due to minimum order qty
if requested_qty:
items_to_be_requested.setdefault(item, {}).setdefault("No Sales Order", 0)
items_to_be_requested[item]["No Sales Order"] += requested_qty
return items_to_be_requested
def get_projected_qty(self):
items = self.item_dict.keys()
item_projected_qty = frappe.db.sql("""select item_code, sum(projected_qty)
from tabBin where item_code in (%s) and warehouse=%s group by item_code""" %
(", ".join(["%s"]*len(items)), '%s'), tuple(items + [self.purchase_request_for_warehouse]))
return dict(item_projected_qty)
def insert_purchase_request(self):
items_to_be_requested = self.get_requested_items()
from erpnext.accounts.utils import get_fiscal_year
fiscal_year = get_fiscal_year(nowdate())[0]
purchase_request_list = []
if items_to_be_requested:
for item in items_to_be_requested:
item_wrapper = frappe.get_doc("Item", item)
pr_doc = frappe.new_doc("Material Request")
pr_doc.update({
"transaction_date": nowdate(),
"status": "Draft",
"company": self.company,
"fiscal_year": fiscal_year,
"requested_by": frappe.session.user,
"material_request_type": "Purchase"
})
for sales_order, requested_qty in items_to_be_requested[item].items():
pr_doc.append("indent_details", {
"doctype": "Material Request Item",
"__islocal": 1,
"item_code": item,
"item_name": item_wrapper.item_name,
"description": item_wrapper.description,
"uom": item_wrapper.stock_uom,
"item_group": item_wrapper.item_group,
"brand": item_wrapper.brand,
"qty": requested_qty,
"schedule_date": add_days(nowdate(), cint(item_wrapper.lead_time_days)),
"warehouse": self.purchase_request_for_warehouse,
"sales_order_no": sales_order if sales_order!="No Sales Order" else None
})
pr_doc.ignore_permissions = 1
pr_doc.submit()
purchase_request_list.append(pr_doc.name)
if purchase_request_list:
pur_req = ["""<a href="#Form/Material Request/%s" target="_blank">%s</a>""" % \
(p, p) for p in purchase_request_list]
msgprint(_("Material Requests {0} created").format(comma_and(pur_req)))
else:
msgprint(_("Nothing to request"))
| agpl-3.0 | -4,334,333,021,142,938,000 | 37.039506 | 113 | 0.652084 | false |
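get_distinct_items_and_boms() clubs planned quantities per BOM and per (item, sales order, warehouse) key before production orders are raised. A standalone sketch of that accumulation over hypothetical planning rows:

# Standalone sketch of the clubbing done in get_distinct_items_and_boms();
# the rows below are hypothetical planning-tool lines.
rows = [
    {"item_code": "FG-001", "sales_order": "SO-0001", "warehouse": "Stores",
     "bom_no": "BOM-FG-001", "planned_qty": 10},
    {"item_code": "FG-001", "sales_order": "SO-0001", "warehouse": "Stores",
     "bom_no": "BOM-FG-001", "planned_qty": 5},
    {"item_code": "FG-002", "sales_order": "SO-0002", "warehouse": "Stores",
     "bom_no": "BOM-FG-002", "planned_qty": 3},
]

bom_dict, item_dict = {}, {}
for d in rows:
    # one entry per BOM, carrying (sales_order, qty) pairs
    bom_dict.setdefault(d["bom_no"], []).append([d["sales_order"], d["planned_qty"]])
    # quantities clubbed per (item, sales order, warehouse)
    key = (d["item_code"], d["sales_order"], d["warehouse"])
    item_dict[key] = item_dict.get(key, 0) + d["planned_qty"]

print(bom_dict)   # {'BOM-FG-001': [['SO-0001', 10], ['SO-0001', 5]], 'BOM-FG-002': [['SO-0002', 3]]}
print(item_dict)  # {('FG-001', 'SO-0001', 'Stores'): 15, ('FG-002', 'SO-0002', 'Stores'): 3}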
chronicle/api-samples-python | common/chronicle_auth.py | 1 | 2712 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Helper functions to access Chronicle APIs using OAuth 2.0.
Background information:
https://google-auth.readthedocs.io/en/latest/user-guide.html#service-account-private-key-files
https://developers.google.com/identity/protocols/oauth2#serviceaccount
Details about using the Google-auth library with the Requests library:
https://github.com/googleapis/google-auth-library-python/blob/master/google/auth/transport/requests.py
https://requests.readthedocs.io
"""
import argparse
import pathlib
from typing import Optional, Union
from google.auth.transport import requests
from google.oauth2 import service_account
DEFAULT_CREDENTIALS_FILE = pathlib.Path.home() / ".chronicle_credentials.json"
AUTHORIZATION_SCOPES = ["https://www.googleapis.com/auth/chronicle-backstory"]
def initialize_http_session(
credentials_file_path: Optional[Union[str, pathlib.Path]]
) -> requests.AuthorizedSession:
"""Initializes an authorized HTTP session, based on the given credentials.
Args:
credentials_file_path: Absolute or relative path to a JSON file containing
the private OAuth 2.0 credentials of a Google Cloud Platform service
account. Optional - the default is ".chronicle_credentials.json" in the
user's home directory. Keep it secret, keep it safe.
Returns:
HTTP session object to send authorized requests and receive responses.
Raises:
OSError: Failed to read the given file, e.g. not found, no read access
(https://docs.python.org/library/exceptions.html#os-exceptions).
ValueError: Invalid file contents.
"""
if not credentials_file_path:
credentials_file_path = DEFAULT_CREDENTIALS_FILE
credentials = service_account.Credentials.from_service_account_file(
str(credentials_file_path), scopes=AUTHORIZATION_SCOPES)
return requests.AuthorizedSession(credentials)
def add_argument_credentials_file(parser: argparse.ArgumentParser):
"""Adds a shared command-line argument to all the sample modules."""
parser.add_argument(
"-c",
"--credentials_file",
type=str,
help=f"credentials file path (default: '{DEFAULT_CREDENTIALS_FILE}')")
| apache-2.0 | -6,668,738,066,000,931,000 | 36.666667 | 102 | 0.760324 | false |
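A sketch of how a sample script might wire up the two helpers above, assuming the module is importable as common.chronicle_auth; the endpoint URL is a placeholder, and only initialize_http_session() and add_argument_credentials_file() come from the module itself.

# Hypothetical caller for the helpers above; replace the URL with a real endpoint.
import argparse

from common import chronicle_auth

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    chronicle_auth.add_argument_credentials_file(parser)
    args = parser.parse_args()

    session = chronicle_auth.initialize_http_session(args.credentials_file)
    response = session.request("GET", "https://<api-host>/<version>/<endpoint>")
    response.raise_for_status()
    print(response.text)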
radiasoft/optics | tests/bending_magnet_srw.py | 1 | 14635 | """
Example of a bending magnet emitting in the x-ray region with multi-electron emission (by convolution)
"""
import numpy as np
import inspect
# Import elements from common Glossary
from optics.beam.electron_beam_pencil import ElectronBeamPencil, ElectronBeam
from optics.magnetic_structures.bending_magnet import BendingMagnet
from optics.beamline.optical_elements.lens.lens_ideal import LensIdeal
from optics.beamline.optical_elements.image_plane import ImagePlane
from optics.beamline.beamline import Beamline
from optics.beamline.beamline_position import BeamlinePosition
#import SRW driver and particular settings of the glossary elements used
from code_drivers.SRW.SRW_driver import SRWDriver
from code_drivers.SRW.SRW_bending_magnet_setting import SRWBendingMagnetSetting
from code_drivers.SRW.SRW_beamline_component_setting import SRWBeamlineComponentSetting
def run_bending_magnet_srw(example_index): # example_index=0 is infrared example, example_index=1 is xrays example
###################################################################################################
# Main idea: abstract definition of the setting (electron beam, radiation source, beamline)
# We want to put everything in generic classes that are independent of a specific implementation.
# These are basically the information a scientist would need to physically build the beamline.
#
# Then, we need extra information/settings to perform a calculation. And the extra settings
# vary for different programs. We provide this extra information by attaching program-dependent
# "settings".
###################################################################################################
#
# 1) define first the electron beam
#
if example_index == 0:
electron_beam = ElectronBeamPencil(energy_in_GeV=3.0,energy_spread=0.89e-3,current=0.5)
# electron_beam = ElectronBeam(energy_in_GeV=3.0,
# energy_spread=0.89e-03,
# current=0.5,
# electrons_per_bunch=500,
# moment_xx = (127.346e-6)**2 ,
# moment_xxp = 0. ,
# moment_xpxp = 100*(91.88e-6)**2,
# moment_yy = (92.3093e-6)**2 ,
# moment_yyp = 0 ,
# moment_ypyp = 100*(7.94e-6)**2 )
else:
#electron_beam = ElectronBeamPencil(energy_in_GeV=6.0,energy_spread=0.89e-3,current=0.2)
electron_beam = ElectronBeam(energy_in_GeV=6.0,
energy_spread=0.89e-03,
current=0.2,
electrons_per_bunch=500,
moment_xx = (77.9e-06)**2 ,
moment_xxp = 0. ,
moment_xpxp = (110.9e-06)**2,
moment_yy = (12.9e-06)**2 ,
moment_yyp = 0 ,
moment_ypyp = (0.5e-06)**2 )
#
# 2) define the magnetic structure
#
if example_index == 0:
#bending_magnet = BendingMagnet(radius=2.25,magnetic_field=0.4,length=4.0)
bending_magnet = BendingMagnet(radius=25.01,magnetic_field=0.4,length=4.0)
else:
bending_magnet = BendingMagnet(radius=23.2655,magnetic_field=0.86,length=0.5)
# Attach SRW bending magnet settings.
#TODO: angular acceptance is used to define screen size.
# NOTE: Maybe angular acceptance is generic and should move to BendingMagnet or Source class??
srw_bending_magnet_setting = SRWBendingMagnetSetting()
if example_index == 0:
horizontal_angle = 0.1
vertical_angle = 0.02
energy = 0.5*0.123984
else:
horizontal_angle = 1e-3
vertical_angle = 0.4e-3
energy = 15000.0
srw_bending_magnet_setting.set_relPrec(0.003)
srw_bending_magnet_setting.set_sampFactNxNyForProp(0.0035)
srw_bending_magnet_setting.set_acceptance_angle(horizontal_angle=horizontal_angle,
vertical_angle=vertical_angle)
bending_magnet.add_settings(srw_bending_magnet_setting)
#
# 3) define beamline containing the optical elements
# In this case, create a beamline that only has one lens attached plus an image (detector) plane
#
#
beamline = Beamline()
# First create the lens.
if example_index == 0:
lens_focal_length = 2.5
else:
lens_focal_length = 12.5
lens = LensIdeal("focus lens",
focal_x=lens_focal_length,
focal_y=lens_focal_length)
# Specify the position of the lens (could set extra parameters for: off-axis and inclination)
# lens_position=p verifies lens equation (1/F = 1/p + 1/q, and p=q for 1:1 magnification)
lens_position = BeamlinePosition(2*lens_focal_length)
# Set settings for SRW.
# These are settings that depend on the "driver" to use.
# If no special settings are set the driver will use its default settings.
lens_setting = SRWBeamlineComponentSetting()
if example_index == 0:
#for SRW experts:
#lens_setting.from_list([1, 1, 1., 0, 0, 1., 2., 1., 2., 0, 0, 0])
lens_setting.set_auto_resize_before_propagation(1) #[0]: Auto-Resize (1) or not (0) Before propagation
lens_setting.set_auto_resize_after_propagation(1) #[1]: Auto-Resize (1) or not (0) After propagation
lens_setting.set_auto_resize_relative_precision(1.) #[2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
lens_setting.set_allow_semi_analytical_phase_treatment(0) #[3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
lens_setting.set_resize_on_ft_side(0) #[4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
lens_setting.set_resize_factor_horizontal(1.) #[5]: Horizontal Range modification factor at Resizing (1. means no modification)
lens_setting.set_resize_resolution_horizontal(2.) #[6]: Horizontal Resolution modification factor at Resizing
lens_setting.set_resize_factor_vertical(1.) #[7]: Vertical Range modification factor at Resizing
lens_setting.set_resize_resolution_vertical(2.) #[8]: Vertical Resolution modification factor at Resizing
else:
#lens_setting.from_list([0, 0, 1., 0, 0, 1., 5., 1., 8., 0, 0, 0])
lens_setting.set_auto_resize_before_propagation(0)
lens_setting.set_auto_resize_after_propagation(0)
lens_setting.set_auto_resize_relative_precision(1.)
lens_setting.set_allow_semi_analytical_phase_treatment(0)
lens_setting.set_resize_on_ft_side(0)
lens_setting.set_resize_factor_horizontal(1.)
lens_setting.set_resize_resolution_horizontal(5.)
lens_setting.set_resize_factor_vertical(1.)
lens_setting.set_resize_resolution_vertical(8.)
lens.add_settings(lens_setting)
# We could also _simultaneously_ add settings for shadow here:
# lens_setting = ShadowBeamlineComponentSetting()
# lens_setting.setSOMETHING(..)
# lens.addSettings(lens_setting)
# The lens would be configured _simultaneously_ for SRW and SHADOW.
# Attach the component at its position to the beamline.
beamline.attach_component_at(lens, lens_position)
# Second create the image plane.
plane = ImagePlane("Image screen")
plane_setting = SRWBeamlineComponentSetting()
if example_index == 0:
pass #these are default values, so no need to set
#plane_setting.from_list([1, 1, 1., 0, 0, 1., 1., 1., 1., 0, 0, 0])
#plane_setting.set_auto_resize_before_propagation(0)
#plane_setting.set_auto_resize_after_propagation(0)
#plane_setting.set_auto_resize_relative_precision(1.)
#plane_setting.set_allow_semi_analytical_phase_treatment(0)
#plane_setting.set_resize_on_ft_side(0)
#plane_setting.set_resize_factor_horizontal(1.)
#plane_setting.set_resize_resolution_horizontal(1.)
#plane_setting.set_resize_factor_vertical(1.)
#plane_setting.set_resize_resolution_vertical(1.)
else:
#define non-default settings for the propagation in the drift space
#note that although in SRW the driftSpace is a component, in the present beamline
#definition it is not necessary to be defined, as it is automatically added by the
#driver. However, we set here the settings of the drift space that is inserted upstream
#of the "plane" element
drift_space_settings = SRWBeamlineComponentSetting()
drift_space_settings.from_list([0, 0, 1., 1, 0, 1., 1., 1., 1., 0, 0, 0])
plane_setting.set_drift_space_settings(drift_space_settings)
#plane_setting.from_list([0, 0, 1., 0, 0, 4., 1.,1.5, 1., 0, 0, 0])
plane_setting.set_auto_resize_before_propagation(0)
plane_setting.set_auto_resize_after_propagation(0)
plane_setting.set_auto_resize_relative_precision(1.)
plane_setting.set_allow_semi_analytical_phase_treatment(0)
plane_setting.set_resize_on_ft_side(0)
plane_setting.set_resize_factor_horizontal(4.)
plane_setting.set_resize_resolution_horizontal(1.)
plane_setting.set_resize_factor_vertical(1.5)
plane_setting.set_resize_resolution_vertical(1.)
# Attach a screen/image plane.
# Absolute position = distance_source_lens + distance_lens_plane =
# 2*lens_focal_length + 2*lens_focal_lengh = 4*lens_focal_length
plane.add_settings(plane_setting)
plane_position = BeamlinePosition(4*lens_focal_length)
beamline.attach_component_at(plane, plane_position)
#
# Print a summary of the elements used
#
components = [electron_beam,bending_magnet,lens]
print("===========================================================================================================")
for component_index,component in enumerate(components):
tmp = component.to_dictionary()
#tmp = electron_beam.to_dictionary()
print("Component index %d:"%component_index,inspect.getmodule(component))
for i,var in enumerate(tmp):
print(" %20s = %10.5f %5s %s"%(var,tmp[var][0],tmp[var][1],tmp[var][2]))
print("===========================================================================================================")
#
# Calculate the radiation (i.e., run the codes). It returns a native SRWLWfr()
#
# Specify to use SRW.
driver = SRWDriver()
srw_wavefront = driver.calculate_radiation(electron_beam=electron_beam,
magnetic_structure=bending_magnet,
beamline=beamline,
energy_min=energy,
energy_max=energy)
#
# extract the intensity
#
intensity, dim_x, dim_y = driver.calculate_intensity(srw_wavefront)
# # Do some tests.
# # assert abs(1.7063003e+09 - intensity[10, 10])<1e+6, \
# # 'Quick verification of intensity value'
# flux = intensity.sum() * (dim_x[1]-dim_x[0]) * (dim_y[1]-dim_y[0])
# print("Total flux = %10.5e photons/s/.1%%bw"%flux)
# if example_index == 0:
# assert abs(2.40966e+08 - flux)<1e+3, \
# 'Quick verification of intensity value'
# else:
# assert abs(2.14704e+07 - flux)<1e+3, \
# 'Quick verification of intensity value'
# Calculate phases.
#phase = driver.calculate_phase(srw_wavefront)
# # Do some tests.
# checksum = np.sum( np.abs(srw_wavefront.arEx) ) + np.abs( np.sum(srw_wavefront.arEy) )
# print("checksum is: ",checksum)
#
# if example_index == 0:
# assert np.abs(checksum - 1.1845644e+10) < 1e3, "Test electric field checksum"
# else:
# assert np.abs(checksum - 1.53895e+13) < 1e8, "Test electric field checksum"
return srw_wavefront, dim_x, dim_y, intensity
def test_bending_magnet_infrared_srw():
srw_wavefront, dim_x, dim_y, intensity = run_bending_magnet_srw(0)
flux = intensity.sum() * (dim_x[1]-dim_x[0]) * (dim_y[1]-dim_y[0])
print("Total flux = %10.5e photons/s/.1%%bw"%flux)
assert abs(2.40966e+08 - flux)<1e+3, \
'Quick verification of intensity value'
checksum = np.sum( np.abs(srw_wavefront.arEx) ) + np.abs( np.sum(srw_wavefront.arEy) )
print("checksum is: ",checksum)
assert np.abs(checksum - 1.1845644e+10) < 1e3, "Test electric field checksum"
return srw_wavefront, dim_x, dim_y, intensity
def test_bending_magnet_xrays_srw():
srw_wavefront, dim_x, dim_y, intensity = run_bending_magnet_srw(1)
flux = intensity.sum() * (dim_x[1]-dim_x[0]) * (dim_y[1]-dim_y[0])
print("Total flux = %10.5e photons/s/.1%%bw"%flux)
assert abs(2.14704e+07 - flux)<1e+3, \
'Quick verification of intensity value'
checksum = np.sum( np.abs(srw_wavefront.arEx) ) + np.abs( np.sum(srw_wavefront.arEy) )
print("checksum is: ",checksum)
assert np.abs(checksum - 1.53895e+13) < 1e8, "Test electric field checksum"
return srw_wavefront, dim_x, dim_y, intensity
if __name__ == "__main__":
import matplotlib.pyplot as plt
import time
example_index = 0 # 0=infrared example, 1=x-ray ESRF example
if example_index == 0:
srw_wavefront, dim_x, dim_y, intensity = test_bending_magnet_infrared_srw()
print('Calling plots with array shape: ',intensity.shape,'...')
t0_main = time.time()
plt.pcolormesh(dim_x,dim_y,intensity.transpose())
plt.title("Real space for infrared example")
plt.colorbar()
print('done in', round(time.time() - t0_main), 's')
plt.show()
else:
srw_wavefront, dim_x, dim_y, intensity = test_bending_magnet_xrays_srw()
print('Calling plots with array shape: ',intensity.shape,'...')
t0_main = time.time()
plt.pcolormesh(dim_x,dim_y,intensity.transpose())
plt.title("Real space for xrays example")
plt.colorbar()
print('done in', round(time.time() - t0_main), 's')
plt.show() | apache-2.0 | -377,875,962,136,480,100 | 42.820359 | 181 | 0.601367 | false |
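A quick sanity check on the source parameters used above: with the standard approximation E_c[keV] ≈ 0.665 · E²[GeV] · B[T], the x-ray example's 6 GeV / 0.86 T bending magnet has a critical energy of about 20.6 keV, comfortably above the 15 keV photon energy requested in the example.

# Back-of-envelope check of the source parameters above: bending magnet
# critical energy, E_c [keV] ~= 0.665 * E[GeV]**2 * B[T] (standard approximation).
def critical_energy_keV(energy_GeV, field_T):
    return 0.665 * energy_GeV**2 * field_T

print(critical_energy_keV(6.0, 0.86))  # ~20.6 keV for the x-ray example
print(critical_energy_keV(3.0, 0.4))   # ~2.4 keV for the infrared example's ring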
dc3-plaso/plaso | plaso/analysis/unique_domains_visited.py | 1 | 2742 | # -*- coding: utf-8 -*-
"""A plugin to generate a list of domains visited."""
import sys
if sys.version_info[0] < 3:
import urlparse
else:
from urllib import parse as urlparse # pylint: disable=no-name-in-module
from plaso.analysis import interface
from plaso.analysis import manager
from plaso.containers import reports
class UniqueDomainsVisitedPlugin(interface.AnalysisPlugin):
"""A plugin to generate a list all domains visited.
This plugin will extract domains from browser history events extracted by
Plaso. The list produced can be used to quickly determine if there has been
a visit to a site of interest, for example, a known phishing site.
"""
NAME = u'unique_domains_visited'
# Indicate that we can run this plugin during regular extraction.
ENABLE_IN_EXTRACTION = True
_DATATYPES = frozenset([
u'chrome:history:file_downloaded', u'chrome:history:page_visited',
u'firefox:places:page_visited', u'firefox:downloads:download',
u'macosx:lsquarantine', u'msiecf:redirected', u'msiecf:url',
u'msie:webcache:container', u'opera:history', u'safari:history:visit'])
def __init__(self):
"""Initializes the domains visited plugin."""
super(UniqueDomainsVisitedPlugin, self).__init__()
self._domains = []
def ExamineEvent(self, mediator, event):
"""Analyzes an event and extracts domains from it.
We only evaluate straightforward web history events, not visits which can
be inferred by TypedURLs, cookies or other means.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine.
"""
if event.data_type not in self._DATATYPES:
return
url = getattr(event, u'url', None)
if url is None:
return
parsed_url = urlparse.urlparse(url)
domain = getattr(parsed_url, u'netloc', None)
if domain in self._domains:
# We've already found an event containing this domain.
return
self._domains.append(domain)
def CompileReport(self, mediator):
"""Compiles an analysis report.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
Returns:
The analysis report (instance of AnalysisReport).
"""
lines_of_text = [u'Listing domains visited by all users']
for domain in sorted(self._domains):
lines_of_text.append(domain)
lines_of_text.append(u'')
report_text = u'\n'.join(lines_of_text)
return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
manager.AnalysisPluginManager.RegisterPlugin(UniqueDomainsVisitedPlugin)
| apache-2.0 | -1,282,014,892,101,884,400 | 32.036145 | 77 | 0.710795 | false |
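The per-event work in ExamineEvent() reduces to collecting distinct urlparse netloc values from web-history URLs. A standalone sketch of that step over made-up history entries:

# Standalone sketch of the domain extraction the plugin performs per event;
# the URLs are invented sample history entries.
try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2

urls = [
    u'https://www.example.com/login?next=/home',
    u'https://www.example.com/favicon.ico',
    u'http://downloads.example.org/setup.exe',
]

domains = []
for url in urls:
    domain = urlparse(url).netloc
    if domain and domain not in domains:
        domains.append(domain)

print(domains)  # ['www.example.com', 'downloads.example.org']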
jhseu/tensorflow | tensorflow/python/kernel_tests/cwise_ops_test.py | 1 | 48021 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for coefficient-wise operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x**y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
_LT = lambda x, y: x < y
_LE = lambda x, y: x <= y
_GT = lambda x, y: x > y
_GE = lambda x, y: x >= y
_AND = lambda x, y: x & y
_OR = lambda x, y: x | y
_XOR = lambda x, y: x ^ y
_INV = lambda x: ~x
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), x_values
def _default_tolerance(dtype):
"""Returns a sensible default tolerance for comparing results of a given type.
Args:
dtype: A datatype.
"""
if dtype == np.float16:
return 5e-3
elif dtype in (np.float32, np.complex64):
return 1e-3
elif dtype in (np.float64, np.complex128):
return 1e-5
else:
return None # Fail fast for unexpected types
class ComparisonOpTest(test.TestCase):
def _compareScalar(self, func, x, y, dtype):
with test_util.use_gpu():
out = func(
ops.convert_to_tensor(np.array([x]).astype(dtype)),
ops.convert_to_tensor(np.array([y]).astype(dtype)))
ret = self.evaluate(out)
return ret[0]
def testScalarCompareScalar(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
data = [-1, 0, 1]
for t in dtypes:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)
self.assertEqual(
self._compareScalar(math_ops.less_equal, x, y, t), x <= y)
self.assertEqual(
self._compareScalar(math_ops.greater, x, y, t), x > y)
self.assertEqual(
self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
for t in [np.complex64, np.complex128]:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
def _compare(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with test_util.use_gpu():
out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
def testTensorCompareTensor(self):
x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
y = np.linspace(20, -10, 6).reshape(1, 3, 2)
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(xt, yt, np.less, math_ops.less)
self._compare(xt, yt, np.less_equal, math_ops.less_equal)
self._compare(xt, yt, np.greater, math_ops.greater)
self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
# Complex types do not support ordering but do support equality tests.
for t in [np.complex64, np.complex128]:
xt = x.astype(t)
xt -= 1j * xt
yt = y.astype(t)
yt -= 1j * yt
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
if dtype in (np.complex64, np.complex128):
x -= 1j * x
y -= 1j * y
self._compare(x, y, np_func, tf_func)
self._compare(y, x, np_func, tf_func)
def _testBCastByFunc(self, np_func, tf_func, include_complex=False):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
]
if include_complex:
dtypes.extend([np.complex64, np.complex128])
for (xs, ys) in shapes:
for dtype in dtypes:
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
self._testBCastByFunc(np.less, math_ops.less)
def testBCastLessEqual(self):
self._testBCastByFunc(np.less_equal, math_ops.less_equal)
def testBCastGreater(self):
self._testBCastByFunc(np.greater, math_ops.greater)
def testBCastGreaterEqual(self):
self._testBCastByFunc(np.greater_equal, math_ops.greater_equal)
def testBCastEqual(self):
self._testBCastByFunc(np.equal, math_ops.equal, include_complex=True)
def testBCastNotEqual(self):
self._testBCastByFunc(
np.not_equal, math_ops.not_equal, include_complex=True)
def testShapeMismatch(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
funcs = [
math_ops.less, math_ops.less_equal, math_ops.greater,
math_ops.greater_equal, math_ops.equal, math_ops.not_equal
]
x = np.arange(0, 10).reshape([2, 5])
y = np.arange(0, 10).reshape([5, 2])
for t in dtypes:
for f in funcs:
with self.assertRaisesRegexp(
(ValueError, errors.InvalidArgumentError),
"Incompatible shapes|Dimensions must be equal"):
f(x.astype(t), y.astype(t))
class LogicalOpTest(test.TestCase):
def _compareBinary(self, x, y, np_func, tf_func, use_gpu=False):
np_ans = np_func(x, y)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_val = self.evaluate(out)
self.assertEqual(out.dtype, dtypes_lib.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def _not(self, x, use_gpu=False):
np_ans = np.logical_not(x)
with test_util.device(use_gpu=use_gpu):
out = math_ops.logical_not(ops.convert_to_tensor(x))
tf_val = self.evaluate(out)
self.assertEqual(out.dtype, dtypes_lib.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
def testScalar(self):
data = [np.array([True]), np.array([False])]
for use_gpu in [True, False]:
for x in data:
self._not(x, use_gpu)
for x in data:
for y in data:
self._compareBinary(x, y, np.logical_and, math_ops.logical_and,
use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor,
use_gpu)
def testTensor(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for use_gpu in [True, False]:
self._not(x, use_gpu)
self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)
def testBCast(self):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
for (xs, ys) in shapes:
x = np.random.randint(0, 2, np.prod(xs)).astype(np.bool).reshape(xs)
y = np.random.randint(0, 2, np.prod(ys)).astype(np.bool).reshape(ys)
for use_gpu in [True, False]:
self._compareBinary(x, y, np.logical_and, math_ops.logical_and, use_gpu)
self._compareBinary(x, y, np.logical_or, math_ops.logical_or, use_gpu)
self._compareBinary(x, y, np.logical_xor, math_ops.logical_xor, use_gpu)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
x = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
y = np.random.randint(0, 2, 6).astype(np.bool).reshape(3, 2, 1)
for f in [math_ops.logical_and, math_ops.logical_or, math_ops.logical_xor]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
f(x, y)
@test_util.run_deprecated_v1
def testUsingAsPythonValueFails(self):
# Ensure that we raise an error when the user attempts to treat a
# `Tensor` as a Python `bool`.
b = constant_op.constant(False)
with self.assertRaises(TypeError):
if b:
pass
x = constant_op.constant(3)
y = constant_op.constant(4)
with self.assertRaises(TypeError):
if x > y:
pass
z = constant_op.constant(7)
# The chained comparison should fail because Python computes `x <
# y` and short-circuits the comparison with `z` if it is `False`.
with self.assertRaises(TypeError):
_ = x < y < z
class SelectOpTest(test.TestCase):
def _compare(self, fn, c, x, y, use_gpu):
np_ans = np.where(c, x, y)
with test_util.device(use_gpu=use_gpu):
out = fn(c, x, y)
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self,
fn,
c,
x,
y,
numeric_gradient_type=None,
x_init_value=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = fn(c, inx, iny)
s = list(np.shape(c))
if x_init_value is None:
x_init_value = x
if x.shape != y.shape:
x_init_value = np.broadcast_to(y, x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, out, s, x_init_value=x_init_value)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = fn(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, s, outf, s, x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, fn, c, x, y, numeric_gradient_type=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = fn(c, inx, iny)
s = list(np.shape(c))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, s, out, s, x_init_value=x, delta=1.0)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = fn(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inyf, s, outf, s, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testScalar(self, fn):
c = True
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testScalar(self):
self._testScalar(array_ops.where)
self._testScalar(array_ops.where_v2)
def _testScalarBroadcast(self, fn, c, x, y):
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testScalarBroadcast(self):
c = True
# where_v2 only
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 2) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 2) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(3, 2) * 100
self._testScalarBroadcast(array_ops.where_v2, c, x, y)
self._testScalarBroadcast(array_ops.where_v2, c, y, x)
def _testBasic(self, fn):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testBasic(self):
self._testBasic(array_ops.where)
self._testBasic(array_ops.where_v2)
def _testBasicBroadcast(self, fn, c, x, y):
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(fn, c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(fn, c, xt, yt, use_gpu=True)
def testBasicBroadcast(self):
c0 = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
c1 = np.random.randint(0, 2, 2).astype(np.bool).reshape(1, 1, 2)
c2 = np.random.randint(0, 2, 3).astype(np.bool).reshape(1, 3, 1)
c3 = np.random.randint(0, 2, 1).astype(np.bool).reshape(1, 1, 1)
for c in [c0, c1, c2, c3]:
# where_v2 only
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 2) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 2) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(3, 2) * 100
self._testBasicBroadcast(array_ops.where_v2, c, x, y)
self._testBasicBroadcast(array_ops.where_v2, c, y, x)
def _testGradients(self, fn):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 2) * 100
for t in [np.float16, np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(fn, c, xt, yt, np.float)
self._compareGradientY(fn, c, xt, yt, np.float)
else:
self._compareGradientX(fn, c, xt, yt)
self._compareGradientY(fn, c, xt, yt)
@test_util.run_deprecated_v1
def testGradients(self):
self._testGradients(array_ops.where)
self._testGradients(array_ops.where_v2)
@test_util.run_deprecated_v1
def testGradientsBroadcast(self):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
for t in [np.float32, np.float64]:
# where_v2 only
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 3, 1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1, 2) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(1, 2) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(3, 2) * 100
self._compareGradientX(array_ops.where_v2, c, x.astype(t), y.astype(t))
def _testShapeMismatch(self, fn):
c = np.random.randint(0, 2, 6).astype(np.bool).reshape(1, 3, 2)
x = np.random.rand(1, 3, 2) * 100
y = np.random.rand(2, 5, 3) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
fn(c, xt, yt)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
self._testShapeMismatch(array_ops.where)
self._testShapeMismatch(array_ops.where_v2)
def _testEmptyTensor(self, fn):
c = np.random.randint(0, 3, 0).astype(np.bool).reshape(1, 3, 0)
x = np.random.rand(1, 3, 0) * 100
y = np.random.rand(1, 3, 0) * 100
z_expected = np.zeros((1, 3, 0), dtype=np.float32)
with self.cached_session():
xt = x.astype(np.float32)
yt = y.astype(np.float32)
z = fn(c, xt, yt).eval()
self.assertAllEqual(z_expected, z)
@test_util.run_deprecated_v1
def testEmptyTensor(self):
self._testEmptyTensor(array_ops.where)
self._testEmptyTensor(array_ops.where_v2)
def _testNan(self, fn):
with self.cached_session():
for c in False, True:
for a in 7.0, np.nan:
for b in 5.0, np.nan:
x = fn(c, a, b).eval()
y = a if c else b
self.assertEqual(np.isnan(x), np.isnan(y))
@test_util.run_deprecated_v1
def testNan(self):
"""Verify that nans don't propagate where they shouldn't."""
self._testNan(array_ops.where)
self._testNan(array_ops.where_v2)
class BatchSelectOpTest(test.TestCase):
"""Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+."""
def _compare(self, c, x, y, use_gpu):
np_ans = np.dstack(
[x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose(
[2, 0, 1])
with test_util.device(use_gpu=use_gpu):
out = array_ops.where(c, x, y)
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
def _compareGradientX(self, c, x, y, numeric_gradient_type=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = array_ops.where(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, out, s, x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = array_ops.where(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, s, outf, s, x_init_value=xf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, c, x, y, numeric_gradient_type=None):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = array_ops.where(c, inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, s, out, s, x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = array_ops.where(c, inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inyf, s, outf, s, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def testBasic(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(c, xt, yt, use_gpu=False)
if t in [np.float16, np.float32, np.float64]:
self._compare(c, xt, yt, use_gpu=True)
@test_util.run_deprecated_v1
def testGradients(self):
c = np.random.randint(0, 2, 16).astype(np.bool)
x = np.random.rand(16, 2, 8) * 100
y = np.random.rand(16, 2, 8) * 100
for t in [np.float16, np.float32, np.float64]:
xt = x.astype(t)
yt = y.astype(t)
if t == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
self._compareGradientX(c, xt, yt, np.float)
self._compareGradientY(c, xt, yt, np.float)
else:
self._compareGradientX(c, xt, yt)
self._compareGradientY(c, xt, yt)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
c = np.random.randint(0, 2, 8).astype(np.bool)
x = np.random.rand(16, 3, 2) * 100
y = np.random.rand(16, 3, 2) * 100
for t in [
np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]:
xt = x.astype(t)
yt = y.astype(t)
with self.assertRaises(ValueError):
array_ops.where(c, xt, yt)
class MinMaxOpTest(test.TestCase):
def _compare(self, x, y, use_gpu):
np_min, np_max = np.minimum(x, y), np.maximum(x, y)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
omin, omax = math_ops.minimum(inx, iny), math_ops.maximum(inx, iny)
tf_min, tf_max = self.evaluate([omin, omax])
self.assertAllEqual(np_min, tf_min)
self.assertAllEqual(np_max, tf_max)
def testBasic(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(1, 3, 2) * 100.
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testDifferentShapes(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(2) * 100. # should broadcast
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
self._compare(x.astype(t), y.astype(t), use_gpu=False)
self._compare(x.astype(t), y.astype(t), use_gpu=True)
def testScalar(self):
x = np.random.rand(1, 3, 2) * 100.
y = np.random.rand(1).item() * 100. # should broadcast
# dropped np.float64, int64 because TF automatically converts to 32 bit
for t in [np.float32, np.int32]:
self._compare(x.astype(t), t(y), use_gpu=False)
self._compare(x.astype(t), t(y), use_gpu=True)
def _compareGradientX(self, func, x, y):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, s, out, s, x_init_value=x)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _compareGradientY(self, func, x, y):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = func(inx, iny)
s = list(np.shape(x))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, s, out, s, x_init_value=y)
if x.dtype == np.float16:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float32:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
elif x.dtype == np.float64:
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
@test_util.run_deprecated_v1
def testGradients(self):
x = np.random.rand(1, 3, 2) * 100.
# ensure x != y
y = x + (np.random.randint(2, size=x.shape) - .5) * 2 # -1 or +1
self._compareGradientX(math_ops.maximum, x, y)
self._compareGradientY(math_ops.maximum, x, y)
self._compareGradientX(math_ops.minimum, x, y)
self._compareGradientY(math_ops.minimum, x, y)
class MathOpsOverloadTest(test.TestCase):
def _computeTensorAndLiteral(self, x, y, dtype, func):
with test_util.force_cpu():
inx = ops.convert_to_tensor(x, dtype=dtype)
z = func(inx, y) # Should use __add__, __sub__, etc.
return self.evaluate(z)
def _computeLiteralAndTensor(self, x, y, dtype, func):
with test_util.force_cpu():
iny = ops.convert_to_tensor(y, dtype=dtype)
z = func(x, iny) # Should use __radd__, __rsub__, etc.
return self.evaluate(z)
def _compareBinary(self, x, y, dtype, np_func, tf_func):
np_ans = np_func(x, y).astype(dtype.as_numpy_dtype)
self.assertAllClose(np_ans,
self._computeTensorAndLiteral(x, y, dtype, tf_func))
self.assertAllClose(np_ans,
self._computeLiteralAndTensor(x, y, dtype, tf_func))
def _compareUnary(self, x, dtype, np_func, tf_func):
np_ans = np_func(x).astype(dtype.as_numpy_dtype)
with test_util.force_cpu():
self.assertAllClose(
np_ans, self.evaluate(tf_func(ops.convert_to_tensor(x, dtype=dtype))))
def testOverload(self):
dtypes = [
dtypes_lib.float16,
dtypes_lib.float32,
dtypes_lib.float64,
dtypes_lib.int32,
dtypes_lib.int64,
dtypes_lib.complex64,
dtypes_lib.complex128,
]
funcs = [
(np.add, _ADD),
(np.subtract, _SUB),
(np.multiply, _MUL),
(np.power, _POW),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
for dtype in dtypes:
for np_func, tf_func in funcs:
if dtype in (dtypes_lib.complex64,
dtypes_lib.complex128) and tf_func == _FLOORDIV:
continue # floordiv makes no sense for complex
self._compareBinary(10, 5, dtype, np_func, tf_func)
# Mod only works for int32 and int64.
for dtype in [dtypes_lib.int32, dtypes_lib.int64]:
self._compareBinary(10, 3, dtype, np.mod, _MOD)
def testOverloadComparisons(self):
dtypes = [
dtypes_lib.float16,
dtypes_lib.float32,
dtypes_lib.float64,
dtypes_lib.int32,
dtypes_lib.int64,
]
funcs = [
(np.less, _LT),
(np.less_equal, _LE),
(np.greater, _GT),
(np.greater_equal, _GE),
]
for dtype in dtypes:
for np_func, tf_func in funcs:
self._compareBinary(10, 5, dtype, np_func, tf_func)
logical_funcs = [(np.logical_and, _AND), (np.logical_or, _OR),
(np.logical_xor, _XOR), (np.equal, math_ops.equal),
(np.not_equal, math_ops.not_equal)]
for np_func, tf_func in logical_funcs:
self._compareBinary(True, False, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(True, True, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(False, False, dtypes_lib.bool, np_func, tf_func)
self._compareBinary(False, True, dtypes_lib.bool, np_func, tf_func)
self._compareBinary([True, True, False, False],
[True, False, True, False], dtypes_lib.bool, np_func,
tf_func)
self._compareUnary(True, dtypes_lib.bool, np.logical_not, _INV)
self._compareUnary(False, dtypes_lib.bool, np.logical_not, _INV)
self._compareUnary([True, False], dtypes_lib.bool, np.logical_not, _INV)
class IsFiniteInfNanTest(test.TestCase):
def _compare(self, x, use_gpu):
np_finite, np_inf, np_nan = np.isfinite(x), np.isinf(x), np.isnan(x)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(x)
ofinite, oinf, onan = math_ops.is_finite(inx), math_ops.is_inf(
inx), math_ops.is_nan(inx)
tf_finite, tf_inf, tf_nan = self.evaluate([ofinite, oinf, onan])
self.assertAllEqual(np_inf, tf_inf)
self.assertAllEqual(np_nan, tf_nan)
self.assertAllEqual(np_finite, tf_finite)
self.assertShapeEqual(np_inf, oinf)
self.assertShapeEqual(np_nan, onan)
self.assertShapeEqual(np_finite, ofinite)
def _testDtype(self, dtype):
fi = np.finfo(dtype)
data = np.array([
0, -1, 1, fi.resolution, -fi.resolution, fi.min, fi.max, -np.inf,
np.inf, np.nan
]).astype(dtype)
self._compare(data, use_gpu=False)
self._compare(data, use_gpu=True)
def testHalf(self):
self._testDtype(np.float16)
def testFloat(self):
self._testDtype(np.float32)
def testDouble(self):
self._testDtype(np.float64)
def testSqrt(self):
for dtype in [np.float16, np.float32, np.float64]:
fi = np.finfo(dtype)
for size in [1, 3, 4, 7, 8, 63, 64, 65]:
# For float32 Eigen uses Carmack's fast vectorized sqrt algorithm.
# It is not accurate for very large arguments, so we test for
# fi.max/100 instead of fi.max here.
for value in [fi.min, -2, -1, 0, fi.tiny, 1, 2, 1000, fi.max / 100]:
x = np.full((size,), value, dtype=dtype)
np_y = np.sqrt(x)
np_nan = np.isnan(np_y)
with test_util.use_gpu():
tf_y = math_ops.sqrt(x)
tf_nan = math_ops.is_nan(tf_y)
if value < 0:
self.assertAllEqual(np_nan, self.evaluate(tf_nan))
else:
self.assertAllCloseAccordingToType(np_y, self.evaluate(tf_y))
class RoundingTest(test.TestCase):
def _compare_values(self, x, y=None):
y = np.rint(x) if y is None else np.asarray(y)
tf_rint = math_ops.rint(x)
np_rint = self.evaluate(tf_rint)
self.assertAllEqual(y, np_rint)
self.assertShapeEqual(y, tf_rint)
def _compare(self, x):
np_floor, np_ceil = np.floor(x), np.ceil(x)
inx = ops.convert_to_tensor(x)
ofloor, oceil = math_ops.floor(inx), math_ops.ceil(inx)
tf_floor, tf_ceil = self.evaluate([ofloor, oceil])
self.assertAllEqual(np_floor, tf_floor)
self.assertAllEqual(np_ceil, tf_ceil)
self.assertShapeEqual(np_floor, ofloor)
self.assertShapeEqual(np_ceil, oceil)
def _testDtype(self, dtype):
data = (np.arange(-3, 3) / 4.).reshape(1, 3, 2).astype(dtype)
self._compare(data)
# TODO: rint op is not supported for float16
if dtype is np.float16:
return
self._compare_values(data)
x = [0.5, 0.5000001]
y = [0.0, 1.0]
self._compare_values(x, y=y)
# numpy example
x = [-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]
y = [-2., -2., -0., 0., 2., 2., 2.]
self._compare_values(x, y=y)
def testTypes(self):
self.skipTest("b/131162241")
for dtype in [np.float16, np.float32, np.float64]:
self._testDtype(dtype)
class ComplexMakeRealImagTest(test.TestCase):
def _compareMake(self, real, imag, use_gpu):
np_ans = real + (1j) * imag
with test_util.device(use_gpu=use_gpu):
real = ops.convert_to_tensor(real)
imag = ops.convert_to_tensor(imag)
tf_ans = math_ops.complex(real, imag)
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
def testMake(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
for use_gpu in [False, True]:
self._compareMake(real, imag, use_gpu)
self._compareMake(real, 12.0, use_gpu)
self._compareMake(23.0, imag, use_gpu)
def testRealImagNumericType(self):
for use_gpu in [True, False]:
for value in [1., 1j, 1. + 1j]:
np_real, np_imag = np.real(value), np.imag(value)
with test_util.device(use_gpu=use_gpu):
tf_real = math_ops.real(value)
tf_imag = math_ops.imag(value)
self.assertAllEqual(np_real, self.evaluate(tf_real))
self.assertAllEqual(np_imag, self.evaluate(tf_imag))
def _compareRealImag(self, cplx, use_gpu):
np_real, np_imag = np.real(cplx), np.imag(cplx)
np_zeros = np_real * 0
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_real = math_ops.real(inx)
tf_imag = math_ops.imag(inx)
tf_real_real = math_ops.real(tf_real)
tf_imag_real = math_ops.imag(tf_real)
self.assertAllEqual(np_real, self.evaluate(tf_real))
self.assertAllEqual(np_imag, self.evaluate(tf_imag))
self.assertAllEqual(np_real, self.evaluate(tf_real_real))
self.assertAllEqual(np_zeros, self.evaluate(tf_imag_real))
def testRealImag64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + 1j * imag
self._compareRealImag(cplx, use_gpu=False)
self._compareRealImag(cplx, use_gpu=True)
def testRealImag128(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
cplx = real + 1j * imag
self._compareRealImag(cplx, use_gpu=False)
self._compareRealImag(cplx, use_gpu=True)
def _compareAngle(self, cplx, use_gpu):
np_angle = np.angle(cplx)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_angle = math_ops.angle(inx)
tf_angle_val = self.evaluate(tf_angle)
self.assertAllClose(np_angle, tf_angle_val)
self.assertShapeEqual(np_angle, tf_angle)
def testAngle64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + 1j * imag
self._compareAngle(cplx, use_gpu=False)
self._compareAngle(cplx, use_gpu=True)
def testAngle(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
cplx = real + 1j * imag
self._compareAngle(cplx, use_gpu=False)
self._compareAngle(cplx, use_gpu=True)
@test_util.run_deprecated_v1
def testRealReal(self):
for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float32,
dtypes_lib.float64):
x = array_ops.placeholder(dtype)
y = math_ops.real(x)
self.assertEqual(x, y)
def _compareConj(self, cplx, use_gpu):
np_ans = np.conj(cplx)
with test_util.device(use_gpu=use_gpu):
inx = ops.convert_to_tensor(cplx)
tf_conj = math_ops.conj(inx)
tf_ans = self.evaluate(tf_conj)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, tf_conj)
def testConj64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
cplx = real + 1j * imag
self._compareConj(cplx, use_gpu=False)
self._compareConj(cplx, use_gpu=True)
def testConj128(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
cplx = real + 1j * imag
self._compareConj(cplx, use_gpu=False)
self._compareConj(cplx, use_gpu=True)
@test_util.run_deprecated_v1
def testConjReal(self):
for dtype in (dtypes_lib.int32, dtypes_lib.int64, dtypes_lib.float16,
dtypes_lib.float32, dtypes_lib.float64):
x = array_ops.placeholder(dtype)
y = math_ops.conj(x)
self.assertEqual(x, y)
@test_util.run_deprecated_v1
def testConjString(self):
x = array_ops.placeholder(dtypes_lib.string)
with self.assertRaisesRegexp(TypeError,
r"Expected numeric or variant tensor"):
math_ops.conj(x)
def _compareGradient(self, x):
# x[:, 0] is real, x[:, 1] is imag. We combine real and imag into
# complex numbers. Then, we extract real and imag parts and
    # compute the squared sum. This is obviously the same as sum(real
# * real) + sum(imag * imag). We just want to make sure the
# gradient function is checked.
with self.cached_session():
inx = ops.convert_to_tensor(x)
real, imag = array_ops.split(value=inx, num_or_size_splits=2, axis=1)
real, imag = array_ops.reshape(real, [-1]), array_ops.reshape(imag, [-1])
cplx = math_ops.complex(real, imag)
cplx = math_ops.conj(cplx)
loss = math_ops.reduce_sum(math_ops.square(
math_ops.real(cplx))) + math_ops.reduce_sum(
math_ops.square(math_ops.imag(cplx)))
epsilon = 1e-3
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
def _compareBroadcastGradient(self, x):
x_ = ops.convert_to_tensor(x)
epsilon = 1e-3
with self.cached_session():
for args in [(x_, 0.), (0., x_)]:
z = math_ops.reduce_sum(math_ops.abs(math_ops.complex(*args)))
jacob_t, jacob_n = gradient_checker.compute_gradient(
x_, list(x.shape), z, [1], x_init_value=x, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
@test_util.run_deprecated_v1
def testGradient(self):
# complex64
data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float32)
self._compareGradient(data)
self._compareBroadcastGradient(data)
# complex128
data = np.arange(1, 2, 0.10).reshape([5, 2]).astype(np.float64)
self._compareGradient(data)
def _compareMulGradient(self, data):
# data is a float matrix of shape [n, 4]. data[:, 0], data[:, 1],
# data[:, 2], data[:, 3] are real parts of x, imaginary parts of
# x, real parts of y and imaginary parts of y.
with self.cached_session():
inp = ops.convert_to_tensor(data)
xr, xi, yr, yi = array_ops.split(value=inp, num_or_size_splits=4, axis=1)
def vec(x): # Reshape to a vector
return array_ops.reshape(x, [-1])
xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)
def cplx(r, i): # Combine to a complex vector
return math_ops.complex(r, i)
x, y = cplx(xr, xi), cplx(yr, yi)
# z is x times y in complex plane.
z = x * y
# Defines the loss function as the sum of all coefficients of z.
loss = math_ops.reduce_sum(math_ops.real(z) + math_ops.imag(z))
epsilon = 0.005
jacob_t, jacob_n = gradient_checker.compute_gradient(
inp, list(data.shape), loss, [1], x_init_value=data, delta=epsilon)
self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
@test_util.run_deprecated_v1
def testMulGradient(self):
data = np.arange(1, 2, 0.125).reshape([2, 4]).astype(np.float32)
self._compareMulGradient(data)
class PolyvalTest(test.TestCase):
def _runtest(self, dtype, degree):
x = np.random.rand(2, 2).astype(dtype)
coeffs = [np.random.rand(2, 2).astype(dtype) for _ in range(degree + 1)]
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, self.evaluate(tf_val))
def testSimple(self):
for dtype in [
np.int32, np.float32, np.float64, np.complex64, np.complex128
]:
for degree in range(5):
self._runtest(dtype, degree)
def testBroadcast(self):
dtype = np.float32
degree = 3
shapes = [(1,), (2, 1), (1, 2), (2, 2)]
for x_shape in shapes:
for coeff_shape in shapes:
x = np.random.rand(*x_shape).astype(dtype)
coeffs = [
np.random.rand(*coeff_shape).astype(dtype)
for _ in range(degree + 1)
]
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, self.evaluate(tf_val))
def testEmpty(self):
x = np.random.rand(2, 2).astype(np.float32)
coeffs = []
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, self.evaluate(tf_val))
class SingularGradientOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testGradientAtSingularity(self):
if not compat.forward_compatible(2020, 3, 14):
self.skipTest("Skipping test for future functionality.")
ops_and_singularity = [
(gen_math_ops.reciprocal, (0.,)),
(gen_math_ops.rsqrt, (0.,)),
(gen_math_ops.sqrt, (0.,)),
(gen_math_ops.sqrt_grad, (
0.,
0.,
)),
(gen_math_ops.reciprocal_grad, (
1.,
0.,
)),
(gen_math_ops.tan, (np.pi / 2,)),
(gen_math_ops.log, (0.,)),
(gen_math_ops.log1p, (-1.,)),
(gen_math_ops.acosh, (0.,)),
(gen_math_ops.asin, (1.,)),
(gen_math_ops.acos, (1.,)),
(gen_math_ops.atan2, (0., 0.)),
(gen_math_ops.div, (1., 0.)),
(gen_math_ops.div_no_nan, (1., 0.)),
(gen_math_ops.real_div, (1., 0.)),
(math_ops.pow, (0., -1.)),
]
for op, singularity in ops_and_singularity:
for dtype in (dtypes_lib.half, dtypes_lib.float32, dtypes_lib.float64,
dtypes_lib.complex64, dtypes_lib.complex128):
if dtype.is_complex and op in [
gen_math_ops.asin, gen_math_ops.acos, gen_math_ops.atan2
]:
continue
if dtype == dtypes_lib.half and op in [
gen_math_ops.acosh, gen_math_ops.asin, gen_math_ops.acos,
gen_math_ops.atan2
]:
continue
with self.cached_session():
print("op = ", op, ", singularity = ", singularity, ", type = ",
dtype)
args = [constant_op.constant(s, dtype=dtype) for s in singularity]
grad_y = constant_op.constant(0, dtype=dtype)
y = op(*args)
g = gradients_impl.gradients(y, args, grad_ys=grad_y)
g_val = self.evaluate(g)
self.assertAllEqual(g_val, np.zeros(len(singularity)))
if __name__ == "__main__":
test.main()
| apache-2.0 | 590,041,044,447,827,500 | 36.312354 | 80 | 0.604027 | false |
GoogleCloudPlatform/python-compat-runtime | appengine-compat/exported_appengine_sdk/google/appengine/tools/appcfg.py | 1 | 190508 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tool for deploying apps to an app server.
Currently, the application only uploads new appversions. To do this, it first
walks the directory tree rooted at the path the user specifies, adding all the
files it finds to a list. It then uploads the application configuration
(app.yaml) to the server using HTTP, followed by uploading each of the files.
It then commits the transaction with another request.
The bulk of this work is handled by the AppVersionUpload class, which exposes
methods to add to the list of files, fetch a list of modified files, upload
files, and commit or rollback the transaction.
"""
from __future__ import with_statement
import calendar
import contextlib
import copy
import datetime
import errno
import hashlib
import itertools
import json
import logging
import mimetypes
import optparse
import os
import random
import re
import shutil
import StringIO
import subprocess
import sys
import tempfile
import time
import urllib
import urllib2
import google
from oauth2client import devshell
import yaml
from google.appengine.cron import groctimespecification
from google.appengine.api import appinfo
from google.appengine.api import appinfo_includes
from google.appengine.api import backendinfo
from google.appengine.api import client_deployinfo
from google.appengine.api import croninfo
from google.appengine.api import dispatchinfo
from google.appengine.api import dosinfo
from google.appengine.api import queueinfo
from google.appengine.api import validation
from google.appengine.api import yaml_errors
from google.appengine.api import yaml_object
from google.appengine.datastore import datastore_index
from google.appengine.tools import appengine_rpc
from google.appengine.tools import augment_mimetypes
from google.appengine.tools import bulkloader
from google.appengine.tools import context_util
from google.appengine.tools import goroots
from google.appengine.tools import sdk_update_checker
try:
from oauth2client.contrib import gce as oauth2client_gce
except ImportError:
from oauth2client import gce as oauth2client_gce
try:
from google.appengine.tools import appengine_rpc_httplib2
except ImportError:
appengine_rpc_httplib2 = None
if sys.version_info[:2] >= (2, 7):
from google.appengine.tools import appcfg_java
else:
appcfg_java = None
LIST_DELIMITER = '\n'
TUPLE_DELIMITER = '|'
BACKENDS_ACTION = 'backends'
BACKENDS_MESSAGE = ('Warning: This application uses Backends, a deprecated '
'feature that has been replaced by Modules, which '
'offers additional functionality. Please convert your '
'backends to modules as described at: ')
_CONVERTING_URL = (
'https://developers.google.com/appengine/docs/%s/modules/converting')
MAX_LOG_LEVEL = 4
MAX_BATCH_SIZE = 3200000
MAX_BATCH_COUNT = 100
MAX_BATCH_FILE_SIZE = 200000
BATCH_OVERHEAD = 500
verbosity = 1
PREFIXED_BY_ADMIN_CONSOLE_RE = '^(?:admin-console|admin-console-hr)(.*)'
SDK_PRODUCT = 'appcfg_py'
DAY = 24*3600
SUNDAY = 6
MEGA = 1024 * 1024
MILLION = 1000 * 1000
DEFAULT_RESOURCE_LIMITS = {
'max_file_size': 32 * MILLION,
'max_blob_size': 32 * MILLION,
'max_files_to_clone': 100,
'max_total_file_size': 150 * MEGA,
'max_file_count': 10000,
}
# Client ID and secrets are managed in the Google API console.
APPCFG_CLIENT_ID = '550516889912.apps.googleusercontent.com'
APPCFG_CLIENT_NOTSOSECRET = 'ykPq-0UYfKNprLRjVx1hBBar'
APPCFG_SCOPES = ('https://www.googleapis.com/auth/appengine.admin',
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/userinfo.email')
STATIC_FILE_PREFIX = '__static__'
METADATA_BASE = 'http://metadata.google.internal'
SERVICE_ACCOUNT_BASE = (
'computeMetadata/v1/instance/service-accounts/default')
APP_YAML_FILENAME = 'app.yaml'
GCLOUD_ONLY_RUNTIMES = set(['custom', 'nodejs'])
augment_mimetypes.init()
class Error(Exception):
pass
class CannotStartServingError(Error):
"""We could not start serving the version being uploaded."""
pass
def PrintUpdate(msg, error_fh=sys.stderr):
"""Print a message to stderr or the given file-like object.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
error_fh: Where to send the message.
"""
if verbosity > 0:
timestamp = datetime.datetime.now()
print >>error_fh, '%s %s' % (timestamp.strftime('%I:%M %p'), msg)
def StatusUpdate(msg, error_fh=sys.stderr):
"""Print a status message to stderr or the given file-like object."""
PrintUpdate(msg, error_fh)
def BackendsStatusUpdate(runtime, error_fh=sys.stderr):
"""Print the Backends status message based on current runtime.
Args:
runtime: String name of current runtime.
error_fh: Where to send the message.
"""
language = runtime
if language == 'python27':
language = 'python'
elif language == 'java7':
language = 'java'
if language == 'python' or language == 'java':
StatusUpdate(BACKENDS_MESSAGE + (_CONVERTING_URL % language), error_fh)
def ErrorUpdate(msg, error_fh=sys.stderr):
"""Print an error message to stderr."""
PrintUpdate(msg, error_fh)
def _PrintErrorAndExit(stream, msg, exit_code=2):
"""Prints the given error message and exists the program.
Args:
stream: The stream (e.g. StringIO or file) to write the message to.
msg: The error message to display as a string.
exit_code: The integer code to pass to sys.exit().
"""
stream.write(msg)
sys.exit(exit_code)
def JavaSupported():
"""True if Java is supported by this SDK."""
if appcfg_java:
tools_java_dir = os.path.join(os.path.dirname(appcfg_java.__file__), 'java')
return os.path.isdir(tools_java_dir)
else:
return False
@contextlib.contextmanager
def TempChangeField(obj, field_name, new_value):
"""Context manager to change a field value on an object temporarily.
Args:
obj: The object to change the field on.
field_name: The field name to change.
new_value: The new value.
Yields:
The old value.
"""
old_value = getattr(obj, field_name)
setattr(obj, field_name, new_value)
yield old_value
setattr(obj, field_name, old_value)
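# Illustrative sketch (not part of the original module): typical use of
# TempChangeField() above. The 'config' object is assumed to be a parsed
# yaml object with an 'application' attribute and a ToYAML() method.
def _temp_change_field_example(config):
  # Clear the 'application' field just while serializing; the old value is
  # restored automatically when the with-block exits.
  with TempChangeField(config, 'application', None) as old_app_id:
    payload = config.ToYAML()
  return old_app_id, payload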
class FileClassification(object):
"""A class to hold a file's classification.
This class both abstracts away the details of how we determine
whether a file is a regular, static or error file as well as acting
as a container for various metadata about the file.
"""
def __init__(self, config, filename, error_fh=sys.stderr):
"""Initializes a FileClassification instance.
Args:
config: The app.yaml object to check the filename against.
filename: The name of the file.
error_fh: Where to send status and error messages.
"""
self.__error_fh = error_fh
self.__static_mime_type = self.__GetMimeTypeIfStaticFile(config, filename)
self.__static_app_readable = self.__GetAppReadableIfStaticFile(config,
filename)
self.__error_mime_type, self.__error_code = self.__LookupErrorBlob(config,
filename)
def __GetMimeTypeIfStaticFile(self, config, filename):
"""Looks up the mime type for 'filename'.
Uses the handlers in 'config' to determine if the file should
be treated as a static file.
Args:
config: The app.yaml object to check the filename against.
filename: The name of the file.
Returns:
The mime type string. For example, 'text/plain' or 'image/gif'.
None if this is not a static file.
"""
if self.__FileNameImpliesStaticFile(filename):
return self.__MimeType(filename)
for handler in config.handlers:
handler_type = handler.GetHandlerType()
if handler_type in ('static_dir', 'static_files'):
if handler_type == 'static_dir':
regex = os.path.join(re.escape(handler.GetHandler()), '.*')
else:
regex = handler.upload
if re.match(regex, filename):
return handler.mime_type or self.__MimeType(filename)
return None
@staticmethod
def __FileNameImpliesStaticFile(filename):
"""True if the name of a file implies that it is a static resource.
For Java applications specified with web.xml and appengine-web.xml, we
create a staging directory that includes a __static__ hierarchy containing
links to all files that are implied static by the contents of those XML
files. So if a file has been copied into that directory then we can assume
it is static.
Args:
filename: The full path to the file.
Returns:
True if the file should be considered a static resource based on its name.
"""
static = '__static__' + os.sep
return static in filename
@staticmethod
def __GetAppReadableIfStaticFile(config, filename):
"""Looks up whether a static file is readable by the application.
Uses the handlers in 'config' to determine if the file should
be treated as a static file and if so, if the file should be readable by the
application.
Args:
config: The AppInfoExternal object to check the filename against.
filename: The name of the file.
Returns:
True if the file is static and marked as app readable, False otherwise.
"""
for handler in config.handlers:
handler_type = handler.GetHandlerType()
if handler_type in ('static_dir', 'static_files'):
if handler_type == 'static_dir':
regex = os.path.join(re.escape(handler.GetHandler()), '.*')
else:
regex = handler.upload
if re.match(regex, filename):
return handler.application_readable
return False
def __LookupErrorBlob(self, config, filename):
"""Looks up the mime type and error_code for 'filename'.
Uses the error handlers in 'config' to determine if the file should
be treated as an error blob.
Args:
config: The app.yaml object to check the filename against.
filename: The name of the file.
Returns:
A tuple of (mime_type, error_code), or (None, None) if this is not an
error blob. For example, ('text/plain', default) or ('image/gif',
timeout) or (None, None).
"""
if not config.error_handlers:
return (None, None)
for error_handler in config.error_handlers:
if error_handler.file == filename:
error_code = error_handler.error_code
error_code = error_code or 'default'
if error_handler.mime_type:
return (error_handler.mime_type, error_code)
else:
return (self.__MimeType(filename), error_code)
return (None, None)
def __MimeType(self, filename, default='application/octet-stream'):
guess = mimetypes.guess_type(filename)[0]
if guess is None:
print >>self.__error_fh, ('Could not guess mimetype for %s. Using %s.'
% (filename, default))
return default
return guess
def IsApplicationFile(self):
return bool((not self.IsStaticFile() or self.__static_app_readable) and
not self.IsErrorFile())
def IsStaticFile(self):
return bool(self.__static_mime_type)
def StaticMimeType(self):
return self.__static_mime_type
def IsErrorFile(self):
return bool(self.__error_mime_type)
def ErrorMimeType(self):
return self.__error_mime_type
def ErrorCode(self):
return self.__error_code
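# Illustrative sketch (not part of the original module): how a caller might
# classify a single file with FileClassification above. 'config' is assumed
# to be a parsed app.yaml (appinfo) object; the path is made up.
def _classify_file_example(config):
  classification = FileClassification(config, 'static/logo.png')
  if classification.IsStaticFile():
    return 'static', classification.StaticMimeType()
  elif classification.IsErrorFile():
    return 'error', classification.ErrorMimeType()
  return 'application', None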
def BuildClonePostBody(file_tuples):
"""Build the post body for the /api/clone{files,blobs,errorblobs} urls.
Args:
file_tuples: A list of tuples. Each tuple should contain the entries
appropriate for the endpoint in question.
Returns:
A string containing the properly delimited tuples.
"""
file_list = []
for tup in file_tuples:
path = tup[1]
tup = tup[2:]
file_list.append(TUPLE_DELIMITER.join([path] + list(tup)))
return LIST_DELIMITER.join(file_list)
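# Illustrative sketch (not part of the original module): shows the delimited
# wire format produced by BuildClonePostBody() above. The file names and
# hashes are invented for demonstration only.
def _build_clone_post_body_example():
  # Each tuple is (unused, path, hash); the result joins 'path|hash' entries
  # with newlines, e.g. 'main.py|abc123\napp.yaml|def456'.
  return BuildClonePostBody([('unused', 'main.py', 'abc123'),
                             ('unused', 'app.yaml', 'def456')])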
def _GetRemoteResourceLimits(logging_context):
"""Get the resource limit as reported by the admin console.
Get the resource limits by querying the admin_console/appserver. The
actual limits returned depends on the server we are talking to and
could be missing values we expect or include extra values.
Args:
logging_context: The _ClientDeployLoggingContext for this upload.
Returns:
A dictionary.
"""
try:
yaml_data = logging_context.Send('/api/appversion/getresourcelimits')
except urllib2.HTTPError, err:
if err.code != 404:
raise
return {}
return yaml.safe_load(yaml_data)
def GetResourceLimits(logging_context, error_fh=sys.stderr):
"""Gets the resource limits.
Gets the resource limits that should be applied to apps. Any values
that the server does not know about will have their default value
reported (although it is also possible for the server to report
values we don't know about).
Args:
logging_context: The _ClientDeployLoggingContext for this upload.
error_fh: Where to send status and error messages.
Returns:
A dictionary.
"""
resource_limits = DEFAULT_RESOURCE_LIMITS.copy()
StatusUpdate('Getting current resource limits.', error_fh)
resource_limits.update(_GetRemoteResourceLimits(logging_context))
logging.debug('Using resource limits: %s', resource_limits)
return resource_limits
def RetryWithBackoff(callable_func, retry_notify_func,
initial_delay=1, backoff_factor=2,
max_delay=60, max_tries=20):
"""Calls a function multiple times, backing off more and more each time.
Args:
callable_func: A function that performs some operation that should be
retried a number of times upon failure. Signature: () -> (done, value)
If 'done' is True, we'll immediately return (True, value)
If 'done' is False, we'll delay a bit and try again, unless we've
hit the 'max_tries' limit, in which case we'll return (False, value).
retry_notify_func: This function will be called immediately before the
next retry delay. Signature: (value, delay) -> None
'value' is the value returned by the last call to 'callable_func'
'delay' is the retry delay, in seconds
initial_delay: Initial delay after first try, in seconds.
backoff_factor: Delay will be multiplied by this factor after each try.
max_delay: Maximum delay, in seconds.
max_tries: Maximum number of tries (the first one counts).
Returns:
What the last call to 'callable_func' returned, which is of the form
(done, value). If 'done' is True, you know 'callable_func' returned True
before we ran out of retries. If 'done' is False, you know 'callable_func'
kept returning False and we ran out of retries.
Raises:
Whatever the function raises--an exception will immediately stop retries.
"""
delay = initial_delay
num_tries = 0
while True:
done, opaque_value = callable_func()
num_tries += 1
if done:
return True, opaque_value
if num_tries >= max_tries:
return False, opaque_value
retry_notify_func(opaque_value, delay)
time.sleep(delay)
delay = min(delay * backoff_factor, max_delay)
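# Illustrative sketch (not part of the original module): a typical caller of
# RetryWithBackoff() above. The polling endpoint, its '1' response and the
# status message are hypothetical.
def _retry_with_backoff_example(logging_context):
  def _check_ready():
    # Returns (done, value); done=True ends the retry loop immediately.
    result = logging_context.Send('/api/appversion/isready')
    return result == '1', result
  def _notify_retry(unused_value, delay):
    StatusUpdate('Version not ready yet; will retry in %d seconds.' % delay)
  return RetryWithBackoff(_check_ready, _notify_retry,
                          initial_delay=1, backoff_factor=2,
                          max_delay=60, max_tries=20)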
def RetryNoBackoff(callable_func,
retry_notify_func,
delay=5,
max_tries=200):
"""Calls a function multiple times, with the same delay each time.
Args:
callable_func: A function that performs some operation that should be
retried a number of times upon failure. Signature: () -> (done, value)
If 'done' is True, we'll immediately return (True, value)
If 'done' is False, we'll delay a bit and try again, unless we've
hit the 'max_tries' limit, in which case we'll return (False, value).
retry_notify_func: This function will be called immediately before the
next retry delay. Signature: (value, delay) -> None
'value' is the value returned by the last call to 'callable_func'
'delay' is the retry delay, in seconds
delay: Delay between tries, in seconds.
max_tries: Maximum number of tries (the first one counts).
Returns:
What the last call to 'callable_func' returned, which is of the form
(done, value). If 'done' is True, you know 'callable_func' returned True
before we ran out of retries. If 'done' is False, you know 'callable_func'
kept returning False and we ran out of retries.
Raises:
Whatever the function raises--an exception will immediately stop retries.
"""
return RetryWithBackoff(callable_func,
retry_notify_func,
delay,
1,
delay,
max_tries)
def MigratePython27Notice():
"""Tells the user that Python 2.5 runtime is deprecated.
Encourages the user to migrate from Python 2.5 to Python 2.7.
Prints a message to sys.stdout. The caller should have tested that the user is
using Python 2.5, so as not to spuriously display this message.
"""
ErrorUpdate(
'WARNING: This application is using the Python 2.5 runtime, which is '
'deprecated! It should be updated to the Python 2.7 runtime as soon as '
'possible, which offers performance improvements and many new features. '
'Learn how simple it is to migrate your application to Python 2.7 at '
'https://developers.google.com/appengine/docs/python/python25/migrate27.')
def MigrateGcloudNotice():
"""Tells the user that deploying a flex app with appcfg is deprecated."""
ErrorUpdate(
'WARNING: We highly recommend using the Google Cloud '
'SDK for deployments to the App Engine Flexible '
'Environment. Using appcfg.py for deployments to the '
'flexible environment could lead to downtime. Please '
'visit https://cloud.google.com/sdk to learn more.')
class IndexDefinitionUpload(object):
"""Provides facilities to upload index definitions to the hosting service."""
def __init__(self, rpcserver, definitions, error_fh=sys.stderr):
"""Creates a new DatastoreIndexUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
definitions: An IndexDefinitions object.
error_fh: Where to send status and error messages.
"""
self.rpcserver = rpcserver
self.definitions = definitions
self.error_fh = error_fh
def DoUpload(self):
"""Uploads the index definitions."""
StatusUpdate('Uploading index definitions.', self.error_fh)
with TempChangeField(self.definitions, 'application', None) as app_id:
self.rpcserver.Send('/api/datastore/index/add',
app_id=app_id,
payload=self.definitions.ToYAML())
class CronEntryUpload(object):
"""Provides facilities to upload cron entries to the hosting service."""
def __init__(self, rpcserver, cron, error_fh=sys.stderr):
"""Creates a new CronEntryUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer
cron: The CronInfoExternal object loaded from the cron.yaml file.
error_fh: Where to send status and error messages.
"""
self.rpcserver = rpcserver
self.cron = cron
self.error_fh = error_fh
def DoUpload(self):
"""Uploads the cron entries."""
StatusUpdate('Uploading cron entries.', self.error_fh)
with TempChangeField(self.cron, 'application', None) as app_id:
self.rpcserver.Send('/api/cron/update',
app_id=app_id,
payload=self.cron.ToYAML())
class QueueEntryUpload(object):
"""Provides facilities to upload task queue entries to the hosting service."""
def __init__(self, rpcserver, queue, error_fh=sys.stderr):
"""Creates a new QueueEntryUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer
queue: The QueueInfoExternal object loaded from the queue.yaml file.
error_fh: Where to send status and error messages.
"""
self.rpcserver = rpcserver
self.queue = queue
self.error_fh = error_fh
def DoUpload(self):
"""Uploads the task queue entries."""
StatusUpdate('Uploading task queue entries.', self.error_fh)
with TempChangeField(self.queue, 'application', None) as app_id:
self.rpcserver.Send('/api/queue/update',
app_id=app_id,
payload=self.queue.ToYAML())
class DispatchEntryUpload(object):
"""Provides facilities to upload dispatch entries to the hosting service."""
def __init__(self, rpcserver, dispatch, error_fh=sys.stderr):
"""Creates a new DispatchEntryUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer
dispatch: The DispatchInfoExternal object loaded from the dispatch.yaml
file.
error_fh: Where to send status and error messages.
"""
self.rpcserver = rpcserver
self.dispatch = dispatch
self.error_fh = error_fh
def DoUpload(self):
"""Uploads the dispatch entries."""
StatusUpdate('Uploading dispatch entries.', self.error_fh)
self.rpcserver.Send('/api/dispatch/update',
app_id=self.dispatch.application,
payload=self.dispatch.ToYAML())
class DosEntryUpload(object):
"""Provides facilities to upload dos entries to the hosting service."""
def __init__(self, rpcserver, dos, error_fh=sys.stderr):
"""Creates a new DosEntryUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer.
dos: The DosInfoExternal object loaded from the dos.yaml file.
error_fh: Where to send status and error messages.
"""
self.rpcserver = rpcserver
self.dos = dos
self.error_fh = error_fh
def DoUpload(self):
"""Uploads the dos entries."""
StatusUpdate('Uploading DOS entries.', self.error_fh)
with TempChangeField(self.dos, 'application', None) as app_id:
self.rpcserver.Send('/api/dos/update',
app_id=app_id,
payload=self.dos.ToYAML())
class DefaultVersionSet(object):
"""Provides facilities to set the default (serving) version."""
def __init__(self, rpcserver, app_id, module, version, error_fh=sys.stderr):
"""Creates a new DefaultVersionSet.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer.
app_id: The application to make the change to.
module: The module to set the default version of (if any).
version: The version to set as the default.
error_fh: Where to send status and error messages.
"""
self.rpcserver = rpcserver
self.app_id = app_id
self.module = module
self.version = version
self.error_fh = error_fh
def SetVersion(self):
"""Sets the default version."""
if self.module:
modules = self.module.split(',')
if len(modules) > 1:
StatusUpdate('Setting the default version of modules %s of application '
'%s to %s.' % (', '.join(modules),
self.app_id,
self.version),
self.error_fh)
params = [('app_id', self.app_id), ('version', self.version)]
params.extend(('module', module) for module in modules)
url = '/api/appversion/setdefault?' + urllib.urlencode(sorted(params))
self.rpcserver.Send(url)
return
else:
StatusUpdate('Setting default version of module %s of application %s '
'to %s.' % (self.module, self.app_id, self.version),
self.error_fh)
else:
StatusUpdate('Setting default version of application %s to %s.'
% (self.app_id, self.version), self.error_fh)
self.rpcserver.Send('/api/appversion/setdefault',
app_id=self.app_id,
module=self.module,
version=self.version)
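# Illustrative sketch (not part of the original flow): setting the default
# version needs an already-authenticated RPC server; the app/module/version
# names below are placeholders.
#
#   setter = DefaultVersionSet(rpcserver, 'my-app', 'my-module', '2')
#   setter.SetVersion()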
class TrafficMigrator(object):
"""Provides facilities to migrate traffic."""
def __init__(self, rpcserver, app_id, version, error_fh=sys.stderr):
"""Creates a new TrafficMigrator.
Args:
rpcserver: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer.
app_id: The application to make the change to.
version: The version to set as the default.
error_fh: Where to send status and error messages.
"""
self.rpcserver = rpcserver
self.app_id = app_id
self.version = version
self.error_fh = error_fh
def MigrateTraffic(self):
"""Migrates traffic."""
StatusUpdate('Migrating traffic of application %s to %s.'
% (self.app_id, self.version), self.error_fh)
self.rpcserver.Send('/api/appversion/migratetraffic',
app_id=self.app_id,
version=self.version)
class IndexOperation(object):
"""Provide facilities for writing Index operation commands."""
def __init__(self, rpcserver, error_fh=sys.stderr):
"""Creates a new IndexOperation.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
error_fh: Where to send status and error messages.
"""
self.rpcserver = rpcserver
self.error_fh = error_fh
def DoDiff(self, definitions):
"""Retrieve diff file from the server.
Args:
      definitions: datastore_index.IndexDefinitions as loaded from the user's
index.yaml file.
Returns:
A pair of datastore_index.IndexDefinitions objects. The first record
is the set of indexes that are present in the index.yaml file but missing
from the server. The second record is the set of indexes that are
present on the server but missing from the index.yaml file (indicating
that these indexes should probably be vacuumed).
"""
StatusUpdate('Fetching index definitions diff.', self.error_fh)
with TempChangeField(definitions, 'application', None) as app_id:
response = self.rpcserver.Send('/api/datastore/index/diff',
app_id=app_id,
payload=definitions.ToYAML())
return datastore_index.ParseMultipleIndexDefinitions(response)
def DoDelete(self, definitions, app_id):
"""Delete indexes from the server.
Args:
definitions: Index definitions to delete from datastore.
app_id: The application id.
Returns:
      A single datastore_index.IndexDefinitions containing indexes that were
      not deleted, probably because they were already removed. This may
      be normal behavior, as there is a potential race condition between
      fetching the index diff and sending the deletion confirmation.
"""
StatusUpdate('Deleting selected index definitions.', self.error_fh)
response = self.rpcserver.Send('/api/datastore/index/delete',
app_id=app_id,
payload=definitions.ToYAML())
return datastore_index.ParseIndexDefinitions(response)
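# Illustrative sketch of how the two calls above combine (placeholder
# `rpcserver` and `definitions`; real definitions come from the user's
# index.yaml via datastore_index):
#
#   op = IndexOperation(rpcserver)
#   new_indexes, unused_indexes = op.DoDiff(definitions)
#   op.DoDelete(unused_indexes, definitions.application)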
class VacuumIndexesOperation(IndexOperation):
"""Provide facilities to request the deletion of datastore indexes."""
def __init__(self, rpcserver, force, confirmation_fn=raw_input,
error_fh=sys.stderr):
"""Creates a new VacuumIndexesOperation.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
force: True to force deletion of indexes, else False.
      confirmation_fn: Function used for getting input from the user.
error_fh: Where to send status and error messages.
"""
super(VacuumIndexesOperation, self).__init__(rpcserver, error_fh)
self.force = force
self.confirmation_fn = confirmation_fn
def GetConfirmation(self, index):
"""Get confirmation from user to delete an index.
This method will enter an input loop until the user provides a
response it is expecting. Valid input is one of three responses:
y: Confirm deletion of index.
n: Do not delete index.
a: Delete all indexes without asking for further confirmation.
    If the user enters nothing at all, the default action is to skip
    that index and not delete it.
If the user selects 'a', as a side effect, the 'force' flag is set.
Args:
index: Index to confirm.
Returns:
      True if the user enters 'y' or 'a'. False if the user enters 'n'.
"""
while True:
print 'This index is no longer defined in your index.yaml file.'
print
print index.ToYAML()
print
confirmation = self.confirmation_fn(
'Are you sure you want to delete this index? (N/y/a): ')
confirmation = confirmation.strip().lower()
if confirmation == 'y':
return True
elif confirmation == 'n' or not confirmation:
return False
elif confirmation == 'a':
self.force = True
return True
else:
print 'Did not understand your response.'
def DoVacuum(self, definitions):
"""Vacuum indexes in datastore.
This method will query the server to determine which indexes are not
being used according to the user's local index.yaml file. Once it has
made this determination, it confirms with the user which unused indexes
    should be deleted. Once confirmation for each index is received, it
deletes those indexes.
    Because another user may, in theory, delete the same indexes at the same
    time, there is a potential race condition. In these rare cases, some of
    the indexes previously confirmed for deletion will not be found; the
    user is notified which indexes these were.
Args:
      definitions: datastore_index.IndexDefinitions as loaded from the user's
index.yaml file.
"""
unused_new_indexes, notused_indexes = self.DoDiff(definitions)
deletions = datastore_index.IndexDefinitions(indexes=[])
if notused_indexes.indexes is not None:
for index in notused_indexes.indexes:
if self.force or self.GetConfirmation(index):
deletions.indexes.append(index)
if deletions.indexes:
not_deleted = self.DoDelete(deletions, definitions.application)
if not_deleted.indexes:
not_deleted_count = len(not_deleted.indexes)
if not_deleted_count == 1:
warning_message = ('An index was not deleted. Most likely this is '
'because it no longer exists.\n\n')
else:
warning_message = ('%d indexes were not deleted. Most likely this '
'is because they no longer exist.\n\n'
% not_deleted_count)
for index in not_deleted.indexes:
warning_message += index.ToYAML()
logging.warning(warning_message)
class LogsRequester(object):
"""Provide facilities to export request logs."""
def __init__(self,
rpcserver,
app_id,
module,
version_id,
output_file,
num_days,
append,
severity,
end,
vhost,
include_vhost,
include_all=None,
time_func=time.time,
error_fh=sys.stderr):
"""Constructor.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
app_id: The application to fetch logs from.
module: The module of the app to fetch logs from, optional.
version_id: The version of the app to fetch logs for.
output_file: Output file name.
num_days: Number of days worth of logs to export; 0 for all available.
append: True if appending to an existing file.
severity: App log severity to request (0-4); None for no app logs.
end: date object representing last day of logs to return.
vhost: The virtual host of log messages to get. None for all hosts.
include_vhost: If true, the virtual host is included in log messages.
include_all: If true, we add to the log message everything we know
about the request.
time_func: A time.time() compatible function, which can be overridden for
testing.
error_fh: Where to send status and error messages.
"""
self.rpcserver = rpcserver
self.app_id = app_id
self.output_file = output_file
self.append = append
self.num_days = num_days
self.severity = severity
self.vhost = vhost
self.include_vhost = include_vhost
self.include_all = include_all
self.error_fh = error_fh
self.module = module
self.version_id = version_id
self.sentinel = None
self.write_mode = 'w'
if self.append:
self.sentinel = FindSentinel(self.output_file)
self.write_mode = 'a'
self.skip_until = False
now = PacificDate(time_func())
if end < now:
self.skip_until = end
else:
end = now
self.valid_dates = None
if self.num_days:
start = end - datetime.timedelta(self.num_days - 1)
self.valid_dates = (start, end)
def DownloadLogs(self):
"""Download the requested logs.
This will write the logs to the file designated by
self.output_file, or to stdout if the filename is '-'.
Multiple roundtrips to the server may be made.
"""
if self.module:
StatusUpdate('Downloading request logs for app %s module %s version %s.' %
(self.app_id, self.module, self.version_id), self.error_fh)
else:
StatusUpdate('Downloading request logs for app %s version %s.' %
(self.app_id, self.version_id), self.error_fh)
tf = tempfile.TemporaryFile()
last_offset = None
try:
while True:
try:
new_offset = self.RequestLogLines(tf, last_offset)
if not new_offset or new_offset == last_offset:
break
last_offset = new_offset
except KeyboardInterrupt:
StatusUpdate('Keyboard interrupt; saving data downloaded so far.',
self.error_fh)
break
StatusUpdate('Copying request logs to %r.' % self.output_file,
self.error_fh)
if self.output_file == '-':
of = sys.stdout
else:
try:
of = open(self.output_file, self.write_mode)
except IOError, err:
StatusUpdate('Can\'t write %r: %s.' % (self.output_file, err))
sys.exit(1)
try:
line_count = CopyReversedLines(tf, of)
finally:
of.flush()
if of is not sys.stdout:
of.close()
finally:
tf.close()
StatusUpdate('Copied %d records.' % line_count, self.error_fh)
def RequestLogLines(self, tf, offset):
"""Make a single roundtrip to the server.
Args:
tf: Writable binary stream to which the log lines returned by
the server are written, stripped of headers, and excluding
lines skipped due to self.sentinel or self.valid_dates filtering.
offset: Offset string for a continued request; None for the first.
Returns:
The offset string to be used for the next request, if another
request should be issued; or None, if not.
"""
logging.info('Request with offset %r.', offset)
kwds = {'app_id': self.app_id,
'version': self.version_id,
'limit': 1000,
'no_header': 1,
}
if self.module:
kwds['module'] = self.module
if offset:
kwds['offset'] = offset
if self.severity is not None:
kwds['severity'] = str(self.severity)
if self.vhost is not None:
kwds['vhost'] = str(self.vhost)
if self.include_vhost is not None:
kwds['include_vhost'] = str(self.include_vhost)
if self.include_all is not None:
kwds['include_all'] = str(self.include_all)
response = self.rpcserver.Send('/api/request_logs', payload=None, **kwds)
response = response.replace('\r', '\0')
lines = response.splitlines()
logging.info('Received %d bytes, %d records.', len(response), len(lines))
offset = None
valid_dates = self.valid_dates
sentinel = self.sentinel
skip_until = self.skip_until
len_sentinel = None
if sentinel:
len_sentinel = len(sentinel)
for line in lines:
if line.startswith('#'):
match = re.match(r'^#\s*next_offset=(\S+)\s*$', line)
if match and match.group(1) != 'None':
offset = match.group(1)
continue
if (sentinel and
line.startswith(sentinel) and
line[len_sentinel : len_sentinel+1] in ('', '\0')):
return None
linedate = DateOfLogLine(line)
if not linedate:
continue
if skip_until:
if linedate > skip_until:
continue
else:
self.skip_until = skip_until = False
if valid_dates and not valid_dates[0] <= linedate <= valid_dates[1]:
return None
tf.write(line + '\n')
if not lines:
return None
return offset
def DateOfLogLine(line):
"""Returns a date object representing the log line's timestamp.
Args:
line: a log line string.
Returns:
A date object representing the timestamp or None if parsing fails.
"""
m = re.compile(r'[^[]+\[(\d+/[A-Za-z]+/\d+):[^\d]*').match(line)
if not m:
return None
try:
return datetime.date(*time.strptime(m.group(1), '%d/%b/%Y')[:3])
except ValueError:
return None
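# Example (illustrative; the log line below is a shortened stand-in for the
# Apache-style request log format the regex above expects):
#
#   DateOfLogLine('0.1.0.2 - - [22/Feb/2015:12:34:56 -0800] "GET / HTTP/1.1"')
#   # -> datetime.date(2015, 2, 22)
#   DateOfLogLine('not a log line')   # -> None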
def PacificDate(now):
"""For a UTC timestamp, return the date in the US/Pacific timezone.
Args:
now: A posix timestamp giving current UTC time.
Returns:
A date object representing what day it is in the US/Pacific timezone.
"""
return datetime.date(*time.gmtime(PacificTime(now))[:3])
def PacificTime(now):
"""Helper to return the number of seconds between UTC and Pacific time.
This is needed to compute today's date in Pacific time (more
specifically: Mountain View local time), which is how request logs
are reported. (Google servers always report times in Mountain View
local time, regardless of where they are physically located.)
This takes (post-2006) US DST into account. Pacific time is either
8 hours or 7 hours west of UTC, depending on whether DST is in
  effect. Since 2007, US DST starts on the second Sunday in March
  and ends on the first Sunday in November. (Reference:
  http://aa.usno.navy.mil/faq/docs/daylight_time.php.)
Note that the server doesn't report its local time (the HTTP Date
header uses UTC), and the client's local time is irrelevant.
Args:
now: A posix timestamp giving current UTC time.
Returns:
A pseudo-posix timestamp giving current Pacific time. Passing
this through time.gmtime() will produce a tuple in Pacific local
time.
"""
now -= 8*3600
if IsPacificDST(now):
now += 3600
return now
def IsPacificDST(now):
"""Helper for PacificTime to decide whether now is Pacific DST (PDT).
Args:
now: A pseudo-posix timestamp giving current time in PST.
Returns:
True if now falls within the range of DST, False otherwise.
"""
pst = time.gmtime(now)
year = pst[0]
assert year >= 2007
begin = calendar.timegm((year, 3, 8, 2, 0, 0, 0, 0, 0))
while time.gmtime(begin).tm_wday != SUNDAY:
begin += DAY
end = calendar.timegm((year, 11, 1, 2, 0, 0, 0, 0, 0))
while time.gmtime(end).tm_wday != SUNDAY:
end += DAY
return begin <= now < end
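# Illustrative check of the conversion above (values worked out by hand, not
# authoritative): 2015-07-01 00:00 UTC is inside the PDT window, so it maps
# to 17:00 the previous day in Pacific time.
#
#   import calendar
#   july_utc = calendar.timegm((2015, 7, 1, 0, 0, 0, 0, 0, 0))
#   PacificDate(july_utc)   # -> datetime.date(2015, 6, 30)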
def CopyReversedLines(instream, outstream, blocksize=2**16):
r"""Copy lines from input stream to output stream in reverse order.
As a special feature, null bytes in the input are turned into
newlines followed by tabs in the output, but these 'sub-lines'
  separated by null bytes are not reversed. E.g., if the input is
'A\0B\nC\0D\n', the output is 'C\n\tD\nA\n\tB\n'.
Args:
instream: A seekable stream open for reading in binary mode.
outstream: A stream open for writing; doesn't have to be seekable or binary.
blocksize: Optional block size for buffering, for unit testing.
Returns:
The number of lines copied.
"""
line_count = 0
instream.seek(0, 2)
last_block = instream.tell() // blocksize
spillover = ''
for iblock in xrange(last_block + 1, -1, -1):
instream.seek(iblock * blocksize)
data = instream.read(blocksize)
lines = data.splitlines(True)
lines[-1:] = ''.join(lines[-1:] + [spillover]).splitlines(True)
if lines and not lines[-1].endswith('\n'):
lines[-1] += '\n'
lines.reverse()
if lines and iblock > 0:
spillover = lines.pop()
if lines:
line_count += len(lines)
data = ''.join(lines).replace('\0', '\n\t')
outstream.write(data)
return line_count
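# Illustrative example of the behaviour documented above, using in-memory
# streams (Python 2 StringIO) instead of real log files:
#
#   import StringIO
#   src = StringIO.StringIO('A\0B\nC\0D\n')
#   dst = StringIO.StringIO()
#   CopyReversedLines(src, dst)     # returns 2
#   dst.getvalue()                  # -> 'C\n\tD\nA\n\tB\n'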
def FindSentinel(filename, blocksize=2**16, error_fh=sys.stderr):
"""Return the sentinel line from the output file.
Args:
filename: The filename of the output file. (We'll read this file.)
blocksize: Optional block size for buffering, for unit testing.
error_fh: Where to send status and error messages.
Returns:
The contents of the last line in the file that doesn't start with
a tab, with its trailing newline stripped; or None if the file
couldn't be opened or no such line could be found by inspecting
the last 'blocksize' bytes of the file.
"""
if filename == '-':
StatusUpdate('Can\'t combine --append with output to stdout.',
error_fh)
sys.exit(2)
try:
fp = open(filename, 'rb')
except IOError, err:
StatusUpdate('Append mode disabled: can\'t read %r: %s.' % (filename, err),
error_fh)
return None
try:
fp.seek(0, 2)
fp.seek(max(0, fp.tell() - blocksize))
lines = fp.readlines()
del lines[:1]
sentinel = None
for line in lines:
if not line.startswith('\t'):
sentinel = line
if not sentinel:
StatusUpdate('Append mode disabled: can\'t find sentinel in %r.' %
filename, error_fh)
return None
return sentinel.rstrip('\n')
finally:
fp.close()
class UploadBatcher(object):
"""Helper to batch file uploads."""
def __init__(self, what, logging_context):
"""Constructor.
Args:
what: Either 'file' or 'blob' or 'errorblob' indicating what kind of
objects this batcher uploads. Used in messages and URLs.
logging_context: The _ClientDeployLoggingContext for this upload.
"""
assert what in ('file', 'blob', 'errorblob'), repr(what)
self.what = what
self.logging_context = logging_context
self.single_url = '/api/appversion/add' + what
self.batch_url = self.single_url + 's'
self.batching = True
self.batch = []
self.batch_size = 0
def SendBatch(self):
"""Send the current batch on its way.
If successful, resets self.batch and self.batch_size.
Raises:
HTTPError with code=404 if the server doesn't support batching.
"""
boundary = 'boundary'
parts = []
for path, payload, mime_type in self.batch:
while boundary in payload:
boundary += '%04x' % random.randint(0, 0xffff)
assert len(boundary) < 80, 'Unexpected error, please try again.'
part = '\n'.join(['',
'X-Appcfg-File: %s' % urllib.quote(path),
'X-Appcfg-Hash: %s' % _Hash(payload),
'Content-Type: %s' % mime_type,
'Content-Length: %d' % len(payload),
'Content-Transfer-Encoding: 8bit',
'',
payload,
])
parts.append(part)
parts.insert(0,
'MIME-Version: 1.0\n'
'Content-Type: multipart/mixed; boundary="%s"\n'
'\n'
'This is a message with multiple parts in MIME format.' %
boundary)
parts.append('--\n')
delimiter = '\n--%s' % boundary
payload = delimiter.join(parts)
logging.info('Uploading batch of %d %ss to %s with boundary="%s".',
len(self.batch), self.what, self.batch_url, boundary)
self.logging_context.Send(self.batch_url,
payload=payload,
content_type='message/rfc822')
self.batch = []
self.batch_size = 0
def SendSingleFile(self, path, payload, mime_type):
"""Send a single file on its way."""
logging.info('Uploading %s %s (%s bytes, type=%s) to %s.',
self.what, path, len(payload), mime_type, self.single_url)
self.logging_context.Send(self.single_url,
payload=payload,
content_type=mime_type,
path=path)
def Flush(self):
"""Flush the current batch.
This first attempts to send the batch as a single request; if that
fails because the server doesn't support batching, the files are
sent one by one, and self.batching is reset to False.
At the end, self.batch and self.batch_size are reset.
"""
if not self.batch:
return
try:
self.SendBatch()
except urllib2.HTTPError, err:
if err.code != 404:
raise
logging.info('Old server detected; turning off %s batching.', self.what)
self.batching = False
for path, payload, mime_type in self.batch:
self.SendSingleFile(path, payload, mime_type)
self.batch = []
self.batch_size = 0
def AddToBatch(self, path, payload, mime_type):
"""Batch a file, possibly flushing first, or perhaps upload it directly.
Args:
path: The name of the file.
payload: The contents of the file.
mime_type: The MIME Content-type of the file, or None.
If mime_type is None, application/octet-stream is substituted.
"""
if not mime_type:
mime_type = 'application/octet-stream'
size = len(payload)
if size <= MAX_BATCH_FILE_SIZE:
if (len(self.batch) >= MAX_BATCH_COUNT or
self.batch_size + size > MAX_BATCH_SIZE):
self.Flush()
if self.batching:
logging.info('Adding %s %s (%s bytes, type=%s) to batch.',
self.what, path, size, mime_type)
self.batch.append((path, payload, mime_type))
self.batch_size += size + BATCH_OVERHEAD
return
self.SendSingleFile(path, payload, mime_type)
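# Illustrative sketch (the logging context and iterable are placeholders; real
# callers use the _ClientDeployLoggingContext created for the deployment):
#
#   batcher = UploadBatcher('file', logging_context)
#   for path, payload in files_to_send:        # hypothetical iterable
#     batcher.AddToBatch(path, payload, None)  # None -> application/octet-stream
#   batcher.Flush()                            # send whatever is still batched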
def _FormatHash(h):
"""Return a string representation of a hash.
The hash is a sha1 hash. It is computed both for files that need to be
pushed to App Engine and for data payloads of requests made to App Engine.
Args:
h: The hash
Returns:
The string representation of the hash.
"""
return '%s_%s_%s_%s_%s' % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])
def _Hash(content):
"""Compute the sha1 hash of the content.
Args:
content: The data to hash as a string.
Returns:
The string representation of the hash.
"""
h = hashlib.sha1(content).hexdigest()
return _FormatHash(h)
def _HashFromFileHandle(file_handle):
"""Compute the hash of the content of the file pointed to by file_handle.
Args:
file_handle: File-like object which provides seek, read and tell.
Returns:
The string representation of the hash.
"""
pos = file_handle.tell()
content_hash = _Hash(file_handle.read())
file_handle.seek(pos, 0)
return content_hash
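# Example (illustrative): the sha1 of the empty string, formatted into the
# five 8-character groups produced by _FormatHash above.
#
#   _Hash('')   # -> 'da39a3ee_5e6b4b0d_3255bfef_95601890_afd80709'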
def EnsureDir(path):
"""Makes sure that a directory exists at the given path.
If a directory already exists at that path, nothing is done.
Otherwise, try to create a directory at that path with os.makedirs.
If that fails, propagate the resulting OSError exception.
Args:
path: The path that you want to refer to a directory.
"""
try:
os.makedirs(path)
except OSError, exc:
if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
raise
def DoDownloadApp(rpcserver, out_dir, app_id, module, app_version,
error_fh=sys.stderr):
"""Downloads the files associated with a particular app version.
Args:
rpcserver: The RPC server to use to download.
out_dir: The directory the files should be downloaded to.
app_id: The app ID of the app whose files we want to download.
module: The module we want to download from. Can be:
- None: We'll download from the default module.
- <module>: We'll download from the specified module.
app_version: The version number we want to download. Can be:
- None: We'll download the latest default version.
- <major>: We'll download the latest minor version.
- <major>/<minor>: We'll download that exact version.
error_fh: Where to send status and error messages.
"""
StatusUpdate('Fetching file list...', error_fh)
url_args = {'app_id': app_id}
if module:
url_args['module'] = module
if app_version is not None:
url_args['version_match'] = app_version
result = rpcserver.Send('/api/files/list', **url_args)
StatusUpdate('Fetching files...', error_fh)
lines = result.splitlines()
if len(lines) < 1:
logging.error('Invalid response from server: empty')
return
full_version = lines[0]
file_lines = lines[1:]
current_file_number = 0
num_files = len(file_lines)
num_errors = 0
for line in file_lines:
parts = line.split('|', 2)
if len(parts) != 3:
logging.error('Invalid response from server: expecting '
'"<id>|<size>|<path>", found: "%s"\n', line)
return
current_file_number += 1
file_id, size_str, path = parts
try:
size = int(size_str)
except ValueError:
logging.error('Invalid file list entry from server: invalid size: '
'"%s"', size_str)
return
StatusUpdate('[%d/%d] %s' % (current_file_number, num_files, path),
error_fh)
def TryGet():
"""A request to /api/files/get which works with the RetryWithBackoff."""
try:
contents = rpcserver.Send('/api/files/get', app_id=app_id,
version=full_version, id=file_id)
return True, contents
except urllib2.HTTPError, exc:
if exc.code == 503:
return False, exc
else:
raise
def PrintRetryMessage(_, delay):
StatusUpdate('Server busy. Will try again in %d seconds.' % delay,
error_fh)
success, contents = RetryWithBackoff(TryGet, PrintRetryMessage)
if not success:
logging.error('Unable to download file "%s".', path)
num_errors += 1
continue
if len(contents) != size:
logging.error('File "%s": server listed as %d bytes but served '
'%d bytes.', path, size, len(contents))
num_errors += 1
full_path = os.path.join(out_dir, path)
if os.path.exists(full_path):
logging.error('Unable to create file "%s": path conflicts with '
'an existing file or directory', path)
num_errors += 1
continue
full_dir = os.path.dirname(full_path)
try:
EnsureDir(full_dir)
except OSError, exc:
logging.error('Couldn\'t create directory "%s": %s', full_dir, exc)
num_errors += 1
continue
try:
out_file = open(full_path, 'wb')
except IOError, exc:
logging.error('Couldn\'t open file "%s": %s', full_path, exc)
num_errors += 1
continue
try:
try:
out_file.write(contents)
except IOError, exc:
logging.error('Couldn\'t write to file "%s": %s', full_path, exc)
num_errors += 1
continue
finally:
out_file.close()
if num_errors > 0:
logging.error('Number of errors: %d. See output for details.', num_errors)
class _ClientDeployLoggingContext(object):
"""Context for sending and recording server rpc requests.
Attributes:
rpcserver: The AbstractRpcServer to use for the upload.
requests: A list of client_deployinfo.Request objects to include
with the client deploy log.
    time_func: Function to get the current time in seconds since the epoch.
request_params: A dictionary with params to append to requests
"""
def __init__(self,
rpcserver,
request_params,
usage_reporting,
time_func=time.time):
"""Creates a new AppVersionUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
request_params: A dictionary with params to append to requests
usage_reporting: Whether to actually upload data.
      time_func: Function to return the current time in seconds
(default time.time).
"""
self.rpcserver = rpcserver
self.request_params = request_params
self.usage_reporting = usage_reporting
self.time_func = time_func
self.requests = []
def Send(self, url, payload='', **kwargs):
"""Sends a request to the server, with common params."""
start_time_usec = self.GetCurrentTimeUsec()
request_size_bytes = len(payload)
try:
logging.info('Send: %s, params=%s', url, self.request_params)
kwargs.update(self.request_params)
result = self.rpcserver.Send(url, payload=payload, **kwargs)
self._RegisterReqestForLogging(url, 200, start_time_usec,
request_size_bytes)
return result
except urllib2.HTTPError, e:
self._RegisterReqestForLogging(url, e.code, start_time_usec,
request_size_bytes)
raise e
def GetCurrentTimeUsec(self):
"""Returns the current time in microseconds."""
return int(round(self.time_func() * 1000 * 1000))
def GetSdkVersion(self):
"""Returns the current SDK Version."""
sdk_version = sdk_update_checker.GetVersionObject()
return sdk_version.get('release', '?') if sdk_version else '?'
def _RegisterReqestForLogging(self, path, response_code, start_time_usec,
request_size_bytes):
"""Registers a request for client deploy logging purposes."""
end_time_usec = self.GetCurrentTimeUsec()
self.requests.append(client_deployinfo.Request(
path=path,
response_code=response_code,
start_time_usec=start_time_usec,
end_time_usec=end_time_usec,
request_size_bytes=request_size_bytes))
def LogClientDeploy(self, runtime, start_time_usec, success):
"""Logs a client deployment attempt.
Args:
runtime: The runtime for the app being deployed.
start_time_usec: The start time of the deployment in micro seconds.
success: True if the deployment succeeded otherwise False.
"""
if not self.usage_reporting:
logging.info('Skipping usage reporting.')
return
end_time_usec = self.GetCurrentTimeUsec()
try:
info = client_deployinfo.ClientDeployInfoExternal(
runtime=runtime,
start_time_usec=start_time_usec,
end_time_usec=end_time_usec,
requests=self.requests,
success=success,
sdk_version=self.GetSdkVersion())
self.Send('/api/logclientdeploy', info.ToYAML())
except BaseException, e:
      logging.debug('Exception logging deploy info; continuing - %s', e)
class EndpointsState(object):
SERVING = 'serving'
PENDING = 'pending'
FAILED = 'failed'
_STATES = frozenset((SERVING, PENDING, FAILED))
@classmethod
def Parse(cls, value):
state = value.lower()
if state not in cls._STATES:
lst = sorted(cls._STATES)
pretty_states = ', '.join(lst[:-1]) + ', or ' + lst[-1]
raise ValueError('Unexpected Endpoints state "%s"; should be %s.' %
(value, pretty_states))
return state
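# Example (illustrative):
#
#   EndpointsState.Parse('Serving')   # -> 'serving'
#   EndpointsState.Parse('bogus')     # raises ValueError listing valid states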
class AppVersionUpload(object):
"""Provides facilities to upload a new appversion to the hosting service.
Attributes:
rpcserver: The AbstractRpcServer to use for the upload.
config: The AppInfoExternal object derived from the app.yaml file.
app_id: The application string from 'config'.
version: The version string from 'config'.
backend: The backend to update, if any.
files: A dictionary of files to upload to the rpcserver, mapping path to
hash of the file contents.
in_transaction: True iff a transaction with the server has started.
An AppVersionUpload can do only one transaction at a time.
deployed: True iff the Deploy method has been called.
started: True iff the StartServing method has been called.
logging_context: The _ClientDeployLoggingContext for this upload.
ignore_endpoints_failures: True to finish deployment even if there are
errors updating the Google Cloud Endpoints configuration (if there is
one). False if these errors should cause a failure/rollback.
"""
def __init__(self, rpcserver, config, module_yaml_path='app.yaml',
backend=None,
error_fh=None,
usage_reporting=False, ignore_endpoints_failures=True):
"""Creates a new AppVersionUpload.
Args:
rpcserver: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: An AppInfoExternal object that specifies the configuration for
this application.
module_yaml_path: The (string) path to the yaml file corresponding to
<config>, relative to the bundle directory.
backend: If specified, indicates the update applies to the given backend.
The backend name must match an entry in the backends: stanza.
error_fh: Unexpected HTTPErrors are printed to this file handle.
usage_reporting: Whether or not to report usage.
ignore_endpoints_failures: True to finish deployment even if there are
errors updating the Google Cloud Endpoints configuration (if there is
one). False if these errors should cause a failure/rollback.
"""
self.rpcserver = rpcserver
self.config = config
self.app_id = self.config.application
self.module = self.config.module or self.config.service
self.backend = backend
self.error_fh = error_fh or sys.stderr
self.version = self.config.version
self.params = {}
if self.app_id:
self.params['app_id'] = self.app_id
if self.module:
self.params['module'] = self.module
if self.backend:
self.params['backend'] = self.backend
elif self.version:
self.params['version'] = self.version
self.files = {}
self.all_files = set()
self.in_transaction = False
self.deployed = False
self.started = False
self.batching = True
self.logging_context = _ClientDeployLoggingContext(rpcserver,
self.params,
usage_reporting)
self.file_batcher = UploadBatcher('file', self.logging_context)
self.blob_batcher = UploadBatcher('blob', self.logging_context)
self.errorblob_batcher = UploadBatcher('errorblob', self.logging_context)
if not self.config.vm_settings:
self.config.vm_settings = appinfo.VmSettings()
self.config.vm_settings['module_yaml_path'] = module_yaml_path
if not self.config.auto_id_policy:
self.config.auto_id_policy = appinfo.DATASTORE_ID_POLICY_DEFAULT
self.ignore_endpoints_failures = ignore_endpoints_failures
def AddFile(self, path, file_handle):
"""Adds the provided file to the list to be pushed to the server.
Args:
path: The path the file should be uploaded as.
file_handle: A stream containing data to upload.
"""
assert not self.in_transaction, 'Already in a transaction.'
assert file_handle is not None
reason = appinfo.ValidFilename(path)
if reason:
logging.error(reason)
return
content_hash = _HashFromFileHandle(file_handle)
self.files[path] = content_hash
self.all_files.add(path)
def Describe(self):
"""Returns a string describing the object being updated."""
result = 'app: %s' % self.app_id
if self.module is not None and self.module != appinfo.DEFAULT_MODULE:
result += ', module: %s' % self.module
if self.backend:
result += ', backend: %s' % self.backend
elif self.version:
result += ', version: %s' % self.version
return result
@staticmethod
def _ValidateBeginYaml(resp):
"""Validates the given /api/appversion/create response string."""
response_dict = yaml.safe_load(resp)
if not response_dict or 'warnings' not in response_dict:
return False
return response_dict
def Begin(self):
"""Begins the transaction, returning a list of files that need uploading.
All calls to AddFile must be made before calling Begin().
Returns:
A list of pathnames for files that should be uploaded using UploadFile()
before Commit() can be called.
"""
assert not self.in_transaction, 'Already in a transaction.'
config_copy = copy.deepcopy(self.config)
for url in config_copy.handlers:
handler_type = url.GetHandlerType()
if url.application_readable:
if handler_type == 'static_dir':
url.static_dir = '%s/%s' % (STATIC_FILE_PREFIX, url.static_dir)
elif handler_type == 'static_files':
url.static_files = '%s/%s' % (STATIC_FILE_PREFIX, url.static_files)
url.upload = '%s/%s' % (STATIC_FILE_PREFIX, url.upload)
response = self.logging_context.Send(
'/api/appversion/create',
payload=config_copy.ToYAML())
result = self._ValidateBeginYaml(response)
if result:
warnings = result.get('warnings')
for warning in warnings:
StatusUpdate('WARNING: %s' % warning, self.error_fh)
self.in_transaction = True
files_to_clone = []
blobs_to_clone = []
errorblobs = {}
for path, content_hash in self.files.iteritems():
file_classification = FileClassification(
self.config, path, error_fh=self.error_fh)
if file_classification.IsStaticFile():
upload_path = path
if file_classification.IsApplicationFile():
upload_path = '%s/%s' % (STATIC_FILE_PREFIX, path)
blobs_to_clone.append((path, upload_path, content_hash,
file_classification.StaticMimeType()))
if file_classification.IsErrorFile():
errorblobs[path] = content_hash
if file_classification.IsApplicationFile():
files_to_clone.append((path, path, content_hash))
files_to_upload = {}
def CloneFiles(url, files, file_type):
"""Sends files to the given url.
Args:
url: the server URL to use.
files: a list of files
file_type: the type of the files
"""
if not files:
return
StatusUpdate('Cloning %d %s file%s.' %
(len(files), file_type, len(files) != 1 and 's' or ''),
self.error_fh)
max_files = self.resource_limits['max_files_to_clone']
for i in xrange(0, len(files), max_files):
if i > 0 and i % max_files == 0:
StatusUpdate('Cloned %d files.' % i, self.error_fh)
chunk = files[i:min(len(files), i + max_files)]
result = self.logging_context.Send(url,
payload=BuildClonePostBody(chunk))
if result:
to_upload = {}
for f in result.split(LIST_DELIMITER):
for entry in files:
real_path, upload_path = entry[:2]
if f == upload_path:
to_upload[real_path] = self.files[real_path]
break
files_to_upload.update(to_upload)
CloneFiles('/api/appversion/cloneblobs', blobs_to_clone, 'static')
CloneFiles('/api/appversion/clonefiles', files_to_clone, 'application')
logging.debug('Files to upload: %s', files_to_upload)
for (path, content_hash) in errorblobs.iteritems():
files_to_upload[path] = content_hash
self.files = files_to_upload
return sorted(files_to_upload.iterkeys())
def UploadFile(self, path, file_handle):
"""Uploads a file to the hosting service.
Must only be called after Begin().
The path provided must be one of those that were returned by Begin().
Args:
path: The path the file is being uploaded as.
file_handle: A file-like object containing the data to upload.
Raises:
KeyError: The provided file is not amongst those to be uploaded.
"""
assert self.in_transaction, 'Begin() must be called before UploadFile().'
if path not in self.files:
raise KeyError('File \'%s\' is not in the list of files to be uploaded.'
% path)
del self.files[path]
file_classification = FileClassification(
self.config, path, error_fh=self.error_fh)
payload = file_handle.read()
if file_classification.IsStaticFile():
upload_path = path
if file_classification.IsApplicationFile():
upload_path = '%s/%s' % (STATIC_FILE_PREFIX, path)
self.blob_batcher.AddToBatch(upload_path, payload,
file_classification.StaticMimeType())
if file_classification.IsErrorFile():
self.errorblob_batcher.AddToBatch(file_classification.ErrorCode(),
payload,
file_classification.ErrorMimeType())
if file_classification.IsApplicationFile():
self.file_batcher.AddToBatch(path, payload, None)
def Precompile(self):
"""Handle precompilation."""
StatusUpdate('Compilation starting.', self.error_fh)
files = []
if self.config.GetEffectiveRuntime() == 'go':
for f in self.all_files:
if f.endswith('.go') and not self.config.nobuild_files.match(f):
files.append(f)
while True:
if files:
StatusUpdate('Compilation: %d files left.' % len(files), self.error_fh)
files = self.PrecompileBatch(files)
if not files:
break
StatusUpdate('Compilation completed.', self.error_fh)
def PrecompileBatch(self, files):
"""Precompile a batch of files.
Args:
files: Either an empty list (for the initial request) or a list
of files to be precompiled.
Returns:
Either an empty list (if no more files need to be precompiled)
or a list of files to be precompiled subsequently.
"""
payload = LIST_DELIMITER.join(files)
response = self.logging_context.Send('/api/appversion/precompile',
payload=payload)
if not response:
return []
return response.split(LIST_DELIMITER)
def Commit(self):
"""Commits the transaction, making the new app version available.
All the files returned by Begin() must have been uploaded with UploadFile()
before Commit() can be called.
This tries the new 'deploy' method; if that fails it uses the old 'commit'.
Returns:
An appinfo.AppInfoSummary if one was returned from the Deploy, None
otherwise.
Raises:
RuntimeError: Some required files were not uploaded.
CannotStartServingError: Another operation is in progress on this version.
"""
assert self.in_transaction, 'Begin() must be called before Commit().'
if self.files:
raise RuntimeError('Not all required files have been uploaded.')
def PrintRetryMessage(_, delay):
StatusUpdate('Will check again in %s seconds.' % delay, self.error_fh)
app_summary = self.Deploy()
success, unused_contents = RetryWithBackoff(
lambda: (self.IsReady(), None), PrintRetryMessage, 1, 2, 60, 20)
if not success:
logging.warning('Version still not ready to serve, aborting.')
raise RuntimeError('Version not ready.')
result = self.StartServing()
if not result:
self.in_transaction = False
else:
if result == '0':
raise CannotStartServingError(
'Another operation on this version is in progress.')
success, response = RetryNoBackoff(self.IsServing, PrintRetryMessage)
if not success:
logging.warning('Version still not serving, aborting.')
raise RuntimeError('Version not ready.')
check_config_updated = response.get('check_endpoints_config')
if check_config_updated:
unused_done, (last_state, user_error) = RetryWithBackoff(
self.IsEndpointsConfigUpdated,
PrintRetryMessage, 1, 2, 60, 20)
if last_state != EndpointsState.SERVING:
self.HandleEndpointsError(user_error)
self.in_transaction = False
return app_summary
def Deploy(self):
"""Deploys the new app version but does not make it default.
All the files returned by Begin() must have been uploaded with UploadFile()
before Deploy() can be called.
Returns:
An appinfo.AppInfoSummary if one was returned from the Deploy, None
otherwise.
Raises:
RuntimeError: Some required files were not uploaded.
"""
assert self.in_transaction, 'Begin() must be called before Deploy().'
if self.files:
raise RuntimeError('Not all required files have been uploaded.')
StatusUpdate('Starting deployment.', self.error_fh)
result = self.logging_context.Send('/api/appversion/deploy')
self.deployed = True
if result:
return yaml_object.BuildSingleObject(appinfo.AppInfoSummary, result)
else:
return None
def IsReady(self):
"""Check if the new app version is ready to serve traffic.
Raises:
      AssertionError: Deploy has not yet been called.
Returns:
True if the server returned the app is ready to serve.
"""
assert self.deployed, 'Deploy() must be called before IsReady().'
StatusUpdate('Checking if deployment succeeded.', self.error_fh)
result = self.logging_context.Send('/api/appversion/isready')
return result == '1'
def StartServing(self):
"""Start serving with the newly created version.
Raises:
      AssertionError: Deploy has not yet been called.
Returns:
The response body, as a string.
"""
assert self.deployed, 'Deploy() must be called before StartServing().'
StatusUpdate('Deployment successful.', self.error_fh)
self.params['willcheckserving'] = '1'
result = self.logging_context.Send('/api/appversion/startserving')
del self.params['willcheckserving']
self.started = True
return result
@staticmethod
def _ValidateIsServingYaml(resp):
"""Validates the given /isserving YAML string.
Args:
resp: the response from an RPC to a URL such as /api/appversion/isserving.
Returns:
The resulting dictionary if the response is valid, or None otherwise.
"""
response_dict = yaml.safe_load(resp)
if 'serving' not in response_dict:
return None
return response_dict
def IsServing(self):
"""Check if the new app version is serving.
Raises:
      AssertionError: StartServing has not yet been called.
CannotStartServingError: A bad response was received from the isserving
API call.
Returns:
(serving, response) Where serving is True if the deployed app version is
serving, False otherwise. response is a dict containing the parsed
response from the server, or an empty dict if the server's response was
an old style 0/1 response.
"""
assert self.started, 'StartServing() must be called before IsServing().'
StatusUpdate('Checking if updated app version is serving.', self.error_fh)
self.params['new_serving_resp'] = '1'
result = self.logging_context.Send('/api/appversion/isserving')
del self.params['new_serving_resp']
if result in ['0', '1']:
return result == '1', {}
result = AppVersionUpload._ValidateIsServingYaml(result)
if not result:
raise CannotStartServingError(
'Internal error: Could not parse IsServing response.')
message = result.get('message')
fatal = result.get('fatal')
if message:
StatusUpdate(message, self.error_fh)
if fatal:
raise CannotStartServingError(message or 'Unknown error.')
return result['serving'], result
@staticmethod
def _ValidateIsEndpointsConfigUpdatedYaml(resp):
"""Validates the YAML string response from an isconfigupdated request.
Args:
resp: A string containing the response from the server.
Returns:
The dictionary with the parsed response if the response is valid.
Otherwise returns False.
"""
response_dict = yaml.safe_load(resp)
if 'updated' not in response_dict and 'updatedDetail2' not in response_dict:
return None
return response_dict
def GetLogUrl(self):
"""Get the URL for the app's logs."""
module = '%s:' % self.module if self.module else ''
return ('https://appengine.google.com/logs?' +
urllib.urlencode((('app_id', self.app_id),
('version_id', module + self.version))))
def IsEndpointsConfigUpdated(self):
"""Check if the Endpoints configuration for this app has been updated.
This should only be called if the app has a Google Cloud Endpoints
handler, or if it's removing one. The server performs the check to see
if Endpoints support is added/updated/removed, and the response to the
isserving call indicates whether IsEndpointsConfigUpdated should be called.
Raises:
      AssertionError: StartServing has not yet been called.
CannotStartServingError: There was an unexpected error with the server
response.
Returns:
(done, updated_state), where done is False if this function should
be called again to retry, True if not. updated_state is an
EndpointsState value indicating whether the Endpoints configuration has
been updated on the server.
"""
assert self.started, ('StartServing() must be called before '
'IsEndpointsConfigUpdated().')
StatusUpdate('Checking if Endpoints configuration has been updated.',
self.error_fh)
result = self.logging_context.Send('/api/isconfigupdated')
result = AppVersionUpload._ValidateIsEndpointsConfigUpdatedYaml(result)
if result is None:
raise CannotStartServingError(
'Internal error: Could not parse IsEndpointsConfigUpdated response.')
if 'updatedDetail2' in result:
updated_state = EndpointsState.Parse(result['updatedDetail2'])
user_error = result.get('errorMessage')
else:
updated_state = (EndpointsState.SERVING if result['updated']
else EndpointsState.PENDING)
user_error = None
return updated_state != EndpointsState.PENDING, (updated_state, user_error)
def HandleEndpointsError(self, user_error):
"""Handle an error state returned by IsEndpointsConfigUpdated.
Args:
user_error: Either None or a string with a message from the server
that indicates what the error was and how the user should resolve it.
Raises:
RuntimeError: The update state is fatal and the user hasn't chosen
to ignore Endpoints errors.
"""
detailed_error = user_error or (
"Check the app's AppEngine logs for errors: %s" % self.GetLogUrl())
error_message = ('Failed to update Endpoints configuration. %s' %
detailed_error)
StatusUpdate(error_message, self.error_fh)
doc_link = ('https://developers.google.com/appengine/docs/python/'
'endpoints/test_deploy#troubleshooting_a_deployment_failure')
StatusUpdate('See the deployment troubleshooting documentation for more '
'information: %s' % doc_link, self.error_fh)
if self.ignore_endpoints_failures:
StatusUpdate('Ignoring Endpoints failure and proceeding with update.',
self.error_fh)
else:
raise RuntimeError(error_message)
def Rollback(self, force_rollback=False):
"""Rolls back the transaction if one is in progress."""
if not self.in_transaction:
return
msg = 'Rolling back the update.'
if self.config.vm and not force_rollback:
msg += (' This can sometimes take a while since a VM version is being '
'rolled back.')
StatusUpdate(msg, self.error_fh)
self.logging_context.Send('/api/appversion/rollback',
force_rollback='1' if force_rollback else '0')
self.in_transaction = False
self.files = {}
def DoUpload(self, paths, openfunc):
"""Uploads a new appversion with the given config and files to the server.
Args:
paths: An iterator that yields the relative paths of the files to upload.
openfunc: A function that takes a path and returns a file-like object.
Returns:
An appinfo.AppInfoSummary if one was returned from the server, None
otherwise.
"""
start_time_usec = self.logging_context.GetCurrentTimeUsec()
logging.info('Reading app configuration.')
StatusUpdate('Starting update of %s' % self.Describe(), self.error_fh)
try:
self.resource_limits = GetResourceLimits(self.logging_context,
self.error_fh)
self._AddFilesThatAreSmallEnough(paths, openfunc)
except KeyboardInterrupt:
logging.info('User interrupted. Aborting.')
raise
except EnvironmentError, e:
if self._IsExceptionClientDeployLoggable(e):
self.logging_context.LogClientDeploy(self.config.runtime,
start_time_usec, False)
logging.error('An error occurred processing files \'%s\': %s. Aborting.',
list(paths), e)
raise
try:
missing_files = self.Begin()
self._UploadMissingFiles(missing_files, openfunc)
if (self.config.derived_file_type and
appinfo.PYTHON_PRECOMPILED in self.config.derived_file_type):
try:
self.Precompile()
except urllib2.HTTPError, e:
ErrorUpdate('Error %d: --- begin server output ---\n'
'%s\n--- end server output ---' %
(e.code, e.read().rstrip('\n')))
if e.code == 422 or self.config.GetEffectiveRuntime() == 'go':
raise
print >>self.error_fh, (
'Precompilation failed. Your app can still serve but may '
'have reduced startup performance. You can retry the update '
'later to retry the precompilation step.')
app_summary = self.Commit()
StatusUpdate('Completed update of %s' % self.Describe(), self.error_fh)
self.logging_context.LogClientDeploy(self.config.runtime, start_time_usec,
True)
except BaseException, e:
try:
self._LogDoUploadException(e)
self.Rollback()
finally:
if self._IsExceptionClientDeployLoggable(e):
self.logging_context.LogClientDeploy(self.config.runtime,
start_time_usec, False)
raise
logging.info('Done!')
return app_summary
def _IsExceptionClientDeployLoggable(self, exception):
"""Determines if an exception qualifes for client deploy log reistration.
Args:
exception: The exception to check.
Returns:
      True iff the exception qualifies for client deploy logging - basically a
      system error rather than a user error or cancellation.
"""
if isinstance(exception, KeyboardInterrupt):
return False
if (isinstance(exception, urllib2.HTTPError)
and 400 <= exception.code <= 499):
return False
return True
def _AddFilesThatAreSmallEnough(self, paths, openfunc):
"""Calls self.AddFile on files that are small enough.
By small enough, we mean that their size is within
self.resource_limits['max_file_size'] for application files, and
'max_blob_size' otherwise. Files that are too large are logged as errors,
and dropped (not sure why this isn't handled by raising an exception...).
Args:
paths: List of paths, relative to the app's base path.
openfunc: A function that takes a paths element, and returns a file-like
object.
"""
StatusUpdate('Scanning files on local disk.', self.error_fh)
num_files = 0
for path in paths:
file_handle = openfunc(path)
try:
file_length = GetFileLength(file_handle)
file_classification = FileClassification(
self.config, path, self.error_fh)
if file_classification.IsApplicationFile():
max_size = self.resource_limits['max_file_size']
else:
max_size = self.resource_limits['max_blob_size']
if file_length > max_size:
extra_msg = (' Consider --enable_jar_splitting.'
if JavaSupported() and path.endswith('jar')
else '')
logging.error('Ignoring file \'%s\': Too long '
'(max %d bytes, file is %d bytes).%s',
path, max_size, file_length, extra_msg)
else:
logging.info('Processing file \'%s\'', path)
self.AddFile(path, file_handle)
finally:
file_handle.close()
num_files += 1
if num_files % 500 == 0:
StatusUpdate('Scanned %d files.' % num_files, self.error_fh)
def _UploadMissingFiles(self, missing_files, openfunc):
"""DoUpload helper to upload files that need to be uploaded.
Args:
missing_files: List of files that need to be uploaded. Begin returns such
a list. Design note: we don't call Begin here, because we want DoUpload
to call it directly so that Begin/Commit are more clearly paired.
openfunc: Function that takes a path relative to the app's base path, and
returns a file-like object.
"""
if not missing_files:
return
StatusUpdate('Uploading %d files and blobs.' % len(missing_files),
self.error_fh)
num_files = 0
for missing_file in missing_files:
file_handle = openfunc(missing_file)
try:
self.UploadFile(missing_file, file_handle)
finally:
file_handle.close()
num_files += 1
if num_files % 500 == 0:
StatusUpdate('Processed %d out of %s.' %
(num_files, len(missing_files)), self.error_fh)
self.file_batcher.Flush()
self.blob_batcher.Flush()
self.errorblob_batcher.Flush()
StatusUpdate('Uploaded %d files and blobs.' % num_files, self.error_fh)
@staticmethod
def _LogDoUploadException(exception):
"""Helper that logs exceptions that occurred during DoUpload.
Args:
exception: An exception that was thrown during DoUpload.
"""
def InstanceOf(tipe):
return isinstance(exception, tipe)
if InstanceOf(KeyboardInterrupt):
logging.info('User interrupted. Aborting.')
elif InstanceOf(urllib2.HTTPError):
logging.info('HTTP Error (%s)', exception)
elif InstanceOf(CannotStartServingError):
logging.error(exception.message)
else:
logging.exception('An unexpected error occurred. Aborting.')
class DoLockAction(object):
"""Locks/unlocks a particular vm app version and shows state."""
def __init__(
self, url, rpcserver, app_id, version, module, instance, file_handle):
self.url = url
self.rpcserver = rpcserver
self.app_id = app_id
self.version = version
self.module = module
self.instance = instance
self.file_handle = file_handle
def GetState(self):
yaml_data = self.rpcserver.Send('/api/vms/debugstate',
app_id=self.app_id,
version_match=self.version,
module=self.module)
state = yaml.safe_load(yaml_data)
done = state['state'] != 'PENDING'
if done:
print >> self.file_handle, state['message']
return (done, state['message'])
def PrintRetryMessage(self, msg, delay):
StatusUpdate('%s. Will try again in %d seconds.' % (msg, delay),
self.file_handle)
def Do(self):
kwargs = {'app_id': self.app_id,
'version_match': self.version,
'module': self.module}
if self.instance:
kwargs['instance'] = self.instance
response = self.rpcserver.Send(self.url, **kwargs)
print >> self.file_handle, response
RetryWithBackoff(self.GetState, self.PrintRetryMessage, 1, 2, 5, 20)
def FileIterator(base, skip_files, runtime, separator=os.path.sep):
"""Walks a directory tree, returning all the files. Follows symlinks.
Args:
base: The base path to search for files under.
skip_files: A regular expression object for files/directories to skip.
runtime: The name of the runtime e.g. "python". If "python27" then .pyc
files with matching .py files will be skipped.
separator: Path separator used by the running system's platform.
Yields:
Paths of files found, relative to base.
"""
dirs = ['']
while dirs:
current_dir = dirs.pop()
entries = set(os.listdir(os.path.join(base, current_dir)))
for entry in sorted(entries):
name = os.path.join(current_dir, entry)
fullname = os.path.join(base, name)
if separator == '\\':
name = name.replace('\\', '/')
if runtime == 'python27' and not skip_files.match(name):
root, extension = os.path.splitext(entry)
if extension == '.pyc' and (root + '.py') in entries:
logging.warning('Ignoring file \'%s\': Cannot upload both '
'<filename>.py and <filename>.pyc', name)
continue
if os.path.isfile(fullname):
if skip_files.match(name):
logging.info('Ignoring file \'%s\': File matches ignore regex.', name)
else:
yield name
elif os.path.isdir(fullname):
if skip_files.match(name):
logging.info(
'Ignoring directory \'%s\': Directory matches ignore regex.',
name)
else:
dirs.append(name)
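# Illustrative use (the skip_files pattern is a simplified stand-in; appcfg
# normally takes the compiled regex from the parsed app.yaml):
#
#   import re
#   skip = re.compile(r'^(.*/)?\..*$')   # skip dotfiles, for example
#   for relpath in FileIterator('/path/to/app', skip, 'python27'):
#     print relpath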
def GetFileLength(fh):
"""Returns the length of the file represented by fh.
This function is capable of finding the length of any seekable stream,
unlike os.fstat, which only works on file streams.
Args:
fh: The stream to get the length of.
Returns:
The length of the stream.
"""
pos = fh.tell()
fh.seek(0, 2)
length = fh.tell()
fh.seek(pos, 0)
return length
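# Example (illustrative), using an in-memory stream:
#
#   import StringIO
#   GetFileLength(StringIO.StringIO('hello'))   # -> 5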
def GetUserAgent(get_version=sdk_update_checker.GetVersionObject,
get_platform=appengine_rpc.GetPlatformToken,
sdk_product=SDK_PRODUCT):
"""Determines the value of the 'User-agent' header to use for HTTP requests.
If the 'APPCFG_SDK_NAME' environment variable is present, that will be
used as the first product token in the user-agent.
Args:
get_version: Used for testing.
get_platform: Used for testing.
sdk_product: Used as part of sdk/version product token.
Returns:
String containing the 'user-agent' header value, which includes the SDK
version, the platform information, and the version of Python;
e.g., 'appcfg_py/1.0.1 Darwin/9.2.0 Python/2.5.2'.
"""
product_tokens = []
sdk_name = os.environ.get('APPCFG_SDK_NAME')
if sdk_name:
product_tokens.append(sdk_name)
else:
version = get_version()
if version is None:
release = 'unknown'
else:
release = version['release']
product_tokens.append('%s/%s' % (sdk_product, release))
product_tokens.append(get_platform())
python_version = '.'.join(str(i) for i in sys.version_info)
product_tokens.append('Python/%s' % python_version)
return ' '.join(product_tokens)
def GetSourceName(get_version=sdk_update_checker.GetVersionObject):
"""Gets the name of this source version."""
version = get_version()
if version is None:
release = 'unknown'
else:
release = version['release']
return 'Google-appcfg-%s' % (release,)
def _ReadUrlContents(url):
"""Reads the contents of a URL into a string.
Args:
url: a string that is the URL to read.
Returns:
A string that is the contents read from the URL.
Raises:
urllib2.URLError: If the URL cannot be read.
"""
req = urllib2.Request(url, headers={'Metadata-Flavor': 'Google'})
return urllib2.urlopen(req).read()
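# Illustrative call (the URL is only an example of a metadata-style endpoint
# that expects the 'Metadata-Flavor: Google' header; any readable URL works):
#
#   body = _ReadUrlContents('http://metadata.google.internal/computeMetadata/v1/')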
class AppCfgApp(object):
"""Singleton class to wrap AppCfg tool functionality.
This class is responsible for parsing the command line and executing
the desired action on behalf of the user. Processing files and
communicating with the server is handled by other classes.
Attributes:
actions: A dictionary mapping action names to Action objects.
action: The Action specified on the command line.
parser: An instance of optparse.OptionParser.
options: The command line options parsed by 'parser'.
argv: The original command line as a list.
args: The positional command line args left over after parsing the options.
error_fh: Unexpected HTTPErrors are printed to this file handle.
Attributes for testing:
parser_class: The class to use for parsing the command line. Because
OptionsParser will exit the program when there is a parse failure, it
is nice to subclass OptionsParser and catch the error before exiting.
read_url_contents: A function to read the contents of a URL.
"""
def __init__(self, argv, parser_class=optparse.OptionParser,
rpc_server_class=None,
out_fh=sys.stdout,
error_fh=sys.stderr,
update_check_class=sdk_update_checker.SDKUpdateChecker,
throttle_class=None,
opener=open,
file_iterator=FileIterator,
time_func=time.time,
wrap_server_error_message=True,
oauth_client_id=APPCFG_CLIENT_ID,
oauth_client_secret=APPCFG_CLIENT_NOTSOSECRET,
oauth_scopes=APPCFG_SCOPES):
"""Initializer. Parses the cmdline and selects the Action to use.
Initializes all of the attributes described in the class docstring.
Prints help or error messages if there is an error parsing the cmdline.
Args:
argv: The list of arguments passed to this program.
parser_class: Options parser to use for this application.
rpc_server_class: RPC server class to use for this application.
out_fh: All normal output is printed to this file handle.
error_fh: Unexpected HTTPErrors are printed to this file handle.
update_check_class: sdk_update_checker.SDKUpdateChecker class (can be
replaced for testing).
throttle_class: A class to use instead of ThrottledHttpRpcServer
(only used in the bulkloader).
opener: Function used for opening files.
file_iterator: Callable that takes (basepath, skip_files, file_separator)
and returns a generator that yields all filenames in the file tree
rooted at that path, skipping files that match the skip_files compiled
regular expression.
time_func: A time.time() compatible function, which can be overridden for
testing.
wrap_server_error_message: If true, the error messages from
urllib2.HTTPError exceptions in Run() are wrapped with
'--- begin server output ---' and '--- end server output ---',
otherwise the error message is printed as is.
oauth_client_id: The client ID of the project providing Auth. Defaults to
the SDK default project client ID, the constant APPCFG_CLIENT_ID.
oauth_client_secret: The client secret of the project providing Auth.
Defaults to the SDK default project client secret, the constant
APPCFG_CLIENT_NOTSOSECRET.
oauth_scopes: The scope or set of scopes to be accessed by the OAuth2
token retrieved. Defaults to APPCFG_SCOPES. Can be a string or
iterable of strings, representing the scope(s) to request.
"""
self.parser_class = parser_class
self.argv = argv
self.rpc_server_class = rpc_server_class
self.out_fh = out_fh
self.error_fh = error_fh
self.update_check_class = update_check_class
self.throttle_class = throttle_class
self.time_func = time_func
self.wrap_server_error_message = wrap_server_error_message
self.oauth_client_id = oauth_client_id
self.oauth_client_secret = oauth_client_secret
self.oauth_scopes = oauth_scopes
self.read_url_contents = _ReadUrlContents
self.parser = self._GetOptionParser()
for action in self.actions.itervalues():
action.options(self, self.parser)
self.options, self.args = self.parser.parse_args(argv[1:])
if len(self.args) < 1:
self._PrintHelpAndExit()
if not self.options.allow_any_runtime:
if self.options.runtime:
if self.options.runtime not in appinfo.GetAllRuntimes():
_PrintErrorAndExit(self.error_fh,
'"%s" is not a supported runtime\n' %
self.options.runtime)
else:
appinfo.AppInfoExternal.ATTRIBUTES[appinfo.RUNTIME] = (
'|'.join(appinfo.GetAllRuntimes()))
if self.options.redundant_oauth2:
print >>sys.stderr, (
'\nNote: the --oauth2 flag is now the default and can be omitted.\n')
action = self.args.pop(0)
def RaiseParseError(actionname, action):
self.parser, self.options = self._MakeSpecificParser(action)
error_desc = action.error_desc
if not error_desc:
error_desc = "Expected a <directory> argument after '%s'." % (
actionname.split(' ')[0])
self.parser.error(error_desc)
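    # 'backends' subcommands are two-word actions ('backends update',
    # 'backends list', ...). Peek at the following positional argument to
    # resolve the full action name, accepting either
    # 'backends <subcommand> <dir>' or 'backends <dir> <subcommand>'.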
if action == BACKENDS_ACTION:
if len(self.args) < 1:
RaiseParseError(action, self.actions[BACKENDS_ACTION])
backend_action_first = BACKENDS_ACTION + ' ' + self.args[0]
if backend_action_first in self.actions:
self.args.pop(0)
action = backend_action_first
elif len(self.args) > 1:
backend_directory_first = BACKENDS_ACTION + ' ' + self.args[1]
if backend_directory_first in self.actions:
self.args.pop(1)
action = backend_directory_first
if len(self.args) < 1 or action == BACKENDS_ACTION:
RaiseParseError(action, self.actions[action])
if action not in self.actions:
self.parser.error("Unknown action: '%s'\n%s" %
(action, self.parser.get_description()))
self.action = self.actions[action]
if not self.action.uses_basepath or self.options.help:
self.basepath = None
else:
if not self.args:
RaiseParseError(action, self.action)
self.basepath = self.args.pop(0)
self.parser, self.options = self._MakeSpecificParser(self.action)
if self.options.help:
self._PrintHelpAndExit()
if self.options.verbose == 2:
logging.getLogger().setLevel(logging.INFO)
elif self.options.verbose == 3:
logging.getLogger().setLevel(logging.DEBUG)
global verbosity
verbosity = self.options.verbose
if self.options.oauth2_client_id:
self.oauth_client_id = self.options.oauth2_client_id
if self.options.oauth2_client_secret:
self.oauth_client_secret = self.options.oauth2_client_secret
self.opener = opener
self.file_iterator = file_iterator
def Run(self):
"""Executes the requested action.
Catches any HTTPErrors raised by the action and prints them to stderr.
Returns:
1 on error, 0 if successful.
"""
try:
self.action(self)
except urllib2.HTTPError, e:
body = e.read()
if self.wrap_server_error_message:
error_format = ('Error %d: --- begin server output ---\n'
'%s\n--- end server output ---')
else:
error_format = 'Error %d: %s'
print >>self.error_fh, (error_format % (e.code, body.rstrip('\n')))
return 1
except yaml_errors.EventListenerError, e:
print >>self.error_fh, ('Error parsing yaml file:\n%s' % e)
return 1
except CannotStartServingError:
print >>self.error_fh, 'Could not start serving the given version.'
return 1
return 0
def _GetActionDescriptions(self):
"""Returns a formatted string containing the short_descs for all actions."""
action_names = self.actions.keys()
action_names.sort()
desc = ''
for action_name in action_names:
if not self.actions[action_name].hidden:
desc += ' %s: %s\n' % (action_name,
self.actions[action_name].short_desc)
return desc
def _GetOptionParser(self):
"""Creates an OptionParser with generic usage and description strings.
Returns:
An OptionParser instance.
"""
def AppendSourceReference(option, opt_str, value, parser):
"""Validates the source reference string and appends it to the list."""
try:
appinfo.ValidateSourceReference(value)
except validation.ValidationError, e:
raise optparse.OptionValueError('option %s: %s' % (opt_str, e.message))
getattr(parser.values, option.dest).append(value)
class Formatter(optparse.IndentedHelpFormatter):
"""Custom help formatter that does not reformat the description."""
def format_description(self, description):
"""Very simple formatter."""
return description + '\n'
class AppCfgOption(optparse.Option):
"""Custom Option for AppCfg.
Adds an 'update' action for storing key-value pairs as a dict.
"""
_ACTION = 'update'
ACTIONS = optparse.Option.ACTIONS + (_ACTION,)
STORE_ACTIONS = optparse.Option.STORE_ACTIONS + (_ACTION,)
TYPED_ACTIONS = optparse.Option.TYPED_ACTIONS + (_ACTION,)
ALWAYS_TYPED_ACTIONS = optparse.Option.ALWAYS_TYPED_ACTIONS + (_ACTION,)
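      # The 'update' action accumulates repeated NAME:VALUE flags into a dict
      # on the destination attribute. Illustrative example (not from a real
      # invocation): 'appcfg.py update . -E FOO:bar -E BAZ:qux' leaves
      # options.env_variables == {'FOO': 'bar', 'BAZ': 'qux'}.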
def take_action(self, action, dest, opt, value, values, parser):
if action != self._ACTION:
return optparse.Option.take_action(
self, action, dest, opt, value, values, parser)
try:
key, value = value.split(':', 1)
except ValueError:
raise optparse.OptionValueError(
'option %s: invalid value: %s (must match NAME:VALUE)' % (
opt, value))
values.ensure_value(dest, {})[key] = value
desc = self._GetActionDescriptions()
desc = ('Action must be one of:\n%s'
'Use \'help <action>\' for a detailed description.') % desc
parser = self.parser_class(usage='%prog [options] <action>',
description=desc,
formatter=Formatter(),
conflict_handler='resolve',
option_class=AppCfgOption)
parser.add_option('-h', '--help', action='store_true',
dest='help', help='Show the help message and exit.')
parser.add_option('-q', '--quiet', action='store_const', const=0,
dest='verbose', help='Print errors only.')
parser.add_option('-v', '--verbose', action='store_const', const=2,
dest='verbose', default=1,
help='Print info level logs.')
parser.add_option('--noisy', action='store_const', const=3,
dest='verbose', help='Print all logs.')
parser.add_option('-s', '--server', action='store', dest='server',
default='appengine.google.com',
metavar='SERVER', help='The App Engine server.')
parser.add_option('--secure', action='store_true', dest='secure',
default=True, help=optparse.SUPPRESS_HELP)
parser.add_option('--ignore_bad_cert', action='store_true',
dest='ignore_certs', default=False,
help=optparse.SUPPRESS_HELP)
parser.add_option('--insecure', action='store_false', dest='secure',
help=optparse.SUPPRESS_HELP)
parser.add_option('-e', '--email', action='store', dest='email',
metavar='EMAIL', default=None,
help='The username to use. Will prompt if omitted.')
parser.add_option('-H', '--host', action='store', dest='host',
metavar='HOST', default=None,
help='Overrides the Host header sent with all RPCs.')
parser.add_option('--no_cookies', action='store_false',
dest='save_cookies', default=True,
help='Do not save authentication cookies to local disk.')
parser.add_option('--skip_sdk_update_check', action='store_true',
dest='skip_sdk_update_check', default=False,
help='Do not check for SDK updates.')
parser.add_option('-A', '--application', action='store', dest='app_id',
help=('Set the application, overriding the application '
'value from app.yaml file.'))
parser.add_option('-M', '--module', action='store', dest='module',
help=('Set the module, overriding the module value '
'from app.yaml.'))
parser.add_option('-V', '--version', action='store', dest='version',
help=('Set the (major) version, overriding the version '
'value from app.yaml file.'))
parser.add_option('-r', '--runtime', action='store', dest='runtime',
help='Override runtime from app.yaml file.')
parser.add_option('--source_ref', metavar='[repository_uri#]revision',
type='string', action='callback',
callback=AppendSourceReference, dest='source_ref',
default=[],
help=optparse.SUPPRESS_HELP)
parser.add_option('-E', '--env_variable', action='update',
dest='env_variables', metavar='NAME:VALUE',
help=('Set an environment variable, potentially '
'overriding an env_variable value from app.yaml '
'file (flag may be repeated to set multiple '
'variables).'))
parser.add_option('-R', '--allow_any_runtime', action='store_true',
dest='allow_any_runtime', default=False,
help='Do not validate the runtime in app.yaml')
parser.add_option('--oauth2', action='store_true',
dest='redundant_oauth2', default=False,
help='Ignored (OAuth2 is the default).')
parser.add_option('--oauth2_refresh_token', action='store',
dest='oauth2_refresh_token', default=None,
help='An existing OAuth2 refresh token to use. Will '
'not attempt interactive OAuth approval.')
parser.add_option('--oauth2_access_token', action='store',
dest='oauth2_access_token', default=None,
help='An existing OAuth2 access token to use. Will '
'not attempt interactive OAuth approval.')
parser.add_option('--oauth2_client_id', action='store',
dest='oauth2_client_id', default=None,
help=optparse.SUPPRESS_HELP)
parser.add_option('--oauth2_client_secret', action='store',
dest='oauth2_client_secret', default=None,
help=optparse.SUPPRESS_HELP)
parser.add_option('--oauth2_credential_file', action='store',
dest='oauth2_credential_file', default=None,
help=optparse.SUPPRESS_HELP)
parser.add_option('--authenticate_service_account', action='store_true',
dest='authenticate_service_account', default=False,
help='Authenticate using the default service account '
'for the Google Compute Engine VM in which appcfg is '
'being called')
parser.add_option('--noauth_local_webserver', action='store_false',
dest='auth_local_webserver', default=True,
help='Do not run a local web server to handle redirects '
'during OAuth authorization.')
parser.add_option('--called_by_gcloud',
action='store_true', default=False,
help=optparse.SUPPRESS_HELP)
parser.add_option('--ignore_endpoints_failures', action='store_true',
dest='ignore_endpoints_failures', default=True,
help=optparse.SUPPRESS_HELP)
parser.add_option('--no_ignore_endpoints_failures', action='store_false',
dest='ignore_endpoints_failures',
help=optparse.SUPPRESS_HELP)
return parser
def _MakeSpecificParser(self, action):
"""Creates a new parser with documentation specific to 'action'.
Args:
action: An Action instance to be used when initializing the new parser.
Returns:
A tuple containing:
parser: An instance of OptionsParser customized to 'action'.
options: The command line options after re-parsing.
"""
parser = self._GetOptionParser()
parser.set_usage(action.usage)
parser.set_description('%s\n%s' % (action.short_desc, action.long_desc))
action.options(self, parser)
options, unused_args = parser.parse_args(self.argv[1:])
return parser, options
def _PrintHelpAndExit(self, exit_code=2):
"""Prints the parser's help message and exits the program.
Args:
exit_code: The integer code to pass to sys.exit().
"""
self.parser.print_help()
sys.exit(exit_code)
def _GetRpcServer(self):
"""Returns an instance of an AbstractRpcServer.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
Raises:
      RuntimeError: The user has requested non-interactive authentication but the
environment is not correct for that to work.
"""
StatusUpdate('Host: %s' % self.options.server, self.error_fh)
source = GetSourceName()
dev_appserver = self.options.host in ['localhost', '127.0.0.1']
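    # When targeting a local dev_appserver, authentication is short-circuited:
    # a dummy email/password pair is supplied and the server is marked as
    # already authenticated, so no OAuth flow is attempted.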
if dev_appserver:
if not self.rpc_server_class:
self.rpc_server_class = appengine_rpc.HttpRpcServer
if hasattr(self, 'runtime'):
self.rpc_server_class.RUNTIME = self.runtime
email = self.options.email
if email is None:
email = '[email protected]'
logging.info('Using debug user %s. Override with --email', email)
rpcserver = self.rpc_server_class(
self.options.server,
lambda: (email, 'password'),
GetUserAgent(),
source,
host_override=self.options.host,
save_cookies=self.options.save_cookies,
secure=False)
rpcserver.authenticated = True
return rpcserver
if not self.rpc_server_class:
self.rpc_server_class = appengine_rpc_httplib2.HttpRpcServerOAuth2
oauth2_parameters = self._GetOAuth2Parameters()
extra_headers = {}
return self.rpc_server_class(self.options.server, oauth2_parameters,
GetUserAgent(), source,
host_override=self.options.host,
save_cookies=self.options.save_cookies,
auth_tries=3,
account_type='HOSTED_OR_GOOGLE',
secure=self.options.secure,
ignore_certs=self.options.ignore_certs,
extra_headers=extra_headers,
options=self.options)
def _MaybeGetDevshellOAuth2AccessToken(self):
"""Returns a valid OAuth2 access token when running in Cloud Shell."""
try:
creds = devshell.DevshellCredentials()
return creds.access_token
except devshell.NoDevshellServer:
return None
def _GetOAuth2Parameters(self):
"""Returns appropriate an OAuth2Parameters object for authentication."""
oauth2_parameters = (
appengine_rpc_httplib2.HttpRpcServerOAuth2.OAuth2Parameters(
access_token=(self.options.oauth2_access_token or
self._MaybeGetDevshellOAuth2AccessToken()),
client_id=self.oauth_client_id,
client_secret=self.oauth_client_secret,
scope=self.oauth_scopes,
refresh_token=self.options.oauth2_refresh_token,
credential_file=self.options.oauth2_credential_file,
credentials=self._GetCredentials()))
return oauth2_parameters
def _GetCredentials(self):
"""Return appropriate credentials if we are running in a GCE environment.
Returns:
AppAssertionCredentials if we are running on GCE, None if not.
Raises:
RuntimeError: The user has requested authentication for a service account
but the environment is not correct for that to work.
"""
if self.options.authenticate_service_account:
url = '%s/%s/scopes' % (METADATA_BASE, SERVICE_ACCOUNT_BASE)
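      # The GCE metadata server's '.../scopes' endpoint is expected to return
      # a whitespace-separated list of OAuth scopes granted to the VM's
      # service account; all required scopes must be present.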
try:
vm_scopes_string = self.read_url_contents(url)
except urllib2.URLError, e:
raise RuntimeError('Could not obtain scope list from metadata service: '
'%s: %s. This may be because we are not running in '
'a Google Compute Engine VM.' % (url, e))
vm_scopes = vm_scopes_string.split()
missing = list(set(self.oauth_scopes).difference(vm_scopes))
if missing:
raise RuntimeError('Required scopes %s missing from %s. '
'This VM instance probably needs to be recreated '
'with the missing scopes.' % (missing, vm_scopes))
return oauth2client_gce.AppAssertionCredentials()
else:
return None
def _GetSourceContexts(self, basepath):
"""Return a list of extended source contexts for this deployment.
Args:
basepath: Base application directory.
Returns:
If --repo_info_file was specified, it returns the contexts specified in
that file. If the file does not contain any regular contexts (i.e.
contexts that do not point at a source capture), it will add one or more
source contexts describing the repo associated with the basepath
directory.
"""
source_contexts = []
if self.options.repo_info_file:
try:
with open(self.options.repo_info_file, 'r') as f:
source_contexts = json.load(f)
except (ValueError, IOError), ex:
raise RuntimeError(
'Failed to load {0}: {1}'.format(self.options.repo_info_file, ex))
if isinstance(source_contexts, dict):
source_contexts = [context_util.ExtendContextDict(source_contexts)]
regular_contexts = [context for context in source_contexts
if not context_util.IsCaptureContext(context)]
capture_contexts = [context for context in source_contexts
if context_util.IsCaptureContext(context)]
if not regular_contexts:
try:
regular_contexts = context_util.CalculateExtendedSourceContexts(
basepath)
except context_util.GenerateSourceContextError, e:
logging.info('No source context generated: %s', e)
return regular_contexts + capture_contexts
def _CreateSourceContextFiles(self, source_contexts, basepath, openfunc,
paths):
"""Adds the source context JSON files for the given contexts.
Args:
source_contexts: One or more extended source contexts.
basepath: Base application directory.
openfunc: The current function for opening files.
paths: The current list of paths for the application.
Returns:
(open_func, [string])
An extended version of openfunc which can also return the JSON contents of
the source context files, and a list of files including the original paths
plus the generated source context files.
"""
if not source_contexts:
return (openfunc, paths)
context_file_map = {}
if not os.path.exists(
os.path.join(basepath, context_util.CONTEXT_FILENAME)):
best_context = context_util.BestSourceContext(source_contexts)
context_file_map[context_util.CONTEXT_FILENAME] = json.dumps(
best_context)
if not os.path.exists(
os.path.join(basepath, context_util.EXT_CONTEXT_FILENAME)):
context_file_map[context_util.EXT_CONTEXT_FILENAME] = json.dumps(
source_contexts)
base_openfunc = openfunc
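    # Wrap the original open function so the generated source-context files
    # are served from the in-memory map instead of the filesystem.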
def OpenWithContext(name):
if name in context_file_map:
return StringIO.StringIO(context_file_map[name])
return base_openfunc(name)
return (OpenWithContext, itertools.chain(paths, context_file_map.keys()))
def _FindYaml(self, basepath, file_name):
"""Find yaml files in application directory.
Args:
basepath: Base application directory.
file_name: Relative file path from basepath, without extension, to search
for.
Returns:
Path to located yaml file if one exists, else None.
"""
if not os.path.isdir(basepath):
self.parser.error('Not a directory: %s' % basepath)
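    # Also look under WEB-INF/appengine-generated, where the Java tooling
    # presumably writes generated yaml files; both .yaml and .yml extensions
    # are accepted in either location.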
alt_basepath = os.path.join(basepath, 'WEB-INF', 'appengine-generated')
for yaml_basepath in (basepath, alt_basepath):
for yaml_file in (file_name + '.yaml', file_name + '.yml'):
yaml_path = os.path.join(yaml_basepath, yaml_file)
if os.path.isfile(yaml_path):
return yaml_path
return None
def _ParseAppInfoFromYaml(self, basepath, basename='app'):
"""Parses the app.yaml file.
Args:
basepath: The directory of the application.
basename: The relative file path, from basepath, to search for.
Returns:
An AppInfoExternal object.
"""
try:
appyaml = self._ParseYamlFile(basepath, basename, appinfo_includes.Parse)
except yaml_errors.EventListenerError, e:
self.parser.error('Error parsing %s.yaml: %s.' % (
os.path.join(basepath, basename), e))
if not appyaml:
if JavaSupported():
if appcfg_java.IsWarFileWithoutYaml(basepath):
java_app_update = appcfg_java.JavaAppUpdate(basepath, self.options)
appyaml_string = java_app_update.GenerateAppYamlString([])
appyaml = appinfo.LoadSingleAppInfo(appyaml_string)
if not appyaml:
self.parser.error('Directory contains neither an %s.yaml '
'configuration file nor a WEB-INF subdirectory '
'with web.xml and appengine-web.xml.' % basename)
else:
self.parser.error('Directory %r does not contain configuration file '
'%s.yaml' %
(os.path.abspath(basepath), basename))
appyaml.module = appyaml.module or appyaml.service
appyaml.service = None
orig_application = appyaml.application
orig_module = appyaml.module
orig_version = appyaml.version
if self.options.app_id:
appyaml.application = self.options.app_id
if self.options.module:
appyaml.module = self.options.module
if self.options.version:
appyaml.version = self.options.version
if self.options.runtime:
appyaml.SetEffectiveRuntime(self.options.runtime)
if self.options.env_variables:
if appyaml.env_variables is None:
appyaml.env_variables = appinfo.EnvironmentVariables()
appyaml.env_variables.update(self.options.env_variables)
if self.options.source_ref:
try:
combined_refs = '\n'.join(self.options.source_ref)
appinfo.ValidateCombinedSourceReferencesString(combined_refs)
if appyaml.beta_settings is None:
appyaml.beta_settings = appinfo.BetaSettings()
appyaml.beta_settings['source_reference'] = combined_refs
except validation.ValidationError, e:
self.parser.error(e.message)
if not appyaml.application:
self.parser.error('Expected -A app_id when application property in file '
'%s.yaml is not set.' % basename)
msg = 'Application: %s' % appyaml.application
if appyaml.application != orig_application:
msg += ' (was: %s)' % orig_application
    if self.action.function == 'Update':
if (appyaml.module is not None and
appyaml.module != appinfo.DEFAULT_MODULE):
msg += '; module: %s' % appyaml.module
if appyaml.module != orig_module:
msg += ' (was: %s)' % orig_module
msg += '; version: %s' % appyaml.version
if appyaml.version != orig_version:
msg += ' (was: %s)' % orig_version
StatusUpdate(msg, self.error_fh)
return appyaml
def _ParseYamlFile(self, basepath, basename, parser):
"""Parses a yaml file.
Args:
basepath: The base directory of the application.
basename: The relative file path, from basepath, (with the '.yaml'
stripped off).
parser: the function or method used to parse the file.
Returns:
A single parsed yaml file or None if the file does not exist.
"""
file_name = self._FindYaml(basepath, basename)
if file_name is not None:
fh = self.opener(file_name, 'r')
try:
defns = parser(fh, open_fn=self.opener)
finally:
fh.close()
return defns
return None
def _ParseBackendsYaml(self, basepath):
"""Parses the backends.yaml file.
Args:
basepath: the directory of the application.
Returns:
A BackendsInfoExternal object or None if the file does not exist.
"""
return self._ParseYamlFile(basepath, 'backends',
backendinfo.LoadBackendInfo)
def _ParseIndexYaml(self, basepath, appyaml=None):
"""Parses the index.yaml file.
Args:
basepath: the directory of the application.
appyaml: The app.yaml, if present.
Returns:
A single parsed yaml file or None if the file does not exist.
"""
index_yaml = self._ParseYamlFile(basepath,
'index',
datastore_index.ParseIndexDefinitions)
if not index_yaml:
return None
self._SetApplication(index_yaml, 'index', appyaml)
return index_yaml
def _SetApplication(self, dest_yaml, basename, appyaml=None):
"""Parses and sets the application property onto the dest_yaml parameter.
    The order of precedence is:
1. Command line (-A application)
2. Specified dest_yaml file
3. App.yaml file
This exits with a parse error if application is not present in any of these
locations.
Args:
dest_yaml: The yaml object to set 'application' on.
basename: The name of the dest_yaml file for use in errors.
appyaml: The already parsed appyaml, if present. If none, this method will
attempt to parse app.yaml.
"""
if self.options.app_id:
dest_yaml.application = self.options.app_id
if not dest_yaml.application:
if not appyaml:
appyaml = self._ParseYamlFile(self.basepath,
'app',
appinfo_includes.Parse)
if appyaml:
dest_yaml.application = appyaml.application
else:
self.parser.error('Expected -A app_id when %s.yaml.application is not '
'set and app.yaml is not present.' % basename)
def _ParseCronYaml(self, basepath, appyaml=None):
"""Parses the cron.yaml file.
Args:
basepath: the directory of the application.
appyaml: The app.yaml, if present.
Returns:
A CronInfoExternal object or None if the file does not exist.
"""
cron_yaml = self._ParseYamlFile(basepath, 'cron', croninfo.LoadSingleCron)
if not cron_yaml:
return None
self._SetApplication(cron_yaml, 'cron', appyaml)
return cron_yaml
def _ParseQueueYaml(self, basepath, appyaml=None):
"""Parses the queue.yaml file.
Args:
basepath: the directory of the application.
appyaml: The app.yaml, if present.
Returns:
A QueueInfoExternal object or None if the file does not exist.
"""
queue_yaml = self._ParseYamlFile(basepath,
'queue',
queueinfo.LoadSingleQueue)
if not queue_yaml:
return None
self._SetApplication(queue_yaml, 'queue', appyaml)
return queue_yaml
def _ParseDispatchYaml(self, basepath, appyaml=None):
"""Parses the dispatch.yaml file.
Args:
basepath: the directory of the application.
appyaml: The app.yaml, if present.
Returns:
A DispatchInfoExternal object or None if the file does not exist.
"""
dispatch_yaml = self._ParseYamlFile(basepath,
'dispatch',
dispatchinfo.LoadSingleDispatch)
if not dispatch_yaml:
return None
self._SetApplication(dispatch_yaml, 'dispatch', appyaml)
return dispatch_yaml
def _ParseDosYaml(self, basepath, appyaml=None):
"""Parses the dos.yaml file.
Args:
basepath: the directory of the application.
appyaml: The app.yaml, if present.
Returns:
A DosInfoExternal object or None if the file does not exist.
"""
dos_yaml = self._ParseYamlFile(basepath, 'dos', dosinfo.LoadSingleDos)
if not dos_yaml:
return None
self._SetApplication(dos_yaml, 'dos', appyaml)
return dos_yaml
def Help(self, action=None):
"""Prints help for a specific action.
Args:
action: If provided, print help for the action provided.
Expects self.args[0], or 'action', to contain the name of the action in
question. Exits the program after printing the help message.
"""
if not action:
if len(self.args) > 1:
self.args = [' '.join(self.args)]
if len(self.args) != 1 or self.args[0] not in self.actions:
self.parser.error('Expected a single action argument. '
' Must be one of:\n' +
self._GetActionDescriptions())
action = self.args[0]
action = self.actions[action]
self.parser, unused_options = self._MakeSpecificParser(action)
self._PrintHelpAndExit(exit_code=0)
def DownloadApp(self):
"""Downloads the given app+version."""
if len(self.args) != 1:
self.parser.error('\"download_app\" expects one non-option argument, '
'found ' + str(len(self.args)) + '.')
out_dir = self.args[0]
app_id = self.options.app_id
if app_id is None:
self.parser.error('You must specify an app ID via -A or --application.')
module = self.options.module
app_version = self.options.version
if os.path.exists(out_dir):
if not os.path.isdir(out_dir):
self.parser.error('Cannot download to path "%s": '
'there\'s a file in the way.' % out_dir)
elif os.listdir(out_dir):
self.parser.error('Cannot download to path "%s": directory already '
'exists and it isn\'t empty.' % out_dir)
rpcserver = self._GetRpcServer()
DoDownloadApp(rpcserver, out_dir, app_id, module, app_version)
def UpdateVersion(self, rpcserver, basepath, appyaml, module_yaml_path,
backend=None):
"""Updates and deploys a new appversion.
Args:
rpcserver: An AbstractRpcServer instance on which RPC calls can be made.
basepath: The root directory of the version to update.
appyaml: The AppInfoExternal object parsed from an app.yaml-like file.
module_yaml_path: The (string) path to the yaml file, relative to the
bundle directory.
backend: The name of the backend to update, if any.
Returns:
An appinfo.AppInfoSummary if one was returned from the Deploy, None
otherwise.
Raises:
RuntimeError: If go-app-builder fails to generate a mapping from relative
paths to absolute paths, its stderr is raised.
"""
runtime = appyaml.GetEffectiveRuntime()
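    # Runtime-specific precompilation rules: disabled for VM-based apps
    # (except Go apps not deployed via gcloud), for Dart, and for Java apps
    # without the precompiled marker; forced on for Go, where it is required.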
if appyaml.vm and (self.options.called_by_gcloud or runtime != 'go'):
self.options.precompilation = False
elif runtime == 'dart':
self.options.precompilation = False
elif runtime == 'go' and not self.options.precompilation:
logging.warning('Precompilation is required for Go apps; '
'ignoring --no_precompilation')
self.options.precompilation = True
elif (runtime.startswith('java') and
appinfo.JAVA_PRECOMPILED not in (appyaml.derived_file_type or [])):
self.options.precompilation = False
if runtime in GCLOUD_ONLY_RUNTIMES:
raise RuntimeError('The runtime: \'%s\' is only supported with '
'gcloud.' % runtime)
if self.options.precompilation:
if not appyaml.derived_file_type:
appyaml.derived_file_type = []
if appinfo.PYTHON_PRECOMPILED not in appyaml.derived_file_type:
appyaml.derived_file_type.append(appinfo.PYTHON_PRECOMPILED)
source_contexts = self._GetSourceContexts(basepath)
paths = self.file_iterator(basepath, appyaml.skip_files, appyaml.runtime)
openfunc = lambda path: self.opener(os.path.join(basepath, path), 'rb')
if appyaml.GetEffectiveRuntime() == 'go':
if appyaml.runtime == 'vm':
raise RuntimeError(
'The Go runtime with "vm: true" is only supported with gcloud.')
sdk_base = os.path.normpath(os.path.join(
google.appengine.__file__, '..', '..', '..'))
gopath = os.environ.get('GOPATH')
if not gopath:
gopath = os.path.join(sdk_base, 'gopath')
goroot = os.path.join(sdk_base, goroots.GOROOTS[appyaml.api_version])
if not os.path.exists(goroot):
goroot = os.getenv('GOROOT')
gab = None
if goroot:
gab = os.path.join(sdk_base, goroot, 'bin', 'go-app-builder')
if sys.platform.startswith('win'):
gab += '.exe'
if gab and os.path.exists(gab):
app_paths = list(paths)
go_files = [f for f in app_paths
if f.endswith('.go') and not appyaml.nobuild_files.match(f)]
if not go_files:
raise RuntimeError('no Go source files to upload '
'(-nobuild_files applied)')
gab_argv = [
gab,
'-api_version', appyaml.api_version,
'-app_base', self.basepath,
'-arch', '6',
'-gopath', gopath,
'-print_extras',
]
if goroot:
gab_argv.extend(['-goroot', goroot])
gab_argv.extend(go_files)
env = {
'GOOS': 'linux',
'GOARCH': 'amd64',
}
logging.info('Invoking go-app-builder: %s', ' '.join(gab_argv))
try:
p = subprocess.Popen(gab_argv, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
(stdout, stderr) = p.communicate()
except Exception, e:
raise RuntimeError('failed running go-app-builder', e)
if p.returncode != 0:
raise RuntimeError(stderr)
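        # With -print_extras, go-app-builder emits one 'relative|absolute'
        # path pair per line on stdout; the resulting overlay maps extra
        # files (presumably sources pulled in from GOPATH/GOROOT) to their
        # real locations outside the app directory.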
overlay = dict([l.split('|') for l in stdout.split('\n') if l])
logging.info('GOPATH overlay: %s', overlay)
def Open(path):
if path in overlay:
return self.opener(overlay[path], 'rb')
return self.opener(os.path.join(basepath, path), 'rb')
paths = app_paths + overlay.keys()
openfunc = Open
openfunc, paths = self._CreateSourceContextFiles(
source_contexts, basepath, openfunc, paths)
appversion = AppVersionUpload(
rpcserver,
appyaml,
module_yaml_path=module_yaml_path,
backend=backend,
error_fh=self.error_fh,
usage_reporting=self.options.usage_reporting,
ignore_endpoints_failures=self.options.ignore_endpoints_failures)
return appversion.DoUpload(paths, openfunc)
def UpdateUsingSpecificFiles(self):
"""Updates and deploys new app versions based on given config files."""
rpcserver = self._GetRpcServer()
all_files = [self.basepath] + self.args
has_python25_version = False
for yaml_path in all_files:
file_name = os.path.basename(yaml_path)
self.basepath = os.path.dirname(yaml_path)
if not self.basepath:
self.basepath = '.'
module_yaml = self._ParseAppInfoFromYaml(self.basepath,
os.path.splitext(file_name)[0])
if module_yaml.runtime == 'python':
has_python25_version = True
if module_yaml.vm is True:
MigrateGcloudNotice()
if not module_yaml.module and file_name != 'app.yaml':
ErrorUpdate("Error: 'module' parameter not specified in %s" %
yaml_path)
continue
self.UpdateVersion(rpcserver, self.basepath, module_yaml, file_name)
if has_python25_version:
MigratePython27Notice()
def Update(self):
"""Updates and deploys a new appversion and global app configs."""
if not os.path.isdir(self.basepath):
self.UpdateUsingSpecificFiles()
return
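    # A Java WAR directory without app.yaml is first staged into a temporary
    # directory (where an app.yaml is presumably generated) and deployed from
    # there; the staging directory is removed afterwards unless
    # --retain_upload_dir is set.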
if JavaSupported() and appcfg_java.IsWarFileWithoutYaml(self.basepath):
java_app_update = appcfg_java.JavaAppUpdate(self.basepath, self.options)
self.options.compile_jsps = not java_app_update.app_engine_web_xml.vm
sdk_root = os.path.dirname(appcfg_java.__file__)
self.stage_dir = java_app_update.CreateStagingDirectory(sdk_root)
try:
appyaml = self._ParseAppInfoFromYaml(
self.stage_dir,
basename=os.path.splitext(APP_YAML_FILENAME)[0])
self._UpdateWithParsedAppYaml(appyaml, self.stage_dir)
finally:
if self.options.retain_upload_dir:
StatusUpdate(
'Temporary staging directory left in %s' % self.stage_dir,
self.error_fh)
else:
shutil.rmtree(self.stage_dir)
else:
appyaml = self._ParseAppInfoFromYaml(
self.basepath,
basename=os.path.splitext(APP_YAML_FILENAME)[0])
self._UpdateWithParsedAppYaml(appyaml, self.basepath)
def _UpdateWithParsedAppYaml(self, appyaml, basepath):
"""Completes update command.
Helper to Update.
Args:
appyaml: AppInfoExternal for the app.
basepath: Path where application's files can be found.
"""
self.runtime = appyaml.runtime
rpcserver = self._GetRpcServer()
if self.options.skip_sdk_update_check:
logging.info('Skipping update check')
else:
updatecheck = self.update_check_class(rpcserver, appyaml)
updatecheck.CheckForUpdates()
def _AbortAppMismatch(yaml_name):
StatusUpdate('Error: Aborting upload because application in %s does not '
'match application in app.yaml' % yaml_name, self.error_fh)
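    # Each auxiliary config (dos, queue, cron, index, dispatch) must name the
    # same application as app.yaml; on any mismatch the whole upload is
    # aborted before anything is sent to the server.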
dos_yaml = self._ParseDosYaml(basepath, appyaml)
if dos_yaml and dos_yaml.application != appyaml.application:
_AbortAppMismatch('dos.yaml')
return
queue_yaml = self._ParseQueueYaml(basepath, appyaml)
if queue_yaml and queue_yaml.application != appyaml.application:
_AbortAppMismatch('queue.yaml')
return
cron_yaml = self._ParseCronYaml(basepath, appyaml)
if cron_yaml and cron_yaml.application != appyaml.application:
_AbortAppMismatch('cron.yaml')
return
index_defs = self._ParseIndexYaml(basepath, appyaml)
if index_defs and index_defs.application != appyaml.application:
_AbortAppMismatch('index.yaml')
return
dispatch_yaml = self._ParseDispatchYaml(basepath, appyaml)
if dispatch_yaml and dispatch_yaml.application != appyaml.application:
_AbortAppMismatch('dispatch.yaml')
return
self.UpdateVersion(rpcserver, basepath, appyaml, APP_YAML_FILENAME)
if appyaml.runtime == 'python':
MigratePython27Notice()
if self.options.backends:
self.BackendsUpdate()
if index_defs:
index_upload = IndexDefinitionUpload(rpcserver, index_defs, self.error_fh)
try:
index_upload.DoUpload()
except urllib2.HTTPError, e:
ErrorUpdate('Error %d: --- begin server output ---\n'
'%s\n--- end server output ---' %
(e.code, e.read().rstrip('\n')))
print >> self.error_fh, (
'Your app was updated, but there was an error updating your '
'indexes.')
if cron_yaml:
cron_upload = CronEntryUpload(rpcserver, cron_yaml, self.error_fh)
try:
cron_upload.DoUpload()
except urllib2.HTTPError, e:
ErrorUpdate('Error %d: --- begin server output ---\n'
'%s\n--- end server output ---' %
(e.code, e.read().rstrip('\n')))
print >> self.error_fh, (
'Your app was updated, but there was an error updating your '
'cron tasks.')
if queue_yaml:
queue_upload = QueueEntryUpload(rpcserver, queue_yaml, self.error_fh)
try:
queue_upload.DoUpload()
except urllib2.HTTPError, e:
ErrorUpdate('Error %d: --- begin server output ---\n'
'%s\n--- end server output ---' %
(e.code, e.read().rstrip('\n')))
print >> self.error_fh, (
'Your app was updated, but there was an error updating your '
'queues.')
if dos_yaml:
dos_upload = DosEntryUpload(rpcserver, dos_yaml, self.error_fh)
dos_upload.DoUpload()
if dispatch_yaml:
dispatch_upload = DispatchEntryUpload(rpcserver,
dispatch_yaml,
self.error_fh)
dispatch_upload.DoUpload()
def _UpdateOptions(self, parser):
"""Adds update-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('--no_precompilation', action='store_false',
dest='precompilation', default=True,
help='Disable automatic precompilation '
'(ignored for Go apps).')
parser.add_option('--backends', action='store_true',
dest='backends', default=False,
help='Update backends when performing appcfg update.')
parser.add_option('--no_usage_reporting', action='store_false',
dest='usage_reporting', default=True,
help='Disable usage reporting.')
parser.add_option('--repo_info_file', action='store', type='string',
dest='repo_info_file', help=optparse.SUPPRESS_HELP)
unused_repo_info_file_help = (
'The name of a file containing source context information for the '
'modules being deployed. If not specified, the source context '
'information will be inferred from the directory containing the '
'app.yaml file.')
if JavaSupported():
appcfg_java.AddUpdateOptions(parser)
def VacuumIndexes(self):
"""Deletes unused indexes."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
index_defs = self._ParseIndexYaml(self.basepath)
if index_defs is None:
index_defs = datastore_index.IndexDefinitions()
rpcserver = self._GetRpcServer()
vacuum = VacuumIndexesOperation(rpcserver,
self.options.force_delete)
vacuum.DoVacuum(index_defs)
def _VacuumIndexesOptions(self, parser):
"""Adds vacuum_indexes-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-f', '--force', action='store_true', dest='force_delete',
default=False,
help='Force deletion without being prompted.')
def UpdateCron(self):
"""Updates any new or changed cron definitions."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
rpcserver = self._GetRpcServer()
cron_yaml = self._ParseCronYaml(self.basepath)
if cron_yaml:
cron_upload = CronEntryUpload(rpcserver, cron_yaml, self.error_fh)
cron_upload.DoUpload()
else:
print >>self.error_fh, (
'Could not find cron configuration. No action taken.')
def UpdateIndexes(self):
"""Updates indexes."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
rpcserver = self._GetRpcServer()
index_defs = self._ParseIndexYaml(self.basepath)
if index_defs:
index_upload = IndexDefinitionUpload(rpcserver, index_defs, self.error_fh)
index_upload.DoUpload()
else:
print >>self.error_fh, (
'Could not find index configuration. No action taken.')
def UpdateQueues(self):
"""Updates any new or changed task queue definitions."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
rpcserver = self._GetRpcServer()
queue_yaml = self._ParseQueueYaml(self.basepath)
if queue_yaml:
queue_upload = QueueEntryUpload(rpcserver, queue_yaml, self.error_fh)
queue_upload.DoUpload()
else:
print >>self.error_fh, (
'Could not find queue configuration. No action taken.')
def UpdateDispatch(self):
"""Updates new or changed dispatch definitions."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
rpcserver = self._GetRpcServer()
dispatch_yaml = self._ParseDispatchYaml(self.basepath)
if dispatch_yaml:
dispatch_upload = DispatchEntryUpload(rpcserver,
dispatch_yaml,
self.error_fh)
dispatch_upload.DoUpload()
else:
print >>self.error_fh, ('Could not find dispatch configuration. No action'
' taken.')
def UpdateDos(self):
"""Updates any new or changed dos definitions."""
if self.args:
self.parser.error('Expected a single <directory> argument.')
rpcserver = self._GetRpcServer()
dos_yaml = self._ParseDosYaml(self.basepath)
if dos_yaml:
dos_upload = DosEntryUpload(rpcserver, dos_yaml, self.error_fh)
dos_upload.DoUpload()
else:
print >>self.error_fh, (
'Could not find dos configuration. No action taken.')
def BackendsAction(self):
"""Placeholder; we never expect this action to be invoked."""
pass
def BackendsPhpCheck(self, appyaml):
"""Don't support backends with the PHP runtime.
This should be used to prevent use of backends update/start/configure
with the PHP runtime. We continue to allow backends
stop/delete/list/rollback just in case there are existing PHP backends.
Args:
appyaml: A parsed app.yaml file.
"""
if appyaml.runtime.startswith('php'):
_PrintErrorAndExit(
self.error_fh,
'Error: Backends are not supported with the PHP runtime. '
'Please use Modules instead.\n')
def BackendsYamlCheck(self, basepath, appyaml, backend=None):
"""Check the backends.yaml file is sane and which backends to update."""
if appyaml.backends:
self.parser.error('Backends are not allowed in app.yaml.')
backends_yaml = self._ParseBackendsYaml(basepath)
appyaml.backends = backends_yaml.backends
if not appyaml.backends:
self.parser.error('No backends found in backends.yaml.')
backends = []
for backend_entry in appyaml.backends:
entry = backendinfo.LoadBackendEntry(backend_entry.ToYAML())
if entry.name in backends:
self.parser.error('Duplicate entry for backend: %s.' % entry.name)
else:
backends.append(entry.name)
backends_to_update = []
if backend:
if backend in backends:
backends_to_update = [backend]
else:
self.parser.error("Backend '%s' not found in backends.yaml." %
backend)
else:
backends_to_update = backends
return backends_to_update
def BackendsUpdate(self):
"""Updates a backend."""
self.backend = None
if len(self.args) == 1:
self.backend = self.args[0]
elif len(self.args) > 1:
self.parser.error('Expected an optional <backend> argument.')
if JavaSupported() and appcfg_java.IsWarFileWithoutYaml(self.basepath):
java_app_update = appcfg_java.JavaAppUpdate(self.basepath, self.options)
self.options.compile_jsps = True
sdk_root = os.path.dirname(appcfg_java.__file__)
basepath = java_app_update.CreateStagingDirectory(sdk_root)
else:
basepath = self.basepath
yaml_file_basename = 'app'
appyaml = self._ParseAppInfoFromYaml(basepath,
basename=yaml_file_basename)
BackendsStatusUpdate(appyaml.runtime, self.error_fh)
self.BackendsPhpCheck(appyaml)
rpcserver = self._GetRpcServer()
backends_to_update = self.BackendsYamlCheck(basepath, appyaml, self.backend)
for backend in backends_to_update:
self.UpdateVersion(rpcserver, basepath, appyaml, yaml_file_basename,
backend=backend)
def BackendsList(self):
"""Lists all backends for an app."""
if self.args:
self.parser.error('Expected no arguments.')
appyaml = self._ParseAppInfoFromYaml(self.basepath)
BackendsStatusUpdate(appyaml.runtime, self.error_fh)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/list', app_id=appyaml.application)
print >> self.out_fh, response
def BackendsRollback(self):
"""Does a rollback of an existing transaction on this backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
self._Rollback(self.args[0])
def BackendsStart(self):
"""Starts a backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
backend = self.args[0]
appyaml = self._ParseAppInfoFromYaml(self.basepath)
BackendsStatusUpdate(appyaml.runtime, self.error_fh)
self.BackendsPhpCheck(appyaml)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/start',
app_id=appyaml.application,
backend=backend)
print >> self.out_fh, response
def BackendsStop(self):
"""Stops a backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
backend = self.args[0]
appyaml = self._ParseAppInfoFromYaml(self.basepath)
BackendsStatusUpdate(appyaml.runtime, self.error_fh)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/stop',
app_id=appyaml.application,
backend=backend)
print >> self.out_fh, response
def BackendsDelete(self):
"""Deletes a backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
backend = self.args[0]
appyaml = self._ParseAppInfoFromYaml(self.basepath)
BackendsStatusUpdate(appyaml.runtime, self.error_fh)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/delete',
app_id=appyaml.application,
backend=backend)
print >> self.out_fh, response
def BackendsConfigure(self):
"""Changes the configuration of an existing backend."""
if len(self.args) != 1:
self.parser.error('Expected a single <backend> argument.')
backend = self.args[0]
appyaml = self._ParseAppInfoFromYaml(self.basepath)
BackendsStatusUpdate(appyaml.runtime, self.error_fh)
self.BackendsPhpCheck(appyaml)
backends_yaml = self._ParseBackendsYaml(self.basepath)
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/backends/configure',
app_id=appyaml.application,
backend=backend,
payload=backends_yaml.ToYAML())
print >> self.out_fh, response
def ListVersions(self):
"""Lists all versions for an app."""
if len(self.args) == 0:
if not self.options.app_id:
self.parser.error('Expected <directory> argument or -A <app id>.')
app_id = self.options.app_id
elif len(self.args) == 1:
if self.options.app_id:
self.parser.error('<directory> argument is not needed with -A.')
appyaml = self._ParseAppInfoFromYaml(self.args[0])
app_id = appyaml.application
else:
self.parser.error('Expected 1 argument, not %d.' % len(self.args))
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/versions/list', app_id=app_id)
parsed_response = yaml.safe_load(response)
if not parsed_response:
print >> self.out_fh, ('No versions uploaded for app: %s.' % app_id)
else:
print >> self.out_fh, response
def DeleteVersion(self):
"""Deletes the specified version for an app."""
if not (self.options.app_id and self.options.version):
self.parser.error('Expected an <app_id> argument, a <version> argument '
'and an optional <module> argument.')
if self.options.module:
module = self.options.module
else:
module = ''
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/versions/delete',
app_id=self.options.app_id,
version_match=self.options.version,
module=module)
print >> self.out_fh, response
def _LockingAction(self, url):
"""Changes the locking state for a given version."""
if len(self.args) == 1:
appyaml = self._ParseAppInfoFromYaml(self.args[0])
app_id = appyaml.application
module = appyaml.module or ''
version = appyaml.version
elif not self.args:
if not (self.options.app_id and self.options.version):
self.parser.error(
('Expected a <directory> argument or both --application and '
'--version flags.'))
module = ''
else:
self._PrintHelpAndExit()
if self.options.app_id:
app_id = self.options.app_id
if self.options.module:
module = self.options.module
if self.options.version:
version = self.options.version
rpcserver = self._GetRpcServer()
DoLockAction(
url,
rpcserver,
app_id, version, module,
self.options.instance,
self.out_fh).Do()
def DebugAction(self):
"""Sets the specified version and instance for an app to be debuggable."""
self._LockingAction('/api/vms/debug')
def LockAction(self):
"""Locks the specified version and instance for an app."""
self._LockingAction('/api/vms/lock')
def _LockActionOptions(self, parser):
"""Adds lock/unlock-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-I', '--instance', type='string', dest='instance',
help='Instance to lock/unlock.')
def PrepareVmRuntimeAction(self):
"""Prepare the application for vm runtimes and return state."""
if not self.options.app_id:
self.parser.error('Expected an --application argument')
rpcserver = self._GetRpcServer()
response = rpcserver.Send('/api/vms/prepare',
app_id=self.options.app_id)
print >> self.out_fh, response
def _ParseAndValidateModuleYamls(self, yaml_paths):
"""Validates given yaml paths and returns the parsed yaml objects.
Args:
yaml_paths: List of paths to AppInfo yaml files.
Returns:
List of parsed AppInfo yamls.
"""
results = []
app_id = None
last_yaml_path = None
for yaml_path in yaml_paths:
if not os.path.isfile(yaml_path):
_PrintErrorAndExit(
self.error_fh,
("Error: The given path '%s' is not to a YAML configuration "
"file.\n") % yaml_path)
file_name = os.path.basename(yaml_path)
base_path = os.path.dirname(yaml_path)
if not base_path:
base_path = '.'
module_yaml = self._ParseAppInfoFromYaml(base_path,
os.path.splitext(file_name)[0])
if not module_yaml.module and file_name != 'app.yaml':
_PrintErrorAndExit(
self.error_fh,
"Error: 'module' parameter not specified in %s" % yaml_path)
if app_id is not None and module_yaml.application != app_id:
_PrintErrorAndExit(
self.error_fh,
"Error: 'application' value '%s' in %s does not match the value "
"'%s', found in %s" % (module_yaml.application,
yaml_path,
app_id,
last_yaml_path))
app_id = module_yaml.application
last_yaml_path = yaml_path
results.append(module_yaml)
return results
def _ModuleAction(self, action_path):
"""Process flags and yaml files and make a call to the given path.
The 'start_module_version' and 'stop_module_version' actions are extremely
similar in how they process input to appcfg.py and only really differ in
what path they hit on the RPCServer.
Args:
action_path: Path on the RPCServer to send the call to.
"""
modules_to_process = []
if not self.args:
if not (self.options.app_id and
self.options.module and
self.options.version):
_PrintErrorAndExit(self.error_fh,
'Expected at least one <file> argument or the '
'--application, --module and --version flags to'
' be set.')
else:
modules_to_process.append((self.options.app_id,
self.options.module,
self.options.version))
else:
if self.options.module:
_PrintErrorAndExit(self.error_fh,
'You may not specify a <file> argument with the '
'--module flag.')
module_yamls = self._ParseAndValidateModuleYamls(self.args)
for serv_yaml in module_yamls:
app_id = serv_yaml.application
modules_to_process.append((self.options.app_id or serv_yaml.application,
serv_yaml.module or appinfo.DEFAULT_MODULE,
self.options.version or serv_yaml.version))
rpcserver = self._GetRpcServer()
for app_id, module, version in modules_to_process:
response = rpcserver.Send(action_path,
app_id=app_id,
module=module,
version=version)
print >> self.out_fh, response
def StartModuleVersion(self):
"""Starts one or more versions."""
self._ModuleAction('/api/modules/start')
def StopModuleVersion(self):
"""Stops one or more versions."""
self._ModuleAction('/api/modules/stop')
def Rollback(self):
"""Does a rollback of an existing transaction for this app version."""
self._Rollback()
def _RollbackOptions(self, parser):
"""Adds rollback-specific options to parser.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('--force_rollback', action='store_true',
dest='force_rollback', default=False,
help='Force rollback.')
def _Rollback(self, backend=None):
"""Does a rollback of an existing transaction.
Args:
      backend: Name of a backend to roll back, or None.
    If a backend is specified, the rollback affects only that backend;
    otherwise it affects the current app version.
"""
if os.path.isdir(self.basepath):
module_yaml = self._ParseAppInfoFromYaml(self.basepath)
else:
file_name = os.path.basename(self.basepath)
self.basepath = os.path.dirname(self.basepath)
if not self.basepath:
self.basepath = '.'
module_yaml = self._ParseAppInfoFromYaml(self.basepath,
os.path.splitext(file_name)[0])
appversion = AppVersionUpload(self._GetRpcServer(), module_yaml,
module_yaml_path='app.yaml',
backend=backend)
appversion.in_transaction = True
force_rollback = False
if hasattr(self.options, 'force_rollback'):
force_rollback = self.options.force_rollback
appversion.Rollback(force_rollback)
def SetDefaultVersion(self):
"""Sets the default version."""
module = ''
if len(self.args) == 1:
stored_modules = self.options.module
self.options.module = None
try:
appyaml = self._ParseAppInfoFromYaml(self.args[0])
finally:
self.options.module = stored_modules
app_id = appyaml.application
module = appyaml.module or ''
version = appyaml.version
elif not self.args:
if not (self.options.app_id and self.options.version):
self.parser.error(
('Expected a <directory> argument or both --application and '
'--version flags.'))
else:
self._PrintHelpAndExit()
if self.options.app_id:
app_id = self.options.app_id
if self.options.module:
module = self.options.module
if self.options.version:
version = self.options.version
version_setter = DefaultVersionSet(self._GetRpcServer(),
app_id,
module,
version,
self.error_fh)
version_setter.SetVersion()
def MigrateTraffic(self):
"""Migrates traffic."""
module = 'default'
if len(self.args) == 1:
appyaml = self._ParseAppInfoFromYaml(self.args[0])
app_id = appyaml.application
module = appyaml.module or 'default'
version = appyaml.version
elif not self.args:
if not (self.options.app_id and self.options.version):
self.parser.error(
('Expected a <directory> argument or both --application and '
'--version flags.'))
else:
self._PrintHelpAndExit()
if self.options.app_id:
app_id = self.options.app_id
if self.options.module:
module = self.options.module
if self.options.version:
version = self.options.version
if module not in ['', 'default']:
StatusUpdate('migrate_traffic does not support non-default module at '
'this time.')
return
traffic_migrator = TrafficMigrator(
self._GetRpcServer(), app_id, version, self.error_fh)
traffic_migrator.MigrateTraffic()
def RequestLogs(self):
"""Write request logs to a file."""
args_length = len(self.args)
module = ''
if args_length == 2:
appyaml = self._ParseAppInfoFromYaml(self.args.pop(0))
app_id = appyaml.application
module = appyaml.module or ''
version = appyaml.version
elif args_length == 1:
if not (self.options.app_id and self.options.version):
self.parser.error(
('Expected the --application and --version flags if <directory> '
'argument is not specified.'))
else:
self._PrintHelpAndExit()
if self.options.app_id:
app_id = self.options.app_id
if self.options.module:
module = self.options.module
if self.options.version:
version = self.options.version
if (self.options.severity is not None and
not 0 <= self.options.severity <= MAX_LOG_LEVEL):
self.parser.error(
'Severity range is 0 (DEBUG) through %s (CRITICAL).' % MAX_LOG_LEVEL)
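    # Default num_days: 1 for a fresh download, 0 (all available logs) when
    # --append is given, matching the -n/--num_days help text.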
if self.options.num_days is None:
self.options.num_days = int(not self.options.append)
try:
end_date = self._ParseEndDate(self.options.end_date)
except (TypeError, ValueError):
self.parser.error('End date must be in the format YYYY-MM-DD.')
rpcserver = self._GetRpcServer()
logs_requester = LogsRequester(rpcserver,
app_id,
module,
version,
self.args[0],
self.options.num_days,
self.options.append,
self.options.severity,
end_date,
self.options.vhost,
self.options.include_vhost,
self.options.include_all,
time_func=self.time_func)
logs_requester.DownloadLogs()
@staticmethod
def _ParseEndDate(date, time_func=time.time):
"""Translates an ISO 8601 date to a date object.
Args:
date: A date string as YYYY-MM-DD.
time_func: A time.time() compatible function, which can be overridden for
testing.
Returns:
A date object representing the last day of logs to get.
If no date is given, returns today in the US/Pacific timezone.
"""
if not date:
return PacificDate(time_func())
return datetime.date(*[int(i) for i in date.split('-')])
def _RequestLogsOptions(self, parser):
"""Adds request_logs-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-n', '--num_days', type='int', dest='num_days',
action='store', default=None,
help='Number of days worth of log data to get. '
'The cut-off point is midnight US/Pacific. '
'Use 0 to get all available logs. '
'Default is 1, unless --append is also given; '
'then the default is 0.')
parser.add_option('-a', '--append', dest='append',
action='store_true', default=False,
help='Append to existing file.')
parser.add_option('--severity', type='int', dest='severity',
action='store', default=None,
help='Severity of app-level log messages to get. '
'The range is 0 (DEBUG) through 4 (CRITICAL). '
'If omitted, only request logs are returned.')
parser.add_option('--vhost', type='string', dest='vhost',
action='store', default=None,
help='The virtual host of log messages to get. '
'If omitted, all log messages are returned.')
parser.add_option('--include_vhost', dest='include_vhost',
action='store_true', default=False,
help='Include virtual host in log messages.')
parser.add_option('--include_all', dest='include_all',
action='store_true', default=None,
help='Include everything in log messages.')
parser.add_option('--end_date', dest='end_date',
action='store', default='',
help='End date (as YYYY-MM-DD) of period for log data. '
'Defaults to today.')
def CronInfo(self, now=None, output=sys.stdout):
"""Displays information about cron definitions.
Args:
now: used for testing.
output: Used for testing.
"""
if self.args:
self.parser.error('Expected a single <directory> argument.')
if now is None:
now = datetime.datetime.utcnow()
cron_yaml = self._ParseCronYaml(self.basepath)
if cron_yaml and cron_yaml.cron:
for entry in cron_yaml.cron:
description = entry.description
if not description:
description = '<no description>'
if not entry.timezone:
entry.timezone = 'UTC'
print >>output, '\n%s:\nURL: %s\nSchedule: %s (%s)' % (description,
entry.url,
entry.schedule,
entry.timezone)
if entry.timezone != 'UTC':
print >>output, ('Note: Schedules with timezones won\'t be calculated'
' correctly here')
schedule = groctimespecification.GrocTimeSpecification(entry.schedule)
matches = schedule.GetMatches(now, self.options.num_runs)
for match in matches:
print >>output, '%s, %s from now' % (
match.strftime('%Y-%m-%d %H:%M:%SZ'), match - now)
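  # Rough sketch of the expected output (assumed example values, not from the
  # SDK): for a hypothetical cron.yaml entry with description 'daily summary',
  # url '/tasks/summary' and schedule 'every 24 hours', CronInfo would print
  # something like:
  #   daily summary:
  #   URL: /tasks/summary
  #   Schedule: every 24 hours (UTC)
  #   2014-07-04 00:00:00Z, 12:00:00 from now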
def _CronInfoOptions(self, parser):
"""Adds cron_info-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-n', '--num_runs', type='int', dest='num_runs',
action='store', default=5,
                      help='Number of runs of each cron job to display. '
'Default is 5')
def _CheckRequiredLoadOptions(self):
"""Checks that upload/download options are present."""
for option in ['filename']:
if getattr(self.options, option) is None:
self.parser.error('Option \'%s\' is required.' % option)
if not self.options.url:
self.parser.error('You must have google.appengine.ext.remote_api.handler '
'assigned to an endpoint in app.yaml, or provide '
'the url of the handler via the \'url\' option.')
def InferRemoteApiUrl(self, appyaml):
"""Uses app.yaml to determine the remote_api endpoint.
Args:
appyaml: A parsed app.yaml file.
Returns:
The url of the remote_api endpoint as a string, or None
"""
handlers = appyaml.handlers
handler_suffixes = ['remote_api/handler.py',
'remote_api.handler.application']
app_id = appyaml.application
for handler in handlers:
if hasattr(handler, 'script') and handler.script:
if any(handler.script.endswith(suffix) for suffix in handler_suffixes):
server = self.options.server
url = handler.url
if url.endswith('(/.*)?'):
url = url[:-6]
if server == 'appengine.google.com':
return 'http://%s.appspot.com%s' % (app_id, url)
else:
match = re.match(PREFIXED_BY_ADMIN_CONSOLE_RE, server)
if match:
return 'http://%s%s%s' % (app_id, match.group(1), url)
else:
return 'http://%s%s' % (server, url)
return None
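  # Illustrative example (assumed values, not from the SDK): for an app.yaml
  # with application 'my-app', a handler mapping url '/_ah/remote_api(/.*)?' to
  # 'google.appengine.ext.remote_api.handler.application', and the default
  # server 'appengine.google.com', InferRemoteApiUrl would return
  # 'http://my-app.appspot.com/_ah/remote_api'.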
def RunBulkloader(self, arg_dict):
"""Invokes the bulkloader with the given keyword arguments.
Args:
arg_dict: Dictionary of arguments to pass to bulkloader.Run().
"""
try:
import sqlite3
except ImportError:
logging.error('upload_data action requires SQLite3 and the python '
'sqlite3 module (included in python since 2.5).')
sys.exit(1)
sys.exit(bulkloader.Run(arg_dict, self._GetOAuth2Parameters()))
def _SetupLoad(self):
"""Performs common verification and set up for upload and download."""
if len(self.args) != 1 and not self.options.url:
self.parser.error('Expected either --url or a single <directory> '
'argument.')
if len(self.args) == 1:
self.basepath = self.args[0]
appyaml = self._ParseAppInfoFromYaml(self.basepath)
self.options.app_id = appyaml.application
if not self.options.url:
url = self.InferRemoteApiUrl(appyaml)
if url is not None:
self.options.url = url
self._CheckRequiredLoadOptions()
if self.options.batch_size < 1:
self.parser.error('batch_size must be 1 or larger.')
if verbosity == 1:
logging.getLogger().setLevel(logging.INFO)
self.options.debug = False
else:
logging.getLogger().setLevel(logging.DEBUG)
self.options.debug = True
def _MakeLoaderArgs(self):
"""Returns a dict made from many attributes of self.options, plus others.
See body for list of self.options attributes included. In addition, result
includes
'application' = self.options.app_id
'throttle_class' = self.throttle_class
Returns:
A dict.
"""
args = dict([(arg_name, getattr(self.options, arg_name, None)) for
arg_name in (
'url',
'filename',
'batch_size',
'kind',
'num_threads',
'bandwidth_limit',
'rps_limit',
'http_limit',
'db_filename',
'config_file',
'auth_domain',
'has_header',
'loader_opts',
'log_file',
'debug',
'exporter_opts',
'mapper_opts',
'result_db_filename',
'dry_run',
'dump',
'restore',
'namespace',
'create_config',
)])
args['application'] = self.options.app_id
args['throttle_class'] = self.throttle_class
return args
def PerformDownload(self, run_fn=None):
"""Performs a datastore download via the bulkloader.
Args:
run_fn: Function to invoke the bulkloader, used for testing.
"""
if run_fn is None:
run_fn = self.RunBulkloader
self._SetupLoad()
StatusUpdate('Downloading data records.', self.error_fh)
args = self._MakeLoaderArgs()
args['download'] = bool(args['config_file'])
args['has_header'] = False
args['map'] = False
args['dump'] = not args['config_file']
args['restore'] = False
args['create_config'] = False
run_fn(args)
def PerformUpload(self, run_fn=None):
"""Performs a datastore upload via the bulkloader.
Args:
run_fn: Function to invoke the bulkloader, used for testing.
"""
if run_fn is None:
run_fn = self.RunBulkloader
self._SetupLoad()
StatusUpdate('Uploading data records.', self.error_fh)
args = self._MakeLoaderArgs()
args['download'] = False
args['map'] = False
args['dump'] = False
args['restore'] = not args['config_file']
args['create_config'] = False
run_fn(args)
def CreateBulkloadConfig(self, run_fn=None):
"""Create a bulkloader config via the bulkloader wizard.
Args:
run_fn: Function to invoke the bulkloader, used for testing.
"""
if run_fn is None:
run_fn = self.RunBulkloader
self._SetupLoad()
StatusUpdate('Creating bulkloader configuration.', self.error_fh)
args = self._MakeLoaderArgs()
args['download'] = False
args['has_header'] = False
args['map'] = False
args['dump'] = False
args['restore'] = False
args['create_config'] = True
run_fn(args)
def _PerformLoadOptions(self, parser):
"""Adds options common to 'upload_data' and 'download_data'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('--url', type='string', dest='url',
action='store',
help='The location of the remote_api endpoint.')
parser.add_option('--batch_size', type='int', dest='batch_size',
action='store', default=10,
help='Number of records to post in each request.')
parser.add_option('--bandwidth_limit', type='int', dest='bandwidth_limit',
action='store', default=250000,
help='The maximum bytes/second bandwidth for transfers.')
parser.add_option('--rps_limit', type='int', dest='rps_limit',
action='store', default=20,
help='The maximum records/second for transfers.')
parser.add_option('--http_limit', type='int', dest='http_limit',
action='store', default=8,
help='The maximum requests/second for transfers.')
parser.add_option('--db_filename', type='string', dest='db_filename',
action='store',
help='Name of the progress database file.')
parser.add_option('--auth_domain', type='string', dest='auth_domain',
action='store', default='gmail.com',
help='The name of the authorization domain to use.')
parser.add_option('--log_file', type='string', dest='log_file',
help='File to write bulkloader logs. If not supplied '
'then a new log file will be created, named: '
'bulkloader-log-TIMESTAMP.')
parser.add_option('--dry_run', action='store_true',
dest='dry_run', default=False,
help='Do not execute any remote_api calls')
parser.add_option('--namespace', type='string', dest='namespace',
action='store', default='',
help='Namespace to use when accessing datastore.')
parser.add_option('--num_threads', type='int', dest='num_threads',
action='store', default=10,
help='Number of threads to transfer records with.')
def _PerformUploadOptions(self, parser):
"""Adds 'upload_data' specific options to the 'parser' passed in.
Args:
parser: An instance of OptionsParser.
"""
self._PerformLoadOptions(parser)
parser.add_option('--filename', type='string', dest='filename',
action='store',
help='The name of the file containing the input data.'
' (Required)')
parser.add_option('--kind', type='string', dest='kind',
action='store',
help='The kind of the entities to store.')
parser.add_option('--has_header', dest='has_header',
action='store_true', default=False,
help='Whether the first line of the input file should be'
' skipped')
parser.add_option('--loader_opts', type='string', dest='loader_opts',
help='A string to pass to the Loader.initialize method.')
parser.add_option('--config_file', type='string', dest='config_file',
action='store',
help='Name of the configuration file.')
def _PerformDownloadOptions(self, parser):
"""Adds 'download_data' specific options to the 'parser' passed in.
Args:
parser: An instance of OptionsParser.
"""
self._PerformLoadOptions(parser)
parser.add_option('--filename', type='string', dest='filename',
action='store',
help='The name of the file where output data is to be'
' written. (Required)')
parser.add_option('--kind', type='string', dest='kind',
action='store',
help='The kind of the entities to retrieve.')
parser.add_option('--exporter_opts', type='string', dest='exporter_opts',
help='A string to pass to the Exporter.initialize method.'
)
parser.add_option('--result_db_filename', type='string',
dest='result_db_filename',
action='store',
help='Database to write entities to for download.')
parser.add_option('--config_file', type='string', dest='config_file',
action='store',
help='Name of the configuration file.')
def _CreateBulkloadConfigOptions(self, parser):
"""Adds 'download_data' specific options to the 'parser' passed in.
Args:
parser: An instance of OptionsParser.
"""
self._PerformLoadOptions(parser)
parser.add_option('--filename', type='string', dest='filename',
action='store',
help='The name of the file where the generated template'
' is to be written. (Required)')
parser.add_option('--result_db_filename', type='string',
dest='result_db_filename',
action='store',
help='Database to write entities to during config '
'generation.')
def ResourceLimitsInfo(self, output=None):
"""Outputs the current resource limits.
Args:
output: The file handle to write the output to (used for testing).
"""
rpcserver = self._GetRpcServer()
appyaml = self._ParseAppInfoFromYaml(self.basepath)
request_params = {'app_id': appyaml.application, 'version': appyaml.version}
logging_context = _ClientDeployLoggingContext(rpcserver, request_params,
usage_reporting=False)
resource_limits = GetResourceLimits(logging_context, self.error_fh)
for attr_name in sorted(resource_limits):
print >>output, '%s: %s' % (attr_name, resource_limits[attr_name])
class Action(object):
"""Contains information about a command line action.
Attributes:
function: The name of a function defined on AppCfg or its subclasses
that will perform the appropriate action.
usage: A command line usage string.
short_desc: A one-line description of the action.
long_desc: A detailed description of the action. Whitespace and
formatting will be preserved.
error_desc: An error message to display when the incorrect arguments are
given.
options: A function that will add extra options to a given OptionParser
object.
uses_basepath: Does the action use a basepath/app-directory (and hence
app.yaml).
      hidden: Whether this command should be hidden from the help listing.
"""
def __init__(self, function, usage, short_desc, long_desc='',
error_desc=None, options=lambda obj, parser: None,
uses_basepath=True, hidden=False):
"""Initializer for the class attributes."""
self.function = function
self.usage = usage
self.short_desc = short_desc
self.long_desc = long_desc
self.error_desc = error_desc
self.options = options
self.uses_basepath = uses_basepath
self.hidden = hidden
def __call__(self, appcfg):
"""Invoke this Action on the specified AppCfg.
This calls the function of the appropriate name on AppCfg, and
      respects polymorphic overrides.
Args:
appcfg: The appcfg to use.
Returns:
The result of the function call.
"""
method = getattr(appcfg, self.function)
return method()
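  # Sketch of how an entry in the table below is wired up (descriptive only,
  # using the existing 'help' action as the reference): Action stores the
  # *name* of an AppCfg method, so actions['help'](appcfg_instance) resolves
  # the method via getattr and calls appcfg_instance.Help().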
actions = {
'help': Action(
function='Help',
usage='%prog help <action>',
short_desc='Print help for a specific action.',
uses_basepath=False),
'update': Action(
function='Update',
usage='%prog [options] update <directory> | [file, ...]',
options=_UpdateOptions,
short_desc='Create or update an app version.',
long_desc="""
Specify a directory that contains all of the files required by
the app, and appcfg.py will create/update the app version referenced
in the app.yaml file at the top level of that directory. appcfg.py
will follow symlinks and recursively upload all files to the server.
Temporary or source control files (e.g. foo~, .svn/*) will be skipped.
If you are using the Modules feature, then you may prefer to pass multiple files
to update, rather than a directory, to specify which modules you would like
updated."""),
'download_app': Action(
function='DownloadApp',
usage='%prog [options] download_app -A app_id [ -V version ]'
' <out-dir>',
short_desc='Download a previously-uploaded app.',
long_desc="""
Download a previously-uploaded app to the specified directory. The app
ID is specified by the \"-A\" option. The optional version is specified
by the \"-V\" option.""",
uses_basepath=False),
'update_cron': Action(
function='UpdateCron',
usage='%prog [options] update_cron <directory>',
short_desc='Update application cron definitions.',
long_desc="""
The 'update_cron' command will update any new, removed or changed cron
definitions from the optional cron.yaml file."""),
'update_indexes': Action(
function='UpdateIndexes',
usage='%prog [options] update_indexes <directory>',
short_desc='Update application indexes.',
long_desc="""
The 'update_indexes' command will add additional indexes which are not currently
in production as well as restart any indexes that were not completed."""),
'update_queues': Action(
function='UpdateQueues',
usage='%prog [options] update_queues <directory>',
short_desc='Update application task queue definitions.',
long_desc="""
The 'update_queues' command will update any new, removed or changed task queue
definitions from the optional queue.yaml file."""),
'update_dispatch': Action(
function='UpdateDispatch',
usage='%prog [options] update_dispatch <directory>',
short_desc='Update application dispatch definitions.',
long_desc="""
The 'update_dispatch' command will update any new, removed or changed dispatch
definitions from the optional dispatch.yaml file."""),
'update_dos': Action(
function='UpdateDos',
usage='%prog [options] update_dos <directory>',
short_desc='Update application dos definitions.',
long_desc="""
The 'update_dos' command will update any new, removed or changed dos
definitions from the optional dos.yaml file."""),
'backends': Action(
function='BackendsAction',
usage='%prog [options] backends <directory> <action>',
short_desc='Perform a backend action.',
long_desc="""
The 'backends' command will perform a backends action.""",
error_desc="""\
Expected a <directory> and <action> argument."""),
'backends list': Action(
function='BackendsList',
usage='%prog [options] backends <directory> list',
short_desc='List all backends configured for the app.',
long_desc="""
The 'backends list' command will list all backends configured for the app."""),
'backends update': Action(
function='BackendsUpdate',
usage='%prog [options] backends <directory> update [<backend>]',
options=_UpdateOptions,
short_desc='Update one or more backends.',
long_desc="""
The 'backends update' command updates one or more backends. This command
updates backend configuration settings and deploys new code to the server. Any
existing instances will stop and be restarted. Updates all backends, or a
single backend if the <backend> argument is provided."""),
'backends rollback': Action(
function='BackendsRollback',
usage='%prog [options] backends <directory> rollback <backend>',
short_desc='Roll back an update of a backend.',
long_desc="""
The 'backends update' command requires a server-side transaction.
Use 'backends rollback' if you experience an error during 'backends update'
and want to start the update over again."""),
'backends start': Action(
function='BackendsStart',
usage='%prog [options] backends <directory> start <backend>',
short_desc='Start a backend.',
long_desc="""
The 'backends start' command will put a backend into the START state."""),
'backends stop': Action(
function='BackendsStop',
usage='%prog [options] backends <directory> stop <backend>',
short_desc='Stop a backend.',
long_desc="""
The 'backends stop' command will put a backend into the STOP state."""),
'backends delete': Action(
function='BackendsDelete',
usage='%prog [options] backends <directory> delete <backend>',
short_desc='Delete a backend.',
long_desc="""
The 'backends delete' command will delete a backend."""),
'backends configure': Action(
function='BackendsConfigure',
usage='%prog [options] backends <directory> configure <backend>',
short_desc='Reconfigure a backend without stopping it.',
long_desc="""
The 'backends configure' command performs an online update of a backend, without
stopping instances that are currently running. No code or handlers are updated,
only certain configuration settings specified in backends.yaml. Valid settings
are: instances, options: public, and options: failfast."""),
'vacuum_indexes': Action(
function='VacuumIndexes',
usage='%prog [options] vacuum_indexes <directory>',
options=_VacuumIndexesOptions,
short_desc='Delete unused indexes from application.',
long_desc="""
The 'vacuum_indexes' command will help clean up indexes which are no longer
in use. It does this by comparing the local index configuration with
indexes that are actually defined on the server. If any indexes on the
server do not exist in the index configuration file, the user is given the
option to delete them."""),
'rollback': Action(
function='Rollback',
usage='%prog [options] rollback <directory> | <file>',
options=_RollbackOptions,
short_desc='Rollback an in-progress update.',
long_desc="""
The 'update' command requires a server-side transaction.
Use 'rollback' if you experience an error during 'update'
and want to begin a new update transaction."""),
'request_logs': Action(
function='RequestLogs',
usage='%prog [options] request_logs [<directory>] <output_file>',
options=_RequestLogsOptions,
uses_basepath=False,
short_desc='Write request logs in Apache common log format.',
long_desc="""
The 'request_logs' command exports the request logs from your application
to a file. It will write Apache common log format records ordered
chronologically. If the output file is '-', logs are written to stdout.""",
error_desc="""\
Expected an optional <directory> and mandatory <output_file> argument."""),
'cron_info': Action(
function='CronInfo',
usage='%prog [options] cron_info <directory>',
options=_CronInfoOptions,
short_desc='Display information about cron jobs.',
long_desc="""
The 'cron_info' command will display the next 'number' runs (default 5) for
each cron job defined in the cron.yaml file."""),
'start_module_version': Action(
function='StartModuleVersion',
uses_basepath=False,
usage='%prog [options] start_module_version [file, ...]',
short_desc='Start a module version.',
long_desc="""
The 'start_module_version' command will put a module version into the START
state."""),
'stop_module_version': Action(
function='StopModuleVersion',
uses_basepath=False,
usage='%prog [options] stop_module_version [file, ...]',
short_desc='Stop a module version.',
long_desc="""
The 'stop_module_version' command will put a module version into the STOP
state."""),
'upload_data': Action(
function='PerformUpload',
usage='%prog [options] upload_data <directory>',
options=_PerformUploadOptions,
short_desc='Upload data records to datastore.',
long_desc="""
The 'upload_data' command translates input records into datastore entities and
uploads them into your application's datastore.""",
uses_basepath=False),
'download_data': Action(
function='PerformDownload',
usage='%prog [options] download_data <directory>',
options=_PerformDownloadOptions,
short_desc='Download entities from datastore.',
long_desc="""
The 'download_data' command downloads datastore entities and writes them to
file as CSV or developer defined format.""",
uses_basepath=False),
'create_bulkloader_config': Action(
function='CreateBulkloadConfig',
usage='%prog [options] create_bulkload_config <directory>',
options=_CreateBulkloadConfigOptions,
short_desc='Create a bulkloader.yaml from a running application.',
long_desc="""
The 'create_bulkloader_config' command creates a bulkloader.yaml configuration
template for use with upload_data or download_data.""",
uses_basepath=False),
'set_default_version': Action(
function='SetDefaultVersion',
usage='%prog [options] set_default_version [directory]',
short_desc='Set the default (serving) version.',
long_desc="""
The 'set_default_version' command sets the default (serving) version of the app.
Defaults to using the application, version and module specified in app.yaml;
use the --application, --version and --module flags to override these values.
The --module flag can also be a comma-delimited string of several modules. (ex.
module1,module2,module3) In this case, the default version of each module will
be changed to the version specified.
The 'migrate_traffic' command can be thought of as a safer version of this
command.""",
uses_basepath=False),
'migrate_traffic': Action(
function='MigrateTraffic',
usage='%prog [options] migrate_traffic [directory]',
short_desc='Migrates traffic to another version.',
long_desc="""
The 'migrate_traffic' command gradually sends an increasing fraction of
your app's traffic from the current default version to another
version. Once all traffic has been migrated, the new version is set as the
default version.
app.yaml specifies the target application, version, and (optionally) module; use
the --application, --version and --module flags to override these values.
Can be thought of as an enhanced version of the 'set_default_version'
command.""",
uses_basepath=False,
hidden=True),
'resource_limits_info': Action(
function='ResourceLimitsInfo',
usage='%prog [options] resource_limits_info <directory>',
short_desc='Get the resource limits.',
long_desc="""
The 'resource_limits_info' command prints the current resource limits that
are enforced."""),
'list_versions': Action(
function='ListVersions',
usage='%prog [options] list_versions [directory]',
short_desc='List all uploaded versions for an app.',
long_desc="""
The 'list_versions' command outputs the uploaded versions for each module of
an application in YAML. The YAML is formatted as an associative array,
mapping module_ids to the list of versions uploaded for that module. The
default version will be first in the list.""",
uses_basepath=False),
'delete_version': Action(
function='DeleteVersion',
usage='%prog [options] delete_version -A app_id -V version '
'[-M module]',
uses_basepath=False,
short_desc='Delete the specified version for an app.',
long_desc="""
The 'delete_version' command deletes the specified version for the specified
application."""),
'debug': Action(
function='DebugAction',
usage='%prog [options] debug [-A app_id] [-V version]'
' [-M module] [-I instance] [directory]',
options=_LockActionOptions,
short_desc='Debug a vm runtime application.',
hidden=True,
uses_basepath=False,
long_desc="""
The 'debug' command configures a vm runtime application to be accessible
for debugging."""),
'lock': Action(
function='LockAction',
usage='%prog [options] lock [-A app_id] [-V version]'
' [-M module] [-I instance] [directory]',
options=_LockActionOptions,
short_desc='Lock a debugged vm runtime application.',
hidden=True,
uses_basepath=False,
long_desc="""
The 'lock' command relocks a debugged vm runtime application."""),
'prepare_vm_runtime': Action(
function='PrepareVmRuntimeAction',
usage='%prog [options] prepare_vm_runtime -A app_id',
short_desc='Prepare an application for the VM runtime.',
hidden=True,
uses_basepath=False,
long_desc="""
The 'prepare_vm_runtime' command prepares an application for the VM runtime."""),
}
def main(argv):
logging.basicConfig(format=('%(asctime)s %(levelname)s %(filename)s:'
'%(lineno)s %(message)s '))
try:
result = AppCfgApp(argv).Run()
if result:
sys.exit(result)
except KeyboardInterrupt:
StatusUpdate('Interrupted.')
sys.exit(1)
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 | -7,539,231,588,808,250,000 | 33.524828 | 80 | 0.627375 | false |
fmierlo/Xamarin.Helpers | Helpers/update-version.py | 1 | 1430 | import sys
import xml.etree.ElementTree as ElementTree
def log(msg):
sys.stderr.write(msg + '\n')
class Project:
Filename = 'Helpers.csproj'
Schema = '{http://schemas.microsoft.com/developer/msbuild/2003}'
RootTag = Schema + 'Project'
Property = Schema + 'PropertyGroup'
Release = Schema + 'ReleaseVersion'
Package = Schema + 'Description'
class Version:
In = 'Version.cs.in'
Out = 'Version.cs'
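# Note on the template (hypothetical example, not taken from this repository):
# Version.cs.in is expected to contain the literal placeholders {VersionName}
# and {PackageName}, which main() replaces with the ReleaseVersion and
# Description values read from Helpers.csproj before writing Version.cs.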
def main(*args):
project_tree = ElementTree.parse(Project.Filename)
project = project_tree.getroot()
version = None
package = None
for release in project.iter(Project.Release):
version = release.text
log('Release: {}'.format(version))
break
else:
log('Error: version not found!')
return -1
for name in project.iter(Project.Package):
package = name.text
log('Package: {}'.format(package))
break
else:
log('Error: package not found!')
return -1
with open(Version.In) as input:
with open(Version.Out, 'w') as output:
content = input.read()
content = content.replace('{VersionName}', version)
content = content.replace('{PackageName}', package)
output.write(content)
    log('Wrote: {} -> {}.{} -> {}'.format(Version.In, package, version, Version.Out))
if __name__ == '__main__':
sys.exit(main(*sys.argv))
| mit | 6,353,014,324,797,405,000 | 27 | 94 | 0.60084 | false |
libracore/erpnext | erpnext/erpnext_integrations/doctype/shopify_settings/shopify_settings.py | 1 | 5006 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe import _
from frappe.model.document import Document
from frappe.utils import get_request_session
from frappe.custom.doctype.custom_field.custom_field import create_custom_fields
from erpnext.erpnext_integrations.utils import get_webhook_address
from erpnext.erpnext_integrations.doctype.shopify_log.shopify_log import make_shopify_log
class ShopifySettings(Document):
def validate(self):
if self.enable_shopify == 1:
setup_custom_fields()
self.validate_access_credentials()
self.register_webhooks()
else:
self.unregister_webhooks()
def validate_access_credentials(self):
if not (self.get_password(raise_exception=False) and self.api_key and self.shopify_url):
frappe.msgprint(_("Missing value for Password, API Key or Shopify URL"), raise_exception=frappe.ValidationError)
def register_webhooks(self):
webhooks = ["orders/create", "orders/paid", "orders/fulfilled"]
# url = get_shopify_url('admin/webhooks.json', self)
created_webhooks = [d.method for d in self.webhooks]
url = get_shopify_url('admin/api/2019-04/webhooks.json', self)
print('url', url)
for method in webhooks:
print('method', method)
session = get_request_session()
print('session', session)
try:
print(get_header(self))
d = session.post(url, data=json.dumps({
"webhook": {
"topic": method,
"address": get_webhook_address(connector_name='shopify_connection', method='store_request_data'),
"format": "json"
}
}), headers=get_header(self))
print('d', d.json())
d.raise_for_status()
self.update_webhook_table(method, d.json())
except Exception as e:
make_shopify_log(status="Warning", message=e, exception=False)
def unregister_webhooks(self):
session = get_request_session()
deleted_webhooks = []
for d in self.webhooks:
			url = get_shopify_url('admin/api/2019-04/webhooks/{0}.json'.format(d.webhook_id), self)
try:
res = session.delete(url, headers=get_header(self))
res.raise_for_status()
deleted_webhooks.append(d)
except Exception as e:
frappe.log_error(message=frappe.get_traceback(), title=e)
for d in deleted_webhooks:
self.remove(d)
def update_webhook_table(self, method, res):
print('update')
self.append("webhooks", {
"webhook_id": res['webhook']['id'],
"method": method
})
def get_shopify_url(path, settings):
if settings.app_type == "Private":
return 'https://{}:{}@{}/{}'.format(settings.api_key, settings.get_password('password'), settings.shopify_url, path)
else:
return 'https://{}/{}'.format(settings.shopify_url, path)
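# Illustrative result (assumed credentials, not real values): for a Private app
# with api_key 'key', password 'secret' and shopify_url 'example.myshopify.com',
# get_shopify_url('admin/api/2019-04/webhooks.json', settings) would return
# 'https://key:secret@example.myshopify.com/admin/api/2019-04/webhooks.json';
# for any other app type it returns
# 'https://example.myshopify.com/admin/api/2019-04/webhooks.json'.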
def get_header(settings):
header = {'Content-Type': 'application/json'}
	return header
@frappe.whitelist()
def get_series():
return {
"sales_order_series" : frappe.get_meta("Sales Order").get_options("naming_series") or "SO-Shopify-",
"sales_invoice_series" : frappe.get_meta("Sales Invoice").get_options("naming_series") or "SI-Shopify-",
"delivery_note_series" : frappe.get_meta("Delivery Note").get_options("naming_series") or "DN-Shopify-"
}
def setup_custom_fields():
custom_fields = {
"Customer": [
dict(fieldname='shopify_customer_id', label='Shopify Customer Id',
fieldtype='Data', insert_after='series', read_only=1, print_hide=1)
],
"Supplier": [
dict(fieldname='shopify_supplier_id', label='Shopify Supplier Id',
fieldtype='Data', insert_after='supplier_name', read_only=1, print_hide=1)
],
"Address": [
dict(fieldname='shopify_address_id', label='Shopify Address Id',
fieldtype='Data', insert_after='fax', read_only=1, print_hide=1)
],
"Item": [
dict(fieldname='shopify_variant_id', label='Shopify Variant Id',
fieldtype='Data', insert_after='item_code', read_only=1, print_hide=1),
dict(fieldname='shopify_product_id', label='Shopify Product Id',
fieldtype='Data', insert_after='item_code', read_only=1, print_hide=1),
dict(fieldname='shopify_description', label='Shopify Description',
fieldtype='Text Editor', insert_after='description', read_only=1, print_hide=1)
],
"Sales Order": [
dict(fieldname='shopify_order_id', label='Shopify Order Id',
fieldtype='Data', insert_after='title', read_only=1, print_hide=1)
],
"Delivery Note":[
dict(fieldname='shopify_order_id', label='Shopify Order Id',
fieldtype='Data', insert_after='title', read_only=1, print_hide=1),
dict(fieldname='shopify_fulfillment_id', label='Shopify Fulfillment Id',
fieldtype='Data', insert_after='title', read_only=1, print_hide=1)
],
"Sales Invoice": [
dict(fieldname='shopify_order_id', label='Shopify Order Id',
fieldtype='Data', insert_after='title', read_only=1, print_hide=1)
]
}
create_custom_fields(custom_fields)
| gpl-3.0 | 6,692,700,997,614,586,000 | 36.081481 | 118 | 0.696764 | false |
ellonweb/merlin | Arthur/__init__.py | 1 | 1290 | # This file is part of Merlin/Arthur.
# Merlin/Arthur is the Copyright (C)2009,2010 of Elliot Rosemarine.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from django.conf.urls.defaults import include, patterns, url
handler404 = 'Arthur.errors.page_not_found'
handler500 = 'Arthur.errors.server_error'
urlpatterns = patterns('',
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': 'F:/Code/Git/merlin/Arthur/static/'}),
(r'', include('Arthur.views')),
)
| gpl-2.0 | -6,298,270,596,698,404,000 | 45.071429 | 116 | 0.752713 | false |
cfbolz/parsimony | src/tm/tm4/tm4_meta/tm4_simulator.py | 1 | 1445 | import string
import sys
from state import *
from tmsim import *
if __name__ == "__main__":
name = sys.argv[-1]
fileName = name + ".tm4"
path = "../tm4_files/" + fileName
try:
assert len(sys.argv) > 1
for flag in sys.argv[2:-1]:
            if not (flag in ["-q", "-l", "-s", "-f"]):
int(flag)
except:
raise Exception("Usage: python tm4_simulator.py [-q] [-s] [# steps before aborting] [-f] [name of TM4 file]\n \
Enable -q if you want no program output\n \
Enable -l if you want limited program output\n \
Enable -s followed by the max number of steps if you want to stop interpreting after a certain number of commands\n \
Enable -f if you want to dump the history into a file in tm4_histories instead of the standard output.")
sttm = SingleTapeTuringMachine(path, ["_", "1", "H", "E"])
args = sys.argv[1:-1]
quiet = ("-q" in args)
limited = ("-l" in args)
numSteps = sys.maxint
if ("-s" in args):
numSteps = args[args.index("-s") + 1]
output = None
if ("-f" in args):
output = open("../tm4_histories/" + name + "_history.txt", "w")
try:
assert "-s" in args
except:
raise Exception("You can't include the -f flag without also specifying a maximum step count with the -s flag!")
sttm.run(quiet, numSteps, output)
| mit | -8,676,187,172,972,371,000 | 29.744681 | 129 | 0.555709 | false |
sloria/osf.io | osf/models/user.py | 1 | 64823 | import datetime as dt
import logging
import re
import urllib
import urlparse
import uuid
from copy import deepcopy
from os.path import splitext
from flask import Request as FlaskRequest
from framework import analytics
from guardian.shortcuts import get_perms
# OSF imports
import itsdangerous
import pytz
from dirtyfields import DirtyFieldsMixin
from django.conf import settings
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.hashers import check_password
from django.contrib.auth.models import PermissionsMixin
from django.dispatch import receiver
from django.db import models
from django.db.models import Count
from django.db.models.signals import post_save
from django.utils import timezone
from framework.auth import Auth, signals, utils
from framework.auth.core import generate_verification_key
from framework.auth.exceptions import (ChangePasswordError, ExpiredTokenError,
InvalidTokenError,
MergeConfirmedRequiredError,
MergeConflictError)
from framework.exceptions import PermissionsError
from framework.sessions.utils import remove_sessions_for_user
from osf.utils.requests import get_current_request
from osf.exceptions import reraise_django_validation_errors, MaxRetriesError, UserStateError
from osf.models.base import BaseModel, GuidMixin, GuidMixinQuerySet
from osf.models.contributor import Contributor, RecentlyAddedContributor
from osf.models.institution import Institution
from osf.models.mixins import AddonModelMixin
from osf.models.session import Session
from osf.models.tag import Tag
from osf.models.validators import validate_email, validate_social, validate_history_item
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.utils.fields import NonNaiveDateTimeField, LowercaseEmailField
from osf.utils.names import impute_names
from osf.utils.requests import check_select_for_update
from website import settings as website_settings
from website import filters, mails
from website.project import new_bookmark_collection
logger = logging.getLogger(__name__)
MAX_QUICKFILES_MERGE_RENAME_ATTEMPTS = 1000
def get_default_mailing_lists():
return {'Open Science Framework Help': True}
name_formatters = {
'long': lambda user: user.fullname,
'surname': lambda user: user.family_name if user.family_name else user.fullname,
'initials': lambda user: u'{surname}, {initial}.'.format(
surname=user.family_name,
initial=user.given_name_initial,
),
}
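# Minimal usage sketch (assumed user values, not part of the model): given a
# user with fullname 'Marie Curie', family_name 'Curie' and given_name 'Marie',
#   name_formatters['long'](user)      # -> u'Marie Curie'
#   name_formatters['surname'](user)   # -> u'Curie'
#   name_formatters['initials'](user)  # -> u'Curie, M.'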
class OSFUserManager(BaseUserManager):
def create_user(self, username, password=None):
if not username:
raise ValueError('Users must have a username')
user = self.model(
username=self.normalize_email(username),
is_active=True,
date_registered=timezone.now()
)
user.set_password(password)
user.save(using=self._db)
return user
_queryset_class = GuidMixinQuerySet
def all(self):
return self.get_queryset().all()
def eager(self, *fields):
fk_fields = set(self.model.get_fk_field_names()) & set(fields)
m2m_fields = set(self.model.get_m2m_field_names()) & set(fields)
return self.select_related(*fk_fields).prefetch_related(*m2m_fields)
def create_superuser(self, username, password):
user = self.create_user(username, password=password)
user.is_superuser = True
user.is_staff = True
user.is_active = True
user.save(using=self._db)
return user
class Email(BaseModel):
address = LowercaseEmailField(unique=True, db_index=True, validators=[validate_email])
user = models.ForeignKey('OSFUser', related_name='emails', on_delete=models.CASCADE)
def __unicode__(self):
return self.address
class OSFUser(DirtyFieldsMixin, GuidMixin, BaseModel, AbstractBaseUser, PermissionsMixin, AddonModelMixin):
FIELD_ALIASES = {
'_id': 'guids___id',
'system_tags': 'tags',
}
settings_type = 'user' # Needed for addons
USERNAME_FIELD = 'username'
# Node fields that trigger an update to the search engine on save
SEARCH_UPDATE_FIELDS = {
'fullname',
'given_name',
'middle_names',
'family_name',
'suffix',
'merged_by',
'date_disabled',
'date_confirmed',
'jobs',
'schools',
'social',
}
TRACK_FIELDS = SEARCH_UPDATE_FIELDS.copy()
TRACK_FIELDS.update({'password', 'last_login'})
# TODO: Add SEARCH_UPDATE_NODE_FIELDS, for fields that should trigger a
# search update for all nodes to which the user is a contributor.
SOCIAL_FIELDS = {
'orcid': u'http://orcid.org/{}',
'github': u'http://github.com/{}',
'scholar': u'http://scholar.google.com/citations?user={}',
'twitter': u'http://twitter.com/{}',
'profileWebsites': [],
'linkedIn': u'https://www.linkedin.com/{}',
'impactStory': u'https://impactstory.org/u/{}',
'researcherId': u'http://researcherid.com/rid/{}',
'researchGate': u'https://researchgate.net/profile/{}',
'academiaInstitution': u'https://{}',
'academiaProfileID': u'.academia.edu/{}',
'baiduScholar': u'http://xueshu.baidu.com/scholarID/{}',
'ssrn': u'http://papers.ssrn.com/sol3/cf_dev/AbsByAuth.cfm?per_id={}'
}
# The primary email address for the account.
# This value is unique, but multiple "None" records exist for:
# * unregistered contributors where an email address was not provided.
# TODO: Update mailchimp subscription on username change in user.save()
# TODO: Consider making this a FK to Email with to_field='address'
# Django supports this (https://docs.djangoproject.com/en/1.11/topics/auth/customizing/#django.contrib.auth.models.CustomUser.USERNAME_FIELD)
# but some third-party apps may not.
username = models.CharField(max_length=255, db_index=True, unique=True)
# Hashed. Use `User.set_password` and `User.check_password`
# password = models.CharField(max_length=255)
fullname = models.CharField(max_length=255)
# user has taken action to register the account
is_registered = models.BooleanField(db_index=True, default=False)
# user has claimed the account
# TODO: This should be retired - it always reflects is_registered.
# While a few entries exist where this is not the case, they appear to be
# the result of a bug, as they were all created over a small time span.
is_claimed = models.BooleanField(default=False, db_index=True)
# for internal use
tags = models.ManyToManyField('Tag', blank=True)
# security emails that have been sent
# TODO: This should be removed and/or merged with system_tags
security_messages = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# <message label>: <datetime>
# ...
# }
# user was invited (as opposed to registered unprompted)
is_invited = models.BooleanField(default=False, db_index=True)
# Per-project unclaimed user data:
# TODO: add validation
unclaimed_records = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# <project_id>: {
# 'name': <name that referrer provided>,
# 'referrer_id': <user ID of referrer>,
# 'token': <token used for verification urls>,
# 'email': <email the referrer provided or None>,
# 'claimer_email': <email the claimer entered or None>,
# 'last_sent': <timestamp of last email sent to referrer or None>
# }
# ...
# }
# Time of last sent notification email to newly added contributors
# Format : {
# <project_id>: {
# 'last_sent': time.time()
# }
# ...
# }
contributor_added_email_records = DateTimeAwareJSONField(default=dict, blank=True)
# The user into which this account was merged
merged_by = models.ForeignKey('self', null=True, blank=True, related_name='merger')
# verification key v1: only the token string, no expiration time
# used for cas login with username and verification key
verification_key = models.CharField(max_length=255, null=True, blank=True)
# verification key v2: token, and expiration time
# used for password reset, confirm account/email, claim account/contributor-ship
verification_key_v2 = DateTimeAwareJSONField(default=dict, blank=True, null=True)
# Format: {
# 'token': <verification token>
# 'expires': <verification expiration time>
# }
email_last_sent = NonNaiveDateTimeField(null=True, blank=True)
change_password_last_attempt = NonNaiveDateTimeField(null=True, blank=True)
# Logs number of times user attempted to change their password where their
# old password was invalid
old_password_invalid_attempts = models.PositiveIntegerField(default=0)
# email verification tokens
# see also ``unconfirmed_emails``
email_verifications = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# <token> : {'email': <email address>,
# 'expiration': <datetime>}
# }
# email lists to which the user has chosen a subscription setting
mailchimp_mailing_lists = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# 'list1': True,
# 'list2: False,
# ...
# }
# email lists to which the user has chosen a subscription setting,
# being sent from osf, rather than mailchimp
osf_mailing_lists = DateTimeAwareJSONField(default=get_default_mailing_lists, blank=True)
# Format: {
# 'list1': True,
# 'list2: False,
# ...
# }
# the date this user was registered
date_registered = NonNaiveDateTimeField(db_index=True, auto_now_add=True)
# list of collaborators that this user recently added to nodes as a contributor
# recently_added = fields.ForeignField("user", list=True)
recently_added = models.ManyToManyField('self',
through=RecentlyAddedContributor,
through_fields=('user', 'contributor'),
symmetrical=False)
# Attached external accounts (OAuth)
# external_accounts = fields.ForeignField("externalaccount", list=True)
external_accounts = models.ManyToManyField('ExternalAccount', blank=True)
# CSL names
given_name = models.CharField(max_length=255, blank=True)
middle_names = models.CharField(max_length=255, blank=True)
family_name = models.CharField(max_length=255, blank=True)
suffix = models.CharField(max_length=255, blank=True)
# identity for user logged in through external idp
external_identity = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# <external_id_provider>: {
# <external_id>: <status from ('VERIFIED, 'CREATE', 'LINK')>,
# ...
# },
# ...
# }
# Employment history
jobs = DateTimeAwareJSONField(default=list, blank=True, validators=[validate_history_item])
# Format: list of {
# 'title': <position or job title>,
# 'institution': <institution or organization>,
# 'department': <department>,
# 'location': <location>,
# 'startMonth': <start month>,
# 'startYear': <start year>,
# 'endMonth': <end month>,
# 'endYear': <end year>,
# 'ongoing: <boolean>
# }
# Educational history
schools = DateTimeAwareJSONField(default=list, blank=True, validators=[validate_history_item])
# Format: list of {
# 'degree': <position or job title>,
# 'institution': <institution or organization>,
# 'department': <department>,
# 'location': <location>,
# 'startMonth': <start month>,
# 'startYear': <start year>,
# 'endMonth': <end month>,
# 'endYear': <end year>,
# 'ongoing: <boolean>
# }
# Social links
social = DateTimeAwareJSONField(default=dict, blank=True, validators=[validate_social])
# Format: {
# 'profileWebsites': <list of profile websites>
# 'twitter': <twitter id>,
# }
# date the user last sent a request
date_last_login = NonNaiveDateTimeField(null=True, blank=True)
# date the user first successfully confirmed an email address
date_confirmed = NonNaiveDateTimeField(db_index=True, null=True, blank=True)
# When the user was disabled.
date_disabled = NonNaiveDateTimeField(db_index=True, null=True, blank=True)
# When the user was soft-deleted (GDPR)
deleted = NonNaiveDateTimeField(db_index=True, null=True, blank=True)
# when comments were last viewed
comments_viewed_timestamp = DateTimeAwareJSONField(default=dict, blank=True)
# Format: {
# 'Comment.root_target._id': 'timestamp',
# ...
# }
# timezone for user's locale (e.g. 'America/New_York')
timezone = models.CharField(blank=True, default='Etc/UTC', max_length=255)
# user language and locale data (e.g. 'en_US')
locale = models.CharField(blank=True, max_length=255, default='en_US')
# whether the user has requested to deactivate their account
requested_deactivation = models.BooleanField(default=False)
affiliated_institutions = models.ManyToManyField('Institution', blank=True)
notifications_configured = DateTimeAwareJSONField(default=dict, blank=True)
# The time at which the user agreed to our updated ToS and Privacy Policy (GDPR, 25 May 2018)
accepted_terms_of_service = NonNaiveDateTimeField(null=True, blank=True)
objects = OSFUserManager()
is_active = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
def __repr__(self):
return '<OSFUser({0!r}) with guid {1!r}>'.format(self.username, self._id)
@property
def deep_url(self):
"""Used for GUID resolution."""
return '/profile/{}/'.format(self._primary_key)
@property
def url(self):
return '/{}/'.format(self._id)
@property
def absolute_url(self):
return urlparse.urljoin(website_settings.DOMAIN, self.url)
@property
def absolute_api_v2_url(self):
from website import util
return util.api_v2_url('users/{}/'.format(self._id))
@property
def api_url(self):
return '/api/v1/profile/{}/'.format(self._id)
@property
def profile_url(self):
return '/{}/'.format(self._id)
@property
def is_disabled(self):
return self.date_disabled is not None
@is_disabled.setter
def is_disabled(self, val):
"""Set whether or not this account has been disabled."""
if val and not self.date_disabled:
self.date_disabled = timezone.now()
elif val is False:
self.date_disabled = None
@property
def is_confirmed(self):
return bool(self.date_confirmed)
@property
def is_merged(self):
"""Whether or not this account has been merged into another account.
"""
return self.merged_by is not None
@property
def unconfirmed_emails(self):
# Handle when email_verifications field is None
email_verifications = self.email_verifications or {}
return [
each['email']
for each
in email_verifications.values()
]
@property
def social_links(self):
social_user_fields = {}
for key, val in self.social.items():
if val and key in self.SOCIAL_FIELDS:
if not isinstance(val, basestring):
social_user_fields[key] = val
else:
social_user_fields[key] = self.SOCIAL_FIELDS[key].format(val)
return social_user_fields
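    # Illustrative example (assumed profile data): with self.social set to
    # {'twitter': 'OSFramework', 'profileWebsites': ['https://example.org']},
    # social_links would return
    # {'twitter': 'http://twitter.com/OSFramework',
    #  'profileWebsites': ['https://example.org']}.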
@property
def given_name_initial(self):
"""
The user's preferred initialization of their given name.
Some users with common names may choose to distinguish themselves from
their colleagues in this way. For instance, there could be two
well-known researchers in a single field named "Robert Walker".
"Walker, R" could then refer to either of them. "Walker, R.H." could
provide easy disambiguation.
NOTE: The internal representation for this should never end with a
period. "R" and "R.H" would be correct in the prior case, but
"R.H." would not.
"""
return self.given_name[0]
@property
def email(self):
if self.has_usable_username():
return self.username
else:
return None
@property
def all_tags(self):
"""Return a queryset containing all of this user's tags (incl. system tags)."""
# Tag's default manager only returns non-system tags, so we can't use self.tags
return Tag.all_tags.filter(osfuser=self)
@property
def system_tags(self):
"""The system tags associated with this node. This currently returns a list of string
names for the tags, for compatibility with v1. Eventually, we can just return the
QuerySet.
"""
return self.all_tags.filter(system=True).values_list('name', flat=True)
@property
def csl_given_name(self):
return utils.generate_csl_given_name(self.given_name, self.middle_names, self.suffix)
def csl_name(self, node_id=None):
# disabled users are set to is_registered = False but have a fullname
if self.is_registered or self.is_disabled:
name = self.fullname
else:
name = self.get_unclaimed_record(node_id)['name']
if self.family_name and self.given_name:
"""If the user has a family and given name, use those"""
return {
'family': self.family_name,
'given': self.csl_given_name,
}
else:
""" If the user doesn't autofill his family and given name """
parsed = utils.impute_names(name)
given_name = parsed['given']
middle_names = parsed['middle']
family_name = parsed['family']
suffix = parsed['suffix']
csl_given_name = utils.generate_csl_given_name(given_name, middle_names, suffix)
return {
'family': family_name,
'given': csl_given_name,
}
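    # Example shape of the return value (hypothetical name, for illustration):
    # for a registered user with family_name 'Curie', given_name 'Marie' and no
    # middle names or suffix, csl_name() returns
    # {'family': 'Curie', 'given': 'Marie'}.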
@property
def contributor_to(self):
return self.nodes.filter(is_deleted=False, type__in=['osf.node', 'osf.registration'])
@property
def visible_contributor_to(self):
return self.nodes.filter(is_deleted=False, contributor__visible=True, type__in=['osf.node', 'osf.registration'])
def set_unusable_username(self):
"""Sets username to an unusable value. Used for, e.g. for invited contributors
and merged users.
NOTE: This is necessary because Django does not allow the username column to be nullable.
"""
if self._id:
self.username = self._id
else:
self.username = str(uuid.uuid4())
return self.username
def has_usable_username(self):
return '@' in self.username
@property
def is_authenticated(self): # Needed for django compat
return True
@property
def is_anonymous(self):
return False
def get_absolute_url(self):
return self.absolute_api_v2_url
def get_addon_names(self):
return []
# django methods
def get_full_name(self):
return self.fullname
def get_short_name(self):
return self.username
def __unicode__(self):
return self.get_short_name()
def __str__(self):
return self.get_short_name()
@property
def contributed(self):
return self.nodes.all()
@property
def can_be_merged(self):
"""The ability of the `merge_user` method to fully merge the user"""
return all((addon.can_be_merged for addon in self.get_addons()))
def merge_user(self, user):
"""Merge a registered user into this account. This user will be
a contributor on any project. if the registered user and this account
are both contributors of the same project. Then it will remove the
registered user and set this account to the highest permission of the two
and set this account to be visible if either of the two are visible on
the project.
:param user: A User object to be merged.
"""
# Attempt to prevent self merges which end up removing self as a contributor from all projects
if self == user:
raise ValueError('Cannot merge a user into itself')
# Fail if the other user has conflicts.
if not user.can_be_merged:
raise MergeConflictError('Users cannot be merged')
# Move over the other user's attributes
# TODO: confirm
for system_tag in user.system_tags.all():
self.add_system_tag(system_tag)
self.is_claimed = self.is_claimed or user.is_claimed
self.is_invited = self.is_invited or user.is_invited
self.is_superuser = self.is_superuser or user.is_superuser
self.is_staff = self.is_staff or user.is_staff
# copy over profile only if this user has no profile info
if user.jobs and not self.jobs:
self.jobs = user.jobs
if user.schools and not self.schools:
self.schools = user.schools
if user.social and not self.social:
self.social = user.social
unclaimed = user.unclaimed_records.copy()
unclaimed.update(self.unclaimed_records)
self.unclaimed_records = unclaimed
# - unclaimed records should be connected to only one user
user.unclaimed_records = {}
security_messages = user.security_messages.copy()
security_messages.update(self.security_messages)
self.security_messages = security_messages
notifications_configured = user.notifications_configured.copy()
notifications_configured.update(self.notifications_configured)
self.notifications_configured = notifications_configured
if not website_settings.RUNNING_MIGRATION:
for key, value in user.mailchimp_mailing_lists.iteritems():
# subscribe to each list if either user was subscribed
subscription = value or self.mailchimp_mailing_lists.get(key)
signals.user_merged.send(self, list_name=key, subscription=subscription)
# clear subscriptions for merged user
signals.user_merged.send(user, list_name=key, subscription=False, send_goodbye=False)
for target_id, timestamp in user.comments_viewed_timestamp.iteritems():
if not self.comments_viewed_timestamp.get(target_id):
self.comments_viewed_timestamp[target_id] = timestamp
elif timestamp > self.comments_viewed_timestamp[target_id]:
self.comments_viewed_timestamp[target_id] = timestamp
# Give old user's emails to self
user.emails.update(user=self)
for k, v in user.email_verifications.iteritems():
email_to_confirm = v['email']
if k not in self.email_verifications and email_to_confirm != user.username:
self.email_verifications[k] = v
user.email_verifications = {}
self.affiliated_institutions.add(*user.affiliated_institutions.values_list('pk', flat=True))
for service in user.external_identity:
for service_id in user.external_identity[service].iterkeys():
if not (
service_id in self.external_identity.get(service, '') and
self.external_identity[service][service_id] == 'VERIFIED'
):
# Prevent 'CREATE', merging user has already been created.
external = user.external_identity[service][service_id]
status = 'VERIFIED' if external == 'VERIFIED' else 'LINK'
if self.external_identity.get(service):
self.external_identity[service].update(
{service_id: status}
)
else:
self.external_identity[service] = {
service_id: status
}
user.external_identity = {}
# FOREIGN FIELDS
self.external_accounts.add(*user.external_accounts.values_list('pk', flat=True))
# - addons
# Note: This must occur before the merged user is removed as a
# contributor on the nodes, as an event hook is otherwise fired
# which removes the credentials.
for addon in user.get_addons():
user_settings = self.get_or_add_addon(addon.config.short_name)
user_settings.merge(addon)
user_settings.save()
# - projects where the user was a contributor
for node in user.contributed:
# Skip quickfiles
if node.is_quickfiles:
continue
# if both accounts are contributor of the same project
if node.is_contributor(self) and node.is_contributor(user):
user_permissions = node.get_permissions(user)
self_permissions = node.get_permissions(self)
permissions = max([user_permissions, self_permissions])
node.set_permissions(user=self, permissions=permissions)
visible1 = self._id in node.visible_contributor_ids
visible2 = user._id in node.visible_contributor_ids
if visible1 != visible2:
node.set_visible(user=self, visible=True, log=True, auth=Auth(user=self))
node.contributor_set.filter(user=user).delete()
else:
node.contributor_set.filter(user=user).update(user=self)
node.save()
# Skip bookmark collections
user.collection_set.exclude(is_bookmark_collection=True).update(creator=self)
from osf.models import QuickFilesNode
from osf.models import BaseFileNode
# - projects where the user was the creator
user.nodes_created.exclude(type=QuickFilesNode._typedmodels_type).update(creator=self)
# - file that the user has checked_out, import done here to prevent import error
for file_node in BaseFileNode.files_checked_out(user=user):
file_node.checkout = self
file_node.save()
# - move files in the merged user's quickfiles node, checking for name conflicts
from addons.osfstorage.models import OsfStorageFileNode
primary_quickfiles = QuickFilesNode.objects.get(creator=self)
merging_user_quickfiles = QuickFilesNode.objects.get(creator=user)
files_in_merging_user_quickfiles = merging_user_quickfiles.files.filter(type='osf.osfstoragefile')
for merging_user_file in files_in_merging_user_quickfiles:
if OsfStorageFileNode.objects.filter(node=primary_quickfiles, name=merging_user_file.name).exists():
digit = 1
split_filename = splitext(merging_user_file.name)
name_without_extension = split_filename[0]
extension = split_filename[1]
found_digit_in_parens = re.findall('(?<=\()(\d)(?=\))', name_without_extension)
if found_digit_in_parens:
found_digit = int(found_digit_in_parens[0])
digit = found_digit + 1
name_without_extension = name_without_extension.replace('({})'.format(found_digit), '').strip()
new_name_format = '{} ({}){}'
new_name = new_name_format.format(name_without_extension, digit, extension)
# check if new name conflicts, update til it does not (try up to 1000 times)
rename_count = 0
while OsfStorageFileNode.objects.filter(node=primary_quickfiles, name=new_name).exists():
digit += 1
new_name = new_name_format.format(name_without_extension, digit, extension)
rename_count += 1
if rename_count >= MAX_QUICKFILES_MERGE_RENAME_ATTEMPTS:
raise MaxRetriesError('Maximum number of rename attempts has been reached')
merging_user_file.name = new_name
merging_user_file.save()
merging_user_file.node = primary_quickfiles
merging_user_file.save()
# finalize the merge
remove_sessions_for_user(user)
# - username is set to the GUID so the merging user can set it primary
# in the future (note: it cannot be set to None due to non-null constraint)
user.set_unusable_username()
user.set_unusable_password()
user.verification_key = None
user.osf_mailing_lists = {}
user.merged_by = self
user.save()
def disable_account(self):
"""
        Disable the user account: set ``is_disabled`` to True, unsubscribe the user
        from mailchimp emails, and remove any existing sessions.
Ported from framework/auth/core.py
"""
from website import mailchimp_utils
from framework.auth import logout
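        # Unsubscribe from the general mailing list; tolerate users who were never subscribed or a disabled mailchimp integration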
try:
mailchimp_utils.unsubscribe_mailchimp(
list_name=website_settings.MAILCHIMP_GENERAL_LIST,
user_id=self._id,
username=self.username
)
except mailchimp_utils.mailchimp.ListNotSubscribedError:
pass
except mailchimp_utils.mailchimp.InvalidApiKeyError:
if not website_settings.ENABLE_EMAIL_SUBSCRIPTIONS:
pass
else:
raise
except mailchimp_utils.mailchimp.EmailNotExistsError:
pass
# Call to `unsubscribe` above saves, and can lead to stale data
self.reload()
self.is_disabled = True
# we must call both methods to ensure the current session is cleared and all existing
# sessions are revoked.
req = get_current_request()
if isinstance(req, FlaskRequest):
logout()
remove_sessions_for_user(self)
def update_is_active(self):
"""Update ``is_active`` to be consistent with the fields that
it depends on.
"""
# The user can log in if they have set a password OR
        # have a verified external ID, e.g. an ORCID
can_login = self.has_usable_password() or (
'VERIFIED' in sum([each.values() for each in self.external_identity.values()], [])
)
self.is_active = (
self.is_registered and
self.is_confirmed and
can_login and
not self.is_merged and
not self.is_disabled
)
# Overrides BaseModel
def save(self, *args, **kwargs):
self.update_is_active()
self.username = self.username.lower().strip() if self.username else None
dirty_fields = set(self.get_dirty_fields(check_relationship=True))
ret = super(OSFUser, self).save(*args, **kwargs)
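        # Re-index the user in search only when search-relevant fields changed and the account is confirmed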
if self.SEARCH_UPDATE_FIELDS.intersection(dirty_fields) and self.is_confirmed:
self.update_search()
self.update_search_nodes_contributors()
if 'fullname' in dirty_fields:
from osf.models.quickfiles import get_quickfiles_project_title, QuickFilesNode
quickfiles = QuickFilesNode.objects.filter(creator=self).first()
if quickfiles:
quickfiles.title = get_quickfiles_project_title(self)
quickfiles.save()
return ret
# Legacy methods
@classmethod
def create(cls, username, password, fullname, accepted_terms_of_service=None):
validate_email(username) # Raises BlacklistedEmailError if spam address
user = cls(
username=username,
fullname=fullname,
accepted_terms_of_service=accepted_terms_of_service
)
user.update_guessed_names()
user.set_password(password)
return user
def set_password(self, raw_password, notify=True):
"""Set the password for this user to the hash of ``raw_password``.
If this is a new user, we're done. If this is a password change,
then email the user about the change and clear all the old sessions
so that users will have to log in again with the new password.
:param raw_password: the plaintext value of the new password
:param notify: Only meant for unit tests to keep extra notifications from being sent
"""
had_existing_password = bool(self.has_usable_password() and self.is_confirmed)
if self.username == raw_password:
raise ChangePasswordError(['Password cannot be the same as your email address'])
super(OSFUser, self).set_password(raw_password)
if had_existing_password and notify:
mails.send_mail(
to_addr=self.username,
mail=mails.PASSWORD_RESET,
mimetype='html',
user=self,
can_change_preferences=False,
osf_contact_email=website_settings.OSF_CONTACT_EMAIL
)
remove_sessions_for_user(self)
@classmethod
def create_unconfirmed(cls, username, password, fullname, external_identity=None,
do_confirm=True, campaign=None, accepted_terms_of_service=None):
"""Create a new user who has begun registration but needs to verify
their primary email address (username).
"""
user = cls.create(username, password, fullname, accepted_terms_of_service)
user.add_unconfirmed_email(username, external_identity=external_identity)
user.is_registered = False
if external_identity:
user.external_identity.update(external_identity)
if campaign:
            # needed to prevent circular import
from framework.auth.campaigns import system_tag_for_campaign # skipci
# User needs to be saved before adding system tags (due to m2m relationship)
user.save()
user.add_system_tag(system_tag_for_campaign(campaign))
return user
@classmethod
def create_confirmed(cls, username, password, fullname):
user = cls.create(username, password, fullname)
user.is_registered = True
user.is_claimed = True
user.save() # Must save before using auto_now_add field
user.date_confirmed = user.date_registered
user.emails.create(address=username.lower().strip())
return user
def get_unconfirmed_email_for_token(self, token):
"""Return email if valid.
        :rtype: str
:raises: ExpiredTokenError if trying to access a token that is expired.
:raises: InvalidTokenError if trying to access a token that is invalid.
"""
if token not in self.email_verifications:
raise InvalidTokenError
verification = self.email_verifications[token]
# Not all tokens are guaranteed to have expiration dates
if (
'expiration' in verification and
verification['expiration'].replace(tzinfo=pytz.utc) < timezone.now()
):
raise ExpiredTokenError
return verification['email']
def get_unconfirmed_emails_exclude_external_identity(self):
"""Return a list of unconfirmed emails that are not related to external identity."""
unconfirmed_emails = []
if self.email_verifications:
for token, value in self.email_verifications.iteritems():
if not value.get('external_identity'):
unconfirmed_emails.append(value.get('email'))
return unconfirmed_emails
@property
def unconfirmed_email_info(self):
"""Return a list of dictionaries containing information about each of this
user's unconfirmed emails.
"""
unconfirmed_emails = []
email_verifications = self.email_verifications or []
for token in email_verifications:
if self.email_verifications[token].get('confirmed', False):
try:
user_merge = OSFUser.objects.get(emails__address__iexact=self.email_verifications[token]['email'])
except OSFUser.DoesNotExist:
user_merge = False
unconfirmed_emails.append({'address': self.email_verifications[token]['email'],
'token': token,
'confirmed': self.email_verifications[token]['confirmed'],
'user_merge': user_merge.email if user_merge else False})
return unconfirmed_emails
def clean_email_verifications(self, given_token=None):
email_verifications = deepcopy(self.email_verifications or {})
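        # Iterate the original mapping while pruning invalid tokens from the copy, then swap the cleaned copy back in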
for token in self.email_verifications or {}:
try:
self.get_unconfirmed_email_for_token(token)
except (KeyError, ExpiredTokenError):
email_verifications.pop(token)
continue
if token == given_token:
email_verifications.pop(token)
self.email_verifications = email_verifications
def verify_password_token(self, token):
"""
Verify that the password reset token for this user is valid.
:param token: the token in verification key
:return `True` if valid, otherwise `False`
"""
if token and self.verification_key_v2:
try:
return (self.verification_key_v2['token'] == token and
self.verification_key_v2['expires'] > timezone.now())
except AttributeError:
return False
return False
def verify_claim_token(self, token, project_id):
"""Return whether or not a claim token is valid for this user for
a given node which they were added as a unregistered contributor for.
"""
try:
record = self.get_unclaimed_record(project_id)
except ValueError: # No unclaimed record for given pid
return False
return record['token'] == token
@classmethod
def create_unregistered(cls, fullname, email=None):
"""Create a new unregistered user.
"""
user = cls(
username=email,
fullname=fullname,
is_invited=True,
is_registered=False,
)
if not email:
user.set_unusable_username()
user.set_unusable_password()
user.update_guessed_names()
return user
def update_guessed_names(self):
"""Updates the CSL name fields inferred from the the full name.
"""
parsed = impute_names(self.fullname)
self.given_name = parsed['given']
self.middle_names = parsed['middle']
self.family_name = parsed['family']
self.suffix = parsed['suffix']
def add_unconfirmed_email(self, email, expiration=None, external_identity=None):
"""
Add an email verification token for a given email.
:param email: the email to confirm
        :param expiration: overwrite the default expiration time
:param external_identity: the user's external identity
:return: a token
:raises: ValueError if email already confirmed, except for login through external idp.
"""
# Note: This is technically not compliant with RFC 822, which requires
# that case be preserved in the "local-part" of an address. From
# a practical standpoint, the vast majority of email servers do
# not preserve case.
# ref: https://tools.ietf.org/html/rfc822#section-6
email = email.lower().strip()
with reraise_django_validation_errors():
validate_email(email)
if not external_identity and self.emails.filter(address=email).exists():
raise ValueError('Email already confirmed to this user.')
# If the unconfirmed email is already present, refresh the token
if email in self.unconfirmed_emails:
self.remove_unconfirmed_email(email)
verification_key = generate_verification_key(verification_type='confirm')
# handle when email_verifications is None
if not self.email_verifications:
self.email_verifications = {}
self.email_verifications[verification_key['token']] = {
'email': email,
'confirmed': False,
'expiration': expiration if expiration else verification_key['expires'],
'external_identity': external_identity,
}
return verification_key['token']
def remove_unconfirmed_email(self, email):
"""Remove an unconfirmed email addresses and their tokens."""
for token, value in self.email_verifications.iteritems():
if value.get('email') == email:
del self.email_verifications[token]
return True
return False
def remove_email(self, email):
"""Remove a confirmed email"""
if email == self.username:
raise PermissionsError("Can't remove primary email")
if self.emails.filter(address=email):
self.emails.filter(address=email).delete()
signals.user_email_removed.send(self, email=email, osf_contact_email=website_settings.OSF_CONTACT_EMAIL)
def get_confirmation_token(self, email, force=False, renew=False):
"""Return the confirmation token for a given email.
:param str email: The email to get the token for.
:param bool force: If an expired token exists for the given email, generate a new one and return it.
:param bool renew: Generate a new token and return it.
:return Return the confirmation token.
:raises: ExpiredTokenError if trying to access a token that is expired and force=False.
        :raises: KeyError if there is no token for the email.
"""
# TODO: Refactor "force" flag into User.get_or_add_confirmation_token
for token, info in self.email_verifications.items():
if info['email'].lower() == email.lower():
# Old records will not have an expiration key. If it's missing,
# assume the token is expired
expiration = info.get('expiration')
if renew:
new_token = self.add_unconfirmed_email(email)
self.save()
return new_token
                if not expiration or expiration < timezone.now():
if not force:
raise ExpiredTokenError('Token for email "{0}" is expired'.format(email))
else:
new_token = self.add_unconfirmed_email(email)
self.save()
return new_token
return token
raise KeyError('No confirmation token for email "{0}"'.format(email))
def get_confirmation_url(self, email,
external=True,
force=False,
renew=False,
external_id_provider=None,
destination=None):
"""Return the confirmation url for a given email.
:param email: The email to confirm.
:param external: Use absolute or relative url.
:param force: If an expired token exists for the given email, generate a new one and return it.
:param renew: Generate a new token and return it.
:param external_id_provider: The external identity provider that authenticates the user.
:param destination: The destination page to redirect after confirmation
:return: Return the confirmation url.
:raises: ExpiredTokenError if trying to access a token that is expired.
:raises: KeyError if there is no token for the email.
"""
base = website_settings.DOMAIN if external else '/'
token = self.get_confirmation_token(email, force=force, renew=renew)
external = 'external/' if external_id_provider else ''
destination = '?{}'.format(urllib.urlencode({'destination': destination})) if destination else ''
return '{0}confirm/{1}{2}/{3}/{4}'.format(base, external, self._primary_key, token, destination)
def register(self, username, password=None, accepted_terms_of_service=None):
"""Registers the user.
"""
self.username = username
if password:
self.set_password(password)
if not self.emails.filter(address=username):
self.emails.create(address=username)
self.is_registered = True
self.is_claimed = True
self.date_confirmed = timezone.now()
if accepted_terms_of_service:
self.accepted_terms_of_service = timezone.now()
self.update_search()
self.update_search_nodes()
# Emit signal that a user has confirmed
signals.user_confirmed.send(self)
return self
def confirm_email(self, token, merge=False):
"""Confirm the email address associated with the token"""
email = self.get_unconfirmed_email_for_token(token)
# If this email is confirmed on another account, abort
try:
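            # Use a row lock when select_for_update is available, to avoid racing a concurrent confirmation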
if check_select_for_update():
user_to_merge = OSFUser.objects.filter(emails__address=email).select_for_update().get()
else:
user_to_merge = OSFUser.objects.get(emails__address=email)
except OSFUser.DoesNotExist:
user_to_merge = None
if user_to_merge and merge:
self.merge_user(user_to_merge)
elif user_to_merge:
raise MergeConfirmedRequiredError(
'Merge requires confirmation',
user=self,
user_to_merge=user_to_merge,
)
# If another user has this email as its username, get it
try:
unregistered_user = OSFUser.objects.exclude(guids___id=self._id, guids___id__isnull=False).get(username=email)
except OSFUser.DoesNotExist:
unregistered_user = None
if unregistered_user:
self.merge_user(unregistered_user)
self.save()
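            # Free the username so it no longer collides with the now-confirmed email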
unregistered_user.username = None
if not self.emails.filter(address=email).exists():
self.emails.create(address=email)
# Complete registration if primary email
if email.lower() == self.username.lower():
self.register(self.username)
self.date_confirmed = timezone.now()
# Revoke token
del self.email_verifications[token]
# TODO: We can't assume that all unclaimed records are now claimed.
# Clear unclaimed records, so user's name shows up correctly on
# all projects
self.unclaimed_records = {}
self.save()
self.update_search_nodes()
return True
def update_search(self):
from website.search.search import update_user
update_user(self)
def update_search_nodes_contributors(self):
"""
Bulk update contributor name on all nodes on which the user is
a contributor.
:return:
"""
from website.search import search
search.update_contributors_async(self.id)
def update_search_nodes(self):
"""Call `update_search` on all nodes on which the user is a
contributor. Needed to add self to contributor lists in search upon
registration or claiming.
"""
for node in self.contributor_to:
node.update_search()
def update_date_last_login(self):
self.date_last_login = timezone.now()
def get_summary(self, formatter='long'):
return {
'user_fullname': self.fullname,
'user_profile_url': self.profile_url,
'user_display_name': name_formatters[formatter](self),
'user_is_claimed': self.is_claimed
}
def check_password(self, raw_password):
"""
Return a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
Source: https://github.com/django/django/blob/master/django/contrib/auth/base_user.py#L104
"""
def setter(raw_password):
self.set_password(raw_password, notify=False)
# Password hash upgrades shouldn't be considered password changes.
self._password = None
self.save(update_fields=['password'])
return check_password(raw_password, self.password, setter)
def change_password(self, raw_old_password, raw_new_password, raw_confirm_password):
"""Change the password for this user to the hash of ``raw_new_password``."""
raw_old_password = (raw_old_password or '').strip()
raw_new_password = (raw_new_password or '').strip()
raw_confirm_password = (raw_confirm_password or '').strip()
# TODO: Move validation to set_password
issues = []
if not self.check_password(raw_old_password):
self.old_password_invalid_attempts += 1
self.change_password_last_attempt = timezone.now()
issues.append('Old password is invalid')
elif raw_old_password == raw_new_password:
issues.append('Password cannot be the same')
elif raw_new_password == self.username:
issues.append('Password cannot be the same as your email address')
if not raw_old_password or not raw_new_password or not raw_confirm_password:
issues.append('Passwords cannot be blank')
elif len(raw_new_password) < 8:
issues.append('Password should be at least eight characters')
elif len(raw_new_password) > 256:
issues.append('Password should not be longer than 256 characters')
if raw_new_password != raw_confirm_password:
issues.append('Password does not match the confirmation')
if issues:
raise ChangePasswordError(issues)
self.set_password(raw_new_password)
self.reset_old_password_invalid_attempts()
def reset_old_password_invalid_attempts(self):
self.old_password_invalid_attempts = 0
def profile_image_url(self, size=None):
"""A generalized method for getting a user's profile picture urls.
We may choose to use some service other than gravatar in the future,
and should not commit ourselves to using a specific service (mostly
an API concern).
As long as we use gravatar, this is just a proxy to User.gravatar_url
"""
return self._gravatar_url(size)
def _gravatar_url(self, size):
return filters.gravatar(
self,
use_ssl=True,
size=size
)
@property
def display_absolute_url(self):
url = self.absolute_url
if url is not None:
return re.sub(r'https?:', '', url).strip('/')
def display_full_name(self, node=None):
"""Return the full name , as it would display in a contributor list for a
given node.
NOTE: Unclaimed users may have a different name for different nodes.
"""
if node:
unclaimed_data = self.unclaimed_records.get(str(node._id), None)
if unclaimed_data:
return unclaimed_data['name']
return self.fullname
def add_system_tag(self, tag):
if not isinstance(tag, Tag):
tag_instance, created = Tag.all_tags.get_or_create(name=tag.lower(), system=True)
else:
tag_instance = tag
if not tag_instance.system:
raise ValueError('Non-system tag passed to add_system_tag')
if not self.all_tags.filter(id=tag_instance.id).exists():
self.tags.add(tag_instance)
return tag_instance
def get_recently_added(self):
return (
each.contributor
for each in self.recentlyaddedcontributor_set.order_by('-date_added')
)
def _projects_in_common_query(self, other_user):
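        # Subquery: does other_user have a Contributor record on the node being annotated?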
sqs = Contributor.objects.filter(node=models.OuterRef('pk'), user=other_user)
return (self.nodes
.filter(is_deleted=False)
.exclude(type='osf.collection')
.annotate(contrib=models.Exists(sqs))
.filter(contrib=True))
def get_projects_in_common(self, other_user):
"""Returns either a collection of "shared projects" (projects that both users are contributors for)
or just their primary keys
"""
query = self._projects_in_common_query(other_user)
return set(query.all())
def n_projects_in_common(self, other_user):
"""Returns number of "shared projects" (projects that both users are contributors for)"""
return self._projects_in_common_query(other_user).count()
def add_unclaimed_record(self, claim_origin, referrer, given_name, email=None):
"""Add a new project entry in the unclaimed records dictionary.
        :param object claim_origin: Object this unclaimed user was added to. Currently `Node` or `Provider`
:param User referrer: User who referred this user.
:param str given_name: The full name that the referrer gave for this user.
:param str email: The given email address.
:returns: The added record
"""
from osf.models.provider import AbstractProvider
if isinstance(claim_origin, AbstractProvider):
if not bool(get_perms(referrer, claim_origin)):
raise PermissionsError(
'Referrer does not have permission to add a moderator to provider {0}'.format(claim_origin._id)
)
else:
if not claim_origin.can_edit(user=referrer):
raise PermissionsError(
'Referrer does not have permission to add a contributor to project {0}'.format(claim_origin._primary_key)
)
pid = str(claim_origin._id)
referrer_id = str(referrer._id)
if email:
clean_email = email.lower().strip()
else:
clean_email = None
verification_key = generate_verification_key(verification_type='claim')
        # Any existing unclaimed record for this origin is simply overwritten below.
record = {
'name': given_name,
'referrer_id': referrer_id,
'token': verification_key['token'],
'expires': verification_key['expires'],
'email': clean_email,
}
self.unclaimed_records[pid] = record
return record
def get_unclaimed_record(self, project_id):
"""Get an unclaimed record for a given project_id.
:raises: ValueError if there is no record for the given project.
"""
try:
return self.unclaimed_records[project_id]
except KeyError: # reraise as ValueError
raise ValueError('No unclaimed record for user {self._id} on node {project_id}'
.format(**locals()))
def get_claim_url(self, project_id, external=False):
"""Return the URL that an unclaimed user should use to claim their
        account.
        :param project_id: The project ID for the unclaimed record
        :param external: Whether to return an absolute URL
        :raises: ValueError if a record doesn't exist for the given project ID
        :rtype: str
        :returns: The claim URL for the project
"""
uid = self._primary_key
base_url = website_settings.DOMAIN if external else '/'
unclaimed_record = self.get_unclaimed_record(project_id)
token = unclaimed_record['token']
return '{base_url}user/{uid}/{project_id}/claim/?token={token}'\
.format(**locals())
def is_affiliated_with_institution(self, institution):
"""Return if this user is affiliated with ``institution``."""
return self.affiliated_institutions.filter(id=institution.id).exists()
def update_affiliated_institutions_by_email_domain(self):
"""
Append affiliated_institutions by email domain.
:return:
"""
try:
email_domains = [email.split('@')[1].lower() for email in self.emails.values_list('address', flat=True)]
insts = Institution.objects.filter(email_domains__overlap=email_domains)
if insts.exists():
self.affiliated_institutions.add(*insts)
except IndexError:
pass
def remove_institution(self, inst_id):
try:
inst = self.affiliated_institutions.get(_id=inst_id)
except Institution.DoesNotExist:
return False
else:
self.affiliated_institutions.remove(inst)
return True
def get_activity_points(self):
return analytics.get_total_activity_count(self._id)
def get_or_create_cookie(self, secret=None):
"""Find the cookie for the given user
Create a new session if no cookie is found
:param str secret: The key to sign the cookie with
:returns: The signed cookie
"""
secret = secret or settings.SECRET_KEY
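        # Reuse the most recently modified session for this user, if one exists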
user_session = Session.objects.filter(
data__auth_user_id=self._id
).order_by(
'-modified'
).first()
if not user_session:
user_session = Session(data={
'auth_user_id': self._id,
'auth_user_username': self.username,
'auth_user_fullname': self.fullname,
})
user_session.save()
signer = itsdangerous.Signer(secret)
return signer.sign(user_session._id)
@classmethod
def from_cookie(cls, cookie, secret=None):
"""Attempt to load a user from their signed cookie
:returns: None if a user cannot be loaded else User
"""
if not cookie:
return None
secret = secret or settings.SECRET_KEY
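        # Verify the cookie signature before trusting the embedded session id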
try:
token = itsdangerous.Signer(secret).unsign(cookie)
except itsdangerous.BadSignature:
return None
user_session = Session.load(token)
if user_session is None:
return None
return cls.load(user_session.data.get('auth_user_id'))
def get_node_comment_timestamps(self, target_id):
""" Returns the timestamp for when comments were last viewed on a node, file or wiki.
"""
default_timestamp = dt.datetime(1970, 1, 1, 12, 0, 0, tzinfo=pytz.utc)
return self.comments_viewed_timestamp.get(target_id, default_timestamp)
def gdpr_delete(self):
"""
This function does not remove the user object reference from our database, but it does disable the account and
        remove identifying information in a manner compliant with GDPR guidelines.
Follows the protocol described in
https://openscience.atlassian.net/wiki/spaces/PRODUC/pages/482803755/GDPR-Related+protocols
"""
from osf.models import PreprintService, AbstractNode
user_nodes = self.nodes.all()
# Validates the user isn't trying to delete things they deliberately made public.
if user_nodes.filter(type='osf.registration').exists():
raise UserStateError('You cannot delete this user because they have one or more registrations.')
if PreprintService.objects.filter(node___contributors=self, ever_public=True).exists():
raise UserStateError('You cannot delete this user because they have one or more preprints.')
        # Validates that the user isn't the only admin on any node that has other contributors.
personal_nodes = AbstractNode.objects.annotate(contrib_count=Count('_contributors')).filter(contrib_count__lte=1).filter(contributor__user=self)
shared_nodes = user_nodes.exclude(id__in=personal_nodes.values_list('id'))
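        # personal_nodes: this user is the sole contributor; shared_nodes: everything else they contribute to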
for node in shared_nodes.exclude(type='osf.quickfilesnode'):
alternate_admins = Contributor.objects.select_related('user').filter(
node=node,
user__is_active=True,
admin=True,
).exclude(user=self)
if not alternate_admins:
raise UserStateError(
'You cannot delete node {} because it would be a node with contributors, but with no admin.'.format(
node._id))
for addon in node.get_addons():
if addon.short_name not in ('osfstorage', 'wiki') and addon.user_settings and addon.user_settings.owner.id == self.id:
raise UserStateError('You cannot delete this user because they '
'have an external account for {} attached to Node {}, '
'which has other contributors.'.format(addon.short_name, node._id))
for node in shared_nodes.all():
logger.info('Removing {self._id} as a contributor to node (pk:{node_id})...'.format(self=self, node_id=node.pk))
node.remove_contributor(self, auth=Auth(self), log=False)
        # This doesn't remove identifying info, but it ensures other users can't see the deleted user's profile, etc.
self.disable_account()
# delete all personal nodes (one contributor), bookmarks, quickfiles etc.
for node in personal_nodes.all():
logger.info('Soft-deleting node (pk: {node_id})...'.format(node_id=node.pk))
node.remove_node(auth=Auth(self))
logger.info('Clearing identifying information...')
# This removes identifying info
# hard-delete all emails associated with the user
self.emails.all().delete()
# Change name to "Deleted user" so that logs render properly
self.fullname = 'Deleted user'
self.set_unusable_username()
self.set_unusable_password()
self.given_name = ''
self.family_name = ''
self.middle_names = ''
self.mailchimp_mailing_lists = {}
self.osf_mailing_lists = {}
self.verification_key = None
self.suffix = ''
self.jobs = []
self.schools = []
self.social = []
self.unclaimed_records = {}
self.notifications_configured = {}
# Scrub all external accounts
if self.external_accounts.exists():
logger.info('Clearing identifying information from external accounts...')
for account in self.external_accounts.all():
account.oauth_key = None
account.oauth_secret = None
account.refresh_token = None
account.provider_name = 'gdpr-deleted'
account.display_name = None
account.profile_url = None
account.save()
self.external_accounts.clear()
self.external_identity = {}
self.deleted = timezone.now()
class Meta:
# custom permissions for use in the OSF Admin App
permissions = (
('view_osfuser', 'Can view user details'),
)
@receiver(post_save, sender=OSFUser)
def add_default_user_addons(sender, instance, created, **kwargs):
if created:
for addon in website_settings.ADDONS_AVAILABLE:
if 'user' in addon.added_default:
instance.add_addon(addon.short_name)
@receiver(post_save, sender=OSFUser)
def create_bookmark_collection(sender, instance, created, **kwargs):
if created:
new_bookmark_collection(instance)
# Allows this hook to be easily mock.patched
def _create_quickfiles_project(instance):
from osf.models.quickfiles import QuickFilesNode
QuickFilesNode.objects.create_for_user(instance)
@receiver(post_save, sender=OSFUser)
def create_quickfiles_project(sender, instance, created, **kwargs):
if created:
_create_quickfiles_project(instance)
| apache-2.0 | -4,507,992,747,545,736,000 | 38.744329 | 152 | 0.62157 | false |