Dataset schema (one record per source file; each record appears below as a pipe-delimited metadata row, followed by the file `content`, followed by a trailing `| avg_line_length | max_line_length | alphanum_fraction |` row):

| column | dtype | values |
| --- | --- | --- |
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
919fa37aa3543bcf02318ed5c4668d2fdd054d45 | 10,520 | py | Python | utils/tree_structure_tictactoe/tictactoe_to_graphviz_with_minimax_value.py | jeremiedecock/tictactoe-py | 5ae39448e3a4b7d0e002f84d73b193920dfecfe0 | ["MIT"] | null | null | null | utils/tree_structure_tictactoe/tictactoe_to_graphviz_with_minimax_value.py | jeremiedecock/tictactoe-py | 5ae39448e3a4b7d0e002f84d73b193920dfecfe0 | ["MIT"] | null | null | null | utils/tree_structure_tictactoe/tictactoe_to_graphviz_with_minimax_value.py | jeremiedecock/tictactoe-py | 5ae39448e3a4b7d0e002f84d73b193920dfecfe0 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
# TODO:
# - separate State (Node), Transition (Node.getChildNodes() -> Tictactoe), Policy (Minimax)
# TODO: add a self.value() method that returns the value of the (sub)tree, whichever player is to move, whether or not the node is a leaf
# STATE + TRANSITION ##########################################################
class Node:
"""Node class.
Build and keep the full tree in memory.
self._state[0:9] is the board state (3x3 squares).
Each square has value 0 (empty), -1 (filled by player -1) or 1 (filled by
player 1).
self._state[9] is the player (1 or -1) who will play in this state."""
def __init__(self, state):
self._state = tuple(state)
# Make child nodes
self._child_nodes = []
if not self.isFinal()[0]:
player_id = self._state[-1]
# Get the list index of empty squares (that is to say self._state[index]==0)
empty_indices = [index for index, state in enumerate(self._state[:-1]) if state==0]
for index in empty_indices:
child_node_value = list(self._state) # *copy* the list
child_node_value[index] = player_id
child_node_value[-1] = player_id * -1
self._child_nodes.append(Node(child_node_value))
else:
pass
def isFinal(self):
"""Return true is this node is a leaf node and return the value of this node.
Return 0 as value if the current state is a draw.
Return -1 as value if player -1 win.
Return 1 as value if player 1 win.
Else return None as value."""
is_final = False
value = None
state = self._state[0:9]
        # DRAW GAME ################### TODO: move this into a separate function
# Check if there is at least one empty square
if state.count(0) == 0:
is_final = True
value = 0
# TODO: it's useless to check if each player has won because the
# current player is the only one who can win... (a win is a leaf node
# and for each state, at most one player has won)
        # PLAYER 1 WINS ############### TODO: move this into a separate function
# Check lines
if sum(state[0:3])==3 or sum(state[3:6])==3 or sum(state[6:9])==3:
is_final = True
value = 1
# Check columns
elif sum(state[0:9:3])==3 or sum(state[1:9:3])==3 or sum(state[2:9:3])==3:
is_final = True
value = 1
# Check diagonals
elif sum(state[0:9:4])==3 or sum(state[2:7:2])==3:
is_final = True
value = 1
# PLAYER -1 WINS ##############
# Check lines
elif sum(state[0:3])==-3 or sum(state[3:6])==-3 or sum(state[6:9])==-3:
is_final = True
value = -1
# Check columns
elif sum(state[0:9:3])==-3 or sum(state[1:9:3])==-3 or sum(state[2:9:3])==-3:
is_final = True
value = -1
# Check diagonals
elif sum(state[0:9:4])==-3 or sum(state[2:7:2])==-3:
is_final = True
value = -1
return is_final, value
def getState(self):
return self._state
def getChildNodes(self):
return self._child_nodes
# POLICY ######################################################################
class Minimax:
@staticmethod
def minimax_decision(node):
def value(node):
value = None
if node.getState()[9] == 1:
value = Minimax.max_value(node)
else:
value = Minimax.min_value(node)
return value
#best_states = max([(child_node, value(child_node)) for child_node in node.getChildNodes()])
child_nodes = node.getChildNodes()
best_state, best_value = child_nodes[0], value(child_nodes[0])
for child_node in child_nodes:
            if value(child_node) > best_value: # TODO: > or < depends on whether player 1 or -1 plays! Although... the "max" role rotates, so the current player is always "max" if they follow the minimax policy?
best_state, best_value = child_node, value(child_node)
# TODO return action...
@staticmethod
def max_value(node):
if node.isFinal()[0]:
return node.isFinal()[1]
v = -1 # -infinity
for child_node in node.getChildNodes():
v = max(v, Minimax.min_value(child_node))
return v
@staticmethod
def min_value(node):
if node.isFinal()[0]:
return node.isFinal()[1]
v = 1 # +infinity
for child_node in node.getChildNodes():
v = min(v, Minimax.max_value(child_node))
return v
###############################################################################
# TODO faire generateur walk() et le réutiliser dans graphviz() et statistics()
def game_tree_to_graphviz(node, max_depth, filename="tictactoe.dot"):
"""Make a Graphviz representation of the game tree."""
dot_node_declaration = []
dot_edge_declaration = []
symbols = {-1: "x", 0: " ", 1: "o"}
def walk(node, max_depth):
"""The tree traversal function"""
# Do something with node value...
str_val = [symbols[item] for item in node.getState()[0:9]] # convert node.value (list of integers) to list of string (tictactoe symbols "x", " " and "o")
color = "black"
node_final_value = node.isFinal()[1]
if node_final_value == 1: # player "1" win
color = "green"
elif node_final_value == -1: # player "-1" win
color = "red"
value = 0
if node.getState()[9] == 1:
value = Minimax.max_value(node)
else:
value = Minimax.min_value(node)
dot_node_declaration.append('\t%d [shape=record, color=%s, label="{%s}|{%s}|{%s}"];' % (id(node), color, "|".join(str_val[0:3]), "|".join(str_val[3:6]), "|".join(str_val[6:9])))
#dot_node_declaration.append('\t%d [shape=record, color=%s, label="%d|{%s}|{%s}|{%s}"];' % (id(node), color, value, "|".join(str_val[0:3]), "|".join(str_val[3:6]), "|".join(str_val[6:9])))
if max_depth > 1:
for child_node in node.getChildNodes():
dot_edge_declaration.append('\t%d -> %d;' % (id(node), id(child_node)))
# Recurse on each child node
if max_depth > 1:
for child_node in node.getChildNodes():
walk(child_node, max_depth - 1)
# Traverse the tree
walk(node, max_depth)
# Write the "dot" file (Graphviz)
fd = open(filename, "w")
print >> fd, "digraph G {"
print >> fd, os.linesep.join(dot_node_declaration)
print >> fd, os.linesep.join(dot_edge_declaration)
print >> fd, "}"
fd.close()
# Print some statistics about the tree
print "Graphviz:"
print len(dot_node_declaration), "nodes generated"
print len(dot_edge_declaration), "edges generated"
print
###############################################################################
def print_statistics(node):
""" This function is used to check if the number of games (that is to say the
number of leaf nodes) is correct.
See http://en.wikipedia.org/wiki/Tic-tac-toe#Number_of_possible_games
"How many Tic-Tac-Toe games are possible?" Henry Bottomley, 2001
"Mathematical Recreations" Steve Schaeffer, 2002"""
number_of_leaf_nodes = {-1:0, 0:0, 1:0}
number_of_nodes = [0] # TODO: remove this ugly workaround
def walk(node):
"""The tree traversal function"""
number_of_nodes[0] += 1 # TODO: remove this ugly workaround
is_final, value = node.isFinal()
if is_final:
if value == -1:
number_of_leaf_nodes[-1] += 1
elif value == 0:
number_of_leaf_nodes[0] += 1
elif value == 1:
number_of_leaf_nodes[1] += 1
else:
print "Error: unknown value."
# Recurse on each child node
for child_node in node.getChildNodes():
walk(child_node)
walk(node)
print "Statistics:"
print number_of_nodes, "nodes in the tree"
print number_of_leaf_nodes[-1] + number_of_leaf_nodes[0] + number_of_leaf_nodes[1], "possible games (number of leaf nodes)"
print number_of_leaf_nodes[1], "finished games are won by player 1"
print number_of_leaf_nodes[-1], "finished games are won by player -1"
print number_of_leaf_nodes[0], "finished games are drawn"
###############################################################################
def main():
"""Main function
Build the tic-tac-toe game tree and traverse it.
"""
# Build the game tree
#root = Node([0, 0, 1, 0, 0, -1, 1, -1, 0, 1]) # Start with a non-empty board
root = Node([0, 0, 1, 1, -1, -1, 1, -1, 0, 1]) # Start with a non-empty board
#root = Node([0, -1, 1, 0, 0, -1, 1, -1, 1, 1]) # Start with a non-empty board
#root = Node([0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
# Traverse the tree
game_tree_to_graphviz(root, 10)
# Traverse the tree
print_statistics(root)
if __name__ == '__main__':
main()
| 34.834437 | 214 | 0.573004 |
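The Node class above materialises the full tic-tac-toe game tree and Minimax scores it with plain minimax. Below is a minimal usage sketch (an editor's illustration, not part of the original file); it assumes the file is importable under its own name and is run with Python 2, since the module still uses Python 2 print statements elsewhere.

# Editor's sketch: exercise Node and Minimax on a small, nearly finished board.
from tictactoe_to_graphviz_with_minimax_value import Node, Minimax

# state[0:9] is the board (row-major), state[9] is the player to move.
#   1  1  .     player 1 to move can win immediately by taking square 2
#  -1 -1  .
#   .  .  .
root = Node([1, 1, 0, -1, -1, 0, 0, 0, 0, 1])

is_final, value = root.isFinal()
print(is_final)                    # False, the game is still open
print(len(root.getChildNodes()))   # 5 legal moves from this position
print(Minimax.max_value(root))     # 1, player 1 (the maximiser) has a forced win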
579aee5bb9d73fa9f62107f64a7136050ebec30d | 3,178 | py | Python | server/pyScripts/filestring_utils.py | btester271828/malcolmjs | 16292d41864f00dd4f7e129618866eb8a732637e | ["Apache-2.0"] | 7 | 2017-02-27T17:41:02.000Z | 2019-06-20T12:59:06.000Z | server/pyScripts/filestring_utils.py | btester271828/malcolmjs | 16292d41864f00dd4f7e129618866eb8a732637e | ["Apache-2.0"] | 424 | 2018-04-12T15:15:24.000Z | 2022-03-08T23:05:40.000Z | server/pyScripts/filestring_utils.py | btester271828/malcolmjs | 16292d41864f00dd4f7e129618866eb8a732637e | ["Apache-2.0"] | 3 | 2016-05-19T15:13:03.000Z | 2018-11-15T10:58:56.000Z |
import stat
import sys
import os
import errno
def mkdir_p(path):
"""Mimics functionality of bash 'mkdir -p'"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def read_file_to_lines(root_dir, filename, trim_newlines):
try:
with open(root_dir + '/' + filename, 'r') as input_file:
if trim_newlines:
string_array = input_file.read().split('\n')
else:
string_array = input_file.readlines()
except IOError:
sys.exit('Cannot find file %s in root path...Exiting!' % filename)
while '' in string_array:
string_array.remove('')
return string_array
def write_lines_to_file(file_path, lines):
with open(file_path, 'w') as output_file:
output_file.writelines(lines)
def write_file_as_rx(filename, string_lines):
"""Writes string to file, setting permissions as read-only and executable"""
with open(filename, 'w') as writefile:
writefile.writelines(string_lines)
os.chmod(filename, stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
def find_and_replace_line(filename, substitution_dict):
"""Reads lines of file at path filename into string array, looking for and substituting
lines which match patterns given in substitution_dict (including newline in substitute will replace multiple lines)"""
with open(filename, 'r') as readfile:
lines = readfile.readlines()
lines_to_find = substitution_dict.keys()
for line in range(len(lines)):
for target in lines_to_find:
found_regexp = lines[line].find(target)
if found_regexp != -1:
substitute = substitution_dict[target].split('\n')
for substitute_line in substitute:
lines[line] = substitute_line
if substitute_line != '':
lines[line] += '\n'
line += 1
line -= 1
return lines
def find_and_replace_regexp(filename, substitution_dict):
"""Reads lines of file at path filename into string array, looking for and substituting
expressions which match patterns given in substitution_dict"""
with open(filename, 'r') as readfile:
lines = readfile.readlines()
lines_to_find = substitution_dict.keys()
for line in range(len(lines)):
for target in lines_to_find:
found_regexp = lines[line].find(target)
if found_regexp != -1:
lines[line] = lines[line].replace(target, substitution_dict[target])
return lines
def copy_and_merge(source_files, destination):
"""Read all lines from each file path in list source_files and writes to single file at destination"""
with open(destination, 'w') as destination_file:
for source_file in source_files:
with open(source_file, 'r') as source_part:
source_part_lines = source_part.readlines()
for line in source_part_lines:
destination_file.write(line)
| 36.953488 | 122 | 0.642857 |
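The helpers above are thin wrappers around line-oriented file editing. A short usage sketch follows (editor's illustration, not part of the original module): the file name config.template and the @PORT_NUMBER@ placeholder are made-up values, and the module is assumed to be importable as filestring_utils.

# Editor's sketch: substitute a placeholder in a template and write the result out.
import filestring_utils

lines = filestring_utils.find_and_replace_regexp(
    'config.template',           # hypothetical input file
    {'@PORT_NUMBER@': '8080'},   # each key found in a line is replaced by its value
)
filestring_utils.write_lines_to_file('config.generated', lines)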
9c98525fcb0d0b1802ff821f7d1db0119cf52338 | 10,149 | py | Python | src/tools/convert_airsimcam_to_coco.py | PhyllisH/CenterNet | dc17ed79329a7a8faeffbd44be85019b4779a371 | ["MIT"] | null | null | null | src/tools/convert_airsimcam_to_coco.py | PhyllisH/CenterNet | dc17ed79329a7a8faeffbd44be85019b4779a371 | ["MIT"] | null | null | null | src/tools/convert_airsimcam_to_coco.py | PhyllisH/CenterNet | dc17ed79329a7a8faeffbd44be85019b4779a371 | ["MIT"] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
import json
import numpy as np
import math
import cv2
import os
import random
import matplotlib.pyplot as plt
from pyquaternion import Quaternion
import pycocotools.coco as coco
# DATA_PATH = '../../data/kitti/'
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.data_classes import LidarPointCloud, RadarPointCloud, Box
from nuscenes.utils.geometry_utils import view_points, transform_matrix
train_split = ['scene_0', 'scene_1', 'scene_2', 'scene_3', 'scene_4', 'scene_5',
'scene_6', 'scene_8', 'scene_9', 'scene_10', 'scene_11', 'scene_12',
'scene_13', 'scene_14', 'scene_16', 'scene_17', 'scene_18', 'scene_19',
'scene_20', 'scene_21', 'scene_22', 'scene_23', 'scene_24', 'scene_26',
'scene_28', 'scene_29', 'scene_30', 'scene_31', 'scene_32', 'scene_33',
'scene_34', 'scene_35', 'scene_36', 'scene_37', 'scene_38', 'scene_39',
'scene_40', 'scene_42', 'scene_44', 'scene_45', 'scene_46', 'scene_47',
'scene_48', 'scene_49', 'scene_50', 'scene_51', 'scene_52', 'scene_53',
'scene_55', 'scene_56', 'scene_57', 'scene_61', 'scene_62', 'scene_63',
'scene_65', 'scene_66', 'scene_67', 'scene_68', 'scene_69', 'scene_70',
'scene_71', 'scene_72', 'scene_73', 'scene_75', 'scene_76', 'scene_77',
'scene_78', 'scene_79', 'scene_80', 'scene_81', 'scene_82', 'scene_83',
'scene_84', 'scene_87', 'scene_88', 'scene_90', 'scene_92', 'scene_94',
'scene_95', 'scene_97', 'scene_98', 'scene_99', 'scene_100', 'scene_101',
'scene_102', 'scene_103', 'scene_104', 'scene_105', 'scene_106', 'scene_107',
'scene_108', 'scene_109', 'scene_110', 'scene_111', 'scene_112', 'scene_113',
'scene_114', 'scene_116', 'scene_118', 'scene_119']
val_split = ['scene_7', 'scene_15', 'scene_25', 'scene_27', 'scene_41', 'scene_43',
'scene_54', 'scene_58', 'scene_59', 'scene_60', 'scene_64', 'scene_74',
'scene_85', 'scene_86', 'scene_89', 'scene_91', 'scene_93', 'scene_96',
'scene_115', 'scene_117']
def quaternion2euler(rotation):
w, x, y, z = rotation[0], rotation[1], rotation[2], rotation[3]
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
roll_x = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch_y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
return roll_x, pitch_y, yaw_z
def _get_rotation_matrix(translation, rotation):
roll, pitch, yaw = quaternion2euler(rotation)
c_y = np.cos(yaw)
s_y = np.sin(yaw)
c_r = np.cos(roll)
s_r = np.sin(roll)
c_p = np.cos(pitch)
s_p = np.sin(pitch)
matrix = np.matrix(np.identity(4))
matrix[0, 3] = translation[0]
matrix[1, 3] = translation[1]
matrix[2, 3] = translation[2]
matrix[0, 0] = c_p * c_y
matrix[0, 1] = c_y * s_p * s_r - s_y * c_r
matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r
matrix[1, 0] = s_y * c_p
matrix[1, 1] = s_y * s_p * s_r + c_y * c_r
matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r
matrix[2, 0] = s_p
matrix[2, 1] = -c_p * s_r
matrix[2, 2] = c_p * c_r
return matrix
def _get_vehicle_coord(anno_data):
translation = anno_data["translation"]
size = anno_data["size"]
a = size[0]
size[0] = size[1]
size[1] = a
rotation = anno_data["rotation"]
    # cords holds the 8 corner points of the vehicle's 3D bounding box, in homogeneous coordinates
cords = np.zeros((8, 4))
cords[0, :] = np.array([size[0] / 2, size[1] / 2, -size[2] / 2, 1])
cords[1, :] = np.array([-size[0] / 2, size[1] / 2, -size[2] / 2, 1])
cords[2, :] = np.array([-size[0] / 2, -size[1] / 2, -size[2] / 2, 1])
cords[3, :] = np.array([size[0] / 2, -size[1] / 2, -size[2] / 2, 1])
cords[4, :] = np.array([size[0] / 2, size[1] / 2, size[2] / 2, 1])
cords[5, :] = np.array([-size[0] / 2, size[1] / 2, size[2] / 2, 1])
cords[6, :] = np.array([-size[0] / 2, -size[1] / 2, size[2] / 2, 1])
cords[7, :] = np.array([size[0] / 2, -size[1] / 2, size[2] / 2, 1])
vehicle_world_matrix = _get_rotation_matrix(translation, rotation)
world_cords = np.dot(vehicle_world_matrix, np.transpose(cords))
return np.array(world_cords)
def get_2d_bounding_box(cords):
x_min = cords[0, 0]
x_max = cords[0, 0]
y_min = cords[1, 0]
y_max = cords[1, 0]
for i in range(1, 8):
if cords[0, i] < x_min:
x_min = cords[0, i]
if cords[0, i] > x_max:
x_max = cords[0, i]
if cords[1, i] < y_min:
y_min = cords[1, i]
if cords[1, i] > y_max:
y_max = cords[1, i]
return x_min, y_min, x_max - x_min, y_max - y_min
def convert_coco():
# data_dir = 'C:/Users/35387/Desktop/airsim_camera_demo'
data_dir = '/DB/rhome/shaohengfang/datasets/airsim/airsim_camera_10scene'
DEBUG = False
nusc = NuScenes(version='v1.0-mini', dataroot=data_dir, verbose=True)
cats = ['car', 'car_overlook']
splits = ['train', 'val']
scene_split = {'train': train_split, 'val': val_split}
cat_ids = {cat: i + 1 for i, cat in enumerate(cats)}
F = 400 # focal
H = 450 # height
W = 800 # width
camera_intrinsic = [[400.0, 0.0, 400.0],
[0.0, 400.0, 225.0],
[0.0, 0.0, 1.0]]
cat_info = []
for i, cat in enumerate(cats):
cat_info.append({'supercategory': 'vehicle', 'name': cat, 'id': i + 1})
image_id = 0
bbox_id = 0
for split in splits:
ret = {'images': [], "type": "instances", 'annotations': [], 'categories': cat_info}
for scene in nusc.scene:
if not scene["name"] in scene_split[split]:
continue
scene_token = scene['token']
cur_sample_token = scene['first_sample_token']
while cur_sample_token != "":
print(cur_sample_token)
cur_sample = nusc.get("sample", cur_sample_token)
# =======================
# execute the current sample data
anno_tokens = cur_sample["anns"]
# get the vehicle coords in global frame
vehicle_cords = []
for anno_token in anno_tokens:
anno_data = nusc.get("sample_annotation", anno_token)
vehicle_cords.append(_get_vehicle_coord(anno_data))
sample_data = cur_sample["data"]
sensors = list(sample_data.keys())
for sensor in sensors:
# image info
sensor_record = nusc.get("sample_data", sample_data[sensor])
image_id += 1
image_info = {'file_name': sensor_record['filename'],
'id': image_id,
'height': 450,
'width': 900}
ret['images'].append(image_info)
# anno info
calibrated_record = nusc.get("calibrated_sensor", sensor_record["calibrated_sensor_token"])
im_position = calibrated_record["translation"]
im_position[2] = -im_position[2]
im_rotation = calibrated_record["rotation"]
im_rotation[3] = -im_rotation[3]
im_rotation = Quaternion(im_rotation)
cat_id = 1
if sensor[:10] == "CAM_BOTTOM":
cat_id = 2
for vehicle_cord in vehicle_cords:
flag = True
# get bbox from vehicle_cord
vehicle_cord_ = np.array(vehicle_cord)
vehicle_cord_ = vehicle_cord_[:3, :]
for j in range(3):
vehicle_cord_[j, :] = vehicle_cord_[j, :] - im_position[j]
vehicle_cord_[:3, :] = np.dot(im_rotation.rotation_matrix, vehicle_cord_[:3, :])
vehicle_cord_[:3, :] = np.dot(Quaternion([0.5, -0.5, 0.5, -0.5]).rotation_matrix.T,
vehicle_cord_[:3, :])
depths = vehicle_cord_[2, :]
for j in range(8):
if depths[j] < 0:
flag = False
if not flag:
continue
vehicle_points = view_points(vehicle_cord_[:3, :], np.array(camera_intrinsic), normalize=True)
x, y, w, h = get_2d_bounding_box(vehicle_points)
if x < 0 or y < 0 or (x + w) > 800 or (y + h) > 450:
flag = False
if not flag:
continue
bbox_id += 1
ann = {'area': w * h,
'iscrowd': 0,
'image_id': image_id,
'bbox': [800 - x - w, y, w, h],
'category_id': cat_id,
'id': bbox_id,
'ignore': 0,
'segmentation': []}
ret['annotations'].append(ann)
# =======================
cur_sample_token = cur_sample['next']
print("# images: ", len(ret['images']))
print("# annotations: ", len(ret['annotations']))
# out_path = 'C:/Users/35387/Desktop/airsim_camera_demo/airsim_instances_{}.json'.format(split)
out_path = '/DB/rhome/shaohengfang/model/CenterNet/data/airsim_camera/annotations/{}_instances.json'.format(split)
json.dump(ret, open(out_path, 'w'))
if __name__ == '__main__':
convert_coco()
| 42.822785 | 122 | 0.515913 |
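The quaternion2euler helper above implements the standard quaternion-to-Euler (roll, pitch, yaw) conversion. A quick sanity check of it (editor's sketch, assuming the function is in scope, e.g. after importing it from the script):

# Editor's sketch: identity quaternion -> zero angles; 90-degree z-rotation -> yaw = pi/2.
import math

print(quaternion2euler([1.0, 0.0, 0.0, 0.0]))       # (0.0, 0.0, 0.0)

half = math.sqrt(0.5)                                # cos(45 deg) = sin(45 deg)
roll, pitch, yaw = quaternion2euler([half, 0.0, 0.0, half])
print(round(yaw, 6))                                 # 1.570796, i.e. pi / 2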
342339919b30523d35f47e395b6e5b6fb43c6f25 | 3,770 | py | Python | contrib/macdeploy/custom_dsstore.py | Iconoclasta/DWE | 167730512be3a43420e80fe63fcdca33e3478110 | ["MIT"] | 2 | 2019-03-05T13:21:21.000Z | 2019-07-25T18:21:25.000Z | contrib/macdeploy/custom_dsstore.py | Iconoclasta/DWE | 167730512be3a43420e80fe63fcdca33e3478110 | ["MIT"] | null | null | null | contrib/macdeploy/custom_dsstore.py | Iconoclasta/DWE | 167730512be3a43420e80fe63fcdca33e3478110 | ["MIT"] | 4 | 2018-11-07T16:41:42.000Z | 2019-07-24T14:25:28.000Z |
#!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['DWE-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 61.803279 | 1,817 | 0.727056 |
dced5e4a71e68ab26a0cf895db6f13330ddde7af | 1,479 | py | Python | sitemap_generator.py | YanjieZe/blog | 103a551289f0206760fc42337457ffdf56233803 | ["Apache-2.0"] | 1 | 2022-02-20T12:15:15.000Z | 2022-02-20T12:15:15.000Z | sitemap_generator.py | YanjieZe/blog | 103a551289f0206760fc42337457ffdf56233803 | ["Apache-2.0"] | null | null | null | sitemap_generator.py | YanjieZe/blog | 103a551289f0206760fc42337457ffdf56233803 | ["Apache-2.0"] | 1 | 2022-02-20T12:14:26.000Z | 2022-02-20T12:14:26.000Z |
#! /usr/bin/env python3
# -*- coding: utf-8
import os
import arrow
path = 'posts/'
html_names = list(filter(lambda x: x[-5:] == '.html', (os.listdir(path))))
url = 'http://yanjieze.xyz/'
sitemap_preamble = """<?xml version="1.0" encoding="UTF-8"?>
<urlset
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">
"""
sitemap_body = """
<url>
<loc>http://yanjieze.xyz/</loc>
<priority>1.00</priority>
</url>
<url>
<loc>http://yanjieze.xyz/about.html</loc>
<priority>0.80</priority>
</url>
"""
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
from bs4 import BeautifulSoup
for html_name in html_names:
print(f'generating info for {html_name}')
with open(path + html_name, 'r') as html:
parsed_html = BeautifulSoup(html.read().encode('utf-8'), "html5lib")
entry = []
entry.append('<url>')
entry.append('\t<loc>' + url + path + html_name + '</loc>')
lastmod = parsed_html.find('meta', attrs={'name':"last_modified"}).get("content")
entry.append('\t<lastmod>' + lastmod + '</lastmod>')
entry.append('\t<priority>0.80</priority>')
entry.append('</url>')
sitemap_body += '\n'.join(entry)
sitemap_body += '</urlset>'
with open('./sitemap.xml', 'w') as feed:
feed.write(sitemap_preamble)
feed.write(sitemap_body)
| 26.890909 | 83 | 0.654496 |
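The loop above assumes every post under posts/ exposes its modification date through a <meta name="last_modified" ...> tag. A small sketch of that extraction on an inline document (editor's illustration, not part of the original script):

# Editor's sketch: the same BeautifulSoup lookup used above, on a toy HTML string.
from bs4 import BeautifulSoup

html = '<html><head><meta name="last_modified" content="2022-02-20"></head><body></body></html>'
parsed = BeautifulSoup(html, 'html5lib')
print(parsed.find('meta', attrs={'name': 'last_modified'}).get('content'))   # 2022-02-20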
d4c09568cab388bbf604721842f2e40ac637930a | 110 | py | Python | spark/spark_controler/__init__.py | kcrandall/Kaggle_Mercedes_Manufacturing | f1a2827f7aa145c1df057ab5035cff45e877e785 | ["MIT"] | 9 | 2017-10-19T22:21:16.000Z | 2022-03-02T21:37:51.000Z | spark/spark_controler/__init__.py | kcrandall/Kaggle_Mercedes_Manufacturing | f1a2827f7aa145c1df057ab5035cff45e877e785 | ["MIT"] | null | null | null | spark/spark_controler/__init__.py | kcrandall/Kaggle_Mercedes_Manufacturing | f1a2827f7aa145c1df057ab5035cff45e877e785 | ["MIT"] | 5 | 2018-07-12T21:05:21.000Z | 2021-04-18T14:15:34.000Z |
from . import *
# from ec2_instance_data_dict import ec2_data_dict
# from emr_controller import EMRController
| 27.5 | 50 | 0.836364 |
a3c9d9e4afdfba458c10e975ef4711feeb7ebc12 | 216 | py | Python | mapchete_xarray/__init__.py | wankoelias/mapchete_xarray | d225cfcc78fad10767c3cbc755bc825e3110dfae | ["MIT"] | null | null | null | mapchete_xarray/__init__.py | wankoelias/mapchete_xarray | d225cfcc78fad10767c3cbc755bc825e3110dfae | ["MIT"] | null | null | null | mapchete_xarray/__init__.py | wankoelias/mapchete_xarray | d225cfcc78fad10767c3cbc755bc825e3110dfae | ["MIT"] | null | null | null |
from mapchete_xarray._xarray import (
InputTile,
METADATA,
OutputDataWriter,
OutputDataReader,
)
__all__ = ["InputTile", "METADATA", "OutputDataWriter", "OutputDataReader"]
__version__ = "2021.11.0"
| 21.6 | 75 | 0.717593 |
ce1293aefc0d7f3890cd5ad49dfd51296b76e040 | 2,076 | py | Python | vodloader_chapters.py | FuckBrains/vodloader | 5bed341a0c64bc4b77f9a0530924a3ba73d5be2d | ["MIT"] | null | null | null | vodloader_chapters.py | FuckBrains/vodloader | 5bed341a0c64bc4b77f9a0530924a3ba73d5be2d | ["MIT"] | null | null | null | vodloader_chapters.py | FuckBrains/vodloader | 5bed341a0c64bc4b77f9a0530924a3ba73d5be2d | ["MIT"] | 1 | 2021-07-09T12:50:25.000Z | 2021-07-09T12:50:25.000Z |
import datetime
from math import floor
from os import stat
class vodloader_chapters(object):
def __init__(self, game, title):
self.start_time = datetime.datetime.now()
self.timestamps = [('00:00:00', game, title)]
def __len__(self):
return self.timestamps.__len__()
def append(self, game, title):
delta = datetime.datetime.now() - self.start_time
timestamp = self.get_timestamp_from_sec(delta.seconds)
self.timestamps.append((timestamp, game, title))
def get_games(self):
games = list(map(lambda x :x[1], self.timestamps))
out = []
[out.append(x) for x in games if x not in out]
return out
def get_current_game(self):
return self.timestamps[-1][1]
def get_current_title(self):
return self.timestamps[-1][2]
def get_first_game(self):
return self.timestamps[0][1]
def get_first_title(self):
return self.timestamps[0][2]
def get_game_chapters(self):
out = f'{self.timestamps[0][0]} {self.timestamps[0][1]}\n'
count = 1
for i in range(1, len(self.timestamps)):
if self.timestamps[i][1] != self.timestamps[i-1][1]:
out += f'{self.timestamps[i][0]} {self.timestamps[i][1]}\n'
count += 1
if count > 2:
return out
else:
return None
def get_title_chapters(self):
out = f'{self.timestamps[0][0]} {self.timestamps[0][2]}\n'
count = 1
for i in range(1, len(self.timestamps)):
if self.timestamps[i][2] != self.timestamps[i-1][2]:
out += f'{self.timestamps[i][0]} {self.timestamps[i][2]}\n'
count += 1
if count > 2:
return out
else:
return None
@staticmethod
def get_timestamp_from_sec(seconds):
hours = floor(seconds/3600)
mins = floor(seconds%3600/60)
secs = floor(seconds%60)
        timestamp = f'{str(hours).zfill(2)}:{str(mins).zfill(2)}:{str(secs).zfill(2)}'
        return timestamp
| 30.985075 | 86 | 0.572736 |
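The static helper get_timestamp_from_sec above converts an elapsed-seconds count into the HH:MM:SS strings stored in self.timestamps. A tiny worked example (editor's sketch, assuming the class is importable from vodloader_chapters.py):

# Editor's sketch: 3725 s = 1 h 2 min 5 s.
from vodloader_chapters import vodloader_chapters

print(vodloader_chapters.get_timestamp_from_sec(3725))   # 01:02:05
print(vodloader_chapters.get_timestamp_from_sec(59))     # 00:00:59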
3270da860aaed8043b308b31a5770964cff934ce | 2,826 | py | Python | examples/ex_sndcard.py | fspacheco/zignal | 19ac50157a276e9640e362b0472a5e209dfe6709 | ["MIT"] | null | null | null | examples/ex_sndcard.py | fspacheco/zignal | 19ac50157a276e9640e362b0472a5e209dfe6709 | ["MIT"] | null | null | null | examples/ex_sndcard.py | fspacheco/zignal | 19ac50157a276e9640e362b0472a5e209dfe6709 | ["MIT"] | null | null | null |
'''
Created on 16 Feb 2015
This example will play some audio on the system standard sound card.
@author: Ronny Andersson ([email protected])
@copyright: (c) 2015 Ronny Andersson
@license: MIT
'''
# standard library
from __future__ import print_function
import logging
# custom libraries
import zignal.sndcard
def ex_1_play():
# The recommended way of creating and using a sndcard instance is by using the
# "with" statement. This will make sure that the instance is closed correctly
# after usage. See http://effbot.org/zone/python-with-statement.htm
#
# This example plays the audio on the default device
fs = 44100
x = zignal.Sinetone(f0=400, fs=fs, duration=1.5, gaindb=-12)
x2 = zignal.Sinetone(f0=900, fs=fs, duration=1.5, gaindb=-18)
x.append(x2)
x.convert_to_float(targetbits=32)
with zignal.sndcard.PA(device_in ='default', device_out='default') as snd:
# using an assert here helps PyDev in eclipse when pressing ctrl+space for autocomplete
assert isinstance(snd, zignal.sndcard._Device)
snd.play(x)
def ex_2_play():
# Another way of using a sndcard is by first creating an instance and
# manually calling the open() function. The close() function *must* be
# called in a controlled fashion. This usually means that the usage is
# wrapped in a try-except-finally clause.
fs = 44100
x = zignal.Sinetone(f0=700, fs=fs, duration=1.0, gaindb=-24)
xn = zignal.Noise(channels=1, fs=fs, duration=1.0, gaindb=-12, colour='pink')
x.append(xn)
x.convert_to_integer(targetbits=16)
snd = zignal.sndcard.PA()
print(snd)
snd.open()
try:
snd.play(x)
finally:
snd.close()
def ex_3_play_rec():
# Play and record at the same time
fs = 44100
x = zignal.Sinetone(f0=500, fs=fs, duration=1.5, gaindb=-12)
x.convert_to_float(targetbits=32)
with zignal.sndcard.PA(device_in ='default', device_out='default') as snd:
# using an assert here helps PyDev in eclipse when pressing ctrl+space for autocomplete
assert isinstance(snd, (zignal.sndcard.PA))
y = snd.play_rec(x, frames_per_buffer=32)
print(y)
y.plot()
def ex_4_rec():
# Record
fs = 44100
with zignal.sndcard.PA(device_in ='default') as snd:
# using an assert here helps PyDev in eclipse when pressing ctrl+space for autocomplete
assert isinstance(snd, (zignal.sndcard.PA))
print("recording...")
y = snd.rec(duration=3.5, channels=1, fs=fs)
print(y)
y.plot()
if __name__ == '__main__':
logging.basicConfig(format='%(levelname)-7s: %(module)s.%(funcName)-15s %(message)s',
level='DEBUG')
ex_1_play()
ex_2_play()
ex_3_play_rec()
ex_4_rec()
print('++ End of script ++')
| 29.4375 | 95 | 0.666667 |
567427f3763e2f6349852cd5ff04e62d71545d8f | 3,103 | py | Python | bertopic/_utils.py | yingzwang/BERTopic | cd98fc8d22ab1eba593c518278ce479d2879c372 | ["MIT"] | 2,189 | 2020-10-05T15:22:16.000Z | 2022-03-31T14:49:49.000Z | bertopic/_utils.py | Zura1z/BERTopic | 05a6790b21009d1704e912e0d9ae22290694cfed | ["MIT"] | 463 | 2020-10-07T16:20:03.000Z | 2022-03-31T12:47:26.000Z | bertopic/_utils.py | Zura1z/BERTopic | 05a6790b21009d1704e912e0d9ae22290694cfed | ["MIT"] | 317 | 2020-10-06T13:52:25.000Z | 2022-03-31T04:29:43.000Z |
import numpy as np
import logging
from collections.abc import Iterable
from scipy.sparse.csr import csr_matrix
class MyLogger:
def __init__(self, level):
self.logger = logging.getLogger('BERTopic')
self.set_level(level)
self._add_handler()
self.logger.propagate = False
def info(self, message):
self.logger.info("{}".format(message))
def set_level(self, level):
levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
if level in levels:
self.logger.setLevel(level)
def _add_handler(self):
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(message)s'))
self.logger.addHandler(sh)
# Remove duplicate handlers
if len(self.logger.handlers) > 1:
self.logger.handlers = [self.logger.handlers[0]]
def check_documents_type(documents):
""" Check whether the input documents are indeed a list of strings """
if isinstance(documents, Iterable) and not isinstance(documents, str):
if not any([isinstance(doc, str) for doc in documents]):
raise TypeError("Make sure that the iterable only contains strings.")
else:
raise TypeError("Make sure that the documents variable is an iterable containing strings only.")
def check_embeddings_shape(embeddings, docs):
""" Check if the embeddings have the correct shape """
if embeddings is not None:
if not any([isinstance(embeddings, np.ndarray), isinstance(embeddings, csr_matrix)]):
raise ValueError("Make sure to input embeddings as a numpy array or scipy.sparse.csr.csr_matrix. ")
else:
if embeddings.shape[0] != len(docs):
raise ValueError("Make sure that the embeddings are a numpy array with shape: "
"(len(docs), vector_dim) where vector_dim is the dimensionality "
"of the vector embeddings. ")
def check_is_fitted(model):
""" Checks if the model was fitted by verifying the presence of self.matches
Arguments:
model: BERTopic instance for which the check is performed.
Returns:
None
Raises:
ValueError: If the matches were not found.
"""
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator.")
if not model.topics:
raise ValueError(msg % {'name': type(model).__name__})
class NotInstalled:
"""
This object is used to notify the user that additional dependencies need to be
installed in order to use the string matching model.
"""
def __init__(self, tool, dep):
self.tool = tool
self.dep = dep
msg = f"In order to use {self.tool} you'll need to install via;\n\n"
msg += f"pip install bertopic[{self.dep}]\n\n"
self.msg = msg
def __getattr__(self, *args, **kwargs):
raise ModuleNotFoundError(self.msg)
def __call__(self, *args, **kwargs):
raise ModuleNotFoundError(self.msg)
| 34.865169 | 111 | 0.642282 |
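The NotInstalled class above is a lazy stand-in for an optional dependency: importing the package keeps working, and the helpful error only fires when the missing feature is actually used. A sketch of the guarded-import pattern it is designed for (editor's illustration; some_optional_backend is a made-up module name):

# Editor's sketch: wire NotInstalled into a guarded import.
try:
    import some_optional_backend as backend     # hypothetical optional dependency
except ImportError:
    backend = NotInstalled("the optional backend", "optional")

# The import above never raises; only touching the stub does:
# backend.do_something()  -> ModuleNotFoundError with a "pip install bertopic[optional]" hint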
6bb4a4ead36b39b38f3b336cba285e81037471e8 | 3,783 | py | Python | trigger.py | damiantaranto/ASL-GCP-mubi-movies | 0cbb310aa7995b2d58aa37a78852346af9224d08 | ["MIT"] | null | null | null | trigger.py | damiantaranto/ASL-GCP-mubi-movies | 0cbb310aa7995b2d58aa37a78852346af9224d08 | ["MIT"] | null | null | null | trigger.py | damiantaranto/ASL-GCP-mubi-movies | 0cbb310aa7995b2d58aa37a78852346af9224d08 | ["MIT"] | null | null | null |
import argparse
import json
import logging
import os
import distutils.util
from typing import Optional, List
from google.cloud import aiplatform
def trigger_pipeline_from_payload(payload: dict) -> aiplatform.PipelineJob:
payload = convert_payload(payload)
env = get_env()
return trigger_pipeline(
project_id=env["project_id"],
location=env["location"],
template_path=payload["attributes"]["template_path"],
parameter_values=payload["data"],
pipeline_root=env["pipeline_root"],
service_account=env["service_account"],
enable_caching=payload["attributes"]["enable_caching"],
)
def trigger_pipeline(
project_id: str,
location: str,
template_path: str,
pipeline_root: str,
service_account: str,
parameter_values: dict = {},
enable_caching: Optional[bool] = None,
) -> aiplatform.PipelineJob:
# Initialise API client
aiplatform.init(project=project_id, location=location)
# Instantiate PipelineJob object
pl = aiplatform.pipeline_jobs.PipelineJob(
# Display name is required but seemingly not used
# see
# https://github.com/googleapis/python-aiplatform/blob/9dcf6fb0bc8144d819938a97edf4339fe6f2e1e6/google/cloud/aiplatform/pipeline_jobs.py#L260 # noqa
display_name=template_path,
enable_caching=enable_caching,
template_path=template_path,
parameter_values=parameter_values,
pipeline_root=pipeline_root,
)
# Execute pipeline in Vertex
pl.submit(
service_account=service_account,
)
# pl.submit()
return pl
def convert_payload(payload: dict) -> dict:
"""
Processes the payload dictionary.
Converts enable_caching and adds their defaults if they are missing.
Args:
payload (dict): Cloud Function event payload,
or the contents of a payload JSON file
"""
# make a copy of the payload so we are not modifying the original
payload = payload.copy()
# if payload["data"] is missing, add it as empty dict
payload["data"] = payload.get("data", {})
# if enable_caching value is in attributes, convert from str to bool
# otherwise, it needs to be None
if "enable_caching" in payload["attributes"]:
payload["attributes"]["enable_caching"] = bool(
distutils.util.strtobool(payload["attributes"]["enable_caching"])
)
else:
payload["attributes"]["enable_caching"] = None
return payload
def get_env() -> dict:
"""Get the necessary environment variables for pipeline runs,
and return them as a dictionary.
"""
project_id = os.environ["VERTEX_PROJECT_ID"]
location = os.environ["VERTEX_LOCATION"]
pipeline_root = os.environ["VERTEX_PIPELINE_ROOT"]
service_account = os.environ["VERTEX_SA_EMAIL"]
return {
"project_id": project_id,
"location": location,
"pipeline_root": pipeline_root,
"service_account": service_account,
}
# python trigger.py --payload=./pipeline/config/config.json
def get_args(args: List[str] = None) -> argparse.Namespace:
"""Get args from command line args
Args:
event (dict): Event payload.
context (google.cloud.functions.Context): Metadata for the event.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--payload", help="Path to the config JSON file", type=str)
return parser.parse_args(args)
def sandbox_run() -> aiplatform.PipelineJob:
logging.basicConfig(level=logging.DEBUG)
args = get_args()
# Load JSON payload into a dictionary
with open(args.payload, "r") as f:
payload = json.load(f)
return trigger_pipeline_from_payload(payload)
if __name__ == "__main__":
sandbox_run()
| 28.659091 | 156 | 0.684113 |
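convert_payload above only normalises the incoming event: it fills in an empty data dict when none is present and turns the enable_caching attribute from a string into a bool. A worked example (editor's sketch, with convert_payload from the script above in scope; the template path is a made-up value):

# Editor's sketch: what convert_payload returns for a minimal Pub/Sub-style event.
payload = {
    "attributes": {
        "template_path": "gs://example-bucket/pipeline.json",   # hypothetical path
        "enable_caching": "true",
    }
}
converted = convert_payload(payload)
print(converted["data"])                          # {}  (added because it was missing)
print(converted["attributes"]["enable_caching"])  # True (converted via distutils strtobool)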
41d6452f85a247f61b64d6465e4920552d0389ce | 1,534 | py | Python | chrome/test/enterprise/e2e/policy/translate_enabled/translate_enabled_webdriver_test.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | null | null | null | chrome/test/enterprise/e2e/policy/translate_enabled/translate_enabled_webdriver_test.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | null | null | null | chrome/test/enterprise/e2e/policy/translate_enabled/translate_enabled_webdriver_test.py | sarang-apps/darshan_browser | 173649bb8a7c656dc60784d19e7bb73e07c20daa | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | null | null | null |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import time
from absl import app, flags
from selenium import webdriver
from pywinauto.application import Application
from pywinauto.findwindows import ElementNotFoundError
import test_util
# A URL that is in a different language than our Chrome language.
URL = "https://zh.wikipedia.org/wiki/Chromium"
FLAGS = flags.FLAGS
flags.DEFINE_bool('incognito', False,
'Set flag to open Chrome in incognito mode.')
def main(argv):
os.system('start chrome --remote-debugging-port=9222')
options = webdriver.ChromeOptions()
# Add option for connecting chromedriver with Chrome
options.add_experimental_option("debuggerAddress", "localhost:9222")
driver = test_util.create_chrome_webdriver(
chrome_options=options, incognito=FLAGS.incognito)
driver.get(URL)
time.sleep(10)
translatePopupVisible = None
try:
app = Application(backend="uia")
app.connect(title_re='.*Chrome|.*Chromium')
app.top_window() \
.child_window(title="Translate this page?", control_type="Pane") \
.print_control_identifiers()
translatePopupVisible = True
except ElementNotFoundError as error:
translatePopupVisible = False
finally:
driver.quit()
os.system('taskkill /f /im chrome.exe')
if translatePopupVisible:
print "TRUE"
else:
print "FALSE"
if __name__ == '__main__':
app.run(main)
| 26.912281 | 73 | 0.734029 |
7338b21f3efcf83231098517cb362d88b89afac3 | 8,497 | py | Python | scripts/predict.py | mrauha/af2_conformations | d60db86886186e80622deaa91045caccaf4103d3 | ["MIT"] | 35 | 2021-11-23T12:35:15.000Z | 2022-03-26T22:09:21.000Z | scripts/predict.py | mrauha/af2_conformations | d60db86886186e80622deaa91045caccaf4103d3 | ["MIT"] | 1 | 2021-12-03T17:55:34.000Z | 2021-12-03T18:41:25.000Z | scripts/predict.py | mrauha/af2_conformations | d60db86886186e80622deaa91045caccaf4103d3 | ["MIT"] | 9 | 2021-11-23T07:51:38.000Z | 2022-03-10T04:21:46.000Z |
from . import util
import os
import numpy as np
import random
import sys
from alphafold.common import protein
from alphafold.model import data
from alphafold.model import config
from alphafold.model import model
from typing import Any, List, Mapping, NoReturn
from absl import logging
def set_config(
use_templates: bool,
max_msa_clusters: int,
max_extra_msa: int,
max_recycles: int,
model_id: int,
n_struct_module_repeats: int,
n_features_in: int,
monomer: bool = True,
model_params: int = 0,
) -> model.RunModel:
r"""Generated Runner object for AlphaFold
Parameters
----------
use_templates : Whether templates are used
max_msa_cluster : How many sequences to use in MSA
max_extra_msa : How many extra sequences to include for summary stats
max_recycles : Number of recycling iterations
model_id : Which AF2 model to use
n_struct_module_repeats : Number of passes through structure module
n_features_in : Unclear
monomer : Predicting as a monomer (set to False if using AlphaFold-multimer)
model_params : Which AF2 model config to use
Returns
----------
AlphaFold RunModel object
"""
if model_id not in range(1, 6):
logging.warning("model_id must be between 1 and 5!")
if use_templates:
model_id = random.randint(1, 2)
else:
model_id = random.randint(1, 5)
# Match model_params to model_id
# Sometimes we don't want to do this, for example,
# to reproduce output from ColabFold (which only uses models 1 and 3)
name = f"model_{ model_params }_ptm"
if not monomer:
name = f"model_{ model_params }_multimer"
cfg = config.model_config(name)
#### Provide config settings
#### MSAs
cfg.data.eval.num_ensemble = 1
if max_msa_clusters > 0:
cfg.data.eval.max_msa_clusters = min(n_features_in, max_msa_clusters)
if max_extra_msa > 0:
cfg.data.common.max_extra_msa = max(
1, min(n_features_in - max_msa_clusters, max_extra_msa)
)
#### Recycle and number of iterations
if monomer:
cfg.data.common.num_recycle = max_recycles
cfg.model.num_recycle = max_recycles
cfg.model.heads.structure_module.num_layer = n_struct_module_repeats
#### Templates
t = use_templates # for brevity
cfg.data.common.use_templates = use_templates
cfg.model.embeddings_and_evoformer.template.embed_torsion_angles = t
cfg.model.embeddings_and_evoformer.template.enabled = t
cfg.data.common.reduce_msa_clusters_by_max_templates = t
cfg.data.eval.subsample_templates = t
p = data.get_model_haiku_params(model_name=name, data_dir=".")
logging.debug("Prediction parameters:")
logging.debug("\tModel ID: {}".format(model_id))
logging.debug("\tUsing templates: {}".format(t))
logging.debug(
"\tMaximum MSA clusters: {}".format(cfg.data.eval.max_msa_clusters)
)
logging.debug(
"\tMaximum extra MSA clusters: {}".format(
cfg.data.common.max_extra_msa
)
)
logging.debug(
"\tNumber recycling iterations: {}".format(cfg.model.num_recycle)
)
logging.debug(
"\tNumber of structure module repeats: {}".format(
cfg.model.heads.structure_module.num_layer
)
)
return model.RunModel(cfg, p)
def run_one_job(
runner: model.RunModel, features_in: dict, random_seed: int, outname: str
) -> Mapping[str, Any]:
r"""Runs one AF2 job with input parameters
Parameters
----------
runner : AlphaFold2 job runner
features_in : Input features, including MSA and templates
random_seed : Random seed
outname : Name of PDB file to write
Returns
----------
None
"""
# Do one last bit of processing
features = runner.process_features(features_in, random_seed=random_seed)
# Generate the model
result = runner.predict(features, random_seed)
pred = protein.from_prediction(features, result)
# Write to file
to_pdb(outname, pred, result["plddt"], features_in["residue_index"])
return result
def predict_structure_from_templates(
seq: str,
outname: str,
a3m_lines: str,
template_path: str,
model_id: int = -1,
model_params: int = -1,
random_seed: int = -1,
max_msa_clusters: int = -1,
max_extra_msa: int = -1,
max_recycles: int = 3,
n_struct_module_repeats: int = 8,
) -> NoReturn:
r"""Predicts the structure.
Parameters
----------
seq : Sequence
outname : Name of output PDB
a3m_lines : String of entire alignment
template_paths : Where to locate templates
model_id : Which AF2 model to run (must be 1 or 2 for templates)
model_params : Which parameters to provide to AF2 model
random_seed : Random seed
max_msa_clusters : Number of sequences to use
max_extra_msa : Number of extra seqs for summary stats
max_recycles : Number of iterations through AF2
n_struct_module_repeats : Number of passes through structural refinement
move_prefix : Prefix for temporary files (deleted after fxn completion)
Returns
----------
None
"""
if random_seed == -1:
random_seed = random.randrange(sys.maxsize)
if model_id not in (1, 2):
model_id = random.randint(1, 2)
if model_params not in (1, 2):
model_params = random.randint(1, 2)
# Assemble the dictionary of input features
features_in = util.setup_features(
seq, a3m_lines, util.mk_template(seq, a3m_lines, template_path).features
)
# Run the models
model_runner = set_config(
True,
max_msa_clusters,
max_extra_msa,
max_recycles,
model_id,
n_struct_module_repeats,
len(features_in["msa"]),
model_params=model_params,
)
result = run_one_job(model_runner, features_in, random_seed, outname)
del model_runner
return result
def predict_structure_no_templates(
seq: str,
outname: str,
a3m_lines: str,
model_id: int = -1,
model_params: int = -1,
random_seed: int = -1,
max_msa_clusters: int = -1,
max_extra_msa: int = -1,
max_recycles: int = 3,
n_struct_module_repeats: int = 8,
) -> NoReturn:
r"""Predicts the structure.
Parameters
----------
seq : Sequence
outname : Name of output PDB
a3m_lines : String of entire alignment
model_id : Which AF2 model to run (must be 1 or 2 for templates)
random_seed : Random seed
max_msa_clusters : Number of sequences to use
max_extra_msa : Number of extra seqs for summary stats
max_recycles : Number of iterations through AF2
n_struct_module_repeats : Number of passes through structural refinement
Returns
----------
None
"""
# Set AF2 model details
if model_id not in range(1, 6):
model_id = random.randint(1, 5)
if model_params not in range(1, 6):
model_params = model_id
if random_seed == -1:
random_seed = random.randrange(sys.maxsize)
features_in = util.setup_features(seq, a3m_lines, util.mk_mock_template(seq))
model_runner = set_config(
False,
max_msa_clusters,
max_extra_msa,
max_recycles,
model_id,
n_struct_module_repeats,
len(features_in["msa"]),
model_params=model_params,
)
result = run_one_job(model_runner, features_in, random_seed, outname)
del model_runner
return result
def to_pdb(
outname, pred, plddts, res_idx # type unknown but check? # type unknown but check?
) -> NoReturn:
r"""Writes unrelaxed PDB to file
Parameters
----------
outname : Name of output PDB
pred : Prediction to write to PDB
plddts : Predicted errors
res_idx : Residues to print (default=all)
Returns
----------
None
"""
with open(outname, "w") as outfile:
outfile.write(protein.to_pdb(pred))
with open(f"b_{ outname }", "w") as outfile:
for line in open(outname, "r").readlines():
if line[0:6] == "ATOM ":
seq_id = int(line[22:26].strip()) - 1
seq_id = np.where(res_idx == seq_id)[0][0]
outfile.write(
"{}A{}{:6.2f}{}".format(
line[:21], line[22:60], plddts[seq_id], line[66:]
)
)
os.rename(f"b_{ outname }", outname)
| 26.720126 | 88 | 0.649053 |
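The two entry points above differ only in whether a template featurisation is attached before running the model. Roughly how the no-template path could be driven (editor's sketch, not from the repository: the peptide sequence and the single-sequence a3m string are toy placeholders, and a real run also needs the AlphaFold parameter files that data.get_model_haiku_params expects under the working directory):

# Editor's sketch of a call into predict_structure_no_templates; values are placeholders.
seq = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"     # made-up test sequence
a3m_lines = ">query\n" + seq                   # minimal single-sequence alignment

result = predict_structure_no_templates(
    seq,
    "prediction.pdb",      # written by to_pdb()
    a3m_lines,
    max_recycles=1,        # keep the sketch cheap
)
print(result["plddt"].mean())                  # mean per-residue confidence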
ed156dbe46c16e8eaaaaec584376845b7bc30f05 | 2,015 | py | Python | h2o-py/tests/testdir_algos/gbm/pyunit_offset_init_train_gbm.py | Hasan-Ibrahim/h2o-3 | 00db449775991095c90641c5dcb864fab41ffa50 | ["Apache-2.0"] | null | null | null | h2o-py/tests/testdir_algos/gbm/pyunit_offset_init_train_gbm.py | Hasan-Ibrahim/h2o-3 | 00db449775991095c90641c5dcb864fab41ffa50 | ["Apache-2.0"] | null | null | null | h2o-py/tests/testdir_algos/gbm/pyunit_offset_init_train_gbm.py | Hasan-Ibrahim/h2o-3 | 00db449775991095c90641c5dcb864fab41ffa50 | ["Apache-2.0"] | null | null | null |
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def offset_init_train_gbm():
# Connect to a pre-existing cluster
cars = h2o.upload_file(pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
cars = cars[cars["economy_20mpg"].isna() == 0]
cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()
offset = h2o.H2OFrame([[.5]]*398)
offset.set_names(["x1"])
cars = cars.cbind(offset)
# offset_column passed in the train method
gbm_train = H2OGradientBoostingEstimator(ntrees=1, max_depth=1, min_rows=1, learn_rate=1)
gbm_train.train(x=list(range(2,8)),y="economy_20mpg", training_frame=cars, offset_column="x1")
predictions_train = gbm_train.predict(cars)
# test offset_column passed in estimator init
gbm_init = H2OGradientBoostingEstimator(ntrees=1, max_depth=1, min_rows=1, learn_rate=1, offset_column="x1")
gbm_init.train(x=list(range(2,8)),y="economy_20mpg", training_frame=cars)
predictions_init = gbm_init.predict(cars)
# test case the both offset column parameters are set the parameter in train will be used
gbm_init_train = H2OGradientBoostingEstimator(ntrees=1, max_depth=1, min_rows=1,learn_rate=1, offset_column="x1")
gbm_init_train.train(x=list(range(2,8)),y="economy_20mpg", training_frame=cars, offset_column="x1")
predictions_init_train = gbm_init_train.predict(cars)
assert predictions_train == predictions_init, "Expected predictions of a model with offset_column in train method has to be same as predictions of a model with offset_column in constructor."
assert predictions_train == predictions_init_train, "Expected predictions of a model with offset_column in train method has to be same as predictions of a model with offset_column in both constructor and init."
if __name__ == "__main__":
pyunit_utils.standalone_test(offset_init_train_gbm)
else:
offset_init_train_gbm()
| 49.146341 | 214 | 0.760298 |
a6dbeb16b20ba3fac02a8cd7e56967f4011c86bb | 5,685 | py | Python | dlutils/models/pytorch/deepDrivingNetwork.py | chelseajohn/dlapplication | d2eaba9077320f5a33e122b99691577fe899e1d6 | ["Apache-2.0"] | 2 | 2020-05-07T05:08:54.000Z | 2020-05-13T10:14:53.000Z | dlutils/models/pytorch/deepDrivingNetwork.py | chelseajohn/dlapplication | d2eaba9077320f5a33e122b99691577fe899e1d6 | ["Apache-2.0"] | null | null | null | dlutils/models/pytorch/deepDrivingNetwork.py | chelseajohn/dlapplication | d2eaba9077320f5a33e122b99691577fe899e1d6 | ["Apache-2.0"] | 3 | 2020-05-06T18:49:37.000Z | 2020-07-13T05:11:56.000Z |
import torch.nn as nn
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.nn.init as init
import numpy as np
import random
class DeepDrivingNet(nn.Module):
def __init__(self):
super(DeepDrivingNet, self).__init__()
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
np.random.seed(42)
random.seed(42)
torch.backends.cudnn.deterministic=True
self.conv1 = torch.nn.Conv2d(1, 24, kernel_size=5, stride=2, padding=0)
init.xavier_normal_(self.conv1.weight.data)
init.zeros_(self.conv1.bias.data)
self.conv2 = torch.nn.Conv2d(24, 36, kernel_size=5, stride=2, padding=0)
init.xavier_normal_(self.conv2.weight.data)
init.zeros_(self.conv2.bias.data)
self.conv3 = torch.nn.Conv2d(36, 48, kernel_size=5, stride=2, padding=0)
init.xavier_normal_(self.conv3.weight.data)
init.zeros_(self.conv3.bias.data)
self.conv4 = torch.nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=0)
init.xavier_normal_(self.conv4.weight.data)
init.zeros_(self.conv4.bias.data)
self.conv5 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0)
init.xavier_normal_(self.conv5.weight.data)
init.zeros_(self.conv5.bias.data)
self.fc1 = torch.nn.Linear(64 * 2790, 100)
init.xavier_normal_(self.fc1.weight.data)
init.zeros_(self.fc1.bias.data)
self.fc2 = torch.nn.Linear(100, 50)
init.xavier_normal_(self.fc2.weight.data)
init.zeros_(self.fc2.bias.data)
self.fc3 = torch.nn.Linear(50, 10)
init.xavier_normal_(self.fc3.weight.data)
init.zeros_(self.fc3.bias.data)
self.fc4 = torch.nn.Linear(10, 1)
init.xavier_normal_(self.fc4.weight.data)
init.zeros_(self.fc4.bias.data)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = F.relu(self.conv5(x))
x = x.view(-1, 64 * 2790)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return(x)
def __str__(self):
return "Deep Driving CNN"
class DrivingCNNBatchNorm(torch.nn.Module):
def __init__(self):
super(DrivingCNNBatchNorm, self).__init__()
torch.manual_seed(42)
torch.cuda.manual_seed_all(42)
np.random.seed(42)
random.seed(42)
torch.backends.cudnn.deterministic=True
self.conv1 = torch.nn.Conv2d(3, 24, kernel_size=5, stride=2, padding=0)
init.xavier_normal_(self.conv1.weight.data)
init.zeros_(self.conv1.bias.data)
self.conv1_bn = torch.nn.BatchNorm2d(24)
self.conv2 = torch.nn.Conv2d(24, 36, kernel_size=5, stride=2, padding=0)
init.xavier_normal_(self.conv2.weight.data)
init.zeros_(self.conv2.bias.data)
self.conv2_bn = torch.nn.BatchNorm2d(36)
self.conv3 = torch.nn.Conv2d(36, 48, kernel_size=5, stride=2, padding=0)
init.xavier_normal_(self.conv3.weight.data)
init.zeros_(self.conv3.bias.data)
self.conv3_bn = torch.nn.BatchNorm2d(48)
self.conv4 = torch.nn.Conv2d(48, 64, kernel_size=3, stride=1, padding=0)
#self.conv4 = torch.nn.Conv2d(48, 64, kernel_size=5, stride=2, padding=0)
init.xavier_normal_(self.conv4.weight.data)
init.zeros_(self.conv4.bias.data)
self.conv4_bn = torch.nn.BatchNorm2d(64)
self.conv5 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0)
#self.conv5 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=0)
init.xavier_normal_(self.conv5.weight.data)
init.zeros_(self.conv5.bias.data)
self.conv5_bn = torch.nn.BatchNorm2d(64)
        self.fc1 = torch.nn.Linear(2 * 32 * 1302, 100)  # flattened conv5 output: 2*32*1302 = 83328 features
init.xavier_normal_(self.fc1.weight.data)
init.zeros_(self.fc1.bias.data)
self.fc1_bn = torch.nn.BatchNorm1d(100)
self.fc2 = torch.nn.Linear(100, 50)
init.xavier_normal_(self.fc2.weight.data)
init.zeros_(self.fc2.bias.data)
self.fc2_bn = torch.nn.BatchNorm1d(50)
self.fc3 = torch.nn.Linear(50, 10)
init.xavier_normal_(self.fc3.weight.data)
init.zeros_(self.fc3.bias.data)
self.fc3_bn = torch.nn.BatchNorm1d(10)
self.fc4 = torch.nn.Linear(10, 1)
init.xavier_normal_(self.fc4.weight.data)
init.zeros_(self.fc4.bias.data)
    def forward(self, x):
        x = F.relu(self.conv1_bn(self.conv1(x)))
        x = F.relu(self.conv2_bn(self.conv2(x)))
        x = F.relu(self.conv3_bn(self.conv3(x)))
        x = F.relu(self.conv4_bn(self.conv4(x)))
        x = F.relu(self.conv5_bn(self.conv5(x)))
        x = x.view(-1, 2 * 32 * 1302)  # flatten the conv5 output (83328 features) for the fully connected head
        x = F.relu(self.fc1_bn(self.fc1(x)))
        x = F.relu(self.fc2_bn(self.fc2(x)))
        x = F.relu(self.fc3_bn(self.fc3(x)))
        x = self.fc4(x)
        return x
def __str__(self):
return "Deep Driving CNN with batch normalization"
| 36.442308 | 106 | 0.598945 |
70d0d489a8e9dc8e38b438fff8d9f6a0ea56baef
| 109,110 |
py
|
Python
|
src/transformers/modeling_tf_utils.py
|
gante/transformers
|
dfc76b25426d75d5dce489bd18cfd6a51fb01b97
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/modeling_tf_utils.py
|
gante/transformers
|
dfc76b25426d75d5dce489bd18cfd6a51fb01b97
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/modeling_tf_utils.py
|
gante/transformers
|
dfc76b25426d75d5dce489bd18cfd6a51fb01b97
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF general model utils."""
import functools
import inspect
import os
import pickle
import re
import warnings
from collections.abc import Mapping
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.engine.keras_tensor import KerasTensor
from tensorflow.python.keras.saving import hdf5_format
from huggingface_hub import Repository, list_repo_files
from requests import HTTPError
from . import DataCollatorWithPadding, DefaultDataCollator
from .activations_tf import get_tf_activation
from .configuration_utils import PretrainedConfig
from .dynamic_module_utils import custom_object_save
from .generation_tf_utils import TFGenerationMixin
from .tf_utils import shape_list
from .utils import (
DUMMY_INPUTS,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
TF2_WEIGHTS_NAME,
WEIGHTS_NAME,
EntryNotFoundError,
ModelOutput,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_path,
copy_func,
find_labels,
has_file,
hf_bucket_url,
is_offline_mode,
is_remote_url,
logging,
requires_backends,
)
if TYPE_CHECKING:
from . import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
tf_logger = tf.get_logger()
TFModelInputType = Union[
List[tf.Tensor],
List[np.ndarray],
List[KerasTensor],
Dict[str, tf.Tensor],
Dict[str, np.ndarray],
Dict[str, KerasTensor],
tf.Tensor,
np.ndarray,
KerasTensor,
]
def dummy_loss(y_true, y_pred):
return tf.reduce_mean(y_pred)
class TFModelUtilsMixin:
"""
A few utilities for `tf.keras.Model`, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get the number of (optionally, trainable) parameters in the model.
Args:
only_trainable (`bool`, *optional*, defaults to `False`):
Whether or not to return only the number of trainable parameters
Returns:
`int`: The number of parameters.
"""
if only_trainable:
return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables))
else:
return self.count_params()
def keras_serializable(cls):
"""
Decorate a Keras Layer class to support Keras serialization.
This is done by:
1. Adding a `transformers_config` dict to the Keras config dictionary in `get_config` (called by Keras at
       serialization time).
2. Wrapping `__init__` to accept that `transformers_config` dict (passed by Keras at deserialization time) and
convert it to a config object for the actual layer initializer.
3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not
need to be supplied in `custom_objects` in the call to `tf.keras.models.load_model`.
Args:
cls (a `tf.keras.layers.Layers subclass`):
Typically a `TF.MainLayer` class in this project, in general must accept a `config` argument to its
initializer.
Returns:
The same class object, with modifications for Keras deserialization.
"""
initializer = cls.__init__
config_class = getattr(cls, "config_class", None)
if config_class is None:
raise AttributeError("Must set `config_class` to use @keras_serializable")
@functools.wraps(initializer)
def wrapped_init(self, *args, **kwargs):
config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None)
if isinstance(config, dict):
config = config_class.from_dict(config)
initializer(self, config, *args, **kwargs)
elif isinstance(config, PretrainedConfig):
if len(args) > 0:
initializer(self, *args, **kwargs)
else:
initializer(self, config, *args, **kwargs)
else:
raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)")
self._config = config
self._kwargs = kwargs
cls.__init__ = wrapped_init
if not hasattr(cls, "get_config"):
raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses")
if hasattr(cls.get_config, "_is_default"):
def get_config(self):
cfg = super(cls, self).get_config()
cfg["config"] = self._config.to_dict()
cfg.update(self._kwargs)
return cfg
cls.get_config = get_config
cls._keras_serializable = True
if hasattr(tf.keras.utils, "register_keras_serializable"):
cls = tf.keras.utils.register_keras_serializable()(cls)
return cls
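# Illustrative sketch (editor's addition, not part of the library): how the decorator is typically
# applied to a model's main layer. `MyConfig` and the layer body below are placeholders, not real
# classes in this module:
#
#     @keras_serializable
#     class TFMyMainLayer(tf.keras.layers.Layer):
#         config_class = MyConfig
#
#         def __init__(self, config: MyConfig, **kwargs):
#             super().__init__(**kwargs)
#             self.config = config
#
# A Keras model built around such a layer can then be saved and reloaded with
# `tf.keras.models.load_model(...)` without passing `custom_objects`.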
class TFCausalLanguageModelingLoss:
"""
Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token.
<Tip>
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
</Tip>
"""
def hf_compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100 affect the loss
active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
return loss_fn(labels, reduced_logits)
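# Illustrative sketch (editor's addition, not part of the library): the -100 masking performed by
# `hf_compute_loss` above, shown on toy tensors. The vocabulary size of 10 and the shapes are
# assumptions made only for this example.
def _masked_clm_loss_demo():
    labels = tf.constant([[5, -100, 2]])    # -100 marks positions excluded from the loss
    logits = tf.random.uniform((1, 3, 10))  # (batch_size, sequence_length, vocab_size)
    active = tf.not_equal(tf.reshape(labels, (-1,)), -100)
    kept_logits = tf.boolean_mask(tf.reshape(logits, (-1, 10)), active)
    kept_labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active)
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    return loss_fn(kept_labels, kept_logits)  # averaged over the two unmasked positions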
class TFQuestionAnsweringLoss:
"""
Loss function suitable for question answering.
"""
def hf_compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
start_loss = loss_fn(labels["start_position"], logits[0])
end_loss = loss_fn(labels["end_position"], logits[1])
return (start_loss + end_loss) / 2.0
class TFTokenClassificationLoss:
"""
Loss function suitable for token classification.
<Tip>
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
</Tip>
"""
def hf_compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100
# are taken into account as loss
if tf.math.reduce_any(labels == -1):
tf.print("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.")
active_loss = tf.reshape(labels, (-1,)) != -1
else:
active_loss = tf.reshape(labels, (-1,)) != -100
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
return loss_fn(labels, reduced_logits)
class TFSequenceClassificationLoss:
"""
Loss function suitable for sequence classification.
"""
def hf_compute_loss(self, labels, logits):
if len(shape_list(logits)) == 1 or shape_list(logits)[1] == 1:
loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
else:
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
return loss_fn(labels, logits)
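# Illustrative sketch (editor's addition, not part of the library): the dispatch above treats a
# single logit per example as regression (mean squared error) and several logits as classification
# (sparse categorical cross-entropy). Shapes and values are assumptions made only for this example.
def _sequence_classification_loss_demo():
    regression_logits = tf.constant([[0.3], [1.2]])                  # shape (batch, 1) -> MSE
    classification_logits = tf.constant([[0.1, 2.0], [1.5, -0.3]])   # shape (batch, 2) -> CE
    mse = tf.keras.losses.MeanSquaredError()(tf.constant([[0.5], [1.0]]), regression_logits)
    ce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)(
        tf.constant([1, 0]), classification_logits
    )
    return mse, ce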
class TFMultipleChoiceLoss:
"""Loss function suitable for multiple choice tasks."""
def hf_compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
return loss_fn(labels, logits)
class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss):
"""
Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens.
<Tip>
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
</Tip>
"""
class TFNextSentencePredictionLoss:
"""
Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence.
<Tip>
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
</Tip>
"""
def hf_compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100
# are taken into account as loss
next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss)
next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss)
return loss_fn(next_sentence_label, next_sentence_reduced_logits)
def booleans_processing(config, **kwargs):
"""
Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or
graph)
Args:
config ([`PretrainedConfig`]):
The config of the running model.
**kwargs:
The boolean parameters
Returns:
A dictionary with the proper values for each boolean
"""
final_booleans = {}
if tf.executing_eagerly():
# Pure conv models (such as ConvNext) do not have `output_attentions`. If the signature has
# `output_attentions`, it will be present here in `kwargs`, even if unset (in that case, as `None`)
if "output_attentions" in kwargs:
final_booleans["output_attentions"] = (
kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
)
final_booleans["output_hidden_states"] = (
kwargs["output_hidden_states"]
if kwargs["output_hidden_states"] is not None
else config.output_hidden_states
)
final_booleans["return_dict"] = (
kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict
)
if "use_cache" in kwargs:
final_booleans["use_cache"] = (
kwargs["use_cache"] if kwargs["use_cache"] is not None else getattr(config, "use_cache", None)
)
else:
# Pure conv models (such as ConvNext) do not have `output_attentions`. If the signature has
# `output_attentions`, it will be present here in `kwargs`, even if unset (in that case, as `None`)
if "output_attentions" in kwargs:
final_booleans["output_attentions"] = config.output_attentions
final_booleans["output_hidden_states"] = config.output_hidden_states
if kwargs.get("return_dict", None) not in (None, True):
tf_logger.warning(
"The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."
)
final_booleans["return_dict"] = True
if "use_cache" in kwargs:
final_booleans["use_cache"] = getattr(config, "use_cache", None)
return final_booleans
def unpack_inputs(func):
"""
Decorator that processes the inputs to a Keras layer, passing them to the layer as keyword arguments. This enables
downstream use of the inputs by their variable name, even if they arrive packed as a dictionary in the first input
(common case in Keras).
Args:
func (`callable`):
The callable function of the TensorFlow model.
Returns:
A callable that wraps the original `func` with the behavior described above.
"""
original_signature = inspect.signature(func)
@functools.wraps(func)
def run_call_with_unpacked_inputs(self, *args, **kwargs):
# isolates the actual `**kwargs` for the decorated function
kwargs_call = {key: val for key, val in kwargs.items() if key not in dict(original_signature.parameters)}
fn_args_and_kwargs = {key: val for key, val in kwargs.items() if key not in kwargs_call}
fn_args_and_kwargs.update({"kwargs_call": kwargs_call})
# move any arg into kwargs, if they exist
fn_args_and_kwargs.update(dict(zip(func.__code__.co_varnames[1:], args)))
# process the inputs and call the wrapped function
main_input_name = getattr(self, "main_input_name", func.__code__.co_varnames[1])
main_input = fn_args_and_kwargs.pop(main_input_name, None)
unpacked_inputs = input_processing(func, self.config, main_input, **fn_args_and_kwargs)
return func(self, **unpacked_inputs)
# Keras enforces the first layer argument to be passed, and checks it through `inspect.getfullargspec()`. This
# function does not follow wrapper chains (i.e. ignores `functools.wraps()`), meaning that without the line below
# Keras would attempt to check the first argument against the literal signature of the wrapper.
run_call_with_unpacked_inputs.__signature__ = original_signature
return run_call_with_unpacked_inputs
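# Illustrative sketch (editor's addition, not part of the library): a toy object with a `config`
# and a `main_input_name`, showing that a dict packed into the first positional argument is
# unpacked into named keyword arguments, with unset booleans falling back to the config values.
# `_Toy` is a placeholder, not a real model class.
def _unpack_inputs_demo():
    class _Toy:
        config = PretrainedConfig()
        main_input_name = "input_ids"
        @unpack_inputs
        def call(self, input_ids=None, attention_mask=None, output_attentions=None,
                 output_hidden_states=None, return_dict=None):
            return input_ids, attention_mask, return_dict
    toy = _Toy()
    packed = {"input_ids": tf.constant([[1, 2, 3]]), "attention_mask": tf.constant([[1, 1, 1]])}
    return toy.call(packed)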
def input_processing(func, config, input_ids, **kwargs):
"""
Process the input of each TensorFlow model including the booleans. In case of a list of symbolic inputs, each input
has to be named accordingly to the parameters name, i.e. `input_ids = tf.keras.Input(shape=(128,), dtype='int32',
name="input_ids")` otherwise the order of the tensors will not be guaranteed during the training.
Args:
func (`callable`):
The callable function of the TensorFlow model.
config ([`PretrainedConfig`]):
The config of the running model.
**kwargs:
The inputs of the model.
Returns:
        A dictionary with the processed inputs, with the boolean arguments resolved against the model config.
"""
signature = dict(inspect.signature(func).parameters)
has_kwargs = bool(signature.pop("kwargs", None))
signature.pop("self", None)
parameter_names = list(signature.keys())
output = {}
allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray, KerasTensor)
if "inputs" in kwargs["kwargs_call"]:
warnings.warn(
"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
FutureWarning,
)
output["input_ids"] = kwargs["kwargs_call"].pop("inputs")
if "decoder_cached_states" in kwargs["kwargs_call"]:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use"
" `past_key_values` instead.",
FutureWarning,
)
output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")
if "past" in kwargs["kwargs_call"] and "past_key_values" in parameter_names:
warnings.warn(
"The `past` argument is deprecated and will be removed in a future version, use `past_key_values`"
" instead.",
FutureWarning,
)
kwargs["past_key_values"] = kwargs["kwargs_call"].pop("past")
elif "past_key_values" in kwargs["kwargs_call"] and "past" in parameter_names:
kwargs["past"] = kwargs["kwargs_call"].pop("past_key_values")
if has_kwargs:
output["kwargs"] = kwargs.pop("kwargs_call", {})
else:
if len(kwargs["kwargs_call"]) > 0:
raise ValueError(
"The following keyword arguments are not supported by this model:"
f" {list(kwargs['kwargs_call'].keys())}."
)
kwargs.pop("kwargs_call")
for k, v in kwargs.items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
if isinstance(input_ids, (tuple, list)):
for i, input in enumerate(input_ids):
# EagerTensors don't allow to use the .name property so we check for a real Tensor
if type(input) == tf.Tensor:
# Tensor names have always the pattern `name:id` then we check only the
# `name` part
tensor_name = input.name.split(":")[0]
if tensor_name in parameter_names:
output[tensor_name] = input
else:
output[parameter_names[i]] = input
elif isinstance(input, allowed_types) or input is None:
output[parameter_names[i]] = input
else:
raise ValueError(
f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for"
f" {parameter_names[i]}."
)
elif isinstance(input_ids, Mapping):
if "inputs" in input_ids:
warnings.warn(
"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids`"
" instead.",
FutureWarning,
)
output["input_ids"] = input_ids.pop("inputs")
if "decoder_cached_states" in input_ids:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use"
" `past_key_values` instead.",
FutureWarning,
)
output["past_key_values"] = input_ids.pop("decoder_cached_states")
for k, v in dict(input_ids).items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
elif k not in parameter_names and "args" not in parameter_names:
logger.warning(
f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored."
)
continue
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
else:
if isinstance(input_ids, (tf.Tensor, KerasTensor)) or input_ids is None:
output[parameter_names[0]] = input_ids
else:
raise ValueError(
f"Data of type {type(input_ids)} is not allowed only {allowed_types} is accepted for"
f" {parameter_names[0]}."
)
# Populates any unspecified argument with their default value, according to the signature.
for name in parameter_names:
if name not in list(output.keys()) and name != "args":
output[name] = kwargs.pop(name, signature[name].default)
# When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)
# So to respect the proper output we have to add this exception
if "args" in output:
if output["args"] is not None and type(output["args"]) == tf.Tensor:
tensor_name = output["args"].name.split(":")[0]
output[tensor_name] = output["args"]
else:
# `args` in this case is always the first parameter, then `input_ids`
output["input_ids"] = output["args"]
del output["args"]
if "kwargs" in output:
del output["kwargs"]
boolean_dict = {
k: v
for k, v in output.items()
if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
}
output.update(
booleans_processing(
config=config,
**boolean_dict,
)
)
return output
def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
"""
Detect missing and unexpected layers and load the TF weights accordingly to their names and shapes.
Args:
model (`tf.keras.models.Model`):
The model to load the weights into.
resolved_archive_file (`str`):
The location of the H5 file.
ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
            Whether or not to ignore weights with shapes that don't match between the checkpoint and the model.
Returns:
Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the
mismatched layers.
"""
missing_layers = []
unexpected_layers = []
mismatched_layers = []
# Read the H5 file
with h5py.File(resolved_archive_file, "r") as f:
# Retrieve the name of each layer from the H5 file
saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))
# Find the missing layers from the high level list of layers
missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name)
# Find the unexpected layers from the high level list of layers
unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers]))
saved_weight_names_set = set()
symbolic_weights_names = set()
weight_value_tuples = []
# Compute missing and unexpected sub layers
# Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
for layer in model.layers:
# if layer_name from the H5 file belongs to the layers from the instantiated model
if layer.name in saved_h5_model_layers_name:
# Get the H5 layer object from its name
h5_layer_object = f[layer.name]
# Get all the weights as a list from the layer object
symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
saved_weights = {}
# Create a dict from the H5 saved model that looks like {"weight_name": weight_value}
# And a set with only the names
for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"):
# TF names always start with the model name so we ignore it
name = "/".join(weight_name.split("/")[1:])
if _prefix is not None:
name = _prefix + "/" + name
saved_weights[name] = np.asarray(h5_layer_object[weight_name])
# Add the updated name to the final list for computing missing/unexpected values
saved_weight_names_set.add(name)
# Loop over each weights from the instantiated model and compare with the weights from the H5 file
for symbolic_weight in symbolic_weights:
# TF names always start with the model name so we ignore it
if _prefix is not None:
                        delimiter = len(_prefix.split("/"))
                        symbolic_weight_name = "/".join(
                            symbolic_weight.name.split("/")[:delimiter]
                            + symbolic_weight.name.split("/")[delimiter + 1 :]
)
else:
symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:])
# here we check if the current weight is among the weights from the H5 file
# If yes, get the weight_value of the corresponding weight from the H5 file
# If not, make the value to None
saved_weight_value = saved_weights.get(symbolic_weight_name, None)
# Add the updated name to the final list for computing missing/unexpected values
symbolic_weights_names.add(symbolic_weight_name)
# If the current weight is found
if saved_weight_value is not None:
# Check if the shape of the current weight and the one from the H5 file are different
if K.int_shape(symbolic_weight) != saved_weight_value.shape:
# If yes we reshape the weight from the H5 file accordingly to the current weight
# If the two shapes are not compatible we raise an issue
try:
array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
except ValueError as e:
if ignore_mismatched_sizes:
mismatched_layers.append(
(symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
)
continue
else:
raise e
else:
array = saved_weight_value
# We create the tuple that will be loaded and add it to the final list
weight_value_tuples.append((symbolic_weight, array))
# Load all the weights
K.batch_set_value(weight_value_tuples)
# Compute the missing and unexpected layers
missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set))
unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names))
return missing_layers, unexpected_layers, mismatched_layers
def init_copy_embeddings(old_embeddings, new_num_tokens):
r"""
This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case
new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be
kept or not. Example:
- if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4]
- mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1]
- if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5]
- mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4]
"""
old_num_tokens, old_embedding_dim = shape_list(old_embeddings)
size_diff = new_num_tokens - old_num_tokens
# initialize new embeddings
# Copy token embeddings from the previous ones
if tf.math.greater(size_diff, 0):
        # if the new size is greater than the old one, pad the current embeddings up to the new size and
        # build a mask that identifies the padded positions, which will later be replaced by the values of
        # the newly created embeddings
current_weights = tf.pad(
old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1
)
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True)
mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False)
else:
        # if the new size is lower than the old one, we take the current embeddings until the new size
current_weights = tf.slice(
old_embeddings.value(),
tf.convert_to_tensor([0, 0]),
tf.convert_to_tensor([new_num_tokens, old_embedding_dim]),
)
mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True)
return mask, current_weights
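# Illustrative sketch (editor's addition, not part of the library): growing a tiny 4-token,
# 2-dimensional embedding matrix to 6 tokens. The mask flags the copied rows; the two padded rows
# are filled with -1 and are expected to be overwritten with freshly initialized weights.
def _init_copy_embeddings_demo():
    old_embeddings = tf.Variable(tf.ones((4, 2)))
    mask, current = init_copy_embeddings(old_embeddings, new_num_tokens=6)
    # mask has shape (6, 1): True for the 4 copied rows, False for the 2 padded ones;
    # current has shape (6, 2) with the last two rows equal to -1.
    return mask, current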
class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin):
r"""
Base class for all TF models.
[`TFPreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading,
downloading and saving models as well as a few methods common to all models to:
- resize the input embeddings,
- prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class
for this model architecture.
- **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived
classes of the same architecture adding modules on top of the base model.
- **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP
models, `pixel_values` for vision models and `input_values` for speech models).
"""
config_class = None
base_model_prefix = ""
main_input_name = "input_ids"
_auto_class = None
_using_dummy_loss = None
_label_to_output_map = None
# a list of re pattern of tensor names to ignore from the model when loading the model weights
# (and avoid unnecessary warnings).
_keys_to_ignore_on_load_missing = None
# a list of re pattern of tensor names to ignore from the weights when loading the model weights
# (and avoid unnecessary warnings).
_keys_to_ignore_on_load_unexpected = None
_requires_load_weight_prefix = False
@property
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
"""
Dummy inputs to build the network.
Returns:
`Dict[str, tf.Tensor]`: The dummy inputs.
"""
return {
"input_ids": tf.constant(DUMMY_INPUTS),
}
@property
def framework(self) -> str:
"""
:str: Identifies that this is a TensorFlow model.
"""
return "tf"
def __init__(self, config, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
if not isinstance(config, PretrainedConfig):
raise ValueError(
f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
"`PretrainedConfig`. To create a model from a pretrained model use "
f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
)
# Save config and origin of the pretrained weights if given in model
self.config = config
self.name_or_path = config.name_or_path
def get_config(self):
return self.config.to_dict()
@classmethod
def from_config(cls, config, **kwargs):
if isinstance(config, PretrainedConfig):
return cls._from_config(config, **kwargs)
return cls._from_config(cls.config_class.from_dict(config, **kwargs))
@classmethod
def _from_config(cls, config, **kwargs):
"""
All context managers that the model should be initialized under go here.
"""
return cls(config, **kwargs)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs):
"""
Method used for serving the model.
Args:
inputs (`Dict[str, tf.Tensor]`):
The input of the saved model as a dictionary of tensors.
"""
output = self.call(inputs)
return self.serving_output(output)
    def serving_output(self, output):
"""
Prepare the output of the saved model. Each model must implement this function.
Args:
output ([`TFBaseModelOutput`]):
The output returned by the model.
"""
raise NotImplementedError
def get_input_embeddings(self) -> tf.keras.layers.Layer:
"""
Returns the model's input embeddings layer.
Returns:
`tf.Variable`: The embeddings layer mapping vocabulary to hidden states.
"""
main_layer = getattr(self, self.base_model_prefix, self)
if main_layer is not self:
return main_layer.get_input_embeddings()
else:
raise NotImplementedError
def _save_checkpoint(self, checkpoint_dir, epoch):
if not os.path.isdir(checkpoint_dir):
os.mkdir(checkpoint_dir)
# We avoid tf.train.checkpoint or saving weights in TF format, even though that includes optimizer
# state for us, because it requires special handling for objects like custom losses, which we use
# internally and which users are likely to use too
weights_path = os.path.join(checkpoint_dir, "weights.h5")
self.save_weights(weights_path)
extra_data = {"epoch": epoch, "optimizer_state": self.optimizer.get_weights()}
extra_data_path = os.path.join(checkpoint_dir, "extra_data.pickle")
with open(extra_data_path, "wb") as f:
pickle.dump(extra_data, f)
def load_repo_checkpoint(self, repo_path_or_name):
"""
Loads a saved checkpoint (model weights and optimizer state) from a repo. Returns the current epoch count when
the checkpoint was made.
Args:
repo_path_or_name (`str`):
Can either be a repository name for your {object} in the Hub or a path to a local folder (in which case
the repository will have the name of that local folder).
Returns:
`dict`: A dictionary of extra metadata from the checkpoint, most commonly an "epoch" count.
"""
if getattr(self, "optimizer", None) is None:
raise RuntimeError(
"Checkpoint loading failed as no optimizer is attached to the model. "
"This is most likely caused by the model not being compiled."
)
if not os.path.isdir(repo_path_or_name):
# If this isn't a local path, check that the remote repo exists and has a checkpoint in it
repo_files = list_repo_files(repo_path_or_name)
for file in ("checkpoint/weights.h5", "checkpoint/extra_data.pickle"):
if file not in repo_files:
raise FileNotFoundError(f"Repo {repo_path_or_name} does not contain checkpoint file {file}!")
if "/" not in repo_path_or_name:
model_id = repo_path_or_name
repo_path_or_name = self.get_full_repo_name(repo_path_or_name)
else:
model_id = repo_path_or_name.split("/")[-1]
repo = Repository(model_id, clone_from=f"https://huggingface.co/{repo_path_or_name}")
local_dir = repo.local_dir
else:
local_dir = repo_path_or_name
# Now make sure the repo actually has a checkpoint in it.
checkpoint_dir = os.path.join(local_dir, "checkpoint")
weights_file = os.path.join(checkpoint_dir, "weights.h5")
if not os.path.isfile(weights_file):
raise FileNotFoundError(f"Could not find checkpoint file weights.h5 in repo {repo_path_or_name}!")
extra_data_file = os.path.join(checkpoint_dir, "extra_data.pickle")
if not os.path.isfile(extra_data_file):
raise FileNotFoundError(f"Could not find checkpoint file extra_data.pickle in repo {repo_path_or_name}!")
# Assuming the repo is real and we got a checkpoint, load the weights and the optimizer state into the model.
# The optimizer state includes the iteration count, so learning rate schedules should resume as normal too.
self.load_weights(weights_file)
with open(extra_data_file, "rb") as f:
extra_data = pickle.load(f)
self.optimizer.set_weights(extra_data["optimizer_state"])
# Finally, return the epoch number from the checkpoint. This isn't a property of the model, so we can't
# set it directly, but the user can pass it to fit().
return {"epoch": extra_data["epoch"]}
def prepare_tf_dataset(
self,
dataset: "datasets.Dataset", # noqa:F821
batch_size: int = 8,
shuffle: bool = True,
tokenizer: Optional["PreTrainedTokenizerBase"] = None,
collate_fn: Optional[Callable] = None,
collate_fn_args: Optional[Dict[str, Any]] = None,
drop_remainder: Optional[bool] = None,
prefetch: bool = True,
):
"""
Wraps a HuggingFace `datasets.Dataset` as a `tf.data.Dataset` with collation and batching. This method is
designed to create a "ready-to-use" dataset that can be passed directly to Keras methods like `fit()` without
further modification. The method will drop columns from the dataset if they don't match input names for the
model. If you want to specify the column names to return rather than using the names that match this model, we
recommend using `Dataset.to_tf_dataset()` instead.
Args:
dataset (`Any`):
A `datasets.Dataset` to be wrapped as a `tf.data.Dataset`.
batch_size (`int`, defaults to 8):
The size of batches to return.
shuffle (`bool`, defaults to `True`):
Whether to return samples from the dataset in random order. Usually `True` for training datasets and
`False` for validation/test datasets.
tokenizer ([`PreTrainedTokenizerBase`], *optional*):
A `PreTrainedTokenizer` that will be used to pad samples to create batches. Has no effect if a specific
`collate_fn` is passed instead.
collate_fn (`Callable`, *optional*):
A function that collates samples from the dataset into a single batch. Defaults to
`DefaultDataCollator` if no `tokenizer` is supplied or `DataCollatorWithPadding` if a `tokenizer` is
passed.
collate_fn_args (`Dict[str, Any]`, *optional*):
A dict of arguments to pass to the `collate_fn` alongside the list of samples.
drop_remainder (`bool`, *optional*):
Whether to drop the final batch, if the batch_size does not evenly divide the dataset length. Defaults
to the same setting as `shuffle`.
prefetch (`bool`, defaults to `True`):
Whether to add prefetching to the end of the `tf.data` pipeline. This is almost always beneficial for
performance, but can be disabled in edge cases.
Returns:
`Dataset`: A `tf.data.Dataset` which is ready to pass to the Keras API.
"""
requires_backends(self, ["datasets"])
import datasets
if collate_fn is None:
if tokenizer is None:
collate_fn = DefaultDataCollator(return_tensors="tf")
else:
collate_fn = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="tf")
if collate_fn_args is None:
collate_fn_args = dict()
if not isinstance(dataset, datasets.Dataset):
raise TypeError("Dataset argument should be a datasets.Dataset!")
model_inputs = list(dict(inspect.signature(self.call).parameters).keys())
model_labels = find_labels(self.__class__)
unwanted_columns = [
feature
for feature in dataset.features
if feature not in model_inputs and feature not in ("label_ids", "label")
]
dataset = dataset.remove_columns(unwanted_columns)
output_signature, _ = dataset._get_output_signature(
dataset,
batch_size=None,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
)
output_columns = list(output_signature.keys())
feature_cols = [col for col in output_columns if col in model_inputs and col not in model_labels]
label_cols = [col for col in output_columns if col in model_labels]
tf_dataset = dataset.to_tf_dataset(
columns=feature_cols,
label_cols=label_cols,
batch_size=batch_size,
shuffle=shuffle,
drop_remainder=drop_remainder,
collate_fn=collate_fn,
collate_fn_args=collate_fn_args,
prefetch=prefetch,
)
return tf_dataset
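    # Illustrative usage sketch (editor's addition, not part of the library). The dataset name,
    # text column and checkpoint below are assumptions chosen only to show the typical call pattern:
    #
    #     from datasets import load_dataset
    #     from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
    #     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    #     model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased")
    #     raw = load_dataset("glue", "sst2", split="train")
    #     tokenized = raw.map(lambda batch: tokenizer(batch["sentence"], truncation=True), batched=True)
    #     tf_train = model.prepare_tf_dataset(tokenized, batch_size=16, shuffle=True, tokenizer=tokenizer)
    #     model.compile(optimizer="adam")  # the model's internal loss is used, see compile() below
    #     model.fit(tf_train, epochs=1)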
def compile(
self,
optimizer="rmsprop",
loss="passthrough",
metrics=None,
loss_weights=None,
weighted_metrics=None,
run_eagerly=None,
steps_per_execution=None,
**kwargs
):
"""
This is a thin wrapper that sets the model's loss output head as the loss if the user does not specify a loss
function themselves.
"""
if loss == "passthrough":
logger.warning(
"No loss specified in compile() - the model's internal loss computation will be used as the "
"loss. Don't panic - this is a common way to train TensorFlow models in Transformers! "
"To disable this behaviour please pass a loss argument, or explicitly pass "
"`loss=None` if you do not want your model to compute a loss."
)
loss = dummy_loss
self._using_dummy_loss = True
else:
self._using_dummy_loss = False
parent_args = list(inspect.signature(tf.keras.Model.compile).parameters.keys())
# This argument got renamed, we need to support both versions
if "steps_per_execution" in parent_args:
super().compile(
optimizer=optimizer,
loss=loss,
metrics=metrics,
loss_weights=loss_weights,
weighted_metrics=weighted_metrics,
run_eagerly=run_eagerly,
steps_per_execution=steps_per_execution,
**kwargs,
)
else:
super().compile(
optimizer=optimizer,
loss=loss,
metrics=metrics,
loss_weights=loss_weights,
weighted_metrics=weighted_metrics,
run_eagerly=run_eagerly,
experimental_steps_per_execution=steps_per_execution,
**kwargs,
)
def compute_loss(self, *args, **kwargs):
if hasattr(tf.keras.Model, "compute_loss"):
# This will be true in TF 2.8 or greater
return super().compute_loss(*args, **kwargs)
else:
warnings.warn(
"The old compute_loss method is deprecated as it conflicts with the Keras compute_loss "
"method added in TF 2.8. If you want the original HF compute_loss, please call "
"hf_compute_loss() instead. From TF versions >= 2.8, or Transformers versions >= 5, "
"calling compute_loss() will get the Keras method instead.",
FutureWarning,
)
return self.hf_compute_loss(*args, **kwargs)
def get_label_to_output_name_mapping(self):
arg_names = list(dict(inspect.signature(self.call).parameters).keys())
if self._label_to_output_map is not None:
return self._label_to_output_map
elif "start_positions" in arg_names:
return {"start_positions": "start_logits", "end_positions": "end_logits"}
elif "sentence_order_label" in arg_names:
return {"labels": "prediction_logits", "sentence_order_label": "sop_logits"}
elif "next_sentence_label" in arg_names:
return {"labels": "prediction_logits", "next_sentence_label": "seq_relationship_logits"}
elif "mc_labels" in arg_names:
return {"labels": "logits", "mc_labels": "mc_logits"}
else:
return dict()
def train_step(self, data):
"""
A modification of Keras's default `train_step` that correctly handles matching outputs to labels for our models
and supports directly training on the loss output head. In addition, it ensures input keys are copied to the
labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure
that they are available to the model during the forward pass.
"""
# We hardcode the most common renamings; models with weirder names can set `self._label_to_output_map`
arg_names = list(dict(inspect.signature(self.call).parameters).keys())
label_kwargs = find_labels(self.__class__)
label_to_output = self.get_label_to_output_name_mapping()
output_to_label = {val: key for key, val in label_to_output.items()}
if not self._using_dummy_loss:
data = data_adapter.expand_1d(data)
x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
# When using a dummy loss, we ensure that separate labels are copied to the correct model arguments,
# if those keys are not already present in the input dict
if self._using_dummy_loss and y is not None:
# If y is a tensor and the model only has one label-like input, map y to that input
if len(label_kwargs) == 1 and isinstance(y, tf.Tensor):
if isinstance(x, tf.Tensor):
x = {arg_names[0]: x}
label_kwarg = next(iter(label_kwargs))
if label_kwarg not in x:
x[label_kwarg] = y
# Otherwise, copy keys from y to x as long as they weren't already present in x
elif isinstance(y, dict):
if isinstance(x, tf.Tensor):
x = {arg_names[0]: x}
for key, val in y.items():
if key in arg_names and key not in x:
x[key] = val
elif output_to_label.get(key, None) in arg_names and key not in x:
x[output_to_label[key]] = val
if y is None:
y = {key: val for key, val in x.items() if key in label_kwargs}
if not y and not self._using_dummy_loss:
raise ValueError("Could not find label column(s) in input dict and no separate labels were provided!")
if isinstance(y, dict):
# Rename labels at this point to match output heads
y = {label_to_output.get(key, key): val for key, val in y.items()}
# Run forward pass.
with tf.GradientTape() as tape:
y_pred = self(x, training=True)
if self._using_dummy_loss:
loss = self.compiled_loss(y_pred.loss, y_pred.loss, sample_weight, regularization_losses=self.losses)
else:
loss = None
# This next block matches outputs to label keys. Tensorflow's standard method for doing this
# can get very confused if any of the keys contain nested values (e.g. lists/tuples of Tensors)
if isinstance(y, dict) and len(y) == 1:
if list(y.keys())[0] in y_pred.keys():
y_pred = y_pred[list(y.keys())[0]]
elif list(y_pred.keys())[0] == "loss":
y_pred = y_pred[1]
else:
y_pred = y_pred[0]
_, y = y.popitem()
elif isinstance(y, dict):
# If the labels are a dict, match keys from the output by name
y_pred = {key: val for key, val in y_pred.items() if key in y}
elif isinstance(y, tuple) or isinstance(y, list):
# If the labels are a tuple/list, match keys to the output by order, skipping the loss.
if list(y_pred.keys())[0] == "loss":
y_pred = y_pred.to_tuple()[1:]
else:
y_pred = y_pred.to_tuple()
y_pred = y_pred[: len(y)] # Remove unused fields in case those cause problems
else:
# If the labels are a single tensor, match them to the first non-loss tensor in the output
if list(y_pred.keys())[0] == "loss":
y_pred = y_pred[1]
else:
y_pred = y_pred[0]
if loss is None:
loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)
# Run backwards pass.
self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
self.compiled_metrics.update_state(y, y_pred, sample_weight)
# Collect metrics to return
return_metrics = {}
for metric in self.metrics:
result = metric.result()
if isinstance(result, dict):
return_metrics.update(result)
else:
return_metrics[metric.name] = result
return return_metrics
def test_step(self, data):
"""
        A modification of Keras's default `test_step` that correctly handles matching outputs to labels for our models
        and supports directly evaluating on the loss output head. In addition, it ensures input keys are copied to the
labels where appropriate. It will also copy label keys into the input dict when using the dummy loss, to ensure
that they are available to the model during the forward pass.
"""
# We hardcode the most common renamings; models with weirder names can set `self._label_to_output_map`
arg_names = list(dict(inspect.signature(self.call).parameters).keys())
label_kwargs = find_labels(self.__class__)
label_to_output = self.get_label_to_output_name_mapping()
output_to_label = {val: key for key, val in label_to_output.items()}
if not self._using_dummy_loss:
data = data_adapter.expand_1d(data)
x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
# When using a dummy loss, we ensure that separate labels are copied to the correct model arguments,
# if those keys are not already present in the input dict
if self._using_dummy_loss and y is not None:
arg_names = list(dict(inspect.signature(self.call).parameters).keys())
# If y is a tensor and the model only has one label-like input, map y to that input
if len(label_kwargs) == 1 and isinstance(y, tf.Tensor):
if isinstance(x, tf.Tensor):
x = {arg_names[0]: x}
label_kwarg = next(iter(label_kwargs))
if label_kwarg not in x:
x[label_kwarg] = y
# Otherwise, copy keys from y to x as long as they weren't already present in x
elif isinstance(y, dict):
if isinstance(x, tf.Tensor):
x = {arg_names[0]: x}
for key, val in y.items():
if key in arg_names and key not in x:
x[key] = val
elif output_to_label.get(key, None) in arg_names and key not in x:
x[output_to_label[key]] = val
if y is None:
y = {key: val for key, val in x.items() if key in label_kwargs}
if not y and not self._using_dummy_loss:
raise ValueError("Could not find label column(s) in input dict and no separate labels were provided!")
if isinstance(y, dict):
# Rename labels at this point to match output heads
y = {label_to_output.get(key, key): val for key, val in y.items()}
# Run forward pass.
y_pred = self(x, training=False)
if self._using_dummy_loss:
loss = self.compiled_loss(y_pred.loss, y_pred.loss, sample_weight, regularization_losses=self.losses)
else:
loss = None
# This next block matches outputs to label keys. Tensorflow's standard method for doing this
# can get very confused if any of the keys contain nested values (e.g. lists/tuples of Tensors)
if isinstance(y, dict) and len(y) == 1:
if list(y.keys())[0] in y_pred.keys():
y_pred = y_pred[list(y.keys())[0]]
elif list(y_pred.keys())[0] == "loss":
y_pred = y_pred[1]
else:
y_pred = y_pred[0]
_, y = y.popitem()
elif isinstance(y, dict):
# If the labels are a dict, match keys from the output by name
y_pred = {key: val for key, val in y_pred.items() if key in y}
elif isinstance(y, tuple) or isinstance(y, list):
# If the labels are a tuple/list, match keys to the output by order, skipping the loss.
if list(y_pred.keys())[0] == "loss":
y_pred = y_pred.to_tuple()[1:]
else:
y_pred = y_pred.to_tuple()
y_pred = y_pred[: len(y)] # Remove unused fields in case those cause problems
else:
# If the labels are a single tensor, match them to the first non-loss tensor in the output
if list(y_pred.keys())[0] == "loss":
y_pred = y_pred[1]
else:
y_pred = y_pred[0]
if loss is None:
loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)
self.compiled_metrics.update_state(y, y_pred, sample_weight)
# Collect metrics to return
return_metrics = {}
for metric in self.metrics:
result = metric.result()
if isinstance(result, dict):
return_metrics.update(result)
else:
return_metrics[metric.name] = result
return return_metrics
def create_model_card(
self,
output_dir,
model_name: str,
language: Optional[str] = None,
license: Optional[str] = None,
tags: Optional[str] = None,
finetuned_from: Optional[str] = None,
tasks: Optional[str] = None,
dataset_tags: Optional[Union[str, List[str]]] = None,
dataset: Optional[Union[str, List[str]]] = None,
dataset_args: Optional[Union[str, List[str]]] = None,
):
# Avoids a circular import by doing this when necessary.
from .modelcard import TrainingSummary # tests_ignore
training_summary = TrainingSummary.from_keras(
self,
keras_history=self.history,
language=language,
license=license,
tags=tags,
model_name=model_name,
finetuned_from=finetuned_from,
tasks=tasks,
dataset_tags=dataset_tags,
dataset=dataset,
dataset_args=dataset_args,
)
model_card = training_summary.to_model_card()
with open(os.path.join(output_dir, "README.md"), "w") as f:
f.write(model_card)
def set_input_embeddings(self, value):
"""
Set model's input embeddings
Args:
value (`tf.Variable`):
The new weights mapping hidden states to vocabulary.
"""
main_layer = getattr(self, self.base_model_prefix)
if main_layer is None:
raise NotImplementedError("The model does not implements the base_model_prefix attribute.")
try:
main_layer.set_input_embeddings(value)
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
main_layer.set_input_embeddings(value)
def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]:
"""
Returns the model's output embeddings
Returns:
`tf.Variable`: The new weights mapping vocabulary to hidden states.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
return lm_head.get_output_embeddings()
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
return lm_head().get_output_embeddings()
return None # Overwrite for models with output embeddings
def set_output_embeddings(self, value):
"""
Set model's output embeddings
Args:
value (`tf.Variable`):
The new weights mapping hidden states to vocabulary.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
lm_head.set_output_embeddings(value)
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
lm_head.set_output_embeddings(value)
def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]:
"""
Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the
embeddings
Return:
`tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model.
"""
warnings.warn(
"The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning
)
return self.get_lm_head()
def get_prefix_bias_name(self) -> Union[None, str]:
"""
Get the concatenated _prefix name of the bias from the model name to the parent layer
Return:
`str`: The _prefix name of the bias.
"""
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
return None
def get_bias(self) -> Union[None, Dict[str, tf.Variable]]:
"""
Dict of bias attached to an LM head. The key represents the name of the bias attribute.
Return:
`tf.Variable`: The weights representing the bias, None if not an LM model.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
return lm_head.get_bias()
except AttributeError:
self(self.dummy_inputs)
return lm_head.get_bias()
return None
def set_bias(self, value):
"""
Set all the bias in the LM head.
Args:
value (`Dict[tf.Variable]`):
All the new bias attached to an LM head.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
lm_head.set_bias(value)
except AttributeError:
self(self.dummy_inputs)
lm_head.set_bias(value)
def get_lm_head(self) -> tf.keras.layers.Layer:
"""
The LM Head layer. This method must be overwritten by all the models that have a lm head.
Return:
`tf.keras.layers.Layer`: The LM head layer if the model has one, None if not.
"""
return None
def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable:
"""
Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`.
Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
Arguments:
new_num_tokens (`int`, *optional*):
The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just
returns a pointer to the input tokens `tf.Variable` module of the model without doing anything.
Return:
`tf.Variable`: Pointer to the input tokens Embeddings Module of the model.
"""
if new_num_tokens is None or new_num_tokens == self.config.vocab_size:
return self._get_word_embedding_weight(self.get_input_embeddings())
model_embeds = self._resize_token_embeddings(new_num_tokens)
# Update base model and current model config
self.config.vocab_size = new_num_tokens
return model_embeds
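    # Illustrative usage sketch (editor's addition, not part of the library): the common pattern of
    # adding tokens to a tokenizer and resizing the embedding matrix to match. The checkpoint and
    # the added tokens are assumptions made only for this example:
    #
    #     tokenizer = AutoTokenizer.from_pretrained("gpt2")
    #     model = TFAutoModelForCausalLM.from_pretrained("gpt2")
    #     tokenizer.add_tokens(["<machine>", "<human>"])
    #     model.resize_token_embeddings(len(tokenizer))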
def _get_word_embedding_weight(model, embedding_layer):
# If the variable holds the weights themselves, return them
if isinstance(embedding_layer, tf.Tensor):
return embedding_layer
# Otherwise, try to get them from the layer's attributes
embeds = getattr(embedding_layer, "weight", None)
if embeds is not None:
return embeds
embeds = getattr(embedding_layer, "decoder", None)
if embeds is not None:
return embeds
# The reason why the attributes don't exist might be
# because the model is not built, so retry getting
# the argument after building the model
model(model.dummy_inputs)
embeds = getattr(embedding_layer, "weight", None)
if embeds is not None:
return embeds
embeds = getattr(embedding_layer, "decoder", None)
if embeds is not None:
return embeds
return None
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings())
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
# if word embeddings are not tied, make sure that lm head bias is resized as well
if self.get_bias() is not None:
old_lm_head_bias = self.get_bias()
new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens)
self.set_bias(new_lm_head_bias)
# if word embeddings are not tied, make sure that lm head decoder is resized as well
if self.get_output_embeddings() is not None:
old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings())
new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens)
self.set_output_embeddings(new_lm_head_decoder)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens):
"""
Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end.
Reducing the size will remove vectors from the end
Args:
old_lm_head_bias (`tf.Variable`):
Old lm head bias to be resized.
new_num_tokens (`int`, *optional*):
New number of tokens in the linear matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or `None`, just returns None
Return:
`tf.Variable`: Pointer to the resized bias.
"""
new_lm_head_bias = {}
for attr, weight in old_lm_head_bias.items():
first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)
size_diff = new_num_tokens - old_num_tokens
final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens]
# initialize new bias
if tf.math.greater(size_diff, 0):
padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]
current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1)
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy]
bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True)
bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False)
else:
slice_from = [0] if first_dim is None else [0, 0]
current_bias = tf.slice(
weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape)
)
bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True)
new_bias = self.add_weight(
shape=final_shape,
initializer="zeros",
trainable=True,
name=weight.name.split(":")[0],
)
init_bias = tf.where(bias_mask, current_bias, new_bias.value())
new_bias.assign(init_bias)
new_lm_head_bias[attr] = new_bias
return new_lm_head_bias
def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens):
"""
Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end.
Reducing the size will remove vectors from the end
Args:
old_lm_head_decoder (`tf.Variable`):
Old lm head decoder to be resized.
new_num_tokens (`int`, *optional*):
New number of tokens in the linear matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or `None`, just returns None
Return:
`tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from the input
ones.
"""
new_lm_head_decoder = old_lm_head_decoder
is_input_output_equals = tf.reduce_any(
self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder
)
if old_lm_head_decoder is not None and not is_input_output_equals:
old_embedding_dim = shape_list(old_lm_head_decoder)[1]
decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens)
new_lm_head_decoder = self.add_weight(
shape=(new_num_tokens, old_embedding_dim),
initializer="zeros",
trainable=True,
name=old_lm_head_decoder.name.split(":")[0],
)
init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value())
new_lm_head_decoder.assign(init_decoder)
return new_lm_head_decoder
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:
"""
Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly
initialized vectors at the end. Reducing the size will remove vectors from the end
Args:
old_embeddings (`tf.Variable`):
Old embeddings to be resized.
new_num_tokens (`int`, *optional*):
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or `None`, just returns a pointer to the input tokens
`tf.Variable` module of the model without doing anything.
Return:
`tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if `new_num_tokens` is
`None`
"""
old_embedding_dim = shape_list(old_embeddings)[1]
init_range = getattr(self.config, "initializer_range", 0.02)
embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens)
new_embeddings = self.add_weight(
name=old_embeddings.name.split(":")[0],
shape=[new_num_tokens, old_embedding_dim],
initializer=get_initializer(init_range),
dtype=tf.float32,
)
init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value())
new_embeddings.assign(init_embeddings)
return new_embeddings
def prune_heads(self, heads_to_prune):
"""
Prunes heads of the base model.
Arguments:
heads_to_prune (`Dict[int, List[int]]`):
Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads
to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on
layer 1 and heads 2 and 3 on layer 2.
"""
raise NotImplementedError
def save_pretrained(self, save_directory, saved_model=False, version=1, push_to_hub=False, **kwargs):
"""
Save a model and its configuration file to a directory, so that it can be re-loaded using the
[`~TFPreTrainedModel.from_pretrained`] class method.
Arguments:
save_directory (`str`):
Directory to which to save. Will be created if it doesn't exist.
saved_model (`bool`, *optional*, defaults to `False`):
If the model has to be saved in saved model format as well or not.
version (`int`, *optional*, defaults to 1):
The version of the saved model. A saved model needs to be versioned in order to be properly loaded by
TensorFlow Serving as detailed in the official documentation
https://www.tensorflow.org/tfx/serving/serving_basic
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it.
<Tip warning={true}>
Using `push_to_hub=True` will synchronize the repository you are pushing to with `save_directory`,
which requires `save_directory` to be a local clone of the repo you are pushing to if it's an existing
folder. Pass along `temp_dir=True` to use a temporary directory instead.
</Tip>
kwargs:
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
"""
if os.path.isfile(save_directory):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
return
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
if saved_model:
saved_model_dir = os.path.join(save_directory, "saved_model", str(version))
self.save(saved_model_dir, include_optimizer=False, signatures=self.serving)
logger.info(f"Saved model created in {saved_model_dir}")
# Save configuration file
self.config.architectures = [self.__class__.__name__[2:]]
# If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
# loaded from the Hub.
if self._auto_class is not None:
custom_object_save(self, save_directory, config=self.config)
self.config.save_pretrained(save_directory)
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME)
self.save_weights(output_model_file)
logger.info(f"Model weights saved in {output_model_file}")
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Model pushed to the hub in this commit: {url}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Instantiate a pretrained TF 2.0 model from a pre-trained model configuration.
The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
task.
The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
weights are discarded.
Parameters:
pretrained_model_name_or_path (`str`, *optional*):
Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
user or organization name, like `dbmdz/bert-base-german-cased`.
- A path to a *directory* containing model weights saved using
[`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
- A path or url to a *PyTorch state_dict save file* (e.g, `./pt_model/pytorch_model.bin`). In this
case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
argument. This loading path is slower than converting the PyTorch model in a TensorFlow model
using the provided conversion scripts and loading the TensorFlow model afterwards.
- `None` if you are both providing the configuration and state dictionary (resp. with keyword
arguments `config` and `state_dict`).
model_args (sequence of positional arguments, *optional*):
All remaining positional arguments will be passed to the underlying model's `__init__` method.
config (`Union[PretrainedConfig, str]`, *optional*):
Can be either:
- an instance of a class derived from [`PretrainedConfig`],
- a string valid as input to [`~PretrainedConfig.from_pretrained`].
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the *model id* string of a pretrained
model).
- The model was saved using [`~TFPreTrainedModel.save_pretrained`] and is reloaded by supplying the
save directory.
- The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
configuration JSON file named *config.json* is found in the directory.
from_pt: (`bool`, *optional*, defaults to `False`):
Load the model weights from a PyTorch state_dict save file (see docstring of
`pretrained_model_name_or_path` argument).
ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
checkpoint with 3 labels).
cache_dir (`str`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g.,
`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info (`bool`, *optional*, defaults to `False`): Whether or not to also return a
dictionary containing missing keys, unexpected keys and error messages.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether or not to only look at local files (e.g., not try downloading the model).
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
mirror (`str`, *optional*):
Mirror source to accelerate downloads in China. If you are from China and have an accessibility
problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
Please refer to the mirror site for more information.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`). Behaves differently depending on whether a `config` is provided or
automatically loaded:
- If a configuration is provided with `config`, `**kwargs` will be directly passed to the
underlying model's `__init__` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, `kwargs` will be first passed to the configuration class
initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
corresponds to a configuration attribute will be used to override said attribute with the
supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
will be passed to the underlying model's `__init__` function.
<Tip>
Passing `use_auth_token=True` is required when you want to use a private model.
</Tip>
Examples:
```python
>>> from transformers import BertConfig, TFBertModel
>>> # Download model and configuration from huggingface.co and cache.
>>> model = TFBertModel.from_pretrained("bert-base-uncased")
>>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
>>> model = TFBertModel.from_pretrained("./test/saved_model/")
>>> # Update configuration during loading.
>>> model = TFBertModel.from_pretrained("bert-base-uncased", output_attentions=True)
>>> assert model.config.output_attentions == True
>>> # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).
>>> config = BertConfig.from_json_file("./pt_model/my_pt_model_config.json")
>>> model = TFBertModel.from_pretrained("./pt_model/my_pytorch_model.bin", from_pt=True, config=config)
```"""
config = kwargs.pop("config", None)
cache_dir = kwargs.pop("cache_dir", None)
from_pt = kwargs.pop("from_pt", False)
ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
use_auth_token = kwargs.pop("use_auth_token", None)
revision = kwargs.pop("revision", None)
mirror = kwargs.pop("mirror", None)
load_weight_prefix = kwargs.pop("load_weight_prefix", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
revision=revision,
_from_auto=from_auto_class,
_from_pipeline=from_pipeline,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if os.path.isdir(pretrained_model_name_or_path):
if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint in priority if from_pt
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
# At this stage we don't have a weight file so we will raise an error.
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
raise EnvironmentError(
f"Error no file named {TF2_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} "
"but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those "
"weights."
)
else:
raise EnvironmentError(
f"Error no file named {TF2_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory "
f"{pretrained_model_name_or_path}."
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
archive_file = pretrained_model_name_or_path + ".index"
else:
filename = WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=filename,
revision=revision,
mirror=mirror,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login` and pass `use_auth_token=True`."
)
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
"this model name. Check the model page at "
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
)
except EntryNotFoundError:
if filename == TF2_WEIGHTS_NAME:
has_file_kwargs = {
"revision": revision,
"mirror": mirror,
"proxies": proxies,
"use_auth_token": use_auth_token,
}
if has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs):
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {TF2_WEIGHTS_NAME} "
"but there is a file for PyTorch weights. Use `from_pt=True` to load this model from "
"those weights."
)
else:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {TF2_WEIGHTS_NAME} "
f"or {WEIGHTS_NAME}."
)
else:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {filename}."
)
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n"
f"{err}"
)
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
f" directory containing a file named {TF2_WEIGHTS_NAME} or {WEIGHTS_NAME}.\nCheckout your internet"
" connection or see how to run the library in offline mode at"
" 'https://huggingface.co/docs/transformers/installation#offline-mode'."
)
except EnvironmentError:
raise EnvironmentError(
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a file named {TF2_WEIGHTS_NAME} or {WEIGHTS_NAME}."
)
if resolved_archive_file == archive_file:
logger.info(f"loading weights file {archive_file}")
else:
logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
else:
resolved_archive_file = None
config.name_or_path = pretrained_model_name_or_path
# composed models, *e.g.* TFRag, require special treatment when it comes to loading
# pre-trained weights.
if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None:
model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name")
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if from_pt:
from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
# Load from a PyTorch checkpoint
return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)
# we might need to extend the variable scope for composite models
if load_weight_prefix is not None:
with tf.compat.v1.variable_scope(load_weight_prefix):
model(model.dummy_inputs) # build the network with dummy inputs
else:
model(model.dummy_inputs) # build the network with dummy inputs
assert os.path.isfile(resolved_archive_file), f"Error retrieving file {resolved_archive_file}"
# 'by_name' allow us to do transfer learning by skipping/adding layers
# see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357
try:
missing_keys, unexpected_keys, mismatched_keys = load_tf_weights(
model,
resolved_archive_file,
ignore_mismatched_sizes=ignore_mismatched_sizes,
_prefix=load_weight_prefix,
)
except OSError as e:
try:
with open(resolved_archive_file) as f:
if f.read().startswith("version"):
raise OSError(
"You seem to have cloned a repository without having git-lfs installed. Please install "
"git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
"you cloned."
)
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise OSError(
"Unable to load weights from h5 file. "
"If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
)
model(model.dummy_inputs) # Make sure restore ops are run
if cls._keys_to_ignore_on_load_missing is not None:
for pat in cls._keys_to_ignore_on_load_missing:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if cls._keys_to_ignore_on_load_unexpected is not None:
for pat in cls._keys_to_ignore_on_load_unexpected:
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
logger.warning(
f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when"
f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or"
" with another architecture (e.g. initializing a BertForSequenceClassification model from a"
" BertForPreTraining model).\n- This IS NOT expected if you are initializing"
f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical"
" (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at"
f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably"
" TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
elif len(mismatched_keys) == 0:
logger.warning(
f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at"
f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint"
f" was trained on, you can already use {model.__class__.__name__} for predictions without further"
" training."
)
if len(mismatched_keys) > 0:
mismatched_warning = "\n".join(
[
f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
for key, shape1, shape2 in mismatched_keys
]
)
logger.warning(
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not"
f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able"
" to use it for predictions and inference."
)
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"mismatched_keys": mismatched_keys,
}
return model, loading_info
return model
# To update the docstring, we need to copy the method, otherwise we change the original docstring.
TFPreTrainedModel.push_to_hub = copy_func(TFPreTrainedModel.push_to_hub)
TFPreTrainedModel.push_to_hub.__doc__ = TFPreTrainedModel.push_to_hub.__doc__.format(
object="model", object_class="TFAutoModel", object_files="model checkpoint"
)
class TFConv1D(tf.keras.layers.Layer):
"""
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
Basically works like a linear layer but the weights are transposed.
Args:
nf (`int`):
The number of output features.
nx (`int`):
The number of input features.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation to use to initialize the weights.
kwargs:
Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`.
"""
def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
super().__init__(**kwargs)
self.nf = nf
self.nx = nx
self.initializer_range = initializer_range
def build(self, input_shape):
self.weight = self.add_weight(
"weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
)
self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())
def call(self, x):
bz, sl = shape_list(x)[:2]
x = tf.reshape(x, [-1, self.nx])
x = tf.matmul(x, self.weight) + self.bias
x = tf.reshape(x, [bz, sl, self.nf])
return x
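# Added illustrative usage sketch for TFConv1D (not in the original file); the
# sizes are examples only:
#   conv = TFConv1D(nf=3 * 768, nx=768)   # e.g. a fused QKV projection in a GPT-2-style block
#   x = tf.random.uniform((2, 10, 768))   # [batch, seq_len, nx]
#   y = conv(x)                           # -> [2, 10, 2304], i.e. [batch, seq_len, nf]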
class TFSharedEmbeddings(tf.keras.layers.Layer):
r"""
Construct shared token embeddings.
The weights of the embedding layer are usually shared with the weights of the linear decoder when doing language
modeling.
Args:
vocab_size (`int`):
The size of the vocabulary, e.g., the number of unique tokens.
hidden_size (`int`):
The size of the embedding vectors.
initializer_range (`float`, *optional*):
The standard deviation to use when initializing the weights. If no value is provided, it will default to
\\(1/\sqrt{hidden\_size}\\).
kwargs:
Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`.
"""
def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.initializer_range = hidden_size**-0.5 if initializer_range is None else initializer_range
def build(self, input_shape):
"""
Build shared token embedding layer Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
self.weight = self.add_weight(
"weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)
)
super().build(input_shape)
def get_config(self):
config = {
"vocab_size": self.vocab_size,
"hidden_size": self.hidden_size,
"initializer_range": self.initializer_range,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor:
"""
Get token embeddings of inputs or decode final hidden state.
Args:
inputs (`tf.Tensor`):
In embedding mode, should be an int64 tensor with shape `[batch_size, length]`.
In linear mode, should be a float tensor with shape `[batch_size, length, hidden_size]`.
mode (`str`, defaults to `"embedding"`):
A valid value is either `"embedding"` or `"linear"`, the first one indicates that the layer should be
used as an embedding layer, the second one that the layer should be used as a linear decoder.
Returns:
`tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape `[batch_size, length,
embedding_size]`.
In linear mode, the output is a float32 with shape `[batch_size, length, vocab_size]`.
Raises:
ValueError: if `mode` is not valid.
Shared weights logic is adapted from
[here](https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24).
"""
if mode == "embedding":
return self._embedding(inputs)
elif mode == "linear":
return self._linear(inputs)
else:
raise ValueError(f"mode {mode} is not valid.")
def _embedding(self, input_ids):
"""Applies embedding based on inputs tensor."""
return tf.gather(self.weight, input_ids)
def _linear(self, inputs):
"""
Computes logits by running inputs through a linear layer.
Args:
inputs: A float32 tensor with shape [..., hidden_size]
Returns:
float32 tensor with shape [..., vocab_size].
"""
first_dims = shape_list(inputs)[:-1]
x = tf.reshape(inputs, [-1, self.hidden_size])
logits = tf.matmul(x, self.weight, transpose_b=True)
return tf.reshape(logits, first_dims + [self.vocab_size])
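# Added illustrative usage sketch for TFSharedEmbeddings (not in the original
# file); sizes are examples only:
#   emb = TFSharedEmbeddings(vocab_size=32000, hidden_size=512)
#   ids = tf.constant([[1, 2, 3]])           # [batch, length]
#   hidden = emb(ids, mode="embedding")      # -> [1, 3, 512]
#   logits = emb(hidden, mode="linear")      # -> [1, 3, 32000], reusing the same weight matrix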
class TFSequenceSummary(tf.keras.layers.Layer):
"""
Compute a single vector summary of a sequence hidden states.
Args:
config ([`PretrainedConfig`]):
The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
config class of your model for the default values it uses):
- **summary_type** (`str`) -- The method to use to make this summary. Accepted values are:
- `"last"` -- Take the last token hidden state (like XLNet)
- `"first"` -- Take the first token hidden state (like Bert)
- `"mean"` -- Take the mean of all tokens hidden states
- `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
- `"attn"` -- Not implemented now, use multi-head attention
- **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.
- **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes
(otherwise to `config.hidden_size`).
- **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output,
another string or `None` will add no activation.
- **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
- **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation.
initializer_range (`float`, defaults to 0.02): The standard deviation to use to initialize the weights.
kwargs:
Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`.
"""
def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs):
super().__init__(**kwargs)
self.summary_type = config.summary_type if hasattr(config, "summary_use_proj") else "last"
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj
if self.has_summary:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = tf.keras.layers.Dense(
num_classes, kernel_initializer=get_initializer(initializer_range), name="summary"
)
self.has_activation = False
activation_string = getattr(config, "summary_activation", None)
if activation_string is not None:
self.has_activation = True
self.activation = get_tf_activation(activation_string)
self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0
if self.has_first_dropout:
self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout)
self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0
if self.has_last_dropout:
self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout)
def call(self, inputs, cls_index=None, training=False):
if not isinstance(inputs, (dict, tuple, list)):
hidden_states = inputs
elif isinstance(inputs, (tuple, list)):
hidden_states = inputs[0]
cls_index = inputs[1] if len(inputs) > 1 else None
assert len(inputs) <= 2, "Too many inputs."
else:
hidden_states = inputs.get("hidden_states")
cls_index = inputs.get("cls_index", None)
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = tf.reduce_mean(hidden_states, axis=1)
elif self.summary_type == "cls_index":
hidden_shape = shape_list(hidden_states) # e.g. [batch, num choices, seq length, hidden dims]
if cls_index is None:
cls_index = tf.fill(
hidden_shape[:-2], hidden_shape[-2] - 1
) # A tensor full of shape [batch] or [batch, num choices] full of sequence length
cls_shape = shape_list(cls_index)
if len(cls_shape) <= len(hidden_shape) - 2:
cls_index = tf.expand_dims(cls_index, axis=-1)
# else:
# cls_index = cls_index[..., tf.newaxis]
# cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),))
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
output = tf.squeeze(
output, axis=len(hidden_shape) - 2
) # shape of output: (batch, num choices, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
if self.has_first_dropout:
output = self.first_dropout(output, training=training)
if self.has_summary:
output = self.summary(output)
if self.has_activation:
output = self.activation(output)
if self.has_last_dropout:
output = self.last_dropout(output, training=training)
return output
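# Added illustrative note (not in the original file): with
# config.summary_type == "cls_index", `cls_index` selects one hidden state per
# sequence, e.g. for hidden_states of shape [batch, seq_len, hidden] and
# cls_index of shape [batch], the gathered output has shape [batch, hidden]
# before the optional projection/activation/dropout applied above.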
@classmethod
def register_for_auto_class(cls, auto_class="TFAutoModel"):
"""
Register this class with a given auto class. This should only be used for custom models as the ones in the
library are already mapped with an auto class.
<Tip warning={true}>
This API is experimental and may have some slight breaking changes in the next releases.
</Tip>
Args:
auto_class (`str` or `type`, *optional*, defaults to `"TFAutoModel"`):
The auto class to register this new model with.
"""
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f"{auto_class} is not a valid auto class.")
cls._auto_class = auto_class
def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal:
"""
Creates a `tf.initializers.TruncatedNormal` with the given range.
Args:
initializer_range (*float*, defaults to 0.02): Standard deviation of the initializer range.
Returns:
`tf.initializers.TruncatedNormal`: The truncated normal initializer.
"""
return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
class TFWrappedEmbeddings:
"""
This class wraps the TFSharedEmbeddingTokens layer in a plain Python (non-Keras-layer) class to avoid problems with
weight restoring. It also makes sure that the layer is called from the correct scope to avoid problems with
saving/storing the correct weights.
"""
def __init__(self, layer, abs_scope_name=None):
self._layer = layer
self._abs_scope_name = abs_scope_name
def call(self, inputs, mode="embedding"):
if self._abs_scope_name is None:
return self._layer.call(inputs, mode)
# if an abs scope name is given to the embedding variable, call variable from absolute scope
with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
with tf.name_scope(abs_scope_name.original_name_scope):
return self._layer.call(inputs, mode)
def __call__(self, inputs, mode="embedding"):
if self._abs_scope_name is None:
return self._layer(inputs, mode)
# if an abs scope name is given to the embedding variable, call variable from absolute scope
with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
with tf.name_scope(abs_scope_name.original_name_scope):
return self._layer(inputs, mode)
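# Added illustrative note (not in the original file): composite models can wrap a
# shared embedding so it is always resolved under a fixed scope, e.g.
#   wrapped = TFWrappedEmbeddings(shared_embeddings, abs_scope_name=scope)
#   logits = wrapped(hidden_states, mode="linear")
# where `shared_embeddings` and `scope` are placeholders for the caller's own objects.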
| 45.500417 | 154 | 0.626065 |
a1a4e53c32935973689cb31088aa0d9439fe4c9e
| 13,390 |
py
|
Python
|
game/engine.py
|
HagenSR/byte_le_royale_2022
|
d501bf2418337d543dac982112ea924d37164205
|
[
"MIT"
] | 1 |
2022-03-10T01:38:12.000Z
|
2022-03-10T01:38:12.000Z
|
game/engine.py
|
HagenSR/byte_le_royale_2022
|
d501bf2418337d543dac982112ea924d37164205
|
[
"MIT"
] | 2 |
2022-01-31T18:28:14.000Z
|
2022-01-31T18:28:24.000Z
|
game/engine.py
|
HagenSR/byte_le_royale_2022
|
d501bf2418337d543dac982112ea924d37164205
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from game.common.stats import GameStats
from game.common.moving.shooter import Shooter
import importlib
import json
import os
import sys
import traceback
from game.common.player import Player
from game.common.game_board import GameBoard
from game.common.hitbox import Hitbox
from game.config import *
from game.controllers.master_controller import MasterController
from game.utils.helpers import write_json_file
from game.utils.engine_thread import Thread, CommunicationThread
from game.utils.validation import verify_code, verify_num_clients
from tqdm import tqdm
class Engine:
def __init__(self, quiet_mode=False, use_filenames_as_team_names=False):
self.clients = list()
self.master_controller = MasterController()
self.tick_number = 0
self.game_logs = dict()
self.world = dict()
self.current_world_key = None
self.quiet_mode = quiet_mode
self.use_filenames = use_filenames_as_team_names
# Delete logs, then recreate logs dir
for file in os.scandir(LOGS_DIR):
if ('map' not in file.path):
os.remove(file.path)
# Starting point of the engine. Runs other methods then sits on top of a
# basic game loop until over
def loop(self):
# If quiet mode is activated, replace stdout with devnull
error = ""
try:
f = sys.stdout
if self.quiet_mode:
f = open(os.devnull, 'w')
sys.stdout = f
self.boot()
self.load()
for self.current_world_key in tqdm(
self.master_controller.game_loop_logic(),
bar_format=TQDM_BAR_FORMAT,
unit=TQDM_UNITS,
file=f):
self.pre_tick()
self.tick()
self.post_tick()
if self.tick_number >= MAX_TICKS:
break
except Exception as e:
print("Exception raised during runtime: " + str(e))
finally:
self.shutdown()
# Finds, checks, and instantiates clients
def boot(self):
# Insert path of where clients are expected to be inside where python
# will look
current_dir = os.getcwd()
sys.path.insert(0, current_dir)
sys.path.insert(0, f'{current_dir}/{CLIENT_DIRECTORY}')
# Find and load clients in
for filename in os.listdir(CLIENT_DIRECTORY):
try:
filename = filename.replace('.py', '')
# Filter out files that do not contain CLIENT_KEYWORD in their
# filename (located in config)
if CLIENT_KEYWORD.upper() not in filename.upper():
continue
# Filter out folders
if os.path.isdir(os.path.join(CLIENT_DIRECTORY, filename)):
continue
# Otherwise, instantiate the player
# Add players one and two
player = Player()
self.clients.append(player)
# Verify client isn't using invalid imports or opening anything
imports, opening, printing = verify_code(filename + '.py')
if len(imports) != 0:
player.functional = False
player.error = f'Player has attempted illegal imports: {imports}'
if opening:
player.functional = False
player.error = PermissionError(
f'Player is using "open" which is forbidden.')
# Attempt creation of the client object
obj = None
try:
# Import client's code
im = importlib.import_module(f'{filename}', CLIENT_DIRECTORY)
obj = im.Client()
except Exception:
player.functional = False
player.error = str(traceback.format_exc())
player.code = obj
thr = None
try:
# Retrieve team name
thr = CommunicationThread(player.code.team_name, list(), str)
thr.start()
thr.join(0.01) # Shouldn't take long to get a string
if thr.is_alive():
player.functional = False
player.error = TimeoutError(
'Client failed to provide a team name in time.')
if thr.error is not None:
player.functional = False
player.error = thr.error
finally:
# Note: I keep the above thread for both naming conventions to check for client errors
try:
if self.use_filenames:
player.team_name = filename
thr.retrieve_value()
else:
player.team_name = thr.retrieve_value()
except Exception as e:
player.functional = False
player.error = str(e)
except Exception:
print(f"Bad client for {filename}")
self.clients.sort(key=lambda clnt: clnt.team_name, reverse=True)
# Verify correct number of clients have connected to start
func_clients = [client for client in self.clients if client.functional]
client_num_correct = verify_num_clients(func_clients,
SET_NUMBER_OF_CLIENTS_START,
MIN_CLIENTS_START,
MAX_CLIENTS_START)
if client_num_correct is not None:
self.shutdown(source='Client_error')
# Finally, request master controller to establish clients with basic
# objects
if SET_NUMBER_OF_CLIENTS_START == 1:
self.master_controller.give_clients_objects(self.clients[0])
else:
self.master_controller.give_clients_objects(self.clients)
# Loads in the world
def load(self):
# Verify the log directory exists
if not os.path.exists(LOGS_DIR):
raise FileNotFoundError('Log directory not found.')
# Verify the game map exists
if not os.path.exists(GAME_MAP_FILE):
raise FileNotFoundError('Game map not found.')
# Delete previous logs
if os.path.exists(LOGS_FILE):
os.remove(LOGS_FILE)
with open(GAME_MAP_FILE) as json_file:
world = json.load(json_file)
# Yes, this is a bit ugly. Load game map json to game map object
gameBoard = GameBoard()
game_map = gameBoard.from_json(world['game_map'])
# add game map object to dictionary
world.pop("game_map", None)
self.world["game_map"] = game_map
self.world['seed'] = world['seed']
# attach shooters to the game map
for client in self.clients:
self.world["game_map"].partition.add_object(client.shooter)
# Sits on top of all actions that need to happen before the player takes
# their turn
def pre_tick(self):
# Increment the tick
self.tick_number += 1
# game map isn't tick based, only need the previous game map to persist
# Retrieve current world info
# if self.current_world_key not in self.world:
# raise KeyError('Given generated world key does not exist inside the world.')
# current_world = self.world['game_map']
# Send current world information to master controller for purposes
if SET_NUMBER_OF_CLIENTS_START == 1:
self.master_controller.interpret_current_turn_data(
self.clients[0], self.world, self.tick_number)
else:
self.master_controller.interpret_current_turn_data(
self.clients, self.world, self.tick_number)
# Does actions like lets the player take their turn and asks master
# controller to perform game logic
def tick(self):
# Create list of threads to run client's code
threads = list()
for client in self.clients:
# Skip non-functional clients
if not client.functional:
continue
# Retrieve list of arguments to pass
arguments = self.master_controller.client_turn_arguments(
client, self.tick_number)
# Create the thread, pass the arguments
thr = Thread(func=client.code.take_turn, args=arguments)
threads.append(thr)
# Start all threads
[thr.start() for thr in threads]
# Time and wait for clients to be done
start_time = datetime.now()
for thr in threads:
# We only want to wait a maximum of MAX_SECONDS_PER_TURN once all of the clients have started.
# However, we can't simultaneously join threads without more threads or multiprocessing.
# Solution: join one thread at a time, keep track of total running time between each join, and reduce the
# join time so it is always less than MAX_SECONDS_PER_TURN.
# Get time elapsed in seconds (use the full timedelta; comparing only the
# microsecond components would wrap around after one second)
time_elapsed = (datetime.now() - start_time).total_seconds()
# Subtract value from MAX_SECONDS_PER_TURN to get time remaining
time_remaining = MAX_SECONDS_PER_TURN - time_elapsed
# Ensure value never goes negative
time_remaining = max(0.0, time_remaining)
thr.join(time_remaining)
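# Added illustrative comment (not in the original file): the bookkeeping above
# gives the whole batch of client threads one shared budget. For example, with
# MAX_SECONDS_PER_TURN = 1.0 and 0.4 s already spent, the next join waits at most
# 0.6 s, and later joins only wait for whatever budget remains (never below 0).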
# Go through each thread and check if they are still alive
for client, thr in zip(self.clients, threads):
# If thread is no longer alive, mark it as non-functional,
# preventing it from receiving future turns
if thr.is_alive():
client.functional = False
client.error = str(TimeoutError(
f'{client.id} failed to reply in time and has been dropped.'))
print(client.error)
# Also check to see if the client had created an error and save it
if thr.error is not None:
client.functional = False
client.error = str(thr.error)
print(thr.error)
# Verify there are enough clients to continue the game
func_clients = [client for client in self.clients if client.functional]
client_num_correct = verify_num_clients(func_clients,
SET_NUMBER_OF_CLIENTS_CONTINUE,
MIN_CLIENTS_CONTINUE,
MAX_CLIENTS_CONTINUE)
if client_num_correct is not None:
self.shutdown(source='Client_error')
# Finally, consult master controller for game logic
if SET_NUMBER_OF_CLIENTS_START == 1:
self.master_controller.turn_logic(
self.clients[0], self.tick_number)
else:
self.master_controller.turn_logic(self.clients, self.tick_number)
# Does any actions that need to happen after the game logic, then creates
# the game log for the turn
def post_tick(self):
# Add logs to logs list
data = None
if SET_NUMBER_OF_CLIENTS_START == 1:
data = self.master_controller.create_turn_log(
self.clients[0], self.tick_number)
else:
data = self.master_controller.create_turn_log(
self.clients, self.tick_number)
# self.game_logs[self.tick_number] = data
with open(os.path.join(LOGS_DIR, f"turn_{self.tick_number:04d}.json"), 'w+') as f:
json.dump(data, f)
# Perform a game over check
if self.master_controller.game_over:
self.shutdown()
# Attempts to safely handle an engine shutdown given any game state
def shutdown(self, source=None):
# Retrieve and write results information
results_information = None
if SET_NUMBER_OF_CLIENTS_START == 1:
results_information = self.master_controller.return_final_results(
self.clients[0], self.tick_number)
else:
results_information = self.master_controller.return_final_results(
self.clients, self.tick_number)
if source:
results_information['reason'] = source
write_json_file(results_information, RESULTS_FILE)
# Exit game
if source:
print(f'\nGame has ended due to {source}.')
# Flush standard out
sys.stdout.flush()
os._exit(1)
else:
print(f'\nGame has successfully ended.')
# Flush standard out
sys.stdout.flush()
os._exit(0)
# Debug print statement
def debug(*args):
if Debug.level >= DebugLevel.engine:
print('Engine: ', end='')
print(*args)
| 38.699422 | 117 | 0.580134 |
573772579f2cf3b6d1ca1a36eb3220259ee0a2a0
| 1,626 |
py
|
Python
|
stellar_sdk/xdr/curve25519_secret.py
|
Shaptic/py-stellar-base
|
f5fa47f4d96f215889d99249fb25c7be002f5cf3
|
[
"Apache-2.0"
] | null | null | null |
stellar_sdk/xdr/curve25519_secret.py
|
Shaptic/py-stellar-base
|
f5fa47f4d96f215889d99249fb25c7be002f5cf3
|
[
"Apache-2.0"
] | 27 |
2022-01-12T10:55:38.000Z
|
2022-03-28T01:38:24.000Z
|
stellar_sdk/xdr/curve25519_secret.py
|
Shaptic/py-stellar-base
|
f5fa47f4d96f215889d99249fb25c7be002f5cf3
|
[
"Apache-2.0"
] | 2 |
2021-12-02T12:42:03.000Z
|
2021-12-07T20:53:10.000Z
|
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from ..type_checked import type_checked
from .base import Opaque
__all__ = ["Curve25519Secret"]
@type_checked
class Curve25519Secret:
"""
XDR Source Code::
struct Curve25519Secret
{
opaque key[32];
};
"""
def __init__(
self,
key: bytes,
) -> None:
self.key = key
def pack(self, packer: Packer) -> None:
Opaque(self.key, 32, True).pack(packer)
@classmethod
def unpack(cls, unpacker: Unpacker) -> "Curve25519Secret":
key = Opaque.unpack(unpacker, 32, True)
return cls(
key=key,
)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "Curve25519Secret":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "Curve25519Secret":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return self.key == other.key
def __str__(self):
out = [
f"key={self.key}",
]
return f"<Curve25519Secret {[', '.join(out)]}>"
| 23.911765 | 62 | 0.600246 |
258817560f79cf4bb7d739e6607ea83baca1b00b
| 12,999 |
py
|
Python
|
proteus/DiagUtils.py
|
robertsawko/proteus
|
6f1e4c2ca1af85a906b35a5162430006f0343861
|
[
"NASA-1.3"
] | null | null | null |
proteus/DiagUtils.py
|
robertsawko/proteus
|
6f1e4c2ca1af85a906b35a5162430006f0343861
|
[
"NASA-1.3"
] | null | null | null |
proteus/DiagUtils.py
|
robertsawko/proteus
|
6f1e4c2ca1af85a906b35a5162430006f0343861
|
[
"NASA-1.3"
] | null | null | null |
"""
Module for diagnostic utilities
"""
from EGeometry import *
from MeshTools import *
from FemTools import *
from LinearAlgebraTools import *
from LinearSolvers import *
from Transport import *
from Norms import *
from Profiling import logEvent
def L2errorFEMvsAF(analyticalFunction,quadraturePointArray,quadratureWeightArray,
functionValueArray,T=None):
"""
supposed to be L2 norm of error in vector quantity
I think just using dot would cover both scalar and vector case
"""
error=0.0
range_nQuadraturePoints_element = range(quadraturePointArray.shape[1])
for eN in range(quadraturePointArray.shape[0]):
for k in range_nQuadraturePoints_element:
AF = analyticalFunction.uOfXT(quadraturePointArray[eN,k],T)
eloc = functionValueArray[eN,k]-AF
error += numpy.dot(eloc,eloc)*quadratureWeightArray[eN,k]
error = sqrt(abs(error))
return error
def getQuadraturePhysPointsAndWeights(mesh,femSpace,quadrature,verbose=0):
"""
for convenience, hide steps for generating quadrature points and
weights, with Jacobians, on physical mesh based on points and
weights on reference element
returns points array that's nelem x nquadloc x 3
weight array that's nelem x nquadloc
"""
nd = femSpace.referenceFiniteElement.referenceElement.dim
nquad = len(quadrature.points)
qpoints = numpy.zeros((nquad,3),'d')
qweights = numpy.zeros(nquad,'d')
for k,p in enumerate(quadrature.points):
qpoints[k][:] = p
for k,w in enumerate(quadrature.weights):
qweights[k] = w
quadX = numpy.zeros((mesh.nElements_global,nquad,3),'d')
quadW = numpy.zeros((mesh.nElements_global,nquad),'d')
jacTmp = numpy.zeros((mesh.nElements_global,nquad,nd,nd),'d')
jInvTmp = numpy.zeros((mesh.nElements_global,nquad,nd,nd),'d')
detJTmp = numpy.zeros((mesh.nElements_global,nquad),'d')
femSpace.elementMaps.getValues(qpoints,quadX)
femSpace.elementMaps.getJacobianValues(qpoints,jacTmp,
jInvTmp,detJTmp)
for eN in range(mesh.nElements_global):
for k in range(nquad):
quadW[eN,k] = abs(detJTmp[eN,k])*qweights[k]
#end k
#end eN
return quadX,quadW,qpoints,qweights
def getFEMvals(u,xiArray,verbose=0):
"""
for convenience, hide steps for generating finite element solution
at physical points corresponding to reference points held in
xiArray
returns array that's nelem x npointloc
"""
nelems = u.femSpace.elementMaps.mesh.nElements_global
ndofs = u.femSpace.referenceFiniteElement.localFunctionSpace.dim
nploc = xiArray.shape[0]
bvals = numpy.zeros((nelems,nploc,ndofs),'d')
uvals = numpy.zeros((nelems,nploc),'d')
u.femSpace.getBasisValues(xiArray,bvals)
for eN in range(nelems):
for k in range(nploc):
for j in range(ndofs):
J = u.femSpace.dofMap.l2g[eN,j]
uvals[eN,k] += bvals[eN,k,j]*u.dof[J]
logEvent("""getFemValues eN=%d xiArray[%d]= %s
jloc=%d J=%d u.dof[%d]= %g
uvals[%d,%d]= %g
""" % (eN,k,xiArray[k],j,J,J,u.dof[J],eN,k,uvals[eN,k]),level=3)
#end verbose
#end j
#end k
#end eN
return uvals
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#stuff for running test problems
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
"""
identity tensor in 1d,2d,3d
"""
Ident1 = numpy.ones((1,1),'d')
Ident2 = numpy.zeros((2,2),'d')
for k in range(2):
Ident2[k,k] = 1.0
#end k
Ident3 = numpy.zeros((3,3),'d')
for k in range(3):
Ident3[k,k] = 1.0
#end k
# # # # # # # # # # # # # # # # # # # # # # # # #
#some useful test routines
# # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # #
#examples for testing a new finite element space
# # # # # # # # # # # # # # # # # # # # # # # # #
def testCrRavNodalBasis(nd,verbose=0):
"""
test local Crouzeix-Raviart element space
"""
if verbose > -1:
print 'creating CrouzeixRaviartWithNodalBasis space dim= ',nd
#end
#look at values at element barycenter, and face barycenters
npoints = 2 + nd
xiArray = numpy.zeros((npoints,3),'d')
if nd == 1:
xiArray[:,0] = [0.5, 1., 0.]
elif nd == 2:
xiArray[:,0:2] = [[1./3., 1./3.],
[0.5, 0.5],
[0., .5],
[0.5, 0.]]
elif nd == 3:
xiArray[:,:] = [[1./4., 1./4., 1./4.],
[1./3., 1./3., 1./3.],
[0., 1./3., 1./3.],
[1./3., 0., 1./3.],
[1./3., 1./3., 0.]]
#end if
if verbose > 1:
print 'trying to get values at points \n',xiArray
space = CrouzeixRaviartWithNodalBasis(nd)
#space = LinearOnSimplexWithNodalBasis(nd)
if verbose > -1:
print 'number of dofs= \n',space.dim
bvals = numpy.zeros((npoints,space.dim),'d')
bgrads= numpy.zeros((npoints,space.dim,nd),'d')
for j in range(npoints):
for k in space.range_dim:
bvals[j,k] = space.basis[k](xiArray[j])
bgrads[j,k,:]= space.basisGradients[k](xiArray[j])
#end k
#end j
print 'basis values at \n',xiArray
print 'are \n',bvals
print 'basis gradients are \n',bgrads
#end if
#look at values on faces as mapping from lower dimensional space
exiArray = numpy.zeros((1,max(nd-1,1)),'d')
if nd == 1:
exiArray[0,0] = 0.
elif nd == 2:
exiArray[0,0] = 0.5
else:
exiArray[0,0:2] = [1./3., 1./3.]
#end else
if verbose > -1:
nElementBoundaries = nd+1
bvals = numpy.zeros((nElementBoundaries,space.dim),'d')
bgrads= numpy.zeros((nElementBoundaries,space.dim,nd),'d')
for k in range(nElementBoundaries):
for j in space.range_dim:
bvals[k,j] = space.basisTrace[k][j](exiArray[0])
bgrads[k,j,:] = space.basisGradientsTrace[k][j](exiArray[0])
#end j
#end k
print 'trace basis values at ',exiArray,' on edges 0:nd+1 are '
print bvals
print 'trace basis gradients are '
print bgrads
#end if
#end testCr
def testQuadNodalBasis(nd,verbose=0):
"""
test local P2 nodal finite element space
"""
if verbose > -1:
print 'creating QuadraticOnSimplexWithNodal space dim= ',nd
#end
#look at values at element barycenter, and face barycenters
tdim = '1d'
if nd == 2:
tdim= '2d'
#end if
if nd == 3:
tdim= '3d'
#end if
#npoints = nd+2
#xiArray = numpy.zeros((npoints,nd),'d')
xiArray = p2refNodes[nd-1]
npoints = xiArray.shape[0]
#end if
if verbose > 1:
print 'trying to get values at points ',xiArray
space = QuadraticOnSimplexWithNodalBasis(nd)
if verbose > -1:
print 'number of dofs= ',space.dim
bvals = numpy.zeros((npoints,space.dim),'d')
bgrads= numpy.zeros((npoints,space.dim,nd),'d')
if verbose > 6:
for k in range(nd+1):
print 'baryCoord ',k,'(',xiArray[0],')=',baryCoords[tdim][k](xiArray[0])
#end k
for k in space.range_dim:
print 'basis func ',k,'(',xiArray[0],')=',space.basis[k](xiArray[0])
#end k
#end verbose
for j in range(npoints):
for k in space.range_dim:
bvals[j,k] = space.basis[k](xiArray[j])
bgrads[j,k,:]= space.basisGradients[k](xiArray[j])
#end k
#end j
print 'basis values at \n',xiArray
print 'are \n',bvals
print 'basis gradients are \n',bgrads
#end if
#look at values on faces as mapping from lower dimensional space
exiArray = numpy.zeros((1,max(nd-1,1)),'d')
if nd == 1:
exiArray[0,0] = 0.
elif nd == 2:
exiArray[0,0] = 0.5
else:
exiArray[0,0:2] = [1./3., 1./3.]
#end else
if verbose > -1:
nElementBoundaries = nd+1
bvals = numpy.zeros((nElementBoundaries,space.dim),'d')
bgrads= numpy.zeros((nElementBoundaries,space.dim,nd),'d')
for k in range(nElementBoundaries):
for j in space.range_dim:
bvals[k,j] = space.basisTrace[k][j](exiArray[0])
bgrads[k,j,:] = space.basisGradientsTrace[k][j](exiArray[0])
#end j
#end k
print 'trace basis values at ',exiArray,' on edges 0:nd+1 are'
print 'are \n',bvals
print 'trace basis gradients are \n',bgrads
#end if
#end testQuad
def testEdgeDOFMap(mesh,nd):
"""
test edge dof map to see what its doing
"""
#dofMap = EdgeDOFMap(mesh)
dofMap = NodalDOFMap(mesh)
if nd == 1:
ndofLoc= 1
#try to do a proto loop over elements and assemble local stiffness matrix
stiffMat = numpy.array([[1.0,-1.0],
[-1.0,1.0]])
#end 1d
elif nd == 2:
ndofLoc= 3
#try to do a proto loop over elements and assemble local stiffness matrix
#what I'm getting out of diffusion jacobian for p1c0
stiffMat = numpy.array([[0.5, 0., -0.5],
[0., 0., 0.],
[-0.5, 0., 0.5]])
#what I'm getting out of diffusion jacobian for p1nc
#stiffMat = numpy.array([[2.0, 0., -2.0],
# [0., 0., 0.],
# [-2.0, 0., 2.0]])
stiffMat = numpy.array([[4.0, -2., -2.],
[-2., 2., 0.],
[-2., 0., 2.]])
#end 2d
A = Mat(dofMap.nDOF,dofMap.nDOF)
for eN in range(mesh.nElements_global):
for i in range(ndofLoc):
ig = dofMap.l2g[eN,i]
for j in range(ndofLoc):
jg = dofMap.l2g[eN,j]
print 'loc(',i,',',j,') = ',stiffMat[i,j],' --> A(',ig,',',jg,')= ',A[ig,jg]
A[ig,jg] += stiffMat[i,j]
#end j
#end i
#end eN
print 'leaving testEdgeDofMap A= \n',A
def testQuadRefMats(nd,verbose=0):
"""
test quad reference matrices to see what its doing
"""
lspace = QuadraticOnSimplexWithNodalBasis(nd)
ndofLoc= lspace.dim
volWeights = [1.0,0.5,1.0/6.0]
#compute mass matrix numerically
quadRule = SimplexGaussQuadrature(nd)
quadRule.setOrder(4)
stiffMat = numpy.zeros((lspace.dim,lspace.dim),'d')
massMat = numpy.zeros((lspace.dim,lspace.dim),'d')
for p,w in zip(quadRule.points,quadRule.weights):
for i in lspace.range_dim:
for j in lspace.range_dim:
stiffMat[i,j] += numpy.dot(lspace.basisGradients[i](p),
lspace.basisGradients[j](p))*w*volWeights[nd-1]
massMat[i,j] += lspace.basis[i](p)*lspace.basis[j](p)*w*volWeights[nd-1]
#end j
#end i
#end p,w
print 'P2 localStiffMat = \n',stiffMat
print 'P2 localMassMat = \n',massMat
#end testQuadDofMap
def testQuadDOFMap(mesh,nd,verbose=0):
"""
test quad dof map to see what its doing
"""
lspace = QuadraticOnSimplexWithNodalBasis(nd)
#dofMap = NodalDOFMap(mesh)
dofMap = QuadraticLagrangeDOFMap(mesh,lspace,nd)
ndofLoc= lspace.dim
volWeights = [1.0,0.5,1.0/6.0]
#compute mass matrix numerically
quadRule = SimplexGaussQuadrature(nd)
quadRule.setOrder(4)
stiffMat = numpy.zeros((lspace.dim,lspace.dim),'d')
massMat = numpy.zeros((lspace.dim,lspace.dim),'d')
for p,w in zip(quadRule.points,quadRule.weights):
for i in lspace.range_dim:
for j in lspace.range_dim:
stiffMat[i,j] += numpy.dot(lspace.basisGradients[i](p),
lspace.basisGradients[j](p))*w*volWeights[nd-1]
massMat[i,j] += lspace.basis[i](p)*lspace.basis[j](p)*w*volWeights[nd-1]
#end j
#end i
#end p,w
if verbose > -1:
print 'P2 localStiffMat = \n',stiffMat
print 'P2 localMassMat = \n',massMat
#end verbose
if verbose > 2:
print 'testQuadNodalDOF locDof= ',ndofLoc,' global nDof=',dofMap.nDOF
A = Mat(dofMap.nDOF,dofMap.nDOF)
for eN in range(mesh.nElements_global):
for i in range(ndofLoc):
ig = dofMap.l2g[eN,i]
for j in range(ndofLoc):
jg = dofMap.l2g[eN,j]
print 'loc(',i,',',j,') = ',stiffMat[i,j],' --> A(',ig,',',jg,')= ',A[ig,jg]
A[ig,jg] += stiffMat[i,j]
#end j
#end i
#end eN
print 'leaving testQuadDofMap A= \n',A
#end testQuadDofMap
## @}
| 33.502577 | 92 | 0.548119 |
47cdf989e64bd51719e922a1f60b1c1ef00fe432
| 1,642 |
py
|
Python
|
scripts/misc/scp_info.py
|
ali1234/Greaseweazle
|
2a071e0a8b0b7ec876e014f25f3046b9cd5f1a3b
|
[
"Unlicense"
] | null | null | null |
scripts/misc/scp_info.py
|
ali1234/Greaseweazle
|
2a071e0a8b0b7ec876e014f25f3046b9cd5f1a3b
|
[
"Unlicense"
] | null | null | null |
scripts/misc/scp_info.py
|
ali1234/Greaseweazle
|
2a071e0a8b0b7ec876e014f25f3046b9cd5f1a3b
|
[
"Unlicense"
] | null | null | null |
import struct, sys
def dump_track(dat, trk_offs, trknr, show_dat):
print("Track %u:" % trknr)
trk_off = trk_offs[trknr]
if trk_off == 0:
print("Empty")
return
# Parse the SCP track header and extract the flux data.
thdr = dat[trk_off:trk_off+4+12*nr_revs]
sig, tnr, _, _, s_off = struct.unpack("<3sB3I", thdr[:16])
assert sig == b"TRK"
assert tnr == trknr
for i in range(nr_revs):
t,n,_ = struct.unpack("<3I", thdr[4+i*12:4+(i+1)*12])
print("Rev %u: time=%uus flux=%u" % (i, t//40, n))
if not show_dat:
return
_, e_nr, e_off = struct.unpack("<3I", thdr[-12:])
tdat = dat[trk_off+s_off:trk_off+e_off+e_nr*2]
fluxl = []
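    # Each flux entry is a big-endian 16-bit tick count; dividing by 40
    # converts ticks to microseconds, assuming the SCP default 25 ns (40 MHz)
    # sample resolution, which matches the t//40 conversion above.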
while tdat:
flux, = struct.unpack(">H", tdat[:2])
tdat = tdat[2:]
fluxl.append(flux / 40)
tot = 0.0
i = 0
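    # Walk the flux list and flag any interval outside the accepted windows
    # (~3.6-4.4, 5.4-6.6, 7.2-8.8 us). These appear to bracket the nominal
    # 4/6/8 us MFM cell spacings of double-density media (an interpretation,
    # not stated in the file).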
for x in fluxl:
bad = ""
if (x < 3.6) or ((x > 4.4) and (x < 5.4)) \
or ((x > 6.6) and (x < 7.2)) or (x > 8.8):
bad = "BAD"
print("%d: %f %s" % (i, x, bad))
i += 1
tot += x
print("Total: %uus (%uus per rev)" % (int(tot), tot//nr_revs))
with open(sys.argv[1], "rb") as f:
dat = f.read()
header = struct.unpack("<3s9BI", dat[0:16])
(sig, _, _, nr_revs, s_trk, e_trk, flags, _, ss, _, _) = header
assert sig == b"SCP"
nr_sides = 1 if ss else 2
trk_offs = struct.unpack("<168I", dat[16:0x2b0])
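    # Track lookup table: 168 absolute file offsets, one per possible track
    # slot; dump_track treats a zero offset as an empty/absent track.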
print("Revolutions: %u" % nr_revs)
if len(sys.argv) == 3:
dump_track(dat, trk_offs, int(sys.argv[2]), True)
else:
for i in range(s_trk, e_trk+1):
dump_track(dat, trk_offs, i, False)
| 27.366667 | 66 | 0.532887 |
42b6063616842a541b749c822f15068bf2e88772
| 1,602 |
py
|
Python
|
products/migrations/0001_initial.py
|
geoffreynyaga/daraja
|
61db415b474fae004547a4caa057fedfe375ebb6
|
[
"MIT"
] | 23 |
2019-11-14T14:37:43.000Z
|
2022-02-25T01:53:09.000Z
|
products/migrations/0001_initial.py
|
geoffreynyaga/daraja
|
61db415b474fae004547a4caa057fedfe375ebb6
|
[
"MIT"
] | 11 |
2020-02-12T02:43:25.000Z
|
2022-02-19T04:43:54.000Z
|
products/migrations/0001_initial.py
|
geoffreynyaga/daraja
|
61db415b474fae004547a4caa057fedfe375ebb6
|
[
"MIT"
] | 15 |
2019-11-10T23:28:18.000Z
|
2022-03-04T08:35:17.000Z
|
# Generated by Django 2.1.7 on 2020-09-21 11:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
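    # Creates the initial Product table below; the ForeignKey to
    # settings.AUTH_USER_MODEL is why the swappable user-model dependency
    # above is required.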
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=60)),
('description', models.CharField(blank=True, max_length=140, null=True)),
('price', models.FloatField()),
('stock_amount', models.IntegerField()),
('package_details', models.CharField(max_length=20)),
('picture', models.ImageField(upload_to='images/product')),
('delivery_option', models.BooleanField(default=False)),
('category', models.CharField(choices=[('GROC', 'GROCERIES'), ('ELEC', 'ELECTRONICS'), ('CLTH', 'CLOTHES'), ('HOME', 'HOME AND LIVING')], max_length=5)),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True)),
('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Product',
'verbose_name_plural': 'Products',
},
),
]
| 41.076923 | 169 | 0.594881 |
5f86a6b63275c435c6c15b34dcc52012cd1b9a6a
| 30,149 |
py
|
Python
|
tests/test_emulators.py
|
tlvu/raven
|
f10e7946bf3d4a945b8b3fb0e8eaaf2b4599961b
|
[
"MIT"
] | null | null | null |
tests/test_emulators.py
|
tlvu/raven
|
f10e7946bf3d4a945b8b3fb0e8eaaf2b4599961b
|
[
"MIT"
] | null | null | null |
tests/test_emulators.py
|
tlvu/raven
|
f10e7946bf3d4a945b8b3fb0e8eaaf2b4599961b
|
[
"MIT"
] | null | null | null |
import datetime as dt
import os
import tempfile
import numpy as np
import xarray as xr
import pytest
from raven.models import (
Raven,
GR4JCN,
HMETS,
MOHYSE,
HBVEC,
GR4JCN_OST,
HMETS_OST,
MOHYSE_OST,
HBVEC_OST,
)
from raven.models.state import HRUStateVariables
from .common import TESTDATA, _convert_2d
import zipfile
@pytest.fixture
def input2d(tmpdir):
"""Convert 1D input to 2D output by copying all the time series along a new region dimension."""
ds = _convert_2d(TESTDATA["raven-gr4j-cemaneige-nc-ts"])
fn_out = os.path.join(tmpdir, "input2d.nc")
ds.to_netcdf(fn_out)
return fn_out
def test_race():
model1 = GR4JCN()
model1.rvi.suppress_output = True
model2 = GR4JCN()
ost = GR4JCN_OST()
assert model1.rvi.suppress_output.startswith(":SuppressOutput")
assert model2.rvi.suppress_output == ""
assert ost.rvi.suppress_output.startswith(":SuppressOutput")
class TestGR4JCN:
def test_simple(self):
ts = TESTDATA["raven-gr4j-cemaneige-nc-ts"]
model = GR4JCN(tempfile.mkdtemp())
model.rvi.start_date = dt.datetime(2000, 1, 1)
model.rvi.end_date = dt.datetime(2002, 1, 1)
model.rvi.run_name = "test"
model.rvh.name = "Salmon"
model.rvh.area = "4250.6"
model.rvh.elevation = "843.0"
model.rvh.latitude = 54.4848
model.rvh.longitude = -123.3659
model.rvp.params = model.params(0.529, -3.396, 407.29, 1.072, 16.9, 0.947)
assert model.rvi.suppress_output == ""
model([ts, ])
d = model.diagnostics
# yields NSE=0.???? for full period 1954-2010
# Check parser
assert 1 in model.solution["HRUStateVariableTable"]["data"]
np.testing.assert_almost_equal(d["DIAG_NASH_SUTCLIFFE"], -0.117301, 2)
hds = model.q_sim
assert hds.attrs["long_name"] == "Simulated outflows"
# Check attributes
assert model.hydrograph.attrs["model_id"] == "gr4jcn"
def test_tags(self):
model = GR4JCN(tempfile.mkdtemp())
tags = model.tags
assert "run_name" in tags
def test_rvobjs(self):
model = GR4JCN(tempfile.mkdtemp())
a = model.rvobjs
assert a
def test_assign(self):
model = GR4JCN()
model.assign("run_name", "test")
assert model.rvi.run_name == "test"
model.assign("params", np.array([0.529, -3.396, 407.29, 1.072, 16.9, 0.947]))
assert model.rvp.params.GR4J_X1 == 0.529
model.assign("params", [0.529, -3.396, 407.29, 1.072, 16.9, 0.947])
assert model.rvp.params.GR4J_X1 == 0.529
model.assign("params", (0.529, -3.396, 407.29, 1.072, 16.9, 0.947))
assert model.rvp.params.GR4J_X1 == 0.529
def test_run(self):
ts = TESTDATA["raven-gr4j-cemaneige-nc-ts"]
model = GR4JCN()
model(
ts,
start_date=dt.datetime(2000, 1, 1),
end_date=dt.datetime(2002, 1, 1),
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=(0.529, -3.396, 407.29, 1.072, 16.9, 0.947),
suppress_output=False,
)
d = model.diagnostics
np.testing.assert_almost_equal(d["DIAG_NASH_SUTCLIFFE"], -0.117301, 2)
def test_overwrite(self):
ts = TESTDATA["raven-gr4j-cemaneige-nc-ts"]
model = GR4JCN()
model(
ts,
start_date=dt.datetime(2000, 1, 1),
end_date=dt.datetime(2002, 1, 1),
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=(0.529, -3.396, 407.29, 1.072, 16.9, 0.947),
)
assert model.rvi.suppress_output == ""
qsim1 = model.q_sim.copy(deep=True)
m1 = qsim1.mean()
model(ts, params=(0.5289, -3.397, 407.3, 1.071, 16.89, 0.948), overwrite=True)
qsim2 = model.q_sim.copy(deep=True)
m2 = qsim2.mean()
assert m1 != m2
np.testing.assert_almost_equal(m1, m2, 1)
d = model.diagnostics
np.testing.assert_almost_equal(d["DIAG_NASH_SUTCLIFFE"], -0.117315, 2)
# Set initial conditions explicitly
model(
ts,
end_date=dt.datetime(2001, 2, 1),
hru_state=HRUStateVariables(soil0=0),
overwrite=True,
)
assert model.q_sim.isel(time=1).values[0] < qsim2.isel(time=1).values[0]
def test_resume(self):
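        # Strategy: run the full period once (run_ab), run only the first half
        # (run_a), then resume from run_a's final state -- both from the live
        # model and from its saved solution file -- and check that the
        # soil-water storages match the uninterrupted run.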
ts = TESTDATA["raven-gr4j-cemaneige-nc-ts"]
model_ab = GR4JCN()
kwargs = dict(
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=(0.529, -3.396, 407.29, 1.072, 16.9, 0.947),
)
# Reference run
model_ab(
ts,
run_name="run_ab",
start_date=dt.datetime(2000, 1, 1),
end_date=dt.datetime(2001, 1, 1),
**kwargs
)
model_a = GR4JCN()
model_a(
ts,
run_name="run_a",
start_date=dt.datetime(2000, 1, 1),
end_date=dt.datetime(2000, 7, 1),
**kwargs
)
# Path to solution file from run A
rvc = model_a.outputs[
"solution"
] # <------- Richard, this is where the solution is.
# Resume with final state from live model
model_a.resume()
assert model_a.rvfiles["rvc"].content.startswith(":")
model_a(
ts,
run_name="run_2",
start_date=dt.datetime(2000, 7, 1),
end_date=dt.datetime(2001, 1, 1),
**kwargs
)
for key in ["Soil Water[0]", "Soil Water[1]"]:
np.testing.assert_array_almost_equal(
model_a.storage[1][key] - model_ab.storage[key], 0, 5
)
# Resume with final state from saved solution file
model_b = GR4JCN()
model_b.resume(
rvc
) # <--------- And this is how you feed it to a brand new model.
model_b(
ts,
run_name="run_2",
start_date=dt.datetime(2000, 7, 1),
end_date=dt.datetime(2001, 1, 1),
**kwargs
)
for key in ["Soil Water[0]", "Soil Water[1]"]:
np.testing.assert_array_almost_equal(
model_b.storage[key] - model_ab.storage[key], 0, 5
)
# model.solution loads the solution in a dictionary. I expected the variables to be identical,
# but some atmosphere related attributes are way off. Is it possible that `ATMOSPHERE` and `ATMOS_PRECIP` are
# cumulative sums of precipitation over the run ?
# assert model_b.solution == model_ab.solution # This does not work. Atmosphere attributes are off.
def test_version(self):
model = Raven()
assert model.version == "3.0"
model = GR4JCN()
assert model.version == "3.0"
def test_parallel_params(self):
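        # Passing a list of parameter sets should launch one simulation per
        # set and expose a `params` dimension of matching length in the
        # merged outputs.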
ts = TESTDATA["raven-gr4j-cemaneige-nc-ts"]
model = GR4JCN()
model(
ts,
start_date=dt.datetime(2000, 1, 1),
end_date=dt.datetime(2002, 1, 1),
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=[
(0.529, -3.396, 407.29, 1.072, 16.9, 0.947),
(0.528, -3.4, 407.3, 1.07, 17, 0.95),
],
suppress_output=False,
)
assert len(model.diagnostics) == 2
assert model.hydrograph.dims["params"] == 2
z = zipfile.ZipFile(model.outputs["rv_config"])
assert len(z.filelist) == 10
def test_parallel_basins(self, input2d):
ts = input2d
model = GR4JCN()
model(
ts,
start_date=dt.datetime(2000, 1, 1),
end_date=dt.datetime(2002, 1, 1),
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=[0.529, -3.396, 407.29, 1.072, 16.9, 0.947],
nc_index=[0, 0],
name=["basin1", "basin2"],
suppress_output=False,
)
assert len(model.diagnostics) == 2
assert len(model.hydrograph.nbasins) == 2
np.testing.assert_array_equal(
model.hydrograph.basin_name[:], ["basin1", "basin2"]
)
z = zipfile.ZipFile(model.outputs["rv_config"])
assert len(z.filelist) == 10
class TestGR4JCN_OST:
def test_simple(self):
ts = TESTDATA["ostrich-gr4j-cemaneige-nc-ts"]
model = GR4JCN_OST()
params = (0.529, -3.396, 407.29, 1.072, 16.9, 0.053)
low = (0.01, -15.0, 10.0, 0.0, 1.0, 0.0)
high = (2.5, 10.0, 700.0, 7.0, 30.0, 1.0)
model(
ts,
start_date=dt.datetime(1954, 1, 1),
duration=208,
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=params,
lowerBounds=low,
upperBounds=high,
algorithm="DDS",
random_seed=0,
max_iterations=10,
)
d = model.diagnostics
np.testing.assert_almost_equal(d["DIAG_NASH_SUTCLIFFE"], 0.50717, 4)
# Random number seed: 123
# Budget: 10
# Algorithm: DDS
# :StartDate 1954-01-01 00:00:00
# :Duration 208
opt_para = model.calibrated_params
opt_func = model.obj_func
np.testing.assert_almost_equal(
opt_para,
[2.424726, 3.758972, 204.3856, 5.866946, 16.60408, 0.3728098],
4,
err_msg="calibrated parameter set is not matching expected value",
)
np.testing.assert_almost_equal(
opt_func,
-0.50717,
4,
err_msg="calibrated NSE is not matching expected value",
)
# # Random number seed: 123
# # Budget: 50
# # Algorithm: DDS
# # :StartDate 1954-01-01 00:00:00
# # :Duration 20819
# np.testing.assert_almost_equal( opt_para, [0.3243268,3.034247,407.2890,2.722774,12.18124,0.9468769], 4,
# err_msg='calibrated parameter set is not matching expected value')
# np.testing.assert_almost_equal( opt_func, -0.5779910, 4,
# err_msg='calibrated NSE is not matching expected value')
gr4j = GR4JCN()
gr4j(
ts,
start_date=dt.datetime(1954, 1, 1),
duration=208,
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=model.calibrated_params,
)
np.testing.assert_almost_equal(
gr4j.diagnostics["DIAG_NASH_SUTCLIFFE"], d["DIAG_NASH_SUTCLIFFE"]
)
class TestHMETS:
def test_simple(self):
ts = TESTDATA["raven-hmets-nc-ts"]
model = HMETS()
params = (
9.5019,
0.2774,
6.3942,
0.6884,
1.2875,
5.4134,
2.3641,
0.0973,
0.0464,
0.1998,
0.0222,
-1.0919,
2.6851,
0.3740,
1.0000,
0.4739,
0.0114,
0.0243,
0.0069,
310.7211,
916.1947,
)
model(
ts,
start_date=dt.datetime(2000, 1, 1),
end_date=dt.datetime(2002, 1, 1),
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=params,
suppress_output=True,
)
d = model.diagnostics
np.testing.assert_almost_equal(d["DIAG_NASH_SUTCLIFFE"], -3.0132, 4)
class TestHMETS_OST:
def test_simple(self):
ts = TESTDATA["raven-hmets-nc-ts"]
model = HMETS_OST()
params = (
9.5019,
0.2774,
6.3942,
0.6884,
1.2875,
5.4134,
2.3641,
0.0973,
0.0464,
0.1998,
0.0222,
-1.0919,
2.6851,
0.3740,
1.0000,
0.4739,
0.0114,
0.0243,
0.0069,
310.7211,
916.1947,
)
low = (
0.3,
0.01,
0.5,
0.15,
0.0,
0.0,
-2.0,
0.01,
0.0,
0.01,
0.005,
-5.0,
0.0,
0.0,
0.0,
0.0,
0.00001,
0.0,
0.00001,
0.0,
0.0,
)
high = (
20.0,
5.0,
13.0,
1.5,
20.0,
20.0,
3.0,
0.2,
0.1,
0.3,
0.1,
2.0,
5.0,
1.0,
3.0,
1.0,
0.02,
0.1,
0.01,
0.5,
2.0,
)
model(
ts,
start_date=dt.datetime(1954, 1, 1),
duration=208,
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=params,
lowerBounds=low,
upperBounds=high,
algorithm="DDS",
random_seed=0,
max_iterations=10,
)
d = model.diagnostics
np.testing.assert_almost_equal(d["DIAG_NASH_SUTCLIFFE"], -1.43474, 4)
opt_para = model.optimized_parameters
opt_func = model.obj_func
# # Random number seed: 123
# # Budget: 50
# # Algorithm: DDS
# # :StartDate 1954-01-01 00:00:00
# # :Duration 20819
# np.testing.assert_almost_equal( opt_para, [0.3243268,3.034247,407.2890,2.722774,12.18124,0.9468769], 4,
# err_msg='calibrated parameter set is not matching expected value')
# np.testing.assert_almost_equal( opt_func, -0.5779910, 4,
# err_msg='calibrated NSE is not matching expected value')
#
# Random number seed: 123 #
# Budget: 10 # This is the setup used for testing:
# Algorithm: DDS # shorter sim-period and lower budget
# :StartDate 1954-01-01 00:00:00 # First tested that example below matches
# :Duration 208 #
expected_value = [
1.777842e01,
3.317211e00,
5.727342e00,
1.419491e00,
1.382141e01,
1.637954e01,
7.166296e-01,
1.389346e-01,
2.620464e-02,
2.245525e-01,
2.839426e-02,
-2.003810e00,
9.479623e-01,
4.803857e-01,
2.524914e00,
4.117232e-01,
1.950058e-02,
4.494123e-02,
1.405815e-03,
2.815803e-02,
1.007823e00,
]
np.testing.assert_almost_equal(
opt_para,
expected_value,
4,
err_msg="calibrated parameter set is not matching expected value",
)
np.testing.assert_almost_equal(
opt_func,
1.43474,
4,
err_msg="calibrated NSE is not matching expected value",
)
# # Random number seed: 123 #
# # Budget: 50 # This is the setup in the Wiki:
# # Algorithm: DDS # https://github.com/Ouranosinc/raven/wiki/
# # :StartDate 1954-01-01 00:00:00 # Technical-Notes#example-setups-for-hmets
# # :Duration 20819 #
# np.testing.assert_almost_equal(opt_para, [5.008045E+00, 7.960246E-02, 4.332698E+00, 4.978125E-01,
# 1.997029E+00, 6.269773E-01, 1.516961E+00, 8.180383E-02,
# 6.730663E-02, 2.137822E-02, 2.097163E-02, 1.773348E+00,
# 3.036039E-01, 1.928524E-02, 1.758471E+00, 8.942299E-01,
# 8.741980E-03, 5.036474E-02, 9.465804E-03, 1.851839E-01,
# 1.653934E-01, 2.624006E+00, 8.868485E-02, 9.259195E+01,
# 8.269670E+01], 4,
# err_msg='calibrated parameter set is not matching expected value')
# np.testing.assert_almost_equal(opt_func, -6.350490E-01, 4,
# err_msg='calibrated NSE is not matching expected value')
hmets = HMETS()
hmets(
ts,
start_date=dt.datetime(1954, 1, 1),
duration=208,
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=model.calibrated_params,
)
np.testing.assert_almost_equal(
hmets.diagnostics["DIAG_NASH_SUTCLIFFE"], d["DIAG_NASH_SUTCLIFFE"], 4
)
class TestMOHYSE:
def test_simple(self):
ts = TESTDATA["raven-mohyse-nc-ts"]
model = MOHYSE()
params = (
1.0,
0.0468,
4.2952,
2.658,
0.4038,
0.0621,
0.0273,
0.0453,
0.9039,
5.6167,
)
model(
ts,
start_date=dt.datetime(2000, 1, 1),
end_date=dt.datetime(2002, 1, 1),
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=params,
suppress_output=True,
)
d = model.diagnostics
np.testing.assert_almost_equal(d["DIAG_NASH_SUTCLIFFE"], 0.194612, 4)
class TestMOHYSE_OST:
def test_simple(self):
ts = TESTDATA["ostrich-mohyse-nc-ts"]
model = MOHYSE_OST()
params = (
1.0,
0.0468,
4.2952,
2.658,
0.4038,
0.0621,
0.0273,
0.0453,
0.9039,
5.6167,
)
low_p = (0.01, 0.01, 0.01, -5.00, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01)
high_p = (20.0, 1.0, 20.0, 5.0, 0.5, 1.0, 1.0, 1.0, 15.0, 15.0)
model(
ts,
start_date=dt.datetime(1954, 1, 1),
duration=208,
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=params,
lowerBounds=low_p,
upperBounds=high_p,
algorithm="DDS",
random_seed=0,
max_iterations=10,
)
d = model.diagnostics
np.testing.assert_almost_equal(d["DIAG_NASH_SUTCLIFFE"], 0.3826810, 4)
opt_para = model.optimized_parameters
opt_func = model.obj_func
# # Random number seed: 123
# # Budget: 50
# # Algorithm: DDS
# # :StartDate 1954-01-01 00:00:00
# # :Duration 20819
# np.testing.assert_almost_equal( opt_para, [0.3243268,3.034247,407.2890,2.722774,12.18124,0.9468769], 4,
# err_msg='calibrated parameter set is not matching expected value')
# np.testing.assert_almost_equal( opt_func, -0.5779910, 4,
# err_msg='calibrated NSE is not matching expected value')
#
# Random number seed: 123 #
# Budget: 10 # This is the setup used for testing:
# Algorithm: DDS # shorter sim-period and lower budget
# :StartDate 1954-01-01 00:00:00 # First tested that example below matches
# :Duration 208 #
np.testing.assert_almost_equal(
opt_para,
[
7.721801e00,
8.551484e-01,
1.774571e01,
1.627677e00,
7.702450e-02,
9.409600e-01,
6.941596e-01,
8.207870e-01,
8.154455e00,
1.018226e01,
],
4,
err_msg="calibrated parameter set is not matching expected value",
)
np.testing.assert_almost_equal(
opt_func,
-0.3826810,
4,
err_msg="calibrated NSE is not matching expected value",
)
# # Random number seed: 123 #
# # Budget: 50 # This is the setup in the Wiki:
# # Algorithm: DDS # https://github.com/Ouranosinc/raven/wiki/
# # :StartDate 1954-01-01 00:00:00 # Technical-Notes#example-setups-for-mohyse
# # :Duration 20819 #
# np.testing.assert_almost_equal(opt_para, [1.517286E+01, 7.112556E-01, 1.981243E+01, -4.193046E+00,
# 1.791486E-01, 9.774897E-01, 5.353541E-01, 6.686806E-01,
# 1.040908E+01, 1.132304E+01, 8.831552E-02], 4,
# err_msg='calibrated parameter set is not matching expected value')
# np.testing.assert_almost_equal(opt_func, -0.3857010, 4,
# err_msg='calibrated NSE is not matching expected value')
mohyse = MOHYSE()
mohyse(
ts,
start_date=dt.datetime(1954, 1, 1),
duration=208,
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=model.calibrated_params,
)
np.testing.assert_almost_equal(
mohyse.diagnostics["DIAG_NASH_SUTCLIFFE"], d["DIAG_NASH_SUTCLIFFE"], 4
)
class TestHBVEC:
def test_simple(self):
ts = TESTDATA["raven-hbv-ec-nc-ts"]
model = HBVEC()
params = (
0.05984519,
4.072232,
2.001574,
0.03473693,
0.09985144,
0.506052,
3.438486,
38.32455,
0.4606565,
0.06303738,
2.277781,
4.873686,
0.5718813,
0.04505643,
0.877607,
18.94145,
2.036937,
0.4452843,
0.6771759,
1.141608,
1.024278,
)
model(
ts,
start_date=dt.datetime(2000, 1, 1),
end_date=dt.datetime(2002, 1, 1),
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=params,
suppress_output=True,
)
d = model.diagnostics
np.testing.assert_almost_equal(d["DIAG_NASH_SUTCLIFFE"], 0.0186633, 4)
def test_evap(self):
ts = TESTDATA["raven-hbv-ec-nc-ts"]
model = HBVEC()
params = (
0.05984519,
4.072232,
2.001574,
0.03473693,
0.09985144,
0.506052,
3.438486,
38.32455,
0.4606565,
0.06303738,
2.277781,
4.873686,
0.5718813,
0.04505643,
0.877607,
18.94145,
2.036937,
0.4452843,
0.6771759,
1.141608,
1.024278,
)
model(
ts,
start_date=dt.datetime(2000, 1, 1),
end_date=dt.datetime(2002, 1, 1),
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=params,
suppress_output=True,
evaporation="PET_OUDIN",
ow_evaporation="PET_OUDIN",
)
class TestHBVEC_OST:
def test_simple(self):
ts = TESTDATA["ostrich-hbv-ec-nc-ts"]
model = HBVEC_OST()
params = (
0.05984519,
4.072232,
2.001574,
0.03473693,
0.09985144,
0.506052,
3.438486,
38.32455,
0.4606565,
0.06303738,
2.277781,
4.873686,
0.5718813,
0.04505643,
0.877607,
18.94145,
2.036937,
0.4452843,
0.6771759,
1.141608,
1.024278,
)
low = (
-3.0,
0.0,
0.0,
0.0,
0.0,
0.3,
0.0,
0.0,
0.01,
0.05,
0.01,
0.0,
0.0,
0.0,
0.0,
0.0,
0.01,
0.0,
0.05,
0.8,
0.8,
)
high = (
3.0,
8.0,
8.0,
0.1,
1.0,
1.0,
7.0,
100.0,
1.0,
0.1,
6.0,
5.0,
5.0,
0.2,
1.0,
30.0,
3.0,
2.0,
1.0,
1.5,
1.5,
)
model(
ts,
start_date=dt.datetime(1954, 1, 1),
duration=208,
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=params,
lowerBounds=low,
upperBounds=high,
algorithm="DDS",
random_seed=0,
max_iterations=10,
)
d = model.diagnostics
np.testing.assert_almost_equal(d["DIAG_NASH_SUTCLIFFE"], -2.25991e-01, 4)
opt_para = model.calibrated_params
opt_func = model.obj_func
# Random number seed: 123 #
# Budget: 10 # This is the setup used for testing:
# Algorithm: DDS # shorter sim-period and lower budget
# :StartDate 1954-01-01 00:00:00 # First tested that example below matches
# :Duration 208 #
np.testing.assert_almost_equal(
opt_para,
[
-8.317931e-01,
4.072232e00,
2.001574e00,
5.736299e-03,
9.985144e-02,
4.422529e-01,
3.438486e00,
8.055843e01,
4.440133e-01,
8.451082e-02,
2.814201e00,
7.327970e-01,
1.119773e00,
1.161223e-03,
4.597179e-01,
1.545857e01,
1.223865e00,
4.452843e-01,
9.492006e-01,
9.948123e-01,
1.110682e00,
],
4,
err_msg="calibrated parameter set is not matching expected value",
)
np.testing.assert_almost_equal(
opt_func,
2.25991e-01,
4,
err_msg="calibrated NSE is not matching expected value",
)
# # Random number seed: 123 #
# # Budget: 50 # This is the setup in the Wiki:
# # Algorithm: DDS # https://github.com/Ouranosinc/raven/wiki/
# # :StartDate 1954-01-01 00:00:00 # Technical-Notes#example-setups-for-environment-
# # :Duration 20819 #
# np.testing.assert_almost_equal(opt_para, [5.984519E-02, 4.072232E+00, 2.001574E+00, 3.473693E-02,
# 9.985144E-02, 5.060520E-01, 2.944343E+00, 3.832455E+01,
# 4.606565E-01, 6.303738E-02, 2.277781E+00, 4.873686E+00,
# 5.718813E-01, 4.505643E-02, 8.776511E-01, 1.894145E+01,
# 2.036937E+00, 4.452843E-01, 6.771759E-01, 1.206053E+00,
# 1.024278E+00], 4,
# err_msg='calibrated parameter set is not matching expected value')
# np.testing.assert_almost_equal(opt_func, -6.034670E-01, 4,
# err_msg='calibrated NSE is not matching expected value')
hbvec = HBVEC()
hbvec(
ts,
start_date=dt.datetime(1954, 1, 1),
duration=208,
area=4250.6,
elevation=843.0,
latitude=54.4848,
longitude=-123.3659,
params=model.calibrated_params,
)
np.testing.assert_almost_equal(
hbvec.diagnostics["DIAG_NASH_SUTCLIFFE"], d["DIAG_NASH_SUTCLIFFE"], 4
)
| 30.670397 | 117 | 0.459584 |
20d0a75cedcf930ff48b44af369ca72d6a397c09
| 1,125 |
py
|
Python
|
app/movieapi/core/tests/tests_admin.py
|
joelpenov/moviestore
|
96815371d45852cdfb7750095ed842aeaff907b7
|
[
"MIT"
] | null | null | null |
app/movieapi/core/tests/tests_admin.py
|
joelpenov/moviestore
|
96815371d45852cdfb7750095ed842aeaff907b7
|
[
"MIT"
] | null | null | null |
app/movieapi/core/tests/tests_admin.py
|
joelpenov/moviestore
|
96815371d45852cdfb7750095ed842aeaff907b7
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
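        # Create and log in a superuser for the admin client, plus a regular
        # user whose name/email should appear in the changelist.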
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser("[email protected]", "superpass123")
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(email="[email protected]", password="sample234", name="Test User Name")
def test_users_listed(self):
url = reverse("admin:core_user_changelist")
response = self.client.get(url)
self.assertContains(response, self.user.name)
self.assertContains(response, self.user.email)
def test_user_page_works_as_expected(self):
url = reverse("admin:core_user_change", args=[self.user.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_create_user_page_works_as_expected(self):
url = reverse("admin:core_user_add")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
| 36.290323 | 126 | 0.709333 |
6559fc4c24ce283e333fb7795dff798d570cd0c6
| 219,150 |
py
|
Python
|
pandas/core/frame.py
|
AllenDowney/pandas
|
4875a3dcc23fac851627c0c6b93ded9d6b1aca5a
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | 4 |
2016-10-05T17:38:58.000Z
|
2020-08-24T16:26:37.000Z
|
pandas/core/frame.py
|
AllenDowney/pandas
|
4875a3dcc23fac851627c0c6b93ded9d6b1aca5a
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | null | null | null |
pandas/core/frame.py
|
AllenDowney/pandas
|
4875a3dcc23fac851627c0c6b93ded9d6b1aca5a
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | 12 |
2017-05-23T06:01:12.000Z
|
2021-08-16T05:09:46.000Z
|
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0212,W0231,W0703,W0622
import functools
import collections
import itertools
import sys
import types
import warnings
from textwrap import dedent
from numpy import nan as NA
import numpy as np
import numpy.ma as ma
from pandas.core.dtypes.cast import (
maybe_upcast, infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_infer_to_datetimelike,
maybe_convert_platform,
maybe_downcast_to_dtype,
invalidate_string_dtypes,
coerce_to_dtypes,
maybe_upcast_putmask,
find_common_type)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_object_dtype,
is_extension_type,
is_datetimetz,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_bool_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_scalar,
is_dtype_equal,
needs_i8_conversion,
_get_dtype_from_object,
_ensure_float,
_ensure_float64,
_ensure_int64,
_ensure_platform_int,
is_list_like,
is_iterator,
is_sequence,
is_named_tuple)
from pandas.core.dtypes.missing import isnull, notnull
from pandas.core.common import (_try_sort,
_default_index,
_values_from_object,
_maybe_box_datetimelike,
_dict_compat,
standardize_mapping)
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.categorical import Categorical
import pandas.core.computation.expressions as expressions
import pandas.core.algorithms as algorithms
from pandas.core.computation.eval import eval as _eval
from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback)
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution
from pandas.util._validators import validate_bool_kwarg
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
import pandas.core.base as base
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.core.ops as ops
import pandas.io.formats.format as fmt
import pandas.io.formats.console as console
from pandas.io.formats.printing import pprint_thing
import pandas.plotting._core as gfx
from pandas._libs import lib, algos as libalgos
from pandas.core.config import get_option
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes='index, columns', klass='DataFrame',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel='')
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame objects by performing a database-style join operation by
columns or indexes.
If joining columns on columns, the DataFrame indexes *will be
ignored*. Otherwise if joining indexes on indexes or indexes on a column or
columns, the index will be passed on.
Parameters
----------%s
right : DataFrame
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys
on : label or list
Field names to join on. Must be found in both DataFrames. If on is
None and not merging on indexes, then it merges on the intersection of
the columns by default.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_index : boolean, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels
right_index : boolean, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index
sort : boolean, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword)
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
copy : boolean, default True
If False, do not copy data unnecessarily
indicator : boolean or string, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
.. versionadded:: 0.17.0
validate : string, default None
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
.. versionadded:: 0.21.0
Examples
--------
>>> A >>> B
lkey value rkey value
0 foo 1 0 foo 5
1 bar 2 1 bar 6
2 baz 3 2 qux 7
3 foo 4 3 bar 8
>>> A.merge(B, left_on='lkey', right_on='rkey', how='outer')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 4 foo 5
2 bar 2 bar 6
3 bar 2 bar 8
4 baz 3 NaN NaN
5 NaN NaN qux 7
Returns
-------
merged : DataFrame
    The output type will be the same as 'left', if it is a subclass
of DataFrame.
See also
--------
merge_ordered
merge_asof
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
index : Index or array-like
Index to use for resulting frame. Will default to np.arange(n) if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
np.arange(n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2
a b c d e
0 2 8 8 3 4
1 4 2 9 0 9
2 1 0 7 8 0
3 5 1 7 1 3
4 6 0 2 4 2
See also
--------
DataFrame.from_records : constructor from tuples, also record arrays
DataFrame.from_dict : from dicts of Series, arrays, or dicts
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
@property
def _constructor_expanddim(self):
from pandas.core.panel import Panel
return Panel
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,
copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = dict((k, data[k]) for k in data_columns)
if columns is None:
columns = data_columns
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif getattr(data, 'name', None) is not None:
mgr = self._init_dict({data.name: data}, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (list, types.GeneratorType)):
if isinstance(data, types.GeneratorType):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = _to_arrays(data, columns, dtype=dtype)
columns = _ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = _default_index(len(data[0]))
else:
index = _default_index(len(data))
mgr = _arrays_to_mgr(arrays, columns, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
else:
mgr = self._init_dict({}, index, columns, dtype=dtype)
elif isinstance(data, collections.Iterator):
raise TypeError("data argument can't be an iterator")
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError('DataFrame constructor called with '
'incompatible data and dtype: %s' % e)
raise_with_traceback(exc)
if arr.ndim == 0 and index is not None and columns is not None:
if isinstance(data, compat.string_types) and dtype is None:
dtype = np.object_
if dtype is None:
dtype, data = infer_dtype_from_scalar(data)
values = np.empty((len(index), len(columns)), dtype=dtype)
values.fill(data)
mgr = self._init_ndarray(values, index, columns, dtype=dtype,
copy=False)
else:
raise ValueError('DataFrame constructor not properly called!')
NDFrame.__init__(self, mgr, fastpath=True)
def _init_dict(self, data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
columns = _ensure_index(columns)
# GH10856
# raise ValueError if only scalars in dict
if index is None:
extract_index(list(data.values()))
# prefilter if columns passed
data = dict((k, v) for k, v in compat.iteritems(data)
if k in columns)
if index is None:
index = extract_index(list(data.values()))
else:
index = _ensure_index(index)
arrays = []
data_names = []
for k in columns:
if k not in data:
# no obvious "empty" int column
if dtype is not None and issubclass(dtype.type,
np.integer):
continue
if dtype is None:
# 1783
v = np.empty(len(index), dtype=object)
elif np.issubdtype(dtype, np.flexible):
v = np.empty(len(index), dtype=object)
else:
v = np.empty(len(index), dtype=dtype)
v.fill(NA)
else:
v = data[k]
data_names.append(k)
arrays.append(v)
else:
keys = list(data.keys())
if not isinstance(data, OrderedDict):
keys = _try_sort(keys)
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def _init_ndarray(self, values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, Series):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# helper to create the axes as indexes
def _get_axes(N, K, index=index, columns=columns):
# return axes or defaults
if index is None:
index = _default_index(N)
else:
index = _ensure_index(index)
if columns is None:
columns = _default_index(K)
else:
columns = _ensure_index(columns)
return index, columns
# we could have a categorical type passed or coerced to 'category'
# recast this to an _arrays_to_mgr
if (is_categorical_dtype(getattr(values, 'dtype', None)) or
is_categorical_dtype(dtype)):
if not hasattr(values, 'dtype'):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1)
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
elif is_datetimetz(values):
return self._init_dict({0: values}, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None:
if values.dtype != dtype:
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError("failed to cast to '%s' (Exception was: %s)"
% (dtype, orig))
raise_with_traceback(e)
index, columns = _get_axes(*values.shape)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
values = maybe_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
@property
def axes(self):
"""
Return a list with the row axis labels and column axis labels as the
only members. They are returned in that order.
"""
return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
"""
return len(self.index), len(self.columns)
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
        options width and max_columns. In case of a non-interactive session, no
boundaries apply.
        ignore_width is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
((not ignore_width) and width and nb_columns > (width // 2))):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not com.in_interactive_session():
return True
if (get_option('display.width') is not None or
com.in_ipython_frontend()):
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
        # check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if not (max_rows is None): # unlimited rows
# min of two, where one may be None
d = d.iloc[:min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max([len(l) for l in value.split('\n')])
return repr_width < width
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (self._repr_fits_horizontal_() and
self._repr_fits_vertical_())
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for
# IPython 2.x is no longer needed.
if com.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'<', 1)
val = val.replace('>', r'>', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None
@property
def style(self):
"""
Property returning a Styler object containing methods for
        building a styled HTML representation of the DataFrame.
See Also
--------
pandas.io.formats.style.Styler
"""
from pandas.io.formats.style import Styler
return Styler(self)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
itertuples : Iterate over DataFrame rows as namedtuples of the values.
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
See also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples, with index value as first
element of the tuple.
Parameters
----------
index : boolean, default True
If True, return the index as the first element of the tuple.
name : string, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
iteritems : Iterate over (column name, Series) pairs.
Examples
--------
        >>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]},
        ...                   index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
b 2 0.2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='a', col1=1, col2=0.10000000000000001)
Pandas(Index='b', col1=2, col2=0.20000000000000001)
"""
arrays = []
fields = []
if index:
arrays.append(self.index)
fields.append("Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor, and
# things get slow with this many fields in Python 2
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
itertuple = collections.namedtuple(name,
fields + list(self.columns),
rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
# fallback to regular tuples
return zip(*arrays)
if compat.PY3: # pragma: no cover
items = iteritems
def __len__(self):
"""Returns length of info axis, but here we use the index """
return len(self.index)
def dot(self, other):
"""
Matrix multiplication with DataFrame or Series objects
Parameters
----------
other : DataFrame or Series
Returns
-------
dot_product : DataFrame or Series
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None):
"""
Construct DataFrame from dict of array-like or dicts
Parameters
----------
data : dict
{field : array-like} or {field : dict}
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
DataFrame
"""
index, columns = None, None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient != 'columns': # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
def to_dict(self, orient='dict', into=dict):
"""Convert DataFrame to dictionary.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- dict (default) : dict like {column -> {index -> value}}
- list : dict like {column -> [values]}
- series : dict like {column -> Series(values)}
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
.. versionadded:: 0.17.0
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
result : collections.Mapping like {column -> {index -> value}}
Examples
--------
        >>> df = pd.DataFrame(
        ...     {'col1': [1, 2], 'col2': [0.5, 0.75]}, index=['a', 'b'])
        >>> df
           col1  col2
        a     1  0.50
        b     2  0.75
>>> df.to_dict()
{'col1': {'a': 1, 'b': 2}, 'col2': {'a': 0.5, 'b': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': a 1
b 2
Name: col1, dtype: int64, 'col2': a 0.50
b 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'columns': ['col1', 'col2'],
'data': [[1.0, 0.5], [2.0, 0.75]],
'index': ['a', 'b']}
>>> df.to_dict('records')
[{'col1': 1.0, 'col2': 0.5}, {'col1': 2.0, 'col2': 0.75}]
>>> df.to_dict('index')
{'a': {'col1': 1.0, 'col2': 0.5}, 'b': {'col1': 2.0, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('a', 1), ('b', 2)])),
('col2', OrderedDict([('a', 0.5), ('b', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<type 'list'>, {'col2': 0.5, 'col1': 1.0}),
defaultdict(<type 'list'>, {'col2': 0.75, 'col1': 2.0})]
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning)
# GH16122
into_c = standardize_mapping(into)
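        # The orient argument is matched by prefix below: 'd' -> dict,
        # 'l' -> list, 'sp' -> split, 's' -> series, 'r' -> records,
        # 'i' -> index.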
if orient.lower().startswith('d'):
return into_c(
(k, v.to_dict(into)) for k, v in compat.iteritems(self))
elif orient.lower().startswith('l'):
return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('sp'):
return into_c((('index', self.index.tolist()),
('columns', self.columns.tolist()),
('data', lib.map_infer(self.values.ravel(),
_maybe_box_datetimelike)
.reshape(self.values.shape).tolist())))
elif orient.lower().startswith('s'):
return into_c((k, _maybe_box_datetimelike(v))
for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
return [into_c((k, _maybe_box_datetimelike(v))
for k, v in zip(self.columns, row))
for row in self.values]
elif orient.lower().startswith('i'):
return into_c((k, v.to_dict(into)) for k, v in self.iterrows())
else:
raise ValueError("orient '%s' not understood" % orient)
def to_gbq(self, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail', private_key=None):
"""Write a DataFrame to a Google BigQuery table.
The main method a user calls to export pandas DataFrame contents to
Google BigQuery table.
Google BigQuery API Client Library v2 for Python is used.
Documentation is available `here
<https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__
Authentication to the Google BigQuery service is via OAuth 2.0.
- If "private_key" is not provided:
By default "application default credentials" are used.
If default application credentials are not found or are restrictive,
user account credentials are used. In this case, you will be asked to
grant permissions for product name 'pandas GBQ'.
- If "private_key" is provided:
Service account credentials will be used to authenticate.
Parameters
----------
dataframe : DataFrame
DataFrame to be written
destination_table : string
Name of table to be written, in the form 'dataset.tablename'
project_id : str
Google BigQuery Account project ID.
chunksize : int (default 10000)
Number of rows to be inserted in each chunk from the dataframe.
verbose : boolean (default True)
Show percentage complete
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
'fail': If table exists, do nothing.
'replace': If table exists, drop it, recreate it, and insert data.
'append': If table exists, insert data. Create if does not exist.
private_key : str (optional)
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. jupyter iPython notebook on remote host)
"""
from pandas.io import gbq
return gbq.to_gbq(self, destination_table, project_id=project_id,
chunksize=chunksize, verbose=verbose, reauth=reauth,
if_exists=if_exists, private_key=private_key)
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
Returns
-------
df : DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = _ensure_index(columns)
if is_iterator(data):
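            # Materialize the iterator up front (at most `nrows` rows when
            # given) so the records can be inspected for a structured dtype.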
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = _ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in compat.iteritems(data):
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = _reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = _to_arrays(data, columns)
if columns is not None:
columns = _ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = _to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = _ensure_index(arr_columns)
if columns is not None:
columns = _ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, compat.string_types) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
to_remove = [arr_columns.get_loc(field) for field in index]
result_index = MultiIndex.from_arrays(
[arrays[i] for i in to_remove], names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
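    # Usage sketch for from_records with a structured array or a list of
    # tuples, as described in the docstring above; field names are
    # illustrative.
    #
    #   import numpy as np
    #   arr = np.array([(1, 2.0), (2, 4.0)],
    #                  dtype=[('idx', 'i4'), ('val', 'f8')])
    #   DataFrame.from_records(arr, index='idx')   # 'idx' becomes the index
    #   DataFrame.from_records([(1, 2.0), (2, 4.0)], columns=['idx', 'val'])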
def to_records(self, index=True, convert_datetime64=True):
"""
Convert DataFrame to record array. Index will be put in the
'index' field of the record array if requested
Parameters
----------
index : boolean, default True
Include index in resulting record array, stored in 'index' field
convert_datetime64 : boolean, default True
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex
Returns
-------
y : recarray
"""
if index:
if is_datetime64_any_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = (lmap(compat.text_type, index_names) +
lmap(compat.text_type, self.columns))
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(compat.text_type, self.columns)
formats = [v.dtype for v in arrays]
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}
)
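    # Usage sketch for to_records (labels are illustrative): the index is
    # stored in the first field unless index=False is passed.
    #
    #   df = DataFrame({'a': [1, 2]}, index=Index(['x', 'y'], name='key'))
    #   df.to_records()             # recarray fields: ('key', 'a')
    #   df.to_records(index=False)  # recarray fields: ('a',)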
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
"""
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
frame : DataFrame
"""
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = _ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(_ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = _ensure_index(keys)
arrays = values
return cls._from_arrays(arrays, columns, None)
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = _ensure_index(keys)
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'")
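    # Usage sketch for from_items with both orientations (labels are
    # illustrative); orient='index' requires an explicit columns argument.
    #
    #   DataFrame.from_items([('A', [1, 2]), ('B', [3, 4])])
    #   DataFrame.from_items([('r0', [1, 2]), ('r1', [3, 4])],
    #                        orient='index', columns=['c0', 'c1'])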
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=False,
infer_datetime_format=False):
"""
Read CSV file (DISCOURAGED, please use :func:`pandas.read_csv`
instead).
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`pandas.read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
            write MultiIndex columns as a list of tuples (if True) or in the
            new, expanded format (if False)
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
See also
--------
pandas.read_csv
Returns
-------
y : DataFrame
"""
from pandas.io.parsers import read_table
return read_table(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
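    # Usage sketch: the from_csv call below (the path is a placeholder) is
    # equivalent to the preferred read_csv call with the defaults noted in the
    # docstring above.
    #
    #   import pandas as pd
    #   DataFrame.from_csv('data.csv')
    #   pd.read_csv('data.csv', index_col=0, parse_dates=True)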
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame
Parameters
----------
fill_value : float, default NaN
kind : {'block', 'integer'}
Returns
-------
y : SparseDataFrame
"""
from pandas.core.sparse.frame import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value)
def to_panel(self):
"""
Transform long (stacked) format (DataFrame) into wide (3D, Panel)
format.
Currently the index of the DataFrame must be a 2-level MultiIndex. This
may be generalized later
Returns
-------
panel : Panel
"""
# only support this kind for now
if (not isinstance(self.index, MultiIndex) or # pragma: no cover
len(self.index.levels) != 2):
raise NotImplementedError('Only 2-level MultiIndex are supported.')
if not self.index.is_unique:
raise ValueError("Can't convert non-uniquely indexed "
"DataFrame to Panel")
self._consolidate_inplace()
# minor axis must be sorted
if self.index.lexsort_depth < 2:
selfsorted = self.sort_index(level=0)
else:
selfsorted = self
major_axis, minor_axis = selfsorted.index.levels
major_labels, minor_labels = selfsorted.index.labels
shape = len(major_axis), len(minor_axis)
# preserve names, if any
major_axis = major_axis.copy()
major_axis.name = self.index.names[0]
minor_axis = minor_axis.copy()
minor_axis.name = self.index.names[1]
# create new axes
new_axes = [selfsorted.columns, major_axis, minor_axis]
# create new manager
new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
labels=[major_labels,
minor_labels],
shape=shape,
ref_items=selfsorted.columns)
return self._constructor_expanddim(new_mgr)
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression=None, quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=False, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
r"""Write DataFrame to a comma-separated values (csv) file
Parameters
----------
path_or_buf : string or file handle, default None
File path or object, if None is provided the result is returned as
a string.
sep : character, default ','
Field delimiter for the output file.
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is assumed
to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
mode : str
Python write mode, default 'w'
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
a string representing the compression to use in the output file,
allowed values are 'gzip', 'bz2', 'xz',
only used when the first argument is a filename
line_terminator : string, default ``'\n'``
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
Control quoting of `quotechar` inside a field
escapechar : string (length 1), default None
character used to escape `sep` and `quotechar` when appropriate
chunksize : int or None
rows to write at a time
tupleize_cols : boolean, default False
            write MultiIndex columns as a list of tuples (if True) or in the
            new, expanded format (if False)
date_format : string, default None
Format string for datetime objects
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
.. versionadded:: 0.16.0
"""
formatter = fmt.CSVFormatter(self, path_or_buf,
line_terminator=line_terminator, sep=sep,
encoding=encoding,
compression=compression, quoting=quoting,
na_rep=na_rep, float_format=float_format,
cols=columns, header=header, index=index,
index_label=index_label, mode=mode,
chunksize=chunksize, quotechar=quotechar,
tupleize_cols=tupleize_cols,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar, decimal=decimal)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
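    # Usage sketch for to_csv (the file name is a placeholder): writing to a
    # file versus getting the CSV text back when path_or_buf is None.
    #
    #   df.to_csv('out.csv', sep=';', na_rep='NA', index=False)
    #   csv_text = df.to_csv()   # no path given, so the CSV string is returned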
@Appender(_shared_docs['to_excel'] % _shared_doc_kwargs)
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
"""
A class for writing Stata binary dta files from array-like objects
Parameters
----------
fname : str or buffer
String path of file-like object
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
            internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
        data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
            * Columns listed in convert_dates are neither datetime64[ns]
              nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
Examples
--------
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
>>> writer.write_file()
"""
from pandas.io.stata import StataWriter
writer = StataWriter(fname, self, convert_dates=convert_dates,
encoding=encoding, byteorder=byteorder,
time_stamp=time_stamp, data_label=data_label,
write_index=write_index,
variable_labels=variable_labels)
writer.write_file()
def to_feather(self, fname):
"""
write out the binary feather-format for DataFrames
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname)
@Substitution(header='Write out column names. If a list of string is given, \
it is assumed to be aliases for the column names')
@Appender(fmt.docstring_to_string, indents=1)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
"""
Render a DataFrame to a console-friendly tabular output.
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
line_width=line_width,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result
@Substitution(header='whether to print column labels, default True')
@Appender(fmt.docstring_to_string, indents=1)
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None, bold_rows=True,
classes=None, escape=True, max_rows=None, max_cols=None,
show_dimensions=False, notebook=False, decimal='.',
border=None):
"""
Render a DataFrame as an HTML table.
`to_html`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
            Convert the characters <, >, and & to HTML-safe sequences.
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
max_cols : int, optional
Maximum number of columns to show before truncating. If None, show
all.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe
.. versionadded:: 0.18.0
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal)
        # TODO: a generic formatter would be in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue()
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Concise summary of a DataFrame.
Parameters
----------
verbose : {None, True, False}, optional
Whether to print the full summary.
None follows the `display.max_info_columns` setting.
True or False overrides the `display.max_info_columns` setting.
buf : writable buffer, defaults to sys.stdout
max_cols : int, default None
Determines whether full summary or short summary is printed.
None follows the `display.max_info_columns` setting.
memory_usage : boolean/string, default None
Specifies whether total memory usage of the DataFrame
elements (including index) should be displayed. None follows
the `display.memory_usage` setting. True or False overrides
the `display.memory_usage` setting. A value of 'deep' is equivalent
of True, with deep introspection. Memory usage is shown in
human-readable units (base-2 representation).
null_counts : boolean, default None
Whether to show the non-null counts
- If None, then only show if the frame is smaller than
max_info_rows and max_info_columns.
- If True, always show counts.
- If False, never show counts.
"""
from pandas.io.formats.format import _put_lines
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index.summary())
if len(self.columns) == 0:
lines.append('Empty %s' % type(self).__name__)
_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max([len(pprint_thing(k)) for k in self.columns]) + 4
counts = None
tmpl = "%s%s"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError('Columns must equal counts (%d != %d)'
% (len(cols), len(counts)))
tmpl = "%s non-null %s"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl % (count, dtype))
def _non_verbose_repr():
lines.append(self.columns.summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f%s %s" % (num, size_qualifier, x)
num /= 1024.0
return "%3.1f%s %s" % (num, size_qualifier, 'PB')
if verbose:
_verbose_repr()
elif verbose is False: # specifically set to False, not nesc None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))]
lines.append('dtypes: %s' % ', '.join(dtypes))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if ('object' in counts or
self.index._is_memory_usage_qualified()):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: %s\n" %
_sizeof_fmt(mem_usage, size_qualifier))
_put_lines(buf, lines)
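    # Usage sketch for info: verbosity and memory reporting follow the display
    # options unless overridden explicitly.
    #
    #   df.info()                                    # follows display options
    #   df.info(verbose=True, memory_usage='deep')   # full, deep summary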
def memory_usage(self, index=True, deep=False):
"""Memory usage of DataFrame columns.
Parameters
----------
index : bool
            Specifies whether to include the memory usage of the DataFrame's
            index in the returned Series. If ``index=True`` (the default), the
            first entry of the returned Series is the memory usage of the
            index, labelled ``'Index'``.
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
sizes : Series
A series with column names as index and memory usage of
columns with units of bytes.
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result
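    # Usage sketch for memory_usage: per-column byte counts, with an 'Index'
    # entry prepended when index=True.
    #
    #   df.memory_usage()                               # includes 'Index'
    #   df.memory_usage(index=False, deep=True).sum()   # total data bytes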
def transpose(self, *args, **kwargs):
"""Transpose index and columns"""
nv.validate_transpose(args, dict())
return super(DataFrame, self).transpose(1, 0, **kwargs)
T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = _unpickle_array(cols)
index = _unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
# old unpickling
(vals, idx, cols), object_state = state
index = _unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=_unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
columns=_unpickle_array(ocols), copy=False)
dm = dm.join(objects)
self._data = dm._data
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
if takeable:
series = self._iget_item_cache(col)
return _maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except (TypeError, ValueError):
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self.get_value(index, col, takeable=True)
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Returns
-------
frame : DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object
"""
try:
if takeable is True:
series = self._iget_item_cache(col)
return series.set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
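    # Usage sketch for get_value/set_value (labels are illustrative); takeable
    # switches both methods to positional indexing.
    #
    #   df = DataFrame({'a': [1, 2]}, index=['x', 'y'])
    #   df.get_value('y', 'a')              # -> 2
    #   df.set_value('y', 'a', 10)          # updates the cell, returns df
    #   df.get_value(1, 0, takeable=True)   # positional equivalent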
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
# irow
if axis == 0:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self.take(i, axis=1, convert=True)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (iow a not found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._constructor_sliced.from_array(values,
index=self.index,
name=label,
fastpath=True)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
# shortcut if we are an actual column
is_mi_columns = isinstance(self.columns, MultiIndex)
try:
if key in self.columns and not is_mi_columns:
return self._getitem_column(key)
except:
pass
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._getitem_slice(indexer)
if isinstance(key, (Series, np.ndarray, Index, list)):
# either boolean or fancy integer index
return self._getitem_array(key)
elif isinstance(key, DataFrame):
return self._getitem_frame(key)
elif is_mi_columns:
return self._getitem_multilevel(key)
else:
return self._getitem_column(key)
def _getitem_column(self, key):
""" return the actual column """
# get column
if self.columns.is_unique:
return self._get_item_cache(key)
# duplicate columns & possible reduce dimensionality
result = self._constructor(self._data.get(key))
if result.columns.is_unique:
result = result[key]
return result
def _getitem_slice(self, key):
return self._slice(key, axis=0)
def _getitem_array(self, key):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning, stacklevel=3)
elif len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d.' %
(len(key), len(self.index)))
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self.take(indexer, axis=0, convert=False)
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
return self.take(indexer, axis=1, convert=True)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(new_values, index=self.index,
columns=result_columns)
result = result.__finalize__(self)
if len(result.columns) == 1:
top = result.columns[0]
if ((type(top) == str and top == '') or
(type(top) == tuple and top[0] == '')):
result = result['']
if isinstance(result, Series):
result = self._constructor_sliced(result,
index=self.index,
name=key)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.size and not is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
def query(self, expr, inplace=False, **kwargs):
"""Query the columns of a frame with a boolean expression.
.. versionadded:: 0.13
Parameters
----------
expr : string
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`pandas.eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
q : DataFrame
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`pandas.eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
See Also
--------
pandas.eval
DataFrame.eval
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = DataFrame(randn(10, 2), columns=list('ab'))
>>> df.query('a > b')
>>> df[df.a > df.b] # same result as the previous expression
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(expr, compat.string_types):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate an expression in the context of the calling DataFrame
instance.
Parameters
----------
expr : string
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`~pandas.eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ret : ndarray, scalar, or pandas object
See Also
--------
pandas.DataFrame.query
pandas.DataFrame.assign
pandas.eval
Notes
-----
For more details see the API documentation for :func:`~pandas.eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = DataFrame(randn(10, 2), columns=list('ab'))
>>> df.eval('a + b')
>>> df.eval('c = a + b')
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
resolvers = dict(self.iteritems()), index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None):
"""Return a subset of a DataFrame including/excluding columns based on
their ``dtype``.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Returns
-------
subset : DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Notes
-----
* To select all *numeric* types use the numpy dtype ``numpy.number``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use np.datetime64, 'datetime' or 'datetime64'
* To select timedeltas, use np.timedelta64, 'timedelta' or
'timedelta64'
* To select Pandas categorical dtypes, use 'category'
* To select Pandas datetimetz dtypes, use 'datetimetz' (new in 0.20.0),
or a 'datetime64[ns, tz]' string
Examples
--------
>>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'),
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 0.3962 True 1
1 0.1459 False 2
2 0.2623 True 1
3 0.0764 False 2
4 -0.9703 True 1
5 -1.2094 False 2
>>> df.select_dtypes(include='bool')
           b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1
1 2
2 1
3 2
4 1
5 2
>>> df.select_dtypes(exclude=['floating'])
b
0 True
1 False
2 True
3 False
4 True
5 False
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(_get_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on %s' %
(include & exclude))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(column, dtype):
return column, functools.partial(issubclass, dtype.type)
for column, f in itertools.starmap(is_dtype_instance_mapper,
self.dtypes.iteritems()):
if include: # checks for the case of empty include or exclude
include_these[column] = any(map(f, include))
if exclude:
exclude_these[column] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[com._get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
""" provide boxed values for a column """
return self._constructor_sliced.from_array(values, index=self.index,
name=items, fastpath=True)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
elif isinstance(key, DataFrame):
self._setitem_frame(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.loc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d!' %
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.loc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError('Columns must be same length as key')
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
self._check_setitem_copy()
self.loc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if key.values.size and not is_bool_dtype(key.values):
raise TypeError('Must pass DataFrame with boolean values only')
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
        ensure that if we don't have an index, we can create one from the
        passed value
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except:
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
        If the value is a numpy array (not a Series/TimeSeries), it must be
        the same length as the DataFrame's index or an error will be raised.
        A Series/TimeSeries will be conformed to the DataFrame's index to
        ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns)
column : string, number, or hashable object
label of the inserted column
value : int, Series, or array-like
allow_duplicates : bool, optional
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
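    # Usage sketch for insert: operates in place and puts the new column at
    # the given position.
    #
    #   df = DataFrame({'a': [1, 2], 'c': [5, 6]})
    #   df.insert(1, 'b', [3, 4])   # columns are now ['a', 'b', 'c']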
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame, returning a new object
(a copy) with all the original columns in addition to the new ones.
.. versionadded:: 0.16.0
Parameters
----------
kwargs : keyword, value pairs
keywords are the column names. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
df : DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your
        arguments may not be preserved. To make things predictable,
the columns are inserted in alphabetical order, at the end of
your DataFrame. Assigning multiple columns within the same
``assign`` is possible, but you cannot reference other columns
created within the same ``assign`` call.
Examples
--------
>>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})
Where the value is a callable, evaluated on `df`:
>>> df.assign(ln_A = lambda x: np.log(x.A))
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
Where the value already exists and is inserted:
>>> newcol = np.log(df['A'])
>>> df.assign(ln_A=newcol)
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
"""
data = self.copy()
# do all calculations first...
results = {}
for k, v in kwargs.items():
results[k] = com._apply_if_callable(v, data)
# ... and then assign
for k, v in sorted(results.items()):
data[k] = v
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
sanitized_column : numpy-array
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex_axis(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, Categorical):
value = value.copy()
elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com._asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# upcast the scalar
dtype, value = infer_dtype_from_scalar(value)
value = np.repeat(value, len(self.index)).astype(dtype)
value = maybe_cast_to_datetime(value, dtype)
# return internal types directly
if is_extension_type(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
result = {}
for idx, item in enumerate(self.columns):
result[item] = Series(self._data.iget(idx), index=self.index,
name=item)
return result
def lookup(self, row_labels, col_labels):
"""Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = []
for row, col in zip(row_labels, col_labels):
result.append(df.get_value(row, col))
        Returns
        -------
values : ndarray
The found values
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self.get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
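    # Usage sketch for lookup (labels are illustrative): one value is returned
    # per (row, col) pair, in order.
    #
    #   df = DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['x', 'y'])
    #   df.lookup(['x', 'y'], ['b', 'a'])   # -> array([3, 2])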
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, method, copy, level,
fill_value, limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=NA,
limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, method, copy, level, fill_value=NA,
limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
""" we are guaranteed non-Nones in the axes! """
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(DataFrame, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, columns=None, **kwargs):
return super(DataFrame, self).reindex(index=index, columns=columns,
**kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(DataFrame,
self).reindex_axis(labels=labels, axis=axis,
method=method, level=level, copy=copy,
limit=limit, fill_value=fill_value)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, index=None, columns=None, **kwargs):
return super(DataFrame, self).rename(index=index, columns=columns,
**kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(DataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(DataFrame, self).shift(periods=periods, freq=freq,
axis=axis)
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index (row labels) using one or more existing
columns. By default yields a new object.
Parameters
----------
keys : column label or list of column labels / arrays
drop : boolean, default True
Delete columns to be used as the new index
append : boolean, default False
Whether to append columns to existing index
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
verify_integrity : boolean, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale':[55, 40, 84, 31]})
month sale year
0 1 55 2012
1 4 40 2014
2 7 84 2013
3 10 31 2014
Set the index to become the 'month' column:
>>> df.set_index('month')
sale year
month
1 55 2012
4 40 2014
7 84 2013
10 31 2014
Create a multi-index using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a multi-index using a set of values and a column:
>>> df.set_index([[1, 2, 3, 4], 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Returns
-------
dataframe : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col._get_level_values(n))
level = col._get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, Series):
level = col._values
names.append(col.name)
elif isinstance(col, Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, Index)):
level = col
names.append(None)
else:
level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = MultiIndex.from_arrays(arrays, names=names)
if verify_integrity and not index.is_unique:
duplicates = index.get_duplicates()
raise ValueError('Index has duplicate keys: %s' % duplicates)
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
For DataFrame with multi-level index, return new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
resetted : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, PeriodIndex):
values = index.asobject.values
elif isinstance(index, DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
return values
new_index = _default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if isinstance(self.index, MultiIndex):
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.labels)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
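    # Usage sketch for reset_index: the index becomes an ordinary column
    # unless drop=True discards it.
    #
    #   df = DataFrame({'v': [1, 2]}, index=Index(['x', 'y'], name='k'))
    #   df.reset_index()            # column 'k' plus a default integer index
    #   df.reset_index(drop=True)   # just the default integer index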
# ----------------------------------------------------------------------
# Reindex-based selection methods
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
        Return object with labels on given axis omitted where any or all of
        the data are missing, depending on ``how``
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof
Pass tuple or list to drop on multiple axes
how : {'any', 'all'}
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
int value : require that many non-NA values
subset : array-like
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include
inplace : boolean, default False
If True, do operation inplace and return None.
Returns
-------
dropped : DataFrame
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
Drop the columns where all elements are nan:
>>> df.dropna(axis=1, how='all')
A B D
0 NaN 2.0 0
1 3.0 4.0 1
2 NaN NaN 5
Drop the columns where any of the elements is nan
>>> df.dropna(axis=1, how='any')
D
0 0
1 1
2 5
Drop the rows where all of the elements are nan
(there is no row to drop, so df stays the same):
>>> df.dropna(axis=0, how='all')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
Keep only the rows with at least 2 non-na values:
>>> df.dropna(thresh=2)
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: %s' % how)
else:
raise TypeError('must specify how or thresh')
result = self.take(mask.nonzero()[0], axis=axis, convert=False)
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
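Examples
--------
A small illustration with arbitrary data:
>>> df = pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'x', 'y']})
>>> df.drop_duplicates()
   a  b
0  1  x
2  2  y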
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
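Examples
--------
A small illustration with arbitrary data:
>>> df = pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'x', 'y']})
>>> df.duplicated()
0    False
1     True
2    False
dtype: bool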
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, compat.string_types) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
vals = (self[col].values for col in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
# ----------------------------------------------------------------------
# Sorting
@Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
other_axis = 0 if axis == 1 else 1
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError('Length of ascending (%d) != length of by (%d)' %
(len(ascending), len(by)))
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
def trans(v):
if needs_i8_conversion(v):
return v.view('i8')
return v
keys = []
for x in by:
k = self.xs(x, axis=other_axis).values
if k.ndim == 2:
raise ValueError('Cannot sort by duplicate column %s' %
str(x))
keys.append(trans(k))
indexer = lexsort_indexer(keys, orders=ascending,
na_position=na_position)
indexer = _ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self.xs(by, axis=other_axis).values
if k.ndim == 2:
# try to be helpful
if isinstance(self.columns, MultiIndex):
raise ValueError('Cannot sort by column %s in a '
'multi-index, you need to explicitly '
'provide all the levels' % str(by))
raise ValueError('Cannot sort by duplicate column %s' %
str(by))
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
convert=False, verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
@Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
# 10726
if by is not None:
warnings.warn("by argument to sort_index is deprecated, pls use "
".sort_values(by=...)", FutureWarning, stacklevel=2)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending,
inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(labels, MultiIndex):
from pandas.core.sorting import lexsort_indexer
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer,
axis=baxis,
convert=False, verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
"""
DEPRECATED: use :meth:`DataFrame.sort_index`
Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order)
Parameters
----------
level : int
axis : {0 or 'index', 1 or 'columns'}, default 0
ascending : boolean, default True
inplace : boolean, default False
Sort the DataFrame without creating a new instance
sort_remaining : boolean, default True
Sort by the other levels too.
Returns
-------
sorted : DataFrame
See Also
--------
DataFrame.sort_index(level=...)
"""
warnings.warn("sortlevel is deprecated, use sort_index(level= ...)",
FutureWarning, stacklevel=2)
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
def nlargest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` largest
values of `columns`.
.. versionadded:: 0.17.0
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nlargest(3, 'a')
a b c
3 11 c 3
1 10 b 2
2 8 d NaN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nlargest()
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
values of `columns`.
.. versionadded:: 0.17.0
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last', False}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nsmallest(3, 'a')
a b c
4 -1 e 4
0 1 a 1
2 8 d NaN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
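Examples
--------
An illustrative sketch with an arbitrary two-level index:
>>> idx = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)], names=['x', 'y'])
>>> df = pd.DataFrame({'v': [10, 20]}, index=idx)
>>> df.swaplevel('x', 'y')   # the index levels are now ('y', 'x')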
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order.
May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
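Examples
--------
An illustrative sketch with an arbitrary two-level index:
>>> idx = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)],
...                                 names=['outer', 'inner'])
>>> df = pd.DataFrame({'v': [10, 20]}, index=idx)
>>> df.reorder_levels(['inner', 'outer'])   # same rows, index levels reordered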
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
def _arith_op(left, right):
if fill_value is not None:
left_mask = isnull(left)
right_mask = isnull(right)
left = left.copy()
right = right.copy()
# one but not both
mask = left_mask ^ right_mask
left[left_mask & mask] = fill_value
right[right_mask & mask] = fill_value
return func(left, right)
if this._is_mixed_type or other._is_mixed_type:
# unique
if this.columns.is_unique:
def f(col):
r = _arith_op(this[col].values, other[col].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = dict([(col, f(col)) for col in this])
# non-unique
else:
def f(i):
r = _arith_op(this.iloc[:, i].values,
other.iloc[:, i].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = dict([
(i, f(i)) for i, col in enumerate(this.columns)
])
result = self._constructor(result, index=new_index, copy=False)
result.columns = new_columns
return result
else:
result = _arith_op(this.values, other.values)
return self._constructor(result, index=new_index, columns=new_columns,
copy=False)
def _combine_series(self, other, func, fill_value=None, axis=None,
level=None):
if axis is not None:
axis = self._get_axis_name(axis)
if axis == 'index':
return self._combine_match_index(other, func, level=level,
fill_value=fill_value)
else:
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value)
return self._combine_series_infer(other, func, level=level,
fill_value=fill_value)
def _combine_series_infer(self, other, func, level=None, fill_value=None):
if len(other) == 0:
return self * NA
if len(self) == 0:
# Ambiguous case, use _series so works with DataFrame
return self._constructor(data=self._series, index=self.index,
columns=self.columns)
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value)
def _combine_match_index(self, other, func, level=None, fill_value=None):
left, right = self.align(other, join='outer', axis=0, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported." %
fill_value)
return self._constructor(func(left.values.T, right.values).T,
index=left.index, columns=self.columns,
copy=False)
def _combine_match_columns(self, other, func, level=None, fill_value=None):
left, right = self.align(other, join='outer', axis=1, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported" %
fill_value)
new_data = left._data.eval(func=func, other=right,
axes=[left.columns, self.index])
return self._constructor(new_data)
def _combine_const(self, other, func, raise_on_error=True):
new_data = self._data.eval(func=func, other=other,
raise_on_error=raise_on_error)
return self._constructor(new_data)
def _compare_frame_evaluate(self, other, func, str_rep):
# unique
if self.columns.is_unique:
def _compare(a, b):
return dict([(col, func(a[col], b[col])) for col in a.columns])
new_data = expressions.evaluate(_compare, str_rep, self, other)
return self._constructor(data=new_data, index=self.index,
columns=self.columns, copy=False)
# non-unique
else:
def _compare(a, b):
return dict([(i, func(a.iloc[:, i], b.iloc[:, i]))
for i, col in enumerate(a.columns)])
new_data = expressions.evaluate(_compare, str_rep, self, other)
result = self._constructor(data=new_data, index=self.index,
copy=False)
result.columns = self.columns
return result
def _compare_frame(self, other, func, str_rep):
if not self._indexed_same(other):
raise ValueError('Can only compare identically-labeled '
'DataFrame objects')
return self._compare_frame_evaluate(other, func, str_rep)
def _flex_compare_frame(self, other, func, str_rep, level):
if not self._indexed_same(other):
self, other = self.align(other, 'outer', level=level, copy=False)
return self._compare_frame_evaluate(other, func, str_rep)
def combine(self, other, func, fill_value=None, overwrite=True):
"""
Add two DataFrame objects and do not propagate NaN values, so if for a
(column, index) location one frame is missing a value, it will default
to the other frame's value (which might be NaN as well)
Parameters
----------
other : DataFrame
func : function
fill_value : scalar value
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
Returns
-------
result : DataFrame
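Examples
--------
A small sketch with arbitrary data; the combining function here keeps
the element-wise maximum of the two frames:
>>> df1 = pd.DataFrame({'A': [0, 5]})
>>> df2 = pd.DataFrame({'A': [3, 1]})
>>> df1.combine(df2, lambda s1, s2: s1.where(s1 > s2, s2))   # -> A: [3, 5]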
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isnull(series)
other_mask = isnull(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
# if we have different dtypes, possibly promote
new_dtype = this_dtype
if not is_dtype_equal(this_dtype, other_dtype):
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
# see if we need to be represented as i8 (datetimelike)
# try to keep us at this dtype
needs_i8_conversion_i = needs_i8_conversion(new_dtype)
if needs_i8_conversion_i:
arr = func(series, otherSeries, True)
else:
arr = func(series, otherSeries)
if do_fill:
arr = _ensure_float(arr)
arr[this_mask & other_mask] = NA
# try to downcast back to the original dtype
if needs_i8_conversion_i:
# ToDo: This conversion should be handled in
# _maybe_cast_to_datetime but the change affects lot...
if is_datetime64tz_dtype(new_dtype):
arr = DatetimeIndex._simple_new(arr, tz=new_dtype.tz)
else:
arr = maybe_cast_to_datetime(arr, new_dtype)
else:
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns)._convert(datetime=True,
copy=False)
def combine_first(self, other):
"""
Combine two DataFrame objects and default to non-null values in frame
calling the method. The result's index and columns will be the union of
the respective indexes and columns
Parameters
----------
other : DataFrame
Examples
--------
``a``'s values are prioritized; values from ``b`` are used to fill holes:
>>> a.combine_first(b)
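A concrete sketch with arbitrary frames:
>>> a = pd.DataFrame({'A': [np.nan, 0], 'B': [np.nan, 4]})
>>> b = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> a.combine_first(b)   # the missing values in `a` are filled from `b`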
Returns
-------
combined : DataFrame
"""
def combiner(x, y, needs_i8_conversion=False):
x_values = x.values if hasattr(x, 'values') else x
y_values = y.values if hasattr(y, 'values') else y
if needs_i8_conversion:
mask = isnull(x)
x_values = x_values.view('i8')
y_values = y_values.view('i8')
else:
mask = isnull(x_values)
return expressions.where(mask, y_values, x_values,
raise_on_error=True)
return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify DataFrame in place using non-NA values from passed
DataFrame. Aligns on indices
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
join : {'left'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : boolean
If True, will raise an error if the DataFrame and other both
contain data in the same place.
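Examples
--------
Illustrative only (arbitrary data; ``df`` is modified in place):
>>> df = pd.DataFrame({'A': [1, 2, 3]})
>>> other = pd.DataFrame({'A': [10, np.nan, 30]})
>>> df.update(other)   # rows 0 and 2 are overwritten; the NaN in `other` is skipped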
"""
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isnull(that)
else:
if raise_conflict:
mask_this = notnull(that)
mask_that = notnull(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isnull(that)
else:
mask = notnull(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that,
raise_on_error=True)
# ----------------------------------------------------------------------
# Misc methods
def first_valid_index(self):
"""
Return label for first non-NA/null value
"""
if len(self) == 0:
return None
return self.index[self.count(1) > 0][0]
def last_valid_index(self):
"""
Return label for last non-NA/null value
"""
if len(self) == 0:
return None
return self.index[self.count(1) > 0][-1]
# ----------------------------------------------------------------------
# Data reshaping
def pivot(self, index=None, columns=None, values=None):
"""
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from index / columns to form axes of the resulting
DataFrame.
Parameters
----------
index : string or object, optional
Column name to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column name to use to make new frame's columns
values : string or object, optional
Column name to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns
Returns
-------
pivoted : DataFrame
See also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair
DataFrame.unstack : pivot based on the index values instead of a
column
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods
Examples
--------
>>> df = pd.DataFrame({'foo': ['one','one','one','two','two','two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6]})
>>> df
foo bar baz
0 one A 1
1 one B 2
2 one C 3
3 two A 4
4 two B 5
5 two C 6
>>> df.pivot(index='foo', columns='bar', values='baz')
A B C
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
A B C
one 1 2 3
two 4 5 6
"""
from pandas.core.reshape.reshape import pivot
return pivot(self, index=index, columns=columns, values=values)
def stack(self, level=-1, dropna=True):
"""
Pivot a level of the (possibly hierarchical) column labels, returning a
DataFrame (or Series in the case of an object with a single level of
column labels) having a hierarchical index with a new inner-most level
of row labels.
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to stack, can pass level name
dropna : boolean, default True
Whether to drop rows in the resulting Frame/Series with no valid
values
Examples
--------
>>> s
a b
one 1. 2.
two 3. 4.
>>> s.stack()
one a 1
b 2
two a 3
b 4
Returns
-------
stacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels. If the index is not a MultiIndex,
the output will be a Series (the analogue of stack when the columns are
not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded: 0.18.0
See also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
Returns
-------
unstacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
_shared_docs['melt'] = ("""
"Unpivots" a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
%(other)s
pivot_table
DataFrame.pivot
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
""")
@Appender(_shared_docs['melt'] %
dict(caller='df.melt(',
versionadded='.. versionadded:: 0.20.0\n',
other='melt'))
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
from pandas.core.reshape.reshape import melt
return melt(self, id_vars=id_vars, value_vars=value_vars,
var_name=var_name, value_name=value_name,
col_level=col_level)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
"""
1st discrete difference of object
Parameters
----------
periods : int, default 1
Periods to shift for forming difference
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
.. versionadded: 0.16.1
Returns
-------
diffed : DataFrame
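Examples
--------
A minimal illustration with arbitrary data:
>>> df = pd.DataFrame({'a': [1, 3, 6]})
>>> df.diff()
     a
0  NaN
1  2.0
2  3.0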
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
# TODO: _shallow_copy(subset)?
return self[key]
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
... index=pd.date_range('1/1/2000', periods=10))
>>> df.iloc[3:7] = np.nan
Aggregate these functions across all columns
>>> df.agg(['sum', 'min'])
A B C
sum -0.182253 -0.614014 -2.909534
min -1.916563 -1.460076 -1.568297
Different aggregations per column
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 1.514318
min -1.916563 -1.460076
sum -0.182253 NaN
See also
--------
pandas.DataFrame.apply
pandas.DataFrame.transform
pandas.DataFrame.groupby.aggregate
pandas.DataFrame.resample.aggregate
pandas.DataFrame.rolling.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
# TODO: flipped axis
result = None
if axis == 0:
try:
result, how = self._aggregate(func, axis=0, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
agg = aggregate
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
args=(), **kwds):
"""
Applies function along input axis of DataFrame.
Objects passed to functions are Series objects having index
either the DataFrame's index (axis=0) or the columns (axis=1).
Return type depends on whether passed function aggregates, or the
reduce argument if the DataFrame is empty.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index': apply function to each column
* 1 or 'columns': apply function to each row
broadcast : boolean, default False
For aggregation functions, return object of same size with values
propagated
raw : boolean, default False
If False, convert each row or column into a Series. If raw=True the
passed function will receive ndarray objects instead. If you are
just applying a NumPy reduction function this will achieve much
better performance
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
return value will be guessed by calling func on an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
args : tuple
Positional arguments to pass to function in addition to the
array/series
Additional keyword arguments will be passed as keywords to the function
Notes
-----
In the current implementation apply calls func twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df.apply(numpy.sqrt) # returns DataFrame
>>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0)
>>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1)
See also
--------
DataFrame.applymap: For elementwise operations
DataFrame.aggregate: only perform aggregating type operations
DataFrame.transform: only perform transforming type operations
Returns
-------
applied : Series or DataFrame
"""
axis = self._get_axis_number(axis)
ignore_failures = kwds.pop('ignore_failures', False)
# dispatch to agg
if axis == 0 and isinstance(func, (list, dict)):
return self.aggregate(func, axis=axis, *args, **kwds)
if len(self.columns) == 0 and len(self.index) == 0:
return self._apply_empty_result(func, axis, reduce, *args, **kwds)
# if we are a string, try to dispatch
if isinstance(func, compat.string_types):
if axis:
kwds['axis'] = axis
return getattr(self, func)(*args, **kwds)
if kwds or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwds)
else:
f = func
if isinstance(f, np.ufunc):
with np.errstate(all='ignore'):
results = f(self.values)
return self._constructor(data=results, index=self.index,
columns=self.columns, copy=False)
else:
if not broadcast:
if not all(self.shape):
return self._apply_empty_result(func, axis, reduce, *args,
**kwds)
if raw and not self._is_mixed_type:
return self._apply_raw(f, axis)
else:
if reduce is None:
reduce = True
return self._apply_standard(
f, axis,
reduce=reduce,
ignore_failures=ignore_failures)
else:
return self._apply_broadcast(f, axis)
def _apply_empty_result(self, func, axis, reduce, *args, **kwds):
if reduce is None:
reduce = False
try:
reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds),
Series)
except Exception:
pass
if reduce:
return Series(NA, index=self._get_agg_axis(axis))
else:
return self.copy()
def _apply_raw(self, func, axis):
try:
result = lib.reduce(self.values, func, axis=axis)
except Exception:
result = np.apply_along_axis(func, axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return DataFrame(result, index=self.index, columns=self.columns)
else:
return Series(result, index=self._get_agg_axis(axis))
def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
# skip if we are mixed datelike and trying reduce across axes
# GH6125
if (reduce and axis == 1 and self._is_mixed_type and
self._is_datelike_mixed_type):
reduce = False
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
if reduce:
values = self.values
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if not is_extension_type(values):
# Create a dummy Series from an empty array
index = self._get_axis(axis)
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=self._get_axis(axis),
dtype=values.dtype)
try:
labels = self._get_agg_axis(axis)
result = lib.reduce(values, func, axis=axis, dummy=dummy,
labels=labels)
return Series(result, index=labels)
except Exception:
pass
dtype = object if self._is_mixed_type else None
if axis == 0:
series_gen = (self._ixs(i, axis=1)
for i in range(len(self.columns)))
res_index = self.columns
res_columns = self.index
elif axis == 1:
res_index = self.index
res_columns = self.columns
values = self.values
series_gen = (Series.from_array(arr, index=res_columns, name=name,
dtype=dtype)
for i, (arr, name) in enumerate(zip(values,
res_index)))
else: # pragma : no cover
raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))
i = None
keys = []
results = {}
if ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = func(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = func(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, 'args'):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ('occurred at index %s' %
pprint_thing(k), )
raise
if len(results) > 0 and is_sequence(results[0]):
if not isinstance(results[0], Series):
index = res_columns
else:
index = None
result = self._constructor(data=results, index=index)
result.columns = res_index
if axis == 1:
result = result.T
result = result._convert(datetime=True, timedelta=True, copy=False)
else:
result = Series(results)
result.index = res_index
return result
def _apply_broadcast(self, func, axis):
if axis == 0:
target = self
elif axis == 1:
target = self.T
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1, got %s' % axis)
result_values = np.empty_like(target.values)
columns = target.columns
for i, col in enumerate(columns):
result_values[:, i] = func(target[col])
result = self._constructor(result_values, index=target.index,
columns=target.columns)
if axis == 1:
result = result.T
return result
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Examples
--------
>>> df = pd.DataFrame(np.random.randn(3, 3))
>>> df
0 1 2
0 -0.029638 1.081563 1.280300
1 0.647747 0.831136 -1.549481
2 0.513416 -0.884417 0.195343
>>> df = df.applymap(lambda x: '%.2f' % x)
>>> df
0 1 2
0 -0.03 1.08 1.28
1 0.65 0.83 -1.55
2 0.51 -0.88 0.20
Returns
-------
applied : DataFrame
See also
--------
DataFrame.apply : For operations on rows/columns
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.asobject, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False, verify_integrity=False):
"""
Append rows of `other` to the end of this frame, returning a new
object. Columns not in this frame are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
Returns
-------
appended : DataFrame
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's columns, the order of the columns in the resulting
DataFrame will be unchanged.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
combined_columns = self.columns.tolist() + self.columns.union(
other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
Join columns with other DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series with name field set, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame
on : column name, tuple/list of column names, or array-like
Column(s) in the caller to join on the index in other,
otherwise joins index-on-index. If multiple
columns are given, the passed DataFrame must have a MultiIndex. Can
pass an array as the join key if not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation
how : {'left', 'right', 'outer', 'inner'}, default: 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use other frame's index
* outer: form union of calling frame's index (or column if on is
specified) with other frame's index, and sort it
lexicographically
* inner: form intersection of calling frame's index (or column if
on is specified) with other frame's index, preserving the order
of the calling's one
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
sort : boolean, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword)
Notes
-----
on, lsuffix, and rsuffix options are not supported when passing a list
of DataFrame objects
Examples
--------
>>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> caller
A key
0 A0 K0
1 A1 K1
2 A2 K2
3 A3 K3
4 A4 K4
5 A5 K5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
B key
0 B0 K0
1 B1 K1
2 B2 K2
Join DataFrames using their indexes.
>>> caller.join(other, lsuffix='_caller', rsuffix='_other')
A key_caller B key_other
0 A0 K0 B0 K0
1 A1 K1 B1 K1
2 A2 K2 B2 K2
3 A3 K3 NaN NaN
4 A4 K4 NaN NaN
5 A5 K5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both caller and other. The joined DataFrame will have
key as its index.
>>> caller.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the on
parameter. DataFrame.join always uses other's index but we can use any
column in the caller. This method preserves the original caller's
index in the result.
>>> caller.join(other.set_index('key'), on='key')
A key B
0 A0 K0 B0
1 A1 K1 B1
2 A2 K2 B2
3 A3 K3 NaN
4 A4 K4 NaN
5 A5 K5 NaN
See also
--------
DataFrame.merge : For column(s)-on-columns(s) operations
Returns
-------
joined : DataFrame
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(self, other, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
if on is not None:
raise ValueError('Joining multiple DataFrames only supported'
' for joining on index')
# join indexes only using concat
if how == 'left':
how = 'outer'
join_axes = [self.index]
else:
join_axes = None
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
if can_concat:
return concat(frames, axis=1, join=how, join_axes=join_axes,
verify_integrity=True)
joined = frames[0]
for frame in frames[1:]:
joined = merge(joined, frame, how=how, left_index=True,
right_index=True)
return joined
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
from pandas.core.reshape.merge import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator, validate=validate)
def round(self, decimals=0, *args, **kwargs):
"""
Round a DataFrame to a variable number of decimal places.
.. versionadded:: 0.17.0
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Examples
--------
>>> df = pd.DataFrame(np.random.random([3, 3]),
... columns=['A', 'B', 'C'], index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1 0.17
second 0.0 1 0.58
third 0.9 0 0.49
Returns
-------
DataFrame object
See Also
--------
numpy.around
Series.round
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals)
for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a "
"Series")
if len(new_cols) > 0:
return self._constructor(concat(new_cols, axis=1),
index=self.index,
columns=self.columns)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values
Parameters
----------
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
and spearman correlation
Returns
-------
y : DataFrame
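Examples
--------
A minimal sketch; the columns are chosen to be perfectly correlated so
the result is predictable:
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [2, 4, 6]})
>>> df.corr()
     x    y
x  1.0  1.0
y  1.0  1.0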
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == 'pearson':
correl = libalgos.nancorr(_ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = libalgos.nancorr_spearman(_ensure_float64(mat),
minp=min_periods)
else:
if min_periods is None:
min_periods = 1
mat = _ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = NA
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
y : DataFrame
Notes
-----
`y` contains the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1 (unbiased estimator).
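Examples
--------
A minimal sketch with arbitrary data:
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [2, 4, 6]})
>>> df.cov()   # 2x2 matrix; var(x) == 1.0, var(y) == 4.0, cov(x, y) == 2.0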
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if notnull(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(_ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False):
"""
Compute pairwise correlation between rows or columns of two DataFrame
objects.
Parameters
----------
other : DataFrame
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise
drop : boolean, default False
Drop missing indices from result, default returns union of all
Returns
-------
correls : Series
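Examples
--------
A minimal sketch with arbitrary data:
>>> df1 = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 0, 1]})
>>> df2 = pd.DataFrame({'a': [2, 4, 6], 'b': [1, 1, 1]})
>>> df1.corrwith(df2)   # 'a' correlates perfectly (1.0); 'b' is NaN (zero variance in df2)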
"""
axis = self._get_axis_number(axis)
if isinstance(other, Series):
return self.apply(other.corr, axis=axis)
this = self._get_numeric_data()
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
# mask missing values
left = left + right * 0
right = right + left * 0
if axis == 1:
left = left.T
right = right.T
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
if not drop:
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
correl = correl.reindex(result_index)
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Return Series with number of non-NA/null observations over requested
axis. Works with non-floating point data as well (detects NaN and None)
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame
numeric_only : boolean, default False
Include only float, int, boolean data
Returns
-------
count : Series (or DataFrame if level specified)
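Examples
--------
A minimal illustration with arbitrary data:
>>> df = pd.DataFrame({'a': [1, np.nan, 3], 'b': [1, 2, 3]})
>>> df.count()
a    2
b    3
dtype: int64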
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type:
result = notnull(frame).sum(axis=axis)
else:
counts = notnull(frame.values).sum(axis=axis)
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype('int64')
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError("Can only count levels on hierarchical %s." %
self._get_axis_name(axis))
if frame._is_mixed_type:
# Since we have mixed types, calling notnull(frame.values) might
# upcast everything to object
mask = notnull(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notnull(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, compat.string_types):
level = count_axis._get_level_number(level)
level_index = count_axis.levels[level]
labels = _ensure_int64(count_axis.labels[level])
counts = lib.count_level_2d(mask, labels, len(level_index), axis=0)
result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
axis = self._get_axis_number(axis)
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
labels = self._get_agg_axis(axis)
# exclude timedelta/datetime unless we are uniform types
if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
numeric_only = True
if numeric_only is None:
try:
values = self.values
result = f(values)
except Exception as e:
# try by-column first
if filter_type is None and axis == 0:
try:
# this can end up with a non-reduction
# but not always. if the types are mixed
# with datelike then need to make sure a series
# we only end up here if we have not specified
# numeric_only and yet we have tried a
# column-by-column reduction, where we have mixed type.
# So let's just do what we can
result = self.apply(f, reduce=False,
ignore_failures=True)
if result.ndim == self.ndim:
result = result.iloc[0]
return result
except:
pass
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
e = NotImplementedError("Handling exception with filter_"
"type %s not implemented." %
filter_type)
raise_with_traceback(e)
with np.errstate(all='ignore'):
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
msg = ("Generating numeric_only data with filter_type %s "
"not supported." % filter_type)
raise NotImplementedError(msg)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == 'numeric':
result = result.astype(np.float64)
elif filter_type == 'bool' and notnull(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, self.dtypes)
return Series(result, index=labels)
def nunique(self, axis=0, dropna=True):
"""
Return Series with number of distinct observations over requested
axis.
.. versionadded:: 0.20.0
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique : Series
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
>>> df.nunique(axis=1)
0 1
1 2
2 2
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
idxmin : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
See Also
--------
Series.idxmin
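Examples
--------
A minimal sketch with arbitrary data:
>>> df = pd.DataFrame({'a': [3, 1, 2], 'b': [9, 9, 0]})
>>> df.idxmin()   # row label of the minimum per column: 'a' -> 1, 'b' -> 2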
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else NA for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
idxmax : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
See Also
--------
Series.idxmax
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else NA for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num):
""" let's be explict about this """
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False):
"""
Gets the mode(s) of each element along the selected axis. Adds a row
for each mode per label, fills in gaps with nan.
Note that there could be multiple values returned for the selected
axis (when more than one item shares the maximum frequency), which is
the reason why a dataframe is returned. If you want to impute missing
values with the mode in a dataframe ``df``, you can just do this:
``df.fillna(df.mode().iloc[0])``
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row
numeric_only : boolean, default False
if True, only apply to numeric columns
Returns
-------
modes : DataFrame (sorted)
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]})
>>> df.mode()
A
0 1
1 2
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode()
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
"""
Return values at the given quantile over requested axis, a la
numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantiles : Series or DataFrame
- If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
- If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> df = DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
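        Under the default ``interpolation='linear'``, the 0.1 quantile of
        column ``a`` above sits at position ``0.1 * (4 - 1) = 0.3`` between
        ``i = 1`` and ``j = 2``, giving ``1 + (2 - 1) * 0.3 = 1.3``.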
"""
self._check_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
result = data._data.quantile(qs=q,
axis=1,
interpolation=interpolation,
transposed=is_transposed)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If false then underlying input data is not copied
Returns
-------
df : DataFrame with DatetimeIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
"""
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
        freq : string, default None (frequency is inferred from the index)
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If False then underlying input data is not copied
Returns
-------
        df : DataFrame with PeriodIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
def isin(self, values):
"""
Return boolean DataFrame showing whether each element in the
DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dictionary
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dictionary, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame of booleans
Examples
--------
When ``values`` is a list:
>>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> df.isin([1, 3, 12, 'a'])
A B
0 True True
1 False False
2 True False
When ``values`` is a dict:
>>> df = DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})
>>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})
A B
0 True False # Note that B didn't match the 1 here.
1 False True
2 True True
When ``values`` is a Series or DataFrame:
>>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})
>>> df.isin(other)
A B
0 True False
1 False False # Column A in `other` has a 3, but not at index 1.
2 True True
"""
if isinstance(values, dict):
from collections import defaultdict
from pandas.core.reshape.concat import concat
values = defaultdict(list, values)
return concat((self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError("only list-like or dict-like objects are "
"allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(
algorithms.isin(self.values.ravel(),
values).reshape(self.shape), self.index,
self.columns)
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0})
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
_EMPTY_SERIES = Series([])
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = _ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [_ensure_index(columns), _ensure_index(index)]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
from pandas.core.index import _union_indexes
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
for v in data:
if isinstance(v, Series):
have_series = True
indexes.append(v.index)
elif isinstance(v, dict):
have_dicts = True
indexes.append(list(v.keys()))
elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(v))
if not indexes and not raw_lengths:
raise ValueError('If using all scalar values, you must pass'
' an index')
if have_series or have_dicts:
index = _union_indexes(indexes)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('arrays must all be same length')
if have_dicts:
raise ValueError('Mixing dicts with non-Series may lead to '
'ambiguous ordering.')
if have_series:
if lengths[0] != len(index):
msg = ('array length %d does not match index length %d' %
(lengths[0], len(index)))
raise ValueError(msg)
else:
index = _default_index(lengths[0])
return _ensure_index(index)
def _prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, Series, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except:
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns
"""
if isinstance(data, DataFrame):
if columns is not None:
arrays = [data._ixs(i, axis=1).values
for i, col in enumerate(data.columns) if col in columns]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], collections.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Categorical):
if columns is None:
columns = _default_index(len(data))
return data, columns
elif (isinstance(data, (np.ndarray, Series, Index)) and
data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = lmap(tuple, data)
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
""" extract from a masked rec array and create the manager """
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
if index is None:
index = _default_index(len(data))
index = _ensure_index(index)
if columns is not None:
columns = _ensure_index(columns)
arrays, arr_columns = _to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)
if copy:
mgr = mgr.copy()
return mgr
def _reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
indexer = _ensure_index(arr_columns).get_indexer(columns)
arr_columns = _ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
from pandas.core.index import _get_combined_index
if columns is None:
columns = _get_combined_index([
s.index for s in data if getattr(s, 'index', None) is not None
])
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, 'index', None)
if index is None:
index = _default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = _values_from_object(s)
aligned_values.append(algorithms.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, OrderedDict) for d in data)
columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = _default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError('%d columns passed, passed data had %s '
'columns' % (len(columns), len(content)))
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays, columns
def _get_names_from_index(data):
has_some_name = any([getattr(s, 'name', None) is not None for s in data])
if not has_some_name:
return _default_index(len(data))
index = lrange(len(data))
count = 0
for i, s in enumerate(data):
n = getattr(s, 'name', None)
if n is not None:
index[i] = n
else:
index[i] = 'Unnamed %d' % count
count += 1
return index
def _homogenize(data, index, dtype=None):
from pandas.core.series import _sanitize_array
oindex = None
homogenized = []
for v in data:
if isinstance(v, Series):
if dtype is not None:
v = v.astype(dtype)
if v.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
v = v.reindex(index, copy=False)
else:
if isinstance(v, dict):
if oindex is None:
oindex = index.astype('O')
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
v = _dict_compat(v)
else:
v = dict(v)
v = lib.fast_multiget(v, oindex.values, default=NA)
v = _sanitize_array(v, index, dtype=dtype, copy=False,
raise_cast_failure=False)
homogenized.append(v)
return homogenized
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
for index, s in compat.iteritems(data):
for col, v in compat.iteritems(s):
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
def _put_str(s, space):
return ('%s' % s)[:space].ljust(space)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
DataFrame.plot = base.AccessorProperty(gfx.FramePlotMethods,
gfx.FramePlotMethods)
DataFrame.hist = gfx.hist_frame
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0,
grid=True, figsize=None, layout=None, return_type=None, **kwds):
from pandas.plotting._core import boxplot
import matplotlib.pyplot as plt
ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize,
grid=grid, rot=rot, figsize=figsize, layout=layout,
return_type=return_type, **kwds)
plt.draw_if_interactive()
return ax
DataFrame.boxplot = boxplot
ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs)
ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs)
| 36.549366 | 85 | 0.540114 |
e889cbf31914e3cba67abc62fe069b4ec7fc1795
| 15,474 |
py
|
Python
|
scripts/ice_class.py
|
mixerupper/mltools-fi_cate
|
9a43af3f58d91cadce584863f388111abbb80b39
|
[
"MIT"
] | null | null | null |
scripts/ice_class.py
|
mixerupper/mltools-fi_cate
|
9a43af3f58d91cadce584863f388111abbb80b39
|
[
"MIT"
] | 1 |
2021-03-31T19:52:04.000Z
|
2021-03-31T20:03:02.000Z
|
scripts/ice_class.py
|
mixerupper/mltools-fi_cate
|
9a43af3f58d91cadce584863f388111abbb80b39
|
[
"MIT"
] | 1 |
2021-06-01T08:45:07.000Z
|
2021-06-01T08:45:07.000Z
|
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
class ICE():
    def __init__(self, model_type, frac_sample = 1, seed_num = None, time = False, trace = False, num_per_ob = 50):
        '''
        Instantiates the ICE class.
        @param model_type : "binary" or "continuous" y-variable
        @param frac_sample : Fraction of data set to sample for ICE df.
        @param seed_num : Random seed for reproducibility.
        @param time : Print timing information for each fitted feature.
        @param trace : Turn on/off trace messages for debugging
        @param num_per_ob : Number of linearly spaced grid points per observation
            (only used when fitting with lice = True).
        @examples
        ICE("binary", frac_sample = 0.5, seed_num = 420)
        '''
self.model_type = model_type
self.frac_sample = frac_sample
self.seed_num = seed_num
self.trace = trace
        self.time = time
        self.num_per_ob = num_per_ob
# Initializations to raise exceptions
self.fit_all = False
self.ice_dfs = {}
self.ice_fis = {}
def fit(self, X, model, lice = False):
'''
Creates all ICE datasets for each feature
@param X : Covariate matrix
@param model : Model to interpet
@param lice : Linearly spaced feature distribution instead of unique values
'''
self.features = list(X.columns)
self.data = X.copy()
if self.model_type == "binary":
self.data['y_pred'] = model.predict_proba(X)[:,1]
else:
self.data['y_pred'] = model.predict(X)
for feature in X:
try:
start = datetime.now()
self.ice_dfs[feature], self.ice_fis[feature] = self.ice_fit_helper(X, model, feature, lice)
end = datetime.now()
if self.time:
print(f"Fit {feature} in {(end - start).total_seconds():.2f} seconds")
except ValueError:
print(f"Could not fit {feature} because of ValueError")
self.fit_all = True
return
def fit_single_feature(self, X, model, feature, lice = False):
'''
Create single ICE dataset for a feature.
Used when only some features are of interest.
@param X : Covariate matrix
@param model : Model to interpet
@param feature : Single feature to create ICE dataset for
'''
start = datetime.now()
self.ice_dfs[feature], self.ice_fis[feature] = self.ice_fit_helper(X, model, feature, lice)
self.data = X.copy()
if self.model_type == "binary":
self.data['y_pred'] = model.predict_proba(X)[:,1]
else:
self.data['y_pred'] = model.predict(X)
end = datetime.now()
if self.time:
print(f"Fit {feature} in {(end - start).total_seconds():.2f} seconds")
def ice_fit_helper(self, X, model, feature, lice = False,
min_obs_per_feature = 10, likelihood_decay = 0.75):
'''
Create ICE dataset for a single feature. Called by fit.
@param X : Covariate matrix
@param model : Model to interpet
@param feature : Single feature to create ICE dataset for
'''
# uniformly sample
X = self.uniform_sample(X, feature, self.frac_sample)
feature_min = np.min(X[feature])
feature_max = np.max(X[feature])
if lice:
feature_range = np.linspace(feature_min, feature_max, num = self.num_per_ob)
else:
feature_range = np.sort(np.unique(X[feature]))
df = X.loc[np.repeat(X.index, len(feature_range))]
df['orig_'+feature] = df[feature]
df['obs'] = df.index
df[feature] = np.tile(feature_range, len(X.index))
# get predictions
if self.model_type == "binary":
preds = model.predict_proba(
df.drop(['obs', 'orig_'+feature], axis = 1))[:,1]
else:
preds = model.predict(df.drop(['obs', 'orig_'+feature], axis = 1))
df['y_pred'] = preds
df['y_pred_centered'] = df\
.groupby('obs')['y_pred']\
.transform(lambda x:(x - x.shift(1)).cumsum())\
.fillna(0)
# Add on dydx for histogram and feature importance
# TODO: Deal with case where these names collide with existing feature names.
df['feature_distance'] = np.abs(df[feature] - df['orig_'+feature])
df['original_point'] = (df['feature_distance'] == 0)*1
feature_std = np.std(X[feature])
# Add likelihood on phantom/real obs based on logistic regression
# logr = LogisticRegression(class_weight = 'balanced')
# logr.fit(df[[feature]], df['original_point'])
if feature_std != 0:
df['likelihood'] = likelihood_decay ** (df['feature_distance']/feature_std)
else:
df['likelihood'] = 1
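        # Illustrative decay with the default likelihood_decay = 0.75: a grid
        # point one feature standard deviation away from the original value
        # gets weight 0.75, two away gets 0.75**2 (about 0.56), and so on.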
# Add feature impact
df['dy'] = df\
.groupby('obs')['y_pred']\
.transform(lambda x:x - x.shift(1))
df['dx'] = df\
.groupby('obs')[feature]\
.transform(lambda x:x - x.shift(1))
df['dydx'] = df['dy'] / df['dx']
# Account for NA of very first unique value that doesn't have a lag
df['dydx'] = df\
.groupby('obs')['dydx']\
.transform(lambda x:np.where(x.isna(), x.shift(-1), x))
df['dydx_abs'] = np.abs(df['dydx'])
df = df.loc[lambda x:~x.dydx_abs.isna()]
if df.shape[0] == 0:
fi_dict = {'Feature':feature,
'ICE FI':0,
'ICE In-Dist FI':0}
else:
# Calculate feature impact
# Normalize a feature by subtracting mean and dividing by SD
# Therefore, we normalize these FIs by multiplying by SD
temp_df = df.loc[lambda x:~x.dydx_abs.isna()]
# Feature impact/In-Dist Feature impact
fi_raw = np.mean(temp_df['dydx_abs'])
fi_in_dist_raw = np.sum(temp_df['dydx_abs'] * temp_df['likelihood'])/np.sum(temp_df['likelihood'])
fi_standard = fi_raw * feature_std
fi_in_dist_standard = fi_in_dist_raw * feature_std
# Heterogeneity
fi_het = temp_df\
.groupby(feature)\
.agg(dydx_std = ('dydx', 'std'))\
.reset_index(drop = True)\
.loc[:,'dydx_std']\
.mean()
fi_het = fi_het * feature_std
# Non-linearity
fi_nl = temp_df\
.groupby('obs')\
.agg(dydx_std = ('dydx', 'std'))\
.reset_index(drop = True)\
.loc[:,'dydx_std']\
.mean()
fi_nl = fi_nl * feature_std
fi_dict = {'Feature':feature,
'ICE FI':fi_standard,
'ICE In-Dist FI':fi_in_dist_standard,
'ICE Heterogeneity':fi_het,
'ICE Non-linearity':fi_nl}
# TODO: drop every column except necessary ones for plotting to save space
return df, fi_dict
def ice_plot_single_feature(self, feature, save_path = None,
plot_num = 200, close_multiple = 0.5, mode = "ice"):
'''
Plots the ICE chart for a single feature.
Can only be called after fitting for that feature.
@param feature : Target covariate to plot.
@param plot_num : Number of lines to plot.
@param close_multiple : Mark parts of the line within close_multiple
times standard deviation of feature as "close"
with a solid line
@param mode: ice|d-ice|c-ice
@examples
plot_single_feature('Age', plot_num = 500)
'''
start = datetime.now()
plot_data = self.ice_dfs[feature]
unique_features = plot_data[feature].unique()
if len(unique_features) > 10:
feature_continuous = True
else:
feature_continuous = False
y_var = np.select(
[mode == "ice",
mode == "d-ice",
mode == "c-ice"],
["y_pred", "dydx", "y_pred_centered"]).item()
unique_obs = plot_data.obs.unique()
ob_sample = np.random.choice(unique_obs,
size = min(len(unique_obs), plot_num), replace = False)
mean_line = plot_data\
.groupby(feature)\
.agg(y_pred = (y_var, 'mean'))\
.reset_index()\
.rename({'y_pred':y_var}, axis = 1)\
.assign(obs = -1,
mean_line = 1)
plot_sub_data = plot_data\
.loc[lambda x:x.obs.isin(ob_sample)]\
.assign(mean_line = 0)\
.append(mean_line, ignore_index = True)
# set fig size
fig, ax = plt.subplots()
end = datetime.now()
if self.time:
print(f"Preprocessed data in {(end - start).total_seconds():.2f} seconds")
# plot ICE
start = datetime.now()
self.ice_plot_helper(plot_data = plot_sub_data, ax = ax,
feature = feature, y_var = y_var,plot_close = feature_continuous)
handles, labels = ax.get_legend_handles_labels()
unique_labels, i = np.unique(labels, return_index = True)
unique_handles = np.array(handles)[i]
ax.legend(unique_handles, unique_labels,
markerscale = 0.6, fontsize = 'x-small')
end = datetime.now()
if self.time:
print(f"Plotted in {(end - start).total_seconds():.2f} seconds")
plt.tight_layout()
if save_path is not None:
fig.savefig(save_path,
bbox_inches = 'tight',
pad_inches = 0.1)
return
# return (ax, fig)
def ice_plot(self, save_path = None,
plot_num = 200, ncols = 3, mode = "ice"):
'''
Plot all ICE plots in a grid
'''
if not self.fit_all:
raise Exception("Call `fit` method before trying to plot. You can also call `plot_single_feature`.")
nrows, num_plots = int(np.ceil(len(self.ice_dfs.keys()) / ncols)), len(self.ice_dfs.keys())
all_features = np.sort(list(self.ice_dfs.keys()))
y_var = np.select(
[mode == "ice",
mode == "d-ice",
mode == "c-ice"],
["y_pred", "dydx", "y_pred_centered"]).item()
if nrows == 1:
ncols = num_plots
fig, axs = plt.subplots(nrows = nrows, ncols = ncols, figsize = (5*ncols,1*num_plots))
if self.trace:
print(f"Num rows: {nrows}, Num columns: {ncols}, Num plots: {num_plots}")
for i, feature in enumerate(all_features):
plot_data = self.ice_dfs[feature]
unique_features = plot_data[feature].unique()
if len(unique_features) >= 10:
feature_continuous = True
else:
feature_continuous = False
unique_obs = plot_data.obs.unique()
ob_sample = np.random.choice(plot_data.obs.unique(),
size = min(len(unique_obs), plot_num), replace = False)
mean_line = plot_data\
.groupby(feature)\
.agg(y_pred = (y_var, 'mean'))\
.reset_index()\
.rename({'y_pred':y_var}, axis = 1)\
.assign(obs = -1,
mean_line = 1)
plot_sub_data = plot_data\
.loc[lambda x:x.obs.isin(ob_sample)]\
.assign(mean_line = 0)\
.append(mean_line, ignore_index = True)
# plot ICE
if self.trace:
print(f"Plotting for {feature}")
if nrows == 1:
self.ice_plot_helper(plot_data = plot_sub_data,
ax = axs[i], feature = feature,
y_var = y_var,
plot_close = feature_continuous)
else:
self.ice_plot_helper(plot_data = plot_sub_data,
ax = axs[int(i/ncols),i%ncols], feature = feature,
y_var = y_var,
plot_close = feature_continuous)
if nrows == 1:
handles, labels = axs[0].get_legend_handles_labels()
else:
handles, labels = axs[0,0].get_legend_handles_labels()
unique_labels, i = np.unique(labels, return_index = True)
unique_handles = np.array(handles)[i]
# fig.subplots_adjust(hspace=.5)
fig.legend(unique_handles, unique_labels,
loc='lower center', borderaxespad = 0.5, borderpad = 0.5)
plt.tight_layout()
if save_path is not None:
fig.savefig(save_path,
bbox_inches = 'tight',
pad_inches = 1)
def ice_plot_helper(self, plot_data, ax, feature, y_var,
plot_mean = True, plot_points = True, plot_close = True,
close_multiple = 0.5, axis_font_size = 10):
'''
Given the 'obs' column in @plot_data, plot the ICE plot onto @ax.
@param plot_data: Dataset to plot with 'obs', @feature,
'feature_distance,' and 'y_pred' columns
@param ax: Plot axis object
@param feature: Feature to make ICE plot of
@param plot_mean: whether to plot the mean line
@param plot_points: Whether to plot a scatterplot of original data
@param close_multiple: Multiple of standard deviation to be "close"
to original data point
@param axis_font_size: Font size of x- and y-labels
'''
unique_obs = plot_data.obs.unique()
unique_obs = unique_obs[unique_obs != -1]
close_radius = close_multiple*np.std(plot_data[feature])
# Plot observation lines
for ob in unique_obs:
d = plot_data.loc[lambda x:x.obs == ob]
if plot_close:
d_close = d.loc[lambda x:x.feature_distance <= close_radius]
ax.plot(feature, y_var,
label = "Full range",
alpha = 0.3, data = d, color = "grey", ls = "--")
ax.plot(feature, y_var,
label = fr'Close: $\pm {close_multiple} \sigma$',
alpha = 0.3, data = d_close, color = "black", ls = "-")
else:
ax.plot(feature, y_var,
label = fr'Close: $\pm {close_multiple} \sigma$',
alpha = 0.3, data = d, color = "black", ls = "-")
# Plot mean line
if plot_mean:
d = plot_data.loc[lambda x:x.obs == -1]
ax.plot(feature, y_var, label = "Mean line", alpha = 5,
data = d, color = "gold", ls = "-")
# Plot scatterplot of points
if plot_points:
point_data = plot_data\
                .loc[lambda x:x.feature_distance == 0]
ax.scatter(point_data[feature],
point_data[y_var],
color = 'green',
alpha = 0.5,
label = "Original data")
ax.set_xlabel(feature, fontsize=axis_font_size)
if self.model_type == 'binary':
ax.set_ylabel('Predicted Probability', fontsize=axis_font_size)
elif self.model_type == 'continuous':
ax.set_ylabel('Target', fontsize=axis_font_size)
else:
            raise ValueError("model_type must be 'binary' or 'continuous'")
return ax
def feature_hist(self, save_path = None, remove_zeros = True, ncols = 3, plot_num = 300):
'''
Plot all feature importance histograms in a grid
'''
if not self.fit_all:
raise Exception("Call `fit` method before trying to plot.")
nrows, num_plots = int(np.ceil(len(self.ice_dfs.keys())/ ncols)), len(self.ice_dfs.keys())
all_features = np.sort(list(self.ice_dfs.keys()))
if nrows == 1:
ncols = num_plots
fig, axs = plt.subplots(nrows = nrows, ncols = ncols,
figsize = (5*ncols,1*num_plots), sharey = True)
for i, feature in enumerate(all_features):
plot_data = self.ice_dfs[feature]\
.loc[:,['dydx']]\
.dropna(how = 'any')
if remove_zeros:
plot_data = plot_data\
.loc[lambda x:x.dydx != 0]
if nrows == 1:
axs[i].hist(plot_data['dydx'])
axs[i].set_xlabel(feature, fontsize=10)
else:
                axs[int(i/ncols), i%ncols].hist(plot_data['dydx'])
                axs[int(i/ncols), i%ncols].set_xlabel(feature, fontsize=10)
# fig.subplots_adjust(hspace=.5)
plt.tight_layout()
if save_path is not None:
fig.savefig(save_path,
bbox_inches = 'tight',
pad_inches = 1)
def feature_table(self):
fi_df = pd.DataFrame()
        for feature in self.ice_fis:
fi_df = fi_df\
.append(self.get_feature_impact(feature), ignore_index = True)
fi_df = fi_df.fillna(0)
return fi_df
def get_feature_impact(self, feature):
return self.ice_fis[feature]
def uniform_sample(self, df, feature, frac_sample):
'''
        Uniformly sample across quantiles of the feature so that no portion of
        the feature's distribution is left out.
        @param df : Covariate matrix.
        @param feature : Target covariate to bin by.
        @param frac_sample : Fraction of observations to sample within each bin.
@examples
uniform_sample(df, 'Age')
'''
# Determine if categorical or continuous
num_obs = df.shape[0]
num_unique_feature_values = len(df[feature].unique())
if num_unique_feature_values > 10:
featureIsCategorical = False
else:
featureIsCategorical = True
if self.trace:
print(f"{feature} is categorical: {featureIsCategorical}")
# Categorical
if featureIsCategorical:
sample_df = df\
.groupby(feature)\
.apply(lambda x:x.sample(int(np.ceil(x.shape[0] * frac_sample))))\
.reset_index(drop = True)
elif not featureIsCategorical:
sample_df = df.copy()
sample_df['quantile'] = pd.qcut(sample_df[feature], q = 10, duplicates = 'drop')
sample_df = sample_df\
.groupby('quantile')\
.apply(lambda x:x.sample(int(np.ceil(x.shape[0] * frac_sample))))\
.reset_index(drop = True)\
.drop('quantile', axis = 1)
if self.trace:
print(f"Sample df has {sample_df.shape[0]} observations, {sample_df.shape[0]/num_obs}% of the observations in the original df.")
return sample_df
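# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original script; `clf` and `X_df`
# are assumed to be a fitted scikit-learn classifier and a pandas feature
# matrix):
#
#   ice = ICE(model_type = "binary", frac_sample = 0.25, seed_num = 42)
#   ice.fit(X_df, clf)                      # build ICE datasets for all features
#   ice.ice_plot_single_feature("age")      # ICE curves for one feature
#   fi_table = ice.feature_table()          # feature-impact summary table
# ---------------------------------------------------------------------------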
| 29.141243 | 131 | 0.656585 |
4ae74d58b77d8afd83615ad0346a8e2dfcf0bbb2
| 19,824 |
py
|
Python
|
neural_style_my_edits.py
|
spot92/neural-style-pt
|
af530888c14be348c65367257b6dbb6363c96276
|
[
"MIT"
] | 1 |
2020-12-30T22:22:23.000Z
|
2020-12-30T22:22:23.000Z
|
neural_style_my_edits.py
|
spot92/neural-style-pt
|
af530888c14be348c65367257b6dbb6363c96276
|
[
"MIT"
] | null | null | null |
neural_style_my_edits.py
|
spot92/neural-style-pt
|
af530888c14be348c65367257b6dbb6363c96276
|
[
"MIT"
] | null | null | null |
import os
import copy
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
# Changes from the original script are marked between short and long segments of ####
# Lines removed from the original are noted with "Deleted:"
from PIL import Image
from CaffeLoader import loadCaffemodel, ModelParallel
import argparse
parser = argparse.ArgumentParser()
# Basic options
parser.add_argument("-style_image", help="Style target image", default='examples/inputs/seated-nude.jpg')
parser.add_argument("-style_blend_weights", default=None)
parser.add_argument("-content_image", help="Content target image", default='examples/inputs/tubingen.jpg')
parser.add_argument("-image_size", help="Maximum height / width of generated image", type=int, default=512)
parser.add_argument("-gpu", help="Zero-indexed ID of the GPU to use; for CPU mode set -gpu = c", default=0)
# Optimization options
parser.add_argument("-content_weight", type=float, default=5e0)
parser.add_argument("-style_weight", type=float, default=1e2)
parser.add_argument("-normalize_weights", action='store_true')
parser.add_argument("-tv_weight", type=float, default=1e-3)
parser.add_argument("-num_iterations", type=int, default=1000)
parser.add_argument("-init", choices=['random', 'image'], default='random')
parser.add_argument("-init_image", default=None)
parser.add_argument("-optimizer", choices=['lbfgs', 'adam'], default='lbfgs')
parser.add_argument("-learning_rate", type=float, default=1e0)
parser.add_argument("-lbfgs_num_correction", type=int, default=100)
# Output options
parser.add_argument("-print_iter", type=int, default=50)
parser.add_argument("-save_iter", type=int, default=100)
parser.add_argument("-output_image", default='out.png')
# Other options
parser.add_argument("-style_scale", type=float, default=1.0)
parser.add_argument("-original_colors", type=int, choices=[0, 1], default=0)
parser.add_argument("-pooling", choices=['avg', 'max'], default='max')
parser.add_argument("-model_file", type=str, default='models/vgg19-d01eb7cb.pth')
parser.add_argument("-disable_check", action='store_true')
parser.add_argument("-backend", choices=['nn', 'cudnn', 'mkl', 'mkldnn', 'openmp', 'mkl,cudnn', 'cudnn,mkl'], default='nn')
parser.add_argument("-cudnn_autotune", action='store_true')
parser.add_argument("-seed", type=int, default=-1)
parser.add_argument("-content_layers", help="layers for content", default='relu4_2')
parser.add_argument("-style_layers", help="layers for style", default='relu1_1,relu2_1,relu3_1,relu4_1,relu5_1')
parser.add_argument("-multidevice_strategy", default='4,7,29')
params = parser.parse_args()
Image.MAX_IMAGE_PIXELS = 1000000000 # Support gigapixel images
def main():
dtype, multidevice, backward_device = setup_gpu()
cnn, layerList = loadCaffemodel(params.model_file, params.pooling, params.gpu, params.disable_check)
content_image = preprocess(params.content_image, params.image_size).type(dtype)
#####################################################
    Ch = content_image.size(2) # content height: the tensor shape is (batch, channels, height, width)
    Cw = content_image.size(3) # content width
#################################################################
style_image_input = params.style_image.split(',')
style_image_list, ext = [], [".jpg", ".jpeg", ".png", ".tiff"]
for image in style_image_input:
if os.path.isdir(image):
images = (image + "/" + file for file in os.listdir(image)
if os.path.splitext(file)[1].lower() in ext)
style_image_list.extend(images)
else:
style_image_list.append(image)
style_images_caffe = []
for image in style_image_list:
#################################################
        # Open the style image from its given path (preprocess() below opens the same path)
        im_sizing = Image.open(image)
        print(im_sizing)
        Sw = im_sizing.size[0] # PIL's Image.size is (width, height)
        Sh = im_sizing.size[1] # style height
style_size = 0
resizeStyle = 1
Cr = Cw / Ch
Sr = Sw / Sh
if Cr >= Sr:
if Sr >= 1:
style_size = Cw * params.style_scale
else:
style_size = params.style_scale * Cw * Sh /Sw
if style_size > Sw:
style_size = Sw
resizeStyle = 0
else:
if Sr >= 1:
style_size = params.style_scale * Ch * Sw /Sh
else:
style_size = Ch * params.style_scale
if style_size > Sh:
style_size = Sh
resizeStyle = 0
#############################################################
#Deleted: style_size = int(params.image_size * params.style_scale)
img_caffe = preprocess(image, style_size).type(dtype)
style_images_caffe.append(img_caffe)
if params.init_image != None:
image_size = (content_image.size(2), content_image.size(3))
init_image = preprocess(params.init_image, image_size).type(dtype)
# Handle style blending weights for multiple style inputs
style_blend_weights = []
if params.style_blend_weights == None:
# Style blending not specified, so use equal weighting
for i in style_image_list:
style_blend_weights.append(1.0)
for i, blend_weights in enumerate(style_blend_weights):
style_blend_weights[i] = int(style_blend_weights[i])
else:
style_blend_weights = params.style_blend_weights.split(',')
assert len(style_blend_weights) == len(style_image_list), \
"-style_blend_weights and -style_images must have the same number of elements!"
# Normalize the style blending weights so they sum to 1
style_blend_sum = 0
for i, blend_weights in enumerate(style_blend_weights):
style_blend_weights[i] = float(style_blend_weights[i])
style_blend_sum = float(style_blend_sum) + style_blend_weights[i]
for i, blend_weights in enumerate(style_blend_weights):
style_blend_weights[i] = float(style_blend_weights[i]) / float(style_blend_sum)
content_layers = params.content_layers.split(',')
style_layers = params.style_layers.split(',')
# Set up the network, inserting style and content loss modules
cnn = copy.deepcopy(cnn)
content_losses, style_losses, tv_losses = [], [], []
next_content_idx, next_style_idx = 1, 1
net = nn.Sequential()
c, r = 0, 0
if params.tv_weight > 0:
tv_mod = TVLoss(params.tv_weight).type(dtype)
net.add_module(str(len(net)), tv_mod)
tv_losses.append(tv_mod)
for i, layer in enumerate(list(cnn), 1):
if next_content_idx <= len(content_layers) or next_style_idx <= len(style_layers):
if isinstance(layer, nn.Conv2d):
net.add_module(str(len(net)), layer)
if layerList['C'][c] in content_layers:
print("Setting up content layer " + str(i) + ": " + str(layerList['C'][c]))
loss_module = ContentLoss(params.content_weight)
net.add_module(str(len(net)), loss_module)
content_losses.append(loss_module)
if layerList['C'][c] in style_layers:
print("Setting up style layer " + str(i) + ": " + str(layerList['C'][c]))
loss_module = StyleLoss(params.style_weight)
net.add_module(str(len(net)), loss_module)
style_losses.append(loss_module)
c+=1
if isinstance(layer, nn.ReLU):
net.add_module(str(len(net)), layer)
if layerList['R'][r] in content_layers:
print("Setting up content layer " + str(i) + ": " + str(layerList['R'][r]))
loss_module = ContentLoss(params.content_weight)
net.add_module(str(len(net)), loss_module)
content_losses.append(loss_module)
next_content_idx += 1
if layerList['R'][r] in style_layers:
print("Setting up style layer " + str(i) + ": " + str(layerList['R'][r]))
loss_module = StyleLoss(params.style_weight)
net.add_module(str(len(net)), loss_module)
style_losses.append(loss_module)
next_style_idx += 1
r+=1
if isinstance(layer, nn.MaxPool2d) or isinstance(layer, nn.AvgPool2d):
net.add_module(str(len(net)), layer)
if multidevice:
net = setup_multi_device(net)
# Capture content targets
for i in content_losses:
i.mode = 'capture'
print("Capturing content targets")
print_torch(net, multidevice)
net(content_image)
# Capture style targets
for i in content_losses:
i.mode = 'None'
for i, image in enumerate(style_images_caffe):
print("Capturing style target " + str(i+1))
for j in style_losses:
j.mode = 'capture'
j.blend_weight = style_blend_weights[i]
net(style_images_caffe[i])
# Set all loss modules to loss mode
for i in content_losses:
i.mode = 'loss'
for i in style_losses:
i.mode = 'loss'
# Maybe normalize content and style weights
if params.normalize_weights:
normalize_weights(content_losses, style_losses)
# Freeze the network in order to prevent
# unnecessary gradient calculations
for param in net.parameters():
param.requires_grad = False
# Initialize the image
if params.seed >= 0:
torch.manual_seed(params.seed)
torch.cuda.manual_seed_all(params.seed)
torch.backends.cudnn.deterministic=True
if params.init == 'random':
B, C, H, W = content_image.size()
img = torch.randn(C, H, W).mul(0.001).unsqueeze(0).type(dtype)
elif params.init == 'image':
if params.init_image != None:
img = init_image.clone()
else:
img = content_image.clone()
img = nn.Parameter(img)
def maybe_print(t, loss):
if params.print_iter > 0 and t % params.print_iter == 0:
print("Iteration " + str(t) + " / "+ str(params.num_iterations))
for i, loss_module in enumerate(content_losses):
print(" Content " + str(i+1) + " loss: " + str(loss_module.loss.item()))
for i, loss_module in enumerate(style_losses):
print(" Style " + str(i+1) + " loss: " + str(loss_module.loss.item()))
print(" Total loss: " + str(loss.item()))
def maybe_save(t):
should_save = params.save_iter > 0 and t % params.save_iter == 0
should_save = should_save or t == params.num_iterations
if should_save:
output_filename, file_extension = os.path.splitext(params.output_image)
if t == params.num_iterations:
filename = output_filename + str(file_extension)
else:
filename = str(output_filename) + "_" + str(t) + str(file_extension)
disp = deprocess(img.clone())
# Maybe perform postprocessing for color-independent style transfer
if params.original_colors == 1:
disp = original_colors(deprocess(content_image.clone()), disp)
disp.save(str(filename))
# Function to evaluate loss and gradient. We run the net forward and
# backward to get the gradient, and sum up losses from the loss modules.
# optim.lbfgs internally handles iteration and calls this function many
# times, so we manually count the number of iterations to handle printing
# and saving intermediate results.
num_calls = [0]
def feval():
num_calls[0] += 1
optimizer.zero_grad()
net(img)
loss = 0
for mod in content_losses:
loss += mod.loss.to(backward_device)
for mod in style_losses:
loss += mod.loss.to(backward_device)
if params.tv_weight > 0:
for mod in tv_losses:
loss += mod.loss.to(backward_device)
loss.backward()
maybe_save(num_calls[0])
maybe_print(num_calls[0], loss)
return loss
optimizer, loopVal = setup_optimizer(img)
while num_calls[0] <= loopVal:
optimizer.step(feval)
# Configure the optimizer
def setup_optimizer(img):
if params.optimizer == 'lbfgs':
print("Running optimization with L-BFGS")
optim_state = {
'max_iter': params.num_iterations,
'tolerance_change': -1,
'tolerance_grad': -1,
}
if params.lbfgs_num_correction != 100:
optim_state['history_size'] = params.lbfgs_num_correction
optimizer = optim.LBFGS([img], **optim_state)
loopVal = 1
elif params.optimizer == 'adam':
print("Running optimization with ADAM")
optimizer = optim.Adam([img], lr = params.learning_rate)
loopVal = params.num_iterations - 1
return optimizer, loopVal
def setup_gpu():
def setup_cuda():
if 'cudnn' in params.backend:
torch.backends.cudnn.enabled = True
if params.cudnn_autotune:
torch.backends.cudnn.benchmark = True
else:
torch.backends.cudnn.enabled = False
def setup_cpu():
if 'mkl' in params.backend and 'mkldnn' not in params.backend:
torch.backends.mkl.enabled = True
elif 'mkldnn' in params.backend:
raise ValueError("MKL-DNN is not supported yet.")
elif 'openmp' in params.backend:
torch.backends.openmp.enabled = True
multidevice = False
if "," in str(params.gpu):
devices = params.gpu.split(',')
multidevice = True
if 'c' in str(devices[0]).lower():
backward_device = "cpu"
setup_cuda(), setup_cpu()
else:
backward_device = "cuda:" + devices[0]
setup_cuda()
dtype = torch.FloatTensor
elif "c" not in str(params.gpu).lower():
setup_cuda()
dtype, backward_device = torch.cuda.FloatTensor, "cuda:" + str(params.gpu)
else:
setup_cpu()
dtype, backward_device = torch.FloatTensor, "cpu"
return dtype, multidevice, backward_device
def setup_multi_device(net):
assert len(params.gpu.split(',')) - 1 == len(params.multidevice_strategy.split(',')), \
"The number of -multidevice_strategy layer indices minus 1, must be equal to the number of -gpu devices."
new_net = ModelParallel(net, params.gpu, params.multidevice_strategy)
return new_net
# Preprocess an image before passing it to a model.
# We need to rescale from [0, 1] to [0, 255], convert from RGB to BGR,
# and subtract the mean pixel.
def preprocess(image_name, image_size):
image = Image.open(image_name).convert('RGB')
if type(image_size) is not tuple:
image_size = tuple([int((float(image_size) / max(image.size))*x) for x in (image.height, image.width)])
Loader = transforms.Compose([transforms.Resize(image_size), transforms.ToTensor()])
rgb2bgr = transforms.Compose([transforms.Lambda(lambda x: x[torch.LongTensor([2,1,0])])])
Normalize = transforms.Compose([transforms.Normalize(mean=[103.939, 116.779, 123.68], std=[1,1,1])])
tensor = Normalize(rgb2bgr(Loader(image) * 256)).unsqueeze(0)
return tensor
# Undo the above preprocessing.
def deprocess(output_tensor):
Normalize = transforms.Compose([transforms.Normalize(mean=[-103.939, -116.779, -123.68], std=[1,1,1])])
bgr2rgb = transforms.Compose([transforms.Lambda(lambda x: x[torch.LongTensor([2,1,0])])])
output_tensor = bgr2rgb(Normalize(output_tensor.squeeze(0).cpu())) / 256
output_tensor.clamp_(0, 1)
Image2PIL = transforms.ToPILImage()
image = Image2PIL(output_tensor.cpu())
return image
# Combine the Y channel of the generated image and the UV/CbCr channels of the
# content image to perform color-independent style transfer.
def original_colors(content, generated):
content_channels = list(content.convert('YCbCr').split())
generated_channels = list(generated.convert('YCbCr').split())
content_channels[0] = generated_channels[0]
return Image.merge('YCbCr', content_channels).convert('RGB')
# Print like Lua/Torch7
def print_torch(net, multidevice):
if multidevice:
return
simplelist = ""
for i, layer in enumerate(net, 1):
simplelist = simplelist + "(" + str(i) + ") -> "
print("nn.Sequential ( \n [input -> " + simplelist + "output]")
def strip(x):
return str(x).replace(", ",',').replace("(",'').replace(")",'') + ", "
def n():
return " (" + str(i) + "): " + "nn." + str(l).split("(", 1)[0]
for i, l in enumerate(net, 1):
if "2d" in str(l):
ks, st, pd = strip(l.kernel_size), strip(l.stride), strip(l.padding)
if "Conv2d" in str(l):
ch = str(l.in_channels) + " -> " + str(l.out_channels)
print(n() + "(" + ch + ", " + (ks).replace(",",'x', 1) + st + pd.replace(", ",')'))
elif "Pool2d" in str(l):
st = st.replace(" ",' ') + st.replace(", ",')')
print(n() + "(" + ((ks).replace(",",'x' + ks, 1) + st).replace(", ",','))
else:
print(n())
print(")")
# Divide weights by channel size
def normalize_weights(content_losses, style_losses):
for n, i in enumerate(content_losses):
i.strength = i.strength / max(i.target.size())
for n, i in enumerate(style_losses):
i.strength = i.strength / max(i.target.size())
# Define an nn Module to compute content loss
class ContentLoss(nn.Module):
def __init__(self, strength):
super(ContentLoss, self).__init__()
self.strength = strength
self.crit = nn.MSELoss()
self.mode = 'None'
def forward(self, input):
if self.mode == 'loss':
self.loss = self.crit(input, self.target) * self.strength
elif self.mode == 'capture':
self.target = input.detach()
return input
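# Compute the Gram matrix (C x C inner products of the flattened feature maps),
# which serves as the style representation compared by StyleLoss below.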
class GramMatrix(nn.Module):
def forward(self, input):
B, C, H, W = input.size()
x_flat = input.view(C, H * W)
return torch.mm(x_flat, x_flat.t())
# Define an nn Module to compute style loss
class StyleLoss(nn.Module):
def __init__(self, strength):
super(StyleLoss, self).__init__()
self.target = torch.Tensor()
self.strength = strength
self.gram = GramMatrix()
self.crit = nn.MSELoss()
self.mode = 'None'
self.blend_weight = None
def forward(self, input):
self.G = self.gram(input)
self.G = self.G.div(input.nelement())
if self.mode == 'capture':
if self.blend_weight == None:
self.target = self.G.detach()
elif self.target.nelement() == 0:
self.target = self.G.detach().mul(self.blend_weight)
else:
self.target = self.target.add(self.blend_weight, self.G.detach())
elif self.mode == 'loss':
self.loss = self.strength * self.crit(self.G, self.target)
return input
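# Total variation regularizer: penalizes the summed absolute differences between
# neighboring pixels (vertically and horizontally) to encourage smooth images.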
class TVLoss(nn.Module):
def __init__(self, strength):
super(TVLoss, self).__init__()
self.strength = strength
def forward(self, input):
self.x_diff = input[:,:,1:,:] - input[:,:,:-1,:]
self.y_diff = input[:,:,:,1:] - input[:,:,:,:-1]
self.loss = self.strength * (torch.sum(torch.abs(self.x_diff)) + torch.sum(torch.abs(self.y_diff)))
return input
if __name__ == "__main__":
main()
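# Illustrative command line (paths are the argument defaults declared above;
# adjust for your own setup):
#   python neural_style_my_edits.py -content_image examples/inputs/tubingen.jpg \
#       -style_image examples/inputs/seated-nude.jpg -image_size 512 -gpu 0 \
#       -output_image out.png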
| 38.870588 | 123 | 0.61592 |
157c236aa413d0b53bd35fb999fb02fa305306eb
| 23,273 |
py
|
Python
|
core/platform/auth/firebase_auth_services.py
|
YBCS/oppia
|
f74b606e8511cd4296b3c99aad37e53b66cca196
|
[
"Apache-2.0"
] | null | null | null |
core/platform/auth/firebase_auth_services.py
|
YBCS/oppia
|
f74b606e8511cd4296b3c99aad37e53b66cca196
|
[
"Apache-2.0"
] | 4 |
2022-02-12T14:02:05.000Z
|
2022-03-27T18:08:48.000Z
|
core/platform/auth/firebase_auth_services.py
|
YBCS/oppia
|
f74b606e8511cd4296b3c99aad37e53b66cca196
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service layer for handling user-authentication with Firebase.
Oppia depends on OpenID Connect 1.0 to handle user authentication. We use
[Firebase authentication](https://firebase.google.com/docs/auth) to do the
heavy-lifting, especially for securely storing user credentials and associating
users to their identity providers. This helps us minimize the contact we make
with private information.
Terminology:
OpenID Connect 1.0 (OIDC):
A simple identity layer on top of the OAuth 2.0 protocol. It is a
specification (i.e. a strict set of algorithms, data structures, and
rules) that defines how two parties must share data about a user in
a secure way on that user's behalf.
OAuth 2.0 (OAuth):
The industry-standard protocol for authorization. It enables a
third-party application to obtain limited access to an HTTP service on
behalf of a user.
Claim:
A piece of information about a user (name, address, phone number, etc.)
that has been encrypted and digitally signed.
JSON Web Token (JWT):
A compact and URL-safe protocol primarily designed to send Claims
between two parties. Claims are organized into JSON objects that map
"Claim Names" to "Claim Values".
Identity provider:
An entity that creates, maintains, and manages identity information and
provides authentication services. Such services rely on JWTs to send
identity information. Examples of identity providers include: Google,
Facebook, Email verification links, and Text message SMS codes.
Subject Identifier:
A Claim that can uniquely identify a user. It is locally unique and
never reassigned with respect to the provider who issued it. The Claim's
name is 'sub'.
Example values: `24400320` or `AItOawmwtWwcT0k51BayewNvutrJUqsvl6qs7A4`.
"""
from __future__ import annotations
import logging
from core import feconf
from core import python_utils
from core.constants import constants
from core.domain import auth_domain
from core.platform import models
import firebase_admin
from firebase_admin import auth as firebase_auth
from firebase_admin import exceptions as firebase_exceptions
from typing import List, Optional
import webapp2
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import auth_models
auth_models, user_models = (
models.Registry.import_models([models.NAMES.auth, models.NAMES.user]))
transaction_services = models.Registry.import_transaction_services()
def establish_firebase_connection() -> None:
"""Establishes the connection to Firebase needed by the rest of the SDK.
All Firebase operations require an "app", the abstraction used for a
Firebase server connection. The initialize_app() function raises an error
when it's called more than once, however, so we make this function
idempotent by trying to "get" the app first.
Returns:
        firebase_admin.App. The App being used by the Firebase SDK.
Raises:
Exception. The Firebase app has a genuine problem.
"""
try:
firebase_admin.get_app()
except ValueError as error:
if 'initialize_app' in str(error):
firebase_admin.initialize_app(
options={'projectId': feconf.OPPIA_PROJECT_ID})
else:
raise
def establish_auth_session(
request: webapp2.Request,
response: webapp2.Response
) -> None:
"""Sets login cookies to maintain a user's sign-in session.
Args:
request: webapp2.Request. The request with the authorization to begin a
new session.
response: webapp2.Response. The response to establish the new session
upon.
"""
claims = _get_auth_claims_from_session_cookie(_get_session_cookie(request))
# If the request already contains a valid session cookie, then there's no
# action necessary; the session is already established.
if claims is not None:
return
fresh_cookie = firebase_auth.create_session_cookie(
_get_id_token(request), feconf.FIREBASE_SESSION_COOKIE_MAX_AGE)
response.set_cookie(
constants.FIREBASE_AUTH_SESSION_COOKIE_NAME,
value=fresh_cookie,
max_age=feconf.FIREBASE_SESSION_COOKIE_MAX_AGE,
overwrite=True,
# Toggles https vs http. The production server uses https, but the local
        # development server uses http.
secure=(not constants.EMULATOR_MODE),
# Using the HttpOnly flag when generating a cookie helps mitigate the
# risk of client side script accessing the protected cookie (if the
# browser supports it).
# Learn more: https://owasp.org/www-community/HttpOnly.
httponly=True)
def destroy_auth_session(response: webapp2.Response) -> None:
"""Clears login cookies from the given response headers.
Args:
response: webapp2.Response. Response to clear the cookies from.
"""
response.delete_cookie(constants.FIREBASE_AUTH_SESSION_COOKIE_NAME)
def get_auth_claims_from_request(
request: webapp2.Request
) -> Optional[auth_domain.AuthClaims]:
"""Authenticates the request and returns claims about its authorizer.
Args:
request: webapp2.Request. The HTTP request to authenticate.
Returns:
AuthClaims|None. Claims about the currently signed in user. If no user
is signed in, then returns None.
Raises:
InvalidAuthSessionError. The request contains an invalid session.
StaleAuthSessionError. The cookie has lost its authority.
"""
return _get_auth_claims_from_session_cookie(_get_session_cookie(request))
def mark_user_for_deletion(user_id: str) -> None:
"""Marks the user, and all of their auth associations, as deleted.
    This function also disables the user's Firebase account so that it cannot
    be used to sign in.
Args:
user_id: str. The unique ID of the user whose associations should be
deleted.
"""
# NOTE: We use get_multi(include_deleted=True) because get() returns None
# for models with deleted=True, but we need to make changes to those models
# when managing deletion.
(assoc_by_user_id_model,) = auth_models.UserAuthDetailsModel.get_multi(
[user_id], include_deleted=True)
if assoc_by_user_id_model is not None:
assoc_by_user_id_model.deleted = True
assoc_by_user_id_model.update_timestamps()
assoc_by_user_id_model.put()
assoc_by_auth_id_model = (
auth_models.UserIdByFirebaseAuthIdModel.get_by_user_id(user_id)
if assoc_by_user_id_model is None else
# NOTE: We use get_multi(include_deleted=True) because get() returns
# None for models with deleted=True, but we need to make changes to
# those models when managing deletion.
auth_models.UserIdByFirebaseAuthIdModel.get_multi(
[assoc_by_user_id_model.firebase_auth_id], include_deleted=True)[0])
if assoc_by_auth_id_model is not None:
assoc_by_auth_id_model.deleted = True
assoc_by_auth_id_model.update_timestamps()
assoc_by_auth_id_model.put()
else:
logging.error(
'[WIPEOUT] User with user_id=%s has no Firebase account' % user_id)
return
try:
firebase_auth.update_user(assoc_by_auth_id_model.id, disabled=True)
except (firebase_exceptions.FirebaseError, ValueError):
# NOTE: logging.exception appends the stack trace automatically. The
# errors are not re-raised because wipeout_services, the user of this
# function, does not use exceptions to keep track of failures. It uses
# the verify_external_auth_associations_are_deleted() function instead.
logging.exception(
'[WIPEOUT] Failed to disable Firebase account! Stack trace:')
def delete_external_auth_associations(user_id: str) -> None:
"""Deletes all associations that refer to the user outside of Oppia.
Args:
user_id: str. The unique ID of the user whose associations should be
deleted.
"""
auth_id = get_auth_id_from_user_id(user_id, include_deleted=True)
if auth_id is None:
return
try:
firebase_auth.delete_user(auth_id)
except firebase_auth.UserNotFoundError:
logging.exception('[WIPEOUT] Firebase account already deleted')
except (firebase_exceptions.FirebaseError, ValueError):
# NOTE: logging.exception appends the stack trace automatically. The
# errors are not re-raised because wipeout_services, the user of this
# function, does not use exceptions to keep track of failures. It uses
# the verify_external_auth_associations_are_deleted() function instead.
logging.exception('[WIPEOUT] Firebase Admin SDK failed! Stack trace:')
def verify_external_auth_associations_are_deleted(user_id: str) -> bool:
"""Returns true if and only if we have successfully verified that all
external associations have been deleted.
Args:
user_id: str. The unique ID of the user whose associations should be
checked.
Returns:
bool. True if and only if we have successfully verified that all
external associations have been deleted.
"""
auth_id = get_auth_id_from_user_id(user_id, include_deleted=True)
if auth_id is None:
return True
try:
result = firebase_auth.get_users([firebase_auth.UidIdentifier(auth_id)])
return len(result.users) == 0
except (firebase_exceptions.FirebaseError, ValueError):
# NOTE: logging.exception appends the stack trace automatically. The
# errors are not re-raised because wipeout_services, the user of this
# function, will keep retrying the other "delete" family of functions
# until this returns True (in 12h intervals).
logging.exception('[WIPEOUT] Firebase Admin SDK failed! Stack trace:')
return False
def get_auth_id_from_user_id(
user_id: str, include_deleted: bool = False
) -> Optional[str]:
"""Returns the auth ID associated with the given user ID.
Args:
user_id: str. The user ID.
include_deleted: bool. Whether to return the ID of models marked for
deletion.
Returns:
str|None. The auth ID associated with the given user ID, or None if no
association exists.
"""
(assoc_by_user_id_model,) = auth_models.UserAuthDetailsModel.get_multi(
[user_id], include_deleted=include_deleted)
return (
None if assoc_by_user_id_model is None else
assoc_by_user_id_model.firebase_auth_id)
def get_multi_auth_ids_from_user_ids(
user_ids: List[str]
) -> List[Optional[str]]:
"""Returns the auth IDs associated with the given user IDs.
Args:
user_ids: list(str). The user IDs.
Returns:
list(str|None). The auth IDs associated with each of the given user IDs,
or None for associations which don't exist.
"""
return [
None if model is None else model.firebase_auth_id
for model in auth_models.UserAuthDetailsModel.get_multi(user_ids)
]
def get_user_id_from_auth_id(
auth_id: str, include_deleted: bool = False
) -> Optional[str]:
"""Returns the user ID associated with the given auth ID.
Args:
auth_id: str. The auth ID.
include_deleted: bool. Whether to return the ID of models marked for
deletion.
Returns:
str|None. The user ID associated with the given auth ID, or None if no
association exists.
"""
(assoc_by_auth_id_model,) = (
auth_models.UserIdByFirebaseAuthIdModel.get_multi(
[auth_id], include_deleted=include_deleted))
return (
None if assoc_by_auth_id_model is None else
assoc_by_auth_id_model.user_id)
def get_multi_user_ids_from_auth_ids(
auth_ids: List[str]
) -> List[Optional[str]]:
"""Returns the user IDs associated with the given auth IDs.
Args:
auth_ids: list(str). The auth IDs.
Returns:
list(str|None). The user IDs associated with each of the given auth IDs,
or None for associations which don't exist.
"""
return [
None if model is None else model.user_id
for model in auth_models.UserIdByFirebaseAuthIdModel.get_multi(auth_ids)
]
def associate_auth_id_with_user_id(
auth_id_user_id_pair: auth_domain.AuthIdUserIdPair
) -> None:
"""Commits the association between auth ID and user ID.
Args:
auth_id_user_id_pair: auth_domain.AuthIdUserIdPair. The association to
commit.
Raises:
Exception. The IDs are already associated with a value.
"""
auth_id, user_id = auth_id_user_id_pair
user_id_collision = get_user_id_from_auth_id(auth_id, include_deleted=True)
if user_id_collision is not None:
raise Exception('auth_id=%r is already associated with user_id=%r' % (
auth_id, user_id_collision))
auth_id_collision = get_auth_id_from_user_id(user_id, include_deleted=True)
if auth_id_collision is not None:
raise Exception('user_id=%r is already associated with auth_id=%r' % (
user_id, auth_id_collision))
# A new {auth_id: user_id} mapping needs to be created. We know the model
# doesn't exist because get_auth_id_from_user_id returned None, even with
# include_deleted=True.
assoc_by_auth_id_model = (
auth_models.UserIdByFirebaseAuthIdModel(id=auth_id, user_id=user_id))
assoc_by_auth_id_model.update_timestamps()
assoc_by_auth_id_model.put()
# The {user_id: auth_id} mapping needs to be created, but the model used to
# store the relationship might already exist because other services use it
# as well (e.g. user_services uses UserAuthDetailsModel.parent_user_id). In
# such situations, the return value of get_auth_id_from_user_id would be
# None, so that isn't strong enough to determine whether we need to create a
# new model rather than update an existing one.
#
# NOTE: We use get_multi(include_deleted=True) because get() returns None
# for models with deleted=True, but we need to make changes to those models
# when managing deletion.
(assoc_by_user_id_model,) = auth_models.UserAuthDetailsModel.get_multi(
[user_id], include_deleted=True)
if (assoc_by_user_id_model is None or
assoc_by_user_id_model.firebase_auth_id is None):
assoc_by_user_id_model = auth_models.UserAuthDetailsModel(
id=user_id, firebase_auth_id=auth_id)
assoc_by_user_id_model.update_timestamps()
assoc_by_user_id_model.put()
def associate_multi_auth_ids_with_user_ids(
auth_id_user_id_pairs: List[auth_domain.AuthIdUserIdPair]
) -> None:
"""Commits the associations between auth IDs and user IDs.
Args:
auth_id_user_id_pairs: list(auth_domain.AuthIdUserIdPair). The
associations to commit.
Raises:
Exception. One or more auth associations already exist.
"""
# Turn list(pair) to pair(list): https://stackoverflow.com/a/7558990/4859885
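    # Illustrative example (values below are placeholders): ZIP(*[('a1', 'u1'),
    # ('a2', 'u2')]) yields ('a1', 'a2') and ('u1', 'u2'), i.e. one tuple of all
    # auth IDs followed by one tuple of all user IDs.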
auth_ids, user_ids = python_utils.ZIP(*auth_id_user_id_pairs)
user_id_collisions = get_multi_user_ids_from_auth_ids(auth_ids)
if any(user_id is not None for user_id in user_id_collisions):
user_id_collisions_text = ', '.join(
'{auth_id=%r: user_id=%r}' % (auth_id, user_id)
for auth_id, user_id in python_utils.ZIP(
auth_ids, user_id_collisions)
if user_id is not None)
raise Exception('already associated: %s' % user_id_collisions_text)
auth_id_collisions = get_multi_auth_ids_from_user_ids(user_ids)
if any(auth_id is not None for auth_id in auth_id_collisions):
auth_id_collisions_text = ', '.join(
'{user_id=%r: auth_id=%r}' % (user_id, auth_id)
for user_id, auth_id in python_utils.ZIP(
user_ids, auth_id_collisions)
if auth_id is not None)
raise Exception('already associated: %s' % auth_id_collisions_text)
# A new {auth_id: user_id} mapping needs to be created. We know the model
# doesn't exist because get_auth_id_from_user_id returned None.
assoc_by_auth_id_models = [
auth_models.UserIdByFirebaseAuthIdModel(id=auth_id, user_id=user_id)
for auth_id, user_id in python_utils.ZIP(auth_ids, user_ids)
]
auth_models.UserIdByFirebaseAuthIdModel.update_timestamps_multi(
assoc_by_auth_id_models)
auth_models.UserIdByFirebaseAuthIdModel.put_multi(assoc_by_auth_id_models)
# The {user_id: auth_id} mapping needs to be created, but the model used to
# store the relationship might already exist because other services use it
# as well (e.g. user_services uses UserAuthDetailsModel.parent_user_id). In
# such situations, the return value of get_multi_auth_ids_from_user_ids
# would be None, so that isn't strong enough to determine whether we need to
# create a new model rather than update an existing one.
assoc_by_user_id_models = [
auth_models.UserAuthDetailsModel(id=user_id, firebase_auth_id=auth_id)
for auth_id, user_id, assoc_by_user_id_model in python_utils.ZIP(
auth_ids, user_ids,
auth_models.UserAuthDetailsModel.get_multi(user_ids))
if (assoc_by_user_id_model is None or
assoc_by_user_id_model.firebase_auth_id is None)
]
if assoc_by_user_id_models:
auth_models.UserAuthDetailsModel.update_timestamps_multi(
assoc_by_user_id_models)
auth_models.UserAuthDetailsModel.put_multi(assoc_by_user_id_models)
def grant_super_admin_privileges(user_id: str) -> None:
"""Grants the user super admin privileges.
Args:
user_id: str. The Oppia user ID to promote to super admin.
"""
auth_id = get_auth_id_from_user_id(user_id)
if auth_id is None:
raise ValueError('user_id=%s has no Firebase account' % user_id)
custom_claims = '{"role":"%s"}' % feconf.FIREBASE_ROLE_SUPER_ADMIN
firebase_auth.set_custom_user_claims(auth_id, custom_claims)
# NOTE: Revoke session cookies and ID tokens of the user so they are forced
# to log back in to obtain their updated privileges.
firebase_auth.revoke_refresh_tokens(auth_id)
def revoke_super_admin_privileges(user_id: str) -> None:
"""Revokes the user's super admin privileges.
Args:
user_id: str. The Oppia user ID to revoke privileges from.
"""
auth_id = get_auth_id_from_user_id(user_id)
if auth_id is None:
raise ValueError('user_id=%s has no Firebase account' % user_id)
firebase_auth.set_custom_user_claims(auth_id, None)
# NOTE: Revoke session cookies and ID tokens of the user so they are forced
# to log back in to obtain their updated privileges.
firebase_auth.revoke_refresh_tokens(auth_id)
def _get_session_cookie(request: webapp2.Request) -> Optional[str]:
"""Returns the session cookie authorizing the signed in user, if present.
Args:
request: webapp2.Request. The HTTP request to inspect.
Returns:
str|None. Value of the session cookie authorizing the signed in user, if
present, otherwise None.
"""
return request.cookies.get(constants.FIREBASE_AUTH_SESSION_COOKIE_NAME)
def _get_id_token(request: webapp2.Request) -> Optional[str]:
"""Returns the ID token authorizing a user, or None if missing.
Oppia uses the OAuth 2.0's Bearer authentication scheme to send ID Tokens.
Bearer authentication (a.k.a. token authentication) is an HTTP
authentication scheme based on "bearer tokens", an encrypted JWT generated
by a trusted identity provider in response to login requests.
The name "Bearer authentication" can be understood as: "give access to the
bearer of this token." These tokens _must_ be sent in the `Authorization`
header of HTTP requests, and _must_ have the format: `Bearer <token>`.
Learn more about:
HTTP authentication schemes:
https://developer.mozilla.org/en-US/docs/Web/HTTP/Authentication
OAuth 2.0 Bearer authentication scheme:
https://oauth.net/2/bearer-tokens/
OpenID Connect 1.0 ID Tokens:
https://openid.net/specs/openid-connect-core-1_0.html#IDToken
Args:
request: webapp2.Request. The HTTP request to inspect.
Returns:
str|None. The ID Token of the request, if present, otherwise None.
"""
scheme, _, token = request.headers.get('Authorization', '').partition(' ')
return token if scheme == 'Bearer' else None
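# Illustrative example (the token value below is a placeholder): a request
# carrying the header 'Authorization: Bearer abc.def.ghi' makes this function
# return 'abc.def.ghi', while any other scheme (e.g. 'Basic ...') or a missing
# header yields None.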
def _get_auth_claims_from_session_cookie(
cookie: Optional[str]
) -> Optional[auth_domain.AuthClaims]:
"""Returns claims from the session cookie, or None if invalid.
Args:
cookie: str|None. The session cookie to extract claims from.
Returns:
AuthClaims|None. The claims from the session cookie, if available.
Otherwise returns None.
Raises:
InvalidAuthSessionError. The cookie has an invalid value.
StaleAuthSessionError. The cookie has lost its authority.
"""
# It's OK for a session cookie to be None or empty, it just means that the
# request hasn't been authenticated.
if not cookie:
return None
try:
claims = firebase_auth.verify_session_cookie(cookie, check_revoked=True)
except firebase_auth.ExpiredSessionCookieError:
raise auth_domain.StaleAuthSessionError('session has expired')
except firebase_auth.RevokedSessionCookieError:
raise auth_domain.StaleAuthSessionError('session has been revoked')
except (firebase_exceptions.FirebaseError, ValueError) as error:
raise auth_domain.InvalidAuthSessionError('session invalid: %s' % error)
else:
return _create_auth_claims(claims)
def _create_auth_claims(
firebase_claims: auth_domain.AuthClaimsDict
) -> auth_domain.AuthClaims:
"""Returns a new AuthClaims domain object from Firebase claims.
Args:
firebase_claims: dict(str: *). The raw claims returned by the Firebase
SDK.
Returns:
AuthClaims. Oppia's representation of auth claims.
"""
auth_id = firebase_claims['sub']
email = firebase_claims.get('email')
role_is_super_admin = (
email == feconf.ADMIN_EMAIL_ADDRESS or
firebase_claims.get('role') == feconf.FIREBASE_ROLE_SUPER_ADMIN)
return auth_domain.AuthClaims(
auth_id, email, role_is_super_admin=role_is_super_admin)
| 39.579932 | 80 | 0.712542 |
dc12ba92b3979d31738e26b377869a6506325a84
| 11,315 |
py
|
Python
|
src/sagemaker/job.py
|
eitansela/sagemaker-python-sdk
|
aa54102b5113b1d39bbbd4d9d341775f84641681
|
[
"Apache-2.0"
] | 1 |
2021-07-22T00:23:51.000Z
|
2021-07-22T00:23:51.000Z
|
src/sagemaker/job.py
|
eitansela/sagemaker-python-sdk
|
aa54102b5113b1d39bbbd4d9d341775f84641681
|
[
"Apache-2.0"
] | 24 |
2021-05-18T07:10:27.000Z
|
2021-05-28T13:36:51.000Z
|
src/sagemaker/job.py
|
eitansela/sagemaker-python-sdk
|
aa54102b5113b1d39bbbd4d9d341775f84641681
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import
from abc import abstractmethod
from six import string_types
from sagemaker.inputs import FileSystemInput, TrainingInput
from sagemaker.local import file_input
class _Job(object):
"""Handle creating, starting and waiting for Amazon SageMaker jobs to finish.
This class shouldn't be directly instantiated.
Subclasses must define a way to create, start and wait for an Amazon
SageMaker job.
"""
def __init__(self, sagemaker_session, job_name):
"""Placeholder docstring"""
self.sagemaker_session = sagemaker_session
self.job_name = job_name
@abstractmethod
def start_new(self, estimator, inputs):
"""Create a new Amazon SageMaker job from the estimator.
Args:
estimator (sagemaker.estimator.EstimatorBase): Estimator object
created by the user.
inputs (str): Parameters used when called
:meth:`~sagemaker.estimator.EstimatorBase.fit`.
Returns:
sagemaker.job: Constructed object that captures all information
about the started job.
"""
@abstractmethod
def wait(self):
"""Wait for the Amazon SageMaker job to finish."""
@abstractmethod
def describe(self):
"""Describe the job."""
@abstractmethod
def stop(self):
"""Stop the job."""
@staticmethod
def _load_config(inputs, estimator, expand_role=True, validate_uri=True):
"""Placeholder docstring"""
input_config = _Job._format_inputs_to_input_config(inputs, validate_uri)
role = (
estimator.sagemaker_session.expand_role(estimator.role)
if expand_role
else estimator.role
)
output_config = _Job._prepare_output_config(estimator.output_path, estimator.output_kms_key)
resource_config = _Job._prepare_resource_config(
estimator.instance_count,
estimator.instance_type,
estimator.volume_size,
estimator.volume_kms_key,
)
stop_condition = _Job._prepare_stop_condition(estimator.max_run, estimator.max_wait)
vpc_config = estimator.get_vpc_config()
model_channel = _Job._prepare_channel(
input_config,
estimator.model_uri,
estimator.model_channel_name,
validate_uri,
content_type="application/x-sagemaker-model",
input_mode="File",
)
if model_channel:
input_config = [] if input_config is None else input_config
input_config.append(model_channel)
if estimator.enable_network_isolation():
code_channel = _Job._prepare_channel(
input_config, estimator.code_uri, estimator.code_channel_name, validate_uri
)
if code_channel:
input_config = [] if input_config is None else input_config
input_config.append(code_channel)
return {
"input_config": input_config,
"role": role,
"output_config": output_config,
"resource_config": resource_config,
"stop_condition": stop_condition,
"vpc_config": vpc_config,
}
@staticmethod
def _format_inputs_to_input_config(inputs, validate_uri=True):
"""Placeholder docstring"""
if inputs is None:
return None
# Deferred import due to circular dependency
from sagemaker.amazon.amazon_estimator import RecordSet
from sagemaker.amazon.amazon_estimator import FileSystemRecordSet
if isinstance(inputs, (RecordSet, FileSystemRecordSet)):
inputs = inputs.data_channel()
input_dict = {}
if isinstance(inputs, string_types):
input_dict["training"] = _Job._format_string_uri_input(inputs, validate_uri)
elif isinstance(inputs, TrainingInput):
input_dict["training"] = inputs
elif isinstance(inputs, file_input):
input_dict["training"] = inputs
elif isinstance(inputs, dict):
for k, v in inputs.items():
input_dict[k] = _Job._format_string_uri_input(v, validate_uri)
elif isinstance(inputs, list):
input_dict = _Job._format_record_set_list_input(inputs)
elif isinstance(inputs, FileSystemInput):
input_dict["training"] = inputs
else:
msg = (
"Cannot format input {}. Expecting one of str, dict, TrainingInput or "
"FileSystemInput"
)
raise ValueError(msg.format(inputs))
channels = [
_Job._convert_input_to_channel(name, input) for name, input in input_dict.items()
]
return channels
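    # A minimal usage sketch (bucket names below are placeholders): a dict maps
    # channel names to inputs, and each entry becomes one channel config that
    # carries a "ChannelName" key, e.g.
    #
    #     _Job._format_inputs_to_input_config(
    #         {'train': 's3://my-bucket/train', 'validation': 's3://my-bucket/val'})
    #
    # would yield two channel configs, one named 'train' and one 'validation'.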
@staticmethod
def _convert_input_to_channel(channel_name, channel_s3_input):
"""Placeholder docstring"""
channel_config = channel_s3_input.config.copy()
channel_config["ChannelName"] = channel_name
return channel_config
@staticmethod
def _format_string_uri_input(
uri_input,
validate_uri=True,
content_type=None,
input_mode=None,
compression=None,
target_attribute_name=None,
):
"""Placeholder docstring"""
if isinstance(uri_input, str) and validate_uri and uri_input.startswith("s3://"):
s3_input_result = TrainingInput(
uri_input,
content_type=content_type,
input_mode=input_mode,
compression=compression,
target_attribute_name=target_attribute_name,
)
return s3_input_result
if isinstance(uri_input, str) and validate_uri and uri_input.startswith("file://"):
return file_input(uri_input)
if isinstance(uri_input, str) and validate_uri:
raise ValueError(
'URI input {} must be a valid S3 or FILE URI: must start with "s3://" or '
'"file://"'.format(uri_input)
)
if isinstance(uri_input, str):
s3_input_result = TrainingInput(
uri_input,
content_type=content_type,
input_mode=input_mode,
compression=compression,
target_attribute_name=target_attribute_name,
)
return s3_input_result
if isinstance(uri_input, (TrainingInput, file_input, FileSystemInput)):
return uri_input
raise ValueError(
"Cannot format input {}. Expecting one of str, TrainingInput, file_input or "
"FileSystemInput".format(uri_input)
)
@staticmethod
def _prepare_channel(
input_config,
channel_uri=None,
channel_name=None,
validate_uri=True,
content_type=None,
input_mode=None,
):
"""Placeholder docstring"""
if not channel_uri:
return None
if not channel_name:
raise ValueError(
"Expected a channel name if a channel URI {} is specified".format(channel_uri)
)
if input_config:
for existing_channel in input_config:
if existing_channel["ChannelName"] == channel_name:
raise ValueError("Duplicate channel {} not allowed.".format(channel_name))
channel_input = _Job._format_string_uri_input(
channel_uri, validate_uri, content_type, input_mode
)
channel = _Job._convert_input_to_channel(channel_name, channel_input)
return channel
@staticmethod
def _format_model_uri_input(model_uri, validate_uri=True):
"""Placeholder docstring"""
if isinstance(model_uri, string_types) and validate_uri and model_uri.startswith("s3://"):
return TrainingInput(
model_uri,
input_mode="File",
distribution="FullyReplicated",
content_type="application/x-sagemaker-model",
)
if isinstance(model_uri, string_types) and validate_uri and model_uri.startswith("file://"):
return file_input(model_uri)
if isinstance(model_uri, string_types) and validate_uri:
raise ValueError(
                'Model URI must be a valid S3 or FILE URI: must start with "s3://" or "file://"'
)
if isinstance(model_uri, string_types):
return TrainingInput(
model_uri,
input_mode="File",
distribution="FullyReplicated",
content_type="application/x-sagemaker-model",
)
raise ValueError("Cannot format model URI {}. Expecting str".format(model_uri))
@staticmethod
def _format_record_set_list_input(inputs):
"""Placeholder docstring"""
# Deferred import due to circular dependency
from sagemaker.amazon.amazon_estimator import FileSystemRecordSet, RecordSet
input_dict = {}
for record in inputs:
if not isinstance(record, (RecordSet, FileSystemRecordSet)):
raise ValueError("List compatible only with RecordSets or FileSystemRecordSets.")
if record.channel in input_dict:
raise ValueError("Duplicate channels not allowed.")
if isinstance(record, RecordSet):
input_dict[record.channel] = record.records_s3_input()
if isinstance(record, FileSystemRecordSet):
input_dict[record.channel] = record.file_system_input
return input_dict
@staticmethod
def _prepare_output_config(s3_path, kms_key_id):
"""Placeholder docstring"""
config = {"S3OutputPath": s3_path}
if kms_key_id is not None:
config["KmsKeyId"] = kms_key_id
return config
@staticmethod
def _prepare_resource_config(instance_count, instance_type, volume_size, volume_kms_key):
"""Placeholder docstring"""
resource_config = {
"InstanceCount": instance_count,
"InstanceType": instance_type,
"VolumeSizeInGB": volume_size,
}
if volume_kms_key is not None:
resource_config["VolumeKmsKeyId"] = volume_kms_key
return resource_config
@staticmethod
def _prepare_stop_condition(max_run, max_wait):
"""Placeholder docstring"""
if max_wait:
return {"MaxRuntimeInSeconds": max_run, "MaxWaitTimeInSeconds": max_wait}
return {"MaxRuntimeInSeconds": max_run}
@property
def name(self):
"""Placeholder docstring"""
return self.job_name
| 36.618123 | 100 | 0.631905 |
db567e2ee56f0ca9105607b3ca8d1bb45d32d128
| 5,113 |
py
|
Python
|
deps/qualysapi/qualysapi/api_objects.py
|
elasticsearchvn/VulnWhisperer
|
a92cadf2af33c802cb86f9e10e9228d81339af5b
|
[
"Apache-2.0"
] | 1 |
2021-03-17T21:19:48.000Z
|
2021-03-17T21:19:48.000Z
|
deps/qualysapi/qualysapi/api_objects.py
|
elasticsearchvn/VulnWhisperer
|
a92cadf2af33c802cb86f9e10e9228d81339af5b
|
[
"Apache-2.0"
] | 1 |
2021-12-13T20:52:38.000Z
|
2021-12-13T20:52:38.000Z
|
deps/qualysapi/qualysapi/api_objects.py
|
codegrande/VulnWhisperer
|
9f071a646b4f7650c58be0f40396172a04e065a9
|
[
"Apache-2.0"
] | 1 |
2020-12-02T18:36:35.000Z
|
2020-12-02T18:36:35.000Z
|
from __future__ import absolute_import
import datetime
from lxml import objectify
class Host(object):
def __init__(self, dns, id, ip, last_scan, netbios, os, tracking_method):
self.dns = str(dns)
self.id = int(id)
self.ip = str(ip)
last_scan = str(last_scan).replace('T', ' ').replace('Z', '').split(' ')
date = last_scan[0].split('-')
time = last_scan[1].split(':')
self.last_scan = datetime.datetime(int(date[0]), int(date[1]), int(date[2]), int(time[0]), int(time[1]), int(time[2]))
self.netbios = str(netbios)
self.os = str(os)
self.tracking_method = str(tracking_method)
class AssetGroup(object):
def __init__(self, business_impact, id, last_update, scanips, scandns, scanner_appliances, title):
self.business_impact = str(business_impact)
self.id = int(id)
self.last_update = str(last_update)
self.scanips = scanips
self.scandns = scandns
self.scanner_appliances = scanner_appliances
self.title = str(title)
    def addAsset(self, conn, ip):
        call = '/api/2.0/fo/asset/group/'
        parameters = {'action': 'edit', 'id': self.id, 'add_ips': ip}
        conn.request(call, parameters)
        self.scanips.append(ip)
    def setAssets(self, conn, ips):
        call = '/api/2.0/fo/asset/group/'
        parameters = {'action': 'edit', 'id': self.id, 'set_ips': ips}
        conn.request(call, parameters)
class ReportTemplate(object):
def __init__(self, isGlobal, id, last_update, template_type, title, type, user):
self.isGlobal = int(isGlobal)
self.id = int(id)
self.last_update = str(last_update).replace('T', ' ').replace('Z', '').split(' ')
self.template_type = template_type
self.title = title
self.type = type
self.user = user.LOGIN
class Report(object):
def __init__(self, expiration_datetime, id, launch_datetime, output_format, size, status, type, user_login):
self.expiration_datetime = str(expiration_datetime).replace('T', ' ').replace('Z', '').split(' ')
self.id = int(id)
self.launch_datetime = str(launch_datetime).replace('T', ' ').replace('Z', '').split(' ')
self.output_format = output_format
self.size = size
self.status = status.STATE
self.type = type
self.user_login = user_login
def download(self, conn):
call = '/api/2.0/fo/report'
parameters = {'action': 'fetch', 'id': self.id}
if self.status == 'Finished':
return conn.request(call, parameters)
class Scan(object):
def __init__(self, assetgroups, duration, launch_datetime, option_profile, processed, ref, status, target, title, type, user_login):
self.assetgroups = assetgroups
self.duration = str(duration)
launch_datetime = str(launch_datetime).replace('T', ' ').replace('Z', '').split(' ')
date = launch_datetime[0].split('-')
time = launch_datetime[1].split(':')
self.launch_datetime = datetime.datetime(int(date[0]), int(date[1]), int(date[2]), int(time[0]), int(time[1]), int(time[2]))
self.option_profile = str(option_profile)
self.processed = int(processed)
self.ref = str(ref)
self.status = str(status.STATE)
self.target = str(target).split(', ')
self.title = str(title)
self.type = str(type)
self.user_login = str(user_login)
def cancel(self, conn):
cancelled_statuses = ['Cancelled', 'Finished', 'Error']
if any(self.status in s for s in cancelled_statuses):
raise ValueError("Scan cannot be cancelled because its status is " + self.status)
else:
call = '/api/2.0/fo/scan/'
parameters = {'action': 'cancel', 'scan_ref': self.ref}
conn.request(call, parameters)
parameters = {'action': 'list', 'scan_ref': self.ref, 'show_status': 1}
self.status = objectify.fromstring(conn.request(call, parameters)).RESPONSE.SCAN_LIST.SCAN.STATUS.STATE
def pause(self, conn):
if self.status != "Running":
raise ValueError("Scan cannot be paused because its status is " + self.status)
else:
call = '/api/2.0/fo/scan/'
parameters = {'action': 'pause', 'scan_ref': self.ref}
conn.request(call, parameters)
parameters = {'action': 'list', 'scan_ref': self.ref, 'show_status': 1}
self.status = objectify.fromstring(conn.request(call, parameters)).RESPONSE.SCAN_LIST.SCAN.STATUS.STATE
def resume(self, conn):
if self.status != "Paused":
raise ValueError("Scan cannot be resumed because its status is " + self.status)
else:
call = '/api/2.0/fo/scan/'
parameters = {'action': 'resume', 'scan_ref': self.ref}
conn.request(call, parameters)
parameters = {'action': 'list', 'scan_ref': self.ref, 'show_status': 1}
self.status = objectify.fromstring(conn.request(call, parameters)).RESPONSE.SCAN_LIST.SCAN.STATUS.STATE
| 42.256198 | 136 | 0.611969 |
391187b74f66ee82e8b536a619011ba05d724101
| 1,941 |
py
|
Python
|
config/wsgi.py
|
caizhimin/demo
|
9b13afee128353f9cb1e7cefe5a9f476ba2f0aa5
|
[
"MIT"
] | null | null | null |
config/wsgi.py
|
caizhimin/demo
|
9b13afee128353f9cb1e7cefe5a9f476ba2f0aa5
|
[
"MIT"
] | null | null | null |
config/wsgi.py
|
caizhimin/demo
|
9b13afee128353f9cb1e7cefe5a9f476ba2f0aa5
|
[
"MIT"
] | null | null | null |
"""
WSGI config for demo project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# demo directory.
app_path = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'demo'))
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 43.133333 | 79 | 0.793921 |
902c0e46068a28f54f95d4abee8ccea8a07aea00
| 532 |
py
|
Python
|
allennlp_test/testdata/allennlp/tmp.py
|
rahman-mahmudur/PyART
|
36591cd10b2b7a560bbcb47a6cf744b72466f92a
|
[
"Apache-2.0"
] | null | null | null |
allennlp_test/testdata/allennlp/tmp.py
|
rahman-mahmudur/PyART
|
36591cd10b2b7a560bbcb47a6cf744b72466f92a
|
[
"Apache-2.0"
] | null | null | null |
allennlp_test/testdata/allennlp/tmp.py
|
rahman-mahmudur/PyART
|
36591cd10b2b7a560bbcb47a6cf744b72466f92a
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import sys
if os.environ.get("ALLENNLP_DEBUG"):
LEVEL = logging.DEBUG
else:
level_name = os.environ.get("ALLENNLP_LOG_LEVEL")
LEVEL = logging._nameToLevel.get(level_name, logging.INFO)
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=LEVEL)
logging.getLogger("filelock").setLevel(logging.WARNING)
def _transformers_log_filter(record):
reveal_type(record.msg)
| 29.555556 | 95 | 0.746241 |
b940be8fc693eb3b5c92835951c6bd97449fd7b7
| 2,063 |
py
|
Python
|
ibis/backends/sqlite/tests/conftest.py
|
bfgray3/ibis
|
6fa1b5e4018d73a8c8357665df63f0fd7e818590
|
[
"Apache-2.0"
] | null | null | null |
ibis/backends/sqlite/tests/conftest.py
|
bfgray3/ibis
|
6fa1b5e4018d73a8c8357665df63f0fd7e818590
|
[
"Apache-2.0"
] | null | null | null |
ibis/backends/sqlite/tests/conftest.py
|
bfgray3/ibis
|
6fa1b5e4018d73a8c8357665df63f0fd7e818590
|
[
"Apache-2.0"
] | 1 |
2021-09-20T07:51:20.000Z
|
2021-09-20T07:51:20.000Z
|
import os
from pathlib import Path
import pytest
import ibis
import ibis.expr.types as ir
from ibis.backends.tests.base import BackendTest, RoundAwayFromZero
class TestConf(BackendTest, RoundAwayFromZero):
supports_arrays = False
supports_arrays_outside_of_select = supports_arrays
supports_window_operations = True
check_dtype = False
returned_timestamp_unit = 's'
@staticmethod
def connect(data_directory: Path):
path = Path(
os.environ.get(
'IBIS_TEST_SQLITE_DATABASE', data_directory / 'ibis_testing.db'
)
)
return ibis.sqlite.connect(str(path)) # type: ignore
@property
def functional_alltypes(self) -> ir.TableExpr:
t = super().functional_alltypes
return t.mutate(timestamp_col=t.timestamp_col.cast('timestamp'))
@pytest.fixture(scope='module')
def dbpath(data_directory):
default = str(data_directory / 'ibis_testing.db')
return os.environ.get('IBIS_TEST_SQLITE_DATABASE', default)
@pytest.fixture(scope='module')
def con(dbpath):
return ibis.sqlite.connect(dbpath)
@pytest.fixture(scope='module')
def db(con):
return con.database()
@pytest.fixture
def dialect():
import sqlalchemy as sa
return sa.dialects.sqlite.dialect()
@pytest.fixture
def translate(dialect):
from ibis.backends.sqlite import SQLiteClient
client = SQLiteClient
context = client.compiler.make_context()
return lambda expr: str(
client.compiler.translator_class(expr, context)
.get_result()
.compile(dialect=dialect, compile_kwargs={'literal_binds': True})
)
@pytest.fixture
def sqla_compile(dialect):
return lambda expr: str(
expr.compile(dialect=dialect, compile_kwargs={'literal_binds': True})
)
@pytest.fixture(scope='module')
def alltypes(db):
return db.functional_alltypes
@pytest.fixture(scope='module')
def alltypes_sqla(alltypes):
return alltypes.op().sqla_table
@pytest.fixture(scope='module')
def df(alltypes):
return alltypes.execute()
| 23.179775 | 79 | 0.707707 |
f073acefbaaf4d3ad16d6c6c5750a3de6786fa50
| 10,102 |
py
|
Python
|
ppq/quantization/optim/calibration.py
|
wdian/ppq
|
58bd1271ea6f0dfaf602eb72bdca63ea79f191b8
|
[
"Apache-2.0"
] | null | null | null |
ppq/quantization/optim/calibration.py
|
wdian/ppq
|
58bd1271ea6f0dfaf602eb72bdca63ea79f191b8
|
[
"Apache-2.0"
] | null | null | null |
ppq/quantization/optim/calibration.py
|
wdian/ppq
|
58bd1271ea6f0dfaf602eb72bdca63ea79f191b8
|
[
"Apache-2.0"
] | null | null | null |
from math import ceil
from typing import Callable, Dict, Iterable, List
from ppq.core import empty_ppq_cache
from ppq.core.quant import QuantizationStates
from ppq.executor import BaseGraphExecutor, RuntimeHook
from ppq.IR import GraphCommandProcesser, QuantableOperation
from ppq.quantization.observer import OperationObserver, TorchHistObserver
from ppq.quantization.observer.range import TorchMSEObserver
from tqdm import tqdm
from .base import QuantizationOptimizationPass
class RuntimeCalibrationPass(QuantizationOptimizationPass):
"""
PPQ Runtime Calibration Pass
For int8 quantization, you need to calibrate or estimate the value range,
    i.e., (min, max) of all floating-point tensors in the model.
Unlike constant tensors such as weights and biases,
variable tensors such as model input, activations (outputs of intermediate layers)
and model output cannot be calibrated unless we run a few inference cycles.
As a result, the converter requires a representative dataset to calibrate them.
This dataset is supposed to be a small subset (about 100~500 samples) of the training or validation data.
ATTENTION: DO NOT GIVE A LARGER DATASET THAN EXPECTED, PPQ WILL RAISE AN ERROR ABOUT IT.
"""
def __init__(self, method: str = None, override: bool = False) -> None:
"""
Args:
            method (str, optional): calibration method; if not None, it will override the quantizer's setting.
Defaults to None.
override (bool, optional): whether to override existing quantization configurations.
"""
super().__init__(name='PPQ Runtime Calibration Pass')
self._method = method
self._observers = {}
self._collate_fn = None
self._calib_steps = None
self._override = override
def calibrate(self, desc: str, dataloader: Iterable, executor: BaseGraphExecutor,
hooks:Dict[str, RuntimeHook], output_names: List[str] = None):
calib_step = 0
with tqdm(total=self._calib_steps, desc=desc) as progressing_bar:
for calib_epoch in range(ceil(self._calib_steps / len(dataloader))):
for data in dataloader:
if self._collate_fn is not None:
data = self._collate_fn(data)
executor.forward(inputs=data, hooks=hooks,
output_names=output_names)
progressing_bar.update()
calib_step += 1
if calib_step >= self._calib_steps: break
@ empty_ppq_cache
def optimize(
self,
processer: GraphCommandProcesser,
dataloader: Iterable,
executor: BaseGraphExecutor,
calib_steps: int,
collate_fn: Callable,
**kwargs,
) -> None:
self._collate_fn = collate_fn
self._calib_steps = calib_steps
        assert calib_steps >= 8, 'Insufficient calibration detected. To better quantize your network, '\
            'more calibration steps are needed; we strongly recommend preparing more calibration data '\
            'and using more calibration steps (at least 8).'
        assert calib_steps <= 512, 'Too many calibration steps. ppq is able to quantize your network within 32-128 '\
            'calibration steps; more steps will greatly delay ppq\'s calibration procedure. '\
            'Please reset your calib_steps parameter.'
# -------------------------------------------------
# Override existing quantization configurations
# -------------------------------------------------
if self._override:
for operation in processer.graph.operations.values():
if not isinstance(operation, QuantableOperation): continue
for config, var in operation.config_with_variable:
if (not var.is_parameter and
config.state == QuantizationStates.ACTIVATED and
config.dominated_by == config):
config.state = QuantizationStates.INITIAL
# build observer and hook for each quantable operation
hooks = {}
for op_name, operation in processer.graph.operations.items():
if not isinstance(operation, QuantableOperation): continue
# override algorithm setting if necessary
for config, var in operation.config_with_variable:
if not var.is_parameter and self._method is not None:
config.observer_algorithm = self._method
observer = OperationObserver(
opeartion=executor._graph.operations[op_name],
monitor_parameter=False)
self._observers[op_name] = observer
hooks[op_name] = observer.hook
# ready for calibration
# hook forward function, let observers take effects.
self.calibrate(desc='Calibration Progress(Phase 1)', dataloader=dataloader,
executor=executor, hooks=hooks, output_names=None)
# render calibration result.
for _, observer in self._observers.items():
assert isinstance(observer, OperationObserver)
observer.render_quantization_config()
observer.report()
# -------------------------------------------------
# There are some two-phase observer in ppq,
# which means they have to be calibrated for a second time.
        # see also: TorchHistObserver
# -------------------------------------------------
# remove one-phase observer from hook dict.
pop_list = []
for op_name, observer in self._observers.items():
assert isinstance(observer, OperationObserver)
if all([type(var_observer) not in {TorchHistObserver, TorchMSEObserver}
for var_observer in observer._hook._observer_table.values()]):
pop_list.append(op_name)
for op_name in pop_list:
self._observers.pop(op_name)
hooks.pop(op_name)
if len(hooks) > 0:
# ready for calibration(Phase 2)
# hook forward function, let observers take effects.
self.calibrate(desc='Calibration Progress(Phase 2)', dataloader=dataloader,
executor=executor, hooks=hooks, output_names=None)
# render calibration result for a second time.
for _, observer in self._observers.items():
assert isinstance(observer, OperationObserver)
observer.render_quantization_config()
observer.report()
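# A minimal usage sketch of this pass (the graph processer, executor, dataloader
# and collate_fn below are placeholders assumed to be prepared by the caller;
# they are not defined in this module):
#
#     calibration_pass = RuntimeCalibrationPass(method=None, override=False)
#     calibration_pass.optimize(processer, dataloader=calib_dataloader,
#                               executor=executor, calib_steps=32,
#                               collate_fn=lambda batch: batch)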
class RuntimePerlayerCalibrationPass(RuntimeCalibrationPass):
"""
PPQ Runtime Calibration Pass(Per layer calibration)
For int8 quantization, you need to calibrate or estimate the value range,
    i.e., (min, max) of all floating-point tensors in the model.
Unlike constant tensors such as weights and biases,
variable tensors such as model input, activations (outputs of intermediate layers)
and model output cannot be calibrated unless we run a few inference cycles.
As a result, the converter requires a representative dataset to calibrate them.
    This dataset is supposed to be a small subset (about 100-500 samples) of the training or validation data.
ATTENTION: DO NOT GIVE A LARGER DATASET THAN EXPECTED, PPQ WILL RAISE AN ERROR ABOUT IT.
"""
def __init__(self, method: str) -> None:
super().__init__()
self._method = method
self.name = 'PPQ Runtime Calibration Pass(Per Layer)'
def optimize(self, processer: GraphCommandProcesser, dataloader: Iterable,
executor: BaseGraphExecutor, calib_steps: int, collate_fn: Callable, **kwargs) -> None:
self._collate_fn = collate_fn
self._calib_steps = calib_steps
        assert calib_steps >= 8, 'Insufficient calibration detected. To better quantize your network, '\
            'more calibration steps are needed; we strongly recommend preparing more calibration data '\
            'and using more calibration steps (at least 8).'
        assert calib_steps <= 512, 'Too many calibration steps. ppq is able to quantize your network within 32-128 '\
            'calibration steps; more steps will greatly delay ppq\'s calibration procedure. '\
            'Please reset your calib_steps parameter.'
for operation in tqdm(processer.graph.topological_sort(),
desc='Runtime Calibration(Per Layer)'):
if not isinstance(operation, QuantableOperation): continue
# override algorithm setting if necessary
for config, var in operation.config_with_variable:
if not var.is_parameter and self._method is not None:
config.observer_algorithm = self._method
observer = OperationObserver(
opeartion=operation,
monitor_parameter=False)
self.calibrate(desc=f'Runtime Calibration for {operation.name}',
dataloader=dataloader, executor=executor,
hooks={operation.name: observer.hook},
output_names=[var.name for var in operation.outputs])
if any([type(var_observer) in {TorchHistObserver}
for var_observer in observer._hook._observer_table.values()]):
                self.calibrate(desc=f'Runtime Calibration for {operation.name} (Phase 2)',
dataloader=dataloader, executor=executor,
hooks={operation.name: observer.hook},
output_names=[var.name for var in operation.outputs])
observer.render_quantization_config()
observer.report()
| 47.42723 | 127 | 0.623144 |
6120ad1a02ae96d83394c50ac06b6a8ad995a4f1
| 37,345 |
py
|
Python
|
tensorflow/python/ops/nn.py
|
jylinman/tensorflow
|
5248d111c3aeaf9f560cd77bff0f183f38e31e0b
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/ops/nn.py
|
jylinman/tensorflow
|
5248d111c3aeaf9f560cd77bff0f183f38e31e0b
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/ops/nn.py
|
jylinman/tensorflow
|
5248d111c3aeaf9f560cd77bff0f183f38e31e0b
|
[
"Apache-2.0"
] | 1 |
2020-10-21T09:39:19.000Z
|
2020-10-21T09:39:19.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""## Activation Functions
The activation ops provide different types of nonlinearities for use in neural
networks. These include smooth nonlinearities (`sigmoid`, `tanh`, `elu`,
`softplus`, and `softsign`), continuous but not everywhere differentiable
functions (`relu`, `relu6`, and `relu_x`), and random regularization
(`dropout`).
All activation ops apply componentwise, and produce a tensor of the same
shape as the input tensor.
@@relu
@@relu6
@@elu
@@softplus
@@softsign
@@dropout
@@bias_add
@@sigmoid
@@tanh
## Convolution
The convolution ops sweep a 2-D filter over a batch of images, applying the
filter to each window of each image of the appropriate size. The different
ops trade off between generic vs. specific filters:
* `conv2d`: Arbitrary filters that can mix channels together.
* `depthwise_conv2d`: Filters that operate on each channel independently.
* `separable_conv2d`: A depthwise spatial filter followed by a pointwise filter.
Note that although these ops are called "convolution", they are strictly
speaking "cross-correlation" since the filter is combined with an input window
without reversing the filter. For details, see [the properties of
cross-correlation](https://en.wikipedia.org/wiki/Cross-correlation#Properties).
The filter is applied to image patches of the same size as the filter and
strided according to the `strides` argument. `strides = [1, 1, 1, 1]` applies
the filter to a patch at every offset, `strides = [1, 2, 2, 1]` applies the
filter to every other image patch in each dimension, etc.
Ignoring channels for the moment, assume that the 4-D `input` has shape
`[batch, in_height, in_width, ...]` and the 4-D `filter` has shape
`[filter_height, filter_width, ...]`. The spatial semantics of the convolution
ops are then as follows: first, according to the padding scheme chosen as
`'SAME'` or `'VALID'`, the output size and the padding pixels are computed.
For the `'SAME'` padding, the output height and width are computed as:
out_height = ceil(float(in_height) / float(strides[1]))
out_width = ceil(float(in_width) / float(strides[2]))
and the padding on the top and left are computed as:
pad_along_height = ((out_height - 1) * strides[1] +
filter_height - in_height)
pad_along_width = ((out_width - 1) * strides[2] +
filter_width - in_width)
pad_top = pad_along_height / 2
pad_left = pad_along_width / 2
Note that the division by 2 means that there might be cases when the padding on
both sides (top vs bottom, right vs left) are off by one. In this case, the
bottom and right sides always get the one additional padded pixel. For example,
when `pad_along_height` is 5, we pad 2 pixels at the top and 3 pixels at the
bottom. Note that this is different from existing libraries such as cuDNN and
Caffe, which explicitly specify the number of padded pixels and always pad the
same number of pixels on both sides.
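For example (illustrative numbers): with `in_height = 6`, `filter_height = 3`
and `strides[1] = 2`, the formulas above give `out_height = 3` and
`pad_along_height = (3 - 1) * 2 + 3 - 6 = 1`, so `pad_top = 0` and the single
padded row is added at the bottom.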
For the `'VALID'` padding, the output height and width are computed as:
out_height = ceil(float(in_height - filter_height + 1) / float(strides[1]))
out_width = ceil(float(in_width - filter_width + 1) / float(strides[2]))
and the padding values are always zero. The output is then computed as
output[b, i, j, :] =
sum_{di, dj} input[b, strides[1] * i + di - pad_top,
strides[2] * j + dj - pad_left, ...] *
filter[di, dj, ...]
where any value outside the original input image region are considered zero (
i.e. we pad zero values around the border of the image).
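For example (illustrative numbers): with `'VALID'` padding, `in_height = 6`,
`filter_height = 3` and `strides[1] = 2` give `out_height = ceil(4 / 2) = 2`,
and no pixels are padded at all.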
Since `input` is 4-D, each `input[b, i, j, :]` is a vector. For `conv2d`, these
vectors are multiplied by the `filter[di, dj, :, :]` matrices to produce new
vectors. For `depthwise_conv_2d`, each scalar component `input[b, i, j, k]`
is multiplied by a vector `filter[di, dj, k]`, and all the vectors are
concatenated.
@@conv2d
@@depthwise_conv2d
@@separable_conv2d
@@conv2d_transpose
## Pooling
The pooling ops sweep a rectangular window over the input tensor, computing a
reduction operation for each window (average, max, or max with argmax). Each
pooling op uses rectangular windows of size `ksize` separated by offset
`strides`. For example, if `strides` is all ones every window is used, if
`strides` is all twos every other window is used in each dimension, etc.
In detail, the output is
output[i] = reduce(value[strides * i:strides * i + ksize])
where the indices also take into consideration the padding values. Please refer
to the `Convolution` section for details about the padding calculation.
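For example (illustrative values): `max_pool` with `ksize = [1, 2, 2, 1]` and
`strides = [1, 2, 2, 1]` reduces each non-overlapping 2x2 window of the input
to its maximum value, roughly halving the height and width.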
@@avg_pool
@@max_pool
@@max_pool_with_argmax
## Normalization
Normalization is useful to prevent neurons from saturating when inputs may
have varying scale, and to aid generalization.
@@l2_normalize
@@local_response_normalization
@@moments
## Losses
The loss ops measure error between two tensors, or between a tensor and zero.
These can be used for measuring accuracy of a network in a regression task
or for regularization purposes (weight decay).
@@l2_loss
## Classification
TensorFlow provides several operations that help you perform classification.
@@sigmoid_cross_entropy_with_logits
@@softmax
@@softmax_cross_entropy_with_logits
@@sparse_softmax_cross_entropy_with_logits
## Embeddings
TensorFlow provides library support for looking up values in embedding
tensors.
@@embedding_lookup
## Evaluation
The evaluation ops are useful for measuring the performance of a network.
Since they are nondifferentiable, they are typically used at evaluation time.
@@top_k
@@in_top_k
## Candidate Sampling
Do you want to train a multiclass or multilabel model with thousands
or millions of output classes (for example, a language model with a
large vocabulary)? Training with a full Softmax is slow in this case,
since all of the classes are evaluated for every training example.
Candidate Sampling training algorithms can speed up your step times by
only considering a small randomly-chosen subset of contrastive classes
(called candidates) for each batch of training examples.
See our [Candidate Sampling Algorithms Reference]
(../../extras/candidate_sampling.pdf)
### Sampled Loss Functions
TensorFlow provides the following sampled loss functions for faster training.
@@nce_loss
@@sampled_softmax_loss
### Candidate Samplers
TensorFlow provides the following samplers for randomly sampling candidate
classes when using one of the sampled loss functions above.
@@uniform_candidate_sampler
@@log_uniform_candidate_sampler
@@learned_unigram_candidate_sampler
@@fixed_unigram_candidate_sampler
### Miscellaneous candidate sampling utilities
@@compute_accidental_hits
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import candidate_sampling_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import seq2seq
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.math_ops import sigmoid
from tensorflow.python.ops.math_ops import tanh
# Bring more nn-associated functionality into this package.
# pylint: disable=wildcard-import
from tensorflow.python.ops.nn_ops import *
from tensorflow.python.ops.candidate_sampling_ops import *
from tensorflow.python.ops.embedding_ops import *
from tensorflow.python.ops.rnn import *
# pylint: enable=wildcard-import
def sigmoid_cross_entropy_with_logits(logits, targets, name=None):
"""Computes sigmoid cross entropy given `logits`.
Measures the probability error in discrete classification tasks in which each
class is independent and not mutually exclusive. For instance, one could
perform multilabel classification where a picture can contain both an elephant
and a dog at the same time.
For brevity, let `x = logits`, `z = targets`. The logistic loss is
z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
= z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
= z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
= z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
= (1 - z) * x + log(1 + exp(-x))
= x - x * z + log(1 + exp(-x))
To ensure stability and avoid overflow, the implementation uses
max(x, 0) - x * z + log(1 + exp(-abs(x)))
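  For example (illustrative values): for `x = 2.0` and `z = 1.0` this gives
  `max(2, 0) - 2 * 1 + log(1 + exp(-2)) = log(1 + exp(-2))`, approximately
  0.127, which matches `-log(sigmoid(2.0))` from the derivation above.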
`logits` and `targets` must have the same type and shape.
Args:
logits: A `Tensor` of type `float32` or `float64`.
targets: A `Tensor` of the same type and shape as `logits`.
name: A name for the operation (optional).
Returns:
A `Tensor` of the same shape as `logits` with the componentwise
logistic losses.
"""
with ops.op_scope([logits, targets], name, "logistic_loss") as name:
logits = ops.convert_to_tensor(logits, name="logits")
targets = ops.convert_to_tensor(targets, name="targets")
# The logistic loss formula from above is
# x - x * z + log(1 + exp(-x))
# For x < 0, a more numerically stable formula is
# -x * z + log(1 + exp(x))
# To avoid branching, we use the combined version
# max(x, 0) - x * z + log(1 + exp(-abs(x)))
return math_ops.add(nn_ops.relu(logits) - logits * targets,
math_ops.log(1 + math_ops.exp(-math_ops.abs(logits))),
name=name)
def relu_layer(x, weights, biases, name=None):
"""Computes Relu(x * weight + biases).
Args:
x: a 2D tensor. Dimensions typically: batch, in_units
weights: a 2D tensor. Dimensions typically: in_units, out_units
biases: a 1D tensor. Dimensions: out_units
name: A name for the operation (optional). If not specified
"nn_relu_layer" is used.
Returns:
A 2-D Tensor computing relu(matmul(x, weights) + biases).
Dimensions typically: batch, out_units.
"""
with ops.op_scope([x, weights, biases], name, "relu_layer") as name:
x = ops.convert_to_tensor(x, name="x")
weights = ops.convert_to_tensor(weights, name="weights")
biases = ops.convert_to_tensor(biases, name="biases")
xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
return nn_ops.relu(xw_plus_b, name=name)
def l2_normalize(x, dim, epsilon=1e-12, name=None):
"""Normalizes along dimension `dim` using an L2 norm.
For a 1-D tensor with `dim = 0`, computes
output = x / sqrt(max(sum(x**2), epsilon))
For `x` with more dimensions, independently normalizes each 1-D slice along
dimension `dim`.
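  For example (illustrative values): for `x = [3.0, 4.0]` and `dim = 0`, the
  L2 norm is 5, so the output is `[0.6, 0.8]`.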
Args:
x: A `Tensor`.
dim: Dimension along which to normalize.
epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the
divisor if `norm < sqrt(epsilon)`.
name: A name for this operation (optional).
Returns:
A `Tensor` with the same shape as `x`.
"""
with ops.op_scope([x], name, "l2_normalize") as name:
x = ops.convert_to_tensor(x, name="x")
square_sum = math_ops.reduce_sum(math_ops.square(x), [dim], keep_dims=True)
x_inv_norm = math_ops.rsqrt(math_ops.maximum(square_sum, epsilon))
return math_ops.mul(x, x_inv_norm, name=name)
def zero_fraction(value, name=None):
"""Returns the fraction of zeros in `value`.
If `value` is empty, the result is `nan`.
This is useful in summaries to measure and report sparsity. For example,
z = tf.Relu(...)
summ = tf.scalar_summary('sparsity', tf.zero_fraction(z))
Args:
value: A tensor of numeric type.
name: A name for the operation (optional).
Returns:
The fraction of zeros in `value`, with type `float32`.
"""
with ops.op_scope([value], name, "zero_fraction"):
value = ops.convert_to_tensor(value, name="value")
zero = constant_op.constant(0, dtype=value.dtype, name="zero")
return math_ops.reduce_mean(math_ops.cast(math_ops.equal(value, zero),
dtypes.float32))
def depthwise_conv2d(input, filter, strides, padding, name=None):
"""Depthwise 2-D convolution.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`
containing `in_channels` convolutional filters of depth 1, `depthwise_conv2d`
applies a different filter to each input channel (expanding from 1 channel
to `channel_multiplier` channels for each), then concatenates the results
together. The output has `in_channels * channel_multiplier` channels.
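  For example (shapes only, illustrative): an input of shape
  `[batch, in_height, in_width, 3]` convolved with a filter of shape
  `[5, 5, 3, 2]` produces an output with `3 * 2 = 6` channels.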
In detail,
output[b, i, j, k * channel_multiplier + q] =
sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
filter[di, dj, k, q]
Must have `strides[0] = strides[3] = 1`. For the most common case of the
same horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Args:
input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
filter: 4-D with shape
`[filter_height, filter_width, in_channels, channel_multiplier]`.
strides: 1-D of size 4. The stride of the sliding window for each
dimension of `input`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: A name for this operation (optional).
Returns:
A 4-D `Tensor` of shape
`[batch, out_height, out_width, in_channels * channel_multiplier].`
"""
with ops.op_scope([input, filter], name, "depthwise") as name:
input = ops.convert_to_tensor(input, name="tensor_in")
filter = ops.convert_to_tensor(filter, name="filter_in")
# A shape is required to statically compute the number of separable filters.
if filter.get_shape().ndims is not None:
assert len(filter.get_shape()) == 4
in_channels = filter.get_shape()[2]
# Sanity checks, if shape information is available for the inputs.
if input.get_shape().ndims is not None:
assert len(input.get_shape()) == 4
assert input.get_shape()[3] == in_channels, (
"Mismatched input depth %d and number of depthwise filters %d." % (
input.get_shape()[3].value, in_channels))
else:
assert input.get_shape().ndims is not None, (
"Either tensor must provide static shape information.")
assert input.get_shape().ndims == 4
in_channels = input.get_shape()[3]
if in_channels == 1:
return nn_ops.conv2d(input, filter, strides, padding, name=name)
else:
# Create one separate convolution per channel.
convs = []
for channel in xrange(in_channels):
with ops.name_scope("depth%d" % channel) as channel_scope:
t_in = array_ops.slice(input, [0, 0, 0, channel], [-1, -1, -1, 1],
name="slice_inputs")
f_in = array_ops.slice(filter, [0, 0, channel, 0], [-1, -1, 1, -1],
name="slice_params")
convs.append(nn_ops.conv2d(t_in, f_in,
strides, padding, name=channel_scope))
# Concatenate the per-channel convolutions along the channel dimension.
return array_ops.concat(3, convs, name=name)
def separable_conv2d(input, depthwise_filter, pointwise_filter, strides,
padding,
name=None):
"""2-D convolution with separable filters.
Performs a depthwise convolution that acts separately on channels followed by
a pointwise convolution that mixes channels. Note that this is separability
between dimensions `[1, 2]` and `3`, not spatial separability between
dimensions `1` and `2`.
In detail,
output[b, i, j, k] = sum_{di, dj, q, r]
input[b, strides[1] * i + di, strides[2] * j + dj, q] *
depthwise_filter[di, dj, q, r] *
pointwise_filter[0, 0, q * channel_multiplier + r, k]
`strides` controls the strides for the depthwise convolution only, since
the pointwise convolution has implicit strides of `[1, 1, 1, 1]`. Must have
`strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Args:
input: 4-D `Tensor` with shape `[batch, in_height, in_width, in_channels]`.
depthwise_filter: 4-D `Tensor` with shape
`[filter_height, filter_width, in_channels, channel_multiplier]`.
Contains `in_channels` convolutional filters of depth 1.
pointwise_filter: 4-D `Tensor` with shape
`[1, 1, channel_multiplier * in_channels, out_channels]`. Pointwise
filter to mix channels after `depthwise_filter` has convolved spatially.
strides: 1-D of size 4. The strides for the depthwise convolution for
each dimension of `input`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: A name for this operation (optional).
Returns:
A 4-D `Tensor` of shape `[batch, out_height, out_width, out_channels]`.
"""
with ops.op_scope([input, depthwise_filter, pointwise_filter],
name, "separable_conv2d") as name:
input = ops.convert_to_tensor(input, name="tensor_in")
depthwise_filter = ops.convert_to_tensor(depthwise_filter,
name="depthwise_filter")
pointwise_filter = ops.convert_to_tensor(pointwise_filter,
name="pointwise_filter")
if pointwise_filter.get_shape().ndims is not None:
assert len(pointwise_filter.get_shape()) == 4
assert pointwise_filter.get_shape()[0] == 1
assert pointwise_filter.get_shape()[1] == 1
if depthwise_filter.get_shape().ndims and input.get_shape().ndims:
channel_multiplier = depthwise_filter.get_shape()[3]
in_channels = input.get_shape()[3]
out_channels = pointwise_filter.get_shape()[3]
      # This would mean the separable convolution is over-parameterized.
assert channel_multiplier * in_channels < out_channels
    # The layout of the ops in the graph is expected to be as follows:
# separable_conv2d // Conv2D op corresponding to the pointwise conv.
    #   separable_conv2d/depthwise  // Concat op for the depthwise outputs.
# separable_conv2d/depthwise/depth0 // Conv2D op for depth 0
# separable_conv2d/depthwise/depth1 // Conv2D op for depth 1
# separable_conv2d/depthwise/depth2 // Conv2D op for depth 2
depthwise = depthwise_conv2d(input, depthwise_filter, strides,
padding, name="depthwise")
return nn_ops.conv2d(depthwise, pointwise_filter, [1, 1, 1, 1],
padding="VALID", name=name)
def moments(x, axes, name=None, keep_dims=False):
"""Calculate the mean and variance of `x`.
The mean and variance are calculated by aggregating the contents of `x`
across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean
and variance of a vector.
For so-called "global normalization" needed for convolutional filters pass
`axes=[0, 1, 2]` (batch, height, width). For batch normalization pass
`axes=[0]` (batch).
Args:
x: A `Tensor`.
axes: array of ints. Axes along which to compute mean and
variance.
keep_dims: produce moments with the same dimensionality as the input.
name: Name used to scope the operations that compute the moments.
Returns:
Two `Tensor` objects: `mean` and `variance`.
"""
with ops.op_scope([x, axes], name, "moments"):
x = ops.convert_to_tensor(x, name="x")
x_shape = x.get_shape()
if all(x_shape[d].value is not None for d in axes):
# The shape is known in the relevant axes, so we can statically
# compute the divisor.
divisor = 1.0
for d in set(axes):
divisor *= x.get_shape()[d].value
divisor = constant_op.constant(1.0 / divisor, x.dtype, name="divisor")
else:
divisor = constant_op.constant(1.0, dtype=x.dtype)
x_dynamic_shape = array_ops.shape(x)
for d in set(axes):
divisor *= math_ops.cast(x_dynamic_shape[d], x.dtype)
divisor = math_ops.inv(divisor, name="divisor")
constant_axes = constant_op.constant(axes, name="axes")
# Note: We do not use Mean here because it is very slow on GPU.
# Note 2: The expression below is potentially more stable.
# It is however a bit slower and stability doesn't appear to be an issue.
# mean = math_ops.reduce_sum(math_ops.mul(x, divisor), axes, name="mean")
# var = math_ops.reduce_sum(math_ops.mul(math_ops.square(x - mean),
# divisor), axes,
# name="variance")
mean = math_ops.mul(
math_ops.reduce_sum(x,
constant_axes,
keep_dims=True),
divisor,
name="mean")
# Give x-mean a specific name, so the caller might take advantage of it.
# The caller should have a fallback plan, however: this tensor may not be
# available if this function implementation changes.
x_centered = math_ops.sub(x, mean, name="x_centered")
var = math_ops.mul(
math_ops.reduce_sum(
math_ops.square(x_centered),
constant_axes,
keep_dims=keep_dims),
divisor,
name="variance")
if keep_dims:
return mean, var
else:
return array_ops.squeeze(mean, squeeze_dims=axes), var
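# Example use of moments, assuming a hypothetical activation tensor:
#   x: [batch=16, height=28, width=28, channels=64]
#   mean, variance = moments(x, axes=[0, 1, 2])
# aggregates over batch, height and width ("global normalization"), so both
# mean and variance have shape [64] -- one moment pair per channel. With
# axes=[0] only, the moments would instead have shape [28, 28, 64].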
def _sum_rows(x):
"""Returns a vector summing up each row of the matrix x."""
# _sum_rows(x) is equivalent to math_ops.reduce_sum(x, 1) when x is
# a matrix. The gradient of _sum_rows(x) is more efficient than
# reduce_sum(x, 1)'s gradient in today's implementation. Therefore,
# we use _sum_rows(x) in the nce_loss() computation since the loss
# is mostly used for training.
cols = array_ops.shape(x)[1]
ones_shape = array_ops.pack([cols, 1])
ones = array_ops.ones(ones_shape, x.dtype)
return array_ops.reshape(math_ops.matmul(x, ones), [-1])
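# _sum_rows is a matmul-based stand-in for reduce_sum(x, 1). For example, given
# x = [[1., 2.], [3., 4.]] it multiplies by a [2, 1] matrix of ones and reshapes,
# producing [3., 7.].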
def _compute_sampled_logits(weights, biases, inputs, labels, num_sampled,
num_classes, num_true=1,
sampled_values=None,
subtract_log_q=True,
remove_accidental_hits=False,
partition_strategy="mod",
name=None):
"""Helper function for nce_loss and sampled_softmax_loss functions.
Computes sampled output training logits and labels suitable for implementing
e.g. noise-contrastive estimation (see nce_loss) or sampled softmax (see
sampled_softmax_loss).
Note: In the case where num_true > 1, we assign to each target class
the target probability 1 / num_true so that the target probabilities
sum to 1 per-example.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
`[num_classes, dim]`. The (possibly-partitioned) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes. Note that this format differs from
the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
    subtract_log_q: A `bool`. Whether to subtract the log expected count of
the labels in the sample to get the logits of the true labels.
Default is True. Turn off for Negative Sampling.
    remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
where a sampled class equals one of the target classes. Default is
False.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
Returns:
out_logits, out_labels: `Tensor` objects each with shape
`[batch_size, num_true + num_sampled]`, for passing to either
`nn.sigmoid_cross_entropy_with_logits` (NCE) or
`nn.softmax_cross_entropy_with_logits` (sampled softmax).
"""
if not isinstance(weights, list):
weights = [weights]
with ops.op_scope(
weights + [biases, inputs, labels], name, "compute_sampled_logits"):
if labels.dtype != dtypes.int64:
labels = math_ops.cast(labels, dtypes.int64)
labels_flat = array_ops.reshape(labels, [-1])
# Sample the negative labels.
# sampled shape: [num_sampled] tensor
# true_expected_count shape = [batch_size, 1] tensor
# sampled_expected_count shape = [num_sampled] tensor
if sampled_values is None:
sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler(
true_classes=labels,
num_true=num_true,
num_sampled=num_sampled,
unique=True,
range_max=num_classes)
# NOTE: pylint cannot tell that 'sampled_values' is a sequence
# pylint: disable=unpacking-non-sequence
sampled, true_expected_count, sampled_expected_count = sampled_values
# pylint: enable=unpacking-non-sequence
# labels_flat is a [batch_size * num_true] tensor
# sampled is a [num_sampled] int tensor
all_ids = array_ops.concat(0, [labels_flat, sampled])
# weights shape is [num_classes, dim]
all_w = embedding_ops.embedding_lookup(
weights, all_ids, partition_strategy=partition_strategy)
all_b = embedding_ops.embedding_lookup(biases, all_ids)
# true_w shape is [batch_size * num_true, dim]
# true_b is a [batch_size * num_true] tensor
true_w = array_ops.slice(
all_w, [0, 0], array_ops.pack([array_ops.shape(labels_flat)[0], -1]))
true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat))
# inputs shape is [batch_size, dim]
# true_w shape is [batch_size * num_true, dim]
# row_wise_dots is [batch_size, num_true, dim]
dim = array_ops.shape(true_w)[1:2]
new_true_w_shape = array_ops.concat(0, [[-1, num_true], dim])
row_wise_dots = math_ops.mul(
array_ops.expand_dims(inputs, 1),
array_ops.reshape(true_w, new_true_w_shape))
# We want the row-wise dot plus biases which yields a
# [batch_size, num_true] tensor of true_logits.
dots_as_matrix = array_ops.reshape(row_wise_dots,
array_ops.concat(0, [[-1], dim]))
true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])
true_b = array_ops.reshape(true_b, [-1, num_true])
true_logits += true_b
# Lookup weights and biases for sampled labels.
# sampled_w shape is [num_sampled, dim]
# sampled_b is a [num_sampled] float tensor
sampled_w = array_ops.slice(
all_w, array_ops.pack([array_ops.shape(labels_flat)[0], 0]), [-1, -1])
sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), [-1])
# inputs has shape [batch_size, dim]
# sampled_w has shape [num_sampled, dim]
# sampled_b has shape [num_sampled]
# Apply X*W'+B, which yields [batch_size, num_sampled]
sampled_logits = math_ops.matmul(inputs,
sampled_w,
transpose_b=True) + sampled_b
if remove_accidental_hits:
acc_hits = candidate_sampling_ops.compute_accidental_hits(
labels, sampled, num_true=num_true)
acc_indices, acc_ids, acc_weights = acc_hits
# This is how SparseToDense expects the indices.
acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1])
acc_ids_2d_int32 = array_ops.reshape(math_ops.cast(
acc_ids, dtypes.int32), [-1, 1])
sparse_indices = array_ops.concat(
1, [acc_indices_2d, acc_ids_2d_int32], "sparse_indices")
# Create sampled_logits_shape = [batch_size, num_sampled]
sampled_logits_shape = array_ops.concat(
0,
[array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)])
if sampled_logits.dtype != acc_weights.dtype:
acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype)
sampled_logits += sparse_ops.sparse_to_dense(
sparse_indices, sampled_logits_shape, acc_weights,
default_value=0.0, validate_indices=False)
if subtract_log_q:
# Subtract log of Q(l), prior probability that l appears in sampled.
true_logits -= math_ops.log(true_expected_count)
sampled_logits -= math_ops.log(sampled_expected_count)
# Construct output logits and labels. The true labels/logits start at col 0.
out_logits = array_ops.concat(1, [true_logits, sampled_logits])
# true_logits is a float tensor, ones_like(true_logits) is a float tensor
# of ones. We then divide by num_true to ensure the per-example labels sum
# to 1.0, i.e. form a proper probability distribution.
out_labels = array_ops.concat(
1, [array_ops.ones_like(true_logits) / num_true,
array_ops.zeros_like(sampled_logits)])
return out_logits, out_labels
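# Shape sketch for _compute_sampled_logits with hypothetical sizes
# (batch_size=32, dim=128, num_true=1, num_sampled=64):
#   weights: [num_classes, 128], inputs: [32, 128], labels: [32, 1]
#   out_logits, out_labels = _compute_sampled_logits(...)
# Both outputs have shape [32, 1 + 64]; column 0 holds the true class and the
# remaining 64 columns hold the sampled (negative) classes.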
def nce_loss(weights, biases, inputs, labels, num_sampled, num_classes,
num_true=1,
sampled_values=None,
remove_accidental_hits=False,
partition_strategy="mod",
name="nce_loss"):
"""Computes and returns the noise-contrastive estimation training loss.
See [Noise-contrastive estimation: A new estimation principle for
unnormalized statistical models]
(http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf).
Also see our [Candidate Sampling Algorithms Reference]
(../../extras/candidate_sampling.pdf)
Note: In the case where `num_true` > 1, we assign to each target class
the target probability 1 / `num_true` so that the target probabilities
sum to 1 per-example.
Note: It would be useful to allow a variable number of target classes per
example. We hope to provide this functionality in a future release.
For now, if you have a variable number of target classes, you can pad them
out to a constant number by either repeating them or by padding
with an otherwise unused class.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
[num_classes, dim]. The (possibly-partitioned) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
where a sampled class equals one of the target classes. If set to
`True`, this is a "Sampled Logistic" loss instead of NCE, and we are
learning to generate log-odds instead of log probabilities. See
our [Candidate Sampling Algorithms Reference]
(../../extras/candidate_sampling.pdf).
Default is False.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
Returns:
A `batch_size` 1-D tensor of per-example NCE losses.
"""
logits, labels = _compute_sampled_logits(
weights, biases, inputs, labels, num_sampled, num_classes,
num_true=num_true,
sampled_values=sampled_values,
subtract_log_q=True,
remove_accidental_hits=remove_accidental_hits,
partition_strategy=partition_strategy,
name=name)
sampled_losses = sigmoid_cross_entropy_with_logits(logits,
labels,
name="sampled_losses")
# sampled_losses is batch_size x {true_loss, sampled_losses...}
# We sum out true and sampled losses.
return _sum_rows(sampled_losses)
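# Hypothetical word2vec-style use of nce_loss (variable names are illustrative):
#   nce_weights:  [vocab_size, embedding_dim] variable
#   nce_biases:   [vocab_size] variable
#   embed:        [batch_size, embedding_dim] looked-up input embeddings
#   train_labels: [batch_size, 1] int64 target word ids
#   loss = tf.reduce_mean(nce_loss(nce_weights, nce_biases, embed, train_labels,
#                                  num_sampled=64, num_classes=vocab_size))
# The returned per-example losses are usually averaged before being minimized.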
def sampled_softmax_loss(weights, biases, inputs, labels, num_sampled,
num_classes, num_true=1,
sampled_values=None,
remove_accidental_hits=True,
partition_strategy="mod",
name="sampled_softmax_loss"):
"""Computes and returns the sampled softmax training loss.
This is a faster way to train a softmax classifier over a huge number of
classes.
This operation is for training only. It is generally an underestimate of
the full softmax loss.
At inference time, you can compute full softmax probabilities with the
  expression `tf.nn.softmax(tf.matmul(inputs, weights, transpose_b=True) + biases)`.
See our [Candidate Sampling Algorithms Reference]
(../../extras/candidate_sampling.pdf)
Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007)
([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.
Args:
weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
[num_classes, dim]. The (possibly-sharded) class embeddings.
biases: A `Tensor` of shape `[num_classes]`. The class biases.
inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes. Note that this format differs from
the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
num_true: An `int`. The number of target classes per training example.
sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
`sampled_expected_count`) returned by a `*_candidate_sampler` function.
(if None, we default to `log_uniform_candidate_sampler`)
    remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
where a sampled class equals one of the target classes. Default is
True.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
Returns:
A `batch_size` 1-D tensor of per-example sampled softmax losses.
"""
logits, labels = _compute_sampled_logits(
weights, biases, inputs, labels, num_sampled, num_classes,
num_true=num_true,
sampled_values=sampled_values,
subtract_log_q=True,
remove_accidental_hits=remove_accidental_hits,
partition_strategy=partition_strategy,
name=name)
sampled_losses = nn_ops.softmax_cross_entropy_with_logits(logits, labels)
# sampled_losses is a [batch_size] tensor.
return sampled_losses
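# Like nce_loss above, the per-example losses are usually averaged for training,
# e.g. (with hypothetical variables):
#   loss = tf.reduce_mean(sampled_softmax_loss(softmax_weights, softmax_biases,
#                                              inputs, labels,
#                                              num_sampled=64,
#                                              num_classes=vocab_size))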
| 42.974684 | 80 | 0.690802 |
5af0b675b0cde6f92fad81c8e453887acd367e1b
| 1,907 |
py
|
Python
|
setup.py
|
momipsl/pycspr
|
82c1ca003525a3d205d2aa3b7da5d1ecd275e9b5
|
[
"Apache-2.0"
] | 2 |
2021-04-14T13:49:20.000Z
|
2021-07-06T22:07:02.000Z
|
setup.py
|
momipsl/pycspr
|
82c1ca003525a3d205d2aa3b7da5d1ecd275e9b5
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
momipsl/pycspr
|
82c1ca003525a3d205d2aa3b7da5d1ecd275e9b5
|
[
"Apache-2.0"
] | 1 |
2021-04-15T12:52:42.000Z
|
2021-04-15T12:52:42.000Z
|
import os
import re
from codecs import open
from setuptools import setup
from setuptools import find_packages
from setuptools.dist import Distribution
# List of 3rd party python dependencies.
_REQUIRES = [
'pytest',
'tox'
]
class _BinaryDistribution(Distribution):
"""Distribution sub-class to override defaults.
"""
def is_pure(self):
"""Gets flag indicating whether build is pure python or not.
"""
return False
def _read(fname):
"""Returns content of a file.
"""
fpath = os.path.dirname(__file__)
fpath = os.path.join(fpath, fname)
with open(fpath, 'r', 'utf-8') as file_:
return file_.read()
def _get_version():
"""Returns library version by inspecting __init__.py file.
"""
return re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
_read("pycspr/__init__.py"),
re.MULTILINE).group(1)
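# For example, if pycspr/__init__.py contained the (hypothetical) line
#     __version__ = '0.1.0'
# the regular expression above would capture and return '0.1.0'.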
# Library version.
_VERSION = _get_version()
# Library packages.
_PACKAGES = find_packages()
# User readme.
_README = _read('README.md')
setup(
name='pycspr',
version=_VERSION,
description='Python library for interacting with a CSPR node.',
long_description=_README,
author='Mark A. Greenslade',
author_email='[email protected]',
url='https://github.com/pycspr',
packages=_PACKAGES,
include_package_data=True,
install_requires=_REQUIRES,
license='Apache-2.0',
zip_safe=False,
distclass=_BinaryDistribution,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| 22.975904 | 71 | 0.634504 |
9468a84ae42d928ec5c1b3fbd116ee6be8c3600e
| 74 |
py
|
Python
|
shorttext/stack/__init__.py
|
vishalbelsare/PyShortTextCategorization
|
4fa46a148a3eeb923885a7d70c789e988554f758
|
[
"MIT"
] | 481 |
2016-10-07T16:48:40.000Z
|
2022-03-16T12:44:12.000Z
|
shorttext/stack/__init__.py
|
vishalbelsare/PyShortTextCategorization
|
4fa46a148a3eeb923885a7d70c789e988554f758
|
[
"MIT"
] | 56 |
2017-02-02T17:50:14.000Z
|
2021-12-15T05:14:28.000Z
|
shorttext/stack/__init__.py
|
vishalbelsare/PyShortTextCategorization
|
4fa46a148a3eeb923885a7d70c789e988554f758
|
[
"MIT"
] | 70 |
2017-01-28T15:20:46.000Z
|
2021-09-30T15:08:41.000Z
|
from .stacking import StackedGeneralization, LogisticStackedGeneralization
| 74 | 74 | 0.918919 |
fe077b953ed2a25df6d43b3f99b1c7d9d0b55e1c
| 2,195 |
py
|
Python
|
osp/www/app.py
|
davidmcclure/open-syllabus-project
|
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
|
[
"Apache-2.0"
] | 220 |
2016-01-22T21:19:02.000Z
|
2022-01-25T04:33:55.000Z
|
osp/www/app.py
|
davidmcclure/open-syllabus-project
|
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
|
[
"Apache-2.0"
] | 14 |
2016-01-23T14:34:39.000Z
|
2016-09-19T19:58:37.000Z
|
osp/www/app.py
|
davidmcclure/open-syllabus-project
|
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
|
[
"Apache-2.0"
] | 14 |
2016-02-03T13:47:48.000Z
|
2019-03-27T13:09:05.000Z
|
import os
from flask import Flask, request, render_template, jsonify
from webargs.flaskparser import use_args
from webargs.fields import List, Str, Int
from osp.common import config
from osp.citations.models import Text_Index
from osp.www import utils
from osp.www.cache import cache
from osp.www.hit import Hit
app = Flask(__name__)
cache.init_app(app)
@app.route('/')
@use_args(dict(institution_id = List(Int(), missing=None)))
def home(args):
"""
Home page + ranking interface.
"""
facets = utils.bootstrap_facets()
# Bootstrap URL institution(s).
facets['institution'] = utils.institution_facets(
include=args['institution_id']
)
return render_template('home.html', facets=facets)
@app.route('/api/ranks')
@use_args(dict(
query = Str(missing=None),
size = Int(missing=200),
page = Int(missing=1),
corpus = List(Str(), missing=None),
field_id = List(Int(), missing=None),
subfield_id = List(Int(), missing=None),
institution_id = List(Int(), missing=None),
state = List(Str(), missing=None),
country = List(Str(), missing=None),
))
def api_ranks(args):
"""
Ranking API.
"""
filters = {f: args[f] for f in [
'corpus',
'field_id',
'subfield_id',
'institution_id',
'state',
'country',
]}
results = utils.rank_texts(
filters=filters,
query=args['query'],
size=args['size'],
page=args['page'],
)
return jsonify(**results)
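# Example request against this endpoint (parameter values are hypothetical):
#   GET /api/ranks?query=biology&size=50&page=2&field_id=23&state=MA
# Unspecified filters fall back to their `missing` defaults above and are
# passed to utils.rank_texts as None.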
@app.route('/text/<text_id>')
def text(text_id):
"""
Text profile pages.
"""
# Load the text.
text = config.es.get('text', text_id)
# Assigned-with list.
siblings = utils.assigned_with(text_id)
return render_template(
'text.html',
text=Hit(text),
siblings=siblings,
Hit=Hit,
)
@app.route('/graph')
def graph():
"""
Graph viewer.
"""
return render_template('graph.html')
if __name__ == '__main__':
app.run(
host='0.0.0.0',
port=os.getenv('PORT', 5000),
debug=True,
)
| 18.922414 | 59 | 0.584055 |
81dfdc16711e0dc1059df15493d3df5deb7686a1
| 1,346 |
py
|
Python
|
HTMLParser.py
|
0xff1234/wenshuSpider
|
ead15693ecd854eb700b03f47acf905a2d87e423
|
[
"MIT"
] | 23 |
2018-04-25T09:04:01.000Z
|
2022-01-06T07:01:22.000Z
|
HTMLParser.py
|
booltime/wenshuSpider
|
ead15693ecd854eb700b03f47acf905a2d87e423
|
[
"MIT"
] | 1 |
2018-04-28T04:37:54.000Z
|
2018-04-28T04:37:54.000Z
|
HTMLParser.py
|
booltime/wenshuSpider
|
ead15693ecd854eb700b03f47acf905a2d87e423
|
[
"MIT"
] | 9 |
2018-04-29T11:08:31.000Z
|
2022-01-06T07:01:22.000Z
|
# encoding:utf-8
'''
author: ztcooper(github)
contact: [email protected]
LICENSE: MIT
Parse the page source and extract the data
'''
from bs4 import BeautifulSoup
import re
class HtmlParser(object):
def __init__(self):
self.item = dict()
def parse(self, source):
p_title = re.compile(r'"Title\\":\\"(.*?)\\"')
p_pubdate = re.compile(r'"PubDate\\":\\"(.*?)\\"')
p_html = re.compile(r'"Html\\":\\"(.*?)\\"')
p_province = re.compile(r'"法院省份":"(.*?)"')
p_city = re.compile(r'"法院地市":"(.*?)"')
p_area1 = re.compile(r'"法院区县":"(.*?)"')
p_area2 = re.compile(r'"法院区域":"(.*?)"')
        self.item['title'] = p_title.findall(source)  # title
        self.item['pubdate'] = p_pubdate.findall(source)  # publication date
        self.item['region'] = p_province.findall(source)[0] + " " + p_city.findall(
            source)[0] + " " + (p_area1.findall(source)[0] or p_area2.findall(source)[0])  # region
html = p_html.findall(source)[0]
        # extract the body text
soup = BeautifulSoup(html, 'lxml')
divs = soup.find_all('div')
article = ""
for div in divs:
try:
article += div.get_text()
except TypeError:
continue
        self.item['article'] = article.strip()  # body text
return self.item
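# Rough usage sketch (the page source string would come from the spider's
# download step):
#   parser = HtmlParser()
#   item = parser.parse(page_source)
#   # item now holds 'title', 'pubdate', 'region' and 'article'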
| 31.302326 | 98 | 0.508172 |
c0f72de52a5d6c43d9ad11890060231976450ef2
| 1,486 |
py
|
Python
|
setup.py
|
feature-engineer/uttlv
|
ec5633f51eee047c1cdd4902ff0af7873c4f46cd
|
[
"MIT"
] | null | null | null |
setup.py
|
feature-engineer/uttlv
|
ec5633f51eee047c1cdd4902ff0af7873c4f46cd
|
[
"MIT"
] | null | null | null |
setup.py
|
feature-engineer/uttlv
|
ec5633f51eee047c1cdd4902ff0af7873c4f46cd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='uttlv',
version='0.3.1',
description='Python library for TLV objects',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/ustropo/uttlv',
download_url='https://github.com/ustropo/uttlv/archive/v0.3.1.tar.gz',
author='Fernando C. de Souza',
author_email='[email protected]',
license='MIT',
packages=['uttlv'],
install_requires=[],
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
]
)
| 31.617021 | 78 | 0.606999 |
cc60b05571d25f5f179e22f96816f16a85c3bf3a
| 1,615 |
py
|
Python
|
passgen.py
|
giovannifreitas/password-generator
|
0882542d0758d7965fb5726b453514d29dfdfe91
|
[
"MIT"
] | null | null | null |
passgen.py
|
giovannifreitas/password-generator
|
0882542d0758d7965fb5726b453514d29dfdfe91
|
[
"MIT"
] | null | null | null |
passgen.py
|
giovannifreitas/password-generator
|
0882542d0758d7965fb5726b453514d29dfdfe91
|
[
"MIT"
] | null | null | null |
import random
import string
import sys
import getopt
# Types of password
LOWERCASE = string.ascii_lowercase
UPPERCASE = string.ascii_uppercase
DIGITS = string.digits
SPECIALS = string.punctuation
# Command line options and parameter list
short_options = "luds"
long_options = ["lowercase", "uppercase", "digits", "specials"]
all_arguments = sys.argv
argument_list = all_arguments[1:]
def show_menu():
print("Usage: python passgen.py -[options] -[length] \n")
print("Options:\n")
print("-l --lowercase Put lowercase characters in your password")
print("-u --uppercase Put uppercase characters in your password")
print("-d --digits Put digits in your password")
print("-s --specials Put special characters in your password")
def generate_password(argv):
opts, value = getopt.getopt(argument_list, short_options, long_options)
password_len = int(value[0])
output_password = ''
for i, v in opts:
if i in ("-l", "--lowercase"):
output_password += LOWERCASE
if i in ("-u", "--uppercase"):
output_password += UPPERCASE
if i in ("-s", "--specials"):
output_password += SPECIALS
if i in ("-d", "--digits"):
output_password += DIGITS
password_list = list(output_password)
random.shuffle(password_list)
output_password = ''.join(random.choices(password_list, k=password_len))
return output_password
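# Example invocation (the output is random; the value shown is only illustrative):
#   $ python passgen.py -lud 16
#   Password generated: k7PqX2vM9aRw3TbZ
# combines lowercase, uppercase and digits into a 16-character password.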
if __name__ == '__main__':
if len(sys.argv) == 1:
show_menu()
else:
print(f"\nPassword generated: {generate_password(sys.argv)}")
| 30.471698 | 76 | 0.656966 |
e0066250afc3ddf4238393a2d09eb8d7493ae4f8
| 1,160 |
py
|
Python
|
Codes/xiaohong2019/leetcode/1_two_sum.py
|
GinRyan/algorithm
|
2b2dafbeaa9f104a541cbd4172e0f3e0786095f2
|
[
"Apache-2.0"
] | 1 |
2019-05-17T15:56:08.000Z
|
2019-05-17T15:56:08.000Z
|
Codes/xiaohong2019/leetcode/1_two_sum.py
|
GinRyan/algorithm
|
2b2dafbeaa9f104a541cbd4172e0f3e0786095f2
|
[
"Apache-2.0"
] | null | null | null |
Codes/xiaohong2019/leetcode/1_two_sum.py
|
GinRyan/algorithm
|
2b2dafbeaa9f104a541cbd4172e0f3e0786095f2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# URL : https://leetcode.com/problems/two-sum/
"""
Given an array of integers nums and a target value target, find the two integers in the array
that add up to the target and return their indices.
You may assume that each input has exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9
Since nums[0] + nums[1] = 2 + 7 = 9
return [0, 1]
"""
"""
Adding every pair of numbers in nums and checking whether the sum equals target would need an O(n^2) scan.
Instead, compute the complement: e.g. 9 - 2 == 7, meaning 2 needs a 7.
Then check whether 7 has already been seen; if so, return the two indices and finish;
if not, store the current value in the dict of values already processed.
These complements need extra storage whose entries hold the original array value and its index.
"""
"""
Runtime: 36 ms, faster than 99.70% of Python submissions
Memory usage: 13.1 MB, better than 14.79% of Python submissions
"""
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
difference_value_dict = dict()
for index, num in enumerate(nums):
difference_value = target - num
if difference_value in difference_value_dict:
return [difference_value_dict[difference_value], index]
difference_value_dict[num] = index
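# Worked example for nums = [2, 7, 11, 15], target = 9:
#   index 0, num 2: difference 9 - 2 = 7 is not in the dict yet -> store {2: 0}
#   index 1, num 7: difference 9 - 7 = 2 is in the dict -> return [dict[2], 1] == [0, 1]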
if __name__ == "__main__":
solution = Solution()
assert solution.twoSum([2, 7, 11, 15], 9) == [0, 1]
assert solution.twoSum([4, 7, 0, 3], 3) == [2, 3]
| 23.673469 | 71 | 0.62931 |
0691b9e93df553497f0471e4fe9b136a1d01bebf
| 10,669 |
py
|
Python
|
whoSentMail.py
|
Mukesh7197/anlp-1
|
eff3de6349b9ba4cab3702d36ecbacb6cb551611
|
[
"MIT"
] | null | null | null |
whoSentMail.py
|
Mukesh7197/anlp-1
|
eff3de6349b9ba4cab3702d36ecbacb6cb551611
|
[
"MIT"
] | null | null | null |
whoSentMail.py
|
Mukesh7197/anlp-1
|
eff3de6349b9ba4cab3702d36ecbacb6cb551611
|
[
"MIT"
] | null | null | null |
###################################################################################################################
# To determine the sender of a mail using Python and NLTK
# Five sentences from each of the senders Ram and Raj are available as corpora
# Preprocess the corpora, count the words in each corpus and compute the total number of words
# Calculate the probability of each word and store it as a fraction for each corpus
# Calculate the probability of the test sentence "I wish you would come"
# To resolve the zero-probability problem, add 1 to the numerator of each word probability
# Recalculate the probability for each corpus and arrive at a decision
###################################################################################################################
# IMPORT LIBRARIES
###################################################################################################################
import pandas as pd
from fractions import Fraction
import nltk
from nltk import FreqDist
###################################################################################################################
# CORPUS
###################################################################################################################
Ram = ['I wish you the best', 'I hope to reach home by 6 P M', 'I wish to go home early',
'I do not want to buy this', 'I hope it rains today']
Raj = ['I hope to play tennis tonight', 'I hope to win this tournament', 'I hope to buy this car in the next year',
'I wish to get a good score this time', 'I wish they would come']
###################################################################################################################
# PREPROCESS THE CORPORA AND COLLECT DATA: NUMBER OF WORDS IN EACH CORPUS AND TOTAL WORD COUNT
###################################################################################################################
ramWords = []
for i in range(0,len(Ram)):
#Split the strings based on blankspace
sen = Ram[i].split(' ')
#Extend the list by adding
ramWords.extend(sen)
print("Number of words in Ram: ", len(ramWords))
rajWords = []
for i in range(0,len(Raj)):
#Split the strings based on blankspace
sen = Raj[i].split(' ')
#Extend the list by adding
rajWords.extend(sen)
print("Number of words in Raj: ", len(rajWords))
totWords = len(ramWords) + len(rajWords)
print("Total words in both the corpus: ", totWords)
uniqRamWords = list(set(ramWords))
uniqRajWords = list(set(rajWords))
UniqWords = uniqRamWords + uniqRajWords
ttlUniqWords = set(UniqWords)
print("Vocabulary of ram corpus: ", len(uniqRamWords))
print("Vocabulary of raj corpus: ", len(uniqRajWords))
print("Vocabulary of combined corpus: ", len(ttlUniqWords))
#Store the frequency distribution of words in the respective corpus as a dictionary
fDistRam = dict(nltk.FreqDist(ramWords))
fDistRaj = dict(nltk.FreqDist(rajWords))
print("Frequency of words in Ram Corpus\n", fDistRam)
print("Frequency of words in Raj Corpus\n", fDistRaj)
###################################################################################################################
#Calculate P(X1|y) = Count(X1,y)/Count(Y)
#y are class labels (Ram or Raj)
#X1 are words (I, wish, hope etc.)
#Y is the total number of words in both the corpus (ie) 68
###################################################################################################################
#Define a function to calculate probability and store result as a fraction
probRam = {}
probRaj = {}
def probRamXY(w1):
probRam[w1] = 0
for key, value in fDistRam.items():
if w1 in key:
probRam[w1] = Fraction(value,totWords)
return probRam[w1]
def probRajXY(w1):
probRaj[w1] = 0
for key, value in fDistRaj.items():
if w1 in key:
probRaj[w1] = Fraction(value,totWords)
return probRaj[w1]
probRajXY('hope')
probRajXY('I')
#Calculate P(X1|y) for all unique words in Ram and Raj corpus and store it in a list
prRam = {}
prRaj = {}
allWords = ramWords + rajWords
print("Total number of words in the combined corpus: ", len(allWords))
uniqWords = set(allWords)
print("\nUnique words in the combined corpus: ", len(uniqWords))
for words in uniqWords:
prRam[words] = probRamXY(words)
prRaj[words] = probRajXY(words)
print("\nProbabilities of words in Ram corpus: \n", prRam)
print("\n\nLength of words for which probability calculated in Ram corpus: ", len(prRam))
print("\nProbabilities of words in Raj corpus: \n", prRaj)
print("\n\nLength of words for which probability calculated in Raj corpus: ", len(prRaj))
#Prior probability P(y) = count(y)/count(Y). As there are only two classes it is 1/2
PrProb = Fraction(1,2)
print("Prior probability :", PrProb)
###################################################################################################################
#Guess who wrote the sentence "I wish you would come"
###################################################################################################################
#For Ram Corpus
def bRam(w1,w2,w3,w4,w5):
lstVal = []
for key, value in prRam.items():
if key == w1:
lstVal.append(value)
if key == w2:
lstVal.append(value)
if key == w3:
lstVal.append(value)
if key == w4:
lstVal.append(value)
if key == w5:
lstVal.append(value)
finProb = 1
for i in range(len(lstVal)):
finProb = finProb*lstVal[i]
print("Baye's Probability from Ram Corpus is: ", PrProb*finProb)
return lstVal
bRam('I','wish','you','would','come')
#Result is zero
###################################################################################################################
#Guess who wrote the sentence "I wish you would come"
###################################################################################################################
#For Raj Corpus
def bRaj(w1,w2,w3,w4,w5):
lstVal = []
for key, value in prRaj.items():
if key == w1:
lstVal.append(value)
if key == w2:
lstVal.append(value)
if key == w3:
lstVal.append(value)
if key == w4:
lstVal.append(value)
if key == w5:
lstVal.append(value)
#print(any(x == 0 for x in lstVal))
finProb = 1
for i in range(len(lstVal)):
finProb = finProb*lstVal[i]
print("Baye's Probability from Raj Corpus is: ", PrProb*finProb)
return lstVal
bRaj('I','wish','you','would','come')
#Result is zero
###################################################################################################################
#Both probabilities are zero. #Hence add 1 to each of the words in the numerator only
###################################################################################################################
#Get the keys of Ram corpus for which the value is zero and store the keys separately
keyRam0 = []
keyRaj0 = []
for k, v in prRam.items():
if v == 0:
keyRam0.append(k)
for k, v in prRaj.items():
if v == 0:
keyRaj0.append(k)
#print(keyRam0)
#print("Number of words in combined corpus but not in Ram corpus: ", len(keyRam0))
#print(keyRaj0)
#print("Number of words in combined corpus but not in Raj corpus: ", len(keyRaj0))
#Increase numerator values by 1 in the respective dictionary
def upProbRamXY(w1):
probRam[w1] = Fraction(1,68)
for key, value in fDistRam.items():
if w1 in key:
probRam[w1] = Fraction(value+1,totWords)
return probRam[w1]
def upProbRajXY(w1):
probRaj[w1] = Fraction(1,68)
for key, value in fDistRaj.items():
if w1 in key:
probRaj[w1] = Fraction(value+1,totWords)
return probRaj[w1]
#print("Probability of missing word car in Ram corpus", upProbRamXY('car'))
#print("Probability of missing word home in Raj corpus",upProbRajXY('home'))
#print("Original Probability of present word I in Ram corpus", probRamXY('I'))
#print("Updated Probability of present word I in Ram corpus", upProbRamXY('I'))
#print("Original Probability of present word I in Raj corpus", probRajXY('I'))
#print("Updated Probability of present word I in Raj corpus", upProbRajXY('I'))
###################################################################################################################
#update P(X1|y) for all unique words in Ram and Raj corpus and store it in a list
uprRam = {}
uprRaj = {}
for words in uniqWords:
uprRam[words] = upProbRamXY(words)
uprRaj[words] = upProbRajXY(words)
#print("\nUpdated Probabilities of words in Ram corpus: \n", uprRam)
#print("\n\nUpdated number of words for which probability calculated in Ram corpus: ", len(uprRam))
#print("\nUpdated Probabilities of words in Raj corpus: \n", uprRaj)
#print("\n\nUpdated number of words for which probability calculated in Raj corpus: ", len(uprRaj))
def ubRam(w1,w2,w3,w4,w5):
lstVal = []
for key, value in uprRam.items():
if key == w1:
lstVal.append(value)
if key == w2:
lstVal.append(value)
if key == w3:
lstVal.append(value)
if key == w4:
lstVal.append(value)
if key == w5:
lstVal.append(value)
finProb = 1
for i in range(len(lstVal)):
finProb = finProb*lstVal[i]
print("Baye's Probability from revised Ram Corpus is: ", PrProb*finProb)
return finProb
def ubRaj(w1,w2,w3,w4,w5):
lstVal = []
for key, value in uprRaj.items():
if key == w1:
lstVal.append(value)
if key == w2:
lstVal.append(value)
if key == w3:
lstVal.append(value)
if key == w4:
lstVal.append(value)
if key == w5:
lstVal.append(value)
finProb = 1
for i in range(len(lstVal)):
finProb = finProb*lstVal[i]
print("Baye's Probability from revised Raj Corpus is: ", PrProb*finProb)
return finProb
###################################################################################################################
#FINAL DECISION
###################################################################################################################
#print(bRam('I','wish','you','would','come'))
#print(bRaj('I','wish','you','would','come'))
valUpdatedRam = ubRam('I','wish','you','would','come')
valUpdatedRaj = ubRaj('I','wish','you','would','come')
print("Ram sent the mail") if valUpdatedRam > valUpdatedRaj else print("Raj sent the mail")
###################################################################################################################
| 40.260377 | 121 | 0.523292 |
204516f5832d291557578f8f141064ebbd2f6156
| 5,639 |
py
|
Python
|
test_utils/testmaker/__init__.py
|
frac/django-test-utils
|
35263eb74697b61ba56aec59c8c7831425bc70b0
|
[
"MIT"
] | 1 |
2015-11-05T02:50:34.000Z
|
2015-11-05T02:50:34.000Z
|
test_utils/testmaker/__init__.py
|
frac/django-test-utils
|
35263eb74697b61ba56aec59c8c7831425bc70b0
|
[
"MIT"
] | null | null | null |
test_utils/testmaker/__init__.py
|
frac/django-test-utils
|
35263eb74697b61ba56aec59c8c7831425bc70b0
|
[
"MIT"
] | null | null | null |
import logging
import os
from os import path
from django.core import serializers as django_serializers
from test_utils.management.commands.relational_dumpdata import _relational_dumpdata
from django.template import Context, Template
from django.conf import settings
TESTMAKER_TEMPLATE = """
from django.test import TestCase
from django.test import Client
from django import template
from django.db.models import get_model
class Testmaker(TestCase):
{% if create_fixtures %}
fixtures = ["{{ fixture_file }}"]
{% else %}
#fixtures = ["{{ app_name }}_testmaker"]
{% endif %}
"""
class Testmaker(object):
enabled = False
#Have global log and serializer objects so that we never log things twice.
log = None
serializer = None
def __init__(self, app=None, verbosity=0, create_fixtures=False, fixture_format='xml', addrport='', **kwargs):
self.app = app
self.verbosity = verbosity
self.create_fixtures = create_fixtures
self.fixture_format = fixture_format
self.addrport = addrport
self.kwargs = kwargs
#Assume we're writing new tests until proven otherwise
self.new_tests = True
def prepare(self, insert_middleware=False):
self.set_paths()
if not hasattr(self, 'has_run_logging'):
self.setup_logging()
self.prepare_test_file()
if insert_middleware:
self.insert_middleware()
Testmaker.enabled = True
def set_paths(self):
if self.app:
self.app_name = self.app.__name__.split('.')[-2]
self.base_dir = path.dirname(self.app.__file__)
else:
self.app_name = 'tmp'
#TODO: Need to make this platform independent.
self.base_dir = '/tmp/testmaker/'
if not path.exists(self.base_dir):
os.mkdir(self.base_dir)
#Figure out where to store data
self.fixtures_dir = path.join(self.base_dir, 'fixtures')
self.fixture_file = path.join(self.fixtures_dir, '%s_testmaker.%s' % (self.app_name, self.fixture_format))
if self.create_fixtures:
if not path.exists(self.fixtures_dir):
os.mkdir(self.fixtures_dir)
#Setup test and serializer files
self.tests_dir = path.join(self.base_dir, 'tests')
self.test_file = path.join(self.tests_dir, '%s_testmaker.py' % (self.app_name))
#TODO: Make this have the correct file extension based on serializer used
self.serialize_file = path.join(self.tests_dir, '%s_testdata.serialized' % (self.app_name))
if not path.exists(self.tests_dir):
os.mkdir(self.tests_dir)
if path.exists(self.test_file):
#Already have tests there.
self.new_tests = False
if self.verbosity > 0:
print "Handling app '%s'" % self.app_name
print "Logging tests to %s" % self.test_file
if self.create_fixtures:
print "Logging fixtures to %s" % self.fixture_file
def setup_logging(self, test_file=None, serialize_file=None):
#supress other logging
logging.basicConfig(level=logging.CRITICAL,
filename=path.devnull)
#Override default if its passed in
if not test_file:
test_file = self.test_file
else:
self.test_file = test_file
log = logging.getLogger('testprocessor')
[log.removeHandler(h) for h in log.handlers]
log.setLevel(logging.INFO)
handler = logging.FileHandler(test_file, 'a')
handler.setFormatter(logging.Formatter('%(message)s'))
log.addHandler(handler)
Testmaker.log = log
#Override default if its passed in
if not serialize_file:
serialize_file = self.serialize_file
else:
self.serialize_file = serialize_file
log_s = logging.getLogger('testserializer')
[log_s.removeHandler(h) for h in log_s.handlers]
log_s.setLevel(logging.INFO)
handler_s = logging.FileHandler(self.serialize_file, 'a')
handler_s.setFormatter(logging.Formatter('%(message)s'))
log_s.addHandler(handler_s)
Testmaker.serializer = log_s
self.has_run_logging = True
def prepare_test_file(self):
if self.new_tests:
t = Template(TESTMAKER_TEMPLATE)
c = Context({
'create_fixtures': self.create_fixtures,
'app_name': self.app_name,
'fixture_file': self.fixture_file,
})
self.log.info(t.render(c))
else:
if self.verbosity > 0:
print "Appending to current log file"
def insert_middleware(self):
if self.verbosity > 0:
print "Inserting TestMaker logging server..."
if 'test_utils.testmaker.middleware.testmaker.TestMakerMiddleware' not in settings.MIDDLEWARE_CLASSES:
settings.MIDDLEWARE_CLASSES += ('test_utils.testmaker.middleware.testmaker.TestMakerMiddleware',)
def make_fixtures(self):
if self.verbosity > 0:
print "Creating fixture at " + self.fixture_file
objects, collected = _relational_dumpdata(self.app, set())
serial_file = open(self.fixture_file, 'a')
try:
django_serializers.serialize(self.fixture_format, objects, stream=serial_file, indent=4)
except Exception, e:
if self.verbosity > 0:
print ("Unable to serialize database: %s" % e)
@classmethod
def logfile(klass):
return klass.log.handlers[0].baseFilename
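# Rough usage sketch (the app argument is a hypothetical Django app's models module):
#   tm = Testmaker(app=myapp.models, verbosity=1, create_fixtures=True)
#   tm.prepare(insert_middleware=True)
#   ... exercise the site through the logging middleware ...
#   tm.make_fixtures()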
| 37.098684 | 114 | 0.638411 |
731ff71b7fbec704391a3b5639f622b50bf91d77
| 1,608 |
py
|
Python
|
app/helper/detectors.py
|
suinaowawa/chinese-ocr-flask-deploy
|
250c2650c5fb3ed09b29a50b5399fe86508d1775
|
[
"MIT"
] | 1 |
2021-11-27T00:03:06.000Z
|
2021-11-27T00:03:06.000Z
|
helper/detectors.py
|
alvinli-jp/darknet-ocr
|
db9510c2dfc9609f6a7b524f692663def6c9d120
|
[
"MIT"
] | null | null | null |
helper/detectors.py
|
alvinli-jp/darknet-ocr
|
db9510c2dfc9609f6a7b524f692663def6c9d120
|
[
"MIT"
] | null | null | null |
#coding:utf-8
import numpy as np
from helper.text_proposal_connector import TextProposalConnector
from helper.image import rotate_nms,nms,get_boxes
def normalize(data):
if data.shape[0]==0:
return data
max_=data.max()
min_=data.min()
return (data-min_)/(max_-min_) if max_-min_!=0 else data-min_
class TextDetector:
"""
Detect text from an image
"""
def __init__(self,MAX_HORIZONTAL_GAP=30,MIN_V_OVERLAPS=0.6,MIN_SIZE_SIM=0.6):
"""
pass
"""
self.text_proposal_connector=TextProposalConnector(MAX_HORIZONTAL_GAP,MIN_V_OVERLAPS,MIN_SIZE_SIM)
def detect(self, text_proposals,scores,size,
TEXT_PROPOSALS_MIN_SCORE=0.7,
TEXT_PROPOSALS_NMS_THRESH=0.3,
TEXT_LINE_NMS_THRESH = 0.3,
TEXT_LINE_SCORE=0.7
):
ind = scores>TEXT_PROPOSALS_MIN_SCORE
text_proposals = text_proposals[ind]
scores = scores[ind]
text_proposals, scores = nms(text_proposals,scores,TEXT_PROPOSALS_MIN_SCORE,TEXT_PROPOSALS_NMS_THRESH)
if len(text_proposals)>0:
scores = normalize(scores)
text_lines,scores = self.text_proposal_connector.get_text_lines(text_proposals, scores, size)##cluster lines
text_lines = get_boxes(text_lines)
#text_lines, scores = rotate_nms(text_lines,scores,TEXT_LINE_SCORE,TEXT_LINE_NMS_THRESH)##?cv2.dnn.rotate_nms error
return text_lines, scores
else:
return [],[]
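# Rough usage sketch: `text_proposals` and `scores` would come from an upstream
# text-proposal network and `size` would be the image's (height, width):
#   detector = TextDetector(MAX_HORIZONTAL_GAP=30)
#   text_lines, line_scores = detector.detect(text_proposals, scores, size)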
| 34.212766 | 131 | 0.63806 |
d6b32ebaf3d896a3ee0e12cecfa06519b0bdfc6c
| 21,046 |
py
|
Python
|
src/app.py
|
vbrik/topology
|
de07dab847f35e6ea5e1ddc043a768c478d8e36a
|
[
"Apache-2.0"
] | null | null | null |
src/app.py
|
vbrik/topology
|
de07dab847f35e6ea5e1ddc043a768c478d8e36a
|
[
"Apache-2.0"
] | null | null | null |
src/app.py
|
vbrik/topology
|
de07dab847f35e6ea5e1ddc043a768c478d8e36a
|
[
"Apache-2.0"
] | null | null | null |
"""
Application File
"""
import csv
import flask
import flask.logging
from flask import Flask, Response, make_response, request, render_template
from io import StringIO
import logging
import os
import re
import sys
import traceback
import urllib.parse
from webapp import default_config
from webapp.common import readfile, to_xml_bytes, to_json_bytes, Filters
from webapp.forms import GenerateDowntimeForm
from webapp.models import GlobalData
from webapp.topology import GRIDTYPE_1, GRIDTYPE_2
from webapp.oasis_managers import get_oasis_manager_endpoint_info
try:
import stashcache
except ImportError as e:
stashcache = None
print("*** Couldn't import stashcache", file=sys.stderr)
traceback.print_exc(file=sys.stderr)
print("*** Continuing without authfile support", file=sys.stderr)
class InvalidArgumentsError(Exception): pass
def _verify_config(cfg):
if not cfg["NO_GIT"]:
ssh_key = cfg["GIT_SSH_KEY"]
if not ssh_key:
raise ValueError("GIT_SSH_KEY must be specified if using Git")
elif not os.path.exists(ssh_key):
raise FileNotFoundError(ssh_key)
else:
st = os.stat(ssh_key)
if st.st_uid != os.getuid() or (st.st_mode & 0o7777) not in (0o700, 0o600, 0o400):
if cfg["IGNORE_SECRET_PERMS"]:
app.logger.info("Ignoring permissions/ownership issues on " + ssh_key)
else:
raise PermissionError(ssh_key)
default_authorized = False
app = Flask(__name__)
app.config.from_object(default_config)
app.config.from_pyfile("config.py", silent=True)
if "TOPOLOGY_CONFIG" in os.environ:
app.config.from_envvar("TOPOLOGY_CONFIG", silent=False)
_verify_config(app.config)
if "AUTH" in app.config:
if app.debug:
default_authorized = app.config["AUTH"]
else:
print("ignoring AUTH option when FLASK_ENV != development", file=sys.stderr)
if not app.config.get("SECRET_KEY"):
app.config["SECRET_KEY"] = "this is not very secret"
### Replace previous with this when we want to add CSRF protection
# if app.debug:
# app.config["SECRET_KEY"] = "this is not very secret"
# else:
# raise Exception("SECRET_KEY required when FLASK_ENV != development")
if "LOGLEVEL" in app.config:
app.logger.setLevel(app.config["LOGLEVEL"])
global_data = GlobalData(app.config, strict=app.config.get("STRICT", app.debug))
cilogon_pass = readfile(global_data.cilogon_ldap_passfile, app.logger)
if not cilogon_pass:
app.logger.warning("Note, no CILOGON_LDAP_PASSFILE configured; "
"OASIS Manager ssh key lookups will be unavailable.")
def _fix_unicode(text):
"""Convert a partial unicode string to full unicode"""
return text.encode('utf-8', 'surrogateescape').decode('utf-8')
@app.route('/')
def homepage():
return render_template('homepage.html.j2')
@app.route('/map/iframe')
def map():
rgsummary = global_data.get_topology().get_resource_summary()
return _fix_unicode(render_template('iframe.html.j2', resourcegroups=rgsummary["ResourceSummary"]["ResourceGroup"]))
@app.route('/schema/<xsdfile>')
def schema(xsdfile):
if xsdfile in ["vosummary.xsd", "rgsummary.xsd", "rgdowntime.xsd", "miscuser.xsd", "miscproject.xsd"]:
with open("schema/" + xsdfile, "r") as xsdfh:
return Response(xsdfh.read(), mimetype="text/xml")
else:
flask.abort(404)
@app.route('/miscuser/xml')
def miscuser_xml():
return Response(to_xml_bytes(global_data.get_contacts_data().get_tree(_get_authorized())),
mimetype='text/xml')
@app.route('/nsfscience/csv')
def nsfscience_csv():
nsfscience = global_data.get_mappings().nsfscience
if not nsfscience:
return Response("Error getting Field of Science mappings", status=503)
buffer = StringIO()
writer = csv.writer(buffer, delimiter=",")
writer.writerow(["Topology Field of Science", "NSF Field of Science"])
writer.writerows(nsfscience.items())
response = make_response(buffer.getvalue())
response.headers.set("Content-Type", "text/csv")
response.headers.set("Content-Disposition", "attachment", filename="nsfscience.csv")
return response
@app.route('/contacts')
def contacts():
try:
authorized = _get_authorized()
users_list = global_data.get_contacts_data().get_tree(_get_authorized())["Users"]["User"]
return _fix_unicode(render_template('contacts.html.j2', users=users_list, authorized=authorized))
except (KeyError, AttributeError):
app.log_exception(sys.exc_info())
return Response("Error getting users", status=503) # well, it's better than crashing
@app.route('/miscproject/xml')
def miscproject_xml():
return Response(to_xml_bytes(global_data.get_projects()), mimetype='text/xml')
@app.route('/vosummary/xml')
def vosummary_xml():
return _get_xml_or_fail(global_data.get_vos_data().get_tree, request.args)
@app.route('/rgsummary/xml')
def rgsummary_xml():
return _get_xml_or_fail(global_data.get_topology().get_resource_summary, request.args)
@app.route('/rgdowntime/xml')
def rgdowntime_xml():
return _get_xml_or_fail(global_data.get_topology().get_downtimes, request.args)
@app.route('/rgdowntime/ical')
def rgdowntime_ical():
try:
filters = get_filters_from_args(request.args)
except InvalidArgumentsError as e:
return Response("Invalid arguments: " + str(e), status=400)
response = make_response(global_data.get_topology().get_downtimes_ical(False, filters).to_ical())
response.headers.set("Content-Type", "text/calendar")
response.headers.set("Content-Disposition", "attachment", filename="downtime.ics")
return response
@app.route("/stashcache/authfile")
def authfile():
return _get_cache_authfile(public_only=False)
@app.route("/stashcache/authfile-public")
def authfile_public():
return _get_cache_authfile(public_only=True)
@app.route("/stashcache/origin-authfile-public")
def origin_authfile_public():
return _get_origin_authfile(public_only=True)
@app.route("/stashcache/origin-authfile")
def origin_authfile():
return _get_origin_authfile(public_only=False)
@app.route("/stashcache/scitokens")
def scitokens():
if not stashcache:
return Response("Can't get scitokens config: stashcache module unavailable", status=503)
cache_fqdn = request.args.get("cache_fqdn")
origin_fqdn = request.args.get("origin_fqdn")
if not cache_fqdn and not origin_fqdn:
return Response("FQDN of cache or origin server required in the 'cache_fqdn' or 'origin_fqdn' argument", status=400)
try:
if cache_fqdn:
cache_scitokens = stashcache.generate_cache_scitokens(global_data.get_vos_data(),
global_data.get_topology().get_resource_group_list(),
fqdn=cache_fqdn,
suppress_errors=False)
return Response(cache_scitokens, mimetype="text/plain")
elif origin_fqdn:
origin_scitokens = stashcache.generate_origin_scitokens(global_data.get_vos_data(),
global_data.get_topology().get_resource_group_list(),
fqdn=origin_fqdn,
suppress_errors=False)
return Response(origin_scitokens, mimetype="text/plain")
except stashcache.NotRegistered as e:
return Response("# No resource registered for {}\n"
"# Please check your query or contact [email protected]\n"
.format(str(e)),
mimetype="text/plain", status=404)
except stashcache.DataError as e:
app.logger.error("{}: {}".format(request.full_path, str(e)))
return Response("# Error generating scitokens config for this FQDN: {}\n".format(str(e)) +
"# Please check configuration in OSG topology or contact [email protected]\n",
mimetype="text/plain", status=400)
except Exception:
app.log_exception(sys.exc_info())
return Response("Server error getting scitokens config, please contact [email protected]", status=503)
@app.route("/oasis-managers/json")
def oasis_managers():
if not _get_authorized():
return Response("Not authorized", status=403)
vo = request.args.get("vo")
if not vo:
return Response("'vo' argument is required", status=400)
if not cilogon_pass:
return Response("CILOGON_LDAP_PASSFILE not configured; "
"OASIS Managers info unavailable", status=503)
mgrs = get_oasis_manager_endpoint_info(global_data, vo, cilogon_pass)
return Response(to_json_bytes(mgrs), mimetype='application/json')
def _get_cache_authfile(public_only):
if not stashcache:
return Response("Can't get authfile: stashcache module unavailable", status=503)
cache_fqdn = request.args.get("cache_fqdn")
try:
if public_only:
generate_function = stashcache.generate_public_cache_authfile
else:
generate_function = stashcache.generate_cache_authfile
auth = generate_function(global_data.get_vos_data(),
global_data.get_topology().get_resource_group_list(),
fqdn=cache_fqdn,
legacy=app.config["STASHCACHE_LEGACY_AUTH"],
suppress_errors=False)
except stashcache.NotRegistered as e:
return Response("# No resource registered for {}\n"
"# Please check your query or contact [email protected]\n"
.format(str(e)),
mimetype="text/plain", status=404)
except stashcache.DataError as e:
app.logger.error("{}: {}".format(request.full_path, str(e)))
return Response("# Error generating authfile for this FQDN: {}\n".format(str(e)) +
"# Please check configuration in OSG topology or contact [email protected]\n",
mimetype="text/plain", status=400)
except Exception:
app.log_exception(sys.exc_info())
return Response("Server error getting authfile, please contact [email protected]", status=503)
return Response(auth, mimetype="text/plain")
def _get_origin_authfile(public_only):
if not stashcache:
return Response("Can't get authfile: stashcache module unavailable", status=503)
if 'fqdn' not in request.args:
return Response("FQDN of origin server required in the 'fqdn' argument", status=400)
try:
auth = stashcache.generate_origin_authfile(request.args['fqdn'],
global_data.get_vos_data(),
global_data.get_topology().get_resource_group_list(),
suppress_errors=False,
public_only=public_only)
except stashcache.NotRegistered as e:
return Response("# No resource registered for {}\n"
"# Please check your query or contact [email protected]\n"
.format(str(e)),
mimetype="text/plain", status=404)
except stashcache.DataError as e:
app.logger.error("{}: {}".format(request.full_path, str(e)))
return Response("# Error generating authfile for this FQDN: {}\n".format(str(e)) +
"# Please check configuration in OSG topology or contact [email protected]\n",
mimetype="text/plain", status=400)
except Exception:
app.log_exception(sys.exc_info())
return Response("Server error getting authfile, please contact [email protected]", status=503)
if not auth.strip():
auth = """\
# No authorizations generated for this origin; please check configuration in OSG topology or contact [email protected]
"""
return Response(auth, mimetype="text/plain")
@app.route("/generate_downtime", methods=["GET", "POST"])
def generate_downtime():
form = GenerateDowntimeForm(request.form)
def github_url(action, path):
assert action in ("tree", "edit", "new"), "invalid action"
base = global_data.topology_data_repo
branch_q = urllib.parse.quote(global_data.topology_data_branch)
path_q = urllib.parse.quote(path)
param = f"?filename={path_q}" if action == "new" else f"/{path_q}"
return f"{base}/{action}/{branch_q}{param}"
github = False
github_topology_root = ""
if re.match("http(s?)://github.com", global_data.topology_data_repo):
github = True
github_topology_root = github_url("tree", "topology")
def render_form(**kwargs):
return render_template("generate_downtime_form.html.j2", form=form, infos=form.infos, github=github,
github_topology_root=github_topology_root, **kwargs)
topo = global_data.get_topology()
form.facility.choices = _make_choices(topo.resources_by_facility.keys(), select_one=True)
facility = form.facility.data
if facility not in topo.resources_by_facility:
form.facility.data = ""
form.resource.choices = [("", "-- Select a facility first --")]
form.resource.data = ""
form.services.choices = [("", "-- Select a facility and a resource first --")]
return render_form()
resource_choices = [("", "-- Select one --")]
for r in topo.resources_by_facility[facility]:
resource_choices.append((_fix_unicode(r.name),
f"{_fix_unicode(r.name)} ({_fix_unicode(r.fqdn)})"))
form.resource.choices = resource_choices
if form.change_facility.data: # "Change Facility" clicked
form.resource.data = ""
form.services.choices = [("", "-- Select a resource first --")]
return render_form()
resource = form.resource.data
if resource not in topo.service_names_by_resource:
return render_form()
form.services.choices = _make_choices(topo.service_names_by_resource[resource])
if form.change_resource.data: # "Change Resource" clicked
return render_form()
if not form.validate_on_submit():
return render_form()
filepath = "topology/" + topo.downtime_path_by_resource[resource]
# ^ filepath relative to the root of the topology repo checkout
filename = os.path.basename(filepath)
# Add github edit URLs or directory URLs for the repo, if we can.
new_url = edit_url = site_dir_url = ""
if github:
site_dir_url = github_url("tree", os.path.dirname(filepath))
if os.path.exists(os.path.join(global_data.topology_dir, topo.downtime_path_by_resource[resource])):
edit_url = github_url("edit", filepath)
else:
new_url = github_url("new", filepath)
form.yamloutput.data = form.get_yaml()
return render_form(filepath=filepath, filename=filename,
edit_url=edit_url, site_dir_url=site_dir_url,
new_url=new_url)
def _make_choices(iterable, select_one=False):
c = [(_fix_unicode(x), _fix_unicode(x)) for x in sorted(iterable)]
if select_one:
c.insert(0, ("", "-- Select one --"))
return c
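# Illustrative example (assuming _fix_unicode leaves plain ASCII unchanged):
#   _make_choices(["B", "A"], select_one=True)
#     -> [("", "-- Select one --"), ("A", "A"), ("B", "B")]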
def get_filters_from_args(args) -> Filters:
filters = Filters()
def filter_value(filter_key):
filter_value_key = filter_key + "_value"
if filter_key in args:
filter_value_str = args.get(filter_value_key, "")
if filter_value_str == "0":
return False
elif filter_value_str == "1":
return True
else:
raise InvalidArgumentsError("{0} must be 0 or 1".format(filter_value_key))
filters.active = filter_value("active")
filters.disable = filter_value("disable")
filters.oasis = filter_value("oasis")
if "gridtype" in args:
gridtype_1, gridtype_2 = args.get("gridtype_1", ""), args.get("gridtype_2", "")
if gridtype_1 == "on" and gridtype_2 == "on":
pass
elif gridtype_1 == "on":
filters.grid_type = GRIDTYPE_1
elif gridtype_2 == "on":
filters.grid_type = GRIDTYPE_2
else:
raise InvalidArgumentsError("gridtype_1 or gridtype_2 or both must be \"on\"")
if "service_hidden_value" in args: # note no "service_hidden" args
if args["service_hidden_value"] == "0":
filters.service_hidden = False
elif args["service_hidden_value"] == "1":
filters.service_hidden = True
else:
raise InvalidArgumentsError("service_hidden_value must be 0 or 1")
if "downtime_attrs_showpast" in args:
# doesn't make sense for rgsummary but will be ignored anyway
try:
v = args["downtime_attrs_showpast"]
if v == "all":
filters.past_days = -1
elif not v:
filters.past_days = 0
else:
filters.past_days = int(args["downtime_attrs_showpast"])
except ValueError:
raise InvalidArgumentsError("downtime_attrs_showpast must be an integer, \"\", or \"all\"")
if "has_wlcg" in args:
filters.has_wlcg = True
# 2 ways to filter by a key like "facility", "service", "sc", "site", etc.:
# - either pass KEY_1=on, KEY_2=on, etc.
# - pass KEY_sel[]=1, KEY_sel[]=2, etc. (multiple KEY_sel[] args).
for filter_key, filter_list, description in [
("facility", filters.facility_id, "facility ID"),
("rg", filters.rg_id, "resource group ID"),
("service", filters.service_id, "service ID"),
("sc", filters.support_center_id, "support center ID"),
("site", filters.site_id, "site ID"),
("vo", filters.vo_id, "VO ID"),
("voown", filters.voown_id, "VO owner ID"),
]:
if filter_key in args:
pat = re.compile(r"{0}_(\d+)".format(filter_key))
arg_sel = "{0}_sel[]".format(filter_key)
for k, v in args.items():
if k == arg_sel:
try:
filter_list.append(int(v))
except ValueError:
raise InvalidArgumentsError("{0}={1}: must be int".format(k,v))
elif pat.match(k):
m = pat.match(k)
filter_list.append(int(m.group(1)))
if not filter_list:
raise InvalidArgumentsError("at least one {0} must be specified"
" via the syntax <code>{1}_<b>ID</b>=on</code>"
" or <code>{1}_sel[]=<b>ID</b></code>."
" (These may be specified multiple times for multiple IDs.)"\
.format(description, filter_key))
if filters.voown_id:
filters.populate_voown_name(global_data.get_vos_data().get_vo_id_to_name())
return filters
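# Hedged sketch of the two accepted query syntaxes (IDs and values hypothetical);
# `args` is the request's query-arg mapping (e.g. flask.request.args):
#   ?active=on&active_value=1        -> filters.active = True
#   ?gridtype=on&gridtype_1=on       -> filters.grid_type = GRIDTYPE_1
#   ?facility=on&facility_10023=on   -> filters.facility_id = [10023]
#   ?vo=on&vo_sel[]=35&vo_sel[]=71   -> filters.vo_id = [35, 71]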
def _get_xml_or_fail(getter_function, args):
try:
filters = get_filters_from_args(args)
except InvalidArgumentsError as e:
return Response("Invalid arguments: " + str(e), status=400)
return Response(
to_xml_bytes(getter_function(_get_authorized(), filters)),
mimetype="text/xml"
)
def _get_authorized():
"""
Determine if the client is authorized
returns: True if authorized, False otherwise
"""
global app
# Loop through looking for all of the creds
for key, value in request.environ.items():
if key.startswith('GRST_CRED_AURI_') and value.startswith("dn:"):
# HTTP unquote the DN:
client_dn = urllib.parse.unquote_plus(value)
# Get list of authorized DNs
authorized_dns = global_data.get_dns()
            # Authorized DNs should be a set or dict that supports the "in" operator
if client_dn[3:] in authorized_dns: # "dn:" is at the beginning of the DN
if app and app.logger:
app.logger.info("Authorized %s", client_dn)
return True
else:
if app and app.logger:
app.logger.debug("Rejected %s", client_dn)
# If it gets here, then it is not authorized
return default_authorized
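# Illustrative environment value (the DN itself is made up): GridSite-style auth
# exposes client credentials as, e.g.,
#   GRST_CRED_AURI_0 = "dn:/DC=org/DC=example/OU=People/CN=Jane%20Doe"
# urllib.parse.unquote_plus() yields "dn:/DC=org/DC=example/OU=People/CN=Jane Doe",
# and the leading "dn:" (3 characters) is stripped before comparing the DN against
# global_data.get_dns().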
if __name__ == '__main__':
if "--auth" in sys.argv[1:]:
default_authorized = True
logging.basicConfig(level=logging.DEBUG)
app.run(debug=True, use_reloader=True)
else:
root = logging.getLogger()
root.addHandler(flask.logging.default_handler)
| 40.318008 | 125 | 0.632994 |
0a8227868933e098592b860627b4df5c82d7f0e5
| 28,675 |
py
|
Python
|
cooltools/api/snipping.py
|
gfudenberg/cooltools
|
2c5efcfa2810414f5e1cfeba8806b23d626abaa2
|
[
"MIT"
] | null | null | null |
cooltools/api/snipping.py
|
gfudenberg/cooltools
|
2c5efcfa2810414f5e1cfeba8806b23d626abaa2
|
[
"MIT"
] | null | null | null |
cooltools/api/snipping.py
|
gfudenberg/cooltools
|
2c5efcfa2810414f5e1cfeba8806b23d626abaa2
|
[
"MIT"
] | null | null | null |
from functools import partial
import warnings
import numpy as np
import pandas as pd
import bioframe
from ..lib.checks import (
is_compatible_viewframe,
is_cooler_balanced,
is_valid_expected,
)
from ..lib.common import assign_regions, make_cooler_view
from ..lib.numutils import LazyToeplitz
import multiprocessing
def expand_align_features(features_df, flank, resolution, format="bed"):
"""Short summary.
Parameters
----------
features_df : pd.DataFrame
Dataframe with feature coordinates.
flank : int
Flank size to add to the central bin of each feature.
resolution : int
Size of the bins to use.
format : str
"bed" or "bedpe" format: has to have 'chrom', 'start', 'end'
        or 'chrom1', 'start1', 'end1', 'chrom2', 'start2', 'end2' columns, respectively.
Returns
-------
pd.DataFrame
DataFrame with features with new columns
"center", "orig_start" "orig_end"
or "center1", "orig_start1", "orig_end1",
"center2", "orig_start2", "orig_rank_end2", depending on format.
"""
features_df = features_df.copy()
if format == "bed":
features_df[["orig_start", "orig_end"]] = features_df[["start", "end"]]
features_df["center"] = (features_df["start"] + features_df["end"]) / 2
features_df["lo"] = (
np.floor(features_df["center"] / resolution) - flank // resolution
).astype(int)
features_df["hi"] = (
np.floor(features_df["center"] / resolution) + flank // resolution + 1
).astype(int)
features_df["start"] = features_df["lo"] * resolution
features_df["end"] = features_df["hi"] * resolution
elif format == "bedpe":
features_df[
["orig_start1", "orig_end1", "orig_start2", "orig_end2"]
] = features_df[["start1", "end1", "start2", "end2"]]
features_df["center1"] = (features_df["start1"] + features_df["end1"]) / 2
features_df["center2"] = (features_df["start2"] + features_df["end2"]) / 2
features_df["lo1"] = (
np.floor(features_df["center1"] / resolution) - flank // resolution
).astype(int)
features_df["hi1"] = (
np.floor(features_df["center1"] / resolution) + flank // resolution + 1
).astype(int)
features_df["start1"] = features_df["lo1"] * resolution
features_df["end1"] = features_df["hi1"] * resolution
features_df["lo2"] = (
np.floor(features_df["center2"] / resolution) - flank // resolution
).astype(int)
features_df["hi2"] = (
np.floor(features_df["center2"] / resolution) + flank // resolution + 1
).astype(int)
features_df["start2"] = features_df["lo2"] * resolution
features_df["end2"] = features_df["hi2"] * resolution
return features_df
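# Hedged usage sketch (toy values, not from the library's docs or tests): a 1 bp
# anchor expanded by a 10 kb flank on a 5 kb binning becomes a bin-aligned window.
# The helper name `_example_expand_align_features` is ours, added for illustration.
def _example_expand_align_features():
    toy = pd.DataFrame({"chrom": ["chr1"], "start": [102_000], "end": [102_001]})
    out = expand_align_features(toy, flank=10_000, resolution=5_000, format="bed")
    # center = 102000.5 -> bins [18, 23) at 5 kb -> start 90_000, end 115_000
    assert (int(out.loc[0, "start"]), int(out.loc[0, "end"])) == (90_000, 115_000)
    return out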
def make_bin_aligned_windows(
binsize,
chroms,
centers_bp,
flank_bp=0,
region_start_bp=0,
ignore_index=False,
):
"""
Convert genomic loci into bin spans on a fixed bin-segmentation of a
genomic region. Window limits are adjusted to align with bin edges.
Parameters
-----------
binsize : int
Bin size (resolution) in base pairs.
chroms : 1D array-like
Column of chromosome names.
centers_bp : 1D or nx2 array-like
If 1D, center points of each window. If 2D, the starts and ends.
flank_bp : int
Distance in base pairs to extend windows on either side.
region_start_bp : int, optional
If region is a subset of a chromosome, shift coordinates by this amount.
Default is 0.
Returns
-------
DataFrame with columns:
'chrom' - chromosome
'start', 'end' - window limits in base pairs
'lo', 'hi' - window limits in bins
"""
if not (flank_bp % binsize == 0):
raise ValueError("Flanking distance must be divisible by the bin size.")
if isinstance(chroms, pd.Series) and not ignore_index:
index = chroms.index
else:
index = None
chroms = np.asarray(chroms)
centers_bp = np.asarray(centers_bp)
if len(centers_bp.shape) == 2:
left_bp = centers_bp[:, 0]
right_bp = centers_bp[:, 1]
else:
left_bp = right_bp = centers_bp
if np.any(left_bp > right_bp):
raise ValueError("Found interval with end > start.")
left = left_bp - region_start_bp
right = right_bp - region_start_bp
left_bin = (left / binsize).astype(int)
right_bin = (right / binsize).astype(int)
flank_bin = flank_bp // binsize
lo = left_bin - flank_bin
hi = right_bin + flank_bin + 1
windows = pd.DataFrame(index=index)
windows["chrom"] = chroms
windows["start"] = lo * binsize
windows["end"] = hi * binsize
windows["lo"] = lo
windows["hi"] = hi
return windows
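# Hedged usage sketch (toy values; the helper name below is ours): two point
# features turned into bin-aligned windows with a 20 kb flank on 10 kb bins.
def _example_make_bin_aligned_windows():
    windows = make_bin_aligned_windows(
        binsize=10_000,
        chroms=["chr1", "chr2"],
        centers_bp=[25_000, 105_000],
        flank_bp=20_000,
    )
    # first feature: bin 2 +/- 2 bins of flank -> lo=0, hi=5 -> 0..50_000 bp
    assert (int(windows.loc[0, "lo"]), int(windows.loc[0, "hi"])) == (0, 5)
    return windows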
def _pileup(data_select, data_snip, arg):
support, feature_group = arg
# return empty snippets if region is unannotated:
if len(support) == 0:
if "start" in feature_group: # on-diagonal off-region case:
lo = feature_group["lo"].values
hi = feature_group["hi"].values
s = hi - lo # Shape of individual snips
assert (
s.max() == s.min()
), "Pileup accepts only the windows of the same size"
stack = np.full((s[0], s[0], len(feature_group)), np.nan)
else: # off-diagonal off-region case:
lo1 = feature_group["lo1"].values
hi1 = feature_group["hi1"].values
lo2 = feature_group["lo2"].values
hi2 = feature_group["hi2"].values
s1 = hi1 - lo1 # Shape of individual snips
            s2 = hi2 - lo2
assert (
s1.max() == s1.min()
), "Pileup accepts only the windows of the same size"
assert (
s2.max() == s2.min()
), "Pileup accepts only the windows of the same size"
stack = np.full((s1[0], s2[0], len(feature_group)), np.nan)
return stack, feature_group["_rank"].values
# check if support region is on- or off-diagonal
if len(support) == 2:
region1, region2 = support
else:
region1 = region2 = support
# check if features are on- or off-diagonal
if "start" in feature_group:
s1 = feature_group["start"].values
e1 = feature_group["end"].values
s2, e2 = s1, e1
else:
s1 = feature_group["start1"].values
e1 = feature_group["end1"].values
s2 = feature_group["start2"].values
e2 = feature_group["end2"].values
data = data_select(region1, region2)
stack = list(map(partial(data_snip, data, region1, region2), zip(s1, e1, s2, e2)))
return np.dstack(stack), feature_group["_rank"].values
def pileup_legacy(features, data_select, data_snip, map=map):
"""
Handles on-diagonal and off-diagonal cases.
Parameters
----------
features : DataFrame
Table of features. Requires columns ['chrom', 'start', 'end'].
        Or ['chrom1', 'start1', 'end1', 'chrom2', 'start2', 'end2'].
start, end are bp coordinates.
lo, hi are bin coordinates.
data_select : callable
Callable that takes a region as argument and returns
the data, mask and bin offset of a support region
data_snip : callable
Callable that takes data, mask and a 2D bin span (lo1, hi1, lo2, hi2)
and returns a snippet from the selected support region
"""
if features["region"].isnull().any():
warnings.warn(
"Some features do not have view regions assigned! Some snips will be empty."
)
features = features.copy()
features["region"] = features["region"].fillna(
""
    )  # fill in unannotated view regions with empty string
features["_rank"] = range(len(features))
# cumul_stack = []
# orig_rank = []
cumul_stack, orig_rank = zip(
*map(
partial(_pileup, data_select, data_snip),
# Note that unannotated regions will form a separate group
features.groupby("region", sort=False),
)
)
# Restore the original rank of the input features
cumul_stack = np.dstack(cumul_stack)
orig_rank = np.concatenate(orig_rank)
idx = np.argsort(orig_rank)
cumul_stack = cumul_stack[:, :, idx]
return cumul_stack
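# Hedged sketch of the callable contract (objects below are placeholders): any
# pair of callables with the signatures
#   data_select(region1, region2) -> data
#   data_snip(data, region1, region2, (s1, e1, s2, e2)) -> 2D snippet
# can drive pileup_legacy; the snipper classes defined below provide exactly that:
#   snipper = CoolerSnipper(clr, view_df=view_df)
#   stack = pileup_legacy(features, snipper.select, snipper.snip)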
def pair_sites(sites, separation, slop):
"""
Create "hand" intervals to the right and to the left of each site.
Then join right hands with left hands to pair sites together.
"""
from bioframe.tools import tsv, bedtools
mids = (sites["start"] + sites["end"]) // 2
left_hand = sites[["chrom"]].copy()
left_hand["start"] = mids - separation - slop
left_hand["end"] = mids - separation + slop
left_hand["site_id"] = left_hand.index
left_hand["direction"] = "L"
left_hand["snip_mid"] = mids
left_hand["snip_strand"] = sites["strand"]
right_hand = sites[["chrom"]].copy()
right_hand["start"] = mids + separation - slop
right_hand["end"] = mids + separation + slop
right_hand["site_id"] = right_hand.index
right_hand["direction"] = "R"
right_hand["snip_mid"] = mids
right_hand["snip_strand"] = sites["strand"]
# ignore out-of-bounds hands
mask = (left_hand["start"] > 0) & (right_hand["start"] > 0)
left_hand = left_hand[mask].copy()
right_hand = right_hand[mask].copy()
# intersect right hands (left anchor site)
# with left hands (right anchor site)
with tsv(right_hand) as R, tsv(left_hand) as L:
out = bedtools.intersect(a=R.name, b=L.name, wa=True, wb=True)
out.columns = [c + "_r" for c in right_hand.columns] + [
c + "_l" for c in left_hand.columns
]
return out
class CoolerSnipper:
def __init__(self, clr, cooler_opts=None, view_df=None, min_diag=2):
# get chromosomes from cooler, if view_df not specified:
if view_df is None:
view_df = make_cooler_view(clr)
else:
# Make sure view_df is a proper viewframe
try:
_ = is_compatible_viewframe(
view_df,
clr,
check_sorting=True,
raise_errors=True,
)
except Exception as e:
raise ValueError(
"view_df is not a valid viewframe or incompatible"
) from e
self.view_df = view_df.set_index("name")
self.clr = clr
self.binsize = self.clr.binsize
self.offsets = {}
self.diag_indicators = {}
self.pad = True
self.cooler_opts = {} if cooler_opts is None else cooler_opts
self.cooler_opts.setdefault("sparse", True)
if "balance" in self.cooler_opts:
if self.cooler_opts["balance"] is True:
self.clr_weight_name = "weight"
elif (
self.cooler_opts["balance"] is False
or self.cooler_opts["balance"] is None
):
self.clr_weight_name = None
else:
self.clr_weight_name = self.cooler_opts["balance"]
else:
self.clr_weight_name = "weight"
self.min_diag = min_diag
def select(self, region1, region2):
region1_coords = self.view_df.loc[region1]
region2_coords = self.view_df.loc[region2]
self.offsets[region1] = self.clr.offset(region1_coords) - self.clr.offset(
region1_coords[0]
)
self.offsets[region2] = self.clr.offset(region2_coords) - self.clr.offset(
region2_coords[0]
)
matrix = self.clr.matrix(**self.cooler_opts).fetch(
region1_coords, region2_coords
)
if self.clr_weight_name:
self._isnan1 = np.isnan(
self.clr.bins()[self.clr_weight_name].fetch(region1_coords).values
)
self._isnan2 = np.isnan(
self.clr.bins()[self.clr_weight_name].fetch(region2_coords).values
)
else:
self._isnan1 = np.zeros_like(
self.clr.bins()["start"].fetch(region1_coords).values
).astype(bool)
self._isnan2 = np.zeros_like(
self.clr.bins()["start"].fetch(region2_coords).values
).astype(bool)
if self.cooler_opts["sparse"]:
matrix = matrix.tocsr()
if self.min_diag is not None:
diags = np.arange(np.diff(self.clr.extent(region1_coords)), dtype=np.int32)
self.diag_indicators[region1] = LazyToeplitz(-diags, diags)
return matrix
def snip(self, matrix, region1, region2, tup):
s1, e1, s2, e2 = tup
offset1 = self.offsets[region1]
offset2 = self.offsets[region2]
binsize = self.binsize
lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1
lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2
assert hi1 >= 0
assert hi2 >= 0
m, n = matrix.shape
dm, dn = hi1 - lo1, hi2 - lo2
out_of_bounds = False
pad_left = pad_right = pad_bottom = pad_top = None
if lo1 < 0:
pad_bottom = -lo1
out_of_bounds = True
if lo2 < 0:
pad_left = -lo2
out_of_bounds = True
if hi1 > m:
pad_top = dm - (hi1 - m)
out_of_bounds = True
if hi2 > n:
pad_right = dn - (hi2 - n)
out_of_bounds = True
if out_of_bounds:
i0 = max(lo1, 0)
i1 = min(hi1, m)
j0 = max(lo2, 0)
j1 = min(hi2, n)
snippet = np.full((dm, dn), np.nan)
# snippet[pad_bottom:pad_top,
# pad_left:pad_right] = matrix[i0:i1, j0:j1].toarray()
else:
snippet = matrix[lo1:hi1, lo2:hi2].toarray().astype("float")
snippet[self._isnan1[lo1:hi1], :] = np.nan
snippet[:, self._isnan2[lo2:hi2]] = np.nan
if self.min_diag is not None:
D = self.diag_indicators[region1][lo1:hi1, lo2:hi2] < self.min_diag
snippet[D] = np.nan
return snippet
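# Hedged manual-use sketch (the cooler object and region names are placeholders;
# with the default view from make_cooler_view, region names are chromosome names):
#   snipper = CoolerSnipper(clr)
#   matrix = snipper.select("chr1", "chr1")
#   snip = snipper.snip(matrix, "chr1", "chr1", (s1, e1, s2, e2))  # bp spans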
class ObsExpSnipper:
def __init__(
self,
clr,
expected,
cooler_opts=None,
view_df=None,
min_diag=2,
expected_value_col="balanced.avg",
):
self.clr = clr
self.expected = expected
self.expected_value_col = expected_value_col
# get chromosomes from cooler, if view_df not specified:
if view_df is None:
view_df = make_cooler_view(clr)
else:
# Make sure view_df is a proper viewframe
try:
_ = is_compatible_viewframe(
view_df,
clr,
check_sorting=True,
raise_errors=True,
)
except Exception as e:
raise ValueError(
"view_df is not a valid viewframe or incompatible"
) from e
# make sure expected is compatible
try:
_ = is_valid_expected(
expected,
"cis",
view_df,
verify_cooler=clr,
expected_value_cols=[
self.expected_value_col,
],
raise_errors=True,
)
except Exception as e:
raise ValueError("provided expected is not valid") from e
self.view_df = view_df.set_index("name")
self.binsize = self.clr.binsize
self.offsets = {}
self.diag_indicators = {}
self.pad = True
self.cooler_opts = {} if cooler_opts is None else cooler_opts
self.cooler_opts.setdefault("sparse", True)
if "balance" in self.cooler_opts:
if self.cooler_opts["balance"] is True:
self.clr_weight_name = "weight"
elif (
self.cooler_opts["balance"] is False
or self.cooler_opts["balance"] is None
):
self.clr_weight_name = None
else:
self.clr_weight_name = self.cooler_opts["balance"]
else:
self.clr_weight_name = "weight"
self.min_diag = min_diag
def select(self, region1, region2):
if not region1 == region2:
raise ValueError("ObsExpSnipper is implemented for cis contacts only.")
region1_coords = self.view_df.loc[region1]
region2_coords = self.view_df.loc[region2]
self.offsets[region1] = self.clr.offset(region1_coords) - self.clr.offset(
region1_coords[0]
)
self.offsets[region2] = self.clr.offset(region2_coords) - self.clr.offset(
region2_coords[0]
)
matrix = self.clr.matrix(**self.cooler_opts).fetch(
region1_coords, region2_coords
)
if self.cooler_opts["sparse"]:
matrix = matrix.tocsr()
if self.clr_weight_name:
self._isnan1 = np.isnan(
self.clr.bins()[self.clr_weight_name].fetch(region1_coords).values
)
self._isnan2 = np.isnan(
self.clr.bins()[self.clr_weight_name].fetch(region2_coords).values
)
else:
self._isnan1 = np.zeros_like(
self.clr.bins()["start"].fetch(region1_coords).values
).astype(bool)
self._isnan2 = np.zeros_like(
self.clr.bins()["start"].fetch(region2_coords).values
).astype(bool)
self._expected = LazyToeplitz(
self.expected.groupby(["region1", "region2"])
.get_group((region1, region2))[self.expected_value_col]
.values
)
if self.min_diag is not None:
diags = np.arange(np.diff(self.clr.extent(region1_coords)), dtype=np.int32)
self.diag_indicators[region1] = LazyToeplitz(-diags, diags)
return matrix
def snip(self, matrix, region1, region2, tup):
s1, e1, s2, e2 = tup
offset1 = self.offsets[region1]
offset2 = self.offsets[region2]
binsize = self.binsize
lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1
lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2
assert hi1 >= 0
assert hi2 >= 0
m, n = matrix.shape
dm, dn = hi1 - lo1, hi2 - lo2
out_of_bounds = False
pad_left = pad_right = pad_bottom = pad_top = None
if lo1 < 0:
pad_bottom = -lo1
out_of_bounds = True
if lo2 < 0:
pad_left = -lo2
out_of_bounds = True
if hi1 > m:
pad_top = dm - (hi1 - m)
out_of_bounds = True
if hi2 > n:
pad_right = dn - (hi2 - n)
out_of_bounds = True
if out_of_bounds:
i0 = max(lo1, 0)
i1 = min(hi1, m)
j0 = max(lo2, 0)
j1 = min(hi2, n)
return np.full((dm, dn), np.nan)
# snippet[pad_bottom:pad_top,
# pad_left:pad_right] = matrix[i0:i1, j0:j1].toarray()
else:
snippet = matrix[lo1:hi1, lo2:hi2].toarray().astype("float")
snippet[self._isnan1[lo1:hi1], :] = np.nan
snippet[:, self._isnan2[lo2:hi2]] = np.nan
e = self._expected[lo1:hi1, lo2:hi2]
if self.min_diag is not None:
D = self.diag_indicators[region1][lo1:hi1, lo2:hi2] < self.min_diag
snippet[D] = np.nan
return snippet / e
class ExpectedSnipper:
def __init__(
self, clr, expected, view_df=None, min_diag=2, expected_value_col="balanced.avg"
):
self.clr = clr
self.expected = expected
self.expected_value_col = expected_value_col
# get chromosomes from cooler, if view_df not specified:
if view_df is None:
view_df = make_cooler_view(clr)
else:
# Make sure view_df is a proper viewframe
try:
_ = is_compatible_viewframe(
view_df,
clr,
check_sorting=True,
raise_errors=True,
)
except Exception as e:
raise ValueError(
"view_df is not a valid viewframe or incompatible"
) from e
# make sure expected is compatible
try:
_ = is_valid_expected(
expected,
"cis",
view_df,
verify_cooler=clr,
expected_value_cols=[
self.expected_value_col,
],
raise_errors=True,
)
except Exception as e:
raise ValueError("provided expected is not valid") from e
self.view_df = view_df.set_index("name")
self.binsize = self.clr.binsize
self.offsets = {}
self.diag_indicators = {}
self.min_diag = min_diag
def select(self, region1, region2):
if not region1 == region2:
raise ValueError("ExpectedSnipper is implemented for cis contacts only.")
region1_coords = self.view_df.loc[region1]
region2_coords = self.view_df.loc[region2]
self.offsets[region1] = self.clr.offset(region1_coords) - self.clr.offset(
region1_coords[0]
)
self.offsets[region2] = self.clr.offset(region2_coords) - self.clr.offset(
region2_coords[0]
)
self.m = np.diff(self.clr.extent(region1_coords))
self.n = np.diff(self.clr.extent(region2_coords))
self._expected = LazyToeplitz(
self.expected.groupby(["region1", "region2"])
.get_group((region1, region2))[self.expected_value_col]
.values
)
if self.min_diag is not None:
diags = np.arange(np.diff(self.clr.extent(region1_coords)), dtype=np.int32)
self.diag_indicators[region1] = LazyToeplitz(-diags, diags)
return self._expected
def snip(self, exp, region1, region2, tup):
s1, e1, s2, e2 = tup
offset1 = self.offsets[region1]
offset2 = self.offsets[region2]
binsize = self.binsize
lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1
lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2
assert hi1 >= 0
assert hi2 >= 0
dm, dn = hi1 - lo1, hi2 - lo2
if lo1 < 0 or lo2 < 0 or hi1 > self.m or hi2 > self.n:
return np.full((dm, dn), np.nan)
snippet = exp[lo1:hi1, lo2:hi2]
if self.min_diag is not None:
D = self.diag_indicators[region1][lo1:hi1, lo2:hi2] < self.min_diag
snippet[D] = np.nan
return snippet
def pileup(
clr,
features_df,
view_df=None,
expected_df=None,
expected_value_col="balanced.avg",
flank=100_000,
min_diag="auto",
clr_weight_name="weight",
nproc=1,
):
"""
Pileup features over the cooler.
Parameters
----------
clr : cooler.Cooler
Cooler with Hi-C data
features_df : pd.DataFrame
Dataframe in bed or bedpe format: has to have 'chrom', 'start', 'end'
        or 'chrom1', 'start1', 'end1', 'chrom2', 'start2', 'end2' columns.
view_df : pd.DataFrame
Dataframe with the genomic view for this operation (has to match the
expected_df, if provided)
expected_df : pd.DataFrame
Dataframe with the expected level of interactions at different
genomic separations
expected_value_col : str
Name of the column in expected used for normalizing.
flank : int
How much to flank the center of the features by, in bp
min_diag: str or int
All diagonals of the matrix below this value are ignored. 'auto'
tries to extract the value used during the matrix balancing,
if it fails defaults to 2
clr_weight_name : str
Value of the column that contains the balancing weights
    nproc : int
        How many cores to use
Returns
-------
np.ndarray: a stackup of all snippets corresponding to the features
"""
if {"chrom", "start", "end"}.issubset(features_df.columns):
feature_type = "bed"
elif {"chrom1", "start1", "end1", "chrom2", "start2", "end1"}.issubset(
features_df.columns
):
feature_type = "bedpe"
else:
raise ValueError("Unknown feature_df format")
features_df = assign_regions(features_df, view_df)
# TODO: switch to bioframe.assign_view upon update
if flank is not None:
features_df = expand_align_features(
features_df, flank, clr.binsize, format=feature_type
)
else:
features_df = features_df.copy()
if feature_type == "bed":
features_df["lo"] = (features_df["start"] / clr.binsize).astype(int)
features_df["hi"] = (features_df["end"] / clr.binsize).astype(int)
else:
features_df["lo1"] = (features_df["start1"] / clr.binsize).astype(int)
features_df["hi1"] = (features_df["end1"] / clr.binsize).astype(int)
features_df["lo2"] = (features_df["start2"] / clr.binsize).astype(int)
features_df["hi2"] = (features_df["end2"] / clr.binsize).astype(int)
if view_df is None:
view_df = make_cooler_view(clr)
else:
try:
_ = is_compatible_viewframe(
view_df,
clr,
check_sorting=True,
raise_errors=True,
)
except Exception as e:
raise ValueError("view_df is not a valid viewframe or incompatible") from e
if clr_weight_name not in [None, False]:
# check if cooler is balanced
try:
_ = is_cooler_balanced(clr, clr_weight_name, raise_errors=True)
except Exception as e:
raise ValueError(
f"provided cooler is not balanced or {clr_weight_name} is missing"
) from e
if min_diag == "auto" and clr_weight_name not in [None, False]:
min_diag = dict(clr.open()[f"bins/{clr_weight_name}"].attrs).get(
"ignore_diags", 2
)
elif clr_weight_name in [None, False]:
min_diag = 2
# Find region offsets and then subtract them from the feature extents
region_offsets = view_df[["chrom", "start", "end"]].apply(clr.offset, axis=1)
region_offsets_dict = dict(zip(view_df["name"].values, region_offsets))
features_df["region_offset"] = features_df["region"].replace(region_offsets_dict)
if feature_type == "bed":
features_df[["lo", "hi"]] = (
features_df[["lo", "hi"]]
.subtract(
features_df["region_offset"].fillna(0),
axis=0,
)
.astype(int)
)
else:
features_df[["lo1", "hi1"]] = (
features_df[["lo1", "hi1"]]
.subtract(
features_df["region_offset"].fillna(0),
axis=0,
)
.astype(int)
)
features_df[["lo2", "hi2"]] = (
features_df[["lo2", "hi2"]]
.subtract(
features_df["region_offset"].fillna(0),
axis=0,
)
.astype(int)
)
# TODO move view, expected and other checks in the user-facing functions, add tests
if expected_df is None:
snipper = CoolerSnipper(
clr,
view_df=view_df,
cooler_opts={"balance": clr_weight_name},
min_diag=min_diag,
)
else:
snipper = ObsExpSnipper(
clr,
expected_df,
view_df=view_df,
cooler_opts={"balance": clr_weight_name},
min_diag=min_diag,
expected_value_col=expected_value_col,
)
if nproc > 1:
pool = multiprocessing.Pool(nproc)
mymap = pool.map
else:
mymap = map
stack = pileup_legacy(features_df, snipper.select, snipper.snip, map=mymap)
if feature_type == "bed":
stack = np.nansum([stack, np.transpose(stack, axes=(1, 0, 2))], axis=0)
if nproc > 1:
pool.close()
return stack
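# Hedged end-to-end sketch (file paths and parameters hypothetical):
#   import cooler
#   clr = cooler.Cooler("sample.mcool::/resolutions/10000")
#   peaks = pd.read_csv("peaks.bed", sep="\t", names=["chrom", "start", "end"])
#   stack = pileup(clr, peaks, flank=100_000)
#   avg = np.nanmean(stack, axis=2)  # average over features -> 2D pileup
# With flank=100_000 at 10 kb resolution each snippet is 21 x 21 bins.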
| 34.631643 | 88 | 0.569416 |
8674819845a3832062d2edd61e5b75b7bb627a1e
| 26,653 |
py
|
Python
|
xbee/tornado/tests/test_ieee.py
|
PowerFlex/python-xbee-intercept
|
0c07f3a5f16f479ad7c925cd31638598030cf5a7
|
[
"MIT"
] | null | null | null |
xbee/tornado/tests/test_ieee.py
|
PowerFlex/python-xbee-intercept
|
0c07f3a5f16f479ad7c925cd31638598030cf5a7
|
[
"MIT"
] | null | null | null |
xbee/tornado/tests/test_ieee.py
|
PowerFlex/python-xbee-intercept
|
0c07f3a5f16f479ad7c925cd31638598030cf5a7
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python
"""
test_ieee.py
By Paul Malmsten, 2010
[email protected]
Tests the XBee (IEEE 802.15.4) implementation class for XBee API compliance
"""
import unittest
from xbee.tornado import has_tornado
if not has_tornado:
raise unittest.SkipTest("Requires Tornado")
from xbee.tests.Fake import Serial # noqa
from xbee.tornado.ieee import XBee # noqa
from xbee.frame import APIFrame # noqa
from xbee.python2to3 import intToByte, stringToBytes # noqa
from tornado.testing import AsyncTestCase, gen_test # noqa
from tornado.test.util import unittest # noqa
import sys # noqa
import traceback # noqa
class InitXBee(AsyncTestCase):
"""
Base initalization class
"""
def setUp(self):
"""
Initialize XBee object
"""
super(InitXBee, self).setUp()
self.xbee = XBee(None)
class TestBuildCommand(InitXBee):
"""
_build_command should properly build a command packet
"""
def test_build_at_data_mismatch(self):
"""
if not enough or incorrect data is provided, an exception should
be raised.
"""
try:
self.xbee._build_command("at")
except KeyError:
# Test passes
return
# No exception? Fail.
self.fail(
"An exception was not raised with improper data supplied"
)
def test_build_at_data_len_mismatch(self):
"""
if data of incorrect length is provided, an exception should be
raised
"""
try:
self.xbee._build_command("at", frame_id="AB", command="MY")
except ValueError:
# Test passes
return
# No exception? Fail.
self.fail(
"An exception was not raised with improper data length"
)
def test_build_at(self):
"""
_build_command should build a valid at command packet which has
no parameter data to be saved
"""
at_command = stringToBytes("MY")
frame = intToByte(43)
data = self.xbee._build_command(
"at",
frame_id=frame,
command=at_command
)
expected_data = b'\x08+MY'
self.assertEqual(data, expected_data)
def test_build_at_with_default(self):
"""
_build_command should build a valid at command packet which has
no parameter data to be saved and no frame specified (the
default value of \x00 should be used)
"""
at_command = stringToBytes("MY")
data = self.xbee._build_command("at", command=at_command)
expected_data = b'\x08\x00MY'
self.assertEqual(data, expected_data)
class TestSplitResponse(InitXBee):
"""
_split_response should properly split a response packet
"""
def test_unrecognized_response(self):
"""
if a response begins with an unrecognized id byte,
_split_response should raise an exception
"""
data = b'\x23\x00\x00\x00'
try:
self.xbee._split_response(data)
except KeyError:
# Passes
return
# Test Fails
self.fail()
def test_transmit_packet_received(self):
"""
if a response begins with an ID that is unrecognized as a response
ID but is a valid transmission ID, show a helpful error indicating
that a device may be in command mode.
"""
from xbee.backend.base import CommandFrameException
data = b'\x01\x00\x00\x00'
try:
self.xbee._split_response(data)
except CommandFrameException:
# Passes
return
# Test Fails
self.fail()
def test_bad_data_long(self):
"""
if a response doesn't match the specification's layout,
_split_response should raise an exception
"""
# Over length
data = b'\x8a\x00\x00\x00'
self.assertRaises(ValueError, self.xbee._split_response, data)
def test_bad_data_short(self):
"""
if a response doesn't match the specification's layout,
_split_response should raise an exception
"""
# Under length
data = b'\x8a'
self.assertRaises(ValueError, self.xbee._split_response, data)
def test_split_status_response(self):
"""
_split_response should properly split a status response packet
"""
data = b'\x8a\x01'
info = self.xbee._split_response(data)
expected_info = {'id': 'status',
'status': b'\x01'}
self.assertEqual(info, expected_info)
def test_split_short_at_response(self):
"""
_split_response should properly split an at_response packet which
has no parameter data
"""
data = b'\x88DMY\x01'
info = self.xbee._split_response(data)
expected_info = {'id': 'at_response',
'frame_id': b'D',
'command': b'MY',
'status': b'\x01'}
self.assertEqual(info, expected_info)
def test_split_at_resp_with_param(self):
"""
_split_response should properly split an at_response packet which
has parameter data
"""
data = b'\x88DMY\x01ABCDEF'
info = self.xbee._split_response(data)
expected_info = {'id': 'at_response',
'frame_id': b'D',
'command': b'MY',
'status': b'\x01',
'parameter': b'ABCDEF'}
self.assertEqual(info, expected_info)
def test_generalized_packet_parsing(self):
"""
_split_response should properly parse packets in a generalized
manner when specified by the protocol definition.
"""
# Temporarily modify parsing rule (taking a backup of the original rule)
parse_rule_orig = self.xbee.api_responses[b"\x88"]["parsing"]
self.xbee.api_responses[b"\x88"]["parsing"] = \
[("parameter", lambda self, orig: b"GHIJKL")]
data = b'\x88DMY\x01ABCDEF'
info = self.xbee._split_response(data)
expected_info = {'id': 'at_response',
'frame_id': b'D',
'command': b'MY',
'status': b'\x01',
'parameter': b'GHIJKL'}
# Restore parsing rule to original
self.xbee.api_responses[b"\x88"]["parsing"] = parse_rule_orig
self.assertEqual(info, expected_info)
class TestParseIOData(InitXBee):
"""
XBee class should properly parse IO data received from an XBee
device
"""
def test_parse_single_dio(self):
"""
_parse_samples should properly parse a packet containing a single
sample of only digital io data
"""
# One sample, ADC disabled and DIO8 enabled, DIO 0-7 enabled
header = b'\x01\x01\xFF'
# First 7 bits ignored, DIO8 high, DIO 0-7 high
sample = b'\x01\xFF'
data = header + sample
expected_results = [{'dio-0': True,
'dio-1': True,
'dio-2': True,
'dio-3': True,
'dio-4': True,
'dio-5': True,
'dio-6': True,
'dio-7': True,
'dio-8': True}]
results = self.xbee._parse_samples(data)
self.assertEqual(results, expected_results)
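    # Worked layout of the fake packet above (as exercised by these tests):
    #   header b'\x01\x01\xFF': 1 sample; channel mask 0x01FF -> ADC5..ADC0 clear,
    #   DIO8 set (bit 8), DIO7..DIO0 set (bits 7..0)
    #   sample b'\x01\xFF': 2 bytes of DIO state -> DIO8 high, DIO0-7 high
    # Each enabled ADC channel would add a further 2 bytes per sample.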
def test_parse_single_dio_again(self):
"""
_parse_samples should properly parse a packet containing a single
sample of only digital io data, which alternates between on and
off
"""
# One sample, ADC disabled and DIO8 enabled, DIO 0-7 enabled
header = b'\x01\x01\xFF'
# First 7 bits ignored, DIO8 low, DIO 0-7 alternating
sample = b'\x00\xAA'
data = header + sample
expected_results = [{'dio-0': False,
'dio-1': True,
'dio-2': False,
'dio-3': True,
'dio-4': False,
'dio-5': True,
'dio-6': False,
'dio-7': True,
'dio-8': False}]
results = self.xbee._parse_samples(data)
self.assertEqual(results, expected_results)
def test_parse_single_dio_subset(self):
"""
_parse_samples should properly parse a packet containing a single
sample of only digital io data for only a subset of the
available pins
"""
# One sample, ADC disabled
# DIO 1,3,5,7 enabled
header = b'\x01\x00\xAA'
# First 7 bits ignored, DIO8 low, DIO 0-7 alternating
sample = b'\x00\xAA'
data = header + sample
expected_results = [{'dio-1': True,
'dio-3': True,
'dio-5': True,
'dio-7': True}]
results = self.xbee._parse_samples(data)
self.assertEqual(results, expected_results)
def test_parse_single_dio_subset_again(self):
"""
_parse_samples should properly parse a packet containing a single
sample of only digital io data for only a subset of the
available pins
"""
# One sample, ADC disabled
# DIO 0 enabled
header = b'\x01\x00\x01'
# First 7 bits ignored, DIO8 low, DIO 0-7 alternating
sample = b'\x00\xAA'
data = header + sample
expected_results = [{'dio-0': False}]
results = self.xbee._parse_samples(data)
self.assertEqual(results, expected_results)
def test_parse_multiple_dio_subset(self):
"""
_parse_samples should properly parse a packet containing two
samples of only digital io data for one dio line
"""
# Two samples, ADC disabled
# DIO 0 enabled
header = b'\x02\x00\x01'
# First 7 bits ignored, DIO8 low, DIO 0-7 alternating
sample = b'\x00\xAA' + b'\x00\x01'
data = header + sample
expected_results = [{'dio-0': False},
{'dio-0': True}]
results = self.xbee._parse_samples(data)
self.assertEqual(results, expected_results)
def test_parse_multiple_dio(self):
"""
_parse_samples should properly parse a packet containing three
samples of only digital io data
"""
# Three samples, ADC disabled and DIO8 enabled, DIO 0-7 enabled
header = b'\x03\x01\xFF'
# First 7 bits ignored
# First sample: all bits on
# Second sample: alternating bits on
# Third sample: all bits off
sample = b'\x01\xFF' + b'\x00\xAA' + b'\x00\x00'
data = header + sample
expected_results = [{'dio-0': True,
'dio-1': True,
'dio-2': True,
'dio-3': True,
'dio-4': True,
'dio-5': True,
'dio-6': True,
'dio-7': True,
'dio-8': True},
{'dio-0': False,
'dio-1': True,
'dio-2': False,
'dio-3': True,
'dio-4': False,
'dio-5': True,
'dio-6': False,
'dio-7': True,
'dio-8': False},
{'dio-0': False,
'dio-1': False,
'dio-2': False,
'dio-3': False,
'dio-4': False,
'dio-5': False,
'dio-6': False,
'dio-7': False,
'dio-8': False}]
results = self.xbee._parse_samples(data)
self.assertEqual(results, expected_results)
def test_parse_multiple_adc_subset(self):
"""
_parse_samples should parse a data packet containing multiple
samples of adc data from multiple pins in the proper order
"""
# One sample, ADC 0,1 enabled
# DIO disabled
header = b'\x02\x06\x00'
# No dio data
# ADC0 value of 0
# ADC1 value of 255
# ADC0 value of 5
# ADC1 value of 7
sample = b'\x00\x00' + b'\x00\xFF' + b'\x00\x05' + b'\x00\x07'
data = header + sample
expected_results = [{'adc-0': 0,
'adc-1': 255},
{'adc-0': 5,
'adc-1': 7}]
results = self.xbee._parse_samples(data)
self.assertEqual(results, expected_results)
def test_parse_single_dio_adc_subset(self):
"""
_parse_samples should properly parse a packet containing a single
sample of digital and analog io data for only a subset of the
available pins
"""
# One sample, ADC 0 enabled
# DIO 1,3,5,7 enabled
header = b'\x01\x02\xAA'
# First 7 bits ignored, DIO8 low, DIO 0-7 alternating
# ADC0 value of 255
sample = b'\x00\xAA\x00\xFF'
data = header + sample
expected_results = [{'dio-1': True,
'dio-3': True,
'dio-5': True,
'dio-7': True,
'adc-0': 255}]
results = self.xbee._parse_samples(data)
self.assertEqual(results, expected_results)
class TestWriteToDevice(InitXBee):
"""
XBee class should properly write binary data in a valid API
frame to a given serial device, including a valid command packet.
"""
def test_send_at_command(self):
"""
calling send should write a full API frame containing the
API AT command packet to the serial device.
"""
serial_port = Serial()
xbee = XBee(serial_port)
# Send an AT command
xbee.send('at', frame_id=stringToBytes('A'),
command=stringToBytes('MY'))
# Expect a full packet to be written to the device
expected_data = b'\x7E\x00\x04\x08AMY\x10'
result_data = serial_port.get_data_written()
self.assertEqual(result_data, expected_data)
def test_send_at_command_with_param(self):
"""
calling send should write a full API frame containing the
API AT command packet to the serial device.
"""
serial_port = Serial()
xbee = XBee(serial_port)
# Send an AT command
xbee.send(
'at',
frame_id=stringToBytes('A'),
command=stringToBytes('MY'),
parameter=b'\x00\x00'
)
# Expect a full packet to be written to the device
result_data = serial_port.get_data_written()
expected_data = b'\x7E\x00\x06\x08AMY\x00\x00\x10'
self.assertEqual(result_data, expected_data)
class TestSendShorthand(InitXBee):
"""
Tests shorthand for sending commands to an XBee provided by
XBee.__getattr__
"""
def setUp(self):
"""
Prepare a fake device to read from
"""
super(TestSendShorthand, self).setUp()
self.ser = Serial()
self.xbee = XBee(self.ser)
def test_send_at_command(self):
"""
Send an AT command with a shorthand call
"""
# Send an AT command
self.xbee.at(frame_id=stringToBytes('A'), command=stringToBytes('MY'))
# Expect a full packet to be written to the device
result_data = self.ser.get_data_written()
expected_data = b'\x7E\x00\x04\x08AMY\x10'
self.assertEqual(result_data, expected_data)
def test_send_at_command_with_param(self):
"""
calling send should write a full API frame containing the
API AT command packet to the serial device.
"""
# Send an AT command
self.xbee.at(frame_id=stringToBytes('A'), command=stringToBytes('MY'),
parameter=b'\x00\x00')
# Expect a full packet to be written to the device
result_data = self.ser.get_data_written()
expected_data = b'\x7E\x00\x06\x08AMY\x00\x00\x10'
self.assertEqual(result_data, expected_data)
def test_send_tx_with_close_brace(self):
"""
Calling tx where the given data string includes a close brace '}'
must write correctly.
"""
self.xbee.tx(dest_addr=b'\x01\x02', data=b'{test=1}')
result_data = self.ser.get_data_written()
expected_data = b'\x7E\x00\x0D\x01\x00\x01\x02\x00{test=1}\xD5'
self.assertEqual(result_data, expected_data)
def test_shorthand_disabled(self):
"""
When shorthand is disabled, any attempt at calling a
        non-existent attribute should raise AttributeError
"""
self.xbee = XBee(self.ser, shorthand=False)
try:
self.xbee.at
except AttributeError:
pass
else:
self.fail("Specified shorthand command should not exist")
class TestReadFromDevice(InitXBee):
"""
XBee class should properly read and parse binary data from a serial
port device.
"""
@gen_test
def test_read_at(self):
"""
read and parse a parameterless AT command
"""
device = Serial()
device.set_read_data(b'\x7E\x00\x05\x88DMY\x01\x8c')
xbee = XBee(device)
xbee._process_input(None, None)
info = yield xbee.wait_read_frame()
expected_info = {'id': 'at_response',
'frame_id': b'D',
'command': b'MY',
'status': b'\x01'}
self.assertEqual(info, expected_info)
@gen_test
def test_read_at_params(self):
"""
read and parse an AT command with a parameter
"""
device = Serial()
device.set_read_data(b'\x7E\x00\x08\x88DMY\x01\x00\x00\x00\x8c')
xbee = XBee(device)
xbee._process_input(None, None)
info = yield xbee.wait_read_frame()
expected_info = {'id': 'at_response',
'frame_id': b'D',
'command': b'MY',
'status': b'\x01',
'parameter': b'\x00\x00\x00'}
self.assertEqual(info, expected_info)
@gen_test
def test_is_response_parsed_as_io(self):
"""
I/O data in a AT response for an IS command is parsed.
"""
# Build IO data
# One sample, ADC 0 enabled
# DIO 1,3,5,7 enabled
header = b'\x01\x02\xAA'
# First 7 bits ignored, DIO8 low, DIO 0-7 alternating
# ADC0 value of 255
sample = b'\x00\xAA\x00\xFF'
data = header + sample
device = Serial()
device.set_read_data(APIFrame(data=b'\x88DIS\x00' + data).output())
xbee = XBee(device)
xbee._process_input(None, None)
info = yield xbee.wait_read_frame()
expected_info = {'id': 'at_response',
'frame_id': b'D',
'command': b'IS',
'status': b'\x00',
'parameter': [{'dio-1': True,
'dio-3': True,
'dio-5': True,
'dio-7': True,
'adc-0': 255}]}
self.assertEqual(info, expected_info)
@gen_test
def test_is_remote_response_parsed_as_io(self):
"""
I/O data in a Remote AT response for an IS command is parsed.
"""
# Build IO data
# One sample, ADC 0 enabled
# DIO 1,3,5,7 enabled
header = b'\x01\x02\xAA'
# First 7 bits ignored, DIO8 low, DIO 0-7 alternating
# ADC0 value of 255
sample = b'\x00\xAA\x00\xFF'
data = header + sample
device = Serial()
device.set_read_data(APIFrame(
data=b'\x97D\x00\x13\xa2\x00@oG\xe4v\x1aIS\x00' + data).output()
)
xbee = XBee(device)
xbee._process_input(None, None)
info = yield xbee.wait_read_frame()
expected_info = {'id': 'remote_at_response',
'frame_id': b'D',
'source_addr_long': b'\x00\x13\xa2\x00@oG\xe4',
'source_addr': b'v\x1a',
'command': b'IS',
'status': b'\x00',
'parameter': [{'dio-1': True,
'dio-3': True,
'dio-5': True,
'dio-7': True,
'adc-0': 255}]}
self.assertEqual(info, expected_info)
@gen_test
def test_read_io_data(self):
"""
XBee class should properly read and parse incoming IO data
"""
# Build IO data
# One sample, ADC 0 enabled
# DIO 1,3,5,7 enabled
header = b'\x01\x02\xAA'
# First 7 bits ignored, DIO8 low, DIO 0-7 alternating
# ADC0 value of 255
sample = b'\x00\xAA\x00\xFF'
data = header + sample
# Wrap data in frame
# RX frame data
rx_io_resp = b'\x83\x00\x01\x28\x00'
device = Serial()
device.set_read_data(b'\x7E\x00\x0C' + rx_io_resp + data + b'\xfd')
xbee = XBee(device)
xbee._process_input(None, None)
info = yield xbee.wait_read_frame()
expected_info = {'id': 'rx_io_data',
'source_addr': b'\x00\x01',
'rssi': b'\x28',
'options': b'\x00',
'samples': [{'dio-1': True,
'dio-3': True,
'dio-5': True,
'dio-7': True,
'adc-0': 255}]
}
self.assertEqual(info, expected_info)
@gen_test
def test_read_empty_string(self):
"""
Reading an empty string must not cause a crash
Occasionally, the serial port fails to read properly, and returns
an empty string. In this event, we must not crash.
"""
class BadReadDevice(Serial):
def __init__(self, bad_read_index, data):
self.read_id = 0
self.bad_read_index = bad_read_index
super(BadReadDevice, self).__init__()
self.set_read_data(data)
def inWaiting(self):
return 1
def read(self, length=1):
if self.read_id == self.bad_read_index:
self.read_id += 1
return ''
else:
self.read_id += 1
return super(BadReadDevice, self).read()
badDevice = BadReadDevice(1, b'\x7E\x00\x05\x88DMY\x01\x8c')
xbee = XBee(badDevice)
try:
xbee._process_input(None, None)
yield xbee.wait_read_frame()
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.fail("".join(traceback.format_exception(
exc_type, exc_value, exc_traceback
)))
@gen_test
def test_read_at_params_in_escaped_mode(self):
"""
read and parse an AT command with a parameter in escaped API mode
"""
device = Serial()
device.set_read_data(b'~\x00\t\x88DMY\x01}^}]}1}3m')
xbee = XBee(device, escaped=True)
xbee._process_input(None, None)
info = yield xbee.wait_read_frame()
expected_info = {'id': 'at_response',
'frame_id': b'D',
'command': b'MY',
'status': b'\x01',
'parameter': b'\x7E\x7D\x11\x13'}
self.assertEqual(info, expected_info)
@gen_test
def test_empty_frame_ignored(self):
"""
If an empty frame is received from a device, it must be ignored.
"""
device = Serial()
device.set_read_data(b'\x7E\x00\x00\xFF\x7E\x00\x05\x88DMY\x01\x8c')
xbee = XBee(device)
xbee._process_input(None, None)
xbee._process_input(None, None)
info = yield xbee.wait_read_frame()
expected_info = {'id': 'at_response',
'frame_id': b'D',
'command': b'MY',
'status': b'\x01'}
self.assertEqual(info, expected_info)
@gen_test
def test_read_rx_with_close_brace(self):
"""
An rx data frame including a close brace must be read properly.
"""
device = Serial()
device.set_read_data(APIFrame(b'\x81\x01\x02\x55\x00{test=1}').output())
xbee = XBee(device)
xbee._process_input(None, None)
info = yield xbee.wait_read_frame()
expected_info = {'id': 'rx',
'source_addr': b'\x01\x02',
'rssi': b'\x55',
'options': b'\x00',
'rf_data': b'{test=1}'}
self.assertEqual(info, expected_info)
@gen_test
def test_read_rx_with_close_brace_escaped(self):
"""
An escaped rx data frame including a close brace must be read properly.
"""
device = Serial()
device.set_read_data(APIFrame(b'\x81\x01\x02\x55\x00{test=1}',
escaped=True).output())
xbee = XBee(device, escaped=True)
xbee._process_input(None, None)
info = yield xbee.wait_read_frame()
expected_info = {'id': 'rx',
'source_addr': b'\x01\x02',
'rssi': b'\x55',
'options': b'\x00',
'rf_data': b'{test=1}'}
self.assertEqual(info, expected_info)
if __name__ == '__main__':
unittest.main()
| 32.228537 | 80 | 0.524969 |
e31defbb911f00d4c95f7fd7a30a4b8645a37b11
| 121 |
py
|
Python
|
bot_text.py
|
aevtikheev/quiz_bot
|
2d2909736775afb4493cd0640cf27f40f89fe9f3
|
[
"MIT"
] | null | null | null |
bot_text.py
|
aevtikheev/quiz_bot
|
2d2909736775afb4493cd0640cf27f40f89fe9f3
|
[
"MIT"
] | null | null | null |
bot_text.py
|
aevtikheev/quiz_bot
|
2d2909736775afb4493cd0640cf27f40f89fe9f3
|
[
"MIT"
] | null | null | null |
"""Texts for Quiz Bot interface."""
NEW_QUESTION_TEXT = 'Новый вопрос'  # "New question"
GIVE_UP_TEXT = 'Сдаться'  # "Give up"
SCORE_TEXT = 'Мой счёт'  # "My score"
| 20.166667 | 35 | 0.727273 |
e48158e65908607a3a3e5fe65b8647c243a21857
| 115,175 |
py
|
Python
|
oops_fhir/r4/code_system/v3_act_class.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/code_system/v3_act_class.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/code_system/v3_act_class.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["v3ActClass"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
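# Note (descriptive only): _resource is the CodeSystem parsed from the sibling
# JSON file (same stem, ".json" suffix); the class below exposes each concept of
# that code system as a CodeSystemConcept class attribute, e.g. v3ActClass.act
# for the "ACT" code.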
class v3ActClass:
"""
v3 Code System ActClass
**** MISSING DEFINITIONS ****
Status: active - Version: 2018-08-12
Copyright None
http://terminology.hl7.org/CodeSystem/v3-ActClass
"""
act = CodeSystemConcept(
{
"code": "ACT",
"concept": [
{
"code": "_ActClassRecordOrganizer",
"concept": [
{
"code": "COMPOSITION",
"concept": [
{
"code": "DOC",
"concept": [
{
"code": "DOCCLIN",
"concept": [
{
"code": "CDALVLONE",
"definition": "A clinical document that conforms to Level One of the HL7 Clinical Document Architecture (CDA)",
"display": "CDA Level One clinical document",
}
],
"definition": "A clinical document is a documentation of clinical observations and services, with the following characteristics:\r\n\n \n \n Persistence - A clinical document continues to exist in an unaltered state, for a time period defined by local and regulatory requirements; \r\n\n \n \n Stewardship - A clinical document is maintained by a person or organization entrusted with its care; \r\n\n \n \n Potential for authentication - A clinical document is an assemblage of information that is intended to be legally authenticated; \r\n\n \n \n Wholeness - Authentication of a clinical document applies to the whole and does not apply to portions of the document without the full context of the document;\r\n\n \n \n Human readability - A clinical document is human readable.",
"display": "clinical document",
}
],
"definition": "The notion of a document comes particularly from the paper world, where it corresponds to the contents recorded on discrete pieces of paper. In the electronic world, a document is a kind of composition that bears resemblance to their paper world counter-parts. Documents typically are meant to be human-readable.\r\n\n HL7's notion of document differs from that described in the W3C XML Recommendation, in which a document refers specifically to the contents that fall between the root element's start-tag and end-tag. Not all XML documents are HL7 documents.",
"display": "document",
}
],
"definition": "A context representing a grouped commitment of information to the EHR. It is considered the unit of modification of the record, the unit of transmission in record extracts, and the unit of attestation by authorizing clinicians.\r\n\n A composition represents part of a patient record originating from a single interaction between an authenticator and the record.\r\n\n Unless otherwise stated all statements within a composition have the same authenticator, apply to the same patient and were recorded in a single session of use of a single application.\r\n\n A composition contains organizers and entries.",
"display": "composition",
},
{
"code": "CONTAINER",
"concept": [
{
"code": "CATEGORY",
"definition": 'A group of entries within a composition or topic that have a common characteristic - for example, Examination, Diagnosis, Management OR Subjective, Objective, Analysis, Plan.\r\n\n The distinction from Topic relates to value sets. For Category there is a bounded list of things like "Examination", "Diagnosis" or SOAP categories. For Topic the list is wide open to any clinical condition or reason for a part of an encounter.\r\n\n A CATEGORY MAY CONTAIN ENTRIES.',
"display": "category",
},
{
"code": "DOCBODY",
"definition": "A context that distinguishes the body of a document from the document header. This is seen, for instance, in HTML documents, which have discrete <head> and <body> elements.",
"display": "document body",
},
{
"code": "DOCSECT",
"definition": "A context that subdivides the body of a document. Document sections are typically used for human navigation, to give a reader a clue as to the expected content. Document sections are used to organize and provide consistency to the contents of a document body. Document sections can contain document sections and can contain entries.",
"display": "document section",
},
{
"code": "TOPIC",
"definition": "A group of entries within a composition that are related to a common clinical theme - such as a specific disorder or problem, prevention, screening and provision of contraceptive services.\r\n\n A topic may contain categories and entries.",
"display": "topic",
},
],
"definition": 'Description: Container of clinical statements. Navigational. No semantic content. Knowledge of the section code is not required to interpret contained observations. Represents a heading in a heading structure, or "container tree".\r\n\n The record entries relating to a single clinical session are usually grouped under headings that represent phases of the encounter, or assist with layout and navigation. Clinical headings usually reflect the clinical workflow during a care session, and might also reflect the main author\'s reasoning processes. Much research has demonstrated that headings are used differently by different professional groups and specialties, and that headings are not used consistently enough to support safe automatic processing of the E H R.',
"display": "record container",
},
{
"code": "EXTRACT",
"concept": [
{
"code": "EHR",
"definition": "A context that comprises all compositions. The EHR is an extract that includes the entire chart.\r\n\n \n NOTE: In an exchange scenario, an EHR is a specialization of an extract.",
"display": "electronic health record",
}
],
"definition": "This context represents the part of a patient record conveyed in a single communication. It is drawn from a providing system for the purposes of communication to a requesting process (which might be another repository, a client application or a middleware service such as an electronic guideline engine), and supporting the faithful inclusion of the communicated data in the receiving system.\r\n\n An extract may be the entirety of the patient record as held by the sender or it may be a part of that record (e.g. changes since a specified date).\r\n\n An extract contains folders or compositions.\r\n\n An extract cannot contain another extract.",
"display": "extract",
},
{
"code": "FOLDER",
"definition": "A context representing the high-level organization of an extract e.g. to group parts of the record by episode, care team, clinical specialty, clinical condition, or source application. Internationally, this kind of organizing structure is used variably: in some centers and systems the folder is treated as an informal compartmentalization of the overall health record; in others it might represent a significant legal portion of the EHR relating to the originating enterprise or team.\r\n\n A folder contains compositions.\r\n\n Folders may be nested within folders.",
"display": "folder",
},
{
"code": "GROUPER",
"concept": [
{
"code": "CLUSTER",
"definition": 'Description:An ACT that organizes a set of component acts into a semantic grouping that have a shared subject. The subject may be either a subject participation (SBJ), subject act relationship (SUBJ), or child participation/act relationship types.\r\n\n \n Discussion: The focus in a CLUSTER act is the grouping of the contained acts. For example "a request to cluster" (RQO), "a type of cluster that is allowed to occur" (DEF), etc.\r\n\n \n Examples: \n \r\n\n \n \n Radiologic investigations that might include administration of a dye, followed by radiographic observations;\r\n\n \n \n "Isolate cluster" which includes all testing and specimen processing performed on a specific isolate;\r\n\n \n \n a set of actions to perform at a particular stage in a clinical trial.',
"display": "Cluster",
}
],
"definition": 'Definition: An ACT that organizes a set of component acts into a semantic grouping that share a particular context such as timeframe, patient, etc.\r\n\n \n UsageNotes: The focus in a GROUPER act is the grouping of the contained acts. For example "a request to group" (RQO), "a type of grouping that is allowed to occur" (DEF), etc.\r\n\n Unlike WorkingList, which represents a dynamic, shared, continuously updated collection to provide a "view" of a set of objects, GROUPER collections tend to be static and simply indicate a shared set of semantics. Note that sharing of semantics can be achieved using ACT as well. However, with GROUPER, the sole semantic is of grouping.',
"display": "grouper",
},
],
"definition": "Used to group a set of acts sharing a common context. Organizer factory can nest within other context factory - such as where a document is contained within a folder, or a folder is contained within an EHR extract.",
"display": "record organizer",
"property": [{"code": "notSelectable", "valueBoolean": True}],
},
{
"code": "ACCM",
"definition": "An accommodation is a service provided for a Person or other LivingSubject in which a place is provided for the subject to reside for a period of time. Commonly used to track the provision of ward, private and semi-private accommodations for a patient.",
"display": "accommodation",
},
{
"code": "ACCT",
"definition": "A financial account established to track the net result of financial acts.",
"display": "account",
},
{
"code": "ACSN",
"definition": "A unit of work, a grouper of work items as defined by the system performing that work. Typically some laboratory order fulfillers communicate references to accessions in their communications regarding laboratory orders. Often one or more specimens are related to an accession such that in some environments the accession number is taken as an identifier for a specimen (group).",
"display": "accession",
},
{
"code": "ADJUD",
"definition": "A transformation process where a requested invoice is transformed into an agreed invoice. Represents the adjudication processing of an invoice (claim). Adjudication results can be adjudicated as submitted, with adjustments or refused.\r\n\n Adjudication results comprise 2 components: the adjudication processing results and a restated (or adjudicated) invoice or claim",
"display": "financial adjudication",
},
{
"code": "CACT",
"concept": [
{
"code": "ACTN",
"definition": 'Sender asks addressee to do something depending on the focal Act of the payload. An example is "fulfill this order". Addressee has responsibilities to either reject the message or to act on it in an appropriate way (specified by the specific receiver responsibilities for the interaction).',
"display": "action",
},
{
"code": "INFO",
"definition": "Sender sends payload to addressee as information. Addressee does not have responsibilities beyond serving addressee's own interest (i.e., read and memorize if you see fit). This is equivalent to an FYI on a memo.",
"display": "information",
},
{
"code": "STC",
"definition": "Description: Sender transmits a status change pertaining to the focal act of the payload. This status of the focal act is the final state of the state transition. This can be either a request or an event, according to the mood of the control act.",
"display": "state transition control",
},
],
"definition": "An act representing a system action such as the change of state of another act or the initiation of a query. All control acts represent trigger events in the HL7 context. ControlActs may occur in different moods.",
"display": "control act",
},
{
"code": "CNTRCT",
"concept": [
{
"code": "FCNTRCT",
"concept": [
{
"code": "COV",
"definition": "When used in the EVN mood, this concept means with respect to a covered party:\r\n\n \n \n A health care insurance policy or plan that is contractually binding between two or more parties; or \r\n\n \n \n A health care program, usually administered by government entities, that provides coverage to persons determined eligible under the terms of the program.\r\n\n \n \n \n \n When used in the definition (DEF) mood, COV means potential coverage for a patient who may or may not be a covered party.\r\n\n \n \n The concept's meaning is fully specified by the choice of ActCoverageTypeCode (abstract) ActProgramCode or ActInsurancePolicyCode.",
"display": "coverage",
}
],
"definition": "A contract whose value is measured in monetary terms.",
"display": "financial contract",
}
],
"definition": "An agreement of obligation between two or more parties that is subject to contractual law and enforcement.",
"display": "contract",
},
{
"code": "CONC",
"concept": [
{
"code": "HCASE",
"definition": 'A public health case is a Concern about an observation or event that has a specific significance for public health. The creation of a PublicHealthCase initiates the tracking of the object of concern. The decision to track is related to but somewhat independent of the underlying event or observation.\r\n\n \n UsageNotes: Typically a Public Health Case involves an instance or instances of a reportable infectious disease or other condition. The public health case can include a health-related event concerning a single individual or it may refer to multiple health-related events that are occurrences of the same disease or condition of interest to public health.\r\n\n A public health case definition (Act.moodCode = "definition") includes the description of the clinical, laboratory, and epidemiologic indicators associated with a disease or condition of interest to public health. There are case definitions for conditions that are reportable, as well as for those that are not. A public health case definition is a construct used by public health for the purpose of counting cases, and should not be used as clinical indications for treatment. Examples include AIDS, toxic-shock syndrome, and salmonellosis and their associated indicators that are used to define a case.',
"display": "public health case",
},
{
"code": "OUTBR",
"definition": 'An Outbreak is a concern resulting from a series of public health cases.\r\n\n \n UsageNotes: The date on which an outbreak starts is the earliest date of onset among the cases assigned to the outbreak and its ending date is the last date of onset among the cases assigned to the outbreak. The effectiveTime attribute is used to convey the relevant dates for the case. An outbreak definition (Act.moodCode = "definition" includes the criteria for the number, types and occurrence pattern of cases necessary to declare an outbreak and to judge the severity of an outbreak.',
"display": "outbreak",
},
],
"definition": "Definition: A worry that tends to persist over time and has as its subject a state or process. The subject of the worry has the potential to require intervention or management.\r\n\n \n Examples: an observation result, procedure, substance administration, equipment repair status, device recall status, a health risk, a financial risk, public health risk, pregnancy, health maintenance, allergy, and acute or chronic illness.",
"display": "concern",
},
{
"code": "CONS",
"definition": 'The Consent class represents informed consents and all similar medico-legal transactions between the patient (or his legal guardian) and the provider. Examples are informed consent for surgical procedures, informed consent for clinical trials, advanced beneficiary notice, against medical advice decline from service, release of information agreement, etc.\r\n\n The details of consents vary. Often an institution has a number of different consent forms for various purposes, including reminding the physician about the topics to mention. Such forms also include patient education material. In electronic medical record communication, consents thus are information-generating acts on their own and need to be managed similar to medical activities. Thus, Consent is modeled as a special class of Act.\r\n\n The "signatures" to the consent document are represented electronically through Participation instances to the consent object. Typically an informed consent has Participation.typeCode of "performer", the healthcare provider informing the patient, and "consenter", the patient or legal guardian. Some consent may associate a witness or a notary public (e.g., living wills, advanced directives). In consents where a healthcare provider is not required (e.g. living will), the performer may be the patient himself or a notary public.\r\n\n Some consent has a minimum required delay between the consent and the service, so as to allow the patient to rethink his decisions. This minimum delay can be expressed in the act definition by the ActRelationship.pauseQuantity attribute that delays the service until the pause time has elapsed after the consent has been completed.',
"display": "consent",
},
{
"code": "CONTREG",
"definition": "An Act where a container is registered either via an automated sensor, such as a barcode reader, or by manual receipt",
"display": "container registration",
},
{
"code": "CTTEVENT",
"definition": "An identified point during a clinical trial at which one or more actions are scheduled to be performed (definition mood), or are actually performed (event mood). The actions may or may not involve an encounter between the subject and a healthcare professional.",
"display": "clinical trial timepoint event",
},
{
"code": "DISPACT",
"definition": "An action taken with respect to a subject Entity by a regulatory or authoritative body with supervisory capacity over that entity. The action is taken in response to behavior by the subject Entity that body finds to be \t\t\t\t\t\t\tundesirable.\r\n\n Suspension, license restrictions, monetary fine, letter of reprimand, mandated training, mandated supervision, etc.Examples:",
"display": "disciplinary action",
},
{
"code": "EXPOS",
"concept": [
{
"code": "AEXPOS",
"definition": "Description: \n \r\n\n An acquisition exposure act describes the proximity (location and time) through which the participating entity was potentially exposed to a physical (including energy), chemical or biological agent from another entity. The acquisition exposure act is used in conjunction with transmission exposure acts as part of an analysis technique for contact tracing. Although an exposure can be decomposed into transmission and acquisition exposures, there is no requirement that all exposures be treated in this fashion.\r\n\n \n Constraints: The Acquisition Exposure inherits the participation constraints that apply to Exposure with the following exception. The EXPSRC (exposure source) participation must never be associated with the Transmission Exposure either directly or via context conduction.",
"display": "acquisition exposure",
},
{
"code": "TEXPOS",
"definition": "Description: \n \r\n\n A transmission exposure act describes the proximity (time and location) over which the participating source entity was capable of transmitting a physical (including energy), chemical or biological substance agent to another entity. The transmission exposure act is used in conjunction with acquisition exposure acts as part of an analysis technique for contact tracing. Although an exposure can be decomposed into transmission and acquisition exposures, there is no requirement that all exposures be treated in this fashion.\r\n\n \n Constraints: The Transmission Exposure inherits the participation constraints that apply to Exposure with the following exception. The EXPTRGT (exposure target) participation must never be associated with the Transmission Exposure either directly or via context conduction.",
"display": "transmission exposure",
},
],
"definition": 'An interaction between entities that provides opportunity for transmission of a physical, chemical, or biological agent from an exposure source entity to an exposure target entity.\r\n\n \n Examples: The following examples are provided to indicate what interactions are considered exposures rather than other types of Acts:\r\n\n \n \n A patient accidentally receives three times the recommended dose of their medication due to a dosing error. \r\n\n \n \n This is a substance administration. Public health and/or safety authorities may also be interested in documenting this with an associated exposure.\r\n\n \n \n \n \n A patient accidentally is dispensed an incorrect medicine (e.g., clomiphene instead of clomipramine). They have taken several doses before the mistake is detected. They are therefore "exposed" to a medicine that there was no therapeutic indication for them to receive. \r\n\n \n \n There are several substance administrations in this example. Public health and/or safety authorities may also be interested in documenting this with associated exposures.\r\n\n \n \n \n \n In a busy medical ward, a patient is receiving chemotherapy for a lymphoma. Unfortunately, the IV infusion bag containing the medicine splits, spraying cytotoxic medication over the patient being treated and the patient in the adjacent bed. \r\n\n \n \n There are three substance administrations in this example. The first is the intended one (IV infusion) with its associated (implicit) exposure. There is an incident with an associated substance administration to the same patient involving the medication sprayed over the patient as well as an associated exposure. Additionally, the incident includes a substance administration involving the spraying of medication on the adjacent patient, also with an associated exposure.\r\n\n \n \n \n \n A patient who is a refugee from a war-torn African nation arrives in a busy inner city A&E department suffering from a cough with bloody sputum. Not understanding the registration and triage process, they sit in the waiting room for several hours before it is noticed that they have not booked in. As soon as they are being processed, it is suspected that they are suffering from TB. Vulnerable (immunosuppressed) patients who were sharing the waiting room with this patient may have been exposed to the tubercule bacillus, and must be traced for investigation. \r\n\n \n \n This is an exposure (or possibly multiple exposures) in the waiting room involving the refugee and everyone else in the waiting room during the period. There might also be a number of known or presumed substance administrations (coughing) via several possible routes. The substance administrations are only hypotheses until confirmed by further testing.\r\n\n \n \n \n \n A patient who has received an elective total hip replacement procedure suffers a prolonged stay in hospital, due to contracting an MRSA infection in the surgical wound site after the surgery. \r\n\n \n \n This is an exposure to MRSA. Although there was some sort of substance administration, it\'s possible the exact mechanism for introduction of the MRSA into the wound will not be identified.\r\n\n \n \n \n \n Routine maintenance of the X-ray machines at a local hospital reveals a serious breach of the shielding on one of the machines. 
Patients who have undergone investigations using that machine in the last month are likely to have been exposed to significantly higher doses of X-rays than was intended, and must be tracked for possible adverse effects. \r\n\n \n \n There has been an exposure of each patient who used the machine in the past 30 days. Some patients may have had substance administrations.\r\n\n \n \n \n \n A new member of staff is employed in the laundry processing room of a small cottage hospital, and a misreading of the instructions for adding detergents results in fifty times the usual concentration of cleaning materials being added to a batch of hospital bedding. As a result, several patients have been exposed to very high levels of detergents still present in the "clean" bedding, and have experienced dermatological reactions to this. \r\n\n \n \n There has been an incident with multiple exposures to several patients. Although there are substance administrations involving the application of the detergent to the skin of the patients, it is expected that the substance administrations would not be directly documented.\r\n\n \n \n \n \n Seven patients who are residents in a health care facility for the elderly mentally ill have developed respiratory problems. After several months of various tests having been performed and various medications prescribed to these patients, the problem is traced to their being "sensitive" to a new fungicide used in the wall plaster of the ward where these patients reside.\r\n\n \n \n The patients have been continuously exposed to the fungicide. Although there have been continuous substance administrations (via breathing) this would not normally be documented as a substance administration.\r\n\n \n \n \n \n A patient with osteoarthritis of the knees is treated symptomatically using analgesia, paracetamol (acetaminophen) 1g up to four times a day for pain relief. His GP does not realize that the patient has, 20 years previously (while at college) had severe alcohol addiction problems, and now, although this is completely under control, his liver has suffered significantly, leaving him more sensitive to hepatic toxicity from paracetamol use. Later that year, the patient returns with a noticeable level of jaundice. Paracetamol is immediately withdrawn and alternative solutions for the knee pain are sought. The jaundice gradually subsides with conservative management, but referral to the gastroenterologist is required for advice and monitoring. \r\n\n \n \n There is a substance administration with an associated exposure. The exposure component is based on the relative toxic level of the substance to a patient with a compromised liver function.\r\n\n \n \n \n \n A patient goes to their GP complaining of abdominal pain, having been discharged from the local hospital ten days\' previously after an emergency appendectomy. The GP can find nothing particularly amiss, and presumes it is post operative surgical pain that will resolve. The patient returns a fortnight later, when the GP prescribes further analgesia, but does decide to request an outpatient surgical follow-up appointment. At this post-surgical outpatient review, the registrar decides to order an ultrasound, which, when performed three weeks later, shows a small faint inexplicable mass. A laparoscopy is then performed, as a day case procedure, and a piece of a surgical swab is removed from the patient\'s abdominal cavity. Thankfully, a full recovery then takes place. \r\n\n \n \n This is a procedural sequelae. 
There may be an Incident recorded for this also.\r\n\n \n \n \n \n A patient is slightly late for a regular pacemaker battery check in the Cardiology department of the local hospital. They are hurrying down the second floor corridor. A sudden summer squall has recently passed over the area, and rain has come in through an open corridor window leaving a small puddle on the corridor floor. In their haste, the patient slips in the puddle and falls so badly that they have to be taken to the A&E department, where it is discovered on investigation they have slightly torn the cruciate ligament in their left knee. \r\n\n \n \n This is not an exposure. There has been an incident. \r\n\n \n \n \n \n \n Usage Notes: This class deals only with opportunity and not the outcome of the exposure; i.e. not all exposed parties will necessarily experience actual harm or benefit.\r\n\n Exposure differs from Substance Administration by the absence of the participation of a performer in the act. \r\n\n The following participations SHOULD be used with the following participations to distinguish the specific entities:\r\n\n \n \n The exposed entity participates via the "exposure target" (EXPTRGT) participation.\r\n\n \n \n An entity that has carried the agent transmitted in the exposure participates via the "exposure source" (EXSRC) participation. For example: \r\n\n \n \n a person or animal who carried an infectious disease and interacts (EXSRC) with another person or animal (EXPTRGT) transmitting the disease agent;\r\n\n \n \n a place or other environment (EXSRC) and a person or animal (EXPTRGT) who is exposed in the presence of this environment.\r\n\n \n \n \n \n When it is unknown whether a participating entity is the source of the agent (EXSRC) or the target of the transmission (EXPTRGT), the "exposure participant" (EXPART) is used.\r\n\n \n \n The physical (including energy), chemical or biological substance which is participating in the exposure uses the "exposure agent" (EXPAGNT) participation. There are at least three scenarios:\r\n\n \n \n the player of the Role that participates as EXPAGNT is the chemical or biological substance mixed or carried by the scoper-entity of the Role (e.g., ingredient role); or \r\n\n \n \n the player of the Role that participates as EXPAGNT is a mixture known to contain the chemical, radiological or biological substance of interest; or \r\n\n \n \n the player of the Role that participates as a EXPAGNT is known to carry the agent (i.e., the player is a fomite, vector, etc.).\r\n\n \n \n \n \n The Exposure.statusCode attribute should be interpreted as the state of the Exposure business object (e.g., active, aborted, completed) and not the clinical status of the exposure (e.g., probable, confirmed). The clinical status of the exposure should be associated with the exposure via a subject observation.\r\n\n \n Design Comment: The usage notes require a clear criterion for determining whether an act is an exposure or substance administration-deleterious potential, uncertainty of actual transmission, or otherwise. SBADM states that the criterion is the presence of a performer-but there are examples above that call this criterion into question (e.g., the first one, concerning a dosing error).',
"display": "exposure",
},
{
"code": "INC",
"definition": "An event that occurred outside of the control of one or more of the parties involved. Includes the concept of an accident.",
"display": "incident",
},
{
"code": "INFRM",
"definition": "The act of transmitting information and understanding about a topic to a subject where the participation association must be SBJ.\r\n\n \n Discussion: This act may be used to request that a patient or provider be informed about an Act, or to indicate that a person was informed about a particular act.",
"display": "inform",
},
{
"code": "INVE",
"definition": "Represents concepts related to invoice processing in health care",
"display": "invoice element",
},
{
"code": "LIST",
"definition": "Working list collects a dynamic list of individual instances of Act via ActRelationship which reflects the need of an individual worker, team of workers, or an organization to manage lists of acts for many different clinical and administrative reasons. Examples of working lists include problem lists, goal lists, allergy lists, and to-do lists.",
"display": "working list",
},
{
"code": "MPROT",
"definition": "An officially or unofficially instituted program to track acts of a particular type or categorization.",
"display": "monitoring program",
},
{
"code": "OBS",
"concept": [
{
"code": "_ActClassROI",
"concept": [
{
"code": "ROIBND",
"definition": 'A Region of Interest (ROI) specified for a multidimensional observation, such as an Observation Series (OBSSER). The ROI is specified using a set of observation criteria, each delineating the boundary of the region in one of the dimensions in the multidimensional observation. The relationship between a ROI and its referenced Act is specified through an ActRelationship of type subject (SUBJ), which must always be present. Each of the boundary criteria observations is connected with the ROI using ActRelationships of type "has component" (COMP). In each boundary criterion, the Act.code names the dimension and the Observation.value specifies the range of values inside the region. Typically the bounded dimension is continuous, and so the Observation.value will be an interval (IVL) data type. The Observation.value need not be specified if the respective dimension is only named but not constrained. For example, an ROI for the QT interval of a certain beat in ECG Lead II would contain 2 boundary criteria, one naming the interval in time (constrained), and the other naming the interval in ECG Lead II (only named, but not constrained).',
"display": "bounded ROI",
},
{
"code": "ROIOVL",
"definition": 'A Region of Interest (ROI) specified for an image using an overlay shape. Typically used to make reference to specific regions in images, e.g., to specify the location of a radiologic finding in an image or to specify the site of a physical finding by "circling" a region in a schematic picture of a human body. The units of the coordinate values are in pixels. The origin is in the upper left hand corner, with positive X values going to the right and positive Y values going down. The relationship between a ROI and its referenced Act is specified through an ActRelationship of type "subject" (SUBJ), which must always be present.',
"display": "overlay ROI",
},
],
"definition": 'Regions of Interest (ROI) within a subject Act. Primarily used for making secondary observations on a subset of a subject observation. The relationship between a ROI and its referenced Act is specified through an ActRelationship of type "subject" (SUBJ), which must always be present.',
"display": "ActClassROI",
"property": [
{"code": "notSelectable", "valueBoolean": True}
],
},
{
"code": "_SubjectPhysicalPosition",
"concept": [
{
"code": "_SubjectBodyPosition",
"concept": [
{
"code": "LLD",
"definition": "Lying on the left side.\r\n\n \n \n Deprecation Comment: \n This concept has been deprecated because it does not describe a type of Act (as it should in the ActClass code system), but rather encodes the result or value of an observation. The same code has been added to the ObservationValue code system.",
"display": "left lateral decubitus",
"property": [
{
"code": "status",
"valueCode": "deprecated",
},
{
"code": "deprecationDate",
"valueDateTime": "2009-07-12",
},
],
},
{
"code": "PRN",
"definition": "Lying with the front or ventral surface downward; lying face down.\r\n\n \n \n Deprecation Comment: \n This concept has been deprecated because it does not describe a type of Act (as it should in the ActClass code system), but rather encodes the result or value of an observation. The same code has been added to the ObservationValue code system.",
"display": "prone",
"property": [
{
"code": "status",
"valueCode": "deprecated",
},
{
"code": "deprecationDate",
"valueDateTime": "2009-07-12",
},
],
},
{
"code": "RLD",
"definition": "Lying on the right side.\r\n\n \n \n Deprecation Comment: \n This concept has been deprecated because it does not describe a type of Act (as it should in the ActClass code system), but rather encodes the result or value of an observation. The same code has been added to the ObservationValue code system.",
"display": "right lateral decubitus",
"property": [
{
"code": "status",
"valueCode": "deprecated",
},
{
"code": "deprecationDate",
"valueDateTime": "2009-07-12",
},
],
},
{
"code": "SFWL",
"definition": "A semi-sitting position in bed with the head of the bed elevated approximately 45 degrees.\r\n\n \n \n Deprecation Comment: \n This concept has been deprecated because it does not describe a type of Act (as it should in the ActClass code system), but rather encodes the result or value of an observation. The same code has been added to the ObservationValue code system.",
"display": "Semi-Fowler's",
"property": [
{
"code": "status",
"valueCode": "deprecated",
},
{
"code": "deprecationDate",
"valueDateTime": "2009-07-12",
},
],
},
{
"code": "SIT",
"definition": "Resting the body on the buttocks, typically with upper torso erect or semi erect.\r\n\n \n \n Deprecation Comment: \n This concept has been deprecated because it does not describe a type of Act (as it should in the ActClass code system), but rather encodes the result or value of an observation. The same code has been added to the ObservationValue code system.",
"display": "sitting",
"property": [
{
"code": "status",
"valueCode": "deprecated",
},
{
"code": "deprecationDate",
"valueDateTime": "2009-07-12",
},
],
},
{
"code": "STN",
"definition": "To be stationary, upright, vertical, on one's legs.\r\n\n \n \n Deprecation Comment: \n This concept has been deprecated because it does not describe a type of Act (as it should in the ActClass code system), but rather encodes the result or value of an observation. The same code has been added to the ObservationValue code system.",
"display": "standing",
"property": [
{
"code": "status",
"valueCode": "deprecated",
},
{
"code": "deprecationDate",
"valueDateTime": "2009-07-12",
},
],
},
{
"code": "SUP",
"concept": [
{
"code": "RTRD",
"definition": "Lying on the back, on an inclined plane, typically about 30-45 degrees with head raised and feet lowered.\r\n\n \n \n Deprecation Comment: \n This concept has been deprecated because it does not describe a type of Act (as it should in the ActClass code system), but rather encodes the result or value of an observation. The same code has been added to the ObservationValue code system.",
"display": "reverse trendelenburg",
"property": [
{
"code": "status",
"valueCode": "deprecated",
},
{
"code": "deprecationDate",
"valueDateTime": "2009-07-12",
},
],
},
{
"code": "TRD",
"definition": "Lying on the back, on an inclined plane, typically about 30-45 degrees, with head lowered and feet raised.\r\n\n \n \n Deprecation Comment: \n This concept has been deprecated because it does not describe a type of Act (as it should in the ActClass code system), but rather encodes the result or value of an observation. The same code has been added to the ObservationValue code system.",
"display": "trendelenburg",
"property": [
{
"code": "status",
"valueCode": "deprecated",
},
{
"code": "deprecationDate",
"valueDateTime": "2009-07-12",
},
],
},
],
"definition": "Deprecation Comment: \n This concept has been deprecated because it does not describe a type of Act (as it should in the ActClass code system), but rather encodes the result or value of an observation. The same code has been added to the ObservationValue code system.",
"display": "supine",
"property": [
{
"code": "status",
"valueCode": "deprecated",
},
{
"code": "deprecationDate",
"valueDateTime": "2009-07-12",
},
],
},
],
"definition": "Contains codes for defining the observed, physical position of a subject, such as during an observation, assessment, collection of a specimen, etc. ECG waveforms and vital signs, such as blood pressure, are two examples where a general, observed position typically needs to be noted.\r\n\n \n \n Deprecation Comment: \n This concept has been deprecated because it does not describe a type of Act (as it should in the ActClass code system), but rather encodes the result or value of an observation. The same code has been added to the ObservationValue code system.",
"display": "subject body position",
"property": [
{"code": "notSelectable", "valueBoolean": True},
{"code": "status", "valueCode": "deprecated"},
{
"code": "deprecationDate",
"valueDateTime": "2009-07-12",
},
],
}
],
"definition": "The spatial relationship of a subject whether human, other animal, or plant, to a frame of reference such as gravity or a collection device.",
"display": "subject physical position",
"property": [
{"code": "notSelectable", "valueBoolean": True}
],
},
{
"code": "ALRT",
"definition": "An observation identifying a potential adverse outcome as a result of an Act or combination of Acts.\r\n\n \n Examples: Detection of a drug-drug interaction; Identification of a late-submission for an invoice; Requesting discharge for a patient who does not meet hospital-defined discharge criteria.\r\n\n \n Discussion: This class is commonly used for identifying 'business rule' or 'process' problems that may result in a refusal to carry out a particular request. In some circumstances it may be possible to 'bypass' a problem by modifying the request to acknowledge the issue and/or by providing some form of mitigation.\r\n\n \n Constraints: the Act or Acts that may cause the the adverse outcome are the target of a subject ActRelationship. The subbtypes of this concept indicate the type of problem being detected (e.g. drug-drug interaction) while the Observation.value is used to repesent a specific problem code (e.g. specific drug-drug interaction id).",
"display": "detected issue",
},
{
"code": "BATTERY",
"definition": "Definition: An observation that is composed of a set of observations. These observations typically have a logical or practical grouping for generally accepted clinical or functional purposes, such as observations that are run together because of automation. A battery can define required and optional component observations and, in some cases, will define complex rules that determine whether or not a particular observation is made. BATTERY is a constraint on the Observation class in that it is understood to always be composed of component observations.\r\n\n \n UsageNotes: The focus in a BATTERY is that it is composed of individual observations. In request (RQO) mood, a battery is a request to perform the component observations. In event (EVN) mood a battery is a reporting of associated set of observation events. In definition mood a battery is the definition of the associated set of observations.\r\n\n \n Examples: Vital signs, Full blood count, Chemistry panel.",
"display": "battery",
},
{
"code": "CLNTRL",
"definition": "The set of actions that define an experiment to assess the effectiveness and/or safety of a biopharmaceutical product (food, drug, device, etc.). In definition mood, this set of actions is often embodied in a clinical trial protocol; in event mood, this designates the aggregate act of applying the actions to one or more subjects.",
"display": "clinical trial",
},
{
"code": "CNOD",
"definition": "An instance of Observation of a Condition at a point in time that includes any Observations or Procedures associated with that Condition as well as links to previous instances of Condition Node for the same Condition\r\n\n \n \n Deprecation Comment: \n This concept has been deprecated because an alternative structure for tracking the evolution of a problem has been presented and adopted by the Care Provision Work Group.",
"display": "Condition Node",
"property": [
{"code": "status", "valueCode": "deprecated"},
{
"code": "deprecationDate",
"valueDateTime": "2009-07-12",
},
],
},
{
"code": "COND",
"concept": [
{
"code": "CASE",
"concept": [
{
"code": "OUTB",
"definition": "An outbreak represents a series of public health cases. The date on which an outbreak starts is the earliest date of onset among the cases assigned to the outbreak, and its ending date is the last date of onset among the cases assigned to the outbreak.",
"display": "outbreak",
"property": [
{
"code": "status",
"valueCode": "deprecated",
},
{
"code": "deprecationDate",
"valueDateTime": "2012-11-09",
},
],
}
],
"definition": 'A public health case is an Observation representing a condition or event that has a specific significance for public health. Typically it involves an instance or instances of a reportable infectious disease or other condition. The public health case can include a health-related event concerning a single individual or it may refer to multiple health-related events that are occurrences of the same disease or condition of interest to public health. An outbreak involving multiple individuals may be considered as a type of public health case. A public health case definition (Act.moodCode = "definition") includes the description of the clinical, laboratory, and epidemiologic indicators associated with a disease or condition of interest to public health. There are case definitions for conditions that are reportable, as well as for those that are not. There are also case definitions for outbreaks. A public health case definition is a construct used by public health for the purpose of counting cases, and should not be used as clinical indications for treatment. Examples include AIDS, toxic-shock syndrome, and salmonellosis and their associated indicators that are used to define a case.',
"display": "public health case",
"property": [
{"code": "status", "valueCode": "deprecated"},
{
"code": "deprecationDate",
"valueDateTime": "2012-11-09",
},
],
}
],
"definition": "An observable finding or state that persists over time and tends to require intervention or management, and, therefore, distinguished from an Observation made at a point in time; may exist before an Observation of the Condition is made or after interventions to manage the Condition are undertaken. Examples: equipment repair status, device recall status, a health risk, a financial risk, public health risk, pregnancy, health maintenance, chronic illness",
"display": "Condition",
"property": [
{"code": "status", "valueCode": "deprecated"},
{
"code": "deprecationDate",
"valueDateTime": "2012-11-09",
},
],
},
{
"code": "DGIMG",
"definition": "Class for holding attributes unique to diagnostic images.",
"display": "diagnostic image",
},
{
"code": "GEN",
"concept": [
{
"code": "DETPOL",
"definition": "Description:A determinant peptide in a polypeptide as described by polypeptide.",
"display": "determinant peptide",
},
{
"code": "EXP",
"definition": "Description:An expression level of genes/proteins or other expressed genomic entities.",
"display": "expression level",
},
{
"code": "LOC",
"definition": "Description:The position of a gene (or other significant sequence) on the genome.",
"display": "locus",
},
{
"code": "PHN",
"definition": "Description:A genomic phenomenon that is expressed externally in the organism.",
"display": "phenotype",
},
{
"code": "POL",
"definition": "Description:A polypeptide resulting from the translation of a gene.",
"display": "polypeptide",
},
{
"code": "SEQ",
"definition": "Description:A sequence of biomolecule like the DNA, RNA, protein and the like.",
"display": "bio sequence",
},
{
"code": "SEQVAR",
"definition": "Description:A variation in a sequence as described by BioSequence.",
"display": "bio sequence variation",
},
],
"definition": "Description:An observation of genomic phenomena.",
"display": "genomic observation",
},
{
"code": "INVSTG",
"definition": "An formalized inquiry into the circumstances surrounding a particular unplanned event or potential event for the purposes of identifying possible causes and contributing factors for the event. This investigation could be conducted at a local institutional level or at the level of a local or national government.",
"display": "investigation",
},
{
"code": "OBSSER",
"concept": [
{
"code": "OBSCOR",
"definition": "Container for Observation Sequences (Observations whose values are contained in LIST<>'s) having values correlated with each other. Each contained Observation Sequence LIST<> must be the same length. Values in the LIST<>'s are correlated based on index. E.g. the values in position 2 in all the LIST<>'s are correlated. This is analogous to a table where each column is an Observation Sequence with a LIST<> of values, and each row in the table is a correlation between the columns. For example, a 12-lead ECG would contain 13 sequences: one sequence for time, and a sequence for each of the 12 leads.",
"display": "correlated observation sequences",
}
],
"definition": 'Container for Correlated Observation Sequences sharing a common frame of reference. All Observations of the same cd must be comparable and relative to the common frame of reference. For example, a 3-channel ECG device records a 12-lead ECG in 4 steps (3 leads at a time). Each of the separate 3-channel recordings would be in their own "OBSCOR". And, all 4 OBSCOR would be contained in one OBSSER because all the times are relative to the same origin (beginning of the recording) and all the ECG signals were from a fixed set of electrodes.',
"display": "observation series",
},
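                    # Illustrative aside (not part of the code system data): the OBSSER /
                    # OBSCOR definitions above describe correlated observation sequences as
                    # equal-length lists aligned by index, like the columns of a table. A
                    # minimal sketch of that shape for a hypothetical two-lead recording:
                    #
                    #     obscor = {
                    #         "time_ms": [0, 2, 4, 6],              # one sequence per column
                    #         "lead_I":  [0.10, 0.20, 0.15, 0.10],
                    #         "lead_II": [0.20, 0.25, 0.20, 0.18],
                    #     }
                    #     # row i of the "table" = {name: seq[i] for name, seq in obscor.items()}
                    #     assert len({len(seq) for seq in obscor.values()}) == 1  # equal lengths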
{
"code": "POS",
"concept": [
{
"code": "POSACC",
"definition": "Description:An observation representing the degree to which the assignment of the spatial coordinates, based on a matching algorithm by a geocoding engine against a reference spatial database, matches true or accepted values.",
"display": "position accuracy",
},
{
"code": "POSCOORD",
"definition": "Description:An observation representing one of a set of numerical values used to determine the position of a place. The name of the coordinate value is determined by the reference coordinate system.",
"display": "position coordinate",
},
],
"definition": "An observation denoting the physical location of a person or thing based on a reference coordinate system.",
"display": "position",
},
{
"code": "SPCOBS",
"definition": "An observation on a specimen in a laboratory environment that may affect processing, analysis or result interpretation",
"display": "specimen observation",
},
{
"code": "VERIF",
"definition": "An act which describes the process whereby a 'verifying party' validates either the existence of the Role attested to by some Credential or the actual Vetting act and its details.",
"display": "Verification",
},
],
"definition": "Description:An act that is intended to result in new information about a subject. The main difference between Observations and other Acts is that Observations have a value attribute. The code attribute of Observation and the value attribute of Observation must be considered in combination to determine the semantics of the observation.\r\n\n \n Discussion:\n \r\n\n Structurally, many observations are name-value-pairs, where the Observation.code (inherited from Act) is the name and the Observation.value is the value of the property. Such a construct is also known as a variable (a named feature that can assume a value) hence, the Observation class is always used to hold generic name-value-pairs or variables, even though the variable valuation may not be the result of an elaborate observation method. It may be a simple answer to a question or it may be an assertion or setting of a parameter.\r\n\n As with all Act statements, Observation statements describe what was done, and in the case of Observations, this includes a description of what was actually observed (results or answers); and those results or answers are part of the observation and not split off into other objects. \r\n\n The method of action is asserted by the Observation classCode or its subclasses at the least granular level, by the Observation.code attribute value at the medium level of granularity, and by the attribute value of observation.methodCode when a finer level of granularity is required. The method in whole or in part may also appear in the attribute value of Observation.value when using coded data types to express the value of the attribute. Relevant aspects of methodology may also be restated in value when the results themselves imply or state a methodology.\r\n\n An observation may consist of component observations each having their own Observation.code and Observation.value. In this case, the composite observation may not have an Observation.value for itself. For instance, a white blood cell count consists of the sub-observations for the counts of the various granulocytes, lymphocytes and other normal or abnormal blood cells (e.g., blasts). The overall white blood cell count Observation itself may therefore not have a value by itself (even though it could have one, e.g., the sum total of white blood cells). Thus, as long as an Act is essentially an Act of recognizing and noting information about a subject, it is an Observation, regardless of whether it has a simple value by itself or whether it has sub-observations.\r\n\n Even though observations are professional acts (see Act) and as such are intentional actions, this does not require that every possible outcome of an observation be pondered in advance of it being actually made. For instance, differential white blood cell counts (WBC) rarely show blasts, but if they do, this is part of the WBC observation even though blasts might not be predefined in the structure of a normal WBC. \r\n\n Clinical documents commonly have Subjective and Objective findings, both of which are kinds of Observations. In addition, clinical documents commonly contain Assessments, which are also kinds of Observations. Thus, the establishment of a diagnosis is an Observation. \r\n\n \n Examples:\n \r\n\n \n \n Recording the results of a Family History Assessment\r\n\n \n \n Laboratory test and associated result\r\n\n \n \n Physical exam test and associated result\r\n\n \n \n Device temperature\r\n\n \n \n Soil lead level",
"display": "observation",
},
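            # Editor's note (illustrative only, not part of the code system data): the
            # entries above and below form a recursive hierarchy keyed by "code", with
            # child concepts nested under "concept" and flags such as notSelectable
            # (valueBoolean) or a deprecated status carried in the optional "property"
            # list. A minimal lookup sketch, assuming the enclosing structure is bound
            # to a variable such as `act_class_concepts` (hypothetical name; the
            # enclosing assignment is not visible in this excerpt):
            #
            #     def find_concept(concepts, code):
            #         """Depth-first search for a concept dict by its 'code'."""
            #         for concept in concepts:
            #             if concept.get("code") == code:
            #                 return concept
            #             found = find_concept(concept.get("concept", []), code)
            #             if found is not None:
            #                 return found
            #         return None
            #
            #     def is_deprecated(concept):
            #         """True if the concept carries a status=deprecated property."""
            #         return any(
            #             p.get("code") == "status" and p.get("valueCode") == "deprecated"
            #             for p in concept.get("property", [])
            #         )
            #
            #     # Example usage (hypothetical variable name):
            #     # obs = find_concept(act_class_concepts, "OBS")
            #     # print(obs["display"], is_deprecated(obs))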
{
"code": "PCPR",
"concept": [
{
"code": "ENC",
"definition": "An interaction between a patient and healthcare participant(s) for the purpose of providing patient service(s) or assessing the health status of a patient. For example, outpatient visit to multiple departments, home health support (including physical therapy), inpatient hospital stay, emergency room visit, field visit (e.g., traffic accident), office visit, occupational therapy, telephone call.",
"display": "encounter",
}
],
"definition": "An Act that of taking on whole or partial responsibility for, or attention to, safety and well-being of a subject of care. \r\n\n \n Discussion: A care provision event may exist without any other care actions taking place. For example, when a patient is assigned to the care of a particular health professional.\r\n\n In request (RQO) mood care provision communicates a referral, which is a request:\r\n\n \n \n from one party (linked as a participant of type author (AUT)),\r\n\n \n \n to another party (linked as a participant of type performer (PRF),\r\n\n \n \n to take responsibility for a scope specified by the code attribute, \r\n\n \n \n for an entity (linked as a participant of type subject (SBJ)).\r\n\n \n \n The scope of the care for which responsibility is taken is identified by code attribute.\r\n\n In event (EVN) mood care provision indicates the effective time interval of a specified scope of responsibility by a performer (PRF) or set of performers (PRF) for a subject (SBJ).\r\n\n \n Examples:\n \r\n\n \n \n Referral from GP to a specialist.\r\n\n \n \n Assignment of a patient or group of patients to the case list of a health professional.\r\n\n \n \n Assignment of inpatients to the care of particular nurses for a working shift.",
"display": "care provision",
},
{
"code": "POLICY",
"concept": [
{
"code": "JURISPOL",
"definition": "Description:A mandate, regulation, obligation, requirement, rule, or expectation unilaterally imposed by a jurisdiction on:\r\n\n \n \n The activity of another party\r\n\n \n \n The behavior of another party\r\n\n \n \n The manner in which an act is executed\r\n\n \n \n \n Examples:A jurisdictional mandate regarding the prescribing and dispensing of a particular medication. A jurisdictional privacy or security regulation dictating the manner in which personal health information is disclosed. A jurisdictional requirement that certain services or health conditions are reported to a monitoring program, e.g., immunizations, methadone treatment, or cancer registries.",
"display": "jurisdictional policy",
},
{
"code": "ORGPOL",
"definition": "Description:A mandate, obligation, requirement, rule, or expectation unilaterally imposed by an organization on:\r\n\n \n \n The activity of another party\r\n\n \n \n The behavior of another party\r\n\n \n \n The manner in which an act is executed\r\n\n \n \n \n Examples:A clinical or research protocols imposed by a payer, a malpractice insurer, or an institution to which a provider must adhere. A mandate imposed by a denominational institution for a provider to provide or withhold certain information from the patient about treatment options.",
"display": "organizational policy",
},
{
"code": "SCOPOL",
"definition": "Description:An ethical or clinical obligation, requirement, rule, or expectation imposed or strongly encouraged by organizations that oversee particular clinical domains or provider certification which define the boundaries within which a provider may practice and which may have legal basis or ramifications on:\r\n\n \n \n The activity of another party\r\n\n \n \n The behavior of another party\r\n\n \n \n The manner in which an act is executed\r\n\n \n \n \n Examples:An ethical obligation for a provider to fully inform a patient about all treatment options. An ethical obligation for a provider not to disclose personal health information that meets certain criteria, e.g., where disclosure might result in harm to the patient or another person. The set of health care services which a provider is credentialed or privileged to provide.",
"display": "scope of practice policy",
},
{
"code": "STDPOL",
"definition": "Description:A requirement, rule, or expectation typically documented as guidelines, protocols, or formularies imposed or strongly encouraged by an organization that oversees or has authority over the practices within a domain, and which may have legal basis or ramifications on:\r\n\n \n \n The activity of another party\r\n\n \n \n The behavior of another party\r\n\n \n \n The manner in which an act is executed\r\n\n \n \n \n Examples:A payer may require a prescribing provider to adhere to formulary guidelines. An institution may adopt clinical guidelines and protocols and implement these within its electronic health record and decision support systems.",
"display": "standard of practice policy",
},
],
"definition": "Description:A mandate, regulation, obligation, requirement, rule, or expectation unilaterally imposed by one party on:\r\n\n \n \n The activity of another party\r\n\n \n \n The behavior of another party\r\n\n \n \n The manner in which an act is executed",
"display": "policy",
},
{
"code": "PROC",
"concept": [
{
"code": "SBADM",
"definition": "The act of introducing or otherwise applying a substance to the subject.\r\n\n \n Discussion: The effect of the substance is typically established on a biochemical basis, however, that is not a requirement. For example, radiotherapy can largely be described in the same way, especially if it is a systemic therapy such as radio-iodine. This class also includes the application of chemical treatments to an area.\r\n\n \n Examples: Chemotherapy protocol; Drug prescription; Vaccination record",
"display": "substance administration",
},
{
"code": "SBEXT",
"concept": [
{
"code": "SPECCOLLECT",
"definition": "A procedure for obtaining a specimen from a source entity.",
"display": "Specimen Collection",
}
],
"definition": "Description: The act of removing a substance from the subject.",
"display": "Substance Extraction",
},
],
"definition": "An Act whose immediate and primary outcome (post-condition) is the alteration of the physical condition of the subject.\r\n\n \n Examples: : Procedures may involve the disruption of some body surface (e.g. an incision in a surgical procedure), but they also include conservative procedures such as reduction of a luxated join, chiropractic treatment, massage, balneotherapy, acupuncture, shiatsu, etc. Outside of clinical medicine, procedures may be such things as alteration of environments (e.g. straightening rivers, draining swamps, building dams) or the repair or change of machinery etc.",
"display": "procedure",
},
{
"code": "REG",
"definition": "Represents the act of maintaining information about the registration of its associated registered subject. The subject can be either an Act or a Role, and includes subjects such as lab exam definitions, drug protocol definitions, prescriptions, persons, patients, practitioners, and equipment.\r\n\n The registration may have a unique identifier - separate from the unique identification of the subject - as well as a core set of related participations and act relationships that characterize the registration event and aid in the disposition of the subject information by a receiving system.",
"display": "registration",
},
{
"code": "REV",
"definition": 'The act of examining and evaluating the subject, usually another act. For example, "This prescription needs to be reviewed in 2 months."',
"display": "review",
},
{
"code": "SPCTRT",
"definition": "A procedure or treatment performed on a specimen to prepare it for analysis",
"display": "specimen treatment",
},
{
"code": "SPLY",
"concept": [
{
"code": "DIET",
"definition": 'Diet services are supply services, with some aspects resembling Medication services: the detail of the diet is given as a description of the Material associated via Participation.typeCode="product". Medically relevant diet types may be communicated in the Diet.code attribute using domain ActDietCode, however, the detail of the food supplied and the various combinations of dishes should be communicated as Material instances.\r\n\n \n Deprecation Note\n \r\n\n \n Class: Use either the Supply class (if dealing with what should be given to the patient) or SubstanceAdministration class (if dealing with what the patient should consume)\r\n\n \n energyQuantity: This quantity can be conveyed by using a Content relationship with a quantity attribute expressing the calories\r\n\n \n carbohydrateQuantity:This quantity can be conveyed using a Content relationship to an Entity with a code of carbohydrate and a quantity attribute on the content relationship.',
"display": "diet",
"property": [
{"code": "status", "valueCode": "deprecated"},
{
"code": "deprecationDate",
"valueDateTime": "2009-08-20",
},
],
}
],
"definition": 'Supply orders and deliveries are simple Acts that focus on the delivered product. The product is associated with the Supply Act via Participation.typeCode="product". With general Supply Acts, the precise identification of the Material (manufacturer, serial numbers, etc.) is important. Most of the detailed information about the Supply should be represented using the Material class. If delivery needs to be scheduled, tracked, and billed separately, one can associate a Transportation Act with the Supply Act. Pharmacy dispense services are represented as Supply Acts, associated with a SubstanceAdministration Act. The SubstanceAdministration class represents the administration of medication, while dispensing is supply.',
"display": "supply",
},
{
"code": "STORE",
"definition": 'The act of putting something away for safe keeping. The "something" may be physical object such as a specimen, or information, such as observations regarding a specimen.',
"display": "storage",
},
{
"code": "SUBST",
"definition": 'Definition: Indicates that the subject Act has undergone or should undergo substitution of a type indicated by Act.code.\r\n\n Rationale: Used to specify "allowed" substitution when creating orders, "actual" susbstitution when sending events, as well as the reason for the substitution and who was responsible for it.',
"display": "Substitution",
},
{
"code": "TRFR",
"definition": "Definition: The act of transferring information without the intent of imparting understanding about a topic to the subject that is the recipient or holder of the transferred information where the participation association must be RCV or HLD.",
"display": "transfer",
},
{
"code": "TRNS",
"definition": "Transportation is the moving of a payload (people or material) from a location of origin to a destination location. Thus, any transport service has the three target instances of type payload, origin, and destination, besides the targets that are generally used for any service (i.e., performer, device, etc.)",
"display": "transportation",
},
{
"code": "XACT",
"definition": 'A sub-class of Act representing any transaction between two accounts whose value is measured in monetary terms.\r\n\n In the "intent" mood, communicates a request for a transaction to be initiated, or communicates a transfer of value between two accounts.\r\n\n In the "event" mood, communicates the posting of a transaction to an account.',
"display": "financial transaction",
},
{
"code": "_ActClassContainer",
"concept": [
{
"code": "ENTRY",
"definition": "This context represents the information acquired and recorded for an observation, a clinical statement such as a portion of the patient's history or an inference or assertion, or an action that might be intended or has actually been performed. This class may represent both the actual data describing the observation, inference, or action, and optionally the details supporting the clinical reasoning process such as a reference to an electronic guideline, decision support system, or other knowledge reference.",
"display": "entry",
"property": [{"code": "status", "valueCode": "retired"}],
},
{
"code": "ORGANIZER",
"definition": 'Organizer of entries. Navigational. No semantic content. Knowledge of the section code is not required to interpret contained observations. Represents a heading in a heading structure, or "organizer tree".\r\n\n The record entries relating to a single clinical session are usually grouped under headings that represent phases of the encounter, or assist with layout and navigation. Clinical headings usually reflect the clinical workflow during a care session, and might also reflect the main author\'s reasoning processes. Much research has demonstrated that headings are used differently by different professional groups and specialties, and that headings are not used consistently enough to support safe automatic processing of the E H R.',
"display": "organizer",
"property": [{"code": "status", "valueCode": "retired"}],
},
],
"definition": "ActClassContainer",
"display": "ActClassContainer",
"property": [
{"code": "notSelectable", "valueBoolean": True},
{"code": "status", "valueCode": "retired"},
],
},
],
"definition": 'A record of something that is being done, has been done, can be done, or is intended or requested to be done.\r\n\n \n Examples:The kinds of acts that are common in health care are (1) a clinical observation, (2) an assessment of health condition (such as problems and diagnoses), (3) healthcare goals, (4) treatment services (such as medication, surgery, physical and psychological therapy), (5) assisting, monitoring or attending, (6) training and education services to patients and their next of kin, (7) and notary services (such as advanced directives or living will), (8) editing and maintaining documents, and many others.\r\n\n \n Discussion and Rationale: Acts are the pivot of the RIM; all domain information and processes are represented primarily in Acts. Any profession or business, including healthcare, is primarily constituted of intentional and occasionally non-intentional actions, performed and recorded by responsible actors. An Act-instance is a record of such an action.\r\n\n Acts connect to Entities in their Roles through Participations and connect to other Acts through ActRelationships. Participations are the authors, performers and other responsible parties as well as subjects and beneficiaries (which includes tools and material used in the performance of the act, which are also subjects). The moodCode distinguishes between Acts that are meant as factual records, vs. records of intended or ordered services, and the other modalities in which act can appear.\r\n\n One of the Participations that all acts have (at least implicitly) is a primary author, who is responsible of the Act and who "owns" the act. Responsibility for the act means responsibility for what is being stated in the Act and as what it is stated. Ownership of the act is assumed in the sense of who may operationally modify the same act. Ownership and responsibility of the Act is not the same as ownership or responsibility of what the Act-object refers to in the real world. The same real world activity can be described by two people, each being the author of their Act, describing the same real world activity. Yet one can be a witness while the other can be a principal performer. The performer has responsibilities for the physical actions; the witness only has responsibility for making a true statement to the best of his or her ability. The two Act-instances may even disagree, but because each is properly attributed to its author, such disagreements can exist side by side and left to arbitration by a recipient of these Act-instances.\r\n\n In this sense, an Act-instance represents a "statement" according to Rector and Nowlan (1991) [Foundations for an electronic medical record. Methods Inf Med. 30.] Rector and Nowlan have emphasized the importance of understanding the medical record not as a collection of facts, but "a faithful record of what clinicians have heard, seen, thought, and done." Rector and Nowlan go on saying that "the other requirements for a medical record, e.g., that it be attributable and permanent, follow naturally from this view." Indeed the Act class is this attributable statement, and the rules of updating acts (discussed in the state-transition model, see Act.statusCode) versus generating new Act-instances are designed according to this principle of permanent attributable statements.\r\n\n Rector and Nolan focus on the electronic medical record as a collection of statements, while attributed statements, these are still mostly factual statements. 
However, the Act class goes beyond this limitation to attributed factual statements, representing what is known as "speech-acts" in linguistics and philosophy. The notion of speech-act includes that there is pragmatic meaning in language utterances, aside from just factual statements; and that these utterances interact with the real world to change the state of affairs, even directly cause physical activities to happen. For example, an order is a speech act that (provided it is issued adequately) will cause the ordered action to be physically performed. The speech act theory has culminated in the seminal work by Austin (1962) [How to do things with words. Oxford University Press].\r\n\n An activity in the real world may progress from defined, through planned and ordered to executed, which is represented as the mood of the Act. Even though one might think of a single activity as progressing from planned to executed, this progression is reflected by multiple Act-instances, each having one and only one mood that will not change along the Act-instance life cycle. This is because the attribution and content of speech acts along this progression of an activity may be different, and it is often critical that a permanent and faithful record be maintained of this progression. The specification of orders or promises or plans must not be overwritten by the specification of what was actually done, so as to allow comparing actions with their earlier specifications. Act-instances that describe this progression of the same real world activity are linked through the ActRelationships (of the relationship category "sequel").\r\n\n Act as statements or speech-acts are the only representation of real world facts or processes in the HL7 RIM. The truth about the real world is constructed through a combination (and arbitration) of such attributed statements only, and there is no class in the RIM whose objects represent "objective state of affairs" or "real processes" independent from attributed statements. As such, there is no distinction between an activity and its documentation. Every Act includes both to varying degrees. For example, a factual statement made about recent (but past) activities, authored (and signed) by the performer of such activities, is commonly known as a procedure report or original documentation (e.g., surgical procedure report, clinic note etc.). Conversely, a status update on an activity that is presently in progress, authored by the performer (or a close observer) is considered to capture that activity (and is later superceded by a full procedure report). However, both status update and procedure report are acts of the same kind, only distinguished by mood and state (see statusCode) and completeness of the information.',
"display": "act",
}
)
"""
act
A record of something that is being done, has been done, can be done, or is intended or requested to be done.
Examples:The kinds of acts that are common in health care are (1) a clinical observation, (2) an assessment of health condition (such as problems and diagnoses), (3) healthcare goals, (4) treatment services (such as medication, surgery, physical and psychological therapy), (5) assisting, monitoring or attending, (6) training and education services to patients and their next of kin, (7) and notary services (such as advanced directives or living will), (8) editing and maintaining documents, and many others.
Discussion and Rationale: Acts are the pivot of the RIM; all domain information and processes are represented primarily in Acts. Any profession or business, including healthcare, is primarily constituted of intentional and occasionally non-intentional actions, performed and recorded by responsible actors. An Act-instance is a record of such an action.
Acts connect to Entities in their Roles through Participations and connect to other Acts through ActRelationships. Participations are the authors, performers and other responsible parties as well as subjects and beneficiaries (which includes tools and material used in the performance of the act, which are also subjects). The moodCode distinguishes between Acts that are meant as factual records, vs. records of intended or ordered services, and the other modalities in which act can appear.
One of the Participations that all acts have (at least implicitly) is a primary author, who is responsible of the Act and who "owns" the act. Responsibility for the act means responsibility for what is being stated in the Act and as what it is stated. Ownership of the act is assumed in the sense of who may operationally modify the same act. Ownership and responsibility of the Act is not the same as ownership or responsibility of what the Act-object refers to in the real world. The same real world activity can be described by two people, each being the author of their Act, describing the same real world activity. Yet one can be a witness while the other can be a principal performer. The performer has responsibilities for the physical actions; the witness only has responsibility for making a true statement to the best of his or her ability. The two Act-instances may even disagree, but because each is properly attributed to its author, such disagreements can exist side by side and left to arbitration by a recipient of these Act-instances.
In this sense, an Act-instance represents a "statement" according to Rector and Nowlan (1991) [Foundations for an electronic medical record. Methods Inf Med. 30.] Rector and Nowlan have emphasized the importance of understanding the medical record not as a collection of facts, but "a faithful record of what clinicians have heard, seen, thought, and done." Rector and Nowlan go on saying that "the other requirements for a medical record, e.g., that it be attributable and permanent, follow naturally from this view." Indeed the Act class is this attributable statement, and the rules of updating acts (discussed in the state-transition model, see Act.statusCode) versus generating new Act-instances are designed according to this principle of permanent attributable statements.
Rector and Nolan focus on the electronic medical record as a collection of statements, while attributed statements, these are still mostly factual statements. However, the Act class goes beyond this limitation to attributed factual statements, representing what is known as "speech-acts" in linguistics and philosophy. The notion of speech-act includes that there is pragmatic meaning in language utterances, aside from just factual statements; and that these utterances interact with the real world to change the state of affairs, even directly cause physical activities to happen. For example, an order is a speech act that (provided it is issued adequately) will cause the ordered action to be physically performed. The speech act theory has culminated in the seminal work by Austin (1962) [How to do things with words. Oxford University Press].
An activity in the real world may progress from defined, through planned and ordered to executed, which is represented as the mood of the Act. Even though one might think of a single activity as progressing from planned to executed, this progression is reflected by multiple Act-instances, each having one and only one mood that will not change along the Act-instance life cycle. This is because the attribution and content of speech acts along this progression of an activity may be different, and it is often critical that a permanent and faithful record be maintained of this progression. The specification of orders or promises or plans must not be overwritten by the specification of what was actually done, so as to allow comparing actions with their earlier specifications. Act-instances that describe this progression of the same real world activity are linked through the ActRelationships (of the relationship category "sequel").
Act as statements or speech-acts are the only representation of real world facts or processes in the HL7 RIM. The truth about the real world is constructed through a combination (and arbitration) of such attributed statements only, and there is no class in the RIM whose objects represent "objective state of affairs" or "real processes" independent from attributed statements. As such, there is no distinction between an activity and its documentation. Every Act includes both to varying degrees. For example, a factual statement made about recent (but past) activities, authored (and signed) by the performer of such activities, is commonly known as a procedure report or original documentation (e.g., surgical procedure report, clinic note etc.). Conversely, a status update on an activity that is presently in progress, authored by the performer (or a close observer) is considered to capture that activity (and is later superceded by a full procedure report). However, both status update and procedure report are acts of the same kind, only distinguished by mood and state (see statusCode) and completeness of the information.
"""
doccntnt = CodeSystemConcept(
{"code": "DOCCNTNT", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
doclist = CodeSystemConcept(
{"code": "DOCLIST", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
doclstitm = CodeSystemConcept(
{"code": "DOCLSTITM", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
docpara = CodeSystemConcept(
{"code": "DOCPARA", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
doctbl = CodeSystemConcept(
{"code": "DOCTBL", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
linkhtml = CodeSystemConcept(
{"code": "LINKHTML", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
localattr = CodeSystemConcept(
{"code": "LOCALATTR", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
localmrkp = CodeSystemConcept(
{"code": "LOCALMRKP", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
ordered = CodeSystemConcept(
{"code": "ordered", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
refr = CodeSystemConcept(
{"code": "REFR", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
tblcol = CodeSystemConcept(
{"code": "TBLCOL", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
tblcolgp = CodeSystemConcept(
{"code": "TBLCOLGP", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
tbldata = CodeSystemConcept(
{"code": "TBLDATA", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
tblhdr = CodeSystemConcept(
{"code": "TBLHDR", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
tblrow = CodeSystemConcept(
{"code": "TBLROW", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
tbody = CodeSystemConcept(
{"code": "tbody", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
tfoot = CodeSystemConcept(
{"code": "tfoot", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
thead = CodeSystemConcept(
{"code": "thead", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
unordered = CodeSystemConcept(
{"code": "unordered", "property": [{"code": "status", "valueCode": "retired"}]}
)
"""
None
"""
class Meta:
resource = _resource
| 120.350052 | 14,057 | 0.54003 |
829b619a091dfa63f946d060253ae672b0ee16c6
| 68,299 |
py
|
Python
|
autotest/ogr/ogr_sql_sqlite.py
|
ajolma/gdal
|
19d847c8519919fcd1e7e7247644d28771034317
|
[
"MIT"
] | null | null | null |
autotest/ogr/ogr_sql_sqlite.py
|
ajolma/gdal
|
19d847c8519919fcd1e7e7247644d28771034317
|
[
"MIT"
] | null | null | null |
autotest/ogr/ogr_sql_sqlite.py
|
ajolma/gdal
|
19d847c8519919fcd1e7e7247644d28771034317
|
[
"MIT"
] | 1 |
2019-11-01T15:17:09.000Z
|
2019-11-01T15:17:09.000Z
|
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: SQLite SQL dialect testing.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2012-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
try:
from BaseHTTPServer import BaseHTTPRequestHandler
except ImportError:
from http.server import BaseHTTPRequestHandler
from osgeo import ogr
from osgeo import osr
from osgeo import gdal
import gdaltest
import ogrtest
import webserver
import pytest
@pytest.fixture(autouse=True)
def clear_config_options():
gdal.SetConfigOption('OGR_GEOCODE_CACHE_FILE', None)
gdal.SetConfigOption('OGR_GEOCODE_APPLICATION', None)
gdal.SetConfigOption('OGR_GEOCODE_EMAIL', None)
gdal.SetConfigOption('OGR_GEOCODE_QUERY_TEMPLATE', None)
gdal.SetConfigOption('OGR_GEOCODE_DELAY', None)
gdal.SetConfigOption('OGR_GEOCODE_SERVICE', None)
gdal.SetConfigOption('OGR_GEOCODE_USERNAME', None)
gdal.SetConfigOption('OGR_GEOCODE_KEY', None)
gdal.SetConfigOption('OGR_SQLITE_DIALECT_USE_SPATIALITE', None)
###############################################################################
# Detect OGR SQLite dialect availability
@pytest.fixture(autouse=True, scope='module')
def require_ogr_sql_sqlite():
if ogr.GetDriverByName('SQLite') is None:
pytest.skip()
# If we have SQLite VFS support, then SQLite dialect should be available
ds = ogr.GetDriverByName('SQLite').CreateDataSource('/vsimem/ogr_sql_sqlite_available.db')
if ds is None:
pytest.skip()
ds = None
gdal.Unlink('/vsimem/ogr_sql_sqlite_available.db')
ds = ogr.GetDriverByName("Memory").CreateDataSource("my_ds")
sql_lyr = ds.ExecuteSQL("SELECT * FROM sqlite_master", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
assert sql_lyr is not None
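# Illustrative sketch (not part of the original test suite): a minimal, hedged
# way to probe for the OGR SQLite dialect outside of pytest, mirroring the
# fixture above. It relies only on the osgeo.ogr module already imported in
# this file; failure to obtain a result layer is treated as "not available".
def _sqlite_dialect_available():
    probe_ds = ogr.GetDriverByName("Memory").CreateDataSource("probe_ds")
    probe_lyr = probe_ds.ExecuteSQL("SELECT * FROM sqlite_master", dialect='SQLite')
    available = probe_lyr is not None
    if probe_lyr is not None:
        probe_ds.ReleaseResultSet(probe_lyr)
    return available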
###############################################################################
# Tests that don't involve geometry
def test_ogr_sql_sqlite_1():
ds = ogr.GetDriverByName("Memory").CreateDataSource("my_ds")
for geom in [ogr.wkbNone, ogr.wkbUnknown]:
lyr = ds.CreateLayer("my_layer", geom_type=geom)
field_defn = ogr.FieldDefn('intfield', ogr.OFTInteger)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('int64field', ogr.OFTInteger64)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('doublefield', ogr.OFTReal)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('strfield', ogr.OFTString)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('binaryfield', ogr.OFTBinary)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('nullablefield', ogr.OFTInteger)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('datetimefield', ogr.OFTDateTime)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('datefield', ogr.OFTDate)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('timefield', ogr.OFTTime)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('from', ogr.OFTString)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('boolfield', ogr.OFTInteger)
field_defn.SetSubType(ogr.OFSTBoolean)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('int16field', ogr.OFTInteger)
field_defn.SetSubType(ogr.OFSTInt16)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('float32field', ogr.OFTReal)
field_defn.SetSubType(ogr.OFSTFloat32)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('intlistfield', ogr.OFTIntegerList)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('int64listfield', ogr.OFTInteger64List)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('doublelistfield', ogr.OFTRealList)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('strlistfield', ogr.OFTStringList)
lyr.CreateField(field_defn)
# Test INSERT
sql_lyr = ds.ExecuteSQL("INSERT INTO my_layer (intfield, int64field, nullablefield, doublefield, strfield, binaryfield, datetimefield, datefield, timefield, \"from\", boolfield, int16field, float32field, intlistfield, int64listfield, doublelistfield, strlistfield) VALUES (1,1234567890123456,NULL,2.34,'foo',x'0001FF', '2012-08-23 21:24', '2012-08-23', '21:24', 'from_val', 1, -32768, 1.23, '(2:2,3)', '(1:1234567890123456)', '(1:1.23)', '(1:a)')", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
lyr.ResetReading()
feat = lyr.GetNextFeature()
if feat.GetField('intfield') != 1 or \
feat.GetField('int64field') != 1234567890123456 or \
feat.GetField('nullablefield') is not None or \
feat.GetField('doublefield') != 2.34 or \
feat.GetField('strfield') != 'foo' or \
feat.GetField('binaryfield') != '0001FF' or \
feat.GetField('datetimefield') != '2012/08/23 21:24:00' or \
feat.GetField('datefield') != '2012/08/23' or \
feat.GetField('timefield') != '21:24:00' or \
feat.GetField('from') != 'from_val':
feat.DumpReadable()
pytest.fail()
feat = None
# Test UPDATE
sql_lyr = ds.ExecuteSQL("UPDATE my_layer SET intfield = 2, int64field = 234567890123, doublefield = 3.45, strfield = 'bar', timefield = '12:34' WHERE ROWID = 0", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
lyr.ResetReading()
feat = lyr.GetNextFeature()
if feat.GetField('intfield') != 2 or \
feat.GetField('int64field') != 234567890123 or \
feat.GetField('doublefield') != 3.45 or \
feat.GetField('strfield') != 'bar' or \
feat.GetField('datetimefield') != '2012/08/23 21:24:00' or \
feat.GetField('datefield') != '2012/08/23' or \
feat.GetField('timefield') != '12:34:00':
feat.DumpReadable()
pytest.fail()
feat.SetStyleString('cool_style')
lyr.SetFeature(feat)
feat = None
# Test SELECT
sql_lyr = ds.ExecuteSQL("SELECT * FROM my_layer", dialect='SQLite')
assert sql_lyr.GetLayerDefn().GetFieldDefn(sql_lyr.GetLayerDefn().GetFieldIndex('boolfield')).GetSubType() == ogr.OFSTBoolean
assert sql_lyr.GetLayerDefn().GetFieldDefn(sql_lyr.GetLayerDefn().GetFieldIndex('int16field')).GetSubType() == ogr.OFSTInt16
assert sql_lyr.GetLayerDefn().GetFieldDefn(sql_lyr.GetLayerDefn().GetFieldIndex('float32field')).GetSubType() == ogr.OFSTFloat32
assert sql_lyr.GetLayerDefn().GetFieldDefn(sql_lyr.GetLayerDefn().GetFieldIndex('intlistfield')).GetType() == ogr.OFTIntegerList
assert sql_lyr.GetLayerDefn().GetFieldDefn(sql_lyr.GetLayerDefn().GetFieldIndex('doublelistfield')).GetType() == ogr.OFTRealList
assert sql_lyr.GetLayerDefn().GetFieldDefn(sql_lyr.GetLayerDefn().GetFieldIndex('strlistfield')).GetType() == ogr.OFTStringList
feat = sql_lyr.GetNextFeature()
if feat.GetField('intfield') != 2 or \
feat.GetField('int64field') != 234567890123 or \
feat.GetField('nullablefield') is not None or \
feat.GetField('doublefield') != 3.45 or \
feat.GetField('strfield') != 'bar' or \
feat.GetField('datetimefield') != '2012/08/23 21:24:00' or \
feat.GetField('datefield') != '2012/08/23' or \
feat.GetField('timefield') != '12:34:00' or \
feat.GetField('boolfield') != 1 or \
feat.GetField('int16field') != -32768 or \
feat.GetField('float32field') != 1.23 or \
feat.GetField('intlistfield') != [2, 3] or \
feat.GetField('int64listfield') != [1234567890123456] or \
feat.GetField('doublelistfield') != [1.23] or \
feat.GetField('strlistfield') != ['a']:
feat.DumpReadable()
pytest.fail()
feat = None
ds.ReleaseResultSet(sql_lyr)
# Test SELECT with OGR_STYLE
sql_lyr = ds.ExecuteSQL("SELECT *, OGR_STYLE FROM my_layer", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if feat.GetField('intfield') != 2 or \
feat.GetField('nullablefield') is not None or \
feat.GetField('doublefield') != 3.45 or \
feat.GetField('strfield') != 'bar' or \
feat.GetStyleString() != 'cool_style':
feat.DumpReadable()
pytest.fail()
feat = None
ds.ReleaseResultSet(sql_lyr)
# Test SELECT with filters
# Success filters
for cond in ['intfield = 2', 'intfield > 1', 'intfield >= 2', 'intfield < 3', 'intfield <= 2',
'int64field = 234567890123',
'doublefield = 3.45', 'doublefield > 3', 'doublefield >= 3.45', 'doublefield < 3.46', 'doublefield <= 3.45',
"strfield = 'bar'", "strfield > 'baq'", "strfield >= 'bar'", "strfield < 'bas'", "strfield <= 'bar'",
'nullablefield IS NULL',
"binaryfield = x'0001FF'",
"OGR_STYLE = 'cool_style'",
'intfield = 2 AND doublefield = 3.45',
'ROWID = 0',
"\"from\" = 'from_val'"]:
sql_lyr = ds.ExecuteSQL("SELECT * FROM my_layer WHERE " + cond, dialect='SQLite')
feat = sql_lyr.GetNextFeature()
assert feat is not None, cond
feat = None
ds.ReleaseResultSet(sql_lyr)
# Failed filters
for cond in ['intfield = 0', 'intfield > 3', 'intfield >= 3', 'intfield < 0', 'intfield <= 0',
'doublefield = 0', 'doublefield > 3.46', 'doublefield >= 3.46', 'doublefield < 3.45', 'doublefield <= 0',
"strfield = 'XXX'", "strfield > 'bas'", "strfield >= 'bas'", "strfield < 'bar'", "strfield <= 'baq'",
'intfield = 2 AND doublefield = 0',
'ROWID = 10000',
"\"from\" = 'other_val'"]:
sql_lyr = ds.ExecuteSQL("SELECT * FROM my_layer WHERE " + cond, dialect='SQLite')
feat = sql_lyr.GetNextFeature()
assert feat is None
feat = None
ds.ReleaseResultSet(sql_lyr)
if geom != ogr.wkbNone:
# Test a filter on geometry, to check that we won't try to optimize that
sql_lyr = ds.ExecuteSQL("SELECT * FROM my_layer WHERE GEOMETRY = x'00'", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
# Test INSERT with specified ROWID/FID
sql_lyr = ds.ExecuteSQL("INSERT INTO my_layer (intfield, ROWID) VALUES (100, 1000)", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
feat = lyr.GetFeature(1000)
if feat.GetField('intfield') != 100:
feat.DumpReadable()
pytest.fail()
feat = None
# Test DELETE
sql_lyr = ds.ExecuteSQL("DELETE FROM my_layer WHERE intfield = 2", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
sql_lyr = ds.ExecuteSQL("DELETE FROM my_layer WHERE ROWID = 1000", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
lyr.ResetReading()
feat = lyr.GetNextFeature()
if feat is not None:
feat.DumpReadable()
pytest.fail()
feat = None
ds.DeleteLayer(0)
ds = None
###############################################################################
# Tests that involve geometry (but without needing Spatialite)
def test_ogr_sql_sqlite_2():
ds = ogr.GetDriverByName("Memory").CreateDataSource("my_ds")
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
lyr = ds.CreateLayer("my_layer", srs=srs)
field_defn = ogr.FieldDefn('intfield', ogr.OFTInteger)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('doublefield', ogr.OFTReal)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('strfield', ogr.OFTString)
lyr.CreateField(field_defn)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField('intfield', 1)
feat.SetField('doublefield', 2.34)
feat.SetField('strfield', 'foo')
feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT (0 1)'))
feat.SetStyleString('cool_style')
lyr.CreateFeature(feat)
feat = None
# Test UPDATE
sql_lyr = ds.ExecuteSQL("UPDATE my_layer SET intfield = 2, doublefield = 3.45, strfield = 'bar' WHERE ROWID = 0", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
lyr.ResetReading()
feat = lyr.GetNextFeature()
if feat.GetField('intfield') != 2 or \
feat.GetField('doublefield') != 3.45 or \
feat.GetField('strfield') != 'bar' or \
feat.GetGeometryRef().ExportToWkt() != 'POINT (0 1)':
feat.DumpReadable()
pytest.fail()
feat = None
# Test SELECT
sql_lyr = ds.ExecuteSQL("SELECT * FROM my_layer", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if feat.GetField('intfield') != 2 or \
feat.GetField('doublefield') != 3.45 or \
feat.GetField('strfield') != 'bar' or \
feat.GetGeometryRef().ExportToWkt() != 'POINT (0 1)':
feat.DumpReadable()
pytest.fail()
got_srs = feat.GetGeometryRef().GetSpatialReference()
assert not (got_srs is None or srs.IsSame(got_srs, options = ['IGNORE_DATA_AXIS_TO_SRS_AXIS_MAPPING=YES']) == 0)
feat = None
ds.ReleaseResultSet(sql_lyr)
# Test SELECT with OGR_STYLE
sql_lyr = ds.ExecuteSQL("SELECT *, OGR_STYLE FROM my_layer", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if feat.GetField('intfield') != 2 or \
feat.GetField('doublefield') != 3.45 or \
feat.GetField('strfield') != 'bar' or \
feat.GetStyleString() != 'cool_style' or \
feat.GetGeometryRef().ExportToWkt() != 'POINT (0 1)':
feat.DumpReadable()
pytest.fail()
got_srs = feat.GetGeometryRef().GetSpatialReference()
assert not (got_srs is None or srs.IsSame(got_srs, options = ['IGNORE_DATA_AXIS_TO_SRS_AXIS_MAPPING=YES']) == 0)
feat = None
ds.ReleaseResultSet(sql_lyr)
# Test with a custom SRS
srs = osr.SpatialReference()
srs.SetFromUserInput("""LOCAL_CS["foo"]""")
lyr = ds.CreateLayer("my_layer2", srs=srs)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT (0 1)'))
lyr.CreateFeature(feat)
feat = None
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT (0 1)'))
lyr.CreateFeature(feat)
feat = None
# Test SELECT
sql_lyr = ds.ExecuteSQL("SELECT * FROM my_layer2", dialect='SQLite')
layer_srs = sql_lyr.GetSpatialRef()
assert not (layer_srs is None or srs.IsSame(layer_srs) == 0)
for _ in range(2):
feat = sql_lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'POINT (0 1)':
feat.DumpReadable()
pytest.fail()
got_srs = feat.GetGeometryRef().GetSpatialReference()
assert not (got_srs is None or srs.IsSame(got_srs) == 0)
feat = None
ds.ReleaseResultSet(sql_lyr)
###############################################################################
# Test that involves a join
def test_ogr_sql_sqlite_3():
ds = ogr.Open('data')
sql_lyr = ds.ExecuteSQL("SELECT p.*, idlink.* FROM poly p LEFT JOIN idlink USING (EAS_ID) ORDER BY EAS_ID", dialect='SQLite')
count = sql_lyr.GetFeatureCount()
sql_lyr.ResetReading()
feat = sql_lyr.GetNextFeature()
if feat.GetField('NAME') != '_158_':
feat.DumpReadable()
pytest.fail()
geom = feat.GetGeometryRef()
p = geom.GetGeometryRef(0).GetPoint_2D(0)
if p != (480701.0625, 4764738.0):
feat.DumpReadable()
pytest.fail()
ds.ReleaseResultSet(sql_lyr)
assert count == 10
ds = None
###############################################################################
# Test that involves a self-join (to check that we can open the same table twice)
def test_ogr_sql_sqlite_4():
ds = ogr.Open('data')
sql_lyr = ds.ExecuteSQL("SELECT p.* FROM poly p JOIN poly USING (EAS_ID)", dialect='SQLite')
count = sql_lyr.GetFeatureCount()
ds.ReleaseResultSet(sql_lyr)
assert count == 10
ds = None
###############################################################################
# Test that involves spatialite
def test_ogr_sql_sqlite_5():
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = ogr.GetDriverByName('SQLite').CreateDataSource('/vsimem/foo.db', options=['SPATIALITE=YES'])
ogrtest.has_spatialite = ds is not None
if ogrtest.has_spatialite:
sql_lyr = ds.ExecuteSQL("SELECT spatialite_version()")
feat = sql_lyr.GetNextFeature()
gdaltest.spatialite_version = feat.GetFieldAsString(0)
ds.ReleaseResultSet(sql_lyr)
ds = None
gdal.Unlink('/vsimem/foo.db')
gdal.PopErrorHandler()
if ogrtest.has_spatialite is False:
pytest.skip('Spatialite not available')
ds = ogr.Open('data')
sql_lyr = ds.ExecuteSQL("SELECT MAX(ST_Length(GEOMETRY)) FROM POLY", dialect='SQLite')
count = sql_lyr.GetFeatureCount()
ds.ReleaseResultSet(sql_lyr)
ds = None
assert count == 1
###############################################################################
# If Spatialite is available, retry some tests without it, to check that
# we are fully compatible with regular SQLite
def test_ogr_sql_sqlite_6():
if ogrtest.has_spatialite is False:
pytest.skip()
gdal.SetConfigOption('OGR_SQLITE_DIALECT_USE_SPATIALITE', 'NO')
test_ogr_sql_sqlite_1()
test_ogr_sql_sqlite_2()
test_ogr_sql_sqlite_4()
###############################################################################
# Test if there's a text column called GEOMETRY already in the table
def test_ogr_sql_sqlite_7():
ds = ogr.GetDriverByName("Memory").CreateDataSource("my_ds")
lyr = ds.CreateLayer("my_layer")
field_defn = ogr.FieldDefn('intfield', ogr.OFTInteger)
lyr.CreateField(field_defn)
field_defn = ogr.FieldDefn('geometry', ogr.OFTString)
lyr.CreateField(field_defn)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField('intfield', 1)
feat.SetField('geometry', 'BLA')
feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt('POINT (0 1)'))
lyr.CreateFeature(feat)
feat = None
# Test SELECT
sql_lyr = ds.ExecuteSQL("SELECT * FROM my_layer", dialect='SQLite')
assert sql_lyr.GetGeometryColumn() == 'GEOMETRY2'
feat = sql_lyr.GetNextFeature()
if feat.GetField('intfield') != 1 or \
feat.GetField('geometry') != 'BLA' or \
feat.GetGeometryRef().ExportToWkt() != 'POINT (0 1)':
feat.DumpReadable()
pytest.fail()
feat = None
ds.ReleaseResultSet(sql_lyr)
# Test SELECT
sql_lyr = ds.ExecuteSQL("SELECT GEOMETRY2 FROM my_layer", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'POINT (0 1)':
feat.DumpReadable()
pytest.fail()
feat = None
ds.ReleaseResultSet(sql_lyr)
###############################################################################
# Test join with an external datasource
def test_ogr_sql_sqlite_8():
ds = ogr.Open('data')
expect = [171, 172, 173, 179]
sql_lyr = ds.ExecuteSQL(
'SELECT p.*, il.name FROM poly p ' +
'LEFT JOIN "data/idlink.dbf".idlink il USING (eas_id) ' +
'WHERE eas_id > 170 ORDER BY eas_id', dialect='SQLite')
tr = ogrtest.check_features_against_list(sql_lyr, 'eas_id', expect)
ds.ReleaseResultSet(sql_lyr)
assert tr
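# Illustrative sketch (not part of the original test suite): a hedged,
# standalone version of the external-datasource join exercised above. The
# SQLite dialect lets a query reference another OGR datasource with the
# 'path'.layername syntax; the paths 'data' and 'data/idlink.dbf' are the
# test-data locations assumed throughout this file.
def _external_join_feature_count():
    join_ds = ogr.Open('data')
    join_lyr = join_ds.ExecuteSQL(
        "SELECT p.*, il.name FROM poly p "
        "LEFT JOIN \"data/idlink.dbf\".idlink il USING (eas_id)",
        dialect='SQLite')
    joined_count = join_lyr.GetFeatureCount()
    join_ds.ReleaseResultSet(join_lyr)
    return joined_count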
###############################################################################
# Check parsing of sub-selects
def test_ogr_sql_sqlite_9():
ds = ogr.Open('data')
sql_lyr = ds.ExecuteSQL("SELECT count(*) as cnt FROM (SELECT * FROM (SELECT * FROM\n'data'.poly my_alias))p,(SELECT * FROM 'data'.idlink) il WHERE p.EAS_ID = il.EAS_id", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
cnt = feat.GetField('cnt')
feat = None
ds.ReleaseResultSet(sql_lyr)
    assert cnt == 7
###############################################################################
# Test optimized count(*)
def test_ogr_sql_sqlite_10():
ds = ogr.Open('data')
sql_lyr = ds.ExecuteSQL("SELECT count(*) as cnt FROM poly", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
cnt = feat.GetField('cnt')
feat = None
ds.ReleaseResultSet(sql_lyr)
    assert cnt == 10
###############################################################################
# Test correct parsing of literals
def test_ogr_sql_sqlite_11():
ds = ogr.GetDriverByName("Memory").CreateDataSource("my_ds")
lyr = ds.CreateLayer("my_layer")
field_defn = ogr.FieldDefn('intfield', ogr.OFTInteger)
lyr.CreateField(field_defn)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField('intfield', 1)
lyr.CreateFeature(feat)
feat = None
sql_lyr = ds.ExecuteSQL("SELECT 'a' FROM \"my_layer\"", dialect='SQLite')
cnt = sql_lyr.GetFeatureCount()
ds.ReleaseResultSet(sql_lyr)
ds = None
    assert cnt == 1
###############################################################################
# Test various error conditions
def test_ogr_sql_sqlite_12():
ds = ogr.GetDriverByName("Memory").CreateDataSource("my_ds")
# Invalid SQL
gdal.PushErrorHandler('CPLQuietErrorHandler')
sql_lyr = ds.ExecuteSQL("qdfdfdf", dialect='SQLite')
gdal.PopErrorHandler()
ds.ReleaseResultSet(sql_lyr)
# Non existing external datasource
gdal.PushErrorHandler('CPLQuietErrorHandler')
sql_lyr = ds.ExecuteSQL("SELECT * FROM 'foo'.'bar'", dialect='SQLite')
gdal.PopErrorHandler()
ds.ReleaseResultSet(sql_lyr)
# Non existing layer in existing external datasource
gdal.PushErrorHandler('CPLQuietErrorHandler')
sql_lyr = ds.ExecuteSQL("SELECT * FROM 'data'.'azertyuio'", dialect='SQLite')
gdal.PopErrorHandler()
ds.ReleaseResultSet(sql_lyr)
ds = None
###############################################################################
# Test ogr_layer_Extent(), ogr_layer_FeatureCount(), ogr_layer_SRID() and ogr_layer_GeometryType()
def test_ogr_sql_sqlite_13():
ds = ogr.GetDriverByName("Memory").CreateDataSource("my_ds")
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
lyr = ds.CreateLayer("non_spatial", geom_type=ogr.wkbNone)
lyr = ds.CreateLayer("my_layer", geom_type=ogr.wkbLineString, srs=srs)
field_defn = ogr.FieldDefn('intfield', ogr.OFTInteger)
lyr.CreateField(field_defn)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt('LINESTRING (0 1,2 3)'))
lyr.CreateFeature(feat)
feat = None
# Test with invalid parameter
gdal.PushErrorHandler('CPLQuietErrorHandler')
sql_lyr = ds.ExecuteSQL("SELECT ogr_layer_Extent(12)", dialect='SQLite')
gdal.PopErrorHandler()
feat = sql_lyr.GetNextFeature()
geom = feat.GetGeometryRef()
ds.ReleaseResultSet(sql_lyr)
assert geom is None
# Test on non existing layer
gdal.PushErrorHandler('CPLQuietErrorHandler')
sql_lyr = ds.ExecuteSQL("SELECT ogr_layer_Extent('foo')", dialect='SQLite')
gdal.PopErrorHandler()
feat = sql_lyr.GetNextFeature()
geom = feat.GetGeometryRef()
ds.ReleaseResultSet(sql_lyr)
assert geom is None
# Test ogr_layer_Extent()
sql_lyr = ds.ExecuteSQL("SELECT ogr_layer_Extent('my_layer')", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
geom_wkt = feat.GetGeometryRef().ExportToWkt()
feat = None
ds.ReleaseResultSet(sql_lyr)
assert geom_wkt == 'POLYGON ((0 1,2 1,2 3,0 3,0 1))'
# Test ogr_layer_FeatureCount()
sql_lyr = ds.ExecuteSQL("SELECT ogr_layer_FeatureCount('my_layer') AS the_count", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
count = feat.GetField('the_count')
feat = None
ds.ReleaseResultSet(sql_lyr)
assert count == 1
# Test ogr_layer_Extent() on a non spatial layer
sql_lyr = ds.ExecuteSQL("SELECT ogr_layer_Extent('non_spatial')", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
geom = feat.GetGeometryRef()
ds.ReleaseResultSet(sql_lyr)
assert geom is None
# Test ogr_layer_SRID()
sql_lyr = ds.ExecuteSQL("SELECT ogr_layer_SRID('my_layer') AS the_srid", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
the_srid = feat.GetField('the_srid')
feat = None
ds.ReleaseResultSet(sql_lyr)
assert the_srid == 4326
# Test ogr_layer_SRID() on a non spatial layer
sql_lyr = ds.ExecuteSQL("SELECT ogr_layer_SRID('non_spatial') AS the_srid", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
the_srid = feat.GetField('the_srid')
feat = None
ds.ReleaseResultSet(sql_lyr)
assert the_srid is None
# Test ogr_layer_GeometryType()
sql_lyr = ds.ExecuteSQL("SELECT ogr_layer_GeometryType('my_layer') AS the_geometrytype", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
the_geometrytype = feat.GetField('the_geometrytype')
feat = None
ds.ReleaseResultSet(sql_lyr)
assert the_geometrytype == 'LINESTRING'
# Test ogr_layer_GeometryType() on a non spatial layer
sql_lyr = ds.ExecuteSQL("SELECT ogr_layer_GeometryType('non_spatial') AS the_geometrytype", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
the_geometrytype = feat.GetField('the_geometrytype')
feat = None
ds.ReleaseResultSet(sql_lyr)
assert the_geometrytype is None
# Test on an external virtual table
ds_shape = ogr.GetDriverByName("ESRI Shapefile").CreateDataSource('/vsimem/ogr_sql_sqlite_13.shp')
srs = osr.SpatialReference()
srs.ImportFromEPSG(32631)
lyr = ds_shape.CreateLayer('ogr_sql_sqlite_13', srs=srs)
ds_shape = None
sql_lyr = ds.ExecuteSQL("SELECT ogr_layer_SRID('/vsimem/ogr_sql_sqlite_13.shp'.ogr_sql_sqlite_13) AS the_srid_shp", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
the_srid_shp = feat.GetField('the_srid_shp')
feat = None
ds.ReleaseResultSet(sql_lyr)
ogr.GetDriverByName("ESRI Shapefile").DeleteDataSource('/vsimem/ogr_sql_sqlite_13.shp')
assert the_srid_shp == 32631
ds = None
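# Illustrative sketch (not part of the original test suite): a hedged helper
# showing how the ogr_layer_Extent() SQL function tested above could be used
# from a plain script to fetch a layer extent as WKT. The default datasource
# path 'data' and layer name 'poly' are assumptions borrowed from other tests
# in this file; a non-spatial layer yields None, as asserted above.
def _layer_extent_wkt(datasource_path='data', layer_name='poly'):
    extent_ds = ogr.Open(datasource_path)
    extent_lyr = extent_ds.ExecuteSQL(
        "SELECT ogr_layer_Extent('%s')" % layer_name, dialect='SQLite')
    extent_feat = extent_lyr.GetNextFeature()
    geom = extent_feat.GetGeometryRef()
    wkt = geom.ExportToWkt() if geom is not None else None
    extent_ds.ReleaseResultSet(extent_lyr)
    return wkt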
###############################################################################
# Shared helper for the spatial index recognition tests (14 and 15) below
def ogr_sql_sqlite_14_and_15(sql):
ds = ogr.GetDriverByName("Memory").CreateDataSource("my_ds")
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
lyr = ds.CreateLayer("my_layer", geom_type=ogr.wkbLineString, srs=srs)
field_defn = ogr.FieldDefn('intfield', ogr.OFTInteger)
lyr.CreateField(field_defn)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField(0, 1)
feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt('LINESTRING (0 0,1 1)'))
lyr.CreateFeature(feat)
feat = None
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField(0, 2)
feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt('LINESTRING (10 0,11 1)'))
lyr.CreateFeature(feat)
feat = None
lyr2 = ds.CreateLayer("my_layer2", geom_type=ogr.wkbLineString, srs=srs)
field_defn = ogr.FieldDefn('intfield2', ogr.OFTInteger)
lyr2.CreateField(field_defn)
feat = ogr.Feature(lyr2.GetLayerDefn())
feat.SetField(0, 11)
feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt('LINESTRING (10 1,11 0)'))
lyr2.CreateFeature(feat)
feat = None
feat = ogr.Feature(lyr2.GetLayerDefn())
feat.SetField(0, 12)
feat.SetGeometryDirectly(ogr.CreateGeometryFromWkt('LINESTRING (0 1,1 0)'))
lyr2.CreateFeature(feat)
feat = None
got_one = False
got_two = False
sql_lyr = ds.ExecuteSQL(sql, dialect='SQLite')
for _ in range(2):
feat = sql_lyr.GetNextFeature()
i1 = feat.GetField('intfield')
i2 = feat.GetField('intfield2')
if (i1 == 1 and i2 == 12):
got_one = True
if (i1 == 2 and i2 == 11):
got_two = True
feat = None
feat = sql_lyr.GetNextFeature()
assert feat is None
ds.ReleaseResultSet(sql_lyr)
assert (got_one and got_two)
###############################################################################
# Test 'idx_layername_geometryname' spatial index recognition
def test_ogr_sql_sqlite_14():
if ogrtest.has_spatialite is False:
pytest.skip()
sql = "SELECT intfield, intfield2 FROM my_layer, my_layer2 WHERE " + \
"my_layer2.rowid IN (SELECT pkid FROM idx_my_layer2_geometry WHERE " + \
"xmax > MbrMinX(my_layer.geometry) AND xmin < MbrMaxX(my_layer.geometry) AND " + \
"ymax >= MbrMinY(my_layer.geometry) AND ymin <= MbrMaxY(my_layer.geometry) )"
return ogr_sql_sqlite_14_and_15(sql)
###############################################################################
# Test 'SpatialIndex' spatial index recognition
def test_ogr_sql_sqlite_15():
if ogrtest.has_spatialite is False:
pytest.skip()
if int(gdaltest.spatialite_version[0:gdaltest.spatialite_version.find('.')]) < 3:
pytest.skip()
sql = "SELECT intfield, intfield2 FROM my_layer, my_layer2 WHERE " + \
"my_layer2.rowid IN (SELECT ROWID FROM SpatialIndex WHERE f_table_name = 'my_layer2' AND search_frame = my_layer.geometry)"
return ogr_sql_sqlite_14_and_15(sql)
###############################################################################
do_log = False
class GeocodingHTTPHandler(BaseHTTPRequestHandler):
def log_request(self, code='-', size='-'):
pass
def do_GET(self):
try:
if do_log:
f = open('/tmp/log.txt', 'a')
f.write('GET %s\n' % self.path)
f.close()
if self.path.find('/geocoding') != -1:
if self.path == '/geocoding?q=Paris&addressdetails=1&limit=1&email=foo%40bar':
self.send_response(200)
self.send_header('Content-type', 'application/xml')
self.end_headers()
self.wfile.write("""<?xml version="1.0" encoding="UTF-8"?>
<searchresults>
<place lat="48.8566177374844" lon="2.34288146739775" display_name="Paris, Ile-de-France, France metropolitaine">
<county>Paris</county>
<state>Ile-de-France</state>
<country>France metropolitaine</country>
<country_code>fr</country_code>
</place>
</searchresults>""".encode('ascii'))
return
if self.path == '/geocoding?q=NonExistingPlace&addressdetails=1&limit=1&email=foo%40bar':
self.send_response(200)
self.send_header('Content-type', 'application/xml')
self.end_headers()
self.wfile.write("""<?xml version="1.0" encoding="UTF-8"?><searchresults></searchresults>""".encode('ascii'))
return
self.send_error(404, 'File Not Found: %s' % self.path)
return
elif self.path.find('/yahoogeocoding') != -1:
if self.path == '/yahoogeocoding?q=Paris':
self.send_response(200)
self.send_header('Content-type', 'application/xml')
self.end_headers()
self.wfile.write("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?><ResultSet xmlns:ns1="http://www.yahooapis.com/v1/base.rng" version="2.0" xml:lang="en-US"><Error>0</Error><ErrorMessage>No error</ErrorMessage><Locale>en-US</Locale><Found>1</Found><Quality>40</Quality><Result><quality>40</quality><latitude>48.85693</latitude><longitude>2.3412</longitude><offsetlat>48.85693</offsetlat><offsetlon>2.3412</offsetlon><radius>9200</radius><name></name><line1></line1><line2>Paris</line2><line3></line3><line4>France</line4><house></house><street></street><xstreet></xstreet><unittype></unittype><unit></unit><postal></postal><neighborhood></neighborhood><city>Paris</city><county>Paris</county><state>Ile-de-France</state><country>France</country><countrycode>FR</countrycode><statecode></statecode><countycode>75</countycode><uzip>75001</uzip><hash></hash><woeid>615702</woeid><woetype>7</woetype></Result></ResultSet>
<!-- nws03.maps.bf1.yahoo.com uncompressed/chunked Sat Dec 29 04:59:06 PST 2012 -->
<!-- wws09.geotech.bf1.yahoo.com uncompressed/chunked Sat Dec 29 04:59:06 PST 2012 -->""".encode('ascii'))
return
if self.path == '/yahoogeocoding?q=NonExistingPlace':
self.send_response(200)
self.send_header('Content-type', 'application/xml')
self.end_headers()
self.wfile.write("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?><ResultSet xmlns:ns1="http://www.yahooapis.com/v1/base.rng" version="2.0" xml:lang="en-US"><Error>7</Error><ErrorMessage>No result</ErrorMessage><Locale>en-US</Locale><Found>0</Found><Quality>0</Quality></ResultSet>
<!-- nws08.maps.bf1.yahoo.com uncompressed/chunked Sat Dec 29 05:00:45 PST 2012 -->
<!-- wws08.geotech.bf1.yahoo.com uncompressed/chunked Sat Dec 29 05:00:45 PST 2012 -->""".encode('ascii'))
return
self.send_error(404, 'File Not Found: %s' % self.path)
return
elif self.path.find('/geonamesgeocoding') != -1:
if self.path == '/geonamesgeocoding?q=Paris&username=demo':
self.send_response(200)
self.send_header('Content-type', 'application/xml')
self.end_headers()
self.wfile.write("""<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<geonames style="MEDIUM">
<totalResultsCount>2356</totalResultsCount>
<geoname>
<toponymName>Paris</toponymName>
<name>Paris</name>
<lat>48.85341</lat>
<lng>2.3488</lng>
<geonameId>2988507</geonameId>
<countryCode>FR</countryCode>
<countryName>France</countryName>
<fcl>P</fcl>
<fcode>PPLC</fcode>
</geoname>
</geonames>""".encode('ascii'))
return
if self.path == '/geonamesgeocoding?q=NonExistingPlace&username=demo':
self.send_response(200)
self.send_header('Content-type', 'application/xml')
self.end_headers()
self.wfile.write("""<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<geonames style="MEDIUM">
<totalResultsCount>0</totalResultsCount>
</geonames>""".encode('ascii'))
return
self.send_error(404, 'File Not Found: %s' % self.path)
return
elif self.path.find('/binggeocoding') != -1:
if self.path == '/binggeocoding?q=Paris&key=fakekey':
self.send_response(200)
self.send_header('Content-type', 'application/xml')
self.end_headers()
self.wfile.write("""<Response>
<ResourceSets>
<ResourceSet>
<EstimatedTotal>1</EstimatedTotal>
<Resources>
<Location>
<Name>Paris, Paris, France</Name>
<Point>
<Latitude>48</Latitude>
<Longitude>2</Longitude>
</Point>
<BoundingBox>
<SouthLatitude>48</SouthLatitude>
<WestLongitude>2</WestLongitude>
<NorthLatitude>48</NorthLatitude>
<EastLongitude>2</EastLongitude>
</BoundingBox>
<Address>
<AdminDistrict>IdF</AdminDistrict>
<AdminDistrict2>Paris</AdminDistrict2>
<CountryRegion>France</CountryRegion>
<FormattedAddress>Paris, Paris, France</FormattedAddress>
<Locality>Paris</Locality>
</Address>
<GeocodePoint>
<Latitude>48</Latitude>
<Longitude>2</Longitude>
<CalculationMethod>Random</CalculationMethod>
<UsageType>Display</UsageType>
</GeocodePoint>
</Location>
</Resources>
</ResourceSet>
</ResourceSets>
</Response>""".encode('ascii'))
return
if self.path == '/binggeocoding?q=NonExistingPlace&key=fakekey':
self.send_response(200)
self.send_header('Content-type', 'application/xml')
self.end_headers()
self.wfile.write("""<Response>
<ResourceSets>
<ResourceSet>
<EstimatedTotal>0</EstimatedTotal>
<Resources/>
</ResourceSet>
</ResourceSets>
</Response>""".encode('ascii'))
return
self.send_error(404, 'File Not Found: %s' % self.path)
return
# Below is for reverse geocoding
elif self.path.find('/reversegeocoding') != -1:
if self.path == '/reversegeocoding?lon=2.00000000&lat=49.00000000&email=foo%40bar' or \
self.path == '/reversegeocoding?lon=2.00000000&lat=49.00000000&zoom=12&email=foo%40bar':
self.send_response(200)
self.send_header('Content-type', 'application/xml')
self.end_headers()
self.wfile.write("""<?xml version="1.0" encoding="UTF-8"?>
<reversegeocode>
<result place_id="46754274" osm_type="way" osm_id="38621743" ref="Chemin du Cordon" lat="49.0002726061675" lon="1.99514157818059">Chemin du Cordon, Foret de l'Hautil, Triel-sur-Seine, Saint-Germain-en-Laye, Yvelines, Ile-de-France, 78510, France metropolitaine</result>
<addressparts>
<road>Chemin du Cordon</road>
<forest>Foret de l'Hautil</forest>
<city>Triel-sur-Seine</city>
<county>Saint-Germain-en-Laye</county>
<state>Ile-de-France</state>
<postcode>78510</postcode>
<country>France metropolitaine</country>
<country_code>fr</country_code>
</addressparts>
</reversegeocode>""".encode('ascii'))
return
self.send_error(404, 'File Not Found: %s' % self.path)
return
elif self.path.find('/yahooreversegeocoding') != -1:
if self.path == '/yahooreversegeocoding?q=49.00000000,2.00000000&gflags=R':
self.send_response(200)
self.send_header('Content-type', 'application/xml')
self.end_headers()
self.wfile.write("""<?xml version="1.0" encoding="UTF-8" standalone="yes"?><ResultSet xmlns:ns1="http://www.yahooapis.com/v1/base.rng" version="2.0" xml:lang="en-US"><Error>0</Error><ErrorMessage>No error</ErrorMessage><Locale>en-US</Locale><Found>1</Found><Quality>99</Quality><Result><quality>72</quality><latitude>49.001</latitude><longitude>1.999864</longitude><offsetlat>49.001</offsetlat><offsetlon>1.999864</offsetlon><radius>400</radius><name>49.00000000,2.00000000</name><line1>Chemin de Menucourt</line1><line2>78510 Triel-sur-Seine</line2><line3></line3><line4>France</line4><house></house><street>Chemin de Menucourt</street><xstreet></xstreet><unittype></unittype><unit></unit><postal>78510</postal><neighborhood></neighborhood><city>Triel-sur-Seine</city><county>Yvelines</county><state>Ile-de-France</state><country>France</country><countrycode>FR</countrycode><statecode></statecode><countycode>78</countycode><uzip>78510</uzip><hash></hash><woeid>12727518</woeid><woetype>11</woetype></Result></ResultSet>
<!-- nws02.maps.bf1.yahoo.com uncompressed/chunked Sat Dec 29 05:03:31 PST 2012 -->
<!-- wws05.geotech.bf1.yahoo.com uncompressed/chunked Sat Dec 29 05:03:31 PST 2012 -->""".encode('ascii'))
return
self.send_error(404, 'File Not Found: %s' % self.path)
return
elif self.path.find('/geonamesreversegeocoding') != -1:
if self.path == '/geonamesreversegeocoding?lat=49.00000000&lng=2.00000000&username=demo':
self.send_response(200)
self.send_header('Content-type', 'application/xml')
self.end_headers()
self.wfile.write("""<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<geonames>
<geoname>
<toponymName>Paris Basin</toponymName>
<name>Paris Basin</name>
<lat>49</lat>
<lng>2</lng>
<geonameId>2988503</geonameId>
<countryCode>FR</countryCode>
<countryName>France</countryName>
<fcl>T</fcl>
<fcode>DPR</fcode>
<distance>0</distance>
</geoname>
</geonames>""".encode('ascii'))
return
self.send_error(404, 'File Not Found: %s' % self.path)
return
elif self.path.find('/bingreversegeocoding') != -1:
if self.path == '/bingreversegeocoding?49.00000000,2.00000000&key=fakekey':
self.send_response(200)
self.send_header('Content-type', 'application/xml')
self.end_headers()
self.wfile.write("""<Response>
<ResourceSets>
<ResourceSet>
<EstimatedTotal>1</EstimatedTotal>
<Resources>
<Location>
<Name>Paris, Paris, France</Name>
<Point>
<Latitude>48</Latitude>
<Longitude>2</Longitude>
</Point>
<BoundingBox>
<SouthLatitude>48</SouthLatitude>
<WestLongitude>2</WestLongitude>
<NorthLatitude>48</NorthLatitude>
<EastLongitude>2</EastLongitude>
</BoundingBox>
<Address>
<AdminDistrict>IdF</AdminDistrict>
<AdminDistrict2>Paris</AdminDistrict2>
<CountryRegion>France</CountryRegion>
<FormattedAddress>Paris, Paris, France</FormattedAddress>
<Locality>Paris</Locality>
</Address>
<GeocodePoint>
<Latitude>48</Latitude>
<Longitude>2</Longitude>
<CalculationMethod>Random</CalculationMethod>
<UsageType>Display</UsageType>
</GeocodePoint>
</Location>
</Resources>
</ResourceSet>
</ResourceSets>
</Response>""".encode('ascii'))
return
self.send_error(404, 'File Not Found: %s' % self.path)
return
return
except IOError:
pass
self.send_error(404, 'File Not Found: %s' % self.path)
###############################################################################
def test_ogr_sql_sqlite_start_webserver():
ogrtest.webserver_process = None
ogrtest.webserver_port = 0
if gdal.GetDriverByName('HTTP') is None:
pytest.skip()
(ogrtest.webserver_process, ogrtest.webserver_port) = webserver.launch(handler=GeocodingHTTPHandler)
if ogrtest.webserver_port == 0:
pytest.skip()
###############################################################################
# Test ogr_geocode()
def test_ogr_sql_sqlite_16(service=None, template='http://127.0.0.1:%d/geocoding?q=%%s'):
if ogrtest.webserver_port == 0:
pytest.skip()
gdal.SetConfigOption('OGR_GEOCODE_APPLICATION', 'GDAL/OGR autotest suite')
gdal.SetConfigOption('OGR_GEOCODE_EMAIL', 'foo@bar')
gdal.SetConfigOption('OGR_GEOCODE_QUERY_TEMPLATE', template % ogrtest.webserver_port)
gdal.SetConfigOption('OGR_GEOCODE_DELAY', '0.1')
gdal.SetConfigOption('OGR_GEOCODE_SERVICE', service)
if service == 'GEONAMES':
gdal.SetConfigOption('OGR_GEOCODE_USERNAME', 'demo')
elif service == 'BING':
gdal.SetConfigOption('OGR_GEOCODE_KEY', 'fakekey')
for cache_filename in ['tmp/ogr_geocode_cache.sqlite', 'tmp/ogr_geocode_cache.csv']:
gdal.Unlink(cache_filename)
gdal.SetConfigOption('OGR_GEOCODE_CACHE_FILE', cache_filename)
ds = ogr.GetDriverByName("Memory").CreateDataSource("my_ds")
if service == 'BING':
name_field = "Name"
else:
name_field = "display_name"
for sql in ["SELECT ogr_geocode('Paris')",
"SELECT ogr_geocode('Paris', 'geometry')",
"SELECT ogr_geocode('Paris', '%s') AS %s" % (name_field, name_field),
"SELECT ogr_geocode('Paris', 'raw') AS raw"]:
sql_lyr = ds.ExecuteSQL(sql, dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if feat is None:
print(sql)
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
if ((sql == "SELECT ogr_geocode('Paris')" or
sql == "SELECT ogr_geocode('Paris', 'geometry')") and feat.GetGeometryRef() is None) or \
(sql == "SELECT ogr_geocode('Paris', '%s')" % name_field and not feat.IsFieldSet(name_field)) or \
(sql == "SELECT ogr_geocode('Paris', 'raw')" and not feat.IsFieldSet('raw')):
feat.DumpReadable()
print(sql)
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
ds.ReleaseResultSet(sql_lyr)
for sql in ["SELECT ogr_geocode('NonExistingPlace')", "SELECT ogr_geocode('Error')"]:
sql_lyr = ds.ExecuteSQL(sql, dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if feat is None:
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
if feat.GetGeometryRef() is not None:
feat.DumpReadable()
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
ds.ReleaseResultSet(sql_lyr)
# Test various syntax errors
sql_lyr = ds.ExecuteSQL("SELECT ogr_geocode()", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
sql_lyr = ds.ExecuteSQL("SELECT ogr_geocode(5)", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
sql_lyr = ds.ExecuteSQL("SELECT ogr_geocode('Paris', 5)", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
sql_lyr = ds.ExecuteSQL("SELECT ogr_geocode('Paris', 'geometry', 5)", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
ds = None
# Check cache existence
cache_ds = ogr.Open(cache_filename)
assert cache_ds is not None
if cache_ds.GetDriver().GetName().lower() != cache_filename[cache_filename.find('.') + 1:].lower():
print(cache_ds.GetDriver().GetName())
print(cache_filename)
pytest.fail()
cache_ds = None
gdal.Unlink(cache_filename)
ds = None
###############################################################################
# Test ogr_geocode_reverse()
def test_ogr_sql_sqlite_17(service=None, template='http://127.0.0.1:%d/reversegeocoding?lon={lon}&lat={lat}'):
if ogrtest.webserver_port == 0:
pytest.skip()
gdal.SetConfigOption('OGR_GEOCODE_APPLICATION', 'GDAL/OGR autotest suite')
gdal.SetConfigOption('OGR_GEOCODE_EMAIL', 'foo@bar')
gdal.SetConfigOption('OGR_GEOCODE_REVERSE_QUERY_TEMPLATE', template % ogrtest.webserver_port)
gdal.SetConfigOption('OGR_GEOCODE_DELAY', '0.1')
gdal.SetConfigOption('OGR_GEOCODE_SERVICE', service)
if service == 'GEONAMES':
gdal.SetConfigOption('OGR_GEOCODE_USERNAME', 'demo')
elif service == 'BING':
gdal.SetConfigOption('OGR_GEOCODE_KEY', 'fakekey')
for cache_filename in ['tmp/ogr_geocode_cache.sqlite', 'tmp/ogr_geocode_cache.csv']:
gdal.Unlink(cache_filename)
gdal.SetConfigOption('OGR_GEOCODE_CACHE_FILE', cache_filename)
ds = ogr.GetDriverByName("Memory").CreateDataSource("my_ds")
if service == 'GEONAMES':
name_field = "name"
elif service == 'BING':
name_field = "Name"
else:
name_field = "display_name"
sql_list = ["SELECT ogr_geocode_reverse(2,49,'%s') AS %s" % (name_field, name_field),
"SELECT ogr_geocode_reverse(2,49,'%s','zoom=12') AS %s" % (name_field, name_field),
"SELECT ogr_geocode_reverse(2.0,49.0,'%s') AS %s" % (name_field, name_field),
"SELECT ogr_geocode_reverse(2.0,49.0,'raw') AS raw"]
if ogrtest.has_spatialite:
sql_list.append("SELECT ogr_geocode_reverse(MakePoint(2,49),'%s') AS %s" % (name_field, name_field))
sql_list.append("SELECT ogr_geocode_reverse(MakePoint(2,49),'%s','zoom=12') AS %s" % (name_field, name_field))
for sql in sql_list:
sql_lyr = ds.ExecuteSQL(sql, dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if feat is None:
print(sql)
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
if sql.find('raw') != -1:
field_to_test = 'raw'
else:
field_to_test = name_field
if not feat.IsFieldSet(field_to_test):
feat.DumpReadable()
print(sql)
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
ds.ReleaseResultSet(sql_lyr)
# Test various syntax errors
sql_lyr = ds.ExecuteSQL("SELECT ogr_geocode_reverse()", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
sql_lyr = ds.ExecuteSQL("SELECT ogr_geocode_reverse(2)", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
sql_lyr = ds.ExecuteSQL("SELECT ogr_geocode_reverse(2, 'foo')", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
sql_lyr = ds.ExecuteSQL("SELECT ogr_geocode_reverse(2, 49)", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
if ogrtest.has_spatialite:
sql_lyr = ds.ExecuteSQL("SELECT ogr_geocode_reverse(MakePoint(2,49))", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
sql_lyr = ds.ExecuteSQL("SELECT ogr_geocode_reverse(MakePoint(2,49), 5)", dialect='SQLite')
ds.ReleaseResultSet(sql_lyr)
ds = None
# Check cache existence
cache_ds = ogr.Open(cache_filename)
assert cache_ds is not None
cache_ds = None
gdal.Unlink(cache_filename)
ds = None
###############################################################################
# Test ogr_geocode() with Yahoo geocoding service
def test_ogr_sql_sqlite_18():
return test_ogr_sql_sqlite_16('YAHOO', 'http://127.0.0.1:%d/yahoogeocoding?q=%%s')
###############################################################################
# Test ogr_geocode_reverse() with Yahoo geocoding service
def test_ogr_sql_sqlite_19():
return test_ogr_sql_sqlite_17('YAHOO', 'http://127.0.0.1:%d/yahooreversegeocoding?q={lat},{lon}&gflags=R')
###############################################################################
# Test ogr_geocode() with GeoNames.org geocoding service
def test_ogr_sql_sqlite_20():
return test_ogr_sql_sqlite_16('GEONAMES', 'http://127.0.0.1:%d/geonamesgeocoding?q=%%s')
###############################################################################
# Test ogr_geocode_reverse() with GeoNames.org geocoding service
def test_ogr_sql_sqlite_21():
return test_ogr_sql_sqlite_17('GEONAMES', 'http://127.0.0.1:%d/geonamesreversegeocoding?lat={lat}&lng={lon}')
###############################################################################
# Test ogr_geocode() with Bing geocoding service
def test_ogr_sql_sqlite_22():
return test_ogr_sql_sqlite_16('BING', 'http://127.0.0.1:%d/binggeocoding?q=%%s')
###############################################################################
# Test ogr_geocode_reverse() with Bing geocoding service
def test_ogr_sql_sqlite_23():
return test_ogr_sql_sqlite_17('BING', 'http://127.0.0.1:%d/bingreversegeocoding?{lat},{lon}')
###############################################################################
# Test ogr_deflate() and ogr_inflate()
def test_ogr_sql_sqlite_24():
ds = ogr.GetDriverByName("Memory").CreateDataSource("my_ds")
# Very short string
sql_lyr = ds.ExecuteSQL("SELECT CAST(ogr_inflate(ogr_deflate('ab')) AS VARCHAR)", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if feat.GetField(0) != 'ab':
feat.DumpReadable()
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
ds.ReleaseResultSet(sql_lyr)
# Big very compressible string
bigstr = 'a' * 10000
sql_lyr = ds.ExecuteSQL("SELECT CAST(ogr_inflate(ogr_deflate('%s')) AS VARCHAR)" % bigstr, dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if feat.GetField(0) != bigstr:
feat.DumpReadable()
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
ds.ReleaseResultSet(sql_lyr)
# Blob
sql_lyr = ds.ExecuteSQL("SELECT ogr_inflate(ogr_deflate(x'0203', 5))", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if feat.GetField(0) != '0203':
feat.DumpReadable()
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
ds.ReleaseResultSet(sql_lyr)
# Test inflating a random binary blob
sql_lyr = ds.ExecuteSQL("SELECT ogr_inflate(x'0203')", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if not feat.IsFieldNull(0):
feat.DumpReadable()
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
ds.ReleaseResultSet(sql_lyr)
# Error case
gdal.PushErrorHandler('CPLQuietErrorHandler')
sql_lyr = ds.ExecuteSQL("SELECT ogr_deflate()", dialect='SQLite')
gdal.PopErrorHandler()
if sql_lyr is not None:
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
# Error case
sql_lyr = ds.ExecuteSQL("SELECT ogr_deflate('a', 'b')", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if not feat.IsFieldNull(0):
feat.DumpReadable()
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
ds.ReleaseResultSet(sql_lyr)
# Error case
gdal.PushErrorHandler('CPLQuietErrorHandler')
sql_lyr = ds.ExecuteSQL("SELECT ogr_inflate()", dialect='SQLite')
gdal.PopErrorHandler()
if sql_lyr is not None:
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
# Error case
sql_lyr = ds.ExecuteSQL("SELECT ogr_inflate('a')", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if not feat.IsFieldNull(0):
feat.DumpReadable()
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
ds.ReleaseResultSet(sql_lyr)
###############################################################################
def test_ogr_sql_sqlite_stop_webserver():
if ogrtest.webserver_port == 0:
pytest.skip()
webserver.server_stop(ogrtest.webserver_process, ogrtest.webserver_port)
###############################################################################
# If Spatialite is NOT available, test some of the minimal spatial functions
# implemented. Test it also if spatialite is available, so we have a cross
# validation...
def ogr_sql_sqlite_25_test_errors(ds, fct):
for val in ['null', "'foo'", "x'00010203'"]:
sql_lyr = ds.ExecuteSQL("SELECT %s(%s)" % (fct, val), dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if not feat.IsFieldNull(0):
feat.DumpReadable()
ds.ReleaseResultSet(sql_lyr)
print(val)
return False
ds.ReleaseResultSet(sql_lyr)
return True
def test_ogr_sql_sqlite_25():
# if ogrtest.has_spatialite is True:
# return 'skip'
ds = ogr.GetDriverByName("Memory").CreateDataSource("my_ds")
# Test ST_AsText, ST_GeomFromText, ST_AsBinary, ST_GeomFromWKB
sql_lyr = ds.ExecuteSQL("SELECT ST_GeomFromWKB(ST_AsBinary(ST_GeomFromText(ST_AsText(ST_GeomFromText('POINT (0 1)')),4326)))", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'POINT (0 1)':
feat.DumpReadable()
ds.ReleaseResultSet(sql_lyr)
pytest.fail()
ds.ReleaseResultSet(sql_lyr)
for fct in ["ST_AsText", "ST_GeomFromText", "ST_AsBinary", "ST_GeomFromWKB"]:
assert ogr_sql_sqlite_25_test_errors(ds, fct), ('fail with %s' % fct)
# Test ST_SRID
sql_lyr = ds.ExecuteSQL("SELECT ST_SRID(ST_GeomFromText('POINT(0 0)',4326))", dialect='SQLite')
feat = sql_lyr.GetNextFeature()
val_sql = feat.GetField(0)
ds.ReleaseResultSet(sql_lyr)
assert val_sql == 4326
# Test ST_Area
sql_lyr = ds.ExecuteSQL("SELECT ST_Area(ST_GeomFromText('%s')), ST_Area(null), ST_Area(x'00')" % 'POLYGON((0 0,0 1,1 1,1 0,0 0))', dialect='SQLite')
feat = sql_lyr.GetNextFeature()
val_sql = feat.GetField(0)
val1_sql = feat.GetField(1)
val2_sql = feat.GetField(2)
ds.ReleaseResultSet(sql_lyr)
geomA = ogr.CreateGeometryFromWkt('POLYGON((0 0,0 1,1 1,1 0,0 0))')
val_ogr = geomA.GetArea()
assert abs(val_sql - val_ogr) <= 1e-5
assert val1_sql is None
assert val2_sql is None
def test_ogr_sql_sqlite_26():
if not ogrtest.have_geos():
pytest.skip()
# if ogrtest.has_spatialite is True:
# return 'skip'
ds = ogr.GetDriverByName("Memory").CreateDataSource("my_ds")
geom1_wkt = 'POLYGON((0 0,0 1,1 1,1 0,0 0))'
geom2_wkt = 'POLYGON((0.5 0.5,0.5 1.5,1.5 1.5,1.5 0.5,0.5 0.5))'
geom3_wkt = 'POLYGON((0.25 0.25,0.25 0.75,0.75 0.75,0.75 0.25,0.25 0.25))'
geom4_wkt = 'POLYGON((1 0,1 1,2 1,2 0,1 0))'
# Test ST_Buffer
op_str = 'Buffer'
sql_lyr = ds.ExecuteSQL("SELECT %s(ST_GeomFromText('%s'),0.1)" % (op_str, geom1_wkt), dialect='SQLite')
feat = sql_lyr.GetNextFeature()
geom_sql = feat.GetGeometryRef()
ds.ReleaseResultSet(sql_lyr)
geom = ogr.CreateGeometryFromWkt(geom1_wkt)
geom_geos = geom.Buffer(0.1)
assert geom_sql.Equals(geom_geos) != 0, ('fail with %s' % op_str)
for op_str in ["IsEmpty", "IsSimple", "IsValid"]:
for wkt in ['POLYGON EMPTY', 'POINT(0 1)', 'POLYGON((0 0,1 1,0 1,1 0,0 0))']:
sql_lyr = ds.ExecuteSQL("SELECT ST_%s(ST_GeomFromText('%s'))" % (op_str, wkt), dialect='SQLite')
feat = sql_lyr.GetNextFeature()
b_sql = feat.GetField(0)
ds.ReleaseResultSet(sql_lyr)
b_sql = bool(b_sql == 1)
geom = ogr.CreateGeometryFromWkt(wkt)
op = getattr(geom, op_str)
b_geos = op()
if b_sql != b_geos:
if wkt == 'POLYGON EMPTY':
                print('difference with op = %s and wkt = POLYGON EMPTY' % op_str)
else:
print(wkt)
print(b_sql)
print(b_geos)
pytest.fail('fail with %s' % op_str)
for op_str in ["Intersects", "Equals", "Disjoint",
"Touches", "Crosses", "Within",
"Contains", "Overlaps"]:
for (geomA_wkt, geomB_wkt) in [(geom1_wkt, geom1_wkt),
(geom1_wkt, geom2_wkt),
(geom1_wkt, geom3_wkt),
(geom1_wkt, geom4_wkt)]:
sql_lyr = ds.ExecuteSQL("SELECT ST_%s(ST_GeomFromText('%s'), ST_GeomFromText('%s'))" % (op_str, geomA_wkt, geomB_wkt), dialect='SQLite')
feat = sql_lyr.GetNextFeature()
b_sql = feat.GetField(0)
ds.ReleaseResultSet(sql_lyr)
b_sql = bool(b_sql == 1)
geomA = ogr.CreateGeometryFromWkt(geomA_wkt)
geomB = ogr.CreateGeometryFromWkt(geomB_wkt)
op = getattr(geomA, op_str)
b_geos = op(geomB)
assert b_sql == b_geos, ('fail with %s' % op_str)
for op_str in ["Intersection", "Difference", "Union", "SymDifference"]:
for (geomA_wkt, geomB_wkt) in [(geom1_wkt, geom1_wkt),
(geom1_wkt, geom2_wkt),
(geom1_wkt, geom3_wkt),
(geom1_wkt, geom4_wkt)]:
sql_lyr = ds.ExecuteSQL("SELECT ST_%s(ST_GeomFromText('%s'), ST_GeomFromText('%s'))" % (op_str, geomA_wkt, geomB_wkt), dialect='SQLite')
feat = sql_lyr.GetNextFeature()
geom_sql = feat.GetGeometryRef()
if geom_sql is not None:
geom_sql = geom_sql.Clone()
ds.ReleaseResultSet(sql_lyr)
geomA = ogr.CreateGeometryFromWkt(geomA_wkt)
geomB = ogr.CreateGeometryFromWkt(geomB_wkt)
op = getattr(geomA, op_str)
geom_geos = op(geomB)
if geom_sql is None:
# GEOS can return empty geometry collection, while spatialite
# does not
if geom_geos is not None and geom_geos.IsEmpty() == 0:
print(geomA_wkt)
print(geomB_wkt)
print(geom_geos.ExportToWkt())
pytest.fail('fail with %s' % op_str)
else:
assert geom_sql.Equals(geom_geos) != 0, ('fail with %s' % op_str)
# Error cases
op_str = 'Intersects'
for val in ['null', "'foo'", "x'00010203'"]:
sql_lyr = ds.ExecuteSQL("SELECT ST_%s(ST_GeomFromText('%s'), %s), ST_%s(%s, ST_GeomFromText('%s'))" % (op_str, geom1_wkt, val, op_str, val, geom1_wkt), dialect='SQLite')
feat = sql_lyr.GetNextFeature()
b0_sql = feat.GetField(0)
b1_sql = feat.GetField(1)
ds.ReleaseResultSet(sql_lyr)
assert b0_sql <= 0 and b1_sql <= 0, ('fail with %s' % op_str)
op_str = 'Intersection'
for val in ['null', "'foo'", "x'00010203'"]:
sql_lyr = ds.ExecuteSQL("SELECT ST_%s(ST_GeomFromText('%s'), %s)" % (op_str, geom1_wkt, val), dialect='SQLite')
feat = sql_lyr.GetNextFeature()
geom_sql = feat.GetGeometryRef()
ds.ReleaseResultSet(sql_lyr)
assert geom_sql is None, ('fail with %s' % op_str)
sql_lyr = ds.ExecuteSQL("SELECT ST_%s(%s, ST_GeomFromText('%s'))" % (op_str, val, geom1_wkt), dialect='SQLite')
feat = sql_lyr.GetNextFeature()
geom_sql = feat.GetGeometryRef()
ds.ReleaseResultSet(sql_lyr)
assert geom_sql is None, ('fail with %s' % op_str)
###############################################################################
# Test MIN(), MAX() on a date
def test_ogr_sql_sqlite_27():
ds = ogr.GetDriverByName('Memory').CreateDataSource('')
lyr = ds.CreateLayer('test')
lyr.CreateField(ogr.FieldDefn('DATE', ogr.OFTDateTime))
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField(0, '2013/12/31 23:59:59')
lyr.CreateFeature(feat)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField(0, '2013/01/01 00:00:00')
lyr.CreateFeature(feat)
lyr = ds.ExecuteSQL("SELECT MIN(DATE), MAX(DATE) from test", dialect='SQLite')
assert lyr.GetLayerDefn().GetFieldDefn(0).GetType() == ogr.OFTDateTime
assert lyr.GetLayerDefn().GetFieldDefn(1).GetType() == ogr.OFTDateTime
tr = ogrtest.check_features_against_list(lyr, 'MIN(DATE)', ['2013/01/01 00:00:00'])
lyr.ResetReading()
tr2 = ogrtest.check_features_against_list(lyr, 'MAX(DATE)', ['2013/12/31 23:59:59'])
ds.ReleaseResultSet(lyr)
assert tr
assert tr2
###############################################################################
# Test hstore_get_value()
def test_ogr_sql_sqlite_28():
ds = ogr.GetDriverByName('Memory').CreateDataSource('')
# Invalid parameters
for sql in ["SELECT hstore_get_value('a')"]:
gdal.PushErrorHandler('CPLQuietErrorHandler')
sql_lyr = ds.ExecuteSQL(sql, dialect='SQLite')
gdal.PopErrorHandler()
assert sql_lyr is None, sql
# Invalid hstore syntax or empty result
for sql in ["SELECT hstore_get_value('a', null)",
"SELECT hstore_get_value(null, 'a')",
"SELECT hstore_get_value(1,'a')",
"SELECT hstore_get_value('a',1)",
"SELECT hstore_get_value('a=>b','c')"]:
sql_lyr = ds.ExecuteSQL(sql, dialect='SQLite')
f = sql_lyr.GetNextFeature()
if not f.IsFieldNull(0):
f.DumpReadable()
pytest.fail(sql)
ds.ReleaseResultSet(sql_lyr)
# Valid hstore syntax
for (sql, expected) in [("SELECT hstore_get_value('a=>b', 'a')", 'b'), ]:
sql_lyr = ds.ExecuteSQL(sql, dialect='SQLite')
f = sql_lyr.GetNextFeature()
if f.GetField(0) != expected:
f.DumpReadable()
pytest.fail(sql)
ds.ReleaseResultSet(sql_lyr)
###############################################################################
# Test compat with curve geometries
def test_ogr_sql_sqlite_29():
ds = ogr.GetDriverByName('Memory').CreateDataSource('')
lyr = ds.CreateLayer('test', geom_type=ogr.wkbCircularString)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('CIRCULARSTRING(0 0,1 0,0 0)'))
lyr.CreateFeature(f)
f = None
sql_lyr = ds.ExecuteSQL('select * from test', dialect='SQLite')
geom_type = sql_lyr.GetGeomType()
f = sql_lyr.GetNextFeature()
got_wkt = f.GetGeometryRef().ExportToWkt()
ds.ReleaseResultSet(sql_lyr)
ds = None
assert geom_type == ogr.wkbCircularString
assert got_wkt == 'CIRCULARSTRING (0 0,1 0,0 0)'
###############################################################################
# Test compat with M geometries
def test_ogr_sql_sqlite_30():
ds = ogr.GetDriverByName('Memory').CreateDataSource('')
lyr = ds.CreateLayer('testm', geom_type=ogr.wkbLineStringM)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING M (1 2 3)'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('testzm', geom_type=ogr.wkbLineStringZM)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING ZM (1 2 3 4)'))
lyr.CreateFeature(f)
f = None
sql_lyr = ds.ExecuteSQL('select * from testm', dialect='SQLite')
geom_type = sql_lyr.GetGeomType()
f = sql_lyr.GetNextFeature()
got_wkt = f.GetGeometryRef().ExportToIsoWkt()
ds.ReleaseResultSet(sql_lyr)
assert geom_type == ogr.wkbLineStringM
assert got_wkt == 'LINESTRING M (1 2 3)'
sql_lyr = ds.ExecuteSQL('select * from testzm', dialect='SQLite')
geom_type = sql_lyr.GetGeomType()
f = sql_lyr.GetNextFeature()
got_wkt = f.GetGeometryRef().ExportToIsoWkt()
ds.ReleaseResultSet(sql_lyr)
assert geom_type == ogr.wkbLineStringZM
assert got_wkt == 'LINESTRING ZM (1 2 3 4)'
###############################################################################
# Test filtering complex field name
def test_ogr_sql_sqlite_31():
ds = ogr.GetDriverByName('Memory').CreateDataSource('')
lyr = ds.CreateLayer('test')
lyr.CreateField(ogr.FieldDefn('50M3 @w35Om3 N@M3', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetField(0, 25)
lyr.CreateFeature(f)
f = None
sql_lyr = ds.ExecuteSQL('select * from test where "50M3 @w35Om3 N@M3" = 25', dialect='SQLite')
f = sql_lyr.GetNextFeature()
value = f.GetField(0)
ds.ReleaseResultSet(sql_lyr)
assert value == 25
| 38.155866 | 1,042 | 0.608428 |
c074bb1ecb84c2710fec9f42b1c87eb1f9b9ff95
| 1,546 |
py
|
Python
|
tests/run_tests.py
|
vb64/test.helper.flask
|
cc807dcea7554936fa291e83b0b0f86d91797865
|
[
"MIT"
] | null | null | null |
tests/run_tests.py
|
vb64/test.helper.flask
|
cc807dcea7554936fa291e83b0b0f86d91797865
|
[
"MIT"
] | null | null | null |
tests/run_tests.py
|
vb64/test.helper.flask
|
cc807dcea7554936fa291e83b0b0f86d91797865
|
[
"MIT"
] | null | null | null |
"""
Module for environment setup and tests runner
"""
import os
import sys
import logging
from unittest import TestLoader, TextTestRunner
def path_setup():
pass
def main():
"""
Tests runner
"""
path_setup()
sys.path.insert(1, os.getcwd())
import tester_coverage
verbose = 1
suite = None
loader = TestLoader()
buf = True
log_level = logging.NOTSET
if len(sys.argv) > 1:
arg1 = sys.argv[1]
if arg1 == 'verbose':
verbose = 2
suite = loader.discover('tests')
log_level = logging.CRITICAL
elif arg1 == 'combine':
return tester_coverage.combine(dest_dir=".", data_dir="tests")
elif arg1 == 'clean':
return tester_coverage.clean("tests")
elif arg1 == 'increment':
tester_coverage.is_increment = True
suite = loader.discover('tests')
else:
lst = arg1.split('.')
tester_coverage.clean_coverage_data(
os.path.join(*lst[:-1]),
".coverage.{}".format(lst[-1])
)
suite = loader.loadTestsFromNames([sys.argv[1]])
buf = False
tester_coverage.is_increment = True
else:
tester_coverage.clean('tests')
suite = loader.discover('tests')
log_level = logging.CRITICAL
logging.disable(log_level)
sys.exit(
0 if TextTestRunner(verbosity=verbose, buffer=buf).run(suite).wasSuccessful() else 1
)
if __name__ == '__main__':
main()
| 24.539683 | 90 | 0.575679 |
92611c8e28c8e13ceecd64173389fae1434f877e
| 5,171 |
py
|
Python
|
src/pipeline/answerers/counting_actions.py
|
samsungnlp/semeval2022-task9
|
2d44d9ebc6224bf7a3f70182bf7b81a7ab356370
|
[
"Apache-2.0"
] | null | null | null |
src/pipeline/answerers/counting_actions.py
|
samsungnlp/semeval2022-task9
|
2d44d9ebc6224bf7a3f70182bf7b81a7ab356370
|
[
"Apache-2.0"
] | null | null | null |
src/pipeline/answerers/counting_actions.py
|
samsungnlp/semeval2022-task9
|
2d44d9ebc6224bf7a3f70182bf7b81a7ab356370
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, Any, List
import nltk
import io
from src.pipeline.interface_question_answering import QuestionAnsweringBase, PredictedAnswer
from src.pipeline.question_category import QuestionCategory
from src.unpack_data import QuestionAnswerRecipe
from src.putty_lemmatizer import PuttyLemmatizer
import inflect
def construct_map_with_i_and_h_columns(question: QuestionAnswerRecipe) -> Dict[str, int]:
tools_and_habitats_map = {}
for sentence in question.recipe.annotated_recipe.annotated_sentences:
for token in sentence.annotated_tokens:
words = []
if token.relation1:
words.extend(token.get_whole_entry_from_relation1("Drop"))
words.extend(token.get_whole_entry_from_relation1("Tool"))
words.extend(token.get_whole_entry_from_relation1("Habitat"))
words.extend(token.get_whole_entry_from_relation1("Result"))
words.extend(token.get_whole_entry_from_relation1("Shadow"))
if token.relation2:
words.append(token.relation2)
for w in words:
if w in tools_and_habitats_map:
tools_and_habitats_map[w] += 1
else:
tools_and_habitats_map[w] = 1
return tools_and_habitats_map
def find_occurrences(question_noun, tools_and_habitats_map):
question_noun = question_noun.strip().replace(" ", "_")
result = [(key, value) for key, value in tools_and_habitats_map.items() if
nltk.edit_distance(key.split(".")[0], question_noun) < 2]
return result
def count_raw_occurences(question_noun, question):
count = 0
for sentence in question.recipe.annotated_recipe.annotated_sentences:
question_noun = question_noun.replace("_", " ")
if question_noun in sentence.raw_sentence.lower() and "ingredients" not in sentence.sentence_id:
count += sentence.raw_sentence.lower().count(question_noun)
return count
def calculate_result(my_list: List) -> int:
sum_of_all_occurrences = 0
for r in my_list:
sum_of_all_occurrences += r[1]
return sum_of_all_occurrences
class QuestionAnswererCountingActions(QuestionAnsweringBase):
DESCRIPTION = "QuestionAnswerer How many actions"
"""
    Answers the question "How many actions does it take to ... ?".
    :param question: question to be answered
    :param question_category: category of the question (not used directly by this answerer)
    :param more_info: extra options; only "dump_logs_for_bad_answers" is read here
:return: count
"""
def __init__(self):
self.lemmatizer = PuttyLemmatizer()
self.inflect_engine = inflect.engine()
self.inflect_engine.classical()
def answer_a_question(self, question: QuestionAnswerRecipe, question_category: QuestionCategory,
more_info: Dict[str, Any] = {}) -> PredictedAnswer:
outstream = io.StringIO()
print(f"Id = {question.recipe.id} || {question.question_class}", file=outstream)
print(f"Q = {question.question}", file=outstream)
the_object = self.get_object_from_question(question)
print(f"Object = {the_object}", file=outstream)
relations_map = construct_map_with_i_and_h_columns(question)
list_singular = find_occurrences(the_object, relations_map)
print(f"Singular = {list_singular}", file=outstream)
object_as_plural = self.inflect_engine.plural_noun(the_object)
print(f"Plural = {object_as_plural}", file=outstream)
list_plural = find_occurrences(object_as_plural, relations_map)
print(f"Plural = {list_plural}", file=outstream)
result_singular = calculate_result(list_singular)
result_plural = calculate_result(list_plural)
final_answer = max(result_singular, result_plural)
print(f"Trying from relation match = {final_answer}", file=outstream)
last_rule = "Relation match"
if not final_answer:
final_answer = count_raw_occurences(the_object, question)
last_rule = "Raw occurences"
print(f"Trying raw occurences = {final_answer}", file=outstream)
final_answer = str(final_answer) if final_answer else None
if not final_answer:
last_rule = "Nothing found"
print(f"Final = {final_answer}", file=outstream)
print(f"Truth = {question.answer}\n", file=outstream)
details_str = f"Last rule = {last_rule}"
if (final_answer != question.answer and question.answer != "N/A") \
or (question.answer == "N/A" and final_answer is not None):
if more_info.get("dump_logs_for_bad_answers", False):
print(outstream.getvalue())
more_info_for_answer = {"source": QuestionAnswererCountingActions.DESCRIPTION,
"details_for_excel": details_str}
return PredictedAnswer(final_answer, raw_question=question.question, confidence=None,
more_info=more_info_for_answer)
def get_object_from_question(self, question: QuestionAnswerRecipe) -> str:
q = question.question.replace("?", "").lower().strip().split(" ")[9:]
return " ".join(q)
| 41.701613 | 104 | 0.677432 |
ba1cff26b4ce3d5d5d479638c22956a976d91b68
| 7,304 |
py
|
Python
|
selfdrive/car/mazda/carstate.py
|
betashepherd/dragonpilot
|
acced02e1b3c37df407530b79167795a9ddb416a
|
[
"MIT"
] | 1 |
2019-09-19T12:23:26.000Z
|
2019-09-19T12:23:26.000Z
|
selfdrive/car/mazda/carstate.py
|
hankteng19650323/dragonpilot
|
35f5828690d0e98eb605661354b50d59a8b190ba
|
[
"MIT"
] | null | null | null |
selfdrive/car/mazda/carstate.py
|
hankteng19650323/dragonpilot
|
35f5828690d0e98eb605661354b50d59a8b190ba
|
[
"MIT"
] | null | null | null |
from cereal import car
from selfdrive.config import Conversions as CV
from opendbc.can.can_define import CANDefine
from opendbc.can.parser import CANParser
from selfdrive.car.interfaces import CarStateBase
from selfdrive.car.mazda.values import DBC, LKAS_LIMITS, GEN1
class CarState(CarStateBase):
def __init__(self, CP):
super().__init__(CP)
can_define = CANDefine(DBC[CP.carFingerprint]["pt"])
self.shifter_values = can_define.dv["GEAR"]["GEAR"]
self.crz_btns_counter = 0
self.acc_active_last = False
self.low_speed_alert = False
self.lkas_allowed_speed = False
def update(self, cp, cp_cam):
ret = car.CarState.new_message()
ret.wheelSpeeds = self.get_wheel_speeds(
cp.vl["WHEEL_SPEEDS"]["FL"],
cp.vl["WHEEL_SPEEDS"]["FR"],
cp.vl["WHEEL_SPEEDS"]["RL"],
cp.vl["WHEEL_SPEEDS"]["RR"],
)
ret.vEgoRaw = (ret.wheelSpeeds.fl + ret.wheelSpeeds.fr + ret.wheelSpeeds.rl + ret.wheelSpeeds.rr) / 4.
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
# Match panda speed reading
speed_kph = cp.vl["ENGINE_DATA"]["SPEED"]
ret.standstill = speed_kph < .1
can_gear = int(cp.vl["GEAR"]["GEAR"])
ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(can_gear, None))
ret.genericToggle = bool(cp.vl["BLINK_INFO"]["HIGH_BEAMS"])
ret.leftBlindspot = cp.vl["BSM"]["LEFT_BS1"] == 1
ret.rightBlindspot = cp.vl["BSM"]["RIGHT_BS1"] == 1
ret.leftBlinker, ret.rightBlinker = self.update_blinker_from_lamp(40, cp.vl["BLINK_INFO"]["LEFT_BLINK"] == 1,
cp.vl["BLINK_INFO"]["RIGHT_BLINK"] == 1)
ret.steeringAngleDeg = cp.vl["STEER"]["STEER_ANGLE"]
ret.steeringTorque = cp.vl["STEER_TORQUE"]["STEER_TORQUE_SENSOR"]
ret.steeringPressed = abs(ret.steeringTorque) > LKAS_LIMITS.STEER_THRESHOLD
ret.steeringTorqueEps = cp.vl["STEER_TORQUE"]["STEER_TORQUE_MOTOR"]
ret.steeringRateDeg = cp.vl["STEER_RATE"]["STEER_ANGLE_RATE"]
# TODO: this should be from 0 - 1.
ret.brakePressed = cp.vl["PEDALS"]["BRAKE_ON"] == 1
ret.brake = cp.vl["BRAKE"]["BRAKE_PRESSURE"]
ret.seatbeltUnlatched = cp.vl["SEATBELT"]["DRIVER_SEATBELT"] == 0
ret.doorOpen = any([cp.vl["DOORS"]["FL"], cp.vl["DOORS"]["FR"],
cp.vl["DOORS"]["BL"], cp.vl["DOORS"]["BR"]])
# TODO: this should be from 0 - 1.
ret.gas = cp.vl["ENGINE_DATA"]["PEDAL_GAS"]
ret.gasPressed = ret.gas > 0
# Either due to low speed or hands off
lkas_blocked = cp.vl["STEER_RATE"]["LKAS_BLOCK"] == 1
# LKAS is enabled at 52kph going up and disabled at 45kph going down
# wait for LKAS_BLOCK signal to clear when going up since it lags behind the speed sometimes
if speed_kph > LKAS_LIMITS.ENABLE_SPEED and not lkas_blocked:
self.lkas_allowed_speed = True
elif speed_kph < LKAS_LIMITS.DISABLE_SPEED:
self.lkas_allowed_speed = False
# TODO: the signal used for available seems to be the adaptive cruise signal, instead of the main on
# it should be used for carState.cruiseState.nonAdaptive instead
ret.cruiseState.available = cp.vl["CRZ_CTRL"]["CRZ_AVAILABLE"] == 1
ret.cruiseState.enabled = cp.vl["CRZ_CTRL"]["CRZ_ACTIVE"] == 1
ret.cruiseState.speed = cp.vl["CRZ_EVENTS"]["CRZ_SPEED"] * CV.KPH_TO_MS
# dp
ret.cruiseActualEnabled = ret.cruiseState.enabled
ret.cruiseState.speed = self.cruise_speed
if ret.cruiseState.enabled:
if not self.lkas_allowed_speed and self.acc_active_last:
self.low_speed_alert = True
else:
self.low_speed_alert = False
# Check if LKAS is disabled due to lack of driver torque when all other states indicate
# it should be enabled (steer lockout). Don't warn until we actually get lkas active
    # and lose it again, i.e., after initial lkas activation
ret.steerWarning = self.lkas_allowed_speed and lkas_blocked
self.acc_active_last = ret.cruiseState.enabled
self.cam_lkas = cp_cam.vl["CAM_LKAS"]
self.cam_laneinfo = cp_cam.vl["CAM_LANEINFO"]
self.crz_btns_counter = cp.vl["CRZ_BTNS"]["CTR"]
ret.steerError = cp_cam.vl["CAM_LKAS"]["ERR_BIT_1"] == 1
# dp - brake lights
ret.brakeLights = ret.brakePressed
return ret
@staticmethod
def get_can_parser(CP):
# this function generates lists for signal, messages and initial values
signals = [
# sig_name, sig_address, default
("LEFT_BLINK", "BLINK_INFO", 0),
("RIGHT_BLINK", "BLINK_INFO", 0),
("HIGH_BEAMS", "BLINK_INFO", 0),
("STEER_ANGLE", "STEER", 0),
("STEER_ANGLE_RATE", "STEER_RATE", 0),
("STEER_TORQUE_SENSOR", "STEER_TORQUE", 0),
("STEER_TORQUE_MOTOR", "STEER_TORQUE", 0),
("FL", "WHEEL_SPEEDS", 0),
("FR", "WHEEL_SPEEDS", 0),
("RL", "WHEEL_SPEEDS", 0),
("RR", "WHEEL_SPEEDS", 0),
]
checks = [
# sig_address, frequency
("BLINK_INFO", 10),
("STEER", 67),
("STEER_RATE", 83),
("STEER_TORQUE", 83),
("WHEEL_SPEEDS", 100),
]
if CP.carFingerprint in GEN1:
signals += [
("LKAS_BLOCK", "STEER_RATE", 0),
("LKAS_TRACK_STATE", "STEER_RATE", 0),
("HANDS_OFF_5_SECONDS", "STEER_RATE", 0),
("CRZ_ACTIVE", "CRZ_CTRL", 0),
("CRZ_AVAILABLE", "CRZ_CTRL", 0),
("CRZ_SPEED", "CRZ_EVENTS", 0),
("STANDSTILL", "PEDALS", 0),
("BRAKE_ON", "PEDALS", 0),
("BRAKE_PRESSURE", "BRAKE", 0),
("GEAR", "GEAR", 0),
("DRIVER_SEATBELT", "SEATBELT", 0),
("FL", "DOORS", 0),
("FR", "DOORS", 0),
("BL", "DOORS", 0),
("BR", "DOORS", 0),
("PEDAL_GAS", "ENGINE_DATA", 0),
("SPEED", "ENGINE_DATA", 0),
("CTR", "CRZ_BTNS", 0),
("LEFT_BS1", "BSM", 0),
("RIGHT_BS1", "BSM", 0),
]
checks += [
("ENGINE_DATA", 100),
("CRZ_CTRL", 50),
("CRZ_EVENTS", 50),
("CRZ_BTNS", 10),
("PEDALS", 50),
("BRAKE", 50),
("SEATBELT", 10),
("DOORS", 10),
("GEAR", 20),
("BSM", 10),
]
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 0)
@staticmethod
def get_cam_can_parser(CP):
signals = []
checks = []
if CP.carFingerprint in GEN1:
signals += [
# sig_name, sig_address, default
("LKAS_REQUEST", "CAM_LKAS", 0),
("CTR", "CAM_LKAS", 0),
("ERR_BIT_1", "CAM_LKAS", 0),
("LINE_NOT_VISIBLE", "CAM_LKAS", 0),
("BIT_1", "CAM_LKAS", 1),
("ERR_BIT_2", "CAM_LKAS", 0),
("STEERING_ANGLE", "CAM_LKAS", 0),
("ANGLE_ENABLED", "CAM_LKAS", 0),
("CHKSUM", "CAM_LKAS", 0),
("LINE_VISIBLE", "CAM_LANEINFO", 0),
("LINE_NOT_VISIBLE", "CAM_LANEINFO", 1),
("LANE_LINES", "CAM_LANEINFO", 0),
("BIT1", "CAM_LANEINFO", 0),
("BIT2", "CAM_LANEINFO", 0),
("BIT3", "CAM_LANEINFO", 0),
("NO_ERR_BIT", "CAM_LANEINFO", 1),
("S1", "CAM_LANEINFO", 0),
("S1_HBEAM", "CAM_LANEINFO", 0),
]
checks += [
# sig_address, frequency
("CAM_LANEINFO", 2),
("CAM_LKAS", 16),
]
return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 2)
| 34.947368 | 113 | 0.605148 |
c1a2e46047da0fbd283fba50a2a63b617a0fad25
| 17,702 |
py
|
Python
|
tempest/cmd/javelin.py
|
rcbops-qe/tempest
|
88960aa32c473b64072671541a136dbae41b1d4c
|
[
"Apache-2.0"
] | null | null | null |
tempest/cmd/javelin.py
|
rcbops-qe/tempest
|
88960aa32c473b64072671541a136dbae41b1d4c
|
[
"Apache-2.0"
] | null | null | null |
tempest/cmd/javelin.py
|
rcbops-qe/tempest
|
88960aa32c473b64072671541a136dbae41b1d4c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Javelin makes resources that should survive an upgrade.
Javelin is a tool for creating, verifying, and deleting a small set of
resources in a declarative way.
"""
import logging
import os
import sys
import unittest
import yaml
import argparse
import tempest.auth
from tempest import config
from tempest import exceptions
from tempest.services.compute.json import flavors_client
from tempest.services.compute.json import servers_client
from tempest.services.identity.json import identity_client
from tempest.services.image.v2.json import image_client
from tempest.services.object_storage import container_client
from tempest.services.object_storage import object_client
from tempest.services.volume.json import volumes_client
OPTS = {}
USERS = {}
RES = {}
LOG = None
class OSClient(object):
_creds = None
identity = None
servers = None
def __init__(self, user, pw, tenant):
_creds = tempest.auth.KeystoneV2Credentials(
username=user,
password=pw,
tenant_name=tenant)
_auth = tempest.auth.KeystoneV2AuthProvider(_creds)
self.identity = identity_client.IdentityClientJSON(_auth)
self.servers = servers_client.ServersClientJSON(_auth)
self.objects = object_client.ObjectClient(_auth)
self.containers = container_client.ContainerClient(_auth)
self.images = image_client.ImageClientV2JSON(_auth)
self.flavors = flavors_client.FlavorsClientJSON(_auth)
self.volumes = volumes_client.VolumesClientJSON(_auth)
def load_resources(fname):
"""Load the expected resources from a yaml flie."""
return yaml.load(open(fname, 'r'))
def keystone_admin():
return OSClient(OPTS.os_username, OPTS.os_password, OPTS.os_tenant_name)
def client_for_user(name):
LOG.debug("Entering client_for_user")
if name in USERS:
user = USERS[name]
LOG.debug("Created client for user %s" % user)
return OSClient(user['name'], user['pass'], user['tenant'])
else:
LOG.error("%s not found in USERS: %s" % (name, USERS))
###################
#
# TENANTS
#
###################
def create_tenants(tenants):
"""Create tenants from resource definition.
Don't create the tenants if they already exist.
"""
admin = keystone_admin()
_, body = admin.identity.list_tenants()
existing = [x['name'] for x in body]
for tenant in tenants:
if tenant not in existing:
admin.identity.create_tenant(tenant)
else:
LOG.warn("Tenant '%s' already exists in this environment" % tenant)
##############
#
# USERS
#
##############
def _users_for_tenant(users, tenant):
u_for_t = []
for user in users:
for n in user:
if user[n]['tenant'] == tenant:
u_for_t.append(user[n])
return u_for_t
def _tenants_from_users(users):
tenants = set()
for user in users:
for n in user:
tenants.add(user[n]['tenant'])
return tenants
def _assign_swift_role(user):
admin = keystone_admin()
resp, roles = admin.identity.list_roles()
role = next(r for r in roles if r['name'] == 'Member')
LOG.debug(USERS[user])
try:
admin.identity.assign_user_role(
USERS[user]['tenant_id'],
USERS[user]['id'],
role['id'])
except exceptions.Conflict:
# don't care if it's already assigned
pass
def create_users(users):
"""Create tenants from resource definition.
Don't create the tenants if they already exist.
"""
global USERS
LOG.info("Creating users")
admin = keystone_admin()
for u in users:
try:
tenant = admin.identity.get_tenant_by_name(u['tenant'])
except exceptions.NotFound:
LOG.error("Tenant: %s - not found" % u['tenant'])
continue
try:
admin.identity.get_user_by_username(tenant['id'], u['name'])
LOG.warn("User '%s' already exists in this environment"
% u['name'])
except exceptions.NotFound:
admin.identity.create_user(
u['name'], u['pass'], tenant['id'],
"%s@%s" % (u['name'], tenant['id']),
enabled=True)
def collect_users(users):
global USERS
LOG.info("Collecting users")
admin = keystone_admin()
for u in users:
tenant = admin.identity.get_tenant_by_name(u['tenant'])
u['tenant_id'] = tenant['id']
USERS[u['name']] = u
body = admin.identity.get_user_by_username(tenant['id'], u['name'])
USERS[u['name']]['id'] = body['id']
class JavelinCheck(unittest.TestCase):
def __init__(self, users, resources):
super(JavelinCheck, self).__init__()
self.users = users
self.res = resources
def runTest(self, *args):
pass
def check(self):
self.check_users()
self.check_objects()
self.check_servers()
# TODO(sdague): Volumes not yet working, bring it back once the
# code is self testing.
# self.check_volumes()
def check_users(self):
"""Check that the users we expect to exist, do.
We don't use the resource list for this because we need to validate
that things like tenantId didn't drift across versions.
"""
LOG.info("checking users")
for name, user in self.users.iteritems():
client = keystone_admin()
_, found = client.identity.get_user(user['id'])
self.assertEqual(found['name'], user['name'])
self.assertEqual(found['tenantId'], user['tenant_id'])
# also ensure we can auth with that user, and do something
# on the cloud. We don't care about the results except that it
# remains authorized.
client = client_for_user(user['name'])
resp, body = client.servers.list_servers()
self.assertEqual(resp['status'], '200')
def check_objects(self):
"""Check that the objects created are still there."""
if 'objects' not in self.res:
return
LOG.info("checking objects")
for obj in self.res['objects']:
client = client_for_user(obj['owner'])
r, contents = client.objects.get_object(
obj['container'], obj['name'])
source = _file_contents(obj['file'])
self.assertEqual(contents, source)
def check_servers(self):
"""Check that the servers are still up and running."""
if 'servers' not in self.res:
return
LOG.info("checking servers")
for server in self.res['servers']:
client = client_for_user(server['owner'])
found = _get_server_by_name(client, server['name'])
self.assertIsNotNone(
found,
"Couldn't find expected server %s" % server['name'])
r, found = client.servers.get_server(found['id'])
# get the ipv4 address
addr = found['addresses']['private'][0]['addr']
for count in range(60):
return_code = os.system("ping -c1 " + addr)
                if return_code == 0:
break
self.assertNotEqual(count, 59,
"Server %s is not pingable at %s" % (
server['name'], addr))
def check_volumes(self):
"""Check that the volumes are still there and attached."""
if 'volumes' not in self.res:
return
LOG.info("checking volumes")
for volume in self.res['volumes']:
client = client_for_user(volume['owner'])
found = _get_volume_by_name(client, volume['name'])
self.assertIsNotNone(
found,
"Couldn't find expected volume %s" % volume['name'])
            # Verify that the volume's attachment can be retrieved
server_id = _get_server_by_name(client, volume['server'])['id']
attachment = self.client.get_attachment_from_volume(volume)
self.assertEqual(volume['id'], attachment['volume_id'])
self.assertEqual(server_id, attachment['server_id'])
#######################
#
# OBJECTS
#
#######################
def _file_contents(fname):
with open(fname, 'r') as f:
return f.read()
def create_objects(objects):
if not objects:
return
LOG.info("Creating objects")
for obj in objects:
LOG.debug("Object %s" % obj)
_assign_swift_role(obj['owner'])
client = client_for_user(obj['owner'])
client.containers.create_container(obj['container'])
client.objects.create_object(
obj['container'], obj['name'],
_file_contents(obj['file']))
#######################
#
# IMAGES
#
#######################
def _resolve_image(image, imgtype):
name = image[imgtype]
fname = os.path.join(OPTS.devstack_base, image['imgdir'], name)
return name, fname
def create_images(images):
if not images:
return
LOG.info("Creating images")
for image in images:
client = client_for_user(image['owner'])
# only upload a new image if the name isn't there
r, body = client.images.image_list()
names = [x['name'] for x in body]
if image['name'] in names:
LOG.info("Image '%s' already exists" % image['name'])
continue
# special handling for 3 part image
extras = {}
if image['format'] == 'ami':
name, fname = _resolve_image(image, 'aki')
r, aki = client.images.create_image(
'javelin_' + name, 'aki', 'aki')
client.images.store_image(aki.get('id'), open(fname, 'r'))
extras['kernel_id'] = aki.get('id')
name, fname = _resolve_image(image, 'ari')
r, ari = client.images.create_image(
'javelin_' + name, 'ari', 'ari')
client.images.store_image(ari.get('id'), open(fname, 'r'))
extras['ramdisk_id'] = ari.get('id')
_, fname = _resolve_image(image, 'file')
r, body = client.images.create_image(
image['name'], image['format'], image['format'], **extras)
image_id = body.get('id')
client.images.store_image(image_id, open(fname, 'r'))
#######################
#
# SERVERS
#
#######################
def _get_server_by_name(client, name):
r, body = client.servers.list_servers()
for server in body['servers']:
if name == server['name']:
return server
return None
def _get_image_by_name(client, name):
r, body = client.images.image_list()
for image in body:
if name == image['name']:
return image
return None
def _get_flavor_by_name(client, name):
r, body = client.flavors.list_flavors()
for flavor in body:
if name == flavor['name']:
return flavor
return None
def create_servers(servers):
if not servers:
return
LOG.info("Creating servers")
for server in servers:
client = client_for_user(server['owner'])
if _get_server_by_name(client, server['name']):
LOG.info("Server '%s' already exists" % server['name'])
continue
image_id = _get_image_by_name(client, server['image'])['id']
flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
resp, body = client.servers.create_server(server['name'], image_id,
flavor_id)
server_id = body['id']
client.servers.wait_for_server_status(server_id, 'ACTIVE')
def destroy_servers(servers):
if not servers:
return
LOG.info("Destroying servers")
for server in servers:
client = client_for_user(server['owner'])
response = _get_server_by_name(client, server['name'])
if not response:
LOG.info("Server '%s' does not exist" % server['name'])
continue
client.servers.delete_server(response['id'])
client.servers.wait_for_server_termination(response['id'],
ignore_error=True)
#######################
#
# VOLUMES
#
#######################
def _get_volume_by_name(client, name):
r, body = client.volumes.list_volumes()
for volume in body['volumes']:
if name == volume['name']:
return volume
return None
def create_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
        # only create a volume if the name isn't already there
r, body = client.volumes.list_volumes()
if any(item['name'] == volume['name'] for item in body):
continue
client.volumes.create_volume(volume['name'], volume['size'])
def attach_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
server_id = _get_server_by_name(client, volume['server'])['id']
client.volumes.attach_volume(volume['name'], server_id)
#######################
#
# MAIN LOGIC
#
#######################
def create_resources():
LOG.info("Creating Resources")
# first create keystone level resources, and we need to be admin
# for those.
create_tenants(RES['tenants'])
create_users(RES['users'])
collect_users(RES['users'])
# next create resources in a well known order
create_objects(RES['objects'])
create_images(RES['images'])
create_servers(RES['servers'])
# TODO(sdague): volumes definition doesn't work yet, bring it
# back once we're actually executing the code
# create_volumes(RES['volumes'])
# attach_volumes(RES['volumes'])
def destroy_resources():
LOG.info("Destroying Resources")
# Destroy in inverse order of create
# Future
# detach_volumes
# destroy_volumes
destroy_servers(RES['servers'])
LOG.warn("Destroy mode incomplete")
# destroy_images
# destroy_objects
# destroy_users
# destroy_tenants
def get_options():
global OPTS
parser = argparse.ArgumentParser(
description='Create and validate a fixed set of OpenStack resources')
parser.add_argument('-m', '--mode',
metavar='<create|check|destroy>',
required=True,
help=('One of (create, check, destroy)'))
parser.add_argument('-r', '--resources',
required=True,
metavar='resourcefile.yaml',
help='Resources definition yaml file')
parser.add_argument(
'-d', '--devstack-base',
required=True,
metavar='/opt/stack/old',
help='Devstack base directory for retrieving artifacts')
parser.add_argument(
'-c', '--config-file',
metavar='/etc/tempest.conf',
help='path to javelin2(tempest) config file')
# auth bits, letting us also just source the devstack openrc
parser.add_argument('--os-username',
metavar='<auth-user-name>',
default=os.environ.get('OS_USERNAME'),
help=('Defaults to env[OS_USERNAME].'))
parser.add_argument('--os-password',
metavar='<auth-password>',
default=os.environ.get('OS_PASSWORD'),
help=('Defaults to env[OS_PASSWORD].'))
parser.add_argument('--os-tenant-name',
metavar='<auth-tenant-name>',
default=os.environ.get('OS_TENANT_NAME'),
help=('Defaults to env[OS_TENANT_NAME].'))
OPTS = parser.parse_args()
if OPTS.mode not in ('create', 'check', 'destroy'):
print("ERROR: Unknown mode -m %s\n" % OPTS.mode)
parser.print_help()
sys.exit(1)
if OPTS.config_file:
config.CONF.set_config_path(OPTS.config_file)
def setup_logging(debug=True):
global LOG
LOG = logging.getLogger(__name__)
if debug:
LOG.setLevel(logging.DEBUG)
else:
LOG.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
datefmt='%Y-%m-%d %H:%M:%S',
fmt='%(asctime)s.%(msecs).03d - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
LOG.addHandler(ch)
def main():
global RES
get_options()
setup_logging()
RES = load_resources(OPTS.resources)
if OPTS.mode == 'create':
create_resources()
# Make sure the resources we just created actually work
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'check':
collect_users(RES['users'])
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'destroy':
collect_users(RES['users'])
destroy_resources()
else:
LOG.error('Unknown mode %s' % OPTS.mode)
return 1
LOG.info('javelin2 successfully finished')
return 0
if __name__ == "__main__":
sys.exit(main())
| 30.626298 | 79 | 0.597842 |
774742d51af269958fe7708f8a371648a2f170d4
| 4,637 |
py
|
Python
|
avalon/tools/loader/lib.py
|
simonebarbieri/avalon-core
|
cfd4191e364b47de7364096f45d9d9d9a901692a
|
[
"MIT"
] | null | null | null |
avalon/tools/loader/lib.py
|
simonebarbieri/avalon-core
|
cfd4191e364b47de7364096f45d9d9d9a901692a
|
[
"MIT"
] | null | null | null |
avalon/tools/loader/lib.py
|
simonebarbieri/avalon-core
|
cfd4191e364b47de7364096f45d9d9d9a901692a
|
[
"MIT"
] | null | null | null |
from ...vendor.Qt import QtGui
from ...vendor import qtawesome
from ..widgets import OptionalAction, OptionDialog
import inspect
def change_visibility(model, view, column_name, visible):
"""
Hides or shows particular 'column_name'.
"asset" and "subset" columns should be visible only in multiselect
"""
index = model.Columns.index(column_name)
view.setColumnHidden(index, not visible)
def get_selected_items(rows, item_role):
items = []
for row_index in rows:
item = row_index.data(item_role)
if item.get("isGroup"):
continue
elif item.get("isMerged"):
for idx in range(row_index.model().rowCount(row_index)):
child_index = row_index.child(idx, 0)
item = child_index.data(item_role)
if item not in items:
items.append(item)
else:
if item not in items:
items.append(item)
return items
def get_options(action, loader, parent):
# Pop option dialog
options = {}
if getattr(action, "optioned", False):
dialog = OptionDialog(parent)
dialog.setWindowTitle(action.label + " Options")
dialog.create(loader.options)
if not dialog.exec_():
return
# Get option
options = dialog.parse()
return options
def add_representation_loaders_to_menu(loaders, menu):
"""
Loops through provider loaders and adds them to 'menu'.
Expects loaders sorted in requested order.
Expects loaders de-duplicated if wanted.
Args:
loaders(tuple): representation - loader
menu (OptionalMenu):
Returns:
menu (OptionalMenu): with new items
"""
# List the available loaders
for representation, loader in loaders:
label = None
if representation.get("custom_label"):
label = representation.get("custom_label")
if not label:
label = get_label_from_loader(loader, representation)
icon = get_icon_from_loader(loader)
# Optional action
use_option = hasattr(loader, "options")
action = OptionalAction(label, icon, use_option, menu)
if use_option:
# Add option box tip
action.set_option_tip(loader.options)
action.setData((representation, loader))
# Add tooltip and statustip from Loader docstring
tip = inspect.getdoc(loader)
if tip:
action.setToolTip(tip)
action.setStatusTip(tip)
menu.addAction(action)
return menu
def remove_tool_name_from_loaders(available_loaders, tool_name):
    # Iterate over a copy: removing items from the list while iterating over it skips elements.
    for loader in list(available_loaders):
if hasattr(loader, "tool_names"):
if not ("*" in loader.tool_names or
tool_name in loader.tool_names):
available_loaders.remove(loader)
return available_loaders
def get_icon_from_loader(loader):
"""Pull icon info from loader class"""
# Support font-awesome icons using the `.icon` and `.color`
# attributes on plug-ins.
icon = getattr(loader, "icon", None)
if icon is not None:
try:
key = "fa.{0}".format(icon)
color = getattr(loader, "color", "white")
icon = qtawesome.icon(key, color=color)
except Exception as e:
print("Unable to set icon for loader "
"{}: {}".format(loader, e))
icon = None
return icon
def get_label_from_loader(loader, representation=None):
"""Pull label info from loader class"""
label = getattr(loader, "label", None)
if label is None:
label = loader.__name__
if representation:
# Add the representation as suffix
label = "{0} ({1})".format(label, representation['name'])
return label
def get_no_loader_action(menu, one_item_selected=False):
"""Creates dummy no loader option in 'menu'"""
submsg = "your selection."
if one_item_selected:
submsg = "this version."
msg = "No compatible loaders for {}".format(submsg)
print(msg)
icon = qtawesome.icon(
"fa.exclamation",
color=QtGui.QColor(255, 51, 0)
)
action = OptionalAction(("*" + msg), icon, False, menu)
return action
def sort_loaders(loaders, custom_sorter=None):
def sorter(value):
"""Sort the Loaders by their order and then their name"""
Plugin = value[1]
return Plugin.order, Plugin.__name__
if not custom_sorter:
custom_sorter = sorter
return sorted(loaders, key=custom_sorter)
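# Usage sketch (assumption): "pairs" below is a hypothetical, pre-filtered list
# of (representation, loader plugin) tuples, as expected by the helpers above.
#
#   loaders = sort_loaders(pairs)
#   menu = add_representation_loaders_to_menu(loaders, menu)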
| 28.801242 | 74 | 0.617856 |
865567f415576622bf7d49c2e9a6a66c1e0f6f96
| 3,929 |
py
|
Python
|
app/recipe/tests/test_tags_api.py
|
Kelvin-Zhong/recipe-app-api
|
c6b60294bf8c5b132d0165a128f883f3f2e54adf
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
Kelvin-Zhong/recipe-app-api
|
c6b60294bf8c5b132d0165a128f883f3f2e54adf
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
Kelvin-Zhong/recipe-app-api
|
c6b60294bf8c5b132d0165a128f883f3f2e54adf
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
"""Test the publicly available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login required for retrieving tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
"""Test the authorized user tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'[email protected]',
'password'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test retrieving tags"""
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""Test that tags returned are for authenticated user"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
Tag.objects.create(user=user2, name='Fruity')
tag = Tag.objects.create(user=self.user, name='Comfort Food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
def test_create_tag_successful(self):
"""Test creating a new tag"""
payload = {'name': 'Simple'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_tag_invalid(self):
"""Test creating a new tag with invalid payload"""
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipes(self):
"""Test filtering tags by those assigned to recipes"""
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Lunch')
recipe = Recipe.objects.create(
title='Coriander eggs on toast',
time_minutes=10,
price=5.00,
user=self.user,
)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {'assigned_only': '1'})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_tags_assigned_unique(self):
"""Test filtering tags by assigned returns unique items"""
tag = Tag.objects.create(user=self.user, name='Breakfast')
recipe1 = Recipe.objects.create(
title='Pancakes',
time_minutes=5,
price=3.00,
user=self.user
)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.create(
title='Porridge',
time_minutes=3,
price=2.00,
user=self.user
)
recipe2.tags.add(tag)
res = self.client.get(TAGS_URL, {'assigned_only': '1'})
self.assertEqual(len(res.data), 1)
| 31.432 | 71 | 0.630949 |
84a5ea7b32b7c021f9b416aa84ad479e350598a7
| 10,959 |
py
|
Python
|
tests/conftest.py
|
skellet0r/eth-event
|
f1e3916ef0c8d4019420bfeec19b019484c5af32
|
[
"MIT"
] | 35 |
2019-01-06T00:47:09.000Z
|
2022-03-26T03:58:24.000Z
|
tests/conftest.py
|
skellet0r/eth-event
|
f1e3916ef0c8d4019420bfeec19b019484c5af32
|
[
"MIT"
] | 10 |
2019-01-07T00:17:19.000Z
|
2021-06-24T12:56:46.000Z
|
tests/conftest.py
|
skellet0r/eth-event
|
f1e3916ef0c8d4019420bfeec19b019484c5af32
|
[
"MIT"
] | 5 |
2021-02-12T03:10:36.000Z
|
2022-01-11T13:25:16.000Z
|
#!/usr/bin/python3
import pytest
from hexbytes import HexBytes
from eth_event import get_topic_map
# missing 'data' and 'topics'
BASE_LOG = {
"logIndex": 0,
"transactionIndex": 0,
"transactionHash": HexBytes(
"0x9df54439626e5b7fce5ae2f02af47d86535bedaf533403204fcb76ba12eef21c"
), # NOQA: E501
"blockHash": HexBytes("0xaae58fedb68b648857a24b4a29f3e7f2a905d5098a912562f7ddb895e129b087"),
"blockNumber": 2,
"address": "0x3194cBDC3dbcd3E11a07892e7bA5c3394048Cc87",
"type": "mined",
}
LOGS = [
( # BasicTypesEvent
"0x000000000000000000000000000000000000000000000000000000000000000cffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffea00000000000000000000000066ab6d9362d4f35596279692f0251db635165871000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000deadbeef", # NOQA: E501
[HexBytes("0x8be90ba92d3b46c912717b5514ae2cfde5e9acbb5980c2ec5ea937d7586d82ed")],
),
( # ComplexTypesEvent
"0x00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000001b6920616d206120737472696e6721207375636820696d7072657373000000000000000000000000000000000000000000000000000000000000000000000000081234567890abcdef000000000000000000000000000000000000000000000000", # NOQA: E501
[HexBytes("0x34dee2aae457a1f92adebb1c2acc5ea1acfb088b578a4974c114e8082bf6500f")],
),
( # FixedLengthArrayEvent
"0x0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000004ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd6000000000000000000000000000000000000000000000000000000000000002a00000000000000000000000066ab6d9362d4f35596279692f0251db635165871000000000000000000000000000000000000000000000000000000000000000000000000000000000000000066ab6d9362d4f35596279692f0251db6351658710000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000deadbeef00000000000000000000000000000000000000000000000000000000069faded", # NOQA: E501
[HexBytes("0x317cac24adbc0db1e065b9fe727c569313ba08d3e641d73a55955da25c10f1b9")],
),
( # DynamicArrayEvent
"0x00000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000001a000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000002ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd6000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000000200000000000000000000000066ab6d9362d4f35596279692f0251db6351658710000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000deadbeef", # NOQA: E501
[HexBytes("0x15bf5b2fd85b349c3ba8e0687fef3f75d19530656850746a30caed288a9d834b")],
),
( # StructEvent
"0x0000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d6e6f407468616e6b732e636f6d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f2b3120353535203535352d313233340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006173646600000000000000000000000033a4622b82d4c04a53e170c638b944ce27cffce3000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d6e6f407468616e6b732e636f6d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f2b3120353535203535352d313233340000000000000000000000000000000000", # NOQA: E501
[HexBytes("0x2879c8a0baaa8a22a224aed7d8635b7d8e760d16c3f082cd5eba35e9775ab8fc")],
),
( # IndexedEvent
"0x000000000000000000000000000000000000000000000000000000000000123400000000000000000000000066ab6d9362d4f35596279692f0251db635165871", # NOQA: E501
[
HexBytes("0x7e4de51bd76e0680c76e06c0d5694cb33ce2f8c99b62ba846409bce9014638e0"),
HexBytes("0x6e12a6379ea806efe7913a2e70ca6b83ef6d457210264b417f34e79bf5a4e2e9"),
HexBytes("0x0000000000000000000000000000000000000000000000000000000000000666"),
], # NOQA: E501
),
]
@pytest.fixture(scope="session")
def abi():
return [
{
"name": "BasicTypesEvent",
"type": "event",
"anonymous": False,
"inputs": [
{"indexed": False, "name": "a", "type": "uint256"},
{"indexed": False, "name": "b", "type": "int128"},
{"indexed": False, "name": "c", "type": "address"},
{"indexed": False, "name": "d", "type": "bool"},
{"indexed": False, "name": "e", "type": "bytes32"},
],
},
{
"name": "ComplexTypesEvent",
"type": "event",
"anonymous": False,
"inputs": [
{"indexed": False, "name": "a", "type": "string"},
{"indexed": False, "name": "b", "type": "bytes"},
],
},
{
"name": "FixedLengthArrayEvent",
"type": "event",
"anonymous": False,
"inputs": [
{"indexed": False, "name": "a", "type": "uint64[4]"},
{"indexed": False, "name": "b", "type": "int128[2]"},
{"indexed": False, "name": "c", "type": "address[3]"},
{"indexed": False, "name": "d", "type": "bool[2]"},
{"indexed": False, "name": "e", "type": "bytes32[2]"},
],
},
{
"name": "DynamicArrayEvent",
"type": "event",
"anonymous": False,
"inputs": [
{"indexed": False, "name": "a", "type": "uint64[]"},
{"indexed": False, "name": "b", "type": "int128[]"},
{"indexed": False, "name": "c", "type": "address[]"},
{"indexed": False, "name": "d", "type": "bool[]"},
{"indexed": False, "name": "e", "type": "bytes32[]"},
],
},
{
"name": "StructEvent",
"type": "event",
"anonymous": False,
"inputs": [
{
"name": "a",
"type": "tuple",
"indexed": False,
"components": [
{"name": "email", "type": "string"},
{"name": "phone", "type": "string"},
],
},
{
"name": "b",
"type": "tuple",
"indexed": False,
"components": [
{"name": "name", "type": "bytes32"},
{"name": "addr", "type": "address"},
{
"name": "contact",
"type": "tuple",
"components": [
{"name": "email", "type": "string"},
{"name": "phone", "type": "string"},
],
},
],
},
],
},
{
"name": "IndexedEvent",
"type": "event",
"anonymous": False,
"inputs": [
{"indexed": False, "name": "a", "type": "bytes32"},
{"indexed": True, "name": "b", "type": "bytes32[2]"},
{"indexed": True, "name": "c", "type": "bytes32"},
{"indexed": False, "name": "d", "type": "address"},
],
},
{
"name": "AnonymousEventA",
"type": "event",
"anonymous": True,
"inputs": [{"indexed": False, "name": "a", "type": "address"}],
},
{
"name": "AnonymousEventB",
"type": "event",
"anonymous": True,
"inputs": [
{"indexed": False, "name": "a", "type": "bytes32"},
{"indexed": False, "name": "b", "type": "uint256"},
],
},
]
@pytest.fixture(scope="session")
def topic_map(abi):
return get_topic_map(abi)
# auto-parametrize the log fixture with all expected-passing logs
def pytest_generate_tests(metafunc):
log_params = []
for data, topics in LOGS:
log = BASE_LOG.copy()
log["data"] = data
log["topics"] = topics
log_params.append(log)
if "log" in metafunc.fixturenames:
metafunc.parametrize("log", log_params)
@pytest.fixture
def complex_log():
log = BASE_LOG.copy()
log["data"] = LOGS[1][0]
log["topics"] = LOGS[1][1]
return log
@pytest.fixture
def indexed_log():
log = BASE_LOG.copy()
log["data"] = LOGS[5][0]
log["topics"] = LOGS[5][1]
return log
@pytest.fixture(scope="session")
def anon_a_log():
log = BASE_LOG.copy()
log["data"] = "0x00000000000000000000000066ab6d9362d4f35596279692f0251db635165871" # NOQA: E501
log["topics"] = []
return log
@pytest.fixture(scope="session")
def anon_b_log():
log = BASE_LOG.copy()
log[
"data"
] = "0x0000000000000000000000000000000000000000000000000000000000012345000000000000000000000000000000000000000000000000000000000000002a" # NOQA: E501
log["topics"] = []
return log
| 51.693396 | 1,371 | 0.671503 |
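The pytest_generate_tests hook above drives the "log" fixture from the LOGS table; a minimal, self-contained sketch of the same dynamic-parametrization pattern (illustrative only, not part of the captured file):
# standalone illustration of parametrizing a fixture via pytest_generate_tests
CASES = [1, 2, 3]
def pytest_generate_tests(metafunc):
    # parametrize any test in this module that requests the "case" fixture
    if "case" in metafunc.fixturenames:
        metafunc.parametrize("case", CASES)
def test_case_is_positive(case):
    assert case > 0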
2a11d5f5bd87710d5368aea5f949deefa661a779
| 660 |
py
|
Python
|
gpMgmt/bin/gppylib/system/osImplNative.py
|
henglabs/gpdb
|
09a8cc05ac90d63c64c6d432ca35179b55a461b2
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
gpMgmt/bin/gppylib/system/osImplNative.py
|
henglabs/gpdb
|
09a8cc05ac90d63c64c6d432ca35179b55a461b2
|
[
"PostgreSQL",
"Apache-2.0"
] | 6 |
2018-08-04T07:51:37.000Z
|
2018-11-26T07:09:44.000Z
|
gpMgmt/bin/gppylib/system/osImplNative.py
|
henglabs/gpdb
|
09a8cc05ac90d63c64c6d432ca35179b55a461b2
|
[
"PostgreSQL",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
#
# Copyright (c) Greenplum Inc 2009. All Rights Reserved.
#
"""
This file defines the interface that can be used to
fetch and update system configuration information,
as well as the data object returned by the
"""
import os, time
from gppylib.gplog import *
from gppylib.utils import checkNotNone
from gppylib.system.osInterface import GpOsProvider
logger = get_default_logger()
#
# An implementation of GpOsProvider that passes operations through to the underlying
# system
#
class GpOsProviderUsingNative(GpOsProvider) :
def __init__(self):
pass
def sleep(self, sleepTime):
time.sleep(sleepTime)
| 21.290323 | 84 | 0.74697 |
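A brief hedged usage sketch of the pass-through provider above (the import path is inferred from the file location and may differ):
from gppylib.system.osImplNative import GpOsProviderUsingNative
provider = GpOsProviderUsingNative()
provider.sleep(0.5)  # delegates directly to time.sleep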
f4a10af627aaa466e3e0c8956c6dc66258f4ec71
| 542 |
py
|
Python
|
urllib/request/demo8.py
|
silianpan/seal-spider-demo
|
23bf013d08f9edaf23823bc3787f579bccd0ec3a
|
[
"Apache-2.0"
] | null | null | null |
urllib/request/demo8.py
|
silianpan/seal-spider-demo
|
23bf013d08f9edaf23823bc3787f579bccd0ec3a
|
[
"Apache-2.0"
] | 3 |
2021-09-08T01:11:16.000Z
|
2022-03-02T15:14:03.000Z
|
urllib/request/demo8.py
|
silianpan/seal-spider-demo
|
23bf013d08f9edaf23823bc3787f579bccd0ec3a
|
[
"Apache-2.0"
] | 1 |
2019-08-04T09:57:29.000Z
|
2019-08-04T09:57:29.000Z
|
from urllib import request, parse
url = 'http://httpbin.org/post'
headers = {
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
'Host': 'httpbin.org'
}
dict = {
'name': 'Germey'
}
data = bytes(parse.urlencode(dict), encoding='utf8')
req = request.Request(url=url, data=data, headers=headers, method='POST')
response = request.urlopen(req)
print(response.read().decode('utf-8'))
req = request.Request(url=url, data=data, method='POST')
req.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)')
| 30.111111 | 78 | 0.682657 |
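The excerpt builds a second Request with add_header() but stops before sending it; a plausible completion, mirroring the first request (assumed, not part of the captured file):
response = request.urlopen(req)
print(response.read().decode('utf-8'))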
98bcc1694fb7208dc00898917cb83553e0aa569a
| 1,486 |
py
|
Python
|
bin/covmat/compute_rr_pairs.py
|
mclaughlin6464/pearce
|
746f2bf4bf45e904d66996e003043661a01423ba
|
[
"MIT"
] | null | null | null |
bin/covmat/compute_rr_pairs.py
|
mclaughlin6464/pearce
|
746f2bf4bf45e904d66996e003043661a01423ba
|
[
"MIT"
] | 16 |
2016-11-04T22:24:32.000Z
|
2018-05-01T22:53:39.000Z
|
bin/covmat/compute_rr_pairs.py
|
mclaughlin6464/pearce
|
746f2bf4bf45e904d66996e003043661a01423ba
|
[
"MIT"
] | 3 |
2016-10-04T08:07:52.000Z
|
2019-05-03T23:50:01.000Z
|
from halotools.mock_observables.pair_counters import npairs_jackknife_3d
from halotools.mock_observables.catalog_analysis_helpers import cuboid_subvolume_labels
import yaml
from pearce.mocks.kittens import TestBox
import numpy as np
config_fname = '/home/users/swmclau2/Git/pearce/bin/trainer/xi_cosmo_trainer.yaml'
with open(config_fname, 'r') as ymlfile:
cfg = yaml.load(ymlfile)
min_ptcl = int(cfg['HOD']['min_ptcl'])
r_bins = np.array(cfg['observation']['bins'] ).astype(float)
def compute_RR(cat, rbins, n_rands= 5, n_sub = 5, n_cores = 16):
n_cores = cat._check_cores(n_cores)
#pos_m = return_xyz_formatted_array(x_m, y_m, z_m, period=cat.Lbox)
rbins = np.array(rbins)
randoms = np.random.random((cat.halocat.ptcl_table['x'].shape[0] * n_rands, 3)) * cat.Lbox / cat.h # Solution to NaNs: Just fuck me up with randoms
    print(randoms.shape)
    j_index_randoms, N_sub_vol = cuboid_subvolume_labels(randoms, n_sub, cat.Lbox/cat.h)
    print(j_index_randoms.shape)
    print(N_sub_vol)
RR = npairs_jackknife_3d(randoms, randoms, rbins, period=cat.Lbox/cat.h,
jtags1=j_index_randoms, jtags2=j_index_randoms,
N_samples=N_sub_vol, num_threads=n_cores)
RR = np.diff(RR, axis=1)
return RR
cat = TestBox(boxno = 0, realization = 0, system = 'sherlock')
cat.load(1.0, HOD = str('zheng07'), particles = True, downsample_factor = 1e-2)
RR = compute_RR(cat, r_bins, n_rands = 1)
np.savetxt('RR.npy', RR)
| 35.380952 | 152 | 0.716689 |
4a9b0a3dcbefeeb1caf8845d1832eb3f61252610
| 3,425 |
py
|
Python
|
pygeodiff/tests/testutils.py
|
RichardScottOZ/geodiff
|
485409147008bf500d33a1792ce4bf9799cee844
|
[
"MIT"
] | null | null | null |
pygeodiff/tests/testutils.py
|
RichardScottOZ/geodiff
|
485409147008bf500d33a1792ce4bf9799cee844
|
[
"MIT"
] | null | null | null |
pygeodiff/tests/testutils.py
|
RichardScottOZ/geodiff
|
485409147008bf500d33a1792ce4bf9799cee844
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
:copyright: (c) 2019 Peter Petrik
:license: MIT, see LICENSE for more details.
'''
import unittest
import os
import tempfile
import pygeodiff
import json
import shutil
class TestError(Exception):
pass
REFDIF = os.path.dirname(os.path.realpath(__file__))
def testdir():
return os.path.join(REFDIF, os.pardir, os.pardir, "geodiff", "tests", "testdata")
def tmpdir():
return tempfile.gettempdir()
def create_dir(testname):
if os.path.exists(tmpdir() + "/py" + testname):
shutil.rmtree(tmpdir() + "/py" + testname)
os.makedirs(tmpdir() + "/py" + testname)
def check_nchanges(geodiff, changeset, expected_number_of_changes ):
# test has_changes
has_changes = geodiff.has_changes(changeset)
if expected_number_of_changes == 0 and has_changes:
raise TestError("expected no changes")
if expected_number_of_changes != 0 and not has_changes:
raise TestError("expected changes")
# test changes_count API
nchanges = geodiff.changes_count( changeset )
if nchanges != expected_number_of_changes:
raise TestError( "expecting {} changes, found {}".format(expected_number_of_changes, nchanges))
def is_valid_json(stream):
try:
json.loads(stream)
print(stream)
except json.decoder.JSONDecodeError as e:
raise TestError("JSON:\n " + stream + "\n is not valid :\n" + str(e))
def _test_json(function, changeset, json, expect_success):
    try:
        function(changeset, json)
    except Exception:
        if expect_success:
            raise TestError("json generation failed")
    else:
        if not expect_success:
            raise TestError("json generation succeeded, but should have failed")
if expect_success and not os.path.exists(json):
raise TestError("missing generated JSON file")
if os.path.exists(json):
with open(json, 'r') as fin:
data = fin.read()
is_valid_json(data)
def test_json(geodiff, changeset, json, expect_success ):
print("check export to JSON summary")
_test_json(geodiff.list_changes_summary, changeset, json, expect_success)
print("check export to JSON ")
_test_json(geodiff.list_changes, changeset, json, expect_success)
def compare_json(json, expected_json):
print ("comparing JSON to " + expected_json)
if not os.path.exists(json):
raise TestError("missing generated JSON file")
with open(json, 'r') as fin:
json_generated = fin.read()
with open(expected_json, 'r') as fin:
json_expected = fin.read()
if json_generated.strip() != json_expected.strip():
print("---- JSON GENERATED ----")
print(json_generated)
print("---- JSON EXPECTED ----")
print(json_expected)
raise TestError("JSON generated is different from expected")
def logger(level, rawString):
msg = rawString.decode('utf-8')
print( "GEODIFFTEST: " + str(level) + " " + msg )
class GeoDiffTests(unittest.TestCase):
def setUp(self):
# load lib
lib = os.environ.get("GEODIFFLIB", None)
if lib is None:
raise TestError("missing GEODIFFLIB env variable")
if not os.path.exists(lib):
raise TestError("lib {} is missing ".format(lib))
self.geodiff = pygeodiff.GeoDiff(lib)
self.geodiff.set_logger_callback(logger)
self.geodiff.set_maximum_logger_level(pygeodiff.GeoDiff.LevelDebug)
| 29.525862 | 99 | 0.668905 |
bff4ea6c94dca9b03c67e641b3a941c7d04cd279
| 92 |
py
|
Python
|
plugins/cisco_cloudlock/komand_cisco_cloudlock/actions/list_all_organization_applications/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46 |
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/cisco_cloudlock/komand_cisco_cloudlock/actions/list_all_organization_applications/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386 |
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/cisco_cloudlock/komand_cisco_cloudlock/actions/list_all_organization_applications/__init__.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43 |
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import ListAllOrganizationApplications
| 30.666667 | 51 | 0.826087 |
90c8999b9ba1cdce30a8d1e5b4516db87bffea76
| 22,030 |
py
|
Python
|
seaice/sedna/test/test_cube.py
|
andypbarrett/nsidc-seaice
|
167a16309f7eaadd5c613b54a7df26eb1f48c2f3
|
[
"MIT"
] | 2 |
2020-08-27T08:40:22.000Z
|
2021-04-14T15:42:09.000Z
|
seaice/sedna/test/test_cube.py
|
andypbarrett/nsidc-seaice
|
167a16309f7eaadd5c613b54a7df26eb1f48c2f3
|
[
"MIT"
] | null | null | null |
seaice/sedna/test/test_cube.py
|
andypbarrett/nsidc-seaice
|
167a16309f7eaadd5c613b54a7df26eb1f48c2f3
|
[
"MIT"
] | null | null | null |
import unittest
from numpy.testing import assert_array_equal
import numpy as np
from seaice.sedna.cube import ConcentrationCube as Cube
ANYTHING = 9999.
class Test_mean_data_grid(unittest.TestCase):
def test_mean_grid_with_grid_data_returns_same_grid(self):
expected = np.ma.array([[36, 37], [100, 0]])
cube = Cube(np.ma.array([[36, 37], [100, 0]]), missing_value=255.)
actual = cube.mean_data_grid
assert_array_equal(actual, expected)
def test_mean_grid_with_cube_data_returns_grid_with_mean_grid_values(self):
expected = np.ma.array([[74.5, 37.], [1.5, 3.]])
grid1 = np.ma.array([[99., 37.], [1., 4.]])
grid2 = np.ma.array([[50., 37.], [2., 2.]])
cube = Cube(np.ma.dstack([grid1, grid2]), missing_value=255.)
actual = cube.mean_data_grid
assert_array_equal(actual, expected)
class Test__extent_binary_grid(unittest.TestCase):
def test_with_grid_data(self):
expected = np.ma.array([[0, 1], [1, 0]])
cube = Cube(np.ma.array([[14, 37], [100, 0]]), missing_value=255., extent_threshold=15)
actual = cube._extent_binary_grid(include_pole_hole=False)
assert_array_equal(actual, expected)
def test_with_grid_data_and_missing(self):
expected = np.ma.array([[0, 1], [0, 0]])
cube = Cube(np.ma.array([[14, 37], [255, 0]]), missing_value=255., extent_threshold=15)
actual = cube._extent_binary_grid(include_pole_hole=False)
assert_array_equal(actual, expected)
def test_with_grid_data_and_mask(self):
expected = np.ma.array([
[0, 1],
[1, 0]])
data = np.ma.array([
[251, 37],
[100, 0]])
cube = Cube(np.ma.masked_equal(data, 251), missing_value=255., extent_threshold=15)
actual = cube._extent_binary_grid(include_pole_hole=False)
assert_array_equal(actual, expected)
def test_with_cube_data(self):
expected = np.ma.array([[0, 1], [1, 0]])
grid1 = np.ma.array([[16, 16], [100, 0]])
grid2 = np.ma.array([[0, 14], [100, 0]])
cube = Cube(np.ma.dstack([grid1, grid2]), missing_value=255., extent_threshold=15)
actual = cube._extent_binary_grid(include_pole_hole=False)
assert_array_equal(actual, expected)
def test_with_cube_data_and_mask(self):
expected = np.ma.array([[0, 1], [0, 1]])
grid1 = np.ma.array([[16, 16], [100, 50]])
grid2 = np.ma.array([[0., 14], [100, 50]])
data = np.ma.dstack([grid1, grid2])
cube = Cube(np.ma.masked_equal(data, 100), missing_value=255., extent_threshold=15)
actual = cube._extent_binary_grid()
assert_array_equal(actual, expected)
assert_array_equal(actual.data, expected.data)
def test_with_flag_value_and_grid_data(self):
data = np.ma.masked_equal(np.ma.array([
[251, 37],
[251, 0]]), 251)
cube = Cube(data, missing_value=255., extent_threshold=37)
actual = cube._extent_binary_grid()
expected = np.ma.array([
[1, 1],
[1, 0]])
assert_array_equal(actual.data, expected.data)
def test_with_flag_value_and_cube_data(self):
grid1 = np.ma.array([
[16., 16.],
[100., 50.]])
grid2 = np.ma.array([
[100., 14.],
[100., 50.]])
data = np.ma.dstack([grid1, grid2])
cube = Cube(np.ma.masked_equal(data, 100.), missing_value=255., extent_threshold=37,
flags={'pole': 100})
actual = cube._extent_binary_grid()
expected = np.ma.array([
[0, 0],
[1, 1]])
assert_array_equal(actual.data, expected.data)
def test_with_flag_value(self):
grid1 = np.ma.masked_greater(np.ma.array([
[1., 251., 255.],
[0., 6., 100.]]), 250)
grid2 = np.ma.masked_greater(np.ma.array([
[2.00, 251., 255.],
[100., 7., 100.]]), 250)
grid3 = np.ma.masked_greater(np.ma.array([
[100., 251., 255.],
[79.0, 7., 100.]]), 250)
cube = Cube(np.ma.dstack([grid1, grid2, grid3]), missing_value=255., extent_threshold=15)
actual = cube._extent_binary_grid()
expected = np.ma.array([
[1, 1, 0],
[1, 0, 1]])
assert_array_equal(actual, expected)
def test_with_no_mask_at_all(self):
grid1 = np.ma.masked_greater(np.ma.array([
[1., 95., 24.],
[0., 6., 100.]]), 250)
grid2 = np.ma.masked_greater(np.ma.array([
[2.00, 73., 24.],
[100., 7., 100.]]), 250)
grid3 = np.ma.masked_greater(np.ma.array([
[100., 83., 29.],
[79.0, 7., 100.]]), 250)
cube = Cube(np.ma.dstack([grid1, grid2, grid3]), missing_value=255., extent_threshold=15,
flags={'pole': 251})
actual = cube._extent_binary_grid(include_pole_hole=True)
expected = np.ma.array([
[1, 1, 1],
[1, 0, 1]])
assert_array_equal(actual, expected)
def test_with_shrinking_pole_hole(self):
grid1 = np.ma.masked_greater(np.ma.array([
[251., 251., 251.],
[0., 6., 100.]]), 250)
grid2 = np.ma.masked_greater(np.ma.array([
[2.00, 251., 251.],
[100., 7., 100.]]), 250)
grid3 = np.ma.masked_greater(np.ma.array([
[100., 251., 251.],
[79.0, 7., 100.]]), 250)
cube = Cube(np.ma.dstack([grid1, grid2, grid3]), missing_value=255., extent_threshold=15,
flags={'pole': 251})
actual = cube._extent_binary_grid(include_pole_hole=True)
expected = np.array([[1, 1, 1],
[1, 0, 1]])
assert_array_equal(actual, expected)
def test_with_shrinking_pole_hole_and_some_missing(self):
grid1 = np.ma.masked_greater(np.ma.array([
[251., 251., 251.],
[0., 6., 100.]]), 250)
grid2 = np.ma.masked_greater(np.ma.array([
[255, 251., 251.],
[100., 7., 100.]]), 250)
grid3 = np.ma.masked_greater(np.ma.array([
[16., 251., 251.],
[79.0, 7., 100.]]), 250)
cube = Cube(np.ma.dstack([grid1, grid2, grid3]), missing_value=255., extent_threshold=15,
flags={'pole': 251})
actual = cube._extent_binary_grid(include_pole_hole=True)
expected = np.array([[1, 1, 1],
[1, 0, 1]])
assert_array_equal(actual, expected)
def test_with_shrinking_pole_hole_and_only_missing(self):
grid1 = np.ma.masked_greater(np.ma.array([
[251., 251., 251.],
[0., 6., 100.]]), 250)
grid2 = np.ma.masked_greater(np.ma.array([
[255, 251., 251.],
[100., 7., 100.]]), 250)
grid3 = np.ma.masked_greater(np.ma.array([
[255, 251., 251.],
[79.0, 7., 100.]]), 250)
cube = Cube(np.ma.dstack([grid1, grid2, grid3]), missing_value=255., extent_threshold=15,
flags={'pole': 251})
actual = cube._extent_binary_grid(include_pole_hole=True)
expected = np.array([[0, 1, 1],
[1, 0, 1]])
assert_array_equal(actual, expected)
assert_array_equal(actual.data, expected.data)
def test_concentration_values_below_threshold_are_zero_and_unmasked(self):
expected = np.ma.array([[0, 0],
[0, 1]],
mask=[[False, True],
[True, False]])
cube = Cube(np.ma.array([[14, 255],
[255, 50]]), missing_value=255., extent_threshold=15)
actual = cube._extent_binary_grid(include_pole_hole=False)
assert_array_equal(actual, expected)
assert_array_equal(actual.data, expected.data)
assert_array_equal(actual.mask, expected.mask)
class Test_extent(unittest.TestCase):
def test_extent_with_grid_data(self):
expected = 5 + 7
cube = Cube(np.ma.array([[14, 16], [100, 0]]), missing_value=255.,
grid_areas=np.ma.array([[10, 5], [7, 1]]), extent_threshold=15)
actual = cube.extent()
self.assertEqual(actual, expected)
def test_extent_with_cube_data(self):
expected = 10 + 7
grid1 = np.ma.array([[1., 4.], [0., 6.]])
grid2 = np.ma.array([[2., 3.], [100., 7.]])
grid3 = np.ma.array([[100., 5.], [79., 7.]])
area_grid = np.ma.array([[10, 5], [7, 1]])
cube = Cube(np.ma.dstack([grid1, grid2, grid3]), missing_value=255., grid_areas=area_grid,
extent_threshold=15.)
actual = cube.extent()
self.assertEqual(actual, expected)
def test_extent_with_masked_cube_data(self):
expected = 10 + 7
grid1 = np.ma.array([[1., 253.], [0., 6.]], mask=[[0, 1], [0, 0]])
grid2 = np.ma.array([[2., 253.], [100., 7.]], mask=[[0, 1], [0, 0]])
grid3 = np.ma.array([[100., 253.], [79., 7.]], mask=[[0, 1], [0, 0]])
area_grid = np.ma.array([[10, 5], [7, 1]])
cube = Cube(np.ma.dstack([grid1, grid2, grid3]), missing_value=255., grid_areas=area_grid,
extent_threshold=15)
actual = cube.extent()
self.assertEqual(actual, expected)
def test_extent_with_flag_value(self):
expected = 10 + 7 + 5
grid1 = np.ma.masked_equal(np.ma.array([
[1., 251.],
[0., 6.]]), 251)
grid2 = np.ma.masked_equal(np.ma.array([
[2., 251.],
[100., 7.]]), 251)
grid3 = np.ma.masked_equal(np.ma.array([
[100., 251.],
[79., 7.]]), 251)
area_grid = np.ma.array([
[10, 5],
[7, 1]])
cube = Cube(np.ma.dstack([grid1, grid2, grid3]), missing_value=255., grid_areas=area_grid,
extent_threshold=15)
actual = cube.extent()
self.assertEqual(actual, expected)
def test_extent_with_pole_flag_value_and_missing_day(self):
expected = 10 + 7
grid1 = np.ma.masked_equal(np.ma.array([
[1., 251.],
[0., 6.]]), 251)
grid2 = np.ma.masked_equal(np.ma.array([
[255., 255.],
[255., 255.]]), 255.)
grid3 = np.ma.masked_equal(np.ma.array([
[100., 251.],
[79., 7.]]), 251)
area_grid = np.ma.array([
[10, 5],
[7, 1]])
cube = Cube(np.ma.dstack([grid1, grid2, grid3]), missing_value=255., grid_areas=area_grid,
extent_threshold=15)
actual = cube.extent()
self.assertEqual(actual, expected)
def test_extent_with_masked_region(self):
cube = Cube(np.ma.array([[14, 16],
[15, 0]]),
grid_areas=np.array([[10, 5],
[7, 1]]))
actual = cube.extent(regional_mask=[[True, True],
[True, True]])
self.assertTrue(np.isnan(actual))
class Test_missing(unittest.TestCase):
def test_missing_with_grid_data(self):
expected = 7
cube = Cube(np.ma.array([[14, 16],
[100, 0]]),
missing_value=100.,
grid_areas=np.array([[10, 5],
[7, 1]]))
actual = cube.missing()
self.assertEqual(actual, expected)
def test_missing_with_cube_data(self):
expected = 10
grid1 = np.ma.array([
[255., 95.],
[100., 100.]])
grid2 = np.ma.array([
[255., 90.],
[100., 100.]])
grid3 = np.ma.array([
[255., 100.],
[255., 100.]])
cube = Cube(np.ma.dstack([grid1, grid2, grid3]), missing_value=255.,
grid_areas=np.ma.array([[10, 5],
[7, 1]]))
actual = cube.missing()
self.assertEqual(actual, expected)
def test_missing_with_masked_region(self):
cube = Cube(np.ma.array([[14, 16],
[15, 0]]),
missing_value=100.,
grid_areas=np.array([[10, 5],
[7, 1]]))
actual = cube.missing(regional_mask=[[True, True],
[True, True]])
self.assertTrue(np.isnan(actual))
class Test__missing_binary_grid(unittest.TestCase):
def test__missing_binary_grid(self):
expected = np.array([[False, False],
[True, False]])
cube = Cube(np.ma.array([
[14, 16],
[100, 0]]), missing_value=100.)
actual = cube._missing_binary_grid()
assert_array_equal(actual, expected)
def test__missing_binary_grid_with_cube_data(self):
expected = np.array([[True, False],
[False, False]])
grid1 = np.ma.array([
[255., 95.],
[100., 100.]])
grid2 = np.ma.array([
[255., 90.],
[100., 100.]])
grid3 = np.ma.array([
[255., 100.],
[255., 100.]])
cube = Cube(np.ma.dstack([grid1, grid2, grid3]), missing_value=255.)
actual = cube._missing_binary_grid()
assert_array_equal(actual, expected)
def test__missing_binary_grid_with_cube_data_and_some_missing_in_the_invalid_data_mask(self):
expected = np.array([[True, False, False],
[False, False, False]])
grid1 = np.ma.array([
[255., 95., 255.],
[100., 100., 10.]])
grid2 = np.ma.array([
[255., 90., 255.],
[100., 100., 20.]])
grid3 = np.ma.array([
[255., 100., 255.],
[255., 100., 30.]])
invalid_data_mask = np.array([[False, False, True],
[False, False, False]])
cube = Cube(np.ma.dstack([grid1, grid2, grid3]), missing_value=255.,
invalid_data_mask=invalid_data_mask)
actual = cube._missing_binary_grid()
assert_array_equal(actual, expected)
class Test_area(unittest.TestCase):
def test_area_with_grid_data(self):
expected = ((5 * 16) + (7 * 100)) / 100.
area_grid = np.ma.array([[10, 5], [7, 1]])
cube = Cube(np.ma.array([[14, 16], [100, 0]]), missing_value=255., grid_areas=area_grid,
extent_threshold=15.)
actual = cube.area()
self.assertEqual(actual, expected)
def test_area_with_grid_data_and_missing_data(self):
expected = ((5 * 0) + (7 * 100)) / 100.
area_grid = np.ma.array([[10, 5], [7, 1]])
cube = Cube(np.ma.array([[14, 255], [100, 0]]), missing_value=255., grid_areas=area_grid,
extent_threshold=15.)
actual = cube.area()
self.assertEqual(actual, expected)
def test_area_with_cube_data(self):
expected = ((10 * (103. / 3.)) + (7 * (179. / 3))) / 100.
grid1 = np.ma.array([[1., 4.], [0., 6.]])
grid2 = np.ma.array([[2., 3.], [100., 7.]])
grid3 = np.ma.array([[100., 5.], [79., 7.]])
area_grid = np.ma.array([[10, 5], [7, 1]])
cube = Cube(np.ma.dstack([grid1, grid2, grid3]), missing_value=255., grid_areas=area_grid,
extent_threshold=15.)
actual = cube.area()
self.assertEqual(actual, expected)
def test_area_with_masked_region(self):
cube = Cube(np.ma.array([[14, 16],
[15, 0]]),
grid_areas=np.array([[10, 5],
[7, 1]]))
actual = cube.area(regional_mask=[[True, True],
[True, True]])
self.assertTrue(np.isnan(actual))
class Test__mask_invalid(unittest.TestCase):
def test__mask_invalid(self):
grid = np.ma.array([
[1., 251.],
[5., 6.]], mask=[
[False, True],
[False, False]])
expected = np.ma.array([
[np.nan, np.nan],
[5., np.nan]])
expected_mask = np.ma.array([
[True, True],
[False, True]])
invalid_data = np.ma.array([
[True, False],
[False, True]])
cube = Cube(np.array([[0., 0.], [0., 0.]]), invalid_data_mask=invalid_data)
actual = cube._mask_invalid(grid)
assert_array_equal(actual, expected)
assert_array_equal(actual.mask, expected_mask)
class Test__invalid_data_mask(unittest.TestCase):
def setUp(self):
self.testcube = Cube(np.array([[10., 20., 30.],
[50., 60., 90.]]))
def test_with_wrong_shape(self):
invalid_data_mask = np.array([True, False])
self.assertRaises(ValueError, Cube._invalid_data_mask, self.testcube, invalid_data_mask)
def test_with_None_arg_returns_all_false(self):
expected = np.array([[False, False, False],
[False, False, False]])
actual = Cube._invalid_data_mask(self.testcube, None)
assert_array_equal(expected, actual)
def test_with_correct_mask(self):
mask = np.array([[False, True, False],
[False, False, False]])
expected = mask.copy()
actual = Cube._invalid_data_mask(self.testcube, mask)
assert_array_equal(expected, actual)
class Test__grid_areas(unittest.TestCase):
def setUp(self):
self.testcube = Cube(np.array([[10., 20., 30.],
[50., 60., 90.]]))
def test_with_wrong_shape(self):
grid_areas = np.array([2.5, 8])
self.assertRaises(ValueError, Cube._grid_areas, self.testcube, grid_areas)
def test_with_None_arg_returns_all_false(self):
expected = np.array([[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0]])
actual = Cube._grid_areas(self.testcube, None)
assert_array_equal(expected, actual)
def test_with_correct_mask(self):
mask = np.array([[1.0, 2.0, 7.0],
[8.0, 5.0, 3.0]])
expected = mask.copy()
actual = Cube._grid_areas(self.testcube, mask)
assert_array_equal(expected, actual)
class Test__extent_grid(unittest.TestCase):
def setUp(self):
self.concentration = np.array([[1., 2.],
[3., 4.]])
self.grid_areas = np.array([[3., 4.],
[5., 6.]])
def test_extent_grid(self):
cube = Cube(self.concentration, grid_areas=self.grid_areas)
actual = cube._extent_grid()
expected = np.array([[3, 4.],
[5, 6.]])
assert_array_equal(actual, expected)
def test_concentration_below_threshold(self):
cube = Cube(self.concentration, grid_areas=self.grid_areas, extent_threshold=2)
actual = cube._extent_grid()
expected = np.array([[0, 4.],
[5, 6.]])
assert_array_equal(actual, expected)
class Test__area_grid(unittest.TestCase):
def setUp(self):
self.concentration = np.array([[50., 75.],
[80., 100.]])
self.grid_areas = np.array([[4., 4.],
[5., 6.]])
def test_area_grid(self):
cube = Cube(self.concentration, grid_areas=self.grid_areas)
actual = cube._area_grid()
expected = np.array([[2., 3],
[4., 6]])
assert_array_equal(actual, expected)
def test_concentration_below_threshold(self):
cube = Cube(self.concentration, grid_areas=self.grid_areas, extent_threshold=51)
actual = cube._area_grid()
expected = np.array([[0, 3.],
[4, 6.]])
assert_array_equal(actual, expected)
class Test__missing_grid(unittest.TestCase):
def setUp(self):
self.concentration = np.array([[1., 2.],
[3., 3.]])
self.grid_areas = np.array([[3., 4.],
[5., 6.]])
self.missing_value = 3
self.regional_mask = np.array([[False, True],
[False, True]])
def test_basic_missing(self):
cube = Cube(self.concentration, grid_areas=self.grid_areas,
missing_value=self.missing_value)
actual = cube._missing_grid()
expected = np.ma.array([[0, 0],
[5, 6]],
mask=[[False, False],
[False, False]])
assert_array_equal(actual, expected)
assert_array_equal(actual.mask, expected.mask)
class Test_grid_shape(unittest.TestCase):
def test_with_3d_data(self):
layer1 = np.array([[1., 2., 3., 4.],
[3., 3., 5., 6.],
[3., 3., 5., 6.]])
layer2 = np.array([[2., 3., 1., 4.],
[3., 5., 3., 6.],
[3., 3., 6., 5.]])
cube = Cube(np.ma.dstack([layer1, layer2]))
actual = cube.grid_shape()
expected = (3, 4)
self.assertTupleEqual(actual, expected)
def test_with_2d_data(self):
cube = Cube(np.array([[1., 2., 3.],
[3., 3., 5.],
[5., 3., 3.],
[3., 5., 3.]]))
actual = cube.grid_shape()
expected = (4, 3)
self.assertTupleEqual(actual, expected)
| 32.588757 | 98 | 0.515887 |
32041cc0ddb2df5cb711bead0fa54418065b0b03
| 1,673 |
py
|
Python
|
src/django_upgrade/fixers/utils_text.py
|
browniebroke/django-upgrade
|
032f9aaf825d67eac24e3ab878257c2fff9cc52a
|
[
"MIT"
] | 284 |
2021-08-28T01:31:41.000Z
|
2022-03-30T16:15:59.000Z
|
src/django_upgrade/fixers/utils_text.py
|
browniebroke/django-upgrade
|
032f9aaf825d67eac24e3ab878257c2fff9cc52a
|
[
"MIT"
] | 61 |
2021-08-28T07:45:05.000Z
|
2022-02-02T09:03:04.000Z
|
src/django_upgrade/fixers/utils_text.py
|
browniebroke/django-upgrade
|
032f9aaf825d67eac24e3ab878257c2fff9cc52a
|
[
"MIT"
] | 10 |
2021-08-28T09:02:46.000Z
|
2022-03-07T03:39:18.000Z
|
"""
Replace imports from django.utils.text:
https://docs.djangoproject.com/en/3.0/releases/3.0/#features-deprecated-in-3-0
"""
from __future__ import annotations
import ast
from functools import partial
from typing import Iterable
from tokenize_rt import Offset, Token
from django_upgrade.ast import ast_start_offset
from django_upgrade.data import Fixer, State, TokenFunc
from django_upgrade.tokens import (
extract_indent,
find_and_replace_name,
insert,
update_import_names,
)
fixer = Fixer(
__name__,
min_version=(3, 0),
)
MODULE = "django.utils.text"
OLD_NAME = "unescape_entities"
NAME_MAP = {
"unescape_entities": "",
}
@fixer.register(ast.ImportFrom)
def visit_ImportFrom(
state: State,
node: ast.ImportFrom,
parent: ast.AST,
) -> Iterable[tuple[Offset, TokenFunc]]:
if (
node.level == 0
and node.module == MODULE
and any(
(alias.name == OLD_NAME and alias.asname is None) for alias in node.names
)
):
yield ast_start_offset(node), partial(fix_import, node=node)
def fix_import(tokens: list[Token], i: int, *, node: ast.ImportFrom) -> None:
j, indent = extract_indent(tokens, i)
update_import_names(tokens, i, node=node, name_map={OLD_NAME: ""})
insert(tokens, j, new_src=f"{indent}import html\n")
@fixer.register(ast.Name)
def visit_Name(
state: State,
node: ast.Name,
parent: ast.AST,
) -> Iterable[tuple[Offset, TokenFunc]]:
if node.id == OLD_NAME and OLD_NAME in state.from_imports[MODULE]:
yield ast_start_offset(node), partial(
            find_and_replace_name, name=OLD_NAME, new="html.unescape"
)
| 25.348485 | 85 | 0.689181 |
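A rough before/after illustration of the rewrite this fixer performs (hypothetical input module, not taken from the repository):
# before
from django.utils.text import unescape_entities
title = unescape_entities("&amp;co")
# after django-upgrade applies this fixer (expected shape of the output)
import html
title = html.unescape("&amp;co")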
120835212139dba564cf05b0677a5c3535370ecb
| 819 |
py
|
Python
|
test/test_recordinality.py
|
zacharyvoase/python-recordinality
|
55a4656626ec484f3672283ec87434654720f405
|
[
"Unlicense"
] | 4 |
2017-08-25T14:27:40.000Z
|
2020-07-29T19:33:01.000Z
|
test/test_recordinality.py
|
zacharyvoase/python-recordinality
|
55a4656626ec484f3672283ec87434654720f405
|
[
"Unlicense"
] | null | null | null |
test/test_recordinality.py
|
zacharyvoase/python-recordinality
|
55a4656626ec484f3672283ec87434654720f405
|
[
"Unlicense"
] | null | null | null |
import os
from recordinality import Recordinality
example_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'example.txt')
example_count = len(set(open(example_file).read().strip().splitlines()))
TRIALS = 100
def test_smoke():
results = 0
for trial in range(TRIALS):
sketch = Recordinality(size=256)
with open(example_file) as example:
for line in example:
if line.strip():
sketch.add(line.strip().encode('utf-8'))
results += sketch.cardinality()
mean_guess = results / TRIALS
error = abs(mean_guess - example_count) / float(example_count)
print("guess: {}, actual: {} (error: {:.4f})".format(
mean_guess,
example_count,
error))
assert error <= 0.05, "Unacceptable error (>5%)"
| 28.241379 | 86 | 0.622711 |
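A minimal usage sketch of the Recordinality estimator exercised by the test above (input values are illustrative only):
from recordinality import Recordinality
sketch = Recordinality(size=256)
for word in (b"to", b"be", b"or", b"not", b"to", b"be"):
    sketch.add(word)
# estimated number of distinct items added (4 here, up to sketch error)
print(sketch.cardinality())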
5b9438ba2f09881d2f2081e942a532837cfa87a6
| 842 |
py
|
Python
|
atc_scripts/config_files/L1InstSize_8k.py
|
abadp/gem5-NoSQL
|
a8372f141ba21b234b2238918512bd6ff91fa971
|
[
"BSD-3-Clause"
] | 3 |
2020-04-18T07:01:12.000Z
|
2021-04-11T04:14:56.000Z
|
atc_scripts/config_files/L1InstSize_8k.py
|
abadp/gem5-NoSQL
|
a8372f141ba21b234b2238918512bd6ff91fa971
|
[
"BSD-3-Clause"
] | null | null | null |
atc_scripts/config_files/L1InstSize_8k.py
|
abadp/gem5-NoSQL
|
a8372f141ba21b234b2238918512bd6ff91fa971
|
[
"BSD-3-Clause"
] | 3 |
2019-02-14T13:54:31.000Z
|
2021-07-09T11:06:26.000Z
|
memory_config = {
#Memory
'mem-type' : 'SimpleMemory',
'mem-channels' : '4',
#Icache
'l1i_size' : '8kB', #OK
'l1i_assoc' : '2', #OK
'l1i_hit_latency' : '1',
'l1i_response_latency' : '1',
'l1i_mshrs' : '4',
'l1i_tgts_per_mshr' : '8',
#Dcache
'l1d_size' : '32kB', #OK
'l1d_assoc' : '4', #OK
'l1d_hit_latency' : '1', #OK
'l1d_response_latency' : '1', #MISS: Hit_lat + (Next level access) + Resp_lat. Always uses parallel access
'l1d_mshrs' : '16',
'l1d_tgts_per_mshr' : '8',
'l2_size' : '32MB', #OK
'l2_assoc' : '8', #OK
'l2_hit_latency' : '6', #OK
'l2_response_latency' : '3',
'l2_mshrs' : '16',
'l2_tgts_per_mshr' : '8',
}
| 29.034483 | 112 | 0.465558 |
1b19a01e58fb63ba4700d1d4664dc6021cbdbe5d
| 1,373 |
py
|
Python
|
cpss_vimeo/tests.py
|
xgdfalcon/django-vimeo
|
fe474e4ab6306b4411df83a74ee8aecf8843a39a
|
[
"Apache-2.0"
] | 2 |
2021-01-19T19:41:16.000Z
|
2021-02-06T18:19:35.000Z
|
cpss_vimeo/tests.py
|
xgdfalcon/django-vimeo
|
fe474e4ab6306b4411df83a74ee8aecf8843a39a
|
[
"Apache-2.0"
] | 1 |
2021-03-15T05:00:46.000Z
|
2021-03-15T05:00:46.000Z
|
cpss_vimeo/tests.py
|
xgdfalcon/django-vimeo
|
fe474e4ab6306b4411df83a74ee8aecf8843a39a
|
[
"Apache-2.0"
] | null | null | null |
#
# @license
# Copyright (c) 2020 XGDFalcon®. All Rights Reserved.
#
#
# XGDFalcon LLC retains all intellectual property rights to the code
# distributed as part of the Control Point System Software (CPSS) package.
#
"""
This python module provides the models for the video vault application.
Written by Larry Latouf ([email protected])
"""
from django.test import TestCase
from django.test.client import RequestFactory
from .models import VimeoClientOption
import os
CLIENT_SECRET = os.environ['CLIENT_SECRET']
CLIENT_ID = os.environ['CLIENT_ID']
ACCESS_TOKEN = os.environ['ACCESS_TOKEN']
USER_ID = os.environ['USER_ID']
PROJECT_ID = os.environ['PROJECT_ID']
class VimeoDjangoTestCase(TestCase):
def setUp(self):
VimeoClientOption.objects.create(
vimeo_user_id=USER_ID,
vimeo_client_id=CLIENT_ID,
vimeo_client_secret=CLIENT_SECRET,
vimeo_access_token=ACCESS_TOKEN,
vimeo_project_id=PROJECT_ID)
def test_retrieve_project(self):
# response = self.client.get('/')
collection = VimeoClientOption.objects.get(vimeo_project_id=PROJECT_ID)
result = collection.get_folder_contents()
print(result)
def test_get_project(self):
rf = RequestFactory()
get_request = rf.get('project/'+PROJECT_ID)
print(get_request)
| 28.020408 | 79 | 0.705025 |
8413038a5d16456b699a475b3fbab2031f8bc0e4
| 801 |
py
|
Python
|
src/conanfile-h180.py
|
jysirius/palladio
|
110f9b80e0622304badcb929ebd1b68b0a0e13f5
|
[
"Apache-2.0"
] | 1 |
2020-01-03T07:20:28.000Z
|
2020-01-03T07:20:28.000Z
|
src/conanfile-h180.py
|
jysirius/palladio
|
110f9b80e0622304badcb929ebd1b68b0a0e13f5
|
[
"Apache-2.0"
] | null | null | null |
src/conanfile-h180.py
|
jysirius/palladio
|
110f9b80e0622304badcb929ebd1b68b0a0e13f5
|
[
"Apache-2.0"
] | null | null | null |
import os
from conans import ConanFile
class PalladioConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def requirements(self):
self.requires("catch2/2.0.1@bincrafters/stable")
if "PLD_CONAN_HOUDINI_VERSION" in os.environ:
self.requires("houdini/{}@sidefx/stable".format(os.environ["PLD_CONAN_HOUDINI_VERSION"]))
else:
self.requires("houdini/[>18.0.0,<18.5.0]@sidefx/stable")
if "PLD_CONAN_SKIP_CESDK" not in os.environ:
if "PLD_CONAN_CESDK_VERSION" in os.environ:
cesdk_version = os.environ["PLD_CONAN_CESDK_VERSION"]
else:
cesdk_version = "2.1.5704"
self.requires("cesdk/{}@esri-rd-zurich/stable".format(cesdk_version))
| 34.826087 | 101 | 0.636704 |
1d2b0f9d4eac44178e3a2cf8da3d8eab6848d5b4
| 4,740 |
py
|
Python
|
plyse/query_tree.py
|
arcodergh/plyse
|
bb44543f9c812401489ceba68b24b8618d263830
|
[
"MIT"
] | 26 |
2016-05-31T14:45:24.000Z
|
2021-04-27T01:54:52.000Z
|
plyse/query_tree.py
|
arcodergh/plyse
|
bb44543f9c812401489ceba68b24b8618d263830
|
[
"MIT"
] | 11 |
2016-05-31T20:09:57.000Z
|
2022-02-18T11:43:50.000Z
|
plyse/query_tree.py
|
arcodergh/plyse
|
bb44543f9c812401489ceba68b24b8618d263830
|
[
"MIT"
] | 13 |
2016-05-31T19:41:36.000Z
|
2021-03-01T15:22:38.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from copy import deepcopy
class TreeNode(dict):
def __init__(self, *args, **kwargs):
super(TreeNode, self).__init__(*args, **kwargs)
@property
def is_leaf(self):
raise NotImplementedError()
@property
def children(self, *args, **kwargs):
"""
Returns all the child nodes from the itself
:return children as list of TreeNodes
"""
raise NotImplementedError()
def leaves(self, *args, **kwargs):
"""
Returns all leaves from the current node
:return a list of leaves
"""
raise NotImplementedError()
def traverse(self, node_callback=lambda node: node, leaf_callback=lambda node: node):
"""
Traverse the tree and for each node or leaf calls the corresponding callback
"""
raise NotImplementedError()
class Operand(TreeNode):
def __init__(self, *args, **kwargs):
super(Operand, self).__init__(*args, **kwargs)
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("Operand doesn't have an attribute named '%s'" % name)
def __setattr__(self, name, val):
self[name] = val
@property
def is_leaf(self):
return True
@property
def children(self, *args, **kwargs):
return []
def leaves(self, *args, **kwargs):
return [self]
class OperatorFactoryError(Exception):
pass
class OperatorFactory(object):
@staticmethod
def create(op_type):
if op_type.lower() == Or.type:
return Or()
elif op_type.lower() == And.type:
return And()
elif op_type.lower() == Not.type:
return Not()
else:
raise OperatorFactoryError("Cannot create an operator of type '%s'" % op_type)
class Operator(TreeNode):
type = "base_operator"
def __init__(self, operands=None, *args, **kwargs):
super(Operator, self).__init__(*args, **kwargs)
self._operands = [] if not operands else operands
def has_left_operand(self):
raise Exception("Not implemented!")
def has_right_operand(self):
raise Exception("Not implemented!")
def add_input(self, operand):
# An operator can have only two inputs (binary tree). If another gets added then it creates a new operator
# of the same type with inputs as the last element and the one wanted to be added. The result is a left input
# operand and a right input operator, with the last element and the new one as inputs
if len(self._operands) == 2:
op = self.__class__([self._operands.pop(1), operand])
self._operands.append(op)
else:
self._operands.append(operand)
return self
@property
def is_leaf(self):
return False
@property
def children(self, *args, **kwargs):
return self.inputs
@property
def inputs(self):
return self._operands
def leaves(self, ignore_negated=False, *args, **kwargs):
leaves = []
self.traverse(ignore_negated=ignore_negated, leaf_callback=lambda leaf: leaves.append(leaf))
return leaves
def traverse(self, node_callback=lambda node: node, leaf_callback=lambda leaf: leaf, ignore_negated=False):
def _do_traverse(operand):
if operand.is_leaf:
leaf_callback(operand)
elif isinstance(operand, Not) and ignore_negated:
pass
else:
node_callback(operand)
_do_traverse(operand.inputs[0])
if len(operand.inputs) > 1:
_do_traverse(operand.inputs[1])
return _do_traverse(self)
def __str__(self):
return "[TreeNode] '{op}' operator with {children} children ".format(op=self.type.upper(), children=len(self.children))
def __repr__(self):
return self.__str__()
class And(Operator):
type = "and"
def has_left_operand(self):
return True
def has_right_operand(self):
return True
class Or(Operator):
type = "or"
def has_left_operand(self):
return True
def has_right_operand(self):
return True
class NotOperatorError(Exception):
pass
class Not(Operator):
type = "not"
def has_left_operand(self):
return False
def has_right_operand(self):
return True
def add_input(self, operand):
if not self._operands:
self._operands.append(operand)
else:
raise NotOperatorError("Cannot add more than one input to Not Operator")
return self
| 24.947368 | 127 | 0.611392 |
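A hedged sketch of composing the tree nodes defined above; the field names are made up and the import path is inferred from the file location:
from plyse.query_tree import Operand, And, OperatorFactory
# build (name:foo AND (age:3 OR age:4)) and inspect it
leaf_a = Operand(field="name", val="foo")
leaf_b = Operand(field="age", val="3")
leaf_c = Operand(field="age", val="4")
or_node = OperatorFactory.create("or").add_input(leaf_b).add_input(leaf_c)
tree = And([leaf_a, or_node])
print(len(tree.leaves()))                      # 3
print([leaf.field for leaf in tree.leaves()])  # ['name', 'age', 'age']
tree.traverse(node_callback=lambda node: print(node))  # visits the AND and OR nodes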
4aba500829181ff065fd81723104dedd216b465f
| 73 |
py
|
Python
|
pyflux/arma/__init__.py
|
ThomasHoppe/pyflux
|
297f2afc2095acd97c12e827dd500e8ea5da0c0f
|
[
"BSD-3-Clause"
] | 2,091 |
2016-04-01T02:52:10.000Z
|
2022-03-29T11:38:15.000Z
|
pyflux/arma/__init__.py
|
EricSchles/pyflux
|
297f2afc2095acd97c12e827dd500e8ea5da0c0f
|
[
"BSD-3-Clause"
] | 160 |
2016-04-26T14:52:18.000Z
|
2022-03-15T02:09:07.000Z
|
pyflux/arma/__init__.py
|
EricSchles/pyflux
|
297f2afc2095acd97c12e827dd500e8ea5da0c0f
|
[
"BSD-3-Clause"
] | 264 |
2016-05-02T14:03:31.000Z
|
2022-03-29T07:48:20.000Z
|
from .arma import ARIMA
from .arimax import ARIMAX
from .nnar import NNAR
| 24.333333 | 26 | 0.808219 |
ffa712b4b48376438da42cb3f00aa202cad326a2
| 1,085 |
py
|
Python
|
modules/dbnd/test_dbnd/task_ctrl/test_task_visualiser.py
|
turbaszek/dbnd
|
6efbf3e7ecd175645e8e58d0d015d32fe9e95ea0
|
[
"Apache-2.0"
] | null | null | null |
modules/dbnd/test_dbnd/task_ctrl/test_task_visualiser.py
|
turbaszek/dbnd
|
6efbf3e7ecd175645e8e58d0d015d32fe9e95ea0
|
[
"Apache-2.0"
] | null | null | null |
modules/dbnd/test_dbnd/task_ctrl/test_task_visualiser.py
|
turbaszek/dbnd
|
6efbf3e7ecd175645e8e58d0d015d32fe9e95ea0
|
[
"Apache-2.0"
] | null | null | null |
import logging
import sys
from dbnd import task
from dbnd._core.task_ctrl.task_visualiser import _MAX_VALUE_SIZE, TaskVisualiser
from test_dbnd.factories import TTask
logger = logging.getLogger(__name__)
@task
def t_very_long_params(t_param="long_string" * 1000):
return "ok"
class TestTaskVisualizer(object):
def test_simple_dump(self):
s = TTask(t_param="my_param")
actual = TaskVisualiser(s).banner("Runinng task")
assert "my_param" in actual
def test_exception(self):
s = TTask(t_param="my_param")
try:
raise Exception("MyException")
except Exception:
actual = TaskVisualiser(s).banner("Runinng task", exc_info=sys.exc_info())
assert actual
assert "MyException" in actual
def test_in_memory_dump(self):
s = t_very_long_params.task(t_param="long_string" * 1000)
assert len(s.t_param) > _MAX_VALUE_SIZE * 3
actual = TaskVisualiser(s).banner("Running task")
logger.warning(actual)
assert len(actual) < _MAX_VALUE_SIZE * 3
| 27.820513 | 86 | 0.676498 |
db9c5a8bc7c9fc6f8dd44663ddb6a8f6a09588d7
| 14,932 |
py
|
Python
|
utils/song_utils.py
|
gmittal/symbolic-music-diffusion
|
84128ca038fb8757cc6ce15af04b445299f60f99
|
[
"Apache-2.0"
] | 45 |
2021-03-05T22:29:31.000Z
|
2022-03-26T18:11:58.000Z
|
utils/song_utils.py
|
gmittal/symbolic-music-diffusion
|
84128ca038fb8757cc6ce15af04b445299f60f99
|
[
"Apache-2.0"
] | 1 |
2021-12-07T01:37:30.000Z
|
2021-12-07T01:37:30.000Z
|
utils/song_utils.py
|
gmittal/symbolic-music-diffusion
|
84128ca038fb8757cc6ce15af04b445299f60f99
|
[
"Apache-2.0"
] | 7 |
2021-04-03T12:09:36.000Z
|
2022-02-11T17:07:31.000Z
|
# Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for manipulating multi-measure NoteSequences."""
import os
import sys
import note_seq
import numpy as np
sys.path.append("{}/../".format(os.path.dirname(os.path.abspath(__file__))))
from config import melody_2bar_converter
def spherical_interpolation(p0, p1, alpha):
"""Spherical linear interpolation."""
assert p0.shape == p1.shape
assert p0.ndim == 2 and p1.ndim == 2
unit_p0 = p0 / np.linalg.norm(p0, axis=1, keepdims=1)
unit_p1 = p1 / np.linalg.norm(p1, axis=1, keepdims=1)
omega = np.arccos(np.diag(unit_p0.dot(unit_p1.T)))
so = np.sin(omega)
c1 = (np.sin((1.0 - alpha) * omega) / so)[:, np.newaxis]
c2 = (np.sin(alpha * omega) / so)[:, np.newaxis]
return c1 * p0 + c2 * p1
def count_measures(note_sequence):
"""Approximate number of measures in the song."""
splits = note_seq.sequences_lib.split_note_sequence_on_time_changes(
note_sequence)
bars = 0
for split in splits:
time_signature = split.time_signatures[0]
tempo = split.tempos[0]
quarters_per_bar = 4 * time_signature.numerator / time_signature.denominator
seconds_per_bar = 60 * quarters_per_bar / tempo.qpm
num_bars = split.total_time / seconds_per_bar
bars += num_bars
return bars
def extract_melodies(note_sequence, keep_longest_split=False):
"""Extracts all melodies in a polyphonic note sequence.
Args:
note_sequence: A polyphonic NoteSequence object.
keep_longest_split: Whether to discard all subsequences with tempo changes
other than the longest one.
Returns:
List of monophonic NoteSequence objects.
"""
splits = note_seq.sequences_lib.split_note_sequence_on_time_changes(
note_sequence)
if keep_longest_split:
ns = max(splits, key=lambda x: len(x.notes))
splits = [ns]
melodies = []
for split_ns in splits:
qs = note_seq.sequences_lib.quantize_note_sequence(split_ns,
steps_per_quarter=4)
instruments = list(set([note.instrument for note in qs.notes]))
for instrument in instruments:
melody = note_seq.melodies_lib.Melody()
try:
melody.from_quantized_sequence(qs,
ignore_polyphonic_notes=True,
instrument=instrument,
gap_bars=np.inf)
except note_seq.NonIntegerStepsPerBarError:
continue
melody_ns = melody.to_sequence()
melodies.append(melody_ns)
return melodies
def generate_shifted_sequences(song, resolution=1):
"""Generates shifted and overlapping versions of a Song.
Args:
song: A multitrack Song object.
resolution: The number of shifted examples, with computed timing offsets
uniformly spaced.
Returns:
A list of multitrack Song objects.
"""
offset = 2.0 / resolution
base = song.note_sequence
dc = song.data_converter
results = []
for step in range(resolution):
shift = note_seq.extract_subsequence(base, offset * step, base.total_time)
results.append(Song(shift, dc, chunk_length=1))
assert len(results) == resolution
return results
def fix_instruments_for_concatenation(note_sequences):
"""Adjusts instruments for concatenating multitrack measures."""
instruments = {}
for i in range(len(note_sequences)):
for note in note_sequences[i].notes:
if not note.is_drum:
if note.program not in instruments:
if len(instruments) >= 8:
instruments[note.program] = len(instruments) + 2
else:
instruments[note.program] = len(instruments) + 1
note.instrument = instruments[note.program]
else:
note.instrument = 9
def fix_chunk_lengths_for_concatenation(note_sequences):
"""Adjusts the total_time of each tokenized chunk for concatenating
multitrack measures.
"""
max_chunk_time = max([ns.total_time for ns in note_sequences])
for chunk in note_sequences:
chunk.total_time = max_chunk_time
def chunks_to_embeddings(sequences, model, data_converter):
"""Convert NoteSequence objects into latent space embeddings.
Args:
sequences: A list of NoteSequence objects.
model: A TrainedModel object used for inference.
data_converter: A data converter (e.g. OneHotMelodyConverter,
TrioConverter) used to convert NoteSequence objects into
tensor encodings for model inference.
Returns:
A numpy matrix of shape [len(sequences), latent_dims].
"""
assert model is not None, 'No model provided.'
latent_dims = model._z_input.shape[1]
idx = []
non_rest_chunks = []
zs = np.zeros((len(sequences), latent_dims))
mus = np.zeros((len(sequences), latent_dims))
sigmas = np.zeros((len(sequences), latent_dims))
for i, chunk in enumerate(sequences):
if len(data_converter.to_tensors(chunk).inputs) > 0:
idx.append(i)
non_rest_chunks.append(chunk)
if non_rest_chunks:
z, mu, sigma = model.encode(non_rest_chunks)
assert z.shape == mu.shape == sigma.shape
for i, mean in enumerate(mu):
zs[idx[i]] = z[i]
mus[idx[i]] = mean
sigmas[idx[i]] = sigma[i]
return zs, mus, sigmas
def embeddings_to_chunks(embeddings, model, temperature=1e-3):
"""Decode latent embeddings as NoteSequences.
Args:
embeddings: A numpy array of latent embeddings.
model: A TrainedModel object used for decoding embeddings.
Returns:
A list of NoteSequence objects.
"""
assert model is not None, 'No model provided.'
assert len(embeddings) > 0
reconstructed_chunks = model.decode(embeddings,
temperature=temperature,
length=model._config.hparams.max_seq_len)
assert len(reconstructed_chunks) == len(embeddings)
embedding_norms = np.linalg.norm(embeddings, axis=1)
rest_chunk_idx = np.where(
embedding_norms == 0)[0] # rests correspond to zero-length embeddings
for idx in rest_chunk_idx:
rest_ns = note_seq.NoteSequence()
rest_ns.total_time = reconstructed_chunks[idx].total_time
reconstructed_chunks[idx] = rest_ns
return reconstructed_chunks
def embeddings_to_song(embeddings,
model,
data_converter,
fix_instruments=True,
temperature=1e-3):
"""Decode latent embeddings as a concatenated NoteSequence.
Args:
embeddings: A numpy array of latent embeddings.
model: A TrainedModel object used for decoding.
data_converter: A data converter used by the returned Song
object.
fix_instruments: A boolean determining whether instruments in
multitrack measures should be fixed before concatenation.
Returns:
A Song object.
"""
chunks = embeddings_to_chunks(embeddings, model, temperature)
if fix_instruments:
fix_instruments_for_concatenation(chunks)
concat_chunks = note_seq.sequences_lib.concatenate_sequences(chunks)
return Song(concat_chunks, data_converter, reconstructed=True)
def encode_songs(model, songs, chunk_length=None, programs=None):
"""Generate embeddings for a batch of songs.
Args:
model: A TrainedModel object used for inference.
songs: A list of Song objects.
chunk_length: An integer describing the number of measures
each chunk of each song should contain.
programs: A list of integers specifying which MIDI programs to use.
Default is to keep all available programs.
Returns:
A list of numpy matrices each with shape [3, len(song_chunks), latent_dims].
"""
assert model is not None, 'No model provided.'
assert len(songs) > 0, 'No songs provided.'
chunks, splits = [], []
data_converter = songs[0].data_converter
i = 0
for song in songs:
chunk_tensors, chunk_sequences = song.chunks(chunk_length=chunk_length,
programs=programs)
del chunk_tensors
chunks.extend(chunk_sequences)
splits.append(i)
i += len(chunk_sequences)
z, mu, sigma = chunks_to_embeddings(chunks, model, data_converter)
encoding = []
for i in range(len(splits)):
j, k = splits[i], None if i + 1 == len(splits) else splits[i + 1]
song_encoding = [z[j:k], mu[j:k], sigma[j:k]]
song_encoding = np.stack(song_encoding)
encoding.append(song_encoding)
assert len(encoding) == len(splits) == len(songs)
return encoding
class Song(object):
"""Song object used to provide additional abstractions for NoteSequences.
Attributes:
note_sequence: A NoteSequence object holding the Song's MIDI data.
data_converter: A data converter used for preprocessing and tokenization
for a corresponding MusicVAE model.
chunk_length: The number of measures in each tokenized chunk of MIDI
(dependent on the model configuration).
multitrack: Whether this Song is multitrack or not.
reconstructed: A boolean describing whether this Song is reconstructed
from the decoder of a MusicVAE model.
"""
def __init__(self,
note_sequence,
data_converter,
chunk_length=2,
multitrack=False,
reconstructed=False):
self.note_sequence = note_sequence
self.data_converter = data_converter
self.chunk_length = chunk_length
self.reconstructed = reconstructed
self.multitrack = multitrack
def encode(self, model, chunk_length=None, programs=None):
"""Encode song chunks (and full-chunk rests).
Returns:
z: (chunks, latent_dims), mu: (chunks, latent_dims), sigma: (chunks, latent_dims).
"""
chunk_tensors, chunk_sequences = self.chunks(chunk_length=chunk_length,
programs=programs)
z, means, sigmas = chunks_to_embeddings(chunk_sequences, model,
self.data_converter)
del chunk_tensors # unused
return z
def chunks(self, chunk_length=None, programs=None, fix_instruments=True):
"""Split and featurize song into chunks of tensors and NoteSequences."""
assert not self.reconstructed, 'Not safe to tokenize reconstructed Songs.'
data = self.note_sequence
step_size = self.chunk_length
if chunk_length is not None:
step_size = chunk_length
if programs is not None:
data = self.select_programs(programs)
# Use the data converter to preprocess sequences
tensors = self.data_converter.to_tensors(data).inputs[::step_size]
sequences = self.data_converter.from_tensors(tensors)
if fix_instruments and self.multitrack:
fix_instruments_for_concatenation(sequences)
return tensors, sequences
def count_chunks(self, chunk_length=None):
length = self.chunk_length if chunk_length is None else chunk_length
return count_measures(self.note_sequence) // length
@property
def programs(self):
"""MIDI programs used in this song."""
return list(set([note.program for note in self.note_sequence.notes]))
def select_programs(self, programs):
"""Keeps selected programs of MIDI (e.g. melody program)."""
assert len(programs) > 0
assert all([program >= 0 for program in programs])
ns = note_seq.NoteSequence()
ns.CopyFrom(self.note_sequence)
del ns.notes[:]
for note in self.note_sequence.notes[:]:
if note.program in programs:
new_note = ns.notes.add()
new_note.CopyFrom(note)
return ns
def truncate(self, chunks=0, offset=0):
"""Returns a truncated version of the song.
Args:
chunks: The number of chunks in the truncated sequence.
offset: The offset in chunks to begin truncation.
Returns:
A truncated Song object.
"""
tensors = self.data_converter.to_tensors(
self.note_sequence).inputs[::self.chunk_length]
sequences = self.data_converter.from_tensors(tensors)[offset:offset +
chunks]
fix_instruments_for_concatenation(sequences)
concat_chunks = note_seq.sequences_lib.concatenate_sequences(sequences)
return Song(concat_chunks,
self.data_converter,
chunk_length=self.chunk_length)
def _count_melody_chunks(self, program):
"""Determines the number of 2-measure chunks using the melody data pipeline."""
ns = self.select_programs([program])
tensors = melody_2bar_converter.to_tensors(ns).inputs[::2]
sequences = melody_2bar_converter.from_tensors(tensors)
return len(sequences)
def find_programs(self):
"""Search for the most important MIDI programs in the song."""
def heuristic(program):
expected = self.count_chunks(chunk_length=2)
extracted = self._count_melody_chunks(program)
if extracted > 0 and abs(extracted - expected) < 0.5 * expected:
return True
return False
midi_programs = self.programs
top_programs = [p for p in midi_programs if heuristic(p)]
return top_programs
def stripped_song(self):
"""A stripped down version using programs found by a special heuristic."""
top_programs = self.find_programs()
ns = self.select_programs(top_programs)
return Song(ns, self.data_converter, self.chunk_length)
def download(self, filename, preprocessed=True, programs=None):
"""Download song as MIDI file."""
assert filename is not None, 'No filename specified.'
data = self.note_sequence
if programs is not None:
data = self.select_programs(programs)
if not self.reconstructed and preprocessed: # do not tokenize again if reconstructed
tensors, chunks = self.chunks(programs=programs)
del tensors # unused
data = note_seq.sequences_lib.concatenate_sequences(chunks)
note_seq.sequence_proto_to_midi_file(data, filename)
def play(self, preprocessed=True, programs=None):
"""Play a song with fluidsynth."""
data = self.note_sequence
if programs is not None:
data = self.select_programs(programs)
if not self.reconstructed and preprocessed: # do not tokenize again if reconstructed
tensors, chunks = self.chunks(programs=programs)
del tensors # unused
data = note_seq.sequences_lib.concatenate_sequences(chunks)
note_seq.play_sequence(data, synth=note_seq.fluidsynth)
return data
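# --- Hedged usage sketch (added for illustration; not part of the original class) ---
# Assumes a NoteSequence `ns` loaded elsewhere (e.g. note_seq.midi_file_to_note_sequence(...)),
# a trained MusicVAE `model`, and the module-level `melody_2bar_converter` referenced in
# _count_melody_chunks above.
#
#   song = Song(ns, melody_2bar_converter, chunk_length=2)
#   z = song.encode(model)             # (n_chunks, latent_dims) chunk embeddings
#   intro = song.truncate(chunks=4)    # first four chunks as a new Song
#   intro.download('intro.mid')        # write the preprocessed chunks as MIDI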
| 34.725581 | 89 | 0.690731 |
37f67fa89caacb8eb631173fe8b886fb03080611
| 522 |
py
|
Python
|
api/migrations/0006_auto_20191120_2248.py
|
Saltiest-Hacker-News-Trolls-2/DS
|
aaef46dcb225d0be15f65fc34f97c1734c1c64e9
|
[
"MIT"
] | 1 |
2019-11-23T06:56:11.000Z
|
2019-11-23T06:56:11.000Z
|
api/migrations/0006_auto_20191120_2248.py
|
Saltiest-Hacker-News-Trolls-2/DS
|
aaef46dcb225d0be15f65fc34f97c1734c1c64e9
|
[
"MIT"
] | 10 |
2020-03-24T17:50:51.000Z
|
2022-02-09T23:33:10.000Z
|
api/migrations/0006_auto_20191120_2248.py
|
Saltiest-Hacker-News-Trolls-2/DS
|
aaef46dcb225d0be15f65fc34f97c1734c1c64e9
|
[
"MIT"
] | 1 |
2019-11-20T06:18:27.000Z
|
2019-11-20T06:18:27.000Z
|
# Generated by Django 2.2.7 on 2019-11-20 22:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0005_auto_20191120_1811'),
]
operations = [
migrations.AddField(
model_name='saltyuser',
name='text',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='saltyuser',
name='rank',
field=models.IntegerField(),
),
]
| 21.75 | 47 | 0.557471 |
df61614feeb653c7038197f7b57c614c3043225f
| 1,643 |
py
|
Python
|
lonestar/analytics/cpu/k-truss/bmktest2.py
|
chakpongchung/katana
|
3278a39b504e0aeaec30d06cf629ab97dfeb3f22
|
[
"BSD-3-Clause"
] | 230 |
2018-06-20T22:18:31.000Z
|
2022-03-27T13:09:59.000Z
|
lonestar/analytics/cpu/k-truss/bmktest2.py
|
chakpongchung/katana
|
3278a39b504e0aeaec30d06cf629ab97dfeb3f22
|
[
"BSD-3-Clause"
] | 705 |
2020-02-17T20:50:38.000Z
|
2022-03-31T16:28:09.000Z
|
lonestar/analytics/cpu/k-truss/bmktest2.py
|
chakpongchung/katana
|
3278a39b504e0aeaec30d06cf629ab97dfeb3f22
|
[
"BSD-3-Clause"
] | 110 |
2018-06-19T04:39:16.000Z
|
2022-03-29T01:55:47.000Z
|
import bmk2
from bmkprops import graph_bmk, PERF_RE, get_ktruss_checker
import os
class KtrussGaloisBase(graph_bmk):
bmk = "ktruss"
algo = None
def filter_inputs(self, inputs):
def finput(x):
if not "symmetric" in x.props.flags: return False
if x.props.format == 'bin/galois': return True
return False
return filter(finput, inputs)
def get_run_spec(self, bmkinput):
x = bmk2.RunSpec(self, bmkinput)
k, ec = get_ktruss_checker(bmkinput, self.config['k'])
t = int(self.config['t'])
x.set_binary(self.props._cwd, 'k-truss')
x.set_arg(bmkinput.props.file, bmk2.AT_INPUT_FILE)
assert self.algo is not None
x.set_arg('-algo=%s' % (self.algo,), bmk2.AT_OPAQUE)
x.set_arg('-trussNum=%d' % (k,), bmk2.AT_OPAQUE)
x.set_arg("-t=%d" % (t,), bmk2.AT_OPAQUE)
x.set_arg('-o=@output', bmk2.AT_TEMPORARY_OUTPUT)
x.set_checker(bmk2.ExternalChecker(ec))
x.set_perf(bmk2.PerfRE(r"^\(NULL\),.*, Time,0,0,(?P<time_ms>[0-9]+)$"))
return x
class KtrussGaloisBSP(KtrussGaloisBase):
variant = "galois+bsp"
algo = "bsp"
class KtrussGaloisBSPIm(KtrussGaloisBase):
variant = "galois+bspIm"
algo = "bspIm"
class KtrussGaloisBSPCoreThenTruss(KtrussGaloisBase):
variant = "galois+bspCoreThenTruss"
algo = "bspCoreThenTruss"
class KtrussGaloisAsync(KtrussGaloisBase):
variant = "galois+async"
algo = "async"
BINARIES = [KtrussGaloisBSP(),
KtrussGaloisBSPIm(),
KtrussGaloisBSPCoreThenTruss(),
KtrussGaloisAsync(),]
| 28.824561 | 79 | 0.63238 |
975367d320c043a9f4949b8dfd13a3d2f291044a
| 465 |
py
|
Python
|
backend/accounts/migrations/0004_alter_account_id.py
|
mmohajer9/banker
|
e68522cc4bba0a881723cd0e54432255e8141aaf
|
[
"MIT"
] | null | null | null |
backend/accounts/migrations/0004_alter_account_id.py
|
mmohajer9/banker
|
e68522cc4bba0a881723cd0e54432255e8141aaf
|
[
"MIT"
] | null | null | null |
backend/accounts/migrations/0004_alter_account_id.py
|
mmohajer9/banker
|
e68522cc4bba0a881723cd0e54432255e8141aaf
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-09 14:17
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_alter_account_id'),
]
operations = [
migrations.AlterField(
model_name='account',
name='id',
field=models.CharField(default=uuid.uuid1, editable=False, max_length=10, primary_key=True, serialize=False),
),
]
| 23.25 | 121 | 0.634409 |
2bac437a44a6f203b4ef99373c356c7b00e05187
| 5,254 |
py
|
Python
|
src/hark_lang/cli/hosted_query.py
|
krrome/teal-lang
|
594ac0f0baae047fdb19ac9126d174408d487905
|
[
"Apache-2.0"
] | 85 |
2020-04-29T13:51:33.000Z
|
2020-08-28T04:40:11.000Z
|
src/hark_lang/cli/hosted_query.py
|
krrome/teal-lang
|
594ac0f0baae047fdb19ac9126d174408d487905
|
[
"Apache-2.0"
] | 15 |
2020-05-06T07:58:18.000Z
|
2020-08-28T10:29:28.000Z
|
src/hark_lang/cli/hosted_query.py
|
krrome/teal-lang
|
594ac0f0baae047fdb19ac9126d174408d487905
|
[
"Apache-2.0"
] | 4 |
2020-05-31T09:42:08.000Z
|
2020-08-27T17:04:26.000Z
|
"""GraphQL interface to Hark Cloud"""
import logging
import os
from types import SimpleNamespace
from typing import Union
from gql import Client, gql
from gql.transport.requests import RequestsHTTPTransport
from .. import config, __version__
from ..exceptions import UserResolvableError
from . import interface as ui
LOG = logging.getLogger(__name__)
CLIENT = None
def _init(endpoint: str):
global CLIENT
if not endpoint:
raise UserResolvableError("Hark Cloud endpoint not set", "")
try:
HASURA_SECRET = os.environ["HASURA_ADMIN_SECRET"]
except KeyError:
raise UserResolvableError(
"HASURA_ADMIN_SECRET is not set",
"This is a temporary problem and will disappear in future versions",
)
transport = RequestsHTTPTransport(
url=endpoint,
# TODO - change to x-hasura-access-key
headers={"x-hasura-admin-secret": HASURA_SECRET},
verify=True, # The SSL cert
retries=3,
)
CLIENT = Client(transport=transport, fetch_schema_from_transport=True,)
LOG.info("Connected to Hark Cloud: %s", endpoint)
def _query(s: str, **kwargs) -> dict:
if not CLIENT:
cfg = config.get_last_loaded()
_init(cfg.endpoint)
LOG.info("Query args: %s", kwargs)
return CLIENT.execute(gql(s), variable_values=kwargs)
## Pythonic queries:
def new_package(
instance_id: int, python_hash: str, hark_hash: str, config_hash: str
) -> SimpleNamespace:
qry = """
mutation NewPackage($id: Int!, $ch: String!, $ph: String!, $th: String!) {
new_package(instance_id: $id, config_hash: $ch, python_hash: $ph, hark_hash: $th) {
package {
id
new_python
new_config
new_hark
python_url
hark_url
config_url
}
}
}
"""
data = _query(qry, id=instance_id, ph=python_hash, th=hark_hash, ch=config_hash)
return SimpleNamespace(**data["new_package"]["package"])
def get_instance(project_id: int, instance_name: str) -> Union[SimpleNamespace, None]:
qry = """
query GetInstance($name: String!, $pid: Int!) {
instance(limit: 1, where: {project_id: {_eq: $pid}, name: {_eq: $name}}) {
id
uuid
ready
project {
name
}
}
}
"""
data = _query(qry, pid=project_id, name=instance_name)
try:
data["instance"][0]["project"] = SimpleNamespace(
**data["instance"][0]["project"]
)
return SimpleNamespace(**data["instance"][0])
except IndexError:
return None
def new_deployment(instance_id: int, package_id: int) -> SimpleNamespace:
qry = """
mutation NewDeployment($package_id: Int!, $iid: Int!) {
insert_deployment_one(object: {package_id: $package_id, instance_id: $iid}) {
id
}
}
"""
data = _query(qry, package_id=package_id, iid=instance_id)
return SimpleNamespace(**data["insert_deployment_one"])
def switch(instance_id: int, new_deployment_id: int) -> SimpleNamespace:
qry = """
mutation DeployIt($iid: Int!, $did: Int!) {
switch_deployment(instance_id: $iid, new_deployment_id: $did) {
ok
}
}
"""
data = _query(qry, did=new_deployment_id, iid=instance_id)
return SimpleNamespace(**data["switch_deployment"])
def destroy(instance_id: int) -> SimpleNamespace:
qry = """
mutation DeployIt($id: Int!) {
switch_deployment(instance_id: $id) {
ok
}
}
"""
data = _query(qry, id=instance_id)
return SimpleNamespace(**data["switch_deployment"])
def status(deployment_id: int) -> SimpleNamespace:
qry = """
query DeploymentStatus($id: Int!) {
deployment_by_pk(id: $id) {
active
started_deploy
deployed_at
started_at
}
}
"""
data = _query(qry, id=deployment_id)
return SimpleNamespace(**data["deployment_by_pk"])
def list_projects() -> list:
qry = """
query ListProjects {
project {
id
name
instances {
name
uuid
name
}
}
}
"""
data = _query(qry)
return [SimpleNamespace(**p) for p in data["project"]]
def is_instance_ready(instance_id: int) -> bool:
qry = """
query IsInstanceReady($id: Int!) {
instance_by_pk(id: $id) {
ready
}
}
"""
data = _query(qry, id=instance_id)
return data["instance_by_pk"]["ready"]
def is_session_finished(instance_uuid: str, session_id: str) -> bool:
qry = """
query IsSessionFinished($uuid: String!, $id: String!) {
session(instanceUuid: $uuid, id: $id) {
meta {
finished
}
}
}
"""
data = _query(qry, uuid=instance_uuid, id=session_id)
return data["session"]["meta"]["finished"]
def get_session_data(instance_uuid: str, session_id: str):
qry = """
query sessionData($uuid: String!, $id: String!) {
session(instanceUuid: $uuid, id: $id) {
meta {
finished
broken
createdAt
numThreads
result
}
stdout {
thread
time
text
}
failures {
thread
errorMsg
stacktrace {
callerThread
callerIp
callerFn
}
}
events {
thread
time
event
data
}
logs {
thread
time
text
}
}
}
"""
data = _query(qry, uuid=instance_uuid, id=session_id)
return data["session"]
| 22.168776 | 86 | 0.628854 |
0c890ca22dcecd7701fbe7d30f9fa889ca1cd59e
| 10,461 |
py
|
Python
|
W3C/W3CSkeletonSeleniumSafari14Test.py
|
TheTeejers/saucelabs-simple-python
|
e8ccf3865388b1580525f06536ed34bb610e1c5a
|
[
"MIT"
] | null | null | null |
W3C/W3CSkeletonSeleniumSafari14Test.py
|
TheTeejers/saucelabs-simple-python
|
e8ccf3865388b1580525f06536ed34bb610e1c5a
|
[
"MIT"
] | null | null | null |
W3C/W3CSkeletonSeleniumSafari14Test.py
|
TheTeejers/saucelabs-simple-python
|
e8ccf3865388b1580525f06536ed34bb610e1c5a
|
[
"MIT"
] | null | null | null |
####################################################################
# Skeleton for Selenium tests on Sauce Labs
####################################################################
###################################################################
# Imports that are good to use
# Not always used for every test
###################################################################
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.action_chains import ActionChains
import os
import time
from datetime import datetime, date, time, timezone
import datetime
from time import sleep
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.webdriver.common.by import By
# from reusableFxns import *
# import Action chains
from selenium.webdriver.common.action_chains import ActionChains
# import KEYS
from selenium.webdriver.common.keys import Keys
import requests
import json
from termcolor import colored
###################################################################
# Selenium with Python doesn't like using HTTPS correctly
# and displays a warning that it uses Unverified HTTPS request
# The following disables that warning to clear the clutter
# But I should find a way to do the proper requests
###################################################################
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
###################################################################
# Select Data Center
# Set region to 'US' or 'EU'
# Test will default to 'US' if left blank or set to any other than 'US' or 'EU'
###################################################################
region = 'US'
# region = 'EU'
# region = 'headless'
# region = 'localSafari'
# region = 'localChrome'
###################################################################
# Common parameters (desired capabilities)
# For Sauce Labs Tests
###################################################################
sauceParameters = {
# Required platform information
# 'platformName': 'macOS 10.13',
# 'browserName': 'safari',
# 'browserVersion': 'latest',
# 'name': 'Run: ' + str(datetime.datetime.now()),
#
# # Options used by Sauce Labs
# 'sauce:options':{
# 'tags':['Case', 'NUM',],
# # 'name': 'Run: ' + str(datetime.datetime.now()),
# # 'extendedDebugging': 'true',
# # 'capturePerformance': 'true',
# # "webdriver.remote.quietExceptions": 'true',
# # 'tunnelIdentifier':'Phill Tunnel One',
# # 'screenResolution':'1920x1080',
# # 'seleniumVersion': '3.141.59',
# # 'iedriverVersion': '3.4.0',
# # 'chromedriverVersion': '2.40',
# # 'requireWindowFocus' : True,
# # 'maxDuration': 1800,
# # 'idleTimeout': 1000,
# # 'commandTimeout': 600,
# # 'videoUploadOnPass':False,
# # 'extendedDebugging':'true',
# "prerun":"https://raw.githubusercontent.com/phillsauce/saucelabs-import-files/master/WinDownloadFiles.bat",
# },
# 'count': 1,
'platformName': 'macos 11.00',
# 'platformName': 'WIN10',
# 'browserName': 'firefox',
# 'browserName': 'MicrosoftEdge',
# 'browserName': 'internet explorer',
# 'browserName': 'chrome',
'browserName': 'safari',
'version': 'latest',
# 'browserVersion': 'dev',
# 'browserVersion': '14',
# 'seleniumVersion': '3.141.59',
# 'maxDuration': 1800,
# 'commandTimeout': 300,
# 'idleTimeout': 90,
# 'build': 'Trying to break it',
# 'tunnelIdentifier': 'tj',
# 'public':'private',
'sauce:options': {
'name':'Safari 14 test file upload ' + str(datetime.datetime.now()),
# 'tags':'13128733',
# 'extendedDebugging':'true',
# 'build':'PHAB-D62936_1743005',
# 'screenResolution':'1600x1200',
# 'avoidProxy': 'true',
# 'capturePerformance': 'true',
'seleniumVersion': '3.141.59',
# 'public':'private',
# 'name': 'https://dev.testinghub.autodesk.com/ test of drop down menu',
# 'extendedDebugging':'true',
# "timeZone": "New_York",
# 'tunnelIdentifier': 'safari14test'
# 'tunnelIdentifier': 'tj'
# 'safari.options':{},
# 'name': 'UI-Mobile-QA-Regression-tests-Hari',
# 'build': 'Trying to break it',
#
#
# 'tunnelIdentifier': 'tj',
},
# 'sauce:options': {
# # 'name': 'UI-Mobile-QA-Regression-tests-Hari',
# # 'build': 'Trying to break it',
# #
# #
# # 'tunnelIdentifier': 'tj',
# },
# Options used by Chrome
# 'goog:chromeOptions':{
# 'w3c': True, # Required for a W3C Chrome test
# # 'mobileEmulation':{'deviceName':'iPhone X'},
# # 'prefs': {
# # 'profile': {
# # 'password_manager_enabled': False
# # },
# # 'credentials_enable_service': False,
# # },
# # 'args': ['--auto-open-devtools-for-tabs'],
# },
# 'moz:firefoxOptions':{
# "log": {"level": "trace"},
# 'geckodriverVersion':'0.27.0',
# },
}
# This concatenates the tags key above to add the build parameter
# sauceParameters['sauce:options'].update({'build': '-'.join(sauceParameters['sauce:options'].get('tags'))})
###################################################################
# Connect to Sauce Labs
###################################################################
# region = 'US'
# region = 'EU'
# region = 'headless'
# region = 'localSafari'
# region = 'localChrome'
try:
region
except NameError:
region = 'US'
if region == 'US':
print(colored("You are using the US data center", 'green', attrs=['blink', 'reverse', 'underline']))
driver = webdriver.Remote(
# command_executor='https://tj.invitationtest3:[email protected]:443/wd/hub',
command_executor='https://'+os.environ['SAUCE_USERNAME']+':'+os.environ['SAUCE_ACCESS_KEY']+'@ondemand.us-west-1.saucelabs.com:443/wd/hub',
# command_executor='https://'+os.environ['SAUCE_USERNAME']+':'+os.environ['SAUCE_ACCESS_KEY']+'@ondemand.saucelabs.com:443/wd/hub',
desired_capabilities=sauceParameters)
elif region == 'EU':
print (colored("You are using the EU data center", 'green', attrs=['blink', 'reverse', 'underline']))
driver = webdriver.Remote(
command_executor='https://'+os.environ['SAUCE_USERNAME']+':'+os.environ['SAUCE_ACCESS_KEY']+'@ondemand.eu-central-1.saucelabs.com:443/wd/hub',
desired_capabilities=sauceParameters)
elif region == 'localSafari':
print(colored("You are using local Safari browser", 'green', attrs=['blink', 'reverse', 'underline']))
driver = webdriver.Safari(executable_path='/usr/bin/safaridriver');
elif region == 'headless':
print(colored("You are using local the HEADLESS datacenter", 'green', attrs=['blink', 'reverse', 'underline']))
driver = webdriver.Remote(
command_executor='https://'+os.environ['SAUCE_USERNAME']+':'+os.environ['SAUCE_ACCESS_KEY']+'@ondemand.us-east-1.saucelabs.com:443/wd/hub',
desired_capabilities=sauceParameters)
elif region == 'localChrome':
print(colored("You are using local Chrome browser", 'green', attrs=['blink', 'reverse', 'underline']))
driver = webdriver.Chrome(executable_path='/Users/terranceloughry/Downloads/chromedriver')
###################################################################
# Test logic goes here
###################################################################
# Navigating to a website
#__________________________________________________________________
print (driver.capabilities)
#
#
# driver.get('https://www.file.io/')
driver.get('https://filebin.net/')
try:
print (colored("looking for input type 'file'", 'green'))
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.ID, "fileField")))
# WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CLASS_NAME, "react-fine-uploader-file-input")))
print (colored("found input type 'file'", 'green'))
interact = driver.find_element_by_css_selector("[type='file']")
# interact
# interact.click()
# JavascriptExecutor driver = (JavascriptExecutor)getDriver();
# driver.execute_script("arguments[0].click();", interact);
# driver.execute_script("sauce:job-result={}".format(sauce_result))
interact.send_keys('/Users/terranceloughry/Desktop/possum.jpg')
print (colored("uploading image", 'green'))
# print (colored(driver.contexts, 'blue'))
except:
print (colored("Can not find input type 'file'", 'red'))
# #
try:
print (colored("looking for image link", 'green'))
WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.CLASS_NAME, 'link-custom')))
interact = driver.find_element_by_link_text("possum.jpg")
interact.click()
print (colored("found and clicked on image link", 'green'))
# print (colored(driver.contexts, 'blue'))
except:
print (colored("Can not find image link", 'red'))
# try:
# print (colored("looking for class name img-thumbnail", 'green'))
# WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CLASS_NAME, 'img-thumbnail')))
# # interact = driver.find_element_by_xpath("//button")
# # interact.click()
# sleep(5)
# print (colored("found image", 'green'))
# # print (colored(driver.contexts, 'blue'))
# except:
# print (colored("Can not find image", 'red'))
#
#
#
# sleep(15)
# Setup for using random Python commands
#__________________________________________________________________
# driver.save_screenshot('screenshot.png')
# sleep(50)
# print('Message')
# Setup for using Action chains
#__________________________________________________________________
# ActionChains(driver).move_to_element(interact).perform()
# Setup for random script executions
#__________________________________________________________________
# driver.execute_script('sauce: break')
# driver.execute_script('sauce:context=Place words here for notes')
# Ending the test session
#__________________________________________________________________
driver.quit()
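# --- Hedged note (added; not in the original script) ---
# The Sauce Labs branches above read SAUCE_USERNAME and SAUCE_ACCESS_KEY from the
# environment, so a typical (hypothetical) invocation looks like:
#
#   export SAUCE_USERNAME=<user> SAUCE_ACCESS_KEY=<key>
#   python W3CSkeletonSeleniumSafari14Test.py
#
# The 'localSafari'/'localChrome' branches instead require the hard-coded driver
# paths above to exist on the machine running the test.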
| 38.744444 | 150 | 0.603671 |
667a53754ded22eb30319252d778add6d7583448
| 162 |
py
|
Python
|
src/ToolChainClassifier/script/test_gspan.py
|
AnonymousSEMA/SEMA-ToolChain
|
05d6a7e43e10d4b1f6c5dfb70fbabeab3d4daf82
|
[
"BSD-2-Clause"
] | null | null | null |
src/ToolChainClassifier/script/test_gspan.py
|
AnonymousSEMA/SEMA-ToolChain
|
05d6a7e43e10d4b1f6c5dfb70fbabeab3d4daf82
|
[
"BSD-2-Clause"
] | null | null | null |
src/ToolChainClassifier/script/test_gspan.py
|
AnonymousSEMA/SEMA-ToolChain
|
05d6a7e43e10d4b1f6c5dfb70fbabeab3d4daf82
|
[
"BSD-2-Clause"
] | null | null | null |
from classifier import *
clf = Gspan_classifier('test_clf7/')
clf.train('Signatures_merge_call_CBFS/')
ret = clf.evaluate()
print(ret)
clf.get_stat_classifier()
| 20.25 | 40 | 0.777778 |
c9314d1fd093d1c8ad70a22260ca519822cd74a0
| 659 |
py
|
Python
|
coin-toss/coin-toss.py
|
DOUGLASMENDES/Python-Scripts
|
00021ede5e894a0e2fb43a33129bf1d9dc0c492d
|
[
"MIT"
] | 307 |
2019-05-17T21:34:12.000Z
|
2022-03-28T20:03:44.000Z
|
coin-toss/coin-toss.py
|
DOUGLASMENDES/Python-Scripts
|
00021ede5e894a0e2fb43a33129bf1d9dc0c492d
|
[
"MIT"
] | 8 |
2021-03-19T00:47:41.000Z
|
2022-03-11T23:47:47.000Z
|
coin-toss/coin-toss.py
|
DOUGLASMENDES/Python-Scripts
|
00021ede5e894a0e2fb43a33129bf1d9dc0c492d
|
[
"MIT"
] | 78 |
2019-05-23T00:51:28.000Z
|
2022-02-01T21:25:24.000Z
|
#! python3
# coin-toss.py
# Author: Kene Udeh
# Source: Automate the Boring stuff with python Ch. 10 Project
import random
if __name__ == "__main__":
guess = ''
options = ['tails', 'heads']
while guess not in ('heads', 'tails'):
print('Guess the coin toss! Enter heads or tails:')
guess = input()
toss = random.randint(0, 1) # 0 is tails, 1 is heads
if guess == options[toss]:
print('You got it!')
else:
print('Nope! Guess again!')
guess = input()
if guess == options[toss]:
print('You got it!')
else:
print('Nope. You are really bad at this game.')
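# --- Hedged example session (illustrative; the outcome depends on random.randint) ---
#   Guess the coin toss! Enter heads or tails:
#   heads
#   Nope! Guess again!
#   tails
#   You got it!
# Note that the second chance is judged against the same toss, so switching sides
# after a miss always wins.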
| 24.407407 | 62 | 0.566009 |
dd0c9aa737b5a3c98322962a0f0ffe7d48a7caa3
| 14,094 |
py
|
Python
|
networkit/GEXFIO.py
|
mlooz/networkit-general-polylog
|
5de2844e6b06258084ddf423c054a90954f6f59c
|
[
"MIT"
] | 3 |
2018-02-24T08:17:03.000Z
|
2020-05-11T13:08:33.000Z
|
networkit/GEXFIO.py
|
kit-parco/networkit-hyperbolic-kd
|
8eb786b8f72e0507a75e68184f444a19cf47ef58
|
[
"MIT"
] | 1 |
2019-11-29T08:57:52.000Z
|
2019-11-29T08:57:52.000Z
|
networkit/GEXFIO.py
|
kit-parco/networkit-hyperbolic-kd
|
8eb786b8f72e0507a75e68184f444a19cf47ef58
|
[
"MIT"
] | 2 |
2020-11-18T09:17:04.000Z
|
2020-12-10T12:07:21.000Z
|
import queue
import xml.etree.cElementTree as ET
from xml.dom import minidom
from _NetworKit import Graph, GraphEvent
# GEXF Reader
class GEXFReader:
def __init__(self):
""" Initializes the GEXFReader class """
self.mapping = dict()
self.g = Graph(0)
self.weighted = False
self.directed = False
self.dynamic = False
self.hasDynamicWeights = False
self.q = queue.Queue()
self.eventStream = []
self.nInitialNodes = 0
self.timeFormat = ""
def read(self, fpath):
""" Reads and returns the graph object defined in fpath """
#0. Reset internal vars and parse the xml
self.__init__()
doc = minidom.parse(fpath)
#1. Determine if graph is dynamic, directed and has dynamically changing weights
graph = doc.getElementsByTagName("graph")[0]
if (graph.getAttribute("defaultedgetype") == "directed"):
self.directed = True
if (graph.getAttribute("mode") == "dynamic"):
self.dynamic = True
if self.dynamic:
self.timeFormat = graph.getAttribute("timeformat")
attributes = graph.getElementsByTagName("attribute")
for att in attributes:
if att.getAttribute("id") == "weight":
self.hasDynamicWeights = True
self.weighted = True
#2. Read nodes and map them to IDs defined in GEXF file
nodes = doc.getElementsByTagName("node")
for n in nodes:
u = n.getAttribute("id")
if self.dynamic:
"""
                A GEXF ID can be a string. However, this version of the parser accepts ids
                in only 2 formats:
                1. id = "0,1,2," etc.
                2. id = "n0, n1, n2" etc.
                So the id is either an integer or an integer prefixed with "n".
                Gephi, for example, generates its random graphs in the 2nd format.
"""
_id = ""
try:
_id = int(u)
except:
_id = int(u[1:])
# 2-way mapping to refer nodes back in mapDynamicNodes() method
self.mapping[u] = _id
self.mapping[_id] = u
controlList = {'elementAdded': False, 'elementDeleted': False}
spells = n.getElementsByTagName("spell")
if len(spells) > 0:
for s in spells:
self.parseDynamics(s, "n", controlList, u)
else:
self.parseDynamics(n, "n", controlList, u)
else:
self.mapping[u] = self.nInitialNodes
self.nInitialNodes +=1
if self.dynamic:
self.mapDynamicNodes()
#3. Read edges and determine if graph is weighted
edges = doc.getElementsByTagName("edge")
for e in edges:
u = e.getAttribute("source")
v = e.getAttribute("target")
w = "1.0"
if e.hasAttribute("weight"):
self.weighted = True
w = e.getAttribute("weight")
if self.dynamic:
controlList = {'elementAdded': False, 'elementDeleted': False}
spells = e.getElementsByTagName("spell")
if len(spells) > 0:
for s in spells:
self.parseDynamics(s, "e", controlList, u, v, w)
else:
self.parseDynamics(e, "e", controlList, u, v, w)
else:
self.q.put((u, v, w))
#4. Create graph object
self.g = Graph(self.nInitialNodes, self.weighted, self.directed)
#5. Add initial edges to the graph and sort the eventStream by time
#5.1 Adding initial edges
while not self.q.empty():
edge = self.q.get()
(u, v, w) = (edge[0], edge[1], float(edge[2]))
self.g.addEdge(self.mapping[u], self.mapping[v], w)
#5.2 Sorting the eventStream by time and adding timeStep between events that happen in different times
self.eventStream.sort(key=lambda x:x[1])
for i in range(1, len(self.eventStream)):
if self.eventStream[i][1] != self.eventStream[i-1][1]:
self.eventStream.append((GraphEvent(GraphEvent.TIME_STEP, 0, 0, 0), self.eventStream[i-1][1]))
self.eventStream.sort(key=lambda x:x[1])
self.eventStream = [event[0] for event in self.eventStream]
return (self.g, self.eventStream)
def parseDynamics(self, element, elementType, controlList, u, v = "0", w = "0"):
"""
        Determine the operations as follows:
        1. Element has a start and was not deleted before: create an add event.
        2. Element has a start and was deleted before: create a restore event.
        3. Element has an end: create a delete event.
        4. If an element has an end before its start (or no start at all), add it to the initial graph.
        5. For dynamic edges, simply go over the attvalues and create weight update events.
        * A dynamic element must be defined either using only spells or only inline attributes.
          These 2 shouldn't be mixed. (For example, Gephi will treat them differently: it'll ignore
          the inline declaration if the same element also contains spells.)
"""
startTime = element.getAttribute("start")
if startTime == "":
startTime = element.getAttribute("startopen")
endTime = element.getAttribute("end")
if endTime == "":
endTime = element.getAttribute("endopen")
if self.timeFormat != "date":
try:
startTime = float(startTime)
except:
pass
try:
endTime = float(endTime)
except:
pass
if startTime != "" and endTime != "":
if startTime < endTime and not controlList['elementDeleted']:
self.createEvent(startTime, "a"+elementType, u, v, w)
controlList['elementAdded'] = True
else:
self.createEvent(startTime, "r"+elementType, u, v, w)
self.createEvent(endTime, "d"+elementType, u, v, w)
controlList['elementDeleted'] = True
if startTime != "" and endTime == "":
if controlList['elementDeleted']:
self.createEvent(startTime, "r"+elementType, u, v, w)
else:
self.createEvent(startTime, "a"+elementType, u, v, w)
controlList['elementAdded'] = True
# Handle dynamic edge weights here
if elementType == "e" and self.hasDynamicWeights:
attvalues = element.getElementsByTagName("attvalue")
# If a spell is traversed, attvalues are siblings
if len(attvalues) == 0:
attvalues = element.parentNode.parentNode.getElementsByTagName("attvalue")
for att in attvalues:
if att.getAttribute("for") == "weight":
w = att.getAttribute("value")
startTime = att.getAttribute("start")
if startTime == "":
startTime = att.getAttribute("startopen")
if self.timeFormat != "date":
startTime = float(startTime)
# If this edge is not added, first weight update indicates edge addition
if not controlList['elementAdded']:
self.createEvent(startTime, "a"+elementType, u, v, w)
controlList['elementAdded'] = True
else:
self.createEvent(startTime, "c"+elementType, u, v, w)
if startTime == "":
if not controlList['elementAdded']:
if elementType == "n":
self.mapping[u] = self.nInitialNodes
self.nInitialNodes += 1
else:
self.q.put((u,v,w))
controlList['elementAdded'] = True
if endTime != "":
self.createEvent(endTime, "d"+elementType, u, v, w)
controlList['elementDeleted'] = True
def createEvent(self, eventTime, eventType, u, v, w):
"""
Creates a NetworKit::GraphEvent from the supplied parameters
and passes it to eventStream
"""
event, u = None, self.mapping[u]
if eventType[1] == "e":
v, w = self.mapping[v], float(w)
if eventType == "an":
event = GraphEvent(GraphEvent.NODE_ADDITION, u, 0, 0)
elif eventType == "dn":
event = GraphEvent(GraphEvent.NODE_REMOVAL, u, 0, 0)
elif eventType == "rn":
event = GraphEvent(GraphEvent.NODE_RESTORATION, u, 0, 0)
elif eventType == "ae" or eventType == "re":
event = GraphEvent(GraphEvent.EDGE_ADDITION, u, v, w)
elif eventType == "de":
event = GraphEvent(GraphEvent.EDGE_REMOVAL, u, v, w)
elif eventType == "ce":
event = GraphEvent(GraphEvent.EDGE_WEIGHT_UPDATE, u, v, w)
self.eventStream.append((event, eventTime))
def mapDynamicNodes(self):
"""
        The node ID of a dynamic node must be determined before it is mapped to its GEXF ID.
        This requires processing the sorted eventStream and figuring out the addition order of the nodes.
        After that, the node addition/deletion/restoration operations of this node must be re-added to the
        eventStream with the correct mapping.
        !Note: The new mapping of a node can be equal to the old mapping of a node. In order to prevent
        collisions, the isMapped array must be maintained and controlled.
"""
nNodes = self.nInitialNodes
nEvent = len(self.eventStream)
isMapped = [False] * nEvent
self.eventStream.sort(key=lambda x:x[1])
for i in range(0, nEvent):
event = self.eventStream[i]
# Only the nodes with addition event will get remapped.
if not isMapped[i] and event[0].type == GraphEvent.NODE_ADDITION:
u = event[0].u
self.mapping[self.mapping[u]] = nNodes
# All the other events of that node comes after it's addition event
for j in range(i, len(self.eventStream)):
event = self.eventStream[j]
if not isMapped[j] and event[0].u == u:
mappedEvent = GraphEvent(event[0].type, self.mapping[self.mapping[u]], 0, 0)
self.eventStream[j] = (mappedEvent, event[1])
isMapped[j] = True
nNodes +=1
isMapped[i] = True
def getNodeMap(self):
""" Returns GEXF ID -> NetworKit::Graph node ID mapping. """
forwardMap = dict()
for key in self.mapping:
if type(key) == str:
forwardMap[key] = self.mapping[key]
return forwardMap
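# --- Hedged usage sketch for the reader (added; not part of the original module) ---
# "graph.gexf" is a hypothetical path; for a dynamic GEXF file the returned event
# stream describes the graph's evolution over time.
#
#   reader = GEXFReader()
#   G, events = reader.read("graph.gexf")
#   gexf_to_nk = reader.getNodeMap()   # original GEXF id -> NetworKit node id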
# GEXFWriter
class GEXFWriter:
""" This class provides a function to write a NetworKit graph to a file in the
GEXF format. """
def __init__(self):
""" Initializes the class. """
self.edgeIdctr = 0
self.q = queue.Queue()
self.hasDynamicWeight = False
def write(self, graph, fname, eventStream = [], mapping = []):
"""
Writes a NetworKit::Graph to the specified file fname.
Parameters:
- graph: a NetworKit::Graph python object
- fname: the desired file path and name to be written to
- eventStream: stream of events
- mapping: random node mapping
"""
#0. Reset internal vars
self.__init__()
#1. Start with the root element and the right header information
root = ET.Element('gexf')
root.set("xmlns:xsi","http://www.w3.org/2001/XMLSchema-instance")
root.set("xsi:schemaLocation","http://www.gexf.net/1.2draft http://www.gexf.net/1.2draft/gexf.xsd")
root.set('version', '1.2')
#2. Create graph element with appropriate information
graphElement = ET.SubElement(root,"graph")
if graph.isDirected():
graphElement.set('defaultedgetype', 'directed')
else:
graphElement.set('defaultedgetype', 'undirected')
if len(eventStream) > 0:
graphElement.set('mode', 'dynamic')
graphElement.set('timeformat', 'double')
for event in eventStream:
if event.type == GraphEvent.EDGE_WEIGHT_UPDATE:
dynamicAtt = ET.SubElement(graphElement, "attributes")
dynamicAtt.set('class', 'edge')
dynamicAtt.set('mode', 'dynamic')
dynamicWeight = ET.SubElement(dynamicAtt, "attribute")
dynamicWeight.set('id', 'weight')
dynamicWeight.set('title', 'Weight')
dynamicWeight.set('type', 'float')
self.hasDynamicWeight = True
break
else:
graphElement.set('mode', 'static')
#3. Add nodes
nodesElement = ET.SubElement(graphElement, "nodes")
nNodes, idArray = 0, []
#3.1 Count the # of nodes (inital + dynamic nodes)
for event in eventStream:
if event.type == GraphEvent.NODE_ADDITION:
nNodes +=1
nNodes += len(graph.nodes())
for i in range(0, nNodes):
idArray.append(i)
# Optional:Map nodes to a random mapping if user provided one
if (len(mapping) > 0):
if(nNodes != len(mapping)):
raise Exception('Size of nodes and mapping must match')
else:
for i in range(0, nNodes):
idArray[i] = mapping[i]
#3.2 Write nodes to the gexf file
for n in range(nNodes):
nodeElement = ET.SubElement(nodesElement,'node')
nodeElement.set('id', str(idArray[n]))
self.writeEvent(nodeElement, eventStream, n)
#4. Add edges
edgesElement = ET.SubElement(graphElement, "edges")
#4.1 Put all edges into a queue(inital + dynamic edges)
for e in graph.edges():
self.q.put((e[0], e[1], graph.weight(e[0], e[1])))
for event in eventStream:
if event.type == GraphEvent.EDGE_ADDITION:
self.q.put((event.u, event.v, event.w))
#4.2 Write edges to the gexf file
while not self.q.empty():
edgeElement = ET.SubElement(edgesElement,'edge')
e = self.q.get()
edgeElement.set('source', str(idArray[e[0]]))
edgeElement.set('target', str(idArray[e[1]]))
edgeElement.set('id', "{0}".format(self.edgeIdctr))
self.edgeIdctr += 1
if graph.isWeighted():
edgeElement.set('weight', str(e[2]))
self.writeEvent(edgeElement, eventStream, e)
#5. Write the generated tree to the file
tree = ET.ElementTree(root)
tree.write(fname,"utf-8",True)
def writeEvent(self, xmlElement, eventStream, graphElement):
# A var that indicates if the event belongs the graph element we traverse on
matched = False
startEvents = [GraphEvent.NODE_ADDITION, GraphEvent.EDGE_ADDITION, GraphEvent.NODE_RESTORATION]
endEvents = [GraphEvent.NODE_REMOVAL, GraphEvent.EDGE_REMOVAL]
nodeEvents = [GraphEvent.NODE_ADDITION, GraphEvent.NODE_REMOVAL, GraphEvent.NODE_RESTORATION]
edgeEvents = [GraphEvent.EDGE_ADDITION, GraphEvent.EDGE_REMOVAL, GraphEvent.EDGE_WEIGHT_UPDATE]
spellTag, weightTag, operation = False, False, ""
timeStep = 0
spellsElement, attValuesElement = None, None
for event in eventStream:
if event.type == GraphEvent.TIME_STEP:
timeStep += 1
if type(graphElement) == type(0): #a node is an integer
matched = (event.type in nodeEvents and event.u == graphElement)
else:
matched = (event.type in edgeEvents and (event.u == graphElement[0] and event.v == graphElement[1]))
if matched:
# Handle weight update seperately
if event.type == GraphEvent.EDGE_WEIGHT_UPDATE:
if not weightTag:
attvaluesElement = ET.SubElement(xmlElement, "attvalues")
weightTag = True
attvalue = ET.SubElement(attvaluesElement, "attvalue")
attvalue.set('for', 'weight')
attvalue.set('value', str(event.w))
attvalue.set('start', str(timeStep))
attvalue.set('endopen', str(timeStep + 1))
else:
if event.type in startEvents:
operation = "start"
else:
operation = "end"
if not spellTag:
spellsElement = ET.SubElement(xmlElement, "spells")
spellTag = True
spellElement = ET.SubElement(spellsElement, "spell")
spellElement.set(operation, str(timeStep))
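# --- Hedged usage sketch for the writer (added; not part of the original module) ---
#   writer = GEXFWriter()
#   writer.write(G, "static_copy.gexf")                        # static graph
#   writer.write(G, "dynamic_copy.gexf", eventStream=events)   # dynamic graph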
| 35.501259 | 104 | 0.681638 |
053acc09f712db454288295e7afddf0c17dcd89e
| 10,975 |
py
|
Python
|
smoketests/run_tests.py
|
dreamhost/nova
|
066a3d4c410056689b5843d9520f43b2b6e7d127
|
[
"Apache-2.0"
] | 1 |
2019-11-06T12:21:59.000Z
|
2019-11-06T12:21:59.000Z
|
smoketests/run_tests.py
|
dreamhost/nova
|
066a3d4c410056689b5843d9520f43b2b6e7d127
|
[
"Apache-2.0"
] | null | null | null |
smoketests/run_tests.py
|
dreamhost/nova
|
066a3d4c410056689b5843d9520f43b2b6e7d127
|
[
"Apache-2.0"
] | 2 |
2019-12-23T18:06:28.000Z
|
2020-07-24T08:44:28.000Z
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Colorizer Code is borrowed from Twisted:
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Unittest runner for Nova.
To run all tests
python run_tests.py
To run a single test:
python run_tests.py test_compute:ComputeTestCase.test_run_terminate
To run a single test module:
python run_tests.py test_compute
or
python run_tests.py api.test_wsgi
"""
import gettext
import os
import sys
import unittest
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
from nose import config
from nose import core
from nose import result
from smoketests import flags
FLAGS = flags.FLAGS
class _AnsiColorizer(object):
"""
A colorizer is an object that loosely wraps around a stream, allowing
callers to write text to the stream in a particular color.
Colorizer classes must implement C{supported()} and C{write(text, color)}.
"""
_colors = dict(black=30, red=31, green=32, yellow=33,
blue=34, magenta=35, cyan=36, white=37)
def __init__(self, stream):
self.stream = stream
def supported(cls, stream=sys.stdout):
"""
A class method that returns True if the current platform supports
coloring terminal output using this method. Returns False otherwise.
"""
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
except ImportError:
return False
else:
try:
try:
return curses.tigetnum("colors") > 2
except curses.error:
curses.setupterm()
return curses.tigetnum("colors") > 2
except Exception:
raise
# guess false in case of error
return False
supported = classmethod(supported)
def write(self, text, color):
"""
Write the given text to the stream in the given color.
@param text: Text to be written to the stream.
@param color: A string label for a color. e.g. 'red', 'white'.
"""
color = self._colors[color]
self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
class _Win32Colorizer(object):
"""
See _AnsiColorizer docstring.
"""
def __init__(self, stream):
from win32console import FOREGROUND_BLUE
from win32console import FOREGROUND_GREEN
from win32console import FOREGROUND_INTENSITY
from win32console import FOREGROUND_RED
from win32console import GetStdHandle
from win32console import STD_OUT_HANDLE
red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN,
FOREGROUND_BLUE, FOREGROUND_INTENSITY)
self.stream = stream
self.screenBuffer = GetStdHandle(STD_OUT_HANDLE)
self._colors = {
'normal': red | green | blue,
'red': red | bold,
'green': green | bold,
'blue': blue | bold,
'yellow': red | green | bold,
'magenta': red | blue | bold,
'cyan': green | blue | bold,
'white': red | green | blue | bold
}
def supported(cls, stream=sys.stdout):
try:
import win32console
screenBuffer = win32console.GetStdHandle(
win32console.STD_OUT_HANDLE)
except ImportError:
return False
import pywintypes
try:
screenBuffer.SetConsoleTextAttribute(
win32console.FOREGROUND_RED |
win32console.FOREGROUND_GREEN |
win32console.FOREGROUND_BLUE)
except pywintypes.error:
return False
else:
return True
supported = classmethod(supported)
def write(self, text, color):
color = self._colors[color]
self.screenBuffer.SetConsoleTextAttribute(color)
self.stream.write(text)
self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
class _NullColorizer(object):
"""
See _AnsiColorizer docstring.
"""
def __init__(self, stream):
self.stream = stream
def supported(cls, stream=sys.stdout):
return True
supported = classmethod(supported)
def write(self, text, color):
self.stream.write(text)
class NovaTestResult(result.TextTestResult):
def __init__(self, *args, **kw):
result.TextTestResult.__init__(self, *args, **kw)
self._last_case = None
self.colorizer = None
# NOTE(vish): reset stdout for the terminal check
stdout = sys.stdout
sys.stdout = sys.__stdout__
for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
if colorizer.supported():
self.colorizer = colorizer(self.stream)
break
sys.stdout = stdout
def getDescription(self, test):
return str(test)
# NOTE(vish): copied from unittest with edit to add color
def addSuccess(self, test):
unittest.TestResult.addSuccess(self, test)
if self.showAll:
self.colorizer.write("OK", 'green')
self.stream.writeln()
elif self.dots:
self.stream.write('.')
self.stream.flush()
# NOTE(vish): copied from unittest with edit to add color
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
if self.showAll:
self.colorizer.write("FAIL", 'red')
self.stream.writeln()
elif self.dots:
self.stream.write('F')
self.stream.flush()
# NOTE(vish): copied from nose with edit to add color
def addError(self, test, err):
"""Overrides normal addError to add support for
errorClasses. If the exception is a registered class, the
error will be added to the list for that class, not errors.
"""
stream = getattr(self, 'stream', None)
ec, ev, tb = err
try:
exc_info = self._exc_info_to_string(err, test)
except TypeError:
# 2.3 compat
exc_info = self._exc_info_to_string(err)
for cls, (storage, label, isfail) in self.errorClasses.items():
if result.isclass(ec) and issubclass(ec, cls):
if isfail:
test.passed = False
storage.append((test, exc_info))
# Might get patched into a streamless result
if stream is not None:
if self.showAll:
message = [label]
detail = result._exception_detail(err[1])
if detail:
message.append(detail)
stream.writeln(": ".join(message))
elif self.dots:
stream.write(label[:1])
return
self.errors.append((test, exc_info))
test.passed = False
if stream is not None:
if self.showAll:
self.colorizer.write("ERROR", 'red')
self.stream.writeln()
elif self.dots:
stream.write('E')
def startTest(self, test):
unittest.TestResult.startTest(self, test)
current_case = test.test.__class__.__name__
if self.showAll:
if current_case != self._last_case:
self.stream.writeln(current_case)
self._last_case = current_case
self.stream.write(
' %s' % str(test.test._testMethodName).ljust(60))
self.stream.flush()
class NovaTestRunner(core.TextTestRunner):
def _makeResult(self):
return NovaTestResult(self.stream,
self.descriptions,
self.verbosity,
self.config)
if __name__ == '__main__':
if not os.getenv('EC2_ACCESS_KEY'):
print _('Missing EC2 environment variables. Please '
'source the appropriate novarc file before '
'running this test.')
sys.exit(1)
argv = FLAGS(sys.argv)
testdir = os.path.abspath("./")
c = config.Config(stream=sys.stdout,
env=os.environ,
verbosity=3,
workingDir=testdir,
plugins=core.DefaultPluginManager())
runner = NovaTestRunner(stream=c.stream,
verbosity=c.verbosity,
config=c)
sys.exit(not core.run(config=c, testRunner=runner, argv=argv))
| 34.84127 | 78 | 0.606469 |
6902a48b7cb9c36f2535d156f8faa91a34d1109b
| 9,734 |
py
|
Python
|
src/coreclr/scripts/fuzzlyn_summarize.py
|
matijs-toonen/runtime
|
60b51e452688e6c9dd21b05ff993797a6d4acab3
|
[
"MIT"
] | 1 |
2019-11-26T08:17:01.000Z
|
2019-11-26T08:17:01.000Z
|
src/coreclr/scripts/fuzzlyn_summarize.py
|
matijs-toonen/runtime
|
60b51e452688e6c9dd21b05ff993797a6d4acab3
|
[
"MIT"
] | null | null | null |
src/coreclr/scripts/fuzzlyn_summarize.py
|
matijs-toonen/runtime
|
60b51e452688e6c9dd21b05ff993797a6d4acab3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
#
##
# Title: fuzzlyn_summarize.py
#
# Notes:
#
# Script to summarize issues found from all partitions and print them on console.
#
################################################################################
################################################################################
import sys  # used for sys.exit() at the bottom of this script
import argparse
import json
import os
import re
import zipfile
from collections import defaultdict
from os import walk
from coreclr_arguments import *
parser = argparse.ArgumentParser(description="description")
parser.add_argument("-issues_directory", help="Path to issues directory")
parser.add_argument("-arch", help="Architecture")
parser.add_argument("-platform", help="OS platform")
parser.add_argument("-build_config", help="Build configuration of runtime under test")
assertion_patterns = [re.compile(r"Assertion failed '(.*)' in '.*' during '(.*)'"),
re.compile(r"Assert failure\(PID \d+ \[0x[0-9a-f]+], Thread: \d+ \[0x[0-9a-f]+]\):(.*)")]
def setup_args(args):
""" Setup the args.
Args:
args (ArgParse): args parsed by arg parser
Returns:
args (CoreclrArguments)
"""
coreclr_args = CoreclrArguments(args, require_built_core_root=False, require_built_product_dir=False,
require_built_test_dir=False, default_build_type="Checked")
coreclr_args.verify(args,
"issues_directory",
lambda issues_directory: os.path.isdir(issues_directory),
"issues_directory doesn't exist")
coreclr_args.verify(args,
"arch",
lambda unused: True,
"Unable to set arch")
coreclr_args.verify(args,
"platform",
lambda unused: True,
"Unable to set platform")
coreclr_args.verify(args,
"build_config",
lambda unused: True,
"Unable to set build_config")
return coreclr_args
def extract_assertion_error(text):
""" Extract assertion error from stderr output
Args:
text (string): The text that might contain an assertion
Returns:
The assertion as a string, or None if no assertion error is in the text.
"""
for assertion_pattern in assertion_patterns:
issue_match = re.search(assertion_pattern, text)
if issue_match is not None:
assert_string = " ".join(issue_match.groups())
return assert_string.strip()
return None
def main(main_args):
"""Main entrypoint
Args:
main_args ([type]): Arguments to the script
"""
coreclr_args = setup_args(main_args)
arch = coreclr_args.arch
platform = coreclr_args.platform
build_config = coreclr_args.build_config
issues_directory = coreclr_args.issues_directory
# partition_results[partition_name] = { summary: _, examples: [], reduced_examples: [(seed, source)] }
partition_results = {}
def ensure_partition(name):
if name not in partition_results:
partition_results[name] = { "examples": [], "summary": None, "reduced_examples": [] }
for file_path, dirs, files in walk(issues_directory, topdown=True):
for file_name in files:
if file_name.startswith("issues-summary-") and "Partition" in file_name:
partition_name = os.path.splitext(file_name)[0].split("-")[-1]
ensure_partition(partition_name)
issues_summary_file = os.path.join(file_path, file_name)
with open(issues_summary_file, "r") as sf:
events = [json.loads(x) for x in sf.readlines()]
summary = next((x["RunSummary"] for x in events if x["Kind"] == "RunSummary"), None)
if summary is not None:
partition_results[partition_name]["summary"] = summary
examples = [x["Example"] for x in events if x["Kind"] == "ExampleFound"]
partition_results[partition_name]["examples"].extend(examples)
elif file_name.startswith("AllIssues-") and "Partition" in file_name:
partition_name = os.path.splitext(file_name)[0].split("-")[-1]
ensure_partition(partition_name)
with zipfile.ZipFile(os.path.join(file_path, file_name)) as zip:
reduced_source_file_names = [x for x in zip.namelist() if x.endswith(".cs")]
def seed_from_internal_zip_path(path):
""" Given x/y/12345.cs, return 12345 """
return int(os.path.splitext(path.split("/")[-1])[0])
reduced_examples = [(seed_from_internal_zip_path(path), zip.read(path).decode("utf8").strip()) for path in reduced_source_file_names]
partition_results[partition_name]["reduced_examples"].extend(reduced_examples)
total_examples_generated = 0
all_reduced_examples = []
all_examples = []
for partition_name, results in partition_results.items():
if results['summary'] is not None:
# {"DegreeOfParallelism":32,"TotalProgramsGenerated":354,"TotalRunTime":"00:00:47.0918613"}
total_examples_generated += results['summary']['TotalProgramsGenerated']
all_reduced_examples.extend(results['reduced_examples'])
all_examples.extend(results['examples'])
unreduced_examples = []
crashes_by_assert = defaultdict(list)
remaining = []
for example in all_examples:
if any(seed for (seed, _) in all_reduced_examples if example['Seed'] == seed):
# Was reduced
continue
unreduced_examples.append(example)
if example['Kind'] == "Crash":
assertion_error = extract_assertion_error(example['CrashError'])
if assertion_error:
crashes_by_assert[assertion_error].append(example)
else:
remaining.append(example)
else:
remaining.append(example)
md_name = "Summary of Fuzzlyn run"
if platform or arch or build_config:
md_name += " on"
if platform:
md_name += " " + platform
if arch:
md_name += " " + arch
if build_config:
md_name += " " + build_config
md_name += ".md"
md_path = os.path.join(issues_directory, md_name)
with open(md_path, "w") as f:
f.write("# General information about run\n")
if platform:
f.write("* Platform: {}\n".format(platform))
if arch:
f.write("* Architecture: {}\n".format(arch))
if build_config:
f.write("* Build config: {}\n".format(build_config))
f.write("* Total programs generated: {}\n".format(total_examples_generated))
f.write("* Number of examples found: {}\n".format(len(all_examples)))
f.write("\n")
if len(all_reduced_examples) > 0:
f.write("# {} reduced examples are available\n".format(len(all_reduced_examples)))
for (_, source) in sorted(all_reduced_examples, key=lambda p: len(p[1])):
f.write("```csharp\n")
f.write(source.replace("\r", "") + "\n")
f.write("```\n\n")
if len(crashes_by_assert) > 0:
f.write("# {} distinct assertion errors seen\n".format(len(crashes_by_assert)))
for error, examples in sorted(crashes_by_assert.items(), key=lambda p: len(p[1]), reverse=True):
f.write("## ({} occurences) {}\n".format(len(examples), error))
if len(examples) > 1:
f.write("Example occurence:\n")
f.write("```scala\n")
f.write(examples[0]['CrashError'].strip() + "\n")
f.write("```\n")
f.write("Affected seeds{}:\n".format(" (10 shown)" if len(examples) > 10 else ""))
f.write("\n".join("* `" + str(ex['Seed']) + "`" for ex in sorted(examples[:10], key=lambda ex: ex['Seed'])))
f.write("\n\n")
if len(remaining) > 0:
f.write("# {} uncategorized/unreduced examples remain\n".format(len(remaining)))
for ex in remaining:
f.write("* `{}`: {}\n".format(ex['Seed'], ex['Kind']))
if ex['CrashError'] and len(ex['CrashError'].strip()) > 0:
f.write("```scala\n")
f.write(ex['CrashError'].strip() + "\n")
f.write("```\n")
f.write("\n")
if len(partition_results) > 0:
f.write("# Run summaries per partition\n")
f.write("|Partition|# Programs generated|# Examples found|Run time|Degree of parallelism|\n")
f.write("|---|---|---|---|---|\n")
for partition_name, results in sorted(partition_results.items(), key=lambda p: p[0]):
summary = results['summary']
if summary is not None:
# {"DegreeOfParallelism":32,"TotalProgramsGenerated":354,"TotalRunTime":"00:00:47.0918613"}
f.write("|{}|{}|{}|{}|{}|\n".format(partition_name, summary['TotalProgramsGenerated'], len(results['examples']), summary['TotalRunTime'], summary['DegreeOfParallelism']))
print("##vso[task.uploadsummary]{}".format(md_path))
with open(md_path, "r") as f:
print(f.read())
return -1 if len(all_examples) > 0 else 0
if __name__ == "__main__":
args = parser.parse_args()
sys.exit(main(args))
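# --- Hedged usage sketch (mirrors the argument parser above; the path is hypothetical) ---
#   python fuzzlyn_summarize.py -issues_directory ./fuzzlyn-issues \
#       -arch x64 -platform windows -build_config Checked
# This writes "Summary of Fuzzlyn run on windows x64 Checked.md" into the issues
# directory, prints it, and exits non-zero when any example was found.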
| 39.408907 | 190 | 0.579926 |
55fe3fa0f115e28839f0902256978a9fbd149757
| 842 |
py
|
Python
|
legal_search/analogy_searcher/views.py
|
WhiteSockLoafer/tfg-repo
|
5c44de014d942a17b1a506550dbceee7280cbfc0
|
[
"MIT"
] | null | null | null |
legal_search/analogy_searcher/views.py
|
WhiteSockLoafer/tfg-repo
|
5c44de014d942a17b1a506550dbceee7280cbfc0
|
[
"MIT"
] | null | null | null |
legal_search/analogy_searcher/views.py
|
WhiteSockLoafer/tfg-repo
|
5c44de014d942a17b1a506550dbceee7280cbfc0
|
[
"MIT"
] | null | null | null |
import os
from django.shortcuts import render
from django.conf import settings
from gensim.models import Word2Vec
model = Word2Vec.load(os.path.join(
settings.BASE_DIR, 'analogy_searcher/w2v_model/word2vec_14.model'
))
def predict(request):
if request.method == 'POST':
tuples = model.wv.most_similar(negative=[request.POST['n1']], positive=[
request.POST['p1'], request.POST['p2']], topn=8)
final_list = []
for t in tuples:
final_list.append((t[0], int(t[1] * 100)))
return render(request, 'post_predict.html', context={
"result_list": final_list,
"n1": request.POST['n1'],
"p1": request.POST['p1'],
"p2": request.POST['p2']
})
else:
return render(request, 'get_predict.html')
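# --- Hedged note (added for illustration) ---
# The view follows the classic word2vec analogy pattern p1 - n1 + p2, i.e. for
# POST fields (p1="king", n1="man", p2="woman") it effectively runs
#
#   model.wv.most_similar(positive=["king", "woman"], negative=["man"], topn=8)
#
# Whether the expected term (e.g. "queen") actually appears depends on the
# legal-text corpus that word2vec_14.model was trained on.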
| 30.071429 | 87 | 0.589074 |
cd1c7fa020c180191116e69a38b6b8268de2ebbc
| 3,222 |
py
|
Python
|
marsyas-vamp/marsyas/src/marsyas_python/spectral_analysis.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/src/marsyas_python/spectral_analysis.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
marsyas-vamp/marsyas/src/marsyas_python/spectral_analysis.py
|
jaouahbi/VampPlugins
|
27c2248d1c717417fe4d448cdfb4cb882a8a336a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import argparse
import marsyas
import marsyas_util
import time
import numpy
import cv
from cv_utils import *
import math
# This program will perform real-time spectral analysis.
# TODO: Put axis indicators in the plots!
#
# The basic functionality is as follows:
# Source -> Window -> Spectra -> Output
#
# These are the parameters we want to set:
# For the analysis:
Window_len = 2048 # The number of samples in each analysis window
Window_step = 512 # The step (in samples) between two consecutive analysis
Zero_padding = 1 # After windowing, the signal will be zero-padded to this value times its length
Min_freq = 0 # Hz. The minimum frequency that will be analyzed
Max_freq = 3000 # Hz. The maximum frequency that will be analyzed
# The following lines will determine the structure of the marsystem
spec_analyzer = ["Series/analysis", ["AudioSource/asrc", "Sum/summation", "ShiftInput/sft", "Windowing/win","Spectrum/spk","PowerSpectrum/pspk"]]
net = marsyas_util.create(spec_analyzer)
snet = marsyas_util.mar_refs(spec_analyzer)
# This is the configuration for the MarSystem
fs = 44100.0
net.updControl("mrs_natural/inSamples", Window_step);
net.updControl("mrs_real/israte", fs);
net.updControl(snet["sft"]+"/mrs_natural/winSize", Window_len);
net.updControl(snet["win"]+"/mrs_natural/zeroPadding", Window_len * (Zero_padding-1));
net.updControl(snet["win"]+"/mrs_string/type", "Hanning"); # "Hamming", "Hanning", "Triangle", "Bartlett", "Blackman"
net.updControl(snet["asrc"]+"/mrs_natural/nChannels", 2);
net.updControl(snet["asrc"]+"/mrs_bool/initAudio", marsyas.MarControlPtr.from_bool(True));
net.updControl(snet["pspk"]+"/mrs_string/spectrumType", "logmagnitude2"); # "power", "magnitude", "decibels", "logmagnitude" (for 1+log(magnitude*1000), "logmagnitude2" (for 1+log10(magnitude)), "powerdensity"
# These variables will avoid having to re-calculate stuff
DFT_SIZE = Window_len * Zero_padding; # This is the size of the DFT
DFT_SIZE_2 = net.getControl(snet["win"]+"/mrs_natural/onSamples").to_natural();
print "Debug parameters"
print DFT_SIZE
print DFT_SIZE_2
freq_bin = fs/DFT_SIZE; # this is the frequency hop for every frequency bin in the DFT
print freq_bin
# This is the size of data that will be shown
visible_time = 10; # Seconds
minK = int(math.floor(Min_freq/freq_bin))
maxK = int(math.ceil(Max_freq/freq_bin))
deltaK = maxK-minK+1
print minK, maxK, deltaK
nTime = int(math.ceil(visible_time*(fs*1.0/Window_step)))
# Allocate memory for the image
Int_Buff = numpy.zeros([deltaK, nTime])
#print deltaK
#print nTime
mat = cv.CreateMat(nTime, deltaK, cv.CV_32FC1)
cv.NamedWindow("Marsyas Spectral Analysis", cv.CV_WINDOW_AUTOSIZE)
try:
while 1:
net.tick()
out = net.getControl("mrs_realvec/processedData").to_realvec()
out = numpy.array(out)
out = out[minK:maxK+1]
out = out [::-1]
if numpy.max(out)>0:
out = out/numpy.max(out)
else:
print numpy.max(out)
if numpy.ndim(out)==1:
out = numpy.array([out])
Int_Buff = Int_Buff[:,1:]
Int_Buff = numpy.hstack([Int_Buff,numpy.transpose(out)])
im = array2cv(Int_Buff)
cv.ShowImage("Marsyas Spectral Analysis", im)
cv.WaitKey(10)
except KeyboardInterrupt:
print "Halted!"
pass
| 36.202247 | 211 | 0.742086 |
6494cb4b4fcaf567086103b670c22d9d50911f2c
| 2,970 |
py
|
Python
|
capnpy/segment/endof.py
|
wridgers/capnpy
|
63546597cc94434a271187f2e5af60f02e086caa
|
[
"MIT"
] | 45 |
2016-10-28T10:16:07.000Z
|
2022-03-06T20:16:57.000Z
|
capnpy/segment/endof.py
|
wridgers/capnpy
|
63546597cc94434a271187f2e5af60f02e086caa
|
[
"MIT"
] | 42 |
2016-12-20T18:10:53.000Z
|
2021-09-08T12:29:04.000Z
|
capnpy/segment/endof.py
|
wridgers/capnpy
|
63546597cc94434a271187f2e5af60f02e086caa
|
[
"MIT"
] | 21 |
2017-02-28T06:39:15.000Z
|
2021-09-07T05:30:46.000Z
|
from capnpy import ptr
def endof(seg, p, offset):
"""
Check whether the given object is compact, and in that case compute its
end boundary. If it's not compact, return -1.
An object is compact if:
1. there is no gap between its data section and its ptrs section
2. there is no gap between children
3. its children are compact
4. there are no FAR pointers
"""
kind = ptr.kind(p)
offset = ptr.deref(p, offset)
if kind == ptr.STRUCT:
data_size = ptr.struct_data_size(p)
ptrs_size = ptr.struct_ptrs_size(p)
return _endof_struct(seg, p, offset, data_size, ptrs_size)
elif kind == ptr.LIST:
item_size = ptr.list_size_tag(p)
count = ptr.list_item_count(p)
if item_size == ptr.LIST_SIZE_COMPOSITE:
tag = seg.read_ptr(offset)
count = ptr.offset(tag)
data_size = ptr.struct_data_size(tag)
ptrs_size = ptr.struct_ptrs_size(tag)
return _endof_list_composite(seg, p, offset,
count, data_size, ptrs_size)
elif item_size == ptr.LIST_SIZE_PTR:
return _endof_list_ptr(seg, p, offset, count)
elif item_size == ptr.LIST_SIZE_BIT:
return _endof_list_bit(seg, p, offset, count)
else:
return _endof_list_primitive(seg, p, offset, item_size, count)
elif kind == ptr.FAR:
return -1
else:
assert False, 'unknown ptr kind'
def _endof_ptrs(seg, offset, ptrs_size, current_end):
i = 0
while i < ptrs_size:
p_offset = offset + i*8
i += 1
p = seg.read_ptr(p_offset)
if not p:
continue
new_start = ptr.deref(p, p_offset)
if new_start != current_end:
return -1
current_end = endof(seg, p, p_offset)
#
return current_end
def _endof_struct(seg, p, offset, data_size, ptrs_size):
offset += data_size*8
current_end = offset + (ptrs_size*8)
return _endof_ptrs(seg, offset, ptrs_size, current_end)
def _endof_list_composite(seg, p, offset, count, data_size, ptrs_size):
item_size = (data_size+ptrs_size)*8
offset += 8 # skip the tag
end = offset + (item_size)*count
if ptrs_size == 0:
return end
#
i = 0
while i < count:
item_offset = offset + (item_size)*i + (data_size*8)
end = _endof_ptrs(seg, item_offset, ptrs_size, end)
if end == -1:
return -1
i += 1
#
return end
def _endof_list_ptr(seg, p, offset, count):
end = offset + 8*count
return _endof_ptrs(seg, offset, count, end)
def _endof_list_primitive(seg, p, offset, item_size, count):
item_size = ptr.list_item_length(item_size)
return ptr.round_up_to_word(offset + item_size*count)
def _endof_list_bit(seg, p, offset, count):
bytes_length = ptr.round_up_to_word(count) // 8
return ptr.round_up_to_word(offset + bytes_length)
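# ---------------------------------------------------------------------------
# Hedged, illustrative sketch (not part of the original module). A caller can
# use endof() to decide whether an object is compact enough to be copied as a
# single contiguous slice of the segment buffer:
#
#     end = endof(seg, p, offset)
#     if end == -1:
#         ...  # not compact: fall back to walking the object recursively
#     else:
#         ...  # compact: the object occupies the bytes up to `end`
#
# The demo below only exercises the word-rounding arithmetic used by the list
# helpers above, re-implemented locally so it runs without a real segment.
if __name__ == '__main__':
    def _round_up_to_word(n):
        # Cap'n Proto lays objects out in 8-byte words.
        return (n + 7) & ~7
    assert _round_up_to_word(0) == 0
    assert _round_up_to_word(1) == 8
    assert _round_up_to_word(8) == 8
    assert _round_up_to_word(13) == 16
    # e.g. a primitive list of 5 two-byte items starting at byte 24 ends at
    # round_up_to_word(24 + 2*5) = 40, matching what _endof_list_primitive
    # computes for that case.
    assert _round_up_to_word(24 + 2 * 5) == 40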
| 31.935484 | 75 | 0.621212 |
ac5c80d6213152f5ee79ec204690e9f2b7f7b668
| 19,010 |
py
|
Python
|
src/objects/manager.py
|
Kelketek/evennia
|
cc56a7155f4fb975a6fc9e811bd6eadf3d710243
|
[
"BSD-3-Clause"
] | 5 |
2015-01-30T08:47:59.000Z
|
2022-01-22T19:27:03.000Z
|
src/objects/manager.py
|
Kelketek/evennia
|
cc56a7155f4fb975a6fc9e811bd6eadf3d710243
|
[
"BSD-3-Clause"
] | 2 |
2017-12-28T21:36:48.000Z
|
2017-12-28T21:36:57.000Z
|
src/objects/manager.py
|
Kelketek/evennia
|
cc56a7155f4fb975a6fc9e811bd6eadf3d710243
|
[
"BSD-3-Clause"
] | 1 |
2020-02-21T05:30:58.000Z
|
2020-02-21T05:30:58.000Z
|
"""
Custom manager for Objects.
"""
from itertools import chain
from django.db.models import Q
from django.conf import settings
from django.db.models.fields import exceptions
from src.typeclasses.managers import TypedObjectManager
from src.typeclasses.managers import returns_typeclass, returns_typeclass_list
from src.utils import utils
from src.utils.utils import to_unicode, is_iter, make_iter, string_partial_matching
__all__ = ("ObjectManager",)
_GA = object.__getattribute__
# delayed import
_ATTR = None
# Try to use a custom way to parse id-tagged multimatches.
_AT_MULTIMATCH_INPUT = utils.variable_from_module(*settings.SEARCH_AT_MULTIMATCH_INPUT.rsplit('.', 1))
class ObjectManager(TypedObjectManager):
"""
This ObjectManager implements methods for searching
and manipulating Objects directly from the database.
Evennia-specific search methods will return Typeclasses or
lists of Typeclasses, whereas Django-general methods will return
Querysets or database objects.
dbref (converter)
get_id (alias: dbref_search)
get_dbref_range
object_totals
typeclass_search
get_object_with_player
get_objs_with_key_and_typeclass
get_objs_with_attr
get_objs_with_attr_match
get_objs_with_db_property
get_objs_with_db_property_match
get_objs_with_key_or_alias
get_contents
object_search (interface to many of the above methods,
equivalent to ev.search_object)
copy_object
"""
#
# ObjectManager Get methods
#
# player related
@returns_typeclass
def get_object_with_player(self, ostring, exact=True, candidates=None):
"""
Search for an object based on its player's name or dbref.
This search is sometimes initiated by appending a * to the beginning of
the search criterion (e.g. in local_and_global_search).
search_string: (string) The name or dbref to search for.
"""
ostring = to_unicode(ostring).lstrip('*')
# simplest case - search by dbref
dbref = self.dbref(ostring)
if dbref:
return dbref
# not a dbref. Search by name.
cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
if exact:
return self.filter(cand_restriction & Q(db_player__username__iexact=ostring))
else: # fuzzy matching
ply_cands = self.filter(cand_restriction & Q(playerdb__username__istartswith=ostring)).values_list("db_key", flat=True)
if candidates:
index_matches = string_partial_matching(ply_cands, ostring, ret_index=True)
return [obj for ind, obj in enumerate(make_iter(candidates)) if ind in index_matches]
else:
return string_partial_matching(ply_cands, ostring, ret_index=False)
@returns_typeclass_list
def get_objs_with_key_and_typeclass(self, oname, otypeclass_path, candidates=None):
"""
Returns objects based on simultaneous key and typeclass match.
"""
cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
return self.filter(cand_restriction & Q(db_key__iexact=oname, db_typeclass_path__exact=otypeclass_path))
# attr/property related
@returns_typeclass_list
def get_objs_with_attr(self, attribute_name, candidates=None):
"""
Returns all objects having the given attribute_name defined at all.
candidates - list of candidate objects to restrict the search to.
"""
cand_restriction = candidates != None and Q(db_attributes__db_obj__pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
return list(self.filter(cand_restriction & Q(db_attributes__db_key=attribute_name)))
@returns_typeclass_list
def get_objs_with_attr_value(self, attribute_name, attribute_value, candidates=None, typeclasses=None):
"""
Returns all objects having the valid attrname set to the given value.
candidates - list of candidate objects to search
typeclasses - list of typeclass-path strings to restrict matches with
This uses the Attribute's PickledField to transparently search the database by matching
the internal representation. This is reasonably effective but since Attribute values
cannot be indexed, searching by Attribute key is to be preferred whenever possible.
"""
cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
## This doesn't work if attribute_value is an object. Workaround below
if isinstance(attribute_value, (basestring, int, float, bool, long)):
return self.filter(cand_restriction & type_restriction & Q(db_attributes__db_key=attribute_name, db_attributes__db_value=attribute_value))
else:
# We have to loop for safety since the referenced lookup gives deepcopy error if attribute value is an object.
global _ATTR
if not _ATTR:
from src.typeclasses.models import Attribute as _ATTR
cands = list(self.filter(cand_restriction & type_restriction & Q(db_attributes__db_key=attribute_name)))
results = [attr.objectdb_set.all() for attr in _ATTR.objects.filter(objectdb__in=cands, db_value=attribute_value)]
return chain(*results)
@returns_typeclass_list
def get_objs_with_db_property(self, property_name, candidates=None):
"""
Returns all objects having a given db field property.
property_name = search string
candidates - list of candidate objects to search
"""
property_name = "db_%s" % property_name.lstrip('db_')
cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
querykwargs = {property_name:None}
try:
return list(self.filter(cand_restriction).exclude(Q(**querykwargs)))
except exceptions.FieldError:
return []
@returns_typeclass_list
def get_objs_with_db_property_value(self, property_name, property_value, candidates=None, typeclasses=None):
"""
Returns all objects having a given db field property.
candidates - list of objects to search
typeclasses - list of typeclass-path strings to restrict matches with
"""
if isinstance(property_value, basestring):
property_value = to_unicode(property_value)
if isinstance(property_name, basestring):
if not property_name.startswith('db_'):
property_name = "db_%s" % property_name
if hasattr(property_value, 'dbobj'):
property_value = property_value.dbobj
querykwargs = {property_name:property_value}
cand_restriction = candidates != None and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj]) or Q()
type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
try:
return list(self.filter(cand_restriction & type_restriction & Q(**querykwargs)))
except exceptions.FieldError:
return []
except ValueError:
from src.utils import logger
logger.log_errmsg("The property '%s' does not support search criteria of the type %s." % (property_name, type(property_value)))
return []
@returns_typeclass_list
def get_contents(self, location, excludeobj=None):
"""
Get all objects that has a location
set to this one.
excludeobj - one or more objects to exclude from the match
"""
exclude_restriction = Q(pk__in=[_GA(obj, "id") for obj in make_iter(excludeobj)]) if excludeobj else Q()
return self.filter(db_location=location).exclude(exclude_restriction)
@returns_typeclass_list
def get_objs_with_key_or_alias(self, ostring, exact=True,
candidates=None, typeclasses=None):
"""
Returns objects based on key or alias match. Will also do fuzzy
matching based on the utils.string_partial_matching function.
candidates - list of candidate objects to restrict on
typeclasses - list of typeclass path strings to restrict on
"""
if not isinstance(ostring, basestring):
if hasattr(ostring, "key"):
ostring = ostring.key
else:
return []
if is_iter(candidates) and not len(candidates):
# if candidates is an empty iterable there can be no matches
# Exit early.
return []
# build query objects
candidates_id = [_GA(obj, "id") for obj in make_iter(candidates) if obj]
cand_restriction = candidates != None and Q(pk__in=make_iter(candidates_id)) or Q()
type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
if exact:
# exact match - do direct search
return self.filter(cand_restriction & type_restriction & (Q(db_key__iexact=ostring) |
Q(db_tags__db_key__iexact=ostring) & Q(db_tags__db_tagtype__iexact="alias"))).distinct()
elif candidates:
# fuzzy with candidates
key_candidates = self.filter(cand_restriction & type_restriction)
else:
# fuzzy without supplied candidates - we select our own candidates
key_candidates = self.filter(type_restriction & (Q(db_key__istartswith=ostring) | Q(db_tags__db_key__istartswith=ostring))).distinct()
candidates_id = [_GA(obj, "id") for obj in key_candidates]
# fuzzy matching
key_strings = key_candidates.values_list("db_key", flat=True).order_by("id")
index_matches = string_partial_matching(key_strings, ostring, ret_index=True)
if index_matches:
return [obj for ind, obj in enumerate(key_candidates) if ind in index_matches]
else:
alias_candidates = self.filter(id__in=candidates_id, db_tags__db_tagtype__iexact="alias")
alias_strings = alias_candidates.values_list("db_key", flat=True)
index_matches = string_partial_matching(alias_strings, ostring, ret_index=True)
if index_matches:
return [alias.db_obj for ind, alias in enumerate(alias_candidates) if ind in index_matches]
return []
# main search methods and helper functions
@returns_typeclass_list
def object_search(self, searchdata,
attribute_name=None,
typeclass=None,
candidates=None,
exact=True):
"""
Search for an object globally or in a list of candidates and return
results. The result is always an Object. Always returns a list.
Arguments:
searchdata: (str or obj) The entity to match for. This is usually a
key string but may also be an object itself. By default (if
attribute_name is not set), this will search object.key and
object.aliases in order. It can also be of the form #dbref,
which will, if exact=True, be matched against the primary key.
attribute_name: (str): Use this named ObjectAttribute to match
searchdata against, instead of the defaults. If this is
the name of a database field (with or without the db_ prefix),
that will be matched too.
typeclass (str or TypeClass): restrict matches to objects having this
typeclass. This will help speed up global searches.
candidates (list of ObjectDBs): If supplied, search will only be
performed among the candidates in this list. A common list
of candidates is the contents of the current location
searched.
exact (bool): Match names/aliases exactly or partially. Partial
matching matches the beginning of words in the names/aliases,
using a matching routine to separate multiple matches in
names with multiple components (so "bi sw" will match
"Big sword"). Since this is more expensive than exact
matching, it is recommended to be used together with the
candidates keyword to limit the number of possibilities. This
value has no meaning if searching for attributes/properties.
Returns:
A list of matching objects (or a list with one unique match)
"""
def _searcher(searchdata, candidates, typeclass, exact=False):
"""
Helper method for searching objects. typeclass is only used
for global searching (no candidates)
"""
if attribute_name:
# attribute/property search (always exact).
matches = self.get_objs_with_db_property_value(attribute_name, searchdata, candidates=candidates, typeclasses=typeclass)
if matches:
return matches
return self.get_objs_with_attr_value(attribute_name, searchdata, candidates=candidates, typeclasses=typeclass)
else:
# normal key/alias search
return self.get_objs_with_key_or_alias(searchdata, exact=exact, candidates=candidates, typeclasses=typeclass)
if not searchdata and searchdata != 0:
return []
if typeclass:
# typeclass may also be a list
typeclasses = make_iter(typeclass)
for i, typeclass in enumerate(make_iter(typeclasses)):
if callable(typeclass):
typeclasses[i] = u"%s.%s" % (typeclass.__module__, typeclass.__name__)
else:
typeclasses[i] = u"%s" % typeclass
typeclass = typeclasses
if candidates:
# Convenience check to make sure candidates are really dbobjs
candidates = [cand.dbobj for cand in make_iter(candidates) if cand]
if typeclass:
candidates = [cand for cand in candidates
if _GA(cand, "db_typeclass_path") in typeclass]
dbref = not attribute_name and exact and self.dbref(searchdata)
if dbref is not None:
# Easiest case - dbref matching (always exact)
dbref_match = self.dbref_search(dbref)
if dbref_match:
if not candidates or dbref_match.dbobj in candidates:
return [dbref_match]
else:
return []
# Search through all possibilities.
match_number = None
# always run first check exact - we don't want partial matches
# if on the form of 1-keyword etc.
matches = _searcher(searchdata, candidates, typeclass, exact=True)
if not matches:
# no matches found - check if we are dealing with N-keyword
# query - if so, strip it.
match_number, searchdata = _AT_MULTIMATCH_INPUT(searchdata)
# run search again, with the exactness set by call
if match_number is not None or not exact:
matches = _searcher(searchdata, candidates, typeclass, exact=exact)
# deal with result
if len(matches) > 1 and match_number is not None:
# multiple matches, but a number was given to separate them
try:
matches = [matches[match_number]]
except IndexError:
pass
# return a list (possibly empty)
return matches
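# Hedged usage sketch (illustrative only, not part of the original manager).
# A typical lookup from command code might look roughly like:
#
#     matches = ObjectDB.objects.object_search("red key",
#                                              candidates=caller.location.contents)
#
# where `caller` is assumed to be some object with a location; an exact match is
# tried first, and a multimatch prefix (by default something like "2-red key")
# can be used to pick between several equally good partial matches.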
#
# ObjectManager Copy method
#
def copy_object(self, original_object, new_key=None,
new_location=None, new_home=None,
new_permissions=None, new_locks=None,
new_aliases=None, new_destination=None):
"""
Create and return a new object as a copy of the original object. All
will be identical to the original except for the arguments given
specifically to this method.
original_object (obj) - the object to make a copy from
new_key (str) - name the copy differently from the original.
new_location (obj) - if not None, change the location
new_home (obj) - if not None, change the Home
new_aliases (list of strings) - if not None, change object aliases.
new_destination (obj) - if not None, change destination
"""
# get all the object's stats
typeclass_path = original_object.typeclass_path
if not new_key:
new_key = original_object.key
if not new_location:
new_location = original_object.location
if not new_home:
new_home = original_object.home
if not new_aliases:
new_aliases = original_object.aliases.all()
if not new_locks:
new_locks = original_object.db_lock_storage
if not new_permissions:
new_permissions = original_object.permissions.all()
if not new_destination:
new_destination = original_object.destination
# create new object
from src.utils import create
from src.scripts.models import ScriptDB
new_object = create.create_object(typeclass_path,
key=new_key,
location=new_location,
home=new_home,
permissions=new_permissions,
locks=new_locks,
aliases=new_aliases,
destination=new_destination)
if not new_object:
return None
# copy over all attributes from old to new.
for attr in original_object.attributes.all():
new_object.attributes.add(attr.key, attr.value)
# copy over all cmdsets, if any
for icmdset, cmdset in enumerate(original_object.cmdset.all()):
if icmdset == 0:
new_object.cmdset.add_default(cmdset)
else:
new_object.cmdset.add(cmdset)
# copy over all scripts, if any
for script in original_object.scripts.all():
ScriptDB.objects.copy_script(script, new_obj=new_object.dbobj)
return new_object
def clear_all_sessids(self):
"""
Clear the db_sessid field of all objects that also have the db_player field
set.
"""
self.filter(db_sessid__isnull=False).update(db_sessid=None)
| 45.697115 | 150 | 0.642557 |
e86e82cf929b8638441aa474c897be764a4e2e7b
| 1,594 |
py
|
Python
|
tests/providers/amazon/aws/operators/test_glacier.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 15,947 |
2019-01-05T13:51:02.000Z
|
2022-03-31T23:33:16.000Z
|
tests/providers/amazon/aws/operators/test_glacier.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 14,603 |
2019-01-05T09:43:19.000Z
|
2022-03-31T23:11:59.000Z
|
tests/providers/amazon/aws/operators/test_glacier.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 8,429 |
2019-01-05T19:45:47.000Z
|
2022-03-31T22:13:01.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import TestCase, mock
from airflow.providers.amazon.aws.operators.glacier import GlacierCreateJobOperator
AWS_CONN_ID = "aws_default"
BUCKET_NAME = "airflow_bucket"
FILENAME = "path/to/file/"
GCP_CONN_ID = "google_cloud_default"
JOB_ID = "1a2b3c4d"
OBJECT_NAME = "file.csv"
TASK_ID = "glacier_job"
VAULT_NAME = "airflow"
class TestGlacierCreateJobOperator(TestCase):
@mock.patch("airflow.providers.amazon.aws.operators.glacier.GlacierHook")
def test_execute(self, hook_mock):
op = GlacierCreateJobOperator(aws_conn_id=AWS_CONN_ID, vault_name=VAULT_NAME, task_id=TASK_ID)
op.execute(mock.MagicMock())
hook_mock.assert_called_once_with(aws_conn_id=AWS_CONN_ID)
hook_mock.return_value.retrieve_inventory.assert_called_once_with(vault_name=VAULT_NAME)
| 39.85 | 102 | 0.781054 |
1806418992b4824854395e056f247f2e5e250466
| 23 |
py
|
Python
|
bpipe2/__init__.py
|
mavnt/bpipe2
|
2fd02aeb1b4d99c6927d1eb70a7e83b4868f6d79
|
[
"MIT"
] | null | null | null |
bpipe2/__init__.py
|
mavnt/bpipe2
|
2fd02aeb1b4d99c6927d1eb70a7e83b4868f6d79
|
[
"MIT"
] | null | null | null |
bpipe2/__init__.py
|
mavnt/bpipe2
|
2fd02aeb1b4d99c6927d1eb70a7e83b4868f6d79
|
[
"MIT"
] | null | null | null |
from .bpipe2 import *
| 11.5 | 22 | 0.695652 |
b8a9adfaa7eb212fe58552d6b0a9183fc7b02de2
| 1,060 |
py
|
Python
|
cairis/core/EnvironmentSingleton.py
|
RachelLar/cairis_update
|
0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2
|
[
"Apache-2.0"
] | null | null | null |
cairis/core/EnvironmentSingleton.py
|
RachelLar/cairis_update
|
0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2
|
[
"Apache-2.0"
] | null | null | null |
cairis/core/EnvironmentSingleton.py
|
RachelLar/cairis_update
|
0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from Borg import Borg
class EnvironmentSingleton(Borg):
environmentId = None
def __init__(self,cId = None):
if cId is not None: self.environmentId = cId
def __str__(self): return str(self.environmentId)
def __int__(self): return self.environmentId
| 39.259259 | 63 | 0.758491 |
62ad51e24d216d8578cea03d93c6927d2f8957da
| 28,712 |
py
|
Python
|
tensorflow_model.py
|
YangAzure/code2vec-tf
|
8fb4c508aeb466e7ae189650057a97dc50b477c6
|
[
"MIT"
] | null | null | null |
tensorflow_model.py
|
YangAzure/code2vec-tf
|
8fb4c508aeb466e7ae189650057a97dc50b477c6
|
[
"MIT"
] | null | null | null |
tensorflow_model.py
|
YangAzure/code2vec-tf
|
8fb4c508aeb466e7ae189650057a97dc50b477c6
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import time
from typing import Dict, Optional, List, Iterable
from collections import Counter
from functools import partial
from path_context_reader import PathContextReader, ModelInputTensorsFormer, ReaderInputTensors, EstimatorAction
from common import common
from vocabularies import VocabType
from config import Config
from model_base import Code2VecModelBase, ModelEvaluationResults, ModelPredictionResults
tf.compat.v1.disable_eager_execution()
class Code2VecModel(Code2VecModelBase):
def __init__(self, config: Config):
self.sess = tf.compat.v1.Session()
self.saver = None
self.eval_reader = None
self.eval_input_iterator_reset_op = None
self.predict_reader = None
# self.eval_placeholder = None
self.predict_placeholder = None
self.eval_top_words_op, self.eval_top_values_op, self.eval_original_names_op, self.eval_code_vectors = None, None, None, None
self.predict_top_words_op, self.predict_top_values_op, self.predict_original_names_op = None, None, None
self.vocab_type_to_tf_variable_name_mapping: Dict[VocabType, str] = {
VocabType.Token: 'WORDS_VOCAB',
VocabType.Target: 'TARGET_WORDS_VOCAB',
VocabType.Path: 'PATHS_VOCAB'
}
super(Code2VecModel, self).__init__(config)
def train(self):
self.log('Starting training')
start_time = time.time()
batch_num = 0
sum_loss = 0
multi_batch_start_time = time.time()
num_batches_to_save_and_eval = max(int(self.config.train_steps_per_epoch * self.config.SAVE_EVERY_EPOCHS), 1)
train_reader = PathContextReader(vocabs=self.vocabs,
model_input_tensors_former=_TFTrainModelInputTensorsFormer(),
config=self.config, estimator_action=EstimatorAction.Train)
input_iterator = tf.compat.v1.data.make_initializable_iterator(train_reader.get_dataset())
input_iterator_reset_op = input_iterator.initializer
input_tensors = input_iterator.get_next()
optimizer, train_loss = self._build_tf_training_graph(input_tensors)
self.saver = tf.compat.v1.train.Saver(max_to_keep=self.config.MAX_TO_KEEP)
self.log('Number of trainable params: {}'.format(
np.sum([np.prod(v.get_shape().as_list()) for v in tf.compat.v1.trainable_variables()])))
for variable in tf.compat.v1.trainable_variables():
self.log("variable name: {} -- shape: {} -- #params: {}".format(
variable.name, variable.get_shape(), np.prod(variable.get_shape().as_list())))
self._initialize_session_variables()
if self.config.MODEL_LOAD_PATH:
self._load_inner_model(self.sess)
self.sess.run(input_iterator_reset_op)
time.sleep(1)
self.log('Started reader...')
# run training in a loop until the reader iterator is exhausted.
try:
while True:
# Each iteration = batch. We iterate as long as the tf iterator (reader) yields batches.
batch_num += 1
# Actual training for the current batch.
_, batch_loss = self.sess.run([optimizer, train_loss])
sum_loss += batch_loss
if batch_num % self.config.NUM_BATCHES_TO_LOG_PROGRESS == 0:
self._trace_training(sum_loss, batch_num, multi_batch_start_time)
# Uri: the "shuffle_batch/random_shuffle_queue_Size:0" op does not exist since the migration to the new reader.
# self.log('Number of waiting examples in queue: %d' % self.sess.run(
# "shuffle_batch/random_shuffle_queue_Size:0"))
sum_loss = 0
multi_batch_start_time = time.time()
if batch_num % num_batches_to_save_and_eval == 0:
epoch_num = int((batch_num / num_batches_to_save_and_eval) * self.config.SAVE_EVERY_EPOCHS)
save_path = self.config.MODEL_SAVE_PATH + '_iter' + str(epoch_num)
self._save_inner_model(save_path)
self.log('Saved after %d epochs in: %s' % (epoch_num, save_path))
evaluation_results = self.evaluate()
evaluation_results_str = (str(evaluation_results).replace('topk', 'top{}'.format(
self.config.TOP_K_WORDS_CONSIDERED_DURING_PREDICTION)))
self.log('After {nr_epochs} epochs -- {evaluation_results}'.format(
nr_epochs=epoch_num,
evaluation_results=evaluation_results_str
))
except tf.errors.OutOfRangeError:
pass # The reader iterator is exhausted and have no more batches to produce.
self.log('Done training')
if self.config.MODEL_SAVE_PATH:
self._save_inner_model(self.config.MODEL_SAVE_PATH)
self.log('Model saved in file: %s' % self.config.MODEL_SAVE_PATH)
elapsed = int(time.time() - start_time)
self.log("Training time: %sH:%sM:%sS\n" % ((elapsed // 60 // 60), (elapsed // 60) % 60, elapsed % 60))
def evaluate(self) -> Optional[ModelEvaluationResults]:
eval_start_time = time.time()
if self.eval_reader is None:
self.eval_reader = PathContextReader(vocabs=self.vocabs,
model_input_tensors_former=_TFEvaluateModelInputTensorsFormer(),
config=self.config, estimator_action=EstimatorAction.Evaluate)
input_iterator = tf.compat.v1.data.make_initializable_iterator(self.eval_reader.get_dataset())
self.eval_input_iterator_reset_op = input_iterator.initializer
input_tensors = input_iterator.get_next()
self.eval_top_words_op, self.eval_top_values_op, self.eval_original_names_op, _, _, _, _, \
self.eval_code_vectors = self._build_tf_test_graph(input_tensors)
self.saver = tf.compat.v1.train.Saver()
if self.config.MODEL_LOAD_PATH and not self.config.TRAIN_DATA_PATH_PREFIX:
self._initialize_session_variables()
self._load_inner_model(self.sess)
if self.config.RELEASE:
release_name = self.config.MODEL_LOAD_PATH + '.release'
self.log('Releasing model, output model: %s' % release_name)
self.saver.save(self.sess, release_name)
return None # FIXME: why do we return none here?
with open('log.txt', 'w') as log_output_file:
if self.config.EXPORT_CODE_VECTORS:
code_vectors_file = open(self.config.TEST_DATA_PATH + '.vectors', 'w')
total_predictions = 0
total_prediction_batches = 0
subtokens_evaluation_metric = SubtokensEvaluationMetric(
partial(common.filter_impossible_names, self.vocabs.target_vocab.special_words))
topk_accuracy_evaluation_metric = TopKAccuracyEvaluationMetric(
self.config.TOP_K_WORDS_CONSIDERED_DURING_PREDICTION,
partial(common.get_first_match_word_from_top_predictions, self.vocabs.target_vocab.special_words))
start_time = time.time()
self.sess.run(self.eval_input_iterator_reset_op)
self.log('Starting evaluation')
# Run evaluation in a loop until iterator is exhausted.
# Each iteration = batch. We iterate as long as the tf iterator (reader) yields batches.
try:
while True:
top_words, top_scores, original_names, code_vectors = self.sess.run(
[self.eval_top_words_op, self.eval_top_values_op,
self.eval_original_names_op, self.eval_code_vectors],
)
# shapes:
# top_words: (batch, top_k); top_scores: (batch, top_k)
# original_names: (batch, ); code_vectors: (batch, code_vector_size)
top_words = common.binary_to_string_matrix(top_words) # (batch, top_k)
original_names = common.binary_to_string_list(original_names) # (batch,)
self._log_predictions_during_evaluation(zip(original_names, top_words), log_output_file)
topk_accuracy_evaluation_metric.update_batch(zip(original_names, top_words))
subtokens_evaluation_metric.update_batch(zip(original_names, top_words))
total_predictions += len(original_names)
total_prediction_batches += 1
if self.config.EXPORT_CODE_VECTORS:
self._write_code_vectors(code_vectors_file, code_vectors)
if total_prediction_batches % self.config.NUM_BATCHES_TO_LOG_PROGRESS == 0:
elapsed = time.time() - start_time
# start_time = time.time()
self._trace_evaluation(total_predictions, elapsed)
except tf.errors.OutOfRangeError:
pass # reader iterator is exhausted and have no more batches to produce.
self.log('Done evaluating, epoch reached')
log_output_file.write(str(topk_accuracy_evaluation_metric.topk_correct_predictions) + '\n')
if self.config.EXPORT_CODE_VECTORS:
code_vectors_file.close()
elapsed = int(time.time() - eval_start_time)
self.log("Evaluation time: %sH:%sM:%sS" % ((elapsed // 60 // 60), (elapsed // 60) % 60, elapsed % 60))
return ModelEvaluationResults(
topk_acc=topk_accuracy_evaluation_metric.topk_correct_predictions,
subtoken_precision=subtokens_evaluation_metric.precision,
subtoken_recall=subtokens_evaluation_metric.recall,
subtoken_f1=subtokens_evaluation_metric.f1)
def _build_tf_training_graph(self, input_tensors):
# Use `_TFTrainModelInputTensorsFormer` to access input tensors by name.
input_tensors = _TFTrainModelInputTensorsFormer().from_model_input_form(input_tensors)
# shape of (batch, 1) for input_tensors.target_index
# shape of (batch, max_contexts) for others:
# input_tensors.path_source_token_indices, input_tensors.path_indices,
# input_tensors.path_target_token_indices, input_tensors.context_valid_mask
with tf.compat.v1.variable_scope('model'):
tokens_vocab = tf.compat.v1.get_variable(
self.vocab_type_to_tf_variable_name_mapping[VocabType.Token],
shape=(self.vocabs.token_vocab.size, self.config.TOKEN_EMBEDDINGS_SIZE), dtype=tf.float32,
initializer=tf.compat.v1.initializers.variance_scaling(scale=1.0, mode='fan_out', distribution="uniform"))
targets_vocab = tf.compat.v1.get_variable(
self.vocab_type_to_tf_variable_name_mapping[VocabType.Target],
shape=(self.vocabs.target_vocab.size, self.config.TARGET_EMBEDDINGS_SIZE), dtype=tf.float32,
initializer=tf.compat.v1.initializers.variance_scaling(scale=1.0, mode='fan_out', distribution="uniform"))
attention_param = tf.compat.v1.get_variable(
'ATTENTION',
shape=(self.config.CODE_VECTOR_SIZE, 1), dtype=tf.float32)
paths_vocab = tf.compat.v1.get_variable(
self.vocab_type_to_tf_variable_name_mapping[VocabType.Path],
shape=(self.vocabs.path_vocab.size, self.config.PATH_EMBEDDINGS_SIZE), dtype=tf.float32,
initializer=tf.compat.v1.initializers.variance_scaling(scale=1.0, mode='fan_out', distribution="uniform"))
code_vectors, _ = self._calculate_weighted_contexts(
tokens_vocab, paths_vocab, attention_param, input_tensors.path_source_token_indices,
input_tensors.path_indices, input_tensors.path_target_token_indices, input_tensors.context_valid_mask)
logits = tf.matmul(code_vectors, targets_vocab, transpose_b=True)
batch_size = tf.cast(tf.shape(input_tensors.target_index)[0], dtype=tf.float32)
loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.reshape(input_tensors.target_index, [-1]),
logits=logits)) / batch_size
optimizer = tf.compat.v1.train.AdamOptimizer().minimize(loss)
return optimizer, loss
def _calculate_weighted_contexts(self, tokens_vocab, paths_vocab, attention_param, source_input, path_input,
target_input, valid_mask, is_evaluating=False):
source_word_embed = tf.nn.embedding_lookup(params=tokens_vocab, ids=source_input) # (batch, max_contexts, dim)
path_embed = tf.nn.embedding_lookup(params=paths_vocab, ids=path_input) # (batch, max_contexts, dim)
target_word_embed = tf.nn.embedding_lookup(params=tokens_vocab, ids=target_input) # (batch, max_contexts, dim)
context_embed = tf.concat([source_word_embed, path_embed, target_word_embed],
axis=-1) # (batch, max_contexts, dim * 3)
if not is_evaluating:
context_embed = tf.nn.dropout(context_embed, rate=1-self.config.DROPOUT_KEEP_RATE)
flat_embed = tf.reshape(context_embed, [-1, self.config.context_vector_size]) # (batch * max_contexts, dim * 3)
transform_param = tf.compat.v1.get_variable(
'TRANSFORM', shape=(self.config.context_vector_size, self.config.CODE_VECTOR_SIZE), dtype=tf.float32)
flat_embed = tf.tanh(tf.matmul(flat_embed, transform_param)) # (batch * max_contexts, dim * 3)
contexts_weights = tf.matmul(flat_embed, attention_param) # (batch * max_contexts, 1)
batched_contexts_weights = tf.reshape(
contexts_weights, [-1, self.config.MAX_CONTEXTS, 1]) # (batch, max_contexts, 1)
mask = tf.math.log(valid_mask) # (batch, max_contexts)
mask = tf.expand_dims(mask, axis=2) # (batch, max_contexts, 1)
batched_contexts_weights += mask # (batch, max_contexts, 1)
attention_weights = tf.nn.softmax(batched_contexts_weights, axis=1) # (batch, max_contexts, 1)
batched_embed = tf.reshape(flat_embed, shape=[-1, self.config.MAX_CONTEXTS, self.config.CODE_VECTOR_SIZE])
code_vectors = tf.reduce_sum(tf.multiply(batched_embed, attention_weights), axis=1) # (batch, dim * 3)
return code_vectors, attention_weights
def _build_tf_test_graph(self, input_tensors, normalize_scores=False):
with tf.compat.v1.variable_scope('model', reuse=self.get_should_reuse_variables()):
tokens_vocab = tf.compat.v1.get_variable(
self.vocab_type_to_tf_variable_name_mapping[VocabType.Token],
shape=(self.vocabs.token_vocab.size, self.config.TOKEN_EMBEDDINGS_SIZE),
dtype=tf.float32, trainable=False)
targets_vocab = tf.compat.v1.get_variable(
self.vocab_type_to_tf_variable_name_mapping[VocabType.Target],
shape=(self.vocabs.target_vocab.size, self.config.TARGET_EMBEDDINGS_SIZE),
dtype=tf.float32, trainable=False)
attention_param = tf.compat.v1.get_variable(
'ATTENTION', shape=(self.config.context_vector_size, 1),
dtype=tf.float32, trainable=False)
paths_vocab = tf.compat.v1.get_variable(
self.vocab_type_to_tf_variable_name_mapping[VocabType.Path],
shape=(self.vocabs.path_vocab.size, self.config.PATH_EMBEDDINGS_SIZE),
dtype=tf.float32, trainable=False)
targets_vocab = tf.transpose(targets_vocab) # (dim * 3, target_word_vocab)
# Use `_TFEvaluateModelInputTensorsFormer` to access input tensors by name.
input_tensors = _TFEvaluateModelInputTensorsFormer().from_model_input_form(input_tensors)
# shape of (batch, 1) for input_tensors.target_string
# shape of (batch, max_contexts) for the other tensors
code_vectors, attention_weights = self._calculate_weighted_contexts(
tokens_vocab, paths_vocab, attention_param, input_tensors.path_source_token_indices,
input_tensors.path_indices, input_tensors.path_target_token_indices,
input_tensors.context_valid_mask, is_evaluating=True)
scores = tf.matmul(code_vectors, targets_vocab) # (batch, target_word_vocab)
topk_candidates = tf.nn.top_k(scores, k=tf.minimum(
self.config.TOP_K_WORDS_CONSIDERED_DURING_PREDICTION, self.vocabs.target_vocab.size))
top_indices = topk_candidates.indices
top_words = self.vocabs.target_vocab.lookup_word(top_indices)
original_words = input_tensors.target_string
top_scores = topk_candidates.values
if normalize_scores:
top_scores = tf.nn.softmax(top_scores)
return top_words, top_scores, original_words, attention_weights, input_tensors.path_source_token_strings, \
input_tensors.path_strings, input_tensors.path_target_token_strings, code_vectors
def predict(self, predict_data_lines: Iterable[str]) -> List[ModelPredictionResults]:
if self.predict_reader is None:
self.predict_reader = PathContextReader(vocabs=self.vocabs,
model_input_tensors_former=_TFEvaluateModelInputTensorsFormer(),
config=self.config, estimator_action=EstimatorAction.Predict)
self.predict_placeholder = tf.compat.v1.placeholder(tf.string)
reader_output = self.predict_reader.process_input_row(self.predict_placeholder)
self.predict_top_words_op, self.predict_top_values_op, self.predict_original_names_op, \
self.attention_weights_op, self.predict_source_string, self.predict_path_string, \
self.predict_path_target_string, self.predict_code_vectors = \
self._build_tf_test_graph(reader_output, normalize_scores=True)
self._initialize_session_variables()
self.saver = tf.compat.v1.train.Saver()
self._load_inner_model(sess=self.sess)
prediction_results: List[ModelPredictionResults] = []
for line in predict_data_lines:
batch_top_words, batch_top_scores, batch_original_name, batch_attention_weights, batch_path_source_strings,\
batch_path_strings, batch_path_target_strings, batch_code_vectors = self.sess.run(
[self.predict_top_words_op, self.predict_top_values_op, self.predict_original_names_op,
self.attention_weights_op, self.predict_source_string, self.predict_path_string,
self.predict_path_target_string, self.predict_code_vectors],
feed_dict={self.predict_placeholder: line})
# shapes:
# batch_top_words, top_scores: (batch, top_k)
# batch_original_name: (batch, )
# batch_attention_weights: (batch, max_context, 1)
# batch_path_source_strings, batch_path_strings, batch_path_target_strings: (batch, max_context)
# batch_code_vectors: (batch, code_vector_size)
# remove first axis: (batch=1, ...)
assert all(tensor.shape[0] == 1 for tensor in (batch_top_words, batch_top_scores, batch_original_name,
batch_attention_weights, batch_path_source_strings,
batch_path_strings, batch_path_target_strings,
batch_code_vectors))
top_words = np.squeeze(batch_top_words, axis=0)
top_scores = np.squeeze(batch_top_scores, axis=0)
original_name = batch_original_name[0]
attention_weights = np.squeeze(batch_attention_weights, axis=0)
path_source_strings = np.squeeze(batch_path_source_strings, axis=0)
path_strings = np.squeeze(batch_path_strings, axis=0)
path_target_strings = np.squeeze(batch_path_target_strings, axis=0)
code_vectors = np.squeeze(batch_code_vectors, axis=0)
top_words = common.binary_to_string_list(top_words)
original_name = common.binary_to_string(original_name)
attention_per_context = self._get_attention_weight_per_context(
path_source_strings, path_strings, path_target_strings, attention_weights)
prediction_results.append(ModelPredictionResults(
original_name=original_name,
topk_predicted_words=top_words,
topk_predicted_words_scores=top_scores,
attention_per_context=attention_per_context,
code_vector=(code_vectors if self.config.EXPORT_CODE_VECTORS else None)
))
return prediction_results
def _save_inner_model(self, path: str):
self.saver.save(self.sess, path)
def _load_inner_model(self, sess=None):
if sess is not None:
self.log('Loading model weights from: ' + self.config.MODEL_LOAD_PATH)
self.saver.restore(sess, self.config.MODEL_LOAD_PATH)
self.log('Done loading model weights')
def _get_vocab_embedding_as_np_array(self, vocab_type: VocabType) -> np.ndarray:
assert vocab_type in VocabType
vocab_tf_variable_name = self.vocab_type_to_tf_variable_name_mapping[vocab_type]
with tf.compat.v1.variable_scope('model', reuse=None):
embeddings = tf.compat.v1.get_variable(vocab_tf_variable_name)
self.saver = tf.compat.v1.train.Saver()
self._load_inner_model(self.sess)
vocab_embedding_matrix = self.sess.run(embeddings)
return vocab_embedding_matrix
def get_should_reuse_variables(self):
if self.config.TRAIN_DATA_PATH_PREFIX:
return True
else:
return None
def _log_predictions_during_evaluation(self, results, output_file):
for original_name, top_predicted_words in results:
found_match = common.get_first_match_word_from_top_predictions(
self.vocabs.target_vocab.special_words, original_name, top_predicted_words)
if found_match is not None:
prediction_idx, predicted_word = found_match
if prediction_idx == 0:
output_file.write('Original: ' + original_name + ', predicted 1st: ' + predicted_word + '\n')
else:
output_file.write('\t\t predicted correctly at rank: ' + str(prediction_idx + 1) + '\n')
else:
output_file.write('No results for predicting: ' + original_name)
def _trace_training(self, sum_loss, batch_num, multi_batch_start_time):
multi_batch_elapsed = time.time() - multi_batch_start_time
avg_loss = sum_loss / (self.config.NUM_BATCHES_TO_LOG_PROGRESS * self.config.TRAIN_BATCH_SIZE)
throughput = self.config.TRAIN_BATCH_SIZE * self.config.NUM_BATCHES_TO_LOG_PROGRESS / \
(multi_batch_elapsed if multi_batch_elapsed > 0 else 1)
self.log('Average loss at batch %d: %f, \tthroughput: %d samples/sec' % (
batch_num, avg_loss, throughput))
def _trace_evaluation(self, total_predictions, elapsed):
state_message = 'Evaluated %d examples...' % total_predictions
throughput_message = "Prediction throughput: %d samples/sec" % int(
total_predictions / (elapsed if elapsed > 0 else 1))
self.log(state_message)
self.log(throughput_message)
def close_session(self):
self.sess.close()
def _initialize_session_variables(self):
self.sess.run(tf.group(
tf.compat.v1.global_variables_initializer(),
tf.compat.v1.local_variables_initializer(),
tf.compat.v1.tables_initializer()))
self.log('Initialized variables')
class SubtokensEvaluationMetric:
def __init__(self, filter_impossible_names_fn):
self.nr_true_positives: int = 0
self.nr_false_positives: int = 0
self.nr_false_negatives: int = 0
self.nr_predictions: int = 0
self.filter_impossible_names_fn = filter_impossible_names_fn
def update_batch(self, results):
for original_name, top_words in results:
prediction = self.filter_impossible_names_fn(top_words)[0]
original_subtokens = Counter(common.get_subtokens(original_name))
predicted_subtokens = Counter(common.get_subtokens(prediction))
self.nr_true_positives += sum(count for element, count in predicted_subtokens.items()
if element in original_subtokens)
self.nr_false_positives += sum(count for element, count in predicted_subtokens.items()
if element not in original_subtokens)
self.nr_false_negatives += sum(count for element, count in original_subtokens.items()
if element not in predicted_subtokens)
self.nr_predictions += 1
@property
def true_positive(self):
return self.nr_true_positives / self.nr_predictions
@property
def false_positive(self):
return self.nr_false_positives / self.nr_predictions
@property
def false_negative(self):
return self.nr_false_negatives / self.nr_predictions
@property
def precision(self):
return self.nr_true_positives / (self.nr_true_positives + self.nr_false_positives)
@property
def recall(self):
return self.nr_true_positives / (self.nr_true_positives + self.nr_false_negatives)
@property
def f1(self):
return 2 * self.precision * self.recall / (self.precision + self.recall)
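# Worked example (illustrative): suppose an original method name whose subtokens
# are {get, file, name} and a top filtered prediction whose subtokens are
# {get, name}. For that single prediction the counters above become:
#   true positives  = 2  (get and name appear in the original)
#   false positives = 0  (no predicted subtoken is absent from the original)
#   false negatives = 1  (file was never predicted)
# so precision = 2/2 = 1.0, recall = 2/3, and F1 = 2*1.0*(2/3)/(1.0 + 2/3) = 0.8.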
class TopKAccuracyEvaluationMetric:
def __init__(self, top_k: int, get_first_match_word_from_top_predictions_fn):
self.top_k = top_k
self.nr_correct_predictions = np.zeros(self.top_k)
self.nr_predictions: int = 0
self.get_first_match_word_from_top_predictions_fn = get_first_match_word_from_top_predictions_fn
def update_batch(self, results):
for original_name, top_predicted_words in results:
self.nr_predictions += 1
found_match = self.get_first_match_word_from_top_predictions_fn(original_name, top_predicted_words)
if found_match is not None:
suggestion_idx, _ = found_match
self.nr_correct_predictions[suggestion_idx:self.top_k] += 1
@property
def topk_correct_predictions(self):
return self.nr_correct_predictions / self.nr_predictions
class _TFTrainModelInputTensorsFormer(ModelInputTensorsFormer):
def to_model_input_form(self, input_tensors: ReaderInputTensors):
return input_tensors.target_index, input_tensors.path_source_token_indices, input_tensors.path_indices, \
input_tensors.path_target_token_indices, input_tensors.context_valid_mask
def from_model_input_form(self, input_row) -> ReaderInputTensors:
return ReaderInputTensors(
target_index=input_row[0],
path_source_token_indices=input_row[1],
path_indices=input_row[2],
path_target_token_indices=input_row[3],
context_valid_mask=input_row[4]
)
class _TFEvaluateModelInputTensorsFormer(ModelInputTensorsFormer):
def to_model_input_form(self, input_tensors: ReaderInputTensors):
return input_tensors.target_string, input_tensors.path_source_token_indices, input_tensors.path_indices, \
input_tensors.path_target_token_indices, input_tensors.context_valid_mask, \
input_tensors.path_source_token_strings, input_tensors.path_strings, \
input_tensors.path_target_token_strings
def from_model_input_form(self, input_row) -> ReaderInputTensors:
return ReaderInputTensors(
target_string=input_row[0],
path_source_token_indices=input_row[1],
path_indices=input_row[2],
path_target_token_indices=input_row[3],
context_valid_mask=input_row[4],
path_source_token_strings=input_row[5],
path_strings=input_row[6],
path_target_token_strings=input_row[7]
)
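# ---------------------------------------------------------------------------
# Hedged, self-contained sketch (not part of the original model): the masked
# soft-attention pooling performed in Code2VecModel._calculate_weighted_contexts,
# reproduced with plain numpy on made-up shapes so the tensor algebra is easy
# to follow. Every size and value below is invented for illustration only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    batch, max_contexts, code_vector_size = 2, 4, 3
    # context vectors after the TRANSFORM + tanh step, flattened like flat_embed
    flat_embed = np.tanh(rng.randn(batch * max_contexts, code_vector_size))
    attention_param = rng.randn(code_vector_size, 1)
    contexts_weights = flat_embed.dot(attention_param).reshape(batch, max_contexts, 1)
    # 1.0 marks a real context, 0.0 a padded one (first example has one padded slot)
    valid_mask = np.array([[1., 1., 1., 0.],
                           [1., 1., 1., 1.]]).reshape(batch, max_contexts, 1)
    with np.errstate(divide='ignore'):
        masked = contexts_weights + np.log(valid_mask)   # -inf disables padded contexts
    attention = np.exp(masked - masked.max(axis=1, keepdims=True))
    attention /= attention.sum(axis=1, keepdims=True)    # softmax over the contexts
    code_vectors = (flat_embed.reshape(batch, max_contexts, code_vector_size)
                    * attention).sum(axis=1)             # weighted sum per example
    assert code_vectors.shape == (batch, code_vector_size)
    assert np.allclose(attention.sum(axis=1), 1.0)
    assert np.isclose(attention[0, 3, 0], 0.0)           # padded context got zero weight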
| 53.969925 | 133 | 0.666167 |
b88bf14b26fb2788ee8bb4d29450c08e6bb65585
| 43,951 |
py
|
Python
|
cla-backend/cla/controllers/signature.py
|
hnexokonkwo/easycla
|
c163c6697657c6e28e8fa5d71e93dca35afe57ef
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null |
cla-backend/cla/controllers/signature.py
|
hnexokonkwo/easycla
|
c163c6697657c6e28e8fa5d71e93dca35afe57ef
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null |
cla-backend/cla/controllers/signature.py
|
hnexokonkwo/easycla
|
c163c6697657c6e28e8fa5d71e93dca35afe57ef
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null |
# Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
"""
Controller related to signature operations.
"""
import copy
import uuid
from datetime import datetime
from typing import List, Optional
import hug.types
import requests
import cla.hug_types
from cla.controllers import company
from cla.models import DoesNotExist
from cla.models.event_types import EventType
from cla.models.dynamo_models import User, Project, Signature, Company, Event
from cla.utils import get_email_service
def get_signatures():
"""
Returns a list of signatures in the CLA system.
:return: List of signatures in dict format.
:rtype: [dict]
"""
signatures = [signature.to_dict() for signature in Signature().all()]
return signatures
def get_signature(signature_id):
"""
Returns the CLA signature requested by UUID.
:param signature_id: The signature UUID.
:type signature_id: UUID
:return: dict representation of the signature object.
:rtype: dict
"""
signature = Signature()
try:
signature.load(signature_id=str(signature_id))
except DoesNotExist as err:
return {'errors': {'signature_id': str(err)}}
return signature.to_dict()
def create_signature(signature_project_id, # pylint: disable=too-many-arguments
signature_reference_id,
signature_reference_type,
signature_type='cla',
signature_approved=False,
signature_signed=False,
signature_return_url=None,
signature_sign_url=None,
signature_user_ccla_company_id=None,
signature_acl=None):
"""
Creates a signature and returns the newly created signature in dict format.
:param signature_project_id: The project ID for this new signature.
:type signature_project_id: string
:param signature_reference_id: The user or company ID for this signature.
:type signature_reference_id: string
:param signature_reference_type: The type of reference ('user' or 'company')
:type signature_reference_type: string
:param signature_type: The signature type ('cla' or 'dco')
:type signature_type: string
:param signature_signed: Whether or not the signature has been signed.
:type signature_signed: boolean
:param signature_approved: Whether or not the signature has been approved.
:type signature_approved: boolean
:param signature_return_url: The URL the user will be redirected to after signing.
:type signature_return_url: string
:param signature_sign_url: The URL the user must visit to sign the signature.
:type signature_sign_url: string
:param signature_user_ccla_company_id: The company ID if creating an employee signature.
:type signature_user_ccla_company_id: string
:return: A dict of a newly created signature.
:rtype: dict
"""
signature: Signature = cla.utils.get_signature_instance()
signature.set_signature_id(str(uuid.uuid4()))
project: Project = cla.utils.get_project_instance()
try:
project.load(project_id=str(signature_project_id))
except DoesNotExist as err:
return {'errors': {'signature_project_id': str(err)}}
signature.set_signature_project_id(str(signature_project_id))
if signature_reference_type == 'user':
user: User = cla.utils.get_user_instance()
try:
user.load(signature_reference_id)
except DoesNotExist as err:
return {'errors': {'signature_reference_id': str(err)}}
try:
document = project.get_project_individual_document()
except DoesNotExist as err:
return {'errors': {'signature_project_id': str(err)}}
else:
company: Company = cla.utils.get_company_instance()
try:
company.load(signature_reference_id)
except DoesNotExist as err:
return {'errors': {'signature_reference_id': str(err)}}
try:
document = project.get_project_corporate_document()
except DoesNotExist as err:
return {'errors': {'signature_project_id': str(err)}}
# Set username to this signature ACL
if signature_acl is not None:
signature.set_signature_acl(signature_acl)
signature.set_signature_document_minor_version(document.get_document_minor_version())
signature.set_signature_document_major_version(document.get_document_major_version())
signature.set_signature_reference_id(str(signature_reference_id))
signature.set_signature_reference_type(signature_reference_type)
signature.set_signature_type(signature_type)
signature.set_signature_signed(signature_signed)
signature.set_signature_approved(signature_approved)
signature.set_signature_return_url(signature_return_url)
signature.set_signature_sign_url(signature_sign_url)
if signature_user_ccla_company_id is not None:
signature.set_signature_user_ccla_company_id(str(signature_user_ccla_company_id))
signature.save()
event_data = f'Signature added. Signature_id - {signature.get_signature_id()} for Project - {project.get_project_name()}'
Event.create_event(
event_data=event_data,
event_type=EventType.CreateSignature,
event_project_id=signature_project_id,
contains_pii=False,
)
return signature.to_dict()
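# Hedged usage sketch (illustrative only; the identifiers below are made up):
#
#     result = create_signature(signature_project_id=project_id,
#                               signature_reference_id=user_id,
#                               signature_reference_type='user',
#                               signature_return_url='https://example.org/signed')
#     if 'errors' in result:
#         ...  # the project or the referenced user/company could not be loaded
#
# On success the dict mirrors Signature.to_dict(), with signature_signed and
# signature_approved still False until the signing flow completes.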
def update_signature(signature_id, # pylint: disable=too-many-arguments,too-many-return-statements,too-many-branches
auth_user,
signature_project_id=None,
signature_reference_id=None,
signature_reference_type=None,
signature_type=None,
signature_approved=None,
signature_signed=None,
signature_return_url=None,
signature_sign_url=None,
domain_whitelist=None,
email_whitelist=None,
github_whitelist=None,
github_org_whitelist=None):
"""
Updates a signature and returns the newly updated signature in dict format.
A value of None means the field should not be updated.
:param signature_id: ID of the signature.
:type signature_id: ID | None
:param signature_project_id: Project ID for this signature.
:type signature_project_id: string | None
:param signature_reference_id: Reference ID for this signature.
:type signature_reference_id: string | None
:param signature_reference_type: Reference type for this signature.
:type signature_reference_type: ['user' | 'company'] | None
:param signature_type: New signature type ('cla' or 'dco').
:type signature_type: string | None
:param signature_signed: Whether this signature is signed or not.
:type signature_signed: boolean | None
:param signature_approved: Whether this signature is approved or not.
:type signature_approved: boolean | None
:param signature_return_url: The URL the user will be sent to after signing.
:type signature_return_url: string | None
:param signature_sign_url: The URL the user must visit to sign the signature.
:type signature_sign_url: string | None
:param domain_whitelist: the domain whitelist
:param email_whitelist: the email whitelist
:param github_whitelist: the github username whitelist
:param github_org_whitelist: the github org whitelist
:return: dict representation of the signature object.
:rtype: dict
"""
signature = Signature()
try: # Try to load the signature to update.
signature.load(str(signature_id))
old_signature = copy.deepcopy(signature)
except DoesNotExist as err:
return {'errors': {'signature_id': str(err)}}
update_str = f'signature {signature_id} updates: \n '
if signature_project_id is not None:
# make a note if the project id is set and doesn't match
if signature.get_signature_project_id() != str(signature_project_id):
cla.log.warning('update_signature() - project IDs do not match => '
f'record project id: {signature.get_signature_project_id()} != '
f'parameter project id: {str(signature_project_id)}')
try:
signature.set_signature_project_id(str(signature_project_id))
update_str += f'signature_project_id updated to {signature_project_id} \n'
except DoesNotExist as err:
return {'errors': {'signature_project_id': str(err)}}
# TODO: Ensure signature_reference_id exists.
if signature_reference_id is not None:
if signature.get_signature_reference_id() != str(signature_reference_id):
cla.log.warning('update_signature() - signature reference IDs do not match => '
f'record signature ref id: {signature.get_signature_reference_id()} != '
f'parameter signature ref id: {str(signature_reference_id)}')
signature.set_signature_reference_id(signature_reference_id)
if signature_reference_type is not None:
signature.set_signature_reference_type(signature_reference_type)
update_str += f'signature_reference_type updated to {signature_reference_type} \n'
if signature_type is not None:
if signature_type in ['cla', 'dco']:
signature.set_signature_type(signature_type)
update_str += f'signature_type updated to {signature_type} \n'
else:
return {'errors': {'signature_type': 'Invalid value passed. The accepted values are: (cla|dco)'}}
if signature_signed is not None:
try:
val = hug.types.smart_boolean(signature_signed)
signature.set_signature_signed(val)
update_str += f'signature_signed updated to {signature_signed} \n'
except KeyError as err:
return {'errors': {'signature_signed': 'Invalid value passed in for true/false field'}}
if signature_approved is not None:
try:
val = hug.types.smart_boolean(signature_approved)
update_signature_approved(signature, val)
update_str += f'signature_approved updated to {val} \n'
except KeyError as err:
return {'errors': {'signature_approved': 'Invalid value passed in for true/false field'}}
if signature_return_url is not None:
try:
val = cla.hug_types.url(signature_return_url)
signature.set_signature_return_url(val)
update_str += f'signature_return_url updated to {val} \n'
except KeyError as err:
return {'errors': {'signature_return_url': 'Invalid value passed in for URL field'}}
if signature_sign_url is not None:
try:
val = cla.hug_types.url(signature_sign_url)
signature.set_signature_sign_url(val)
update_str += f'signature_sign_url updated to {val} \n'
except KeyError as err:
return {'errors': {'signature_sign_url': 'Invalid value passed in for URL field'}}
if domain_whitelist is not None:
try:
domain_whitelist = hug.types.multiple(domain_whitelist)
signature.set_domain_whitelist(domain_whitelist)
update_str += f'domain_whitelist updated to {domain_whitelist} \n'
except KeyError as err:
return {'errors': {
'domain_whitelist': 'Invalid value passed in for the domain whitelist'
}}
if email_whitelist is not None:
try:
email_whitelist = hug.types.multiple(email_whitelist)
signature.set_email_whitelist(email_whitelist)
update_str += f'email_whitelist updated to {email_whitelist} \n'
except KeyError as err:
return {'errors': {
'email_whitelist': 'Invalid value passed in for the email whitelist'
}}
if github_whitelist is not None:
try:
github_whitelist = hug.types.multiple(github_whitelist)
signature.set_github_whitelist(github_whitelist)
            # A little bit of special logic for GitHub whitelists that have bots
bot_list = [github_user for github_user in github_whitelist if is_github_bot(github_user)]
            if bot_list:
handle_bots(bot_list, signature)
update_str += f'github_whitelist updated to {github_whitelist} \n'
except KeyError as err:
return {'errors': {
'github_whitelist': 'Invalid value passed in for the github whitelist'
}}
if github_org_whitelist is not None:
try:
github_org_whitelist = hug.types.multiple(github_org_whitelist)
signature.set_github_org_whitelist(github_org_whitelist)
update_str += f'github_org_whitelist updated to {github_org_whitelist} \n'
except KeyError as err:
return {'errors': {
'github_org_whitelist': 'Invalid value passed in for the github org whitelist'
}}
event_data = update_str
Event.create_event(
event_data=event_data,
event_type=EventType.UpdateSignature,
contains_pii=True,
)
signature.save()
notify_whitelist_change(auth_user=auth_user, old_signature=old_signature,new_signature=signature)
return signature.to_dict()
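# Illustrative usage sketch (the IDs and values below are hypothetical and not
# part of the original module): a partial update that marks a signature as
# signed and replaces its domain whitelist.
#   result = update_signature('some-signature-uuid', auth_user,
#                             signature_signed=True,
#                             domain_whitelist=['example.org'])
#   if 'errors' in result:
#       pass  # handle the validation error returned as a dict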
def change_in_list(old_list, new_list, msg_added, msg_deleted):
if old_list is None:
old_list = []
if new_list is None:
new_list = []
added = list(set(new_list)-set(old_list))
deleted = list(set(old_list)-set(new_list))
change = []
if len(added) > 0:
change.append(msg_added.format('\n'.join(added)))
if len(deleted) > 0:
change.append(msg_deleted.format('\n'.join(deleted)))
return change,added,deleted
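# Illustrative sketch (hypothetical values): diffing an email whitelist before
# and after an update.
#   change, added, deleted = change_in_list(
#       old_list=['a@example.org'],
#       new_list=['a@example.org', 'b@example.org'],
#       msg_added='added:\n{}',
#       msg_deleted='deleted:\n{}')
#   # change == ['added:\nb@example.org'], added == ['b@example.org'], deleted == []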
def notify_whitelist_change(auth_user, old_signature: Signature, new_signature: Signature):
changes = []
domain_msg_added = 'following value was added to the domain whitelist \n{}'
domain_msg_deleted = 'following value was deleted from the domain whitelist \n{}'
domain_changes,_,_ = change_in_list(old_list=old_signature.get_domain_whitelist(),
new_list=new_signature.get_domain_whitelist(),
msg_added=domain_msg_added,
msg_deleted=domain_msg_deleted)
changes = changes + domain_changes
email_msg_added = 'following value was added to the email whitelist \n{}'
email_msg_deleted = 'following value was deleted from the email whitelist \n{}'
email_changes, email_added, email_deleted = change_in_list(old_list=old_signature.get_email_whitelist(),
new_list=new_signature.get_email_whitelist(),
msg_added=email_msg_added,
msg_deleted=email_msg_deleted)
changes = changes + email_changes
github_msg_added = 'following value was added to the github whitelist \n{}'
github_msg_deleted = 'following value was deleted from the github whitelist \n{}'
github_changes, github_added, github_deleted = change_in_list(old_list=old_signature.get_github_whitelist(),
new_list=new_signature.get_github_whitelist(),
msg_added=github_msg_added,
msg_deleted=github_msg_deleted)
changes = changes + github_changes
github_org_msg_added = 'following value was added to the github organization whitelist \n{}'
github_org_msg_deleted = 'following value was deleted from the github organization whitelist \n{}'
github_org_changes, _, _ = change_in_list(old_list=old_signature.get_github_org_whitelist(),
new_list=new_signature.get_github_org_whitelist(),
msg_added=github_org_msg_added,
msg_deleted=github_org_msg_deleted)
changes = changes + github_org_changes
if len(changes) > 0:
# send email to cla managers about change
cla_managers = new_signature.get_managers()
subject,body,recipients = whitelist_change_email_content(cla_managers, changes)
if len(recipients) > 0:
get_email_service().send(subject, body, recipients)
company_name = new_signature.get_signature_reference_name()
project = cla.utils.get_project_instance()
project.load(new_signature.get_signature_project_id())
project_name = project.get_project_name()
cla_manager_name = auth_user.name
# send email to contributors
notify_whitelist_change_to_contributors(email_added=email_added,
email_removed=email_deleted,
github_users_added=github_added,
github_users_removed=github_deleted,
company_name=company_name,
project_name=project_name,
cla_manager_name=cla_manager_name)
event_data = " ,".join(changes)
Event.create_event(
event_data=event_data,
event_type=EventType.NotifyWLChange,
event_company_name=company_name,
event_project_name=project_name,
contains_pii=True,
)
def notify_whitelist_change_to_contributors(email_added, email_removed, github_users_added, github_users_removed,company_name, project_name, cla_manager_name):
for email in email_added:
subject,body,recipients = get_contributor_whitelist_update_email_content('added',company_name, project_name, cla_manager_name, email)
get_email_service().send(subject, body, recipients)
for email in email_removed:
subject,body,recipients = get_contributor_whitelist_update_email_content('deleted',company_name, project_name, cla_manager_name, email)
get_email_service().send(subject, body, recipients)
for github_username in github_users_added:
user = cla.utils.get_user_instance()
users = user.get_user_by_github_username(github_username)
if users is not None:
user = users[0]
email = user.get_user_email()
subject,body,recipients = get_contributor_whitelist_update_email_content('added',company_name, project_name, cla_manager_name, email)
get_email_service().send(subject, body, recipients)
for github_username in github_users_removed:
user = cla.utils.get_user_instance()
users = user.get_user_by_github_username(github_username)
if users is not None:
user = users[0]
email = user.get_user_email()
subject,body,recipients = get_contributor_whitelist_update_email_content('deleted',company_name, project_name, cla_manager_name, email)
get_email_service().send(subject, body, recipients)
def get_contributor_whitelist_update_email_content(action, company_name, project_name, cla_manager, email):
subject = 'Whitelisting Update'
preposition = 'to'
if action == 'deleted':
preposition = 'from'
body = """System generated email.
    This is to inform you that you have been {} {} the whitelist of {} for the project {} by CLA Manager {}.
Thanks,
EasyCLA system
""".format(action, preposition, company_name, project_name, cla_manager)
body = '<p>' + body.replace('\n', '<br>')+ '</p>'
recipients = [email]
return subject, body, recipients
def whitelist_change_email_content(cla_managers, changes):
"""Helper function to get whitelist change email subject, body, recipients"""
subject = 'EasyCLA whitelist modified'
change_string = "\n".join(changes)
body = """
    This is to notify you that the EasyCLA whitelist for your organization was modified. The modification was as follows:
{}
Thanks,
EasyCLA System
""".format(change_string)
body = '<p>' + body.replace('\n', '<br>')+ '</p>'
recipients = []
for manager in cla_managers:
email = manager.get_user_email()
if email is not None:
recipients.append(email)
return subject, body, recipients
def handle_bots(bot_list: List[str], signature: Signature) -> None:
cla.log.debug(f'Bots: {bot_list}')
for bot_name in bot_list:
try:
user = cla.utils.get_user_instance()
users = user.get_user_by_github_username(bot_name)
if users is None:
cla.log.debug(f'handle_bots - Bot: {bot_name} does not have a user record (None)')
bot_user: User = create_bot(bot_name, signature)
if bot_user is not None:
create_bot_signature(bot_user, signature)
else:
# Bot does have a user account in the EasyCLA system
found = False
# Search the list of user records to see if we have a matching company
for u in users:
if u.get_user_company_id() == signature.get_signature_reference_id():
found = True
cla.log.debug('handle_bots - found bot user account - ensuring the signature exists...')
create_bot_signature(u, signature)
break
# We found matching users in our system, but didn't find one with a matching company
if not found:
cla.log.debug(f'handle_bots - unable to find user {bot_name} '
f'for company: {signature.get_signature_reference_id()} - '
'creating user record that matches this company...')
bot_user: User = create_bot(bot_name, signature)
if bot_user is not None:
create_bot_signature(bot_user, signature)
else:
cla.log.warning(f'handle_bots - failed to create user record for: {bot_name}')
except DoesNotExist as err:
cla.log.debug(f'handle_bots - bot: {bot_name} does not have a user record (DoesNotExist)')
def create_bot_signature(bot_user: User, signature: Signature) -> Optional[Signature]:
cla.log.debug(f'create_bot_signature - locating Bot Signature for: {bot_user.get_user_name()}...')
project: Project = cla.utils.get_project_instance()
try:
project.load(signature.get_signature_project_id())
except DoesNotExist as err:
cla.log.warning(f'create_bot_signature - unable to load project by id: {signature.get_signature_project_id()}'
f' Unable to create bot: {bot_user}')
return None
the_company: Company = cla.utils.get_company_instance()
try:
the_company.load(signature.get_signature_reference_id())
except DoesNotExist as err:
cla.log.warning(f'create_bot_signature - unable to load company by id: {signature.get_signature_reference_id()}'
f' Unable to create bot: {bot_user}')
return None
bot_sig: Signature = cla.utils.get_signature_instance()
# First, before we create a new one, grab a list of employee signatures for this company/project
existing_sigs: List[Signature] = bot_sig.get_employee_signatures_by_company_project_model(
company_id=bot_user.get_user_company_id(), project_id=signature.get_signature_project_id())
# Check to see if we have an existing signature for this user/company/project combo
for sig in existing_sigs:
if sig.get_signature_reference_id() == bot_user.get_user_id():
cla.log.debug('create_bot_signature - found existing bot signature '
f'for user: {bot_user} '
f'with company: {the_company} '
f'for project: {project}')
return sig
# Didn't find an existing signature, let's create a new one
cla.log.debug(f'create_bot_signature - creating Bot Signature: {bot_user.get_user_name()}...')
bot_sig.set_signature_id(str(uuid.uuid4()))
bot_sig.set_signature_project_id(signature.get_signature_project_id())
bot_sig.set_signature_reference_id(bot_user.get_user_id())
bot_sig.set_signature_document_major_version(signature.get_signature_document_major_version())
bot_sig.set_signature_document_minor_version(signature.get_signature_document_minor_version())
bot_sig.set_signature_approved(True)
bot_sig.set_signature_signed(True)
bot_sig.set_signature_type('cla')
bot_sig.set_signature_reference_type('user')
bot_sig.set_signature_user_ccla_company_id(bot_user.get_user_company_id())
bot_sig.set_note(f'{datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")} Added as part of '
f'{project.get_project_name()}, whitelisted by '
f'{the_company.get_company_name()}')
bot_sig.save()
cla.log.debug(f'create_bot_signature - created Bot Signature: {bot_sig}')
return bot_sig
def create_bot(bot_name: str, signature: Signature) -> Optional[User]:
cla.log.debug(f'create_bot - creating Bot: {bot_name}...')
user_github_id = lookup_github_user(bot_name)
if user_github_id != 0:
project: Project = cla.utils.get_project_instance()
try:
project.load(signature.get_signature_project_id())
except DoesNotExist as err:
cla.log.warning(f'create_bot - Unable to load project by id: {signature.get_signature_project_id()}'
f' Unable to create bot: {bot_name}')
return None
the_company: Company = cla.utils.get_company_instance()
try:
the_company.load(signature.get_signature_reference_id())
except DoesNotExist as err:
cla.log.warning(f'create_bot - Unable to load company by id: {signature.get_signature_reference_id()}'
f' Unable to create bot: {bot_name}')
return None
user: User = cla.utils.get_user_instance()
user.set_user_id(str(uuid.uuid4()))
user.set_user_name(bot_name)
user.set_user_github_username(bot_name)
user.set_user_github_id(user_github_id)
user.set_user_company_id(signature.get_signature_reference_id())
user.set_note(f'{datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")} Added as part of '
f'{project.get_project_name()}, whitelisted by '
f'{the_company.get_company_name()}')
user.save()
cla.log.debug(f'create_bot - created Bot: {user}')
return user
cla.log.warning(f'create_bot - unable to create bot: {bot_name} - unable to lookup name in GitHub.')
return None
def is_github_bot(username: str) -> bool:
"""
Queries the GitHub public user endpoint for the specified username. Returns true if the user is a GitHub bot.
:param username: the user's github name
:return: True if the user is a GitHub bot, False otherwise
"""
cla.log.debug('Looking up GH user: ' + username)
r = requests.get('https://api.github.com/users/' + username)
if r.status_code == requests.codes.ok:
# cla.log.info(f'Response content type: {r.headers["Content-Type"]}')
# cla.log.info(f'Response body: {r.json()}')
response = r.json()
cla.log.debug(f'Lookup succeeded for GH user: {username} with id: {response["id"]}')
if 'type' in response:
return response['type'].lower() == 'bot'
else:
return False
elif r.status_code == requests.codes.not_found:
cla.log.debug(f'Lookup failed for GH user: {username} - not found')
return False
else:
cla.log.warning(f'Error looking up GitHub user by username: {username}. Error: {r.status_code} - {r.text}')
return False
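# Sketch of the assumed GitHub response shape (for illustration only): a GET to
# https://api.github.com/users/<username> returns JSON such as
#   {"login": "some-bot[bot]", "id": 12345678, "type": "Bot", ...}
# for bot accounts. is_github_bot() keys off the "type" field, while
# lookup_github_user() below returns the numeric "id".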
def lookup_github_user(username: str) -> int:
"""
Queries the GitHub public user endpoint for the specified username. Returns the user's GitHub ID.
:param username: the user's github name
:return: the user's GitHub ID
"""
cla.log.debug('Looking up GH user: ' + username)
r = requests.get('https://api.github.com/users/' + username)
if r.status_code == requests.codes.ok:
# cla.log.info(f'Response content type: {r.headers["Content-Type"]}')
# cla.log.info(f'Response body: {r.json()}')
response = r.json()
cla.log.debug(f'Lookup succeeded for GH user: {username} with id: {response["id"]}')
return response['id']
elif r.status_code == requests.codes.not_found:
cla.log.debug(f'Lookup failed for GH user: {username} - not found')
return 0
else:
cla.log.warning(f'Error looking up GitHub user by username: {username}. Error: {r.status_code} - {r.text}')
return 0
def update_signature_approved(signature, value):
"""Helper function to update the signature approval status and send emails if necessary."""
previous = signature.get_signature_approved()
signature.set_signature_approved(value)
email_approval = cla.conf['EMAIL_ON_SIGNATURE_APPROVED']
    if email_approval and not previous and value:  # Just got approved.
        content = get_signature_approved_email_content(signature)
        if content is None:  # Only user (ICLA) signatures produce email content.
            return
        subject, body, recipients = content
        get_email_service().send(subject, body, recipients)
def get_signature_approved_email_content(signature): # pylint: disable=invalid-name
"""Helper function to get signature approval email subject, body, and recipients."""
if signature.get_signature_reference_type() != 'user':
cla.log.info('Not sending signature approved emails for CCLAs')
return
subject = 'CLA Signature Approved'
user: User = cla.utils.get_user_instance()
user.load(signature.get_signature_reference_id())
project: Project = cla.utils.get_project_instance()
project.load(signature.get_signature_project_id())
recipients = [user.get_user_id()]
body = 'Hello %s. Your Contributor License Agreement for %s has been approved!' \
% (user.get_user_name(), project.get_project_name())
return subject, body, recipients
def delete_signature(signature_id):
"""
    Deletes a signature based on its UUID.
:param signature_id: The UUID of the signature.
:type signature_id: UUID
"""
signature = Signature()
try: # Try to load the signature to delete.
signature.load(str(signature_id))
except DoesNotExist as err:
# Should we bother sending back an error?
return {'errors': {'signature_id': str(err)}}
signature.delete()
event_data = f'Deleted signature {signature_id}'
Event.create_event(
event_data=event_data,
event_type=EventType.DeleteSignature,
contains_pii=False,
)
return {'success': True}
def get_user_signatures(user_id):
"""
Get all signatures for user.
:param user_id: The ID of the user in question.
:type user_id: string
"""
signatures = Signature().get_signatures_by_reference(str(user_id), 'user')
return [signature.to_dict() for signature in signatures]
def get_user_project_signatures(user_id, project_id, signature_type=None):
"""
Get all signatures for user filtered by a project.
:param user_id: The ID of the user in question.
:type user_id: string
:param project_id: The ID of the project to filter by.
:type project_id: string
:param signature_type: The signature type to filter by.
:type signature_type: string (one of 'individual', 'employee')
:return: The list of signatures requested.
:rtype: [cla.models.model_interfaces.Signature]
"""
sig = Signature()
signatures = sig.get_signatures_by_project(str(project_id),
signature_reference_type='user',
signature_reference_id=str(user_id))
ret = []
for signature in signatures:
if signature_type is not None:
if signature_type == 'individual' and \
               signature.get_signature_user_ccla_company_id() is not None:
continue
elif signature_type == 'employee' and \
                 signature.get_signature_user_ccla_company_id() is None:
continue
ret.append(signature.to_dict())
return ret
def get_company_signatures(company_id):
"""
Get all signatures for company.
:param company_id: The ID of the company in question.
:type company_id: string
"""
signatures = Signature().get_signatures_by_reference(company_id,
'company')
return [signature.to_dict() for signature in signatures]
def get_company_signatures_by_acl(username, company_id):
"""
    Get all signatures for a company, filtered by its ACL.
A company's signature will be returned only if the provided
username appears in the signature's ACL.
:param username: The username of the authenticated user
:type username: string
:param company_id: The ID of the company in question.
:type company_id: string
"""
# Get signatures by company reference
all_signatures = Signature().get_signatures_by_reference(company_id, 'company')
# Filter signatures this manager is authorized to see
signatures = []
for signature in all_signatures:
if username in signature.get_signature_acl():
signatures.append(signature)
return [signature.to_dict() for signature in signatures]
def get_project_signatures(project_id):
"""
Get all signatures for project.
:param project_id: The ID of the project in question.
:type project_id: string
"""
signatures = Signature().get_signatures_by_project(str(project_id), signature_signed=True)
return [signature.to_dict() for signature in signatures]
def get_project_company_signatures(company_id, project_id):
"""
Get all company signatures for project specified and a company specified
:param company_id: The ID of the company in question
:param project_id: The ID of the project in question
:type company_id: string
:type project_id: string
"""
signatures = Signature().get_signatures_by_company_project(str(company_id),
str(project_id))
return signatures
def get_project_employee_signatures(company_id, project_id):
"""
Get all employee signatures for project specified and a company specified
:param company_id: The ID of the company in question
:param project_id: The ID of the project in question
:type company_id: string
:type project_id: string
"""
signatures = Signature().get_employee_signatures_by_company_project(str(company_id),
str(project_id))
return signatures
def get_cla_managers(username, signature_id):
"""
Returns CLA managers from the CCLA signature ID.
:param username: The LF username
:type username: string
:param signature_id: The Signature ID of the CCLA signed.
:type signature_id: string
:return: dict representation of the project managers.
:rtype: dict
"""
signature = Signature()
try:
signature.load(str(signature_id))
except DoesNotExist as err:
return {'errors': {'signature_id': str(err)}}
# Get Signature ACL
signature_acl = signature.get_signature_acl()
if username not in signature_acl:
return {'errors': {'user_id': 'You are not authorized to see the managers.'}}
return get_managers_dict(signature_acl)
def get_project(project_id):
try:
project = Project()
project.load(project_id)
except DoesNotExist as err:
raise DoesNotExist('errors: {project_id: %s}' % str(err))
return project
def get_company(company_id):
try:
company = Company()
company.load(company_id)
except DoesNotExist as err:
raise DoesNotExist('errors: {company_id: %s}' % str(err))
return company
def add_cla_manager_email_content(lfid, project, company, managers):
""" Helper function to send email to newly added CLA Manager """
# Get emails of newly added Manager
recipients = get_user_emails(lfid)
if not recipients:
        raise Exception('Issue getting emails for lfid: %s' % lfid)
subject = f'CLA: Access to Corporate CLA for Project {project.get_project_name()}'
manager_list = ['%s <%s>' %(mgr.get('name', ' '), mgr.get('email', ' ')) for mgr in managers]
manager_list_str = '-'.join(manager_list) + '\n'
body = f""" Hello {lfid}, \n
\n
You have been granted access to the project {project.get_project_name()} for the organization: {company.get_company_name()}.\n
\n
If you have further questions, please contact one of the existing CLA Managers: \n
{manager_list_str}
- Linux Foundation EasyCLA System
"""
return subject, body, recipients
def remove_cla_manager_email_content(lfid, project, company, managers):
""" Helper function to send email to newly added CLA Manager """
# Get emails of newly added Manager
recipients = get_user_emails(lfid)
if not recipients:
        raise Exception('Issue getting emails for lfid: %s' % lfid)
subject = f'CLA: Access to Corporate CLA for Project {project.get_project_name()}'
    manager_list = ['%s <%s>' % (mgr.get('name', ' '), mgr.get('email', ' ')) for mgr in managers]
manager_list_str = '-'.join(manager_list) + '\n'
body = f""" Hello {lfid}, \n
\n
You have been removed as a CLA Manager from the project: {project.get_project_name()} for the organization: {company.get_company_name()}\n
\n
If you have further questions, please contact one of the existing CLA Managers: \n
{manager_list_str}
- Linux Foundation EasyCLA System
"""
return subject, body, recipients
def get_user_emails(lfid):
""" Helper function that gets user emails of given lf_username """
user = User()
users = user.get_user_by_username(lfid)
return [user.get_user_email() for user in users]
def add_cla_manager(auth_user, signature_id, lfid):
"""
Adds the LFID to the signature ACL and returns a new list of CLA Managers.
    :param auth_user: the authenticated user performing the update
    :type auth_user: AuthUser
    :param signature_id: The ID of the CCLA signature
    :type signature_id: UUID
    :param lfid: the lfid (manager username) to be added to the signature ACL
    :type lfid: string
"""
# Find project
signature = Signature()
try:
signature.load(str(signature_id))
except DoesNotExist as err:
return {'errors': {'project_id': str(err)}}
# Get Signature ACL
signature_acl = signature.get_signature_acl()
if auth_user.username not in signature_acl:
return {'errors': {'user_id': 'You are not authorized to see the managers.'}}
company.add_permission(auth_user, lfid, signature.get_signature_reference_id(), ignore_auth_user=True)
# Get Company and Project instances
try:
project = get_project(signature.get_signature_project_id())
except DoesNotExist as err:
return err
try:
company_instance = get_company(signature.get_signature_reference_id())
except DoesNotExist as err:
return err
# get cla managers for email content
managers = get_cla_managers(auth_user.username, signature_id)
# Add lfid to acl
signature.add_signature_acl(lfid)
signature.save()
# send email to newly added CLA manager
try:
subject, body, recipients = add_cla_manager_email_content(lfid, project, company_instance, managers)
        get_email_service().send(subject, body, recipients)
except Exception as err:
        return {'errors': {'email': 'Failed to send email for lfid: %s, %s' % (lfid, err)}}
event_data = f'{lfid} added as cla manager to Signature ACL for {signature.get_signature_id()}'
Event.create_event(
event_data=event_data,
event_type=EventType.AddCLAManager,
contains_pii=True,
)
return get_managers_dict(signature_acl)
def remove_cla_manager(username, signature_id, lfid):
"""
    Removes the LFID from the signature ACL
    :param username: username of the user performing the removal
    :type username: string
    :param signature_id: The ID of the CCLA signature
    :type signature_id: UUID
    :param lfid: the lfid (manager username) to be removed from the signature ACL
    :type lfid: string
"""
# Find project
signature = Signature()
try:
signature.load(str(signature_id))
except DoesNotExist as err:
return {'errors': {'signature_id': str(err)}}
# Validate user is the manager of the project
signature_acl = signature.get_signature_acl()
if username not in signature_acl:
return {'errors': {'user': "You are not authorized to manage this CCLA."}}
# Avoid to have an empty acl
if len(signature_acl) == 1 and username == lfid:
return {'errors': {'user': "You cannot remove this manager because a CCLA must have at least one CLA manager."}}
# Remove LFID from the acl
signature.remove_signature_acl(lfid)
signature.save()
# get cla managers for email content
managers = get_cla_managers(username, signature_id)
# Get Company and Project instances
try:
project = get_project(signature.get_signature_project_id())
except DoesNotExist as err:
return err
try:
company_instance = get_company(signature.get_signature_reference_id())
except DoesNotExist as err:
return err
# Send email to removed CLA manager
# send email to newly added CLA manager
try:
subject, body, recipients = remove_cla_manager_email_content(lfid, project, company_instance, managers)
        get_email_service().send(subject, body, recipients)
except Exception as err:
        return {'errors': {'email': 'Failed to send email for lfid: %s, %s' % (lfid, err)}}
event_data = f'User with lfid {lfid} removed from project ACL with signature {signature.get_signature_id()}'
Event.create_event(
event_data=event_data,
event_type=EventType.RemoveCLAManager,
contains_pii=True,
)
# Return modified managers
return get_managers_dict(signature_acl)
def get_managers_dict(signature_acl):
# Helper function to get a list of all cla managers from a CCLA Signature ACL
# Generate managers dict
managers_dict = []
for lfid in signature_acl:
user = cla.utils.get_user_instance()
users = user.get_user_by_username(str(lfid))
if users is not None:
if len(users) > 1:
cla.log.warning(f'More than one user record was returned ({len(users)}) from user '
f'username: {lfid} query')
user = users[0]
            # Manager found, fill the entry with its information
managers_dict.append({
'name': user.get_user_name(),
'email': user.get_user_email(),
'lfid': user.get_lf_username()
})
else:
# Manager not in database yet, only set the lfid
managers_dict.append({
'lfid': str(lfid)
})
return managers_dict
| 42.795521 | 159 | 0.6616 |
067b71382ecbb2a28c18ed730565b937f39e30fc
| 868 |
py
|
Python
|
blog/migrations/0014_auto_20190213_1559.py
|
John2013/portfolio
|
5be3ab4070cff97e1958f168eb2abd7b97bf6ad7
|
[
"MIT"
] | null | null | null |
blog/migrations/0014_auto_20190213_1559.py
|
John2013/portfolio
|
5be3ab4070cff97e1958f168eb2abd7b97bf6ad7
|
[
"MIT"
] | null | null | null |
blog/migrations/0014_auto_20190213_1559.py
|
John2013/portfolio
|
5be3ab4070cff97e1958f168eb2abd7b97bf6ad7
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-02-13 12:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0013_auto_20190213_1541'),
]
operations = [
migrations.AddField(
model_name='comment',
name='article',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='blog.Article'),
preserve_default=False,
),
migrations.AlterField(
model_name='comment',
name='datetime',
field=models.DateTimeField(blank=True, verbose_name='Дата'),
),
migrations.AlterField(
model_name='comment',
name='nickname',
field=models.CharField(max_length=255, verbose_name='Ник'),
),
]
| 28 | 111 | 0.599078 |
e541171dc76932a271ebe7a9e35074ae6a7cd84c
| 205 |
py
|
Python
|
v_report/equipment/doctype/maintenance_equipment_list/maintenance_equipment_list.py
|
Atulsah/v_report
|
3131c4081570ea977a3101b03fa65db07d92aad6
|
[
"MIT"
] | null | null | null |
v_report/equipment/doctype/maintenance_equipment_list/maintenance_equipment_list.py
|
Atulsah/v_report
|
3131c4081570ea977a3101b03fa65db07d92aad6
|
[
"MIT"
] | null | null | null |
v_report/equipment/doctype/maintenance_equipment_list/maintenance_equipment_list.py
|
Atulsah/v_report
|
3131c4081570ea977a3101b03fa65db07d92aad6
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021, Frappe and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class MaintenanceEquipmentList(Document):
pass
| 22.777778 | 49 | 0.804878 |
6e294cb14dc493906032734b930163c496063561
| 5,828 |
py
|
Python
|
botorch/utils/testing.py
|
BradyBromley/botorch
|
ea7f8fa2cead9c581309437a1f2f59ed070cb59e
|
[
"MIT"
] | 1 |
2020-07-21T21:25:16.000Z
|
2020-07-21T21:25:16.000Z
|
botorch/utils/testing.py
|
zpao/botorch
|
270599207f5b9bf8c66e1197ad2632bb69c3d3b9
|
[
"MIT"
] | null | null | null |
botorch/utils/testing.py
|
zpao/botorch
|
270599207f5b9bf8c66e1197ad2632bb69c3d3b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import warnings
from collections import OrderedDict
from typing import List, Optional
from unittest import TestCase
import torch
from torch import Tensor
from .. import settings
from ..models.model import Model
from ..posteriors import Posterior
from ..test_functions.synthetic import SyntheticTestFunction
EMPTY_SIZE = torch.Size()
class BotorchTestCase(TestCase):
r"""Basic test case for Botorch.
This
1. sets the default device to be `torch.device("cpu")`
2. ensures that no warnings are suppressed by default.
"""
device = torch.device("cpu")
def setUp(self):
warnings.resetwarnings()
settings.debug._set_state(False)
warnings.simplefilter("always", append=True)
class SyntheticTestFunctionBaseTestCase:
functions: List[SyntheticTestFunction]
def test_forward(self):
for dtype in (torch.float, torch.double):
for batch_shape in (torch.Size(), torch.Size([2])):
for f in self.functions:
f.to(device=self.device, dtype=dtype)
X = torch.rand(*batch_shape, f.dim, device=self.device, dtype=dtype)
X = f.bounds[0, :] + X * (f.bounds[1, :] - f.bounds[0, :])
res = f(X)
f(X, noise=False)
self.assertEqual(res.dtype, dtype)
self.assertEqual(res.device.type, self.device.type)
self.assertEqual(res.shape, batch_shape)
def test_optimal_value(self):
for dtype in (torch.float, torch.double):
for f in self.functions:
f.to(device=self.device, dtype=dtype)
try:
optval = f.optimal_value
optval_exp = -f._optimal_value if f.negate else f._optimal_value
self.assertEqual(optval, optval_exp)
except NotImplementedError:
pass
def test_optimizer(self):
for dtype in (torch.float, torch.double):
for f in self.functions:
f.to(device=self.device, dtype=dtype)
try:
Xopt = f.optimizers.clone().requires_grad_(True)
except NotImplementedError:
continue
res = f(Xopt, noise=False)
# if we have optimizers, we have the optimal value
res_exp = torch.full_like(res, f.optimal_value)
self.assertTrue(torch.allclose(res, res_exp, atol=1e-3, rtol=1e-3))
if f._check_grad_at_opt:
grad = torch.autograd.grad([*res], Xopt)[0]
self.assertLess(grad.abs().max().item(), 1e-3)
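# Illustrative sketch (an assumption, not part of the original module): concrete
# test cases typically mix this base class into BotorchTestCase and list the
# synthetic functions to exercise, e.g.
#   class TestMyFunctions(SyntheticTestFunctionBaseTestCase, BotorchTestCase):
#       functions = [SomeSyntheticTestFunction()]  # hypothetical subclass instance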
class MockPosterior(Posterior):
r"""Mock object that implements dummy methods and feeds through specified outputs"""
def __init__(self, mean=None, variance=None, samples=None):
self._mean = mean
self._variance = variance
self._samples = samples
@property
def device(self) -> torch.device:
for t in (self._mean, self._variance, self._samples):
if torch.is_tensor(t):
return t.device
return torch.device("cpu")
@property
def dtype(self) -> torch.dtype:
for t in (self._mean, self._variance, self._samples):
if torch.is_tensor(t):
return t.dtype
return torch.float32
@property
def event_shape(self) -> torch.Size:
if self._samples is not None:
return self._samples.shape
if self._mean is not None:
return self._mean.shape
if self._variance is not None:
return self._variance.shape
return torch.Size()
@property
def mean(self):
return self._mean
@property
def variance(self):
return self._variance
def rsample(
self,
sample_shape: Optional[torch.Size] = None,
base_samples: Optional[Tensor] = None,
) -> Tensor:
"""Mock sample by repeating self._samples. If base_samples is provided,
do a shape check but return the same mock samples."""
if sample_shape is None:
sample_shape = torch.Size()
if sample_shape is not None and base_samples is not None:
# check the base_samples shape is consistent with the sample_shape
if base_samples.shape[: len(sample_shape)] != sample_shape:
raise RuntimeError("sample_shape disagrees with base_samples.")
return self._samples.expand(sample_shape + self._samples.shape)
class MockModel(Model):
r"""Mock object that implements dummy methods and feeds through specified outputs"""
def __init__(self, posterior: MockPosterior) -> None:
super(Model, self).__init__()
self._posterior = posterior
def posterior(
self,
X: Tensor,
output_indices: Optional[List[int]] = None,
observation_noise: bool = False,
) -> MockPosterior:
return self._posterior
@property
def num_outputs(self) -> int:
event_shape = self._posterior.event_shape
return event_shape[-1] if len(event_shape) > 0 else 0
def state_dict(self) -> None:
pass
def load_state_dict(
self, state_dict: Optional[OrderedDict] = None, strict: bool = False
) -> None:
pass
class MockAcquisitionFunction:
r"""Mock acquisition function object that implements dummy methods."""
def __init__(self):
self.model = None
self.X_pending = None
def __call__(self, X):
return X[..., 0].max(dim=-1)[0]
def set_X_pending(self, X_pending: Optional[Tensor] = None):
self.X_pending = X_pending
| 32.377778 | 88 | 0.608099 |
db11e4f1f6a9c1775cc47ee1e8e31411c3627ced
| 318 |
py
|
Python
|
xsklearn/transformers/token_embedders/__init__.py
|
altescy/xsklearn
|
dff8ea0737ea622529dd396d455e9ae8b07e73fd
|
[
"MIT"
] | null | null | null |
xsklearn/transformers/token_embedders/__init__.py
|
altescy/xsklearn
|
dff8ea0737ea622529dd396d455e9ae8b07e73fd
|
[
"MIT"
] | null | null | null |
xsklearn/transformers/token_embedders/__init__.py
|
altescy/xsklearn
|
dff8ea0737ea622529dd396d455e9ae8b07e73fd
|
[
"MIT"
] | null | null | null |
from xsklearn.transformers.token_embedders.fasttext_embedder import ( # noqa: F401
FastTextEmbedder,
)
from xsklearn.transformers.token_embedders.token_embedder import ( # noqa: F401
TokenEmbedder,
)
from xsklearn.transformers.token_embedders.word2vec_embedder import ( # noqa: F401
Word2VecEmbedder,
)
| 31.8 | 83 | 0.792453 |
6c63313bb4ede1acbc6ffeb51151fd63c9cb4eca
| 10,373 |
py
|
Python
|
tools/shell/shell-test.py
|
Mu-L/duckdb
|
9a1c3f674b9ecec4aee52c599dbeb30fa79fc751
|
[
"MIT"
] | null | null | null |
tools/shell/shell-test.py
|
Mu-L/duckdb
|
9a1c3f674b9ecec4aee52c599dbeb30fa79fc751
|
[
"MIT"
] | null | null | null |
tools/shell/shell-test.py
|
Mu-L/duckdb
|
9a1c3f674b9ecec4aee52c599dbeb30fa79fc751
|
[
"MIT"
] | null | null | null |
import sys
import subprocess
import tempfile
import os
import shutil
if len(sys.argv) < 2:
raise Exception('need shell binary as parameter')
def test_exception(command, input, stdout, stderr, errmsg):
print('--- COMMAND --')
print(' '.join(command))
print('--- INPUT --')
print(input)
print('--- STDOUT --')
print(stdout)
print('--- STDERR --')
print(stderr)
raise Exception(errmsg)
def test(cmd, out=None, err=None, extra_commands=None):
command = [sys.argv[1], '--batch', '-init', '/dev/null']
if extra_commands:
command += extra_commands
res = subprocess.run(command, capture_output=True, input=bytearray(cmd, 'utf8'))
stdout = res.stdout.decode('utf8').strip()
stderr = res.stderr.decode('utf8').strip()
if out and out not in stdout:
test_exception(command, cmd, stdout, stderr, 'out test failed')
if err and err not in stderr:
test_exception(command, cmd, stdout, stderr, 'err test failed')
if not err and stderr != '':
test_exception(command, cmd, stdout, stderr, 'got err test failed')
if err is None and res.returncode != 0:
test_exception(command, cmd, stdout, stderr, 'process returned non-zero exit code but no error was specified')
def tf():
return tempfile.mktemp().replace('\\','/')
# basic test
test('select \'asdf\' as a;', out='asdf')
test('select * from range(10000);', out='9999')
# test pragma
test("""
.mode csv
.headers off
.sep |
CREATE TABLE t0(c0 INT);
PRAGMA table_info('t0');
""", out='0|c0|INTEGER|false||false')
datafile = tf()
print("42\n84", file=open(datafile, 'w'))
test('''
CREATE TABLE a (i INTEGER);
.import "%s" a
SELECT SUM(i) FROM a;
''' % datafile, out='126')
# nested types
test('select LIST_VALUE(1, 2);', out='[1, 2]')
test("select STRUCT_PACK(x := 3, y := 3);", out="{'x': 3, 'y': 3}")
test("select STRUCT_PACK(x := 3, y := LIST_VALUE(1, 2));", out="{'x': 3, 'y': [1, 2]}")
test('''
CREATE TABLE a (i STRING);
INSERT INTO a VALUES ('XXXX');
SELECT CAST(i AS INTEGER) FROM a;
''' , err='Could not convert')
test('.auth ON', err='sqlite3_set_authorizer')
test('.auth OFF', err='sqlite3_set_authorizer')
test('.backup %s' % tf(), err='sqlite3_backup_init')
# test newline in value
test('''select 'hello
world' as a;''', out='hello\\nworld')
# test newline in column name
test('''select 42 as "hello
world";''', out='hello\\nworld')
test('''
.bail on
.bail off
.binary on
SELECT 42;
.binary off
SELECT 42;
''')
test('''
.cd %s
.cd %s
''' % (tempfile.gettempdir().replace('\\','/'), os.getcwd().replace('\\','/')))
test('''
CREATE TABLE a (I INTEGER);
.changes on
INSERT INTO a VALUES (42);
DROP TABLE a;
''', out="total_changes: 1")
test('''
CREATE TABLE a (I INTEGER);
.changes on
INSERT INTO a VALUES (42);
INSERT INTO a VALUES (42);
INSERT INTO a VALUES (42);
DROP TABLE a;
''', out="total_changes: 3")
test('''
CREATE TABLE a (I INTEGER);
.changes off
INSERT INTO a VALUES (42);
DROP TABLE a;
''')
# maybe at some point we can do something meaningful here
# test('.dbinfo', err='unable to read database header')
test('''
.echo on
SELECT 42;
''', out="SELECT 42")
test('.exit')
test('.quit')
test('.print asdf', out='asdf')
test('''
.headers on
SELECT 42 as wilbur;
''', out="wilbur")
test('''
.nullvalue wilbur
SELECT NULL;
''', out="wilbur")
test("select 'yo' where 'abc' like 'a%c';", out='yo')
test("select regexp_matches('abc','abc')", out='true')
test('.help', 'Show help text for PATTERN')
test('.load %s' % tf(), err="Error")
# this should be fixed
test('.selftest', err='sqlite3_table_column_metadata')
scriptfile = tf()
print("select 42", file=open(scriptfile, 'w'))
test('.read %s' % scriptfile, out='42')
test('.show', out='rowseparator')
test('.limit length 42', err='sqlite3_limit')
# ???
test('.lint fkey-indexes')
test('.timeout', err='sqlite3_busy_timeout')
test('.save %s' % tf(), err='sqlite3_backup_init')
test('.restore %s' % tf(), err='sqlite3_backup_init')
# don't crash plz
test('.vfsinfo')
test('.vfsname')
test('.vfslist')
test('.stats', err="sqlite3_status64")
test('.stats on')
test('.stats off')
test('''
create table test (a int, b varchar);
insert into test values (1, 'hello');
.schema test
''', out="CREATE TABLE test(a INTEGER, b VARCHAR);")
test('''
create table test (a int, b varchar);
insert into test values (1, 'hello');
.schema tes%
''', out="CREATE TABLE test(a INTEGER, b VARCHAR);")
test('''
create table test (a int, b varchar);
insert into test values (1, 'hello');
.schema tes*
''', out="CREATE TABLE test(a INTEGER, b VARCHAR);")
test('''
create table test (a int, b varchar);
CREATE TABLE test2(a INTEGER, b VARCHAR);
.schema
''', out="CREATE TABLE test2(a INTEGER, b VARCHAR);")
test('.fullschema', 'No STAT tables available', '')
test('''
CREATE TABLE asda (i INTEGER);
CREATE TABLE bsdf (i INTEGER);
CREATE TABLE csda (i INTEGER);
.tables
''', out="asda bsdf csda")
test('''
CREATE TABLE asda (i INTEGER);
CREATE TABLE bsdf (i INTEGER);
CREATE TABLE csda (i INTEGER);
.tables %da
''', out="asda csda")
test('.indexes', out="")
test('''
CREATE TABLE a (i INTEGER);
CREATE INDEX a_idx ON a(i);
.indexes a%
''', out="a_idx")
# this does not seem to output anything
test('.sha3sum')
test('''
.mode csv
.separator XX
SELECT 42,43;
''', out="42XX43")
test('''
.timer on
SELECT NULL;
''', out="Run Time:")
test('''
.scanstats on
SELECT NULL;
''', err='scanstats')
test('.trace %s\n; SELECT 42;' % tf(), err='sqlite3_trace_v2')
outfile = tf()
test('''
.mode csv
.output %s
SELECT 42;
''' % outfile)
outstr = open(outfile,'rb').read()
if b'42' not in outstr:
raise Exception('.output test failed')
outfile = tf()
test('''
.once %s
SELECT 43;
''' % outfile)
outstr = open(outfile,'rb').read()
if b'43' not in outstr:
raise Exception('.once test failed')
# This somehow does not log nor fail. works for me.
test('''
.log %s
SELECT 42;
.log off
''' % tf())
test('''
.mode ascii
SELECT NULL, 42, 'fourty-two', 42.0;
''', out='fourty-two')
test('''
.mode csv
SELECT NULL, 42, 'fourty-two', 42.0;
''', out=',fourty-two,')
test('''
.mode column
.width 10 10 10 10
SELECT NULL, 42, 'fourty-two', 42.0;
''', out=' fourty-two ')
test('''
.mode html
SELECT NULL, 42, 'fourty-two', 42.0;
''', out='<TD>fourty-two</TD>')
# FIXME sqlite3_column_blob
# test('''
# .mode insert
# SELECT NULL, 42, 'fourty-two', 42.0;
# ''', out='fourty-two')
test('''
.mode line
SELECT NULL, 42, 'fourty-two' x, 42.0;
''', out='x = fourty-two')
test('''
.mode list
SELECT NULL, 42, 'fourty-two', 42.0;
''', out='|fourty-two|')
# FIXME sqlite3_column_blob and %! format specifier
# test('''
# .mode quote
# SELECT NULL, 42, 'fourty-two', 42.0;
# ''', out='fourty-two')
test('''
.mode tabs
SELECT NULL, 42, 'fourty-two', 42.0;
''', out='fourty-two')
db1 = tf()
db2 = tf()
test('''
.open %s
CREATE TABLE t1 (i INTEGER);
INSERT INTO t1 VALUES (42);
.open %s
CREATE TABLE t2 (i INTEGER);
INSERT INTO t2 VALUES (43);
.open %s
SELECT * FROM t1;
''' % (db1, db2, db1), out='42')
# open file that is not a database
duckdb_nonsense_db = 'duckdbtest_nonsensedb.db'
with open(duckdb_nonsense_db, 'w+') as f:
f.write('blablabla')
test('', err='unable to open', extra_commands=[duckdb_nonsense_db])
os.remove(duckdb_nonsense_db)
# enable_profiling doesn't result in any output
test('''
PRAGMA enable_profiling
''', err="")
# only when we follow it up by an actual query does something get printed to the terminal
test('''
PRAGMA enable_profiling;
SELECT 42;
''', out="42", err="Query Profiling Information")
test('.system echo 42', out="42")
test('.shell echo 42', out="42")
# this fails because db_config is missing
# test('''
# .eqp full
# SELECT 42;
# ''', out="DUMMY_SCAN")
# this fails because the sqlite printf accepts %w for table names
# test('''
# CREATE TABLE a (I INTEGER);
# INSERT INTO a VALUES (42);
# .clone %s
# ''' % tempfile.mktemp())
test('.databases', out='main:')
# .dump test
test('''
CREATE TABLE a (I INTEGER);
.changes off
INSERT INTO a VALUES (42);
.dump
''', 'CREATE TABLE a(i INTEGER)')
test('''
CREATE TABLE a (I INTEGER);
.changes off
INSERT INTO a VALUES (42);
.dump
''', 'COMMIT')
# .dump a specific table
test('''
CREATE TABLE a (I INTEGER);
.changes off
INSERT INTO a VALUES (42);
.dump a
''', 'CREATE TABLE a(i INTEGER);')
# .dump LIKE
test('''
CREATE TABLE a (I INTEGER);
.changes off
INSERT INTO a VALUES (42);
.dump a%
''', 'CREATE TABLE a(i INTEGER);')
# more types, tables and views
test('''
CREATE TABLE a (d DATE, k FLOAT, t TIMESTAMP);
CREATE TABLE b (c INTEGER);
.changes off
INSERT INTO a VALUES (DATE '1992-01-01', 0.3, NOW());
INSERT INTO b SELECT * FROM range(0,10);
.dump
''', 'CREATE TABLE a(d DATE, k FLOAT, t TIMESTAMP);')
# import/export database
target_dir = 'duckdb_shell_test_export_dir'
try:
shutil.rmtree(target_dir)
except:
pass
test('''
.mode csv
.changes off
CREATE TABLE integers(i INTEGER);
CREATE TABLE integers2(i INTEGER);
INSERT INTO integers SELECT * FROM range(100);
INSERT INTO integers2 VALUES (1), (3), (99);
EXPORT DATABASE '%s';
DROP TABLE integers;
DROP TABLE integers2;
IMPORT DATABASE '%s';
SELECT SUM(i)*MAX(i) FROM integers JOIN integers2 USING (i);
''' % (target_dir, target_dir), '10197')
shutil.rmtree(target_dir)
# test using .import with a CSV file containing invalid UTF8
duckdb_nonsensecsv = 'duckdbtest_nonsensecsv.csv'
with open(duckdb_nonsensecsv, 'wb+') as f:
f.write(b'\xFF\n')
test('''
.nullvalue NULL
CREATE TABLE test(i INTEGER);
.import duckdbtest_nonsensecsv.csv test
SELECT * FROM test;
''', out="NULL")
os.remove(duckdb_nonsensecsv)
# .mode latex
test('''
.mode latex
CREATE TABLE a (I INTEGER);
.changes off
INSERT INTO a VALUES (42);
SELECT * FROM a;
''', '\\begin{tabular}')
# .mode trash
test('''
.mode trash
SELECT 1;
''', '')
# dump blobs: FIXME
# test('''
# CREATE TABLE a (b BLOB);
# .changes off
# INSERT INTO a VALUES (DATE '1992-01-01', 0.3, NOW());
# .dump
# ''', 'COMMIT')
# printf %q
# test('''
# CREATE TABLE a (i INTEGER);
# CREATE INDEX a_idx ON a(i);
# .imposter a_idx a_idx_imp
# ''')
# test that sqlite3_complete works somewhat correctly
test('''/*
;
*/
select 42;
''', out='42')
test('''-- this is a comment ;
select 42;
''', out='42')
test('''--;;;;;;
select 42;
''', out='42')
test('/* ;;;;;; */ select 42;', out='42')
| 20.06383 | 120 | 0.644365 |
a5666cc620e747ab364e176c2ebd46949649b6c7
| 3,225 |
py
|
Python
|
src/tools/grit/grit/tool/update_resource_ids/reader.py
|
Abreto/naiveproxy
|
5d84bf9f18eb5a949558086bad7c945bb9051362
|
[
"BSD-3-Clause"
] | 1 |
2020-03-11T03:44:02.000Z
|
2020-03-11T03:44:02.000Z
|
src/tools/grit/grit/tool/update_resource_ids/reader.py
|
bylond/naiveproxy
|
a04a8330a8bb0d0892259cf6d795271fbe6e6d0e
|
[
"BSD-3-Clause"
] | null | null | null |
src/tools/grit/grit/tool/update_resource_ids/reader.py
|
bylond/naiveproxy
|
a04a8330a8bb0d0892259cf6d795271fbe6e6d0e
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helpers to read GRD files and estimate resource ID usages.
This module uses grit.grd_reader to estimate resource ID usages in GRD
(and GRDP) files by counting the occurrences of {include, message, structure}
tags. This approach avoids the complexities of conditional inclusions, but
produces a conservative estimate of ID usages.
"""
from __future__ import print_function
import collections
import os
from grit import grd_reader
from grit.tool.update_resource_ids import common
TAGS_OF_INTEREST = set(['include', 'message', 'structure'])
def _CountResourceUsage(grd):
tag_name_to_count = {tag: set() for tag in TAGS_OF_INTEREST}
# Pass '_chromium', but '_google_chrome' would produce the same result.
root = grd_reader.Parse(grd, defines={'_chromium': True})
# Count all descendant tags, regardless of whether they're active.
for node in root.Preorder():
if node.name in TAGS_OF_INTEREST:
tag_name_to_count[node.name].add(node.attrs['name'])
return {k: len(v) for k, v in tag_name_to_count.iteritems() if v}
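# Illustrative sketch (hypothetical numbers): for a GRD file containing three
# <include> tags and twelve <message> tags, _CountResourceUsage() returns
#   {'include': 3, 'message': 12}
# Tag types that never appear are dropped by the `if v` filter above.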
def GenerateResourceUsages(item_list, src_dir, fake):
"""Visits a list of ItemInfo to generate maps from tag name to usage.
Args:
    item_list: List of ItemInfo objects read from a resource_ids file.
src_dir: Absolute directory of Chrome's src/ directory.
fake: For testing: Sets 10 as usages for all tags, to avoid reading GRD.
Yields:
Tuple (item, tag_name_to_usage), where |item| is from |item_list| and
|tag_name_to_usage| is a dict() mapping tag name to (int) usage.
"""
if fake:
for item in item_list:
tag_name_to_usage = collections.Counter({t.name: 10 for t in item.tags})
yield item, tag_name_to_usage
return
for item in item_list:
supported_tag_names = set(tag.name for tag in item.tags)
if item.meta and 'sizes' in item.meta:
# If META has "sizes" field, use it instead of reading GRD.
tag_name_to_usage = collections.Counter()
for k, vlist in item.meta['sizes'].iteritems():
tag_name_to_usage[common.StripPlural(k.val)] = sum(v.val for v in vlist)
tag_names = set(tag_name_to_usage.keys())
if tag_names != supported_tag_names:
        raise ValueError('META "sizes" field must specify the same fields as '
                         'the actual "sizes" field.')
else:
# Generated GRD start with '<(SHARED_INTERMEDIATE_DIR)'. Just check '<'.
if item.grd.startswith('<'):
raise ValueError('%s: Generated GRD must use META with "sizes" field '
'to specify size bounds.' % item.grd)
grd_file = os.sep.join([src_dir, item.grd])
if not os.path.isfile(grd_file):
raise ValueError('Nonexistent GRD provided: %s' % item.grd)
tag_name_to_usage = _CountResourceUsage(grd_file)
tag_names = set(tag_name_to_usage.keys())
if not tag_names.issubset(supported_tag_names):
missing = [t + 's' for t in tag_names - supported_tag_names]
raise ValueError(
'Resource ids for %s needs entry for %s' % (item.grd, missing))
yield item, tag_name_to_usage
| 42.434211 | 80 | 0.702326 |
ca22ec6e6b774bdb5405a52d465d936114c83af9
| 413 |
py
|
Python
|
tensorflow/python/platform/googletest.py
|
vsilyaev/tensorflow
|
f41959ccb2d9d4c722fe8fc3351401d53bcf4900
|
[
"Apache-2.0"
] | 2 |
2021-06-11T19:21:06.000Z
|
2021-08-17T07:55:32.000Z
|
tensorflow/python/platform/googletest.py
|
vsilyaev/tensorflow
|
f41959ccb2d9d4c722fe8fc3351401d53bcf4900
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/platform/googletest.py
|
vsilyaev/tensorflow
|
f41959ccb2d9d4c722fe8fc3351401d53bcf4900
|
[
"Apache-2.0"
] | 2 |
2015-11-13T21:11:49.000Z
|
2015-11-29T04:13:49.000Z
|
"""Switch between depending on googletest or unittest."""
# pylint: disable=unused-import
# pylint: disable=g-import-not-at-top
# pylint: disable=wildcard-import
import tensorflow.python.platform
import control_imports
if control_imports.USE_OSS and control_imports.OSS_GOOGLETEST:
from tensorflow.python.platform.default._googletest import *
else:
from tensorflow.python.platform.google._googletest import *
| 37.545455 | 62 | 0.818402 |
e06ee69ff6f9465bc319e2f2b89770cf80f21792
| 13,204 |
py
|
Python
|
starpy.py
|
vrooje/starpy
|
3b332124c3ab08dfee469f077a5390b5e5fc794f
|
[
"Apache-2.0"
] | null | null | null |
starpy.py
|
vrooje/starpy
|
3b332124c3ab08dfee469f077a5390b5e5fc794f
|
[
"Apache-2.0"
] | null | null | null |
starpy.py
|
vrooje/starpy
|
3b332124c3ab08dfee469f077a5390b5e5fc794f
|
[
"Apache-2.0"
] | null | null | null |
from posterior import *
from astropy.cosmology import FlatLambdaCDM
import numpy as N
import sys, os, time
from scipy.stats import kde
from scipy import interpolate
from scipy.integrate import simps
from scipy.interpolate import LinearNDInterpolator
from scipy.interpolate import interp2d
from itertools import product
# Use sys to assign arguments for the galaxy data from the command line
try:
# this is the default, running starpy once on one source
u_r, err_u_r, nuv_u, err_nuv_u, z, dr8, ra, dec = sys.argv[1:]
rows = [[u_r, err_u_r, nuv_u, err_nuv_u, z, dr8, ra, dec]]
many_sources = False
except:
# if the above doesn't work assume the first input points to a file with a list of colors for many sources
# the inputs should have the same structure as the above, with spaces between parameters
objlist_file = sys.argv[1]
many_sources = True
#lists = [[] for i in range(8)]
#u_r, err_u_r, nuv_u, err_nuv_u, z, dr8, ra, dec = lists
rows = []
with open(objlist_file) as fobj:
for i_l, line in enumerate(fobj):
arg = line.strip('\n').strip(' ').split(' ')
if not(len(arg) == 8):
print("Something wrong at line %d in file %s, got %d values instead of 8" % (i_l, objlist_file, len(arg)))
exit(-1)
rows.append(arg)
print(" Read %d objects from file %s." % (len(rows), objlist_file))
# Use astropy to calculate the age from the redshift in the data
cosmo = FlatLambdaCDM(H0 = 71.0, Om0 = 0.26)
#age = N.array(cosmo.age(float(z)))
'''
26/07/2018 - edited by BDS to move input parameters into this file so they don't have to be
read in separately in posterior.py and fluxes.py
'''
# defaults
tq = N.linspace(0.003, 13.8, 100)
tau = N.linspace(0.003, 4, 100)
ages = N.linspace(10.88861228, 13.67023409, 50)
col1_file = 'nuv_look_up_ssfr.npy'
col2_file = 'ur_look_up_ssfr.npy'
model = 'models/Padova1994/chabrier/ASCII/extracted_bc2003_lr_m62_chab_ssp.ised_ASCII'
use_table = False
outparamfile = ''
write_params = False
plotdir = "./"
savedir = "./"
paramfile = 'posterior_params.in'
try:
with open(paramfile) as f:
for line in f:
arg = line.rstrip('\n').strip(' ').split('=')
#print(arg)
#print("------\n")
if (arg[0].lower().strip() in ['lookup', 'lookups', 'lookuptable', 'lookuptables', 'lu', 'lut']):
use_table = True
tables = arg[1].split(',')
if tables[0].strip() in ['default']:
lu_default = True
else:
if len(tables) < 2:
tables = arg[1].split(' ')
if len(tables) < 2:
print('Error: if not "default", 2 lookup tables needed, filenames separated by "," or " "')
exit(-1)
col1_file = tables[0].strip()
col2_file = tables[1].strip()
lu_default = False
elif (arg[0].lower().strip() in ['tq', 't_q', 'tquench', 't_quench', 'quench_time', 'quenching_time']):
valstr = arg[1].split(',')
if len(valstr) != 3:
print('Error: if specifying quenching times tq in %s, must be formatted "tq = tstart, tend, n_vals"' % paramfile)
exit(-1)
tq = N.linspace(float(valstr[0]), float(valstr[1]), int(valstr[2]))
elif (arg[0].lower().strip() in ['tau', 'exptau', 'quenchingrate', 'quenching_rate']):
valstr = arg[1].split(',')
if len(valstr) != 3:
print('Error: if specifying quenching rates tau in %s, must be formatted "tau = taustart, tauend, n_vals"' % paramfile)
exit(-1)
tau = N.linspace(float(valstr[0]), float(valstr[1]), int(valstr[2]))
elif (arg[0].lower().strip() in ['ages', 'age', 't_obs', 'age_obs']):
valstr = arg[1].split(',')
if len(valstr) != 3:
print('Error: if specifying ages in %s, must be formatted "age = agestart, ageend, n_vals"' % paramfile)
exit(-1)
ages = N.linspace(float(valstr[0]), float(valstr[1]), int(valstr[2]))
elif (arg[0].lower().strip() in ['model', 'models']):
if (arg[1].strip() in ['default']):
# we've already defined the default above
pass
else:
model = arg[1].strip()
elif (arg[0].lower().strip() in ['save', 'savedir', 'save_dir']):
if (arg[1].strip() in ['default']):
# we've already defined the default above
pass
else:
savedir = arg[1].strip()
if not savedir.endswith("/"):
savedir += "/"
elif (arg[0].lower().strip() in ['plot', 'plotdir', 'plot_dir']):
if (arg[1].strip() in ['default']):
# we've already defined the default above
pass
else:
plotdir = arg[1].strip()
if not plotdir.endswith("/"):
plotdir += "/"
elif (arg[0].lower().strip() in ['params_out', 'paramfile', 'paramfile_out']):
outparamfile = arg[1].strip()
write_params = True
fparams = open(outparamfile, "w")
fparams.write("# id tq_median tau_median dtq_hi_68pct dtau_hi_68pct dtq_lo_68pct dtau_lo_68pct dtq_hi_95pct dtau_hi_95pct dtq_lo_95pct dtau_lo_95pct\n")
else:
if (line.strip(' ').startswith("#")) | (len(line.rstrip('\n').strip(' ').strip('\t')) < 1):
pass
else:
print("WARNING: unable to parse line in %s:\n%s" % (paramfile, line))
#print(arg)
# end loop through file
# end with open(paramfile)
grid = N.array(list(product(ages, tau, tq)))
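    # grid holds every (age, tau, tq) combination, one row per model, presumably in
    # the same flattened order as the saved lookup tables; with the default axes
    # that is 50 * 100 * 100 = 500,000 rows of 3 columns.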
nuv_pred = N.load(col1_file)
ur_pred = N.load(col2_file)
lu = N.append(nuv_pred.reshape(-1,1), ur_pred.reshape(-1,1), axis=1)
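    # lu then has shape (n_models, 2): column 0 is the predicted bluer (NUV-u)
    # colour and column 1 the predicted redder (u-r) colour for each grid point.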
except IOError as e:
print("Oops!\n\n")
print(e)
print("\n")
print("Input file %s not found or there was an error reading in a file within it, trying inputs from STDIN..." % paramfile)
model = str(raw_input('Tell me the location of the extracted (.ised_ASCII) SPS model to use to predict the u-r and NUV-u colours, e.g. ~/extracted_bc2003_lr_m62_chab_ssp.ised_ASCII :'))
method = raw_input('Do you wish to use a look-up table? (yes/no) :')
if method == 'yes' or method =='y':
use_table = True
prov = raw_input('Do you wish to use the provided u-r and NUV-u look up tables? (yes/no) :')
if prov == 'yes' or prov =='y':
            print('gridding...')
tq = N.linspace(0.003, 13.8, 100)
tau = N.linspace(0.003, 4, 100)
ages = N.linspace(10.88861228, 13.67023409, 50)
grid = N.array(list(product(ages, tau, tq)))
            print('loading...')
nuv_pred = N.load('nuv_look_up_ssfr.npy')
ur_pred = N.load('ur_look_up_ssfr.npy')
lu = N.append(nuv_pred.reshape(-1,1), ur_pred.reshape(-1,1), axis=1)
elif prov=='no' or prov=='n':
col1_file = str(raw_input('Location of your NUV-u colour look up table :'))
col2_file = str(raw_input('Location of your u-r colour look up table :'))
one = N.array(input('Define first axis values (ages) of look up table start, stop, len(axis1); e.g. 10, 13.8, 50 :'))
            ages = N.linspace(float(one[0]), float(one[1]), int(one[2]))
            two = N.array(input('Define second axis values (tau) of look up table start, stop, len(axis2); e.g. 0, 4, 100 : '))
            tau = N.linspace(float(two[0]), float(two[1]), int(two[2]))
            three = N.array(input('Define third axis values (tq) of look up table start, stop, len(axis3); e.g. 0, 13.8, 100 : '))
            tq = N.linspace(float(three[0]), float(three[1]), int(three[2]))
            grid = N.array(list(product(ages, tau, tq)))
            print('loading...')
nuv_pred = N.load(col1_file)
ur_pred = N.load(col2_file)
lu = N.append(nuv_pred.reshape(-1,1), ur_pred.reshape(-1,1), axis=1)
else:
sys.exit("You didn't give a valid answer (yes/no). Try running again.")
print("Parameters and models used:")
print("Model file: %s" % model)
if use_table:
print("Lookup files used: \n bluer colour: %s\n redder colour: %s" % (col1_file, col2_file))
else:
print("Not using lookup table, predicting colours from model directly (this is VERY SLOW).")
print(".... seriously, if you are running this a lot you should make a lookup table first!")
print("Saving plots to %s" % plotdir)
print("Saving .npy files to %s" % savedir)
if write_params:
print("Saving running list of median and 68, 95 percent confidence regions to %s" % outparamfile)
else:
print("Writing t, tau best fit (medians) to screen, NOT to a file.")
print("Grid used:\n")
print(" quenching time tq varies from %.4f to %.4f Gyr, in %d steps" % (min(tq), max(tq), len(tq)))
print(" quenching rate tau varies from %.4f to %.4f, in %d steps" % (min(tau), max(tau), len(tau)))
print(" pop ages covered varies from %.4f to %.4f Gyr, in %d steps" % (min(ages), max(ages), len(ages)))
if many_sources:
print("\nBeginning computations for %s sources..." % len(rows))
# this bit was previously in fluxes.py
data = N.loadtxt(model)
model_ages = data[0,1:]
model_lambda = data[1:,0]
model_fluxes = data[1:,1:]
time_flux = N.arange(0, 0.01, 0.003)
t_flux = N.linspace(0,14.0,100)
time_steps_flux = N.append(time_flux, t_flux[1:])*1E9
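# time_steps_flux is the age grid (in years) at which the model fluxes are interpolated:
# 4 early steps from arange(0, 0.01, 0.003) plus 99 from linspace(0, 14.0, 100)[1:],
# i.e. 103 ages spanning 0 to 14 Gyr, converted to yr by the factor 1E9.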
#First mask the ages of the very young stars hidden in birth clouds
mask = model_ages[model_ages<4E6]
model_fluxes[:,0:len(mask)] = 0.0
# Calculate the fluxes at the ages specified by the time steps rather than in the models using numpy/scipy array manipulations rather than a for loop
f = interpolate.interp2d(model_ages, model_lambda, model_fluxes)
interp_fluxes_sim = f(time_steps_flux, model_lambda)
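# interp_fluxes_sim should then have shape (len(model_lambda), len(time_steps_flux)),
# i.e. one SED column per interpolation age (interp2d returns a len(y) x len(x) array).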
# Define parameters needed for emcee
nwalkers = 100 # number of monte carlo chains
nsteps= 800 # number of steps in the monte carlo chain
start = [7.5, 1.5] # starting place of all the chains
burnin = 400 # number of steps in the burn in phase of the monte carlo chain
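# With these settings each run keeps roughly nwalkers * (nsteps - burnin)
# = 100 * (800 - 400) = 40,000 posterior samples, assuming sample() discards the burn-in steps.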
#The rest calls the emcee module which is initialised in the sample function of the posterior file.
if use_table:
the_c_function = lookup_col_one
lookup = lu #N.append(nuv_pred.reshape(-1,1), ur_pred.reshape(-1,1), axis=1)
else:
the_c_function = predict_c_one
lookup = None
for i_row in range(len(rows)):
u_r, err_u_r, nuv_u, err_nuv_u, z, dr8, ra, dec = rows[i_row]
if many_sources:
print("======= Beginning run %d =======" % i_row)
age = N.array(cosmo.age(float(z)))
print("Input colors are:\n bluer = %s +/- %s\b redder = %s +/- %s" % (nuv_u, err_nuv_u, u_r, err_u_r))
print("for source %s at redshift z = %s, i.e. age = %.2f Gyr,\n and (RA, Dec) = (%s, %s)" % (dr8, z, age, ra, dec))
it_worked = False
try:
samples, samples_save = sample(2, nwalkers, nsteps, burnin, start, float(u_r), float(err_u_r), float(nuv_u), float(err_nuv_u), age, dr8, ra, dec, the_c_function, use_table, (tq, tau, ages), lu=lookup, savedir=savedir)
it_worked = True
except Exception as e:
print("******************* WHOOPS -- SOMETHING WENT WRONG FOR ID %s *******************")
print(e)
print("\n We shall skip this one.... onwards!\n")
print("********************************************************************************\n\n")
if it_worked:
        tq_mcmc, tau_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0], v[4]-v[1], v[1]-v[3]), zip(*N.percentile(samples, [16,50,84,2.5,97.5],axis=0)))
        print('Best fit [t, tau] values found by starpy for input parameters are : [ {} {} ]'.format(tq_mcmc[0], tau_mcmc[0]))
fig = corner_plot(samples, labels = [r'$ t_{quench}$', r'$ \tau$'], extents=[[N.min(samples[:,0]), N.max(samples[:,0])],[N.min(samples[:,1]),N.max(samples[:,1])]], bf=[tq_mcmc, tau_mcmc], id=dr8)
fig.savefig(plotdir+'starpy_output_'+str(dr8)+'_'+str(ra)+'_'+str(dec)+'.pdf')
if write_params:
fparams.write("%s %f %f %f %f %f %f %f %f %f %f\n" % (dr8, tq_mcmc[0], tau_mcmc[0], tq_mcmc[1], tau_mcmc[1], tq_mcmc[2], tau_mcmc[2], tq_mcmc[3], tau_mcmc[3], tq_mcmc[4], tau_mcmc[4]))
# the headers are defined above, when the input file is read in
#fparams.write("# id tq_median tau_median dtq_hi_68pct dtau_hi_68pct dtq_lo_68pct dtau_lo_68pct dtq_hi_95pct dtau_hi_95pct dtq_lo_95pct dtau_lo_95pct\n")
if write_params:
fparams.close()
| 43.291803 | 225 | 0.573614 |
cf85dc2b290e5bf34d1cd45f86c42995a9b7cfc1
| 6,247 |
py
|
Python
|
telemetry_f1_2021/main.py
|
jasperan/f1-telemetry-oracle
|
5b2d7efac265539931849863655a5f92d86c75a8
|
[
"MIT"
] | 4 |
2022-02-21T16:36:09.000Z
|
2022-03-28T06:50:54.000Z
|
telemetry_f1_2021/main.py
|
jasperan/f1-telemetry-oracle
|
5b2d7efac265539931849863655a5f92d86c75a8
|
[
"MIT"
] | null | null | null |
telemetry_f1_2021/main.py
|
jasperan/f1-telemetry-oracle
|
5b2d7efac265539931849863655a5f92d86c75a8
|
[
"MIT"
] | 2 |
2022-02-17T19:25:04.000Z
|
2022-02-23T04:16:16.000Z
|
import datetime
import copy
import json
import pickle
from pathlib import Path
from telemetry_f1_2021.packets import HEADER_FIELD_TO_PACKET_TYPE
from telemetry_f1_2021.packets import PacketSessionData, PacketMotionData, PacketLapData, PacketEventData, PacketParticipantsData, PacketCarDamageData
from telemetry_f1_2021.packets import PacketCarSetupData, PacketCarTelemetryData, PacketCarStatusData, PacketFinalClassificationData, PacketLobbyInfoData, PacketSessionHistoryData
from telemetry_f1_2021.listener import TelemetryListener
from oracledb import OracleJSONDatabaseConnection
# using time module
import time
import argparse
cli_parser = argparse.ArgumentParser(
description="Script that records telemetry F1 2021 weather data into an Autonomous JSON Database"
)
cli_parser.add_argument('-g', '--gamehost', type=str, help='Gamehost identifier (something unique)', required=True)
args = cli_parser.parse_args()
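# Example invocation (the gamehost value is just an illustrative label):
#   python main.py --gamehost living-room-rig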
global listener
def _get_listener():
try:
print('Starting listener on localhost:20777')
return TelemetryListener()
except OSError as exception:
print('Unable to setup connection: {}'.format(exception.args[1]))
print('Failed to open connector, stopping.')
exit(127)
listener = _get_listener()
# get weather data and insert it into database.
def main():
    # Rebind the module-level listener on reconnect below so read_data_inf() picks up the new socket.
    global listener
# Get connection to db.
dbhandler = OracleJSONDatabaseConnection()
try:
read_data_inf(dbhandler)
except KeyboardInterrupt:
print('Stop the car, stop the car Checo.')
print('Stop the car, stop at pit exit.')
print('Just pull over to the side.')
dbhandler.close_pool()
except Exception:
listener = _get_listener()
read_data_inf(dbhandler)
dbhandler.close_pool()
def read_data_inf(dbhandler):
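    # Read packets forever, dispatching each known packet type to a collection named
    # after it; session packets additionally feed the weather collection. Any exception
    # simply restarts the read loop so a single bad packet cannot kill the recorder.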
try:
while True:
packet = listener.get()
# ts stores the time in seconds
ts = time.time()
#print('{}'.format(PacketSessionData.__class__))
if isinstance(packet, PacketSessionData):
save_packet_weather(dbhandler, packet, ts)
save_packet('PacketSessionData', dbhandler, packet)
elif isinstance(packet, PacketMotionData):
save_packet('PacketMotionData', dbhandler, packet)
elif isinstance(packet, PacketLapData):
save_packet('PacketLapData', dbhandler, packet)
elif isinstance(packet, PacketEventData):
save_packet('PacketEventData', dbhandler, packet)
elif isinstance(packet, PacketParticipantsData):
save_packet('PacketParticipantsData', dbhandler, packet)
elif isinstance(packet, PacketCarSetupData):
save_packet('PacketCarSetupData', dbhandler, packet)
elif isinstance(packet, PacketCarTelemetryData):
save_packet('PacketCarTelemetryData', dbhandler, packet)
elif isinstance(packet, PacketCarStatusData):
save_packet('PacketCarStatusData', dbhandler, packet)
elif isinstance(packet, PacketFinalClassificationData):
save_packet('PacketFinalClassificationData', dbhandler, packet)
elif isinstance(packet, PacketLobbyInfoData):
save_packet('PacketLobbyInfoData', dbhandler, packet)
elif isinstance(packet, PacketCarDamageData):
save_packet('PacketCarDamageData', dbhandler, packet)
elif isinstance(packet, PacketSessionHistoryData):
save_packet('PacketSessionHistoryData', dbhandler, packet)
except Exception:
read_data_inf(dbhandler)
def save_weather_object(collection_name, dbhandler, dict_object):
res = dbhandler.insert(collection_name, dict_object)
if res == 0: # error
pass
else:
print('{} | INSERT {} OK'.format(datetime.datetime.now(), dict_object['timestamp']))
def save_oracle_db(collection_name, dbhandler, dict_object):
res = dbhandler.insert(collection_name, dict_object)
if res == 0: # error
pass
elif res == -1:
print('{} | INSERT INTO {} STRUCTURAL ERROR'.format(datetime.datetime.now(), collection_name))
else:
print('{} | INSERT INTO {} OK'.format(datetime.datetime.now(), collection_name))
# method used only for weather data for AIHack2022
def save_packet_weather(dbhandler, packet, timestamp):
dict_object = packet.to_dict()
dict_object['timestamp'] = int(timestamp) # get integer timestamp for building the time series. We'll ignore 1/2 of all packets since we get 2 per second but it's not relevant for weather.
dict_object['gamehost'] = args.gamehost
# Load into Oracle DB
save_weather_object('f1_2021_weather', dbhandler, dict_object)
def save_packet(collection_name, dbhandler, packet):
dict_object = packet.to_json()
# Load into Oracle DB
save_oracle_db(collection_name, dbhandler, dict_object)
def save_packets():
samples = {}
listener = _get_listener()
packets_to_capture = copy.deepcopy(HEADER_FIELD_TO_PACKET_TYPE)
# remove FinalClassification and LobbyInfo
for k in [(2021, 1, 8), (2021, 1, 9)]:
del HEADER_FIELD_TO_PACKET_TYPE[k]
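    # Keys are (m_packet_format, m_packet_version, m_packet_id) tuples, so (2021, 1, 8)
    # and (2021, 1, 9) are the FinalClassification and LobbyInfo packets noted above.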
while len(samples) != len(list(HEADER_FIELD_TO_PACKET_TYPE)):
packet = listener.get()
key = (
packet.m_header.m_packet_format,
packet.m_header.m_packet_version,
packet.m_header.m_packet_id,
)
if key in list(packets_to_capture):
packet_type = HEADER_FIELD_TO_PACKET_TYPE[key].__name__
samples[packet_type] = packet
del packets_to_capture[key]
root_dir = Path(__file__).parent
for packet_name, packet in samples.items():
'''
with open(f'{root_dir}/example_packets/{packet_name}.pickle', 'wb') as fh:
print(f'Saving packet: {root_dir}/example_packets/{packet_name}.pickle')
pickle.dump(packet, fh, protocol=pickle.HIGHEST_PROTOCOL)
'''
with open('{}/example_packets/json/{}.json'.format(root_dir, packet_name), 'w') as fh:
json.dump(packet.to_dict(), fh, indent=2)
print('Done!')
if __name__ == '__main__':
main()
| 35.902299 | 192 | 0.685929 |