metadata: dict
text: string, lengths 60 to 3.49M
{ "source": "jhaazpr/live-object", "score": 3 }
#### File: live-object/src/smartfan.py
```python
from time import sleep

import serial


class SmartFan:
    def __init__(self, name='fan', port='/dev/tty.usbmodem1411'):
        # Establish the connection on a specific port
        self.name = name
        self.baudrate = 9600
        if not port:  # no port, no physical fan
            self.ser = None
        else:
            self.ser = serial.Serial(port, self.baudrate)

    def off(self):
        if self.ser:
            print "sent off to {}".format(self.name)
            self.ser.write(str(chr(0)))
        else:
            print "would have sent off to {}".format(self.name)

    def low(self):
        if self.ser:
            print "sent low to {}".format(self.name)
            self.ser.write(str(chr(1)))
        else:
            print "would have sent low to {}".format(self.name)

    def med(self):
        if self.ser:
            print "sent med to {}".format(self.name)
            self.ser.write(str(chr(2)))
        else:
            print "would have sent med to {}".format(self.name)

    def high(self):
        if self.ser:
            print "sent high to {}".format(self.name)
            self.ser.write(str(chr(3)))
        else:
            print "would have sent high to {}".format(self.name)
```

#### File: live-object/src/tweet_stream.py
```python
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream

# MongoDB
from pymongo import MongoClient

import argparse
import time
import json

# Access Variables. Required: secret.py
# !!! Do not push secret.py to a public repository !!!
import secret


class Listener(StreamListener):
    def __init__(self, db):
        self.db = db

    def on_data(self, data):
        # self.output_file.write(data)
        tweet = json.loads(data)
        self.db.insert(tweet)
        # print data
        return True

    def on_error(self, status):
        print status


class TweetStream():
    def __init__(self, consumer_key, consumer_secret, access_token_key, \
                 access_token_secret):
        self.auth = OAuthHandler(consumer_key, consumer_secret)
        self.auth.set_access_token(access_token_key, access_token_secret)
        self.db = DB = MongoClient().test.tweets

    def filter(self, search_params):
        self.listener = Listener(self.db)
        self.stream = Stream(self.auth, self.listener)
        start_time = time.time()
        print 'Running stream with parameters {}.\n \
            Saving to MongoDB collection: db.test.tweets' \
            .format(search_params)
        try:
            self.stream.filter(track=search_params)
        except KeyboardInterrupt:
            print '\n\nRan stream for {} seconds'.format(time.time() - start_time)


# MAIN FUNCTION
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Search for tweets with given subjects')
    parser.add_argument('subjects', metavar='C', type=str, nargs='+',
                        help='a subject on twitter')
    args = parser.parse_args()
    print 'Searching for tweets with these subjects: {}...'.format(args.subjects)

    tweet_stream = TweetStream(secret.consumer_key, secret.consumer_secret,
                               secret.access_token_key, secret.access_token_secret)
    tweet_stream.filter(args.subjects)
```
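Note on the file above: `tweet_stream.py` imports a local `secret` module that is deliberately kept out of the repository. A minimal sketch of what that file would need to provide, based on the attributes accessed in the `__main__` block (the placeholder values below are hypothetical):

```python
# secret.py -- hypothetical template; never commit real credentials
consumer_key = "YOUR_CONSUMER_KEY"
consumer_secret = "YOUR_CONSUMER_SECRET"
access_token_key = "YOUR_ACCESS_TOKEN"
access_token_secret = "YOUR_ACCESS_TOKEN_SECRET"
```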
{ "source": "jhaazpr/plastic-forming", "score": 3 }
#### File: src/deviation-gague/deviation-gague.py
```python
import numpy as np
import cv2
import cv2.cv as cv
import json

img = cv2.imread('paper_reduced_shape.png', 0)
img = cv2.medianBlur(img, 5)
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cimg_clone = cimg.copy()
cv2.namedWindow('Deviation Gague')

# Globals for Tracking clicks
tracking_centers = []
tracking_labels = ['TL', 'TR', 'BR', 'BL']
clicked_tracking_pts = []
selecting = False

IMAGE_DIMS = img.shape
IMG_WIDTH = IMAGE_DIMS[1]
IMG_HEIGHT = IMAGE_DIMS[0]
TOP_LEFT = np.array([0, 0])
TOP_RIGHT = np.array([IMG_WIDTH, 0])
FONT = cv2.FONT_HERSHEY_SIMPLEX


def click_set_tracking_point(event, x, y, flags, param):
    # grab references to globals
    global clicked_tracking_pts, selecting
    # if the left mouse button was clicked, record the starting (x, y)
    # coordinates and indicate that cropping is being performed
    if event == cv2.EVENT_LBUTTONDOWN:
        selecting = True
    # check to see if the left mouse button was released
    if event == cv2.EVENT_LBUTTONUP:
        # record the ending (x, y) coordinates and indicate that
        # the cropping operation is finished
        clicked_tracking_pts.append((x, y))
        cv2.circle(cimg, (x, y), 5, (255, 255, 0), 3)
        cv2.imshow('Deviation Gague', cimg)
        selecting = False
        # draw a rectangle around the region of interest
        # cv2.circle(cimg, (x, y), 2, (255, 0, 0), 3)
        # cv2.imshow('Deviation Gague', cimg)


cv2.setMouseCallback('Deviation Gague', click_set_tracking_point)


def find_circles():
    circles = cv2.HoughCircles(img, cv.CV_HOUGH_GRADIENT, 2, 20,
                               param1=50, param2=30, minRadius=0, maxRadius=40)
    if circles is None:
        return
    circles = np.uint16(np.around(circles))
    # centers = circles[:, :, 0:2][0]
    # print centers
    # print np.linalg.norm(centers[0] - TOP_LEFT)
    norms_top_left = []
    norms_top_right = []
    centers = []
    for i in circles[0, :]:
        # draw the outer circle
        center = np.array([i[0], i[1]])
        center_tup = (i[0], i[1])
        radius = i[2]
        cv2.circle(cimg, center_tup, radius, (0, 255, 0), 2)
        # draw the center of the circle
        cv2.circle(cimg, center_tup, 2, (0, 0, 255), 3)
        # compute distances from the top left and top right points
        centers.append(center)
        norms_top_left.append(np.linalg.norm((i[0], i[1]) - TOP_LEFT))
        norms_top_right.append(np.linalg.norm((i[0], i[1]) - TOP_RIGHT))
    # Add principal points in this order [TL, TR, BR, BL]
    tracking_centers.append(centers[norms_top_left.index(min(norms_top_left))])
    tracking_centers.append(centers[norms_top_right.index(min(norms_top_right))])
    tracking_centers.append(centers[norms_top_left.index(max(norms_top_left))])
    tracking_centers.append(centers[norms_top_right.index(max(norms_top_right))])
    for center in enumerate(tracking_centers):
        # print center
        cv2.circle(cimg, tuple(center[1]), 2, (255, 0, 0), 3)
        cv2.putText(cimg, tracking_labels[center[0]], tuple(center[1]), FONT, 1, (255, 255, 255), 2)


def perp_transform():
    """Evaluates the user-input points and returns a transformed image."""
    # if len(clicked_tracking_pts) != 4:
    #     print "Error: need exactly four clicked points in clockwise order: {}".format(tracking_labels)
    #     return
    # sq_errors = []
    # for truth in enumerate(clicked_tracking_pts):
    #     err = np.linalg.norm(truth[1] - tracking_centers[truth[0]])
    #     sq_errors.append(err ** 2)
    tracking_centers_tup = map(lambda np_arr: tuple(np_arr), tracking_centers)
    # print "Estimated points: {}".format(tracking_centers_tup)
    # print "True (clicked) points: {}".format(clicked_tracking_pts)
    # print "Squared errors: {}".format(sq_errors)
    # Source points are detected tracking points in the image
    pts_src = np.array([list(t) for t in tracking_centers_tup]).astype(float)
    # Destination points are the corners of the screen
    pts_dst = np.array([[0 - 10, 0 - 10], [IMG_WIDTH + 10, 0 - 10],
                        [IMG_WIDTH + 10, IMG_HEIGHT + 10], [0 - 10, IMG_HEIGHT + 10]]).astype(float)
    homog, status = cv2.findHomography(pts_src, pts_dst)
    # print homog
    # cimg_copy = cimg.copy()
    return cv2.warpPerspective(cimg, homog, (IMG_WIDTH, IMG_HEIGHT))


def eval_features():
    if len(clicked_tracking_pts) != 4:
        print "Error: need exactly four clicked points in clockwise order: {}".format(tracking_labels)
        return
    sq_errors = []
    for truth in enumerate(clicked_tracking_pts):
        err = np.linalg.norm(truth[1] - tracking_centers[truth[0]])
        sq_errors.append(err ** 2)
    tracking_centers_tup = map(lambda np_arr: tuple(np_arr), tracking_centers)
    print "Estimated points: {}".format(tracking_centers_tup)
    print "True (clicked) points: {}".format(clicked_tracking_pts)
    print "Squared errors: {}".format(sq_errors)


def parse_contour(contours):
    # Form: [[[x1, y1], [x2, y2], ... [xn, yn]] [[x1, y1], ... [xn, yn]]
    data = {}
    data['width'] = 0
    data['height'] = 0
    data['paths'] = []
    for group in contours:
        num_pts = group.shape[0]
        group = group.reshape((num_pts, 2))
        group = group.tolist()
        path = {}
        path['type'] = 'Path'
        path['closed'] = 'true'  # NOTE: not sure about this
        path['segments'] = group
        data['paths'].append(path)
    final = {'data': data}
    return json.dumps(final)


find_circles()
cv2.imshow('Deviation Gague', cimg)

while True:
    key = cv2.waitKey(1) & 0xFF
    # Threshold image
    if key == ord('t'):
        cimg_grey = cv2.cvtColor(cimg, cv2.COLOR_BGRA2GRAY)
        (thresh, cimg) = cv2.threshold(cimg_grey, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        cv2.imshow('Deviation Gague', cimg)
    # Reset user-input tracking points
    if key == ord('r'):
        tracking_centers = []
        clicked_tracking_pts = []
        cimg = cimg_clone.copy()
        find_circles()
        cv2.imshow('Deviation Gague', cimg)
    # Evaluate user-input tracking points
    if key == ord('e'):
        eval_features()
    # Apply a perspective transform and crop
    if key == ord('h'):
        cimg = perp_transform()
        cv2.imshow('Deviation Gague', cimg)
    # Find contours
    if key == ord('c'):
        (contours, hierarchy) = cv2.findContours(cimg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        contours = parse_contour(contours)
        with open("contours.json", "w+") as cont_file:
            cont_file.write(str(contours))
            cont_file.flush()
            cont_file.close()
        print "Contours written to contours.json"
    # Quit
    if key == ord('q'):
        break

cv2.destroyAllWindows()
```
{ "source": "jhabarsingh/CODECHEF-APP", "score": 3 }
#### File: codechef/apis/utilities.py
```python
def check_user(username, title):
    if username in str(title):
        return True
    else:
        return False
```
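For reference, `check_user` is just a substring test of `username` against the stringified `title`; a quick hypothetical illustration:

```python
>>> check_user("jhabarsingh", "Accepted submission by jhabarsingh")
True
>>> check_user("jhabarsingh", "Accepted submission by someone_else")
False
```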
{ "source": "jhabarsingh/SIMADIAN", "score": 2 }
#### File: simadianbackend/commerce/views.py ```python from django_filters.rest_framework import DjangoFilterBackend from .serializers import ItemSerializer, CategorySerializer, MassUploadFileSerializer, ItemListSerializer, MessageSerializer, MassUploadSerializer from rest_framework.pagination import PageNumberPagination from rest_framework.permissions import IsAuthenticated, IsAdminUser from .permissions import IsSuperUser, IsOwner from django.contrib.auth import get_user_model from rest_framework.response import Response from rest_framework.views import APIView from django.shortcuts import render from rest_framework import generics, serializers from rest_framework import mixins from .models import Item, Category, MassUpload, Messages from django.http import Http404 from rest_framework import status User = get_user_model() class ItemListApiView(generics.GenericAPIView, mixins.ListModelMixin): ''' Return all the users avaialble if their account filters are available ''' queryset = Item.objects.all().order_by("-id") serializer_class = ItemListSerializer filter_backends = [DjangoFilterBackend] filterset_fields = ['category', 'name', 'sold', 'country', 'city', 'state', 'landmark'] authentication_classes = [] def get(self, request, *args, **kwargs): # List out the Item with given filter fields return self.list(request, *args, **kwargs) class CategoryListApiView(generics.GenericAPIView, mixins.ListModelMixin): ''' Return all the category avaialble ''' queryset = Category.objects.all() serializer_class = CategorySerializer filter_backends = [DjangoFilterBackend] filterset_fields = ['category'] authentication_classes = [] pagination_class = None def get(self, request, *args, **kwargs): # List out the Item with given filter fields return self.list(request, *args, **kwargs) class CategoryCreateApiView(generics.CreateAPIView): ''' Create Item account ''' serializer_class = CategorySerializer class ItemCreateApiView(APIView): ''' Create Item account ''' def post(self, request): # try: data = request.data.copy() user = request.user serializers = ItemSerializer(data=data, partial=True) serializers.initial_data["seller"] = request.user.id if serializers.is_valid(): serializers.save() return Response(serializers.data) return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST) # except: # raise Http404 class ItemUpdateDeleteApiView(APIView): ''' CRUD on Item model ''' permission_classes = [IsAuthenticated] def put(self, request, format=None): try: id = request.data.get("id") item = Item.objects.get(pk=id) serializers = ItemSerializer(item, data=request.data, partial=True) if serializers.is_valid(): serializers.save() return Response(serializers.data) return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST) except: raise Http404 def delete(self, request, format=None): try: id = request.data.get("id") item = Item.objects.get(pk=id) serializers = ItemSerializer(item) item.delete() return Response(serializers.data) except: raise Http404 class CategoryDeleteApiView(APIView): ''' CRUD on Item model ''' permission_classes = [IsAuthenticated] def delete(self, request, format=None): try: id = request.data.get("category") category = Category.objects.get(pk=id) serializers = CategorySerializer(Category.objects.get(pk=id)) category.delete() return Response(serializers.data) except: raise Http404 class MessageCreateApiView(APIView): ''' Create Messages account ''' permission_classes = [IsAuthenticated] def post(self, request): try: print(request.data) sender = request.user receiver = 
User.objects.get(id=request.data.get("receiver")) content = request.data.get("content") if receiver == sender: return Response({ 'message': 'can\'t message yourself' }) message = Messages( sender=sender, receiver=receiver, content=content) try: message.save() return Response(request.data) except: pass return Response({"message": "bad request"}, status=status.HTTP_400_BAD_REQUEST) except: raise Http404 class MessagesSentApiView(generics.GenericAPIView, mixins.ListModelMixin): ''' List Messages sent ''' permission_classes = [IsAuthenticated] serializer_class = MessageSerializer def get(self, request, *args, **kwargs): # List out the Item with given filter fields return self.list(request, *args, **kwargs) def get_queryset(self): """ This view should return a list of all the Messages Sent By The Authenticated user. """ user = self.request.user return Messages.objects.all().filter(sender=user).order_by("-id") class MessagesReceivedApiView(generics.GenericAPIView, mixins.ListModelMixin): ''' List Messages sent ''' permission_classes = [IsAuthenticated] serializer_class = MessageSerializer def get(self, request, *args, **kwargs): # List out the Item with given filter fields return self.list(request, *args, **kwargs) def get_queryset(self): """ This view should return a list of all the Messages Sent By The Authenticated user. """ user = self.request.user return Messages.objects.all().filter(receiver=user).order_by("-id") class MessageDeleteApiView(APIView): ''' CRUD on Messages model ''' permission_classes = [IsAuthenticated] def delete(self, request, format=None): try: id = request.data.get("message_id") category = Messages.objects.get(pk=id) serializers = MessageSerializer(category) if(category): if(category.sender == request.user): category.delete() return Response(serializers.data) return Response({'message': 'unauthorized'}) except: raise Http404 class MassFiles(APIView): ''' Create Messages account ''' permission_classes = [IsAuthenticated] serializer_class = MassUploadSerializer def post(self, request): try: user = request.user data = request.FILES print(data) MassUpload(user=user, file=data.get("file")).save() return Response({ "message": "Data" }) except: return Response({"message": "bad request"}, status=status.HTTP_400_BAD_REQUEST) class MassUploadListApiView(generics.GenericAPIView, mixins.ListModelMixin): ''' Return Mass Upload File List Api View ''' queryset = MassUpload.objects.all() serializer_class = MassUploadFileSerializer filter_backends = [DjangoFilterBackend] filterset_fields = ['user', 'uploaded_at'] authentication_classes = [] def get(self, request, *args, **kwargs): # List out the Item with given filter fields return self.list(request, *args, **kwargs) ``` #### File: SIMADIAN/utilities/get_templates.py ```python import urllib.request import urllib.parse def getTemplates(apikey): params = {'apikey': apikey} f = urllib.request.urlopen('https://api.textlocal.in/get_templates/?' + urllib.parse.urlencode(params)) return (f.read(), f.code) resp, code = getTemplates('NmU0YzM0NDUzNzRlNjI1NzZkNzg1MTQ5NjYzMDVhNmY=') print (resp) ```
{ "source": "jhaber-zz/archive", "score": 3 }
#### File: archive/scripts/data_prep.py ```python import os, re, fnmatch # for navigating file trees and working with strings import csv # for reading in CSV files #from glob import glob,iglob # for finding files within nested folders--compare with os.walk import json, pickle, csv # For saving a loading dictionaries, DataFrames, lists, etc. in JSON, pickle, and CSV formats from math import log10 # For calculating logarithms of dictionary counts from datetime import datetime # For timestamping files import time, timeout_decorator # To prevent troublesome files from bottlenecking the parsing process, use timeouts import sys # For working with user input import logging # for logging output, to help with troubleshooting from nltk.stem.porter import PorterStemmer # an approximate method of stemming words stemmer = PorterStemmer() from nltk import word_tokenize, sent_tokenize # widely used text tokenizer import pandas as pd # modifies data more efficiently than with a list of dicts from tqdm import tqdm # For progress information over iterations, including with Pandas operations via "progress_apply" # ### Set script options Debug = False # Set to "True" for extra progress reports while algorithms run notebook = False # Use different file paths depending on whether files are being accessed from shell (False) or within a Jupyter notebook (True) usefile = False # Set to "True" if loading from file a dicts_list to add to. Confirms with user input first! workstation = False # If working from office PC if notebook: usefile = False # Prompting user for input file is only useful in command-line inline_tags = ["b", "big", "i", "small", "tt", "abbr", "acronym", "cite", "dfn", "em", "kbd", "strong", "samp", "var", "bdo", "map", "object", "q", "span", "sub", "sup"] # this list helps with eliminating junk tags when parsing HTML # ### Set directories if workstation and notebook: dir_prefix = "C:\\Users\\Jaren\\Documents\\" # One level further down than the others elif notebook: dir_prefix = "/home/jovyan/work/" else: dir_prefix = "/vol_b/data/" example_page = "https://westlakecharter.com/about/" example_schoolname = "TWENTY-FIRST_CENTURY_NM" save_dir = dir_prefix + "Charter-school-identities" + os.sep + "data" + os.sep # Directory in which to save data files dicts_dir = dir_prefix + "Charter-school-identities" + os.sep + "dicts" + os.sep # Directory in which to find & save dictionary files temp_dir = save_dir + "temp" + os.sep # Directory in which to save temporary data files micro_sample13 = save_dir + "micro-sample13_coded.csv" # Random micro-sample of 300 US charter schools URL_schooldata = save_dir + "charter_URLs_2014.csv" # 2014 population of 6,973 US charter schools full_schooldata = save_dir + "charter_merged_2014.csv" # Above merged with PVI, EdFacts, year opened/closed temp_data = save_dir + "school_parser_temp.json" # Full_schooldata dict with output for some schools example_file = save_dir + "example_file.html" #example_folder + "21stcenturypa.com/wp/default?page_id=27.tmp.html" if not workstation and not notebook: wget_dataloc = dir_prefix + "wget/parll_wget/" #data location for schools downloaded with wget in parallel (requires server access) example_folder = wget_dataloc + "TWENTY-FIRST_CENTURY_NM/" # Random charter school folder example_file = dir_prefix + "wget/example_file.html" #example_folder + "21stcenturypa.com/wp/default?page_id=27.tmp.html" data_year = int(2014) # Set logging options log_file = temp_dir + "data_prep_" + str(datetime.today()) + ".log" 
logging.basicConfig(filename=log_file,level=logging.INFO) # Set input file, if any if usefile and not notebook: print("\nWould you like to load from file a list of dictionaries to add to? (Y/N)") answer = input() if answer == "Y": print("Please indicate file path for dictionary list file.") answer2 = input() if os.path.exists(answer2): input_file = answer2 usefile = True else: print("Invalid file path" + str(answer2) + " \nAborting script.") sys.exit() elif answer == "N": print("OK! This script will create a new data file at " + str(save_dir) + ".") usefile = False else: print("Error: " + str(answer) + " not an interpretable response. Aborting script.") sys.exit() # ### Define (non-parsing) helper functions def get_vars(data): """Defines variable names based on the data source called.""" if data==URL_schooldata: URL_variable = "TRUE_URL" NAME_variable = "SCH_NAME" ADDR_variable = "ADDRESS" elif data==full_schooldata: URL_variable = "SCH_NAME" # Work-around until URLs merged into full data file NAME_variable = "SCH_NAME" ADDR_variable = "ADDRESS14" elif data==micro_sample13: URL_variable = "URL" NAME_variable = "SCHNAM" ADDR_variable = "ADDRESS" else: try: print("Error processing variables from data file " + str(data) + "!") except Exception as e: print("ERROR: No data source established!\n") print(e) return(URL_variable,NAME_variable,ADDR_variable) def tag_visible(element): """Returns false if a web element has a non-visible tag, i.e. one site visitors wouldn't actually read--and thus one we don't want to parse""" if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']: return False if isinstance(element, Comment): return False return True def webtext_from_files(datalocation): """Concatenate and return a single string from all webtext (with .txt format) in datalocation""" string = "" for root, dirs, files in os.walk(datalocation): for file in files: if file.endswith(".txt"): fileloc = open(datalocation+file, "r") string = string + (fileloc.read()) return string def remove_spaces(file_path): """Remove spaces from text file at file_path""" words = [x for x in open(file_path).read().split() if x != ""] text = "" for word in words: text += word + " " return text def write_errors(error_file, error1, error2, error3, file_count): """Writes to error_file three binary error flags derived from parse_school(): duplicate_flag, parse_error_flag, wget_fail_flag, and file_count.""" with open(error_file, 'w') as file_handler: file_handler.write("duplicate_flag {}\n".format(int(error1))) file_handler.write("parse_error_flag {}\n".format(int(error2))) file_handler.write("wget_fail_flag {}\n".format(int(error3))) file_handler.write("file_count {}".format(int(file_count))) return def write_counts(file_path, names_list, counts_list): """Writes to file_path the input dict_count names (a list) and counts (another list). Assumes these two lists have same length and are in same order-- e.g., names_list[0]="ess_count" and counts_list[0]=ess_count.""" with open(file_path, 'w') as file_handler: for tup in zip(names_list,counts_list): # iterate over zipped list of tuples if tup != list(zip(names_list,counts_list))[-1]: file_handler.write("{} {}\n".format(tup[0],tup[1])) else: file_handler.write("{} {}".format(tup[0],tup[1])) return def write_list(file_path, textlist): """Writes textlist to file_path. 
Useful for recording output of parse_school().""" with open(file_path, 'w') as file_handler: for elem in textlist: file_handler.write("{}\n".format(elem)) return def load_list(file_path): """Loads list into memory. Must be assigned to object.""" textlist = [] with open(file_path) as file_handler: line = file_handler.readline() while line: textlist.append(line) line = file_handler.readline() return textlist def save_datafile(data, file, thismode): """BROKEN for saving to CSV Pandas DataFrames (only saves header) and lists of dicts (only saves keys). Saves data to file using JSON, pickle, or CSV format (whichever was specified). Works with Pandas DataFrames or other objects, e.g. a list of dictionaries. Deletes file first to reduce risk of data duplication.""" file = str(file) thismode = str(thismode) try: if os.path.exists(file): os.remove(file) # Delete file first to reduce risk of data duplication else: pass if thismode.upper()=="JSON" or thismode.upper()==".JSON": if not file.endswith(".json"): file += ".json" if type(data)=="pandas.core.frame.DataFrame": data.to_json(file) else: with open(file, 'w') as outfile: json.dump(data, outfile, encoding="utf-8") #print("Data saved to " + file + "!") elif thismode.lower()=="pickle" or thismode.lower()==".pickle": if not file.endswith(".pickle"): file += ".pickle" if type(data)=="pandas.core.frame.DataFrame": data.to_pickle(file, encoding="utf-8") else: with open(file, "wb") as outfile: pickle.dump(data, outfile, encoding="utf-8") #print("Data saved to " + file + "!") elif thismode.upper()=="CSV" or thismode.upper()==".CSV": if not file.endswith(".csv"): file += ".csv" if type(data)=="pandas.core.frame.DataFrame": if os.path.exists(file): # If file already exists, assume we are appending to it (with same column names) data.to_csv(file,mode="a",index=False,sep="\t",header=False,encoding="utf-8") else: # If file doesn't exist, create it data.to_csv(file,mode="w",index=False,sep="\t",header=data.columns.values,encoding="utf-8") else: with open(file, "w") as outfile: wr = csv.writer(outfile) wr.writerows(data) #print("Data saved to " + file + "!") else: print("ERROR! Improper arguments. Please include: data object to save (Pandas DataFrames OK), file path, and file format ('JSON', 'pickle', or 'CSV').") except Exception as e: print("Failed to save to " + str(file) + " into memory using " + str(thismode) + " format. Please check arguments (data, file, file format) and try again.") print(e) def load_datafile(file): """Loads dicts_list (or whatever) from file, using either JSON or pickle format. The created object should be assigned when called.""" file = str(file) if file.lower().endswith(".json"): with open(file,'r') as infile: var = json.load(infile) if file.lower().endswith(".pickle"): with open(file,'rb') as infile: var = pickle.load(infile) print(file + " successfully loaded!") return var def load_dict(custom_dict, file_path): """Loads in a dictionary. Adds each entry from the dict at file_path to the defined set custom_dict (the input), which can also be an existing dictionary. 
This allows the creation of combined dictionaries!""" with open(file_path) as file_handler: line = file_handler.readline() while line: custom_dict.add(stemmer.stem(line.replace("\n", ""))) # Add line after stemming dictionary entries and eliminating newlines line = file_handler.readline() # Look for anything else in that line, add that too return custom_dict def list_files(folder_path, extension): """Outputs a list of every file in folder_path or its subdirectories that has a specified extension. Prepends specified extension with '.' if it doesn't start with it already. If no extension is specified, it just returns all files in folder_path.""" matches = [] if extension: extension = str(extension) # Coerce to string, just in case if extension and not extension.startswith("."): extension = "." + extension for dirpath,dirnames,filenames in os.walk(folder_path): if extension: for filename in fnmatch.filter(filenames, "*" + extension): # Use extension to filter list of files matches.append(os.path.join(dirpath,filename)) else: matches.append(os.path.join(dirpath,filename)) # If no extension, just take all files return matches def has_html(folder_path): """Simple function that counts .html files and returns a binary: 'True' if a specified folder has any .html files in it, 'False' otherwise.""" html_list = [] for dirpath,dirnames,filenames in os.walk(folder_path): for file in fnmatch.filter(filenames, "*.html"): # Check if any HTML files in folder_path html_list.append(file) if len(html_list)==0: return False else: return True def convert_df(df): """Makes a Pandas DataFrame more memory-efficient through intelligent use of Pandas data types: specifically, by storing columns with repetitive Python strings not with the object dtype for unique values (entirely stored in memory) but as categoricals, which are represented by repeated integer values. This is a net gain in memory when the reduced memory size of the category type outweighs the added memory cost of storing one more thing. 
As such, this function checks the degree of redundancy for a given column before converting it.""" converted_df = pd.DataFrame() # Initialize DF for memory-efficient storage of strings (object types) # TO DO: Infer dtypes of df df_obj = df.select_dtypes(include=['object']).copy() # Filter to only those columns of object data type for col in df.columns: if col in df_obj: num_unique_values = len(df_obj[col].unique()) num_total_values = len(df_obj[col]) if (num_unique_values / num_total_values) < 0.5: # Only convert data types if at least half of values are duplicates converted_df.loc[:,col] = df[col].astype('category') # Store these columns as dtype "category" else: converted_df.loc[:,col] = df[col] else: converted_df.loc[:,col] = df[col] converted_df.select_dtypes(include=['float']).apply(pd.to_numeric,downcast='float') converted_df.select_dtypes(include=['int']).apply(pd.to_numeric,downcast='signed') return converted_df # ### Set parsing keywords keywords = ['values', 'academics', 'skills', 'purpose', 'direction', 'mission', 'vision', 'vision', 'mission', 'our purpose', 'our ideals', 'ideals', 'our cause', 'curriculum','curricular', 'method', 'pedagogy', 'pedagogical', 'approach', 'model', 'system', 'structure','philosophy', 'philosophical', 'beliefs', 'believe', 'principles', 'creed', 'credo', 'values','moral', 'history', 'our story', 'the story', 'school story', 'background', 'founding', 'founded', 'established','establishment', 'our school began', 'we began', 'doors opened', 'school opened', 'about us', 'our school', 'who we are', 'our identity', 'profile', 'highlights'] mission_keywords = ['mission','vision', 'vision:', 'mission:', 'our purpose', 'our ideals', 'ideals:', 'our cause', 'cause:', 'goals', 'objective'] curriculum_keywords = ['curriculum', 'curricular', 'program', 'method', 'pedagogy', 'pedagogical', 'approach', 'model', 'system', 'structure'] philosophy_keywords = ['philosophy', 'philosophical', 'beliefs', 'believe', 'principles', 'creed', 'credo', 'value', 'moral'] history_keywords = ['history', 'story','our story', 'the story', 'school story', 'background', 'founding', 'founded', 'established', 'establishment', 'our school began', 'we began', 'doors opened', 'school opened'] about_keywords = ['about us', 'our school', 'who we are', 'overview', 'general information', 'our identity', 'profile', 'highlights'] mission_keywords = set(stemmer.stem(word) for word in mission_keywords) curriculum_keywords = set(stemmer.stem(word) for word in curriculum_keywords) philosophy_keywords = set(stemmer.stem(word) for word in philosophy_keywords) history_keywords = set(stemmer.stem(word) for word in history_keywords) about_keywords = set(stemmer.stem(word) for word in about_keywords) all_keywords = set(stemmer.stem(key) for key in keywords) logging.info("List of keywords:\n" + str(list(all_keywords))) # ### Create dictionaries for each ideology and one for combined ideologies ess_dict, prog_dict, rit_dict, all_ideol, all_dicts = set(), set(), set(), set(), set() all_ideol = load_dict(all_ideol, dicts_dir + "ess_dict.txt") all_ideol = load_dict(all_ideol, dicts_dir + "prog_dict.txt") # For complete ideological list, append second ideological dict all_dicts = load_dict(all_ideol, dicts_dir + "rit_dict.txt") # For complete dict list, append ritual dict terms too ess_dict = load_dict(ess_dict, dicts_dir + "ess_dict.txt") prog_dict = load_dict(prog_dict, dicts_dir + "prog_dict.txt") rit_dict = load_dict(rit_dict, dicts_dir + "rit_dict.txt") logging.info(str(len(all_ideol)) + " entries loaded 
into the combined ideology dictionary.") list_dict = list(all_ideol) list_dict.sort(key = lambda x: x.lower()) logging.info("First 10 elements of combined ideology dictionary are:\n" + str(list_dict[:10])) # Create tuples for keyword lists and dictionary terms: keys_tuple = tuple([mission_keywords,curriculum_keywords,philosophy_keywords,history_keywords,about_keywords,\ all_ideol,all_keywords]) dicts_tuple = tuple([ess_dict,prog_dict,rit_dict,all_dicts]) logging.info(str(list(keys_tuple))) logging.info(str(list(dicts_tuple))) # ### Define dictionary matching helper functions def dict_count(text_list, custom_dict): """Performs dictionary analysis, returning number of dictionary hits found. Removes punctuation and stems the phrase being analyzed. Compatible with multiple-word dictionary elements.""" counts = 0 # number of matches between text_list and custom_dict dictless_list = [] # Updated text_list with dictionary hits removed max_entry_length = max([len(entry.split()) for entry in custom_dict]) # Get length (in words) of longest entry in combined dictionary for chunk in text_list: # chunk may be several sentences or possibly paragraphs long chunk = re.sub(r'[^\w\s]', '', chunk) # Remove punctuation with regex that keeps only letters and spaces # Do dictionary analysis for word chunks of lengths max_entry_length down to 1, removing matches each time. # This means longer dict entries will get removed first, useful in case they contain smaller entries. for length in range(max_entry_length, 0, -1): dictless_chunk,len_counts = dict_match_len(chunk,custom_dict,length) dictless_list.append(dictless_chunk) counts += len_counts return dictless_list,int(counts) def dict_match_len(phrase, custom_dict, length): """Helper function to dict_match. Returns # dictionary hits and updated copy of phrase with dictionary hits removed. Stems phrases before checking for matches.""" hits_indices, counts = [], 0 splitted_phrase = phrase.split() if len(splitted_phrase) < length: return phrase, 0 # If text chunk is shorter than length of dict entries being matched, don't continue. for i in range(len(splitted_phrase) - length + 1): to_stem = "" for j in range(length): to_stem += splitted_phrase[i+j] + " " # Builds chunk of 'length' words stemmed_word = stemmer.stem(to_stem[:-1]) # stem chunk if stemmed_word in custom_dict: hits_indices.append(i) # Store the index of the word that has a dictionary hit counts += 1 logging.info(stemmed_word) # Iterate through list of matching word indices and remove the matches for i in range(len(hits_indices)-1, -1, -1): splitted_phrase = splitted_phrase[:hits_indices[i]] + \ splitted_phrase[hits_indices[i] + length:] modified_phrase = "" for sp in splitted_phrase: # Rebuild the modified phrase, with matches removed modified_phrase += sp + " " return modified_phrase[:-1], counts @timeout_decorator.timeout(20, use_signals=False) def dictmatch_file_helper(file,dictsnames_biglist,all_keywords,all_ideol,all_matches): """Counts number of matches in file for each list of terms given, and also collects the terms not matched. 
Dictsnames_biglist is a list of lists, each list containing: a list of key terms, currently essentialism, progressivism, ritualism, and all three combined (ess_dict, prog_dict, rit_dict, all_dicts); the variables used to store the number of matches for each term lit (ess_count, prog_count, rit_count, alldict_count); and the not-matches--that is, the list of words leftover from the file after all matches are removed (ess_dictless, prog_dictless, rit_dictless, alldict_dictless). """ for i in range(len(dictsnames_biglist)): # Iterate over dicts to find matches with parsed text of file # Dicts are: (ess_dict, prog_dict, rit_dict, alldict_count); count_names are: (ess_count, prog_count, rit_count, alldict_count); dictless_names are: (ess_dictless, prog_dictless, rit_dictless, alldict_dictless) # adict,count_name,dictless_name = dictsnames_tupzip[i] dictless_add,count_add = dict_count(parsed_pagetext,dictsnames_biglist[i][0]) dictsnames_biglist[i][1] += count_add dictsnames_biglist[i][2] += dictless_add all_matches += count_add logging.info("Discovered " + str(count_add) + " matches for " + str(file) + ", a total thus far of " + str(dictsnames_biglist[i][1]) + " matches...") return dictsnames_biglist,all_matches # ### Define parsing helper functions @timeout_decorator.timeout(20, use_signals=False) def parse_file_helper(file,webtext,keywords_text,ideology_text): """Parses file into (visible) webtext, both complete and filtered by terms in 'keywords' and 'ideology' lists.""" parsed_pagetext = [] parsed_pagetext = parsefile_by_tags(file) # Parse page text if len(parsed_pagetext) == 0: # Don't waste time adding empty pages logging.warning(" Nothing to parse in " + str(file) + "!") else: webtext.extend(parsed_pagetext) # Add new parsed text to long list keywords_text.extend(filter_dict_page(parsed_pagetext, all_keywords)) # Filter using keywords ideology_text.extend(filter_dict_page(parsed_pagetext, all_ideol)) # Filter using ideology words logging.info("Successfully parsed and filtered file " + str(file) + "...") return webtext,keywords_text,ideology_text def filter_dict_page(pagetext_list, keyslist): """Filters webtext of a given .html page, which is parsed and in list format, to only those strings within pagetext_list containing an element (word or words) of inputted keyslist. Returns list filteredtext wherein each element has original case (not coerced to lower-case).""" filteredtext = [] # Initialize empty list to hold strings of page for string in pagetext_list: lowercasestring = str(string).lower() # lower-case string... dict_list = [key.lower() for key in list(keyslist)] # ...compared with lower-case element of keyslist for key in dict_list: if key in lowercasestring and key in lowercasestring.split(' '): # Check that the word is the whole word not part of another one filteredtext.append(string) return filteredtext #logging.info("Output of filter_keywords_page with keywords:\n" + str(filter_dict_page(example_textlist, all_keywords))) #logging.info("Output of filter_keywords_page with ideology words:\n\n" + str(filter_dict_page(example_textlist, all_ideol))) def dictify_webtext(school_dict): """OBSOLETE. Kept here for purposes of comparison. 
Reads parsing output from text files and saves to school_dict multiple parsing outputs: webtext, keywords_text, ideology_text, file_count, etc.""" # Allow function to access these variables already defined outside the function (globally) global itervar,numschools,parsed,wget_dataloc,URL_var,NAME_var,ADDR_var,save_dir datalocation = wget_dataloc # Define path to local data storage school_name, school_address, school_URL = school_dict[NAME_var], school_dict[ADDR_var], school_dict[URL_var] # Define varnames itervar+=1 # Count this school print("Loading into dict parsing output for " + str(school_name) + ", which is school #" + str(itervar) + " of " + str(numschools) + "...") school_dict["webtext"], school_dict["keywords_text"], school_dict["ideology_text"] = [[] for _ in range(3)] school_dict["duplicate_flag"], school_dict["parse_error_flag"], school_dict["wget_fail_flag"] = [0 for _ in range(3)] school_dict['ess_strength'],school_dict['prog_strength'] = [0.0 for _ in range(2)] folder_name = re.sub(" ","_",(school_dict[NAME_var]+" "+school_dict[ADDR_var][-8:-6])) # This gives name and state separated by "_" school_folder = datalocation + folder_name + "/" error_file = school_folder + "error_flags.txt" # Define file path for error text log if school_URL==school_name: school_URL = folder_name # Workaround for full_schooldata, which doesn't yet have URLs # Check if folder exists. If not, exit function if not (os.path.exists(school_folder) or os.path.exists(school_folder.lower()) or os.path.exists(school_folder.upper())): print(" !! NO DIRECTORY FOUND matching " + str(school_folder) + ". Aborting dictify function...") school_dict['wget_fail_flag'] = 1 return try: # Load school parse output from disk into dictionary school_dict["webtext"] = load_list(school_folder + "webtext.txt") school_dict["keywords_text"] = load_list(school_folder + "keywords_text.txt") school_dict["ideology_text"] = load_list(school_folder + "ideology_text.txt") """ # Comment out until dict_count is run school_dict["ess_count"] = load_list(school_folder + "ess_count.txt") school_dict["prog_count"] = load_list(school_folder + "prog_count.txt") school_dict["rit_count"] = load_list(school_folder + "rit_count.txt") school_dict['ess_strength'] = float(school_dict['ess_count'])/float(school_dict['rit_count']) school_dict['prog_strength'] = float(school_dict['prog_count'])/float(school_dict['rit_count']) """ # load error_file as a list with four pieces, the last element of each of which is the flag value itself: error_text = load_list(error_file) school_dict["duplicate_flag"] = error_text[0].split()[-1] # last element of first piece of error_text school_dict["parse_error_flag"] = error_text[1].split()[-1] school_dict["wget_fail_flag"] = error_text[2].split()[-1] school_dict["html_file_count"] = error_text[3].split()[-1] if int(school_dict["html_file_count"])==0: school_dict["wget_fail_flag"] = 1 # If no HTML, then web download failed! print(" LOADED " + school_dict["html_file_count"] + " .html file(s) from website of " + str(school_name) + "...") #save_datafile(dicts_list, save_dir+"school_parser_temp", "JSON") # Save output so we can pick up where left off, in case something breaks before able to save final output return school_dict except Exception as e: print(" ERROR! 
Failed to load into dict parsing output for " + str(school_name)) print(" ",e) school_dict["parse_error_flag"] = 1 return def pandify_webtext(df): """Reads parsing output from text files and saves to DataFrame df multiple parsing outputs: webtext, keywords_text, ideology_text, file_count, dict_count outputs, etc.""" # Allow function to access these variables already defined outside the function (globally) global numschools,wget_dataloc,save_dir,NAME_var,ADDR_var,URL_var datalocation = wget_dataloc # Define path to local data storage #logging.info("Loading into DataFrame parsing output for " + str(len(df)) + " school websites out of a total of " + str(numschools) + "...") # Initialize text strings and counts as empty, then convert data types: empty = ["" for elem in range(len(df["NCESSCH"]))] # Create empty string column that is as long as the longest variable (NCESCCH used for matching) df = df.assign(word_count=empty, chunk_count=empty, FOLDER_NAME=empty, TOTETH=empty, PCTETH=empty, AGE=empty, PCTFRL=empty, PLACE=empty, WEBTEXT=empty, PROG_TEXT=empty, ESS_TEXT=empty, RIT_TEXT=empty, IDEOLOGY_TEXT=empty, MISSION_TEXT=empty, CURR_TEXT=empty, PHIL_TEXT=empty, HIST_TEXT=empty, ABOUT_TEXT=empty, KEYWORDS_TEXT=empty, ESS_COUNT=empty, PROG_COUNT=empty, RIT_COUNT=empty, ESS_STR=empty, PROG_STR=empty, IDDIFF_STR=empty, IDDIFF_STRLOG=empty ESS_PCT=empty, PROG_PCT=empty, IDDIFF_PCT=empty, IDDIFF_PCTLOG=empty) # Add empty columns to df df.loc[:,["PLACE", "WEBTEXT", "PROG_TEXT", "ESS_TEXT", "RIT_TEXT", "IDEOLOGY_TEXT", "MISSION_TEXT", "CURR_TEXT", "PHIL_TEXT", "HIST_TEXT", "ABOUT_TEXT", "KEYWORDS_TEXT", "FOLDER_NAME"]] = df.loc[:,["PLACE", "WEBTEXT", "PROG_TEXT", "ESS_TEXT", "RIT_TEXT", "IDEOLOGY_TEXT", "MISSION_TEXT", "CURR_TEXT", "PHIL_TEXT", "HIST_TEXT", "ABOUT_TEXT", "KEYWORDS_TEXT", "FOLDER_NAME"]].apply(lambda x: x.astype(object)) # Convert to object type--holds text df.loc[:,["word_count", "chunk_count", "AGE", "TOTETH", "ESS_COUNT", "PROG_COUNT", "RIT_COUNT"]] = df.loc[:,["word_count", "chunk_count", "AGE", "TOTETH", "ESS_COUNT", "PROG_COUNT", "RIT_COUNT"]].apply(pd.to_numeric, downcast="unsigned") # Convert to int dtype--holds positive numbers (no decimals) df.loc[:,["PCTETH", "PCTFRL", "ESS_STR", "PROG_STR", "IDDIFF_STR", "IDDIFF_STRLOG", "ESS_PCT", "PROG_PCT", "IDDIFF_PCT", "IDDIFF_PCTLOG"]] = df.loc[:,["PCTETH", "PCTFRL", "ESS_STR", "PROG_STR", "IDDIFF_STR", "IDDIFF_PCTLOG", "ESS_PCT", "PROG_PCT", "IDDIFF_PCT", "IDDIFF_PCTLOG"]].apply(pd.to_numeric, downcast="float") # Use most efficient float type for these vars--hold decimals df.loc[:,"FOLDER_NAME"] = df.loc[:,[NAME_var,ADDR_var]].apply(lambda x: re.sub(" ","_","{} {}".format(str(x[0]),str(x[1][-8:-6]))), axis=1) # This gives name and state separated by "_" df.loc[:,"school_folder"] = df.loc[:,"FOLDER_NAME"].apply(lambda x: str(datalocation) + '{}/'.format(str(x))) df.loc[:,"error_file"] = df.loc[:,"school_folder"].apply(lambda x: '{}error_flags.txt'.format(str(x))) # Define file path for error text log df.loc[:,"counts_file"] = df.loc[:,"school_folder"].apply(lambda x: '{}dict_counts.txt'.format(str(x))) try: # Compute demographic variables: df["TOTETH"] = df[["AM", "AS", "BL", "HI", "HP", "TR"]].apply(sum, axis=1) # Number of nonwhite K-12 students df["PCTETH"] = (df["TOTETH"]/df["MEMBER"]).apply(pd.to_numeric, downcast='float') # Percent nonwhite K-12 students df["PCTFRL"] = (df["TOTFRL"]/df["MEMBER"]).apply(pd.to_numeric, downcast='float') # Percent receiving free/ reduced-price lunch df["AGE"] = data_year - df["YEAR_OPENED"] # 
Number of years school has been open # Recode variables: df["PLACE"] = df["LOCALE"].map({11.0:"City", 12.0:"City", 13.0:"City", 21.0:"Suburb", 22.0:"Suburb", 23.0:"Suburb", 31.0:"Town", 32.0:"Town", 33.0:"Town", 41.0:"Rural", 42.0:"Rural", 43.0:"Rural"}).astype('category') df["LOCALE"] = df["LOCALE"].map({11.0:"City (large)", 12.0:"City (midsize)", 13.0:"City (small)", 21.0:"Suburb (large)", 22.0:"Suburb (midsize)", 23.0:"Suburb (small)", 31.0:"Town (fringe)", 32.0:"Town (distant)", 33.0:"Town (remote)", 41.0:"Rural (fringe)", 42.0:"Rural (distant)", 43.0:"Rural (remote)"}).astype('category') df["TITLEI"] = df["TITLEI"].map({"Yes":1, "No":0}).astype('category') # load error_file as a list with four pieces, the last element of each of which is the flag value itself: df.loc[:,"error_text"] = df.loc[:,"error_file"].apply(lambda x: load_list('{}'.format(str(x)))) df.loc[:,"duplicate_flag"] = df.loc[:,"error_text"].apply(lambda x: '{}'.format(str(x[0].split()[-1]))) # # last element of first piece of error_text df.loc[:,"parse_error_flag"] = df.loc[:,"error_text"].apply(lambda x: '{}'.format(str(x[1].split()[-1]))) df.loc[:,"wget_fail_flag"] = df.loc[:,"error_text"].apply(lambda x: '{}'.format(str(x[2].split()[-1]))) df.loc[:,"html_file_count"] = df.loc[:,"error_text"].apply(lambda x: '{}'.format(str(x[3].split()[-1]))) downloaded = df["wget_fail_flag"].map({"1":True,1:True,"0":False,0:False}) == False # This binary conditional filters df to only those rows with downloaded web content--where wget_fail_flag==False and thus does NOT signal download failure logging.info("Loading webtext from disk into DF...") # For reference from webparser_mp.py: # keysfiles_list = ["mission_text.txt","curr_text.txt","phil_text.txt","hist_text.txt","about_text.txt","allkeys_text.txt"] # dictsfiles_list = ["prog_text.txt","ess_text.txt","rit_text.txt","allideol_text.txt","alldicts_text.txt"] # Load school parse output from disk into DataFrame: df.loc[downloaded,"WEBTEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}webtext.txt".format(str(x)))) # df["wget_fail_flag"]==False df.loc[downloaded,"PROG_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}prog_text.txt".format(str(x)))) df.loc[downloaded,"ESS_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}ess_text.txt".format(str(x)))) df.loc[downloaded,"RIT_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}rit_text.txt".format(str(x)))) df.loc[downloaded,"IDEOLOGY_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}allideol_text.txt".format(str(x)))) df.loc[downloaded,"MISSION_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}mission_text.txt".format(str(x)))) df.loc[downloaded,"CURR_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}curr_text.txt".format(str(x)))) df.loc[downloaded,"PHIL_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}phil_text.txt".format(str(x)))) df.loc[downloaded,"HIST_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}hist_text.txt".format(str(x)))) df.loc[downloaded,"ABOUT_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}about_text.txt".format(str(x)))) df.loc[downloaded,"KEYWORDS_TEXT"] = df.loc[downloaded,"school_folder"].apply(lambda x: load_list("{}allkeys_text.txt".format(str(x)))) df.loc[downloaded,"word_count"] = df.loc[downloaded, "WEBTEXT"].apply(lambda x: sum(map(len, map(word_tokenize, x)))) 
df.loc[downloaded,"chunk_count"] = df.loc[downloaded, "WEBTEXT"].apply(lambda x: len(x)) df["counts_text"] = df.counts_file.apply(lambda x: load_list("{}".format(str(x)))) df.loc[downloaded,"ESS_COUNT"] = df.loc[downloaded,"counts_text"].apply(lambda x: "{}".format(str(x[0].split()[-1]))).apply(pd.to_numeric,downcast='unsigned') # 2nd element of 1st row in counts_text: take as uint dtype (no negatives) df.loc[downloaded,"PROG_COUNT"] = df.loc[downloaded,"counts_text"].apply(lambda x: "{}".format(str(x[1].split()[-1]))).apply(pd.to_numeric,downcast='unsigned') # 2nd element of 2nd row df.loc[downloaded,"RIT_COUNT"] = df.loc[downloaded,"counts_text"].apply(lambda x: "{}".format(str(x[2].split()[-1]))).apply(pd.to_numeric,downcast='unsigned') # 2nd element of 3nd row df.loc[downloaded,"ESS_STR"] = (df.loc[downloaded,"ESS_COUNT"]/df.loc[downloaded, "RIT_COUNT"]).apply(pd.to_numeric, downcast='float') # calculate ideology ratio, use most memory-efficient float dtype df.loc[downloaded,"PROG_STR"] = (df.loc[downloaded,"PROG_COUNT"]/df.loc[downloaded, "RIT_COUNT"]).apply(pd.to_numeric, downcast='float') df.loc[downloaded,"IDDIFF_STR"] = (df.loc[downloaded,"PROG_STR"] - df.loc[downloaded,"ESS_STR"]).apply(pd.to_numeric, downcast='float') df.loc[downloaded,"IDDIFF_STRLOG"] = (df.loc[downloaded,"PROG_STR"].apply(log10) - df.loc[downloaded,"ESS_STR"].apply(log10)).apply(pd.to_numeric, downcast='float') df.loc[downloaded,"ESS_PCT"] = (df.loc[downloaded,"ESS_COUNT"]/df.loc[downloaded, "word_count"]).apply(pd.to_numeric, downcast='float') # calculate ideology ratio, use most memory-efficient float dtype df.loc[downloaded,"PROG_PCT"] = (df.loc[downloaded,"PROG_COUNT"]/df.loc[downloaded, "word_count"]).apply(pd.to_numeric, downcast='float') df.loc[downloaded,"IDDIFF_PCT"] = (df.loc[downloaded,"PROG_PCT"] - df.loc[downloaded,"ESS_PCT"]).apply(pd.to_numeric, downcast='float') df.loc[downloaded,"IDDIFF_PCTLOG"] = (df.loc[downloaded,"PROG_PCT"].apply(log10) - df.loc[downloaded,"ESS_PCT"].apply(log10)).apply(pd.to_numeric, downcast='float') df = df.drop(["school_folder","error_text","error_file","counts_text", "AM", "AS", "BL", "HI", "HP"],axis=1) # Clean up temp variables logging.info("LOADED " + df["html_file_count"].sum() + " .html files into DataFrame!") #save_datafile(df, save_dir+"df_parser_temp", "pickle") # Save output so we can pick up where left off, in case something breaks before able to save final output return df except Exception as e: logging.critical("ERROR! Pandify function failed to load parsing output into DataFrame.\n" + str(e)) print(" ERROR! Pandify function failed to load parsing output into DataFrame.") print(" ",str(e)) sys.exit() def slice_pandify(bigdf_iter, numsplits, df_filepath): """This function uses pandify_webtext() to load the parsing output from local storage into a DataFrame. It gets around system memory limitations--which otherwise lead terminal to kill any attempts to pandify() all of bigdf-- by splitting bigdf into numsplits smaller dfslices, parsing webtext into each slice, and recombining them by appending them to a big CSV on file. The number of slices equals numsplits, and bigdf is split by numschools/ numsplits.""" global numschools # Access numschools from within function (this is roughly 7000) wheresplit = int(round(float(numschools)/float(numsplits))) # Get number on which to split (e.g., 1000) based on total number of schools data. 
This splitting number will be used to iterate over numsplits logging.info("Splitting on the number " + str(wheresplit)) for num in tqdm(range(numsplits), desc="Loading " + str(numsplits) + " DF slices"): # Wrap iterator with tqdm to show progress bar startnum, endnum = wheresplit*int(num),wheresplit*int(num+1) try: dfslice = pd.DataFrame() dfslice = bigdf_iter.get_chunk(wheresplit) # Get next chunk of rows #logging.info(str(dfslice.keys())) #logging.info(str(dfslice.info())) dfslice = dfslice[dfslice.ADDRESS14 != 'ADDRESS14'] # Clean out any cases of header being written as row #dfslice = bigdf_iter.iloc[startnum:endnum,:] #print("Loading DF parsing output for slice #" + str(num) + " of " + str(numschools) + " school websites, from #" + str(startnum) + "-" + str(endnum) + "...") logging.info("Loading parsing output for slice #" + str(num) + " of " + str(numschools) + " school websites, from #" + str(startnum) + "-" + str(endnum) + "...") '''if num==0: # Save first slice to new file (overwriting if needed) dfslice = pandify_webtext(dfslice) # Load parsed output into the DF logging.info("Slice #" + str(num) + " loaded! Saving file...") dfslice.to_csv(df_filepath, mode="w", index=False, header=dfslice.columns.values, sep="\t", encoding="utf-8") print("Slice #" + str(num) + " saved to " + df_filepath + "!") logging.info("Slice #" + str(num) + " saved to " + df_filepath + "!") if num<20: pass elif num==20: dfslice = pandify_webtext(dfslice) # Load parsed output into the DF logging.info(dfslice[["SCH_NAME", "FOLDER_NAME", "html_file_count"]]) print(dfslice[["SCH_NAME", "FOLDER_NAME", "html_file_count"]]) sys.exit() else: dfslice = pandify_webtext(dfslice) # Load parsed output into the DF logging.info("Slice loaded! Saving file...") dfslice.to_csv(df_filepath, mode="a", index=False, header=False, sep="\t", encoding="utf-8") print("Slice #" + str(num) + " saved to " + df_filepath + "!") logging.info("Slice #" + str(num) + " saved to " + df_filepath + "!")''' # Skip sites that present parsing problems--namely #3361, which has 10K+ html pages and is in /vol_b/data/wget/parll_wget/Gentilly_Terrace_Elementary_School_LA/www.brothermartin.com if num==3361: #(284 or 441 or 593 or 594 or 595 or 596 or 1159 or 1218 or 1219 or 1271 or 1297 or 1303 or 1667 or 1861 or 3361 or 4467 or 4836 or 4871 or 4910 or 5418): # or num==441 or num==593: # Skip Primavera_-_Online_AZ', which is slice #284 if numsplits = 6752 continue # Move on to next slice # TO DO: Clean out excess HTML (e.g., blog posts) in wget downloads for these schools dfslice = convert_df(dfslice) # Make this DF as memory-efficient as possible by appropriately converting column dtypes dfslice = pandify_webtext(dfslice) # Load parsed output into the DF logging.info(dfslice[["FOLDER_NAME", "html_file_count"]]) logging.info("Slice #" + str(num) + " loaded! Saving file...") if num==0: # Save first slice to new file (overwriting if needed) dfslice.to_csv(df_filepath, mode="w", index=False, header=dfslice.columns.values, sep="\t", encoding="utf-8") else: # Append next slice to existing file dfslice.to_csv(df_filepath, mode="a", index=False, header=False, sep="\t", encoding="utf-8") #save_datafile(dfslice,df_filepath,"CSV") # BROKEN function--Save slice to file--should work whether writing new file or appending to CSV logging.info("Slice #" + str(num) + " saved to " + df_filepath + "!") del dfslice # Free memory by deleting this temporary, smaller slice except Exception as e: logging.critical("\nERROR! 
Script failed to load parsing output into DataFrame slice #" + str(num) + " of " + str(numsplits) + ", for schools #" + str(startnum) + "-" + str(endnum) + ".\n" + str(e)) print(" ERROR! Script failed to load parsing output into DataFrame slice #" + str(num) + " of " + str(numsplits) + ", for schools #" + str(startnum) + "-" + str(endnum) + ".", str(e)) #sys.exit() continue return # ### Load parsing output from disk into analyzable object (Pandas DataFrame or list of dicts) data_loc = full_schooldata # assume we're running on full charter population logging.info("Data location: " + str(data_loc)) URL_var,NAME_var,ADDR_var = get_vars(data_loc) # get varnames depending on data source """# Use dictify_webtext to load the parsing output from local storage into the list of dictionaries: itervar = 0 # initialize iterator that counts number of schools already parsed--useless when multiprocessing parsed = [] # initialize list of URLs that have already been parsed dicts_list = [] # initialize list of dictionaries to hold school data # If input_file was defined by user input in beginning of script, use that to load list of dictionaries. We'll add to it! if usefile and not dicts_list: dicts_list = load_datafile(input_file) # data_loc = micro_sample13 # This seems nice for debugging--except directories don't match because different data source # Create dict list from CSV on file, with one dict per school with open(data_loc, 'r', encoding = 'Latin1') as csvfile: # open data file reader = csv.DictReader(csvfile) # create a reader for row in reader: # loop through rows dicts_list.append(row) # append each row to the list numschools = int(len(dicts_list)) # Count number of schools in list of dictionaries for school in dicts_list: try: school = dictify_webtext(school) except Exception as e: print(" ERROR! 
Failed to load into dict parsing output for " + school[NAME_var]) print(" ",e) school_dict["parse_error_flag"] = 1 continue save_datafile(dicts_list, temp_dir+"school_parser_temp", "JSON") # Save output so we can pick up where left off, in case something breaks del dicts_list # Free memory""" # Create DF from dicts_list or from file in which to store the data: #schooldf = pd.DataFrame() # initialize DataFrame to hold school data #schooldf = pd.DataFrame(dicts_list) # Convert dicts_list into a DataFrame #schooldf = pd.read_csv(temp_dir+"school_dicts_temp.csv") # Use existing file while debugging pandify_webtext() #schooldf = schooldf[schooldf.ADDRESS14 != 'ADDRESS14'] # Clean out any cases of header being written as row #schooldf = convert_df(schooldf) # Make this DF memory-efficient by converting appropriate columns to category data type with open(data_loc, "r"): # Limits memory drain numschools = int(len(pd.read_csv(data_loc, encoding = "Latin1", sep="\t"))) # Count number of schools in file splits = numschools # Number of times to slice up the big CSV schooldf_iter = pd.read_csv(data_loc, encoding = "Latin1", low_memory=False, iterator=True, chunksize=splits, na_values={"TITLEI":["M","N"]}) # Create DF from source file tqdm.pandas(desc="Loading webtext->DF") # To show progress, create & register new `tqdm` instance with `pandas` # Load parsing output into big pandas DataFrame through slices (to work with limited system memory): merged_df_file = temp_dir+"mergedf_"+str(datetime.today().strftime("%Y-%m-%d"))+".csv" # Prepare file name slice_pandify(schooldf_iter, splits, merged_df_file) # Plan B, in case slice_pandify() still doesn't work: #schooldf = pd.read_csv(data_loc, encoding = "Latin1", low_memory=False, na_values={"TITLEI":["M","N"]}) # Create DF from source file #schooldf = convert_df(schooldf) # Make this DF as memory-efficient as possible by appropriately converting column dtypes #schooldf = pandify_webtext(schooldf) # Load parsed output into the DF #newfile = "charters_parsed_" + str(datetime.today().strftime("%Y-%m-%d")) #pd.to_csv(schooldf, save_dir+newfile, mode="w", index=False, header=schooldf.columns.values, sep="\t", encoding="utf-8") print("Larger DF successfully split into " + str(splits) + " smaller DFs, parsed, combined, and saved to file!") '''if schooldf is not None: del schooldf # Free memory else: pass''' # Save final output: print("\nSCHOOL PARSING COMPLETE!!!") #schooldf = pd.read_csv(merged_df_file, sep="\t", header=0, low_memory=False, encoding="utf-8") # Load full DF so we can save it in analysis-ready format #,header=198 #schooldf = schooldf[schooldf.ADDRESS14 != 'ADDRESS14'] # Clean out any bad rows--where header is written as row #newfile = "charters_parsed_" + str(datetime.today().strftime("%Y-%m-%d")) #save_datafile(schooldf, save_dir+newfile, "csv") ``` #### File: archive/scripts/parsefile_by_tags.py ```python import os, re # for navigating file trees and working with strings import json, pickle, csv # For saving a loading dictionaries, DataFrames, lists, etc. 
# in JSON, pickle, and CSV formats
from datetime import datetime # For timestamping files
import time, timeout_decorator # To prevent troublesome files from bottlenecking the parsing process, use timeouts
import sys # For working with user input
from unicodedata import normalize # for cleaning text by converting unicode character encodings into readable format
from tqdm import tqdm # For progress information over iterations, including with Pandas operations via "progress_apply"

# Import parser
from bs4 import BeautifulSoup # BS reads and parses even poorly/unreliably coded HTML
from bs4.element import Comment # helps with detecting inline/junk tags when parsing with BS
import lxml # for fast HTML parsing with BS


def parsefile_by_tags(HTML_file):
    """Cleans HTML by removing inline tags, ripping out non-visible tags,
    replacing paragraph tags with a random string, and finally using this to separate HTML into chunks.
    Reads in HTML from storage using a given filename, HTML_file."""

    random_string = "".join(map(chr, os.urandom(75))) # Create random string for tag delimiter
    soup = BeautifulSoup(open(HTML_file), "lxml")
    inline_tags = ["b", "big", "i", "small", "tt", "abbr", "acronym", "cite", "dfn", "em", "kbd",
                   "strong", "samp", "var", "bdo", "map", "object", "q", "span", "sub", "sup"] # this list helps with eliminating junk tags when parsing HTML
    [s.extract() for s in soup(['style', 'script', 'head', 'title', 'meta', '[document]'])] # Remove non-visible tags
    for it in inline_tags:
        [s.extract() for s in soup("</" + it + ">")] # Remove inline tags
    visible_text = soup.getText(random_string).replace("\n", "") # Replace "p" tags with random string, eliminate newlines
    # Split text into list using random string while also eliminating tabs and converting unicode to readable text:
    visible_text = list(normalize("NFKC", elem.replace("\t", "")) for elem in visible_text.split(random_string))
    # TO DO: Eliminate anything with a '\x' in it (after splitting by punctuation)
    visible_text = list(filter(lambda vt: vt.split() != [], visible_text)) # Eliminate empty elements
    # Consider joining list elements together with newline in between by prepending with: "\n".join
    return(visible_text)
```
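A minimal usage sketch for `parsefile_by_tags()` (the HTML file name and the preview loop are illustrative assumptions, not part of the original script):

```python
# Illustrative only: "school_homepage.html" is a placeholder path, not a file from the repo.
chunks = parsefile_by_tags("school_homepage.html")
print(str(len(chunks)) + " visible-text chunks extracted")
for chunk in chunks[:5]:  # preview the first few chunks
    print(chunk)
```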
{ "source": "jhaber-zz/web_tools", "score": 3 }
#### File: web_tools/scrapy/scrapy_selenium.py ```python from sys import platform import csv import os import random import string import scrapy # Driver from selenium import webdriver from selenium.webdriver.common.action_chains import ActionChains # Driver Exceptions from selenium.common.exceptions import * # Parser from bs4 import BeautifulSoup from bs4.element import Comment # Display for headless mode from pyvirtualdisplay import Display # Only use this if running on a non linux machine driverPath = '../Driver/chromedriver' inline_tags = ["b", "big", "i", "small", "tt", "abbr", "acronym", "cite", "dfn", "em", "kbd", "strong", "samp", "var", "bdo", "map", "object", "q", "span", "sub", "sup"] def prep_driver(): if platform.startswith("linux"): display = Display(visible=0, size=(1920, 1080)) display.start() options = webdriver.ChromeOptions() options.add_argument('headless') options.add_argument('window-size=1920x1080') driver = webdriver.Chrome(chrome_options=options) return driver elif platform.startswith("darwin") or platform.startswith("win32"): driver = webdriver.Chrome(executable_path="../Driver/chromedriver") return driver class LinkException(Exception): """Only called by link class. Add to switch statement as necessary""" def __init__(self, switch=-1): if switch == 0: self.value = "ERROR: Link type was not html or JavaScript" elif switch == 1: self.value = "ERROR: Link was Unclickable" elif switch == 2: self.value = "ERROR: Link is JavaScript based but an index value was not set" elif switch == -1: self.value = "No value was specified in LinkException Switch. " \ "Make sure you are properly calling this exception" def __str__(self) -> str: return str(self.value) class Link(object): """Class that stores all of the information regarding a link. 
Each link has a type (either html of JavaScript), the href attribute (what the link redirects to), a fallback url, and an index value (used for JavaScript Links)""" def __init__(self, href_attribute, matcher="", calling_url="", index=-1): if calling_url == "" and index == -1: self.type = "html" self.hrefAttribute = href_attribute self.matcher = self.hrefAttribute.split(".")[1] self.text = "" return self.type = "" self.hrefAttribute = "" self.fallbackURL = calling_url self.matcher = matcher self.index = 0 if (href_attribute.startswith("http") and href_attribute.split(".")[1] == matcher and len(href_attribute) > len( calling_url)): self.type = "html" self.hrefAttribute = href_attribute elif href_attribute.startswith("javascript"): self.type = "JavaScript" self.hrefAttribute = href_attribute self.index = index else: raise LinkException(0) self.name = "" self.gather_name(delimiter="-") self.text = "" @staticmethod def tag_visible(element) -> bool: if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']: return False if isinstance(element, Comment): return False return True def gather_text(self, driver) -> None: page_source_replaced = driver.page_source # Remove inline tags for it in inline_tags: page_source_replaced = page_source_replaced.replace("<" + it + ">", "") page_source_replaced = page_source_replaced.replace("</" + it + ">", "") # Create random string for tag delimiter random_string = ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=75)) soup = BeautifulSoup(page_source_replaced, 'lxml') # remove non-visible tags [s.extract() for s in soup(['style', 'script', 'head', 'title', 'meta', '[document]'])] visible_text = soup.getText(random_string).replace("\n", "") visible_text = visible_text.split(random_string) self.text = "\n".join(list(filter(lambda vt: vt.split() != [], visible_text))) def click_and_yield(self) -> list: driver = prep_driver() if self.type == "html": driver.get(self.hrefAttribute) self.gather_text(driver) new_links = self.get_new_links(driver, self.hrefAttribute) # Yield new links driver.close() return new_links elif self.type == "JavaScript": if self.index is None: raise LinkException(2) driver.get(self.fallbackURL) new_links = self.get_new_links(driver, self.fallbackURL) # Yield new links try: driver.find_elements_by_xpath("//a[@href]")[self.index].click_and_yield() self.gather_text(driver) return new_links except (WebDriverException, ElementNotVisibleException, ElementNotInteractableException, ElementNotSelectableException): link = driver.find_elements_by_xpath("//a[@href]")[self.index] move = ActionChains(driver).move_to_element(link) move.perform() try: link.click_and_yield() self.gather_text(driver) driver.close() return new_links except (WebDriverException, ElementNotVisibleException, ElementNotInteractableException, ElementNotSelectableException): driver.close() raise LinkException(1) else: raise LinkException(0) def gather_name(self, delimiter=" ") -> None: if self.type == "html": unfiltered_name = self.hrefAttribute[ len(self.hrefAttribute) - (len(self.hrefAttribute) - len(self.fallbackURL)): len(self.hrefAttribute)] unfiltered_name = unfiltered_name.split("/") self.name = "" if len(unfiltered_name) != 1: for i in range(len(unfiltered_name)): self.name += unfiltered_name[i] + delimiter else: self.name = unfiltered_name[0] elif self.type == "JavaScript": self.name = "" def write_file(self, filepath, counter): file_name = self.name if self.type == "html": file = open(str(filepath) + "/" + 
file_name + ".txt", "w") elif self.type == "JavaScript": file = open(str(filepath) + "/" + "JavaScript Link " + str(counter) + ".txt", "w") else: raise LinkException(0) file.write(self.text) file.close() def __str__(self) -> str: s = "" s += "Link Type:" + self.type + " " s += "hrefAttribute:" + self.hrefAttribute + " " s += "name:" + self.name + " " s += "FallbackURL(Only used for JS):" + self.fallbackURL + " " s += "Index (Only used for JS):" + str(self.index) + " " return s def get_new_links(self, driver, calling_url) -> list: elems = driver.find_elements_by_xpath("//a[@href]") new_requests = [] for elem in elems: if not elem.get_attribute("href").lower().startswith(calling_url.lower()): continue try: link = Link(elem.get_attribute("href"), self.matcher, calling_url=calling_url, index=elems.index(elem)) new_requests.append(link) except LinkException: print(elem.get_attribute("href") + " was not added as it did not match the main url") return new_requests def check_path_exists(path): if os.path.exists(path): return True return False if not check_path_exists("results"): os.mkdir("results") if not check_path_exists("diagnostics"): os.mkdir("diagnostics") def read_csv(filename) -> list: requests = [] import codecs with codecs.open(filename, "r", encoding='utf-8', errors='ignore') as csvfile: reader = csv.reader(csvfile, delimiter=',') for row in reader: if reader.line_num != 1 and row[4] != "0": if row[4][:-1] == "/": requests.append(row[4][:-1]) else: requests.append(row[4]) return requests class SchoolSpider(scrapy.Spider): name = "school_scraper" start_urls = [read_csv('../data/micro-sample13_coded.csv')[i] for i in range(1)] def parse(self, response): with open("seen_responses.txt", "a") as textfile: textfile.write(response.url + "\n") if "link" not in response.meta: link = Link(response.url) else: link = response.meta["link"] try: new_links = link.click_and_yield() for l in new_links: request = scrapy.Request(l.hrefAttribute, callback=self.parse) request.meta["link"] = l yield request except LinkException: print("Could not click link:" + str(link)) ``` #### File: jhaber-zz/web_tools/selenium_webscraper.py ```python import csv import datetime import os import sys import time import traceback from sys import platform 'Driver' from selenium import webdriver from selenium.webdriver.common.action_chains import ActionChains 'Driver Exceptions' from selenium.common.exceptions import * 'Parser' from bs4 import BeautifulSoup from bs4.element import Comment "Display for headless mode" from pyvirtualdisplay import Display "Only use this if running on a non linux machine" driverPath = 'Driver/chromedriver' inline_tags = ["b", "big", "i", "small", "tt", "abbr", "acronym", "cite", "dfn", "em", "kbd", "strong", "samp", "var", "bdo", "map", "object", "q", "span", "sub", "sup"] def readCSV(filename) -> list: schools = [] with open(filename, newline='', encoding="Latin-1") as csvFile: reader = csv.reader(csvFile, delimiter=',') for row in reader: try: if reader.line_num != 1: schools.append(School(row[0], row[1], row[2], row[4])) except ValueError: print("ERROR: School " + str(row[1]) + " was not scraped as it did not have a URL") return schools class School(object): """Class that holds schools. Each school is comprised of an ID number, Name, Geographical Address and a url that goes to the schools hompage. The matcher is used to filer out links that go to places outside the schools main domain, like facebook or instagram. 
The links attribute is an array used to store all of the links on the homepage using the Links class""" def __init__(self, id, name, address, mainURL): if mainURL == str(0): raise ValueError("ERROR: URL cannot be 0") self.id = id self.name = name self.address = address self.mainURL = mainURL self.links = [] if self.mainURL.split("://")[1].startswith("www"): self.matcher = self.mainURL.split(".")[1] else: self.matcher = self.mainURL.split("://")[1].split(".")[0] self.filePath = "results/" + self.name self.totalNumberofLinks = 0 self.htmlLinks = 0 self.htmlLinksClicked = 0 self.scriptLinks = 0 self.scriptLinksClicked = 0 self.linksClicked = 0 def gatherLinks(self) -> None: driver.get(self.mainURL) oldElems = driver.find_elements_by_xpath("//a[@href]") hrefAttributes = [] count = 0 for x in oldElems: try: if count == 0: hrefAttributes.append(oldElems[count].get_attribute("href")) else: hrefAttributes.append(newElems[count].get_attribute("href")) except IndexError: break newElems = driver.find_elements_by_xpath("//a[@href]") count += 1 for i in range(len(hrefAttributes)): try: link = Link(hrefAttributes[i], self.mainURL, self.matcher, i) self.links.append(link) print(str(link)) except LinkException: print(str(hrefAttributes[i]) + ( "href") + " was not added as it did not match the main url or was not longer than main url") self.totalNumberofLinks = len(self.links) def clickLinks(self): if not checkPathExists(self.filePath): os.makedirs(self.filePath) counter = 1 for link in self.links: try: if link.type == "html": self.htmlLinks += 1 elif link.type == "JavaScript": self.scriptLinks += 1 print("Clicking Link " + str(counter) + " out of " + str(self.totalNumberofLinks)) link.click() self.linksClicked += 1 if link.type == "html": self.htmlLinksClicked += 1 elif link.type == "JavaScript": self.scriptLinksClicked += 1 except LinkException: print("Could not click link:" + str(link)) counter += 1 scriptCount = 0 print("Done Clickling links") for link in self.links: print("Writing link to file") if link.type == "html": link.writeFile(self.filePath, 0) elif link.type == "JavaScript" and link.text != "": link.writeFile(self.filePath, scriptCount) scriptCount += 1 def __str__(self) -> str: s = "" s += "mainURL:" + self.mainURL + " " s += "Matcher:" + self.matcher + " " s += "links:" + str(self.links) + " " s += "ID:" + self.id + " " s += "Name:" + self.name + " " s += "Address:" + self.address + " " return s class LinkException(Exception): "Only called by link class. Add to switch statement as necessary" def __init__(self, switch=1): if switch == 0: self.value = "ERROR: Link type was not html or JavaScript" elif switch == 1: self.value = "ERROR: Link was Unclickable" elif switch == 2: self.value = "ERROR: Link is JavaScript based but an index value was not set" elif switch == -1: self.value = "No value was specified in LinkException Switch. Make sure you are properly calling this exception" def __str__(self) -> str: return str(self.value) class Link(object): """Class that stores all of the information regarding a link. 
Each link has a type (either html of JavaScript), the href attribute (what the link redirects to), a fallback url, and an index value (used for JavaScript Links)""" def __init__(self, hrefAttribute, callingURL, matcher, index): """ """ self.type = "" self.hrefAttribute = "" self.fallbackURL = callingURL self.index = None self.matcher = matcher self.index = 0 self.text = "" if hrefAttribute.startswith("http"): if (hrefAttribute.split("://")[1].startswith("www") and hrefAttribute.split(".")[1] == matcher and len( hrefAttribute) > len(callingURL)) or ( hrefAttribute.split("://")[1].split(".")[0] == matcher and len(hrefAttribute) > len( callingURL)): self.type = "html" self.hrefAttribute = hrefAttribute else: raise LinkException(0) elif hrefAttribute.startswith("javascript"): self.type = "JavaScript" self.hrefAttribute = hrefAttribute self.index = index else: raise LinkException(0) self.gatherName(delimiter="-") def tag_visible(self, element) -> bool: if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']: return False if isinstance(element, Comment): return False return True def gatherText(self, driver) -> None: page_source_replaced = driver.page_source # Remove inline tags for it in inline_tags: page_source_replaced = page_source_replaced.replace("<" + it + ">", "") page_source_replaced = page_source_replaced.replace("</" + it + ">", "") # Create random string for tag delimiter random_string = "".join(map(chr, os.urandom(75))) soup = BeautifulSoup(page_source_replaced, 'lxml') # remove non-visible tags [s.extract() for s in soup(['style', 'script', 'head', 'title', 'meta', '[document]'])] visible_text = soup.getText(random_string).replace("\n", "") visible_text = visible_text.split(random_string) self.text = "\n".join(list(filter(lambda vt: vt.split() != [], visible_text))) def click(self) -> bool: if self.type == "html": driver.get(self.hrefAttribute) self.gatherText(driver) return True elif self.type == "JavaScript": if self.index is None: raise LinkException(2) driver.get(self.fallbackURL) try: driver.find_elements_by_xpath("//a[@href]")[self.index].click() self.gatherText(driver) except (WebDriverException, ElementNotVisibleException, ElementNotInteractableException, ElementNotSelectableException, IndexError): link = driver.find_elements_by_xpath("//a[@href]")[self.index] move = ActionChains(driver).move_to_element(link) move.perform() try: link.click() self.gatherText(driver) except (WebDriverException, ElementNotVisibleException, ElementNotInteractableException, ElementNotSelectableException, IndexError): raise LinkException(1) else: raise LinkException(0) def gatherName(self, delimiter=" ") -> None: if delimiter == "/": raise ValueError("ERROR: Delimiter cannot be a slash") if self.type == "html": unfilteredName = self.hrefAttribute[self.hrefAttribute.index(self.matcher):len(self.hrefAttribute)] unfilteredName = unfilteredName.split("/") self.name = "" if len(unfilteredName) != 1: for i in range(len(unfilteredName)): self.name += unfilteredName[i] + delimiter else: self.name = unfilteredName[0] elif self.type == "JavaScript": self.name = "" def writeFile(self, filepath, counter): fileName = self.name if self.type == "html": file = open(str(filepath) + "/" + fileName + ".txt", "w") elif self.type == "JavaScript": file = open(str(filepath) + "/" + "JavaScript Link " + str(counter) + ".txt", "w") else: raise LinkException(0) file.write(str(self.text.encode('utf-8'), encoding='utf-8')) file.close() def __str__(self) -> str: s = "" s += "Link Type:" + 
self.type + " " s += "hrefAttribute:" + self.hrefAttribute + " " s += "name:" + self.name + " " s += "FallbackURL(Only used for JS):" + self.fallbackURL + " " s += "Index (Only used for JS):" + str(self.index) + " " return s def tag_visible(element) -> bool: if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']: return False if isinstance(element, Comment): return False return True def checkPathExists(path): if os.path.exists(path): return True return False if __name__ == '__main__': if platform.startswith("linux"): display = Display(visible=0, size=(1920, 1080)) display.start() chromeOptions = webdriver.ChromeOptions() chromeOptions.add_argument('headless') chromeOptions.add_argument('window-size=1920x1080') chromeOptions.add_argument('--no-sandbox') driver = webdriver.Chrome('/usr/local/bin/chromedriver', chrome_options=chromeOptions) elif platform.startswith("darwin") or platform.startswith("win32"): driver = webdriver.Chrome(executable_path="Driver/chromedriver") if not checkPathExists("results"): os.mkdir("results") if not checkPathExists("diagnostics"): os.mkdir("diagnostics") schools = readCSV("data/micro-sample13_coded.csv") numberofLinksClicked = 0 totalNumberOfLinks = 0 htmlLinks = 0 htmlLinksClicked = 0 scriptLinks = 0 scriptLinksClicked = 0 "Time doesn't really account for timezones now, many be an issue later" now = datetime.datetime.now() formattedTime = now.strftime("%Y-%m-%d %H:%M:%S") diagnosticsFile = open("diagnostics/" + str(formattedTime) + ".txt", "w") diagnosticsFile.write("Program was run at " + formattedTime + "\n") startTime = time.time() try: for school in schools: school.gatherLinks() schoolStartTime = time.time() school.clickLinks() endTime = time.time() schoolTimeElapsed = endTime - schoolStartTime print("Elapsed Time :%s (seconds) %s (minutes)" % ( str(schoolTimeElapsed), str(schoolTimeElapsed / 60))) totalNumberOfLinks += school.totalNumberofLinks numberofLinksClicked += school.linksClicked htmlLinks += school.htmlLinks htmlLinksClicked += school.htmlLinksClicked scriptLinks += school.scriptLinks scriptLinks += school.scriptLinksClicked try: diagnosticsFile.write( "School " + str(school.name) + " had " + str(school.totalNumberofLinks) + " links and " + str( school.linksClicked) + " were clicked(" + str( (school.linksClicked / school.totalNumberofLinks) * 100) + "%)\n") except ZeroDivisionError: diagnosticsFile.write("School " + str(school.name) + " had 0 links. Check the matcher for this school ") try: diagnosticsFile.write( "There were " + str(school.htmlLinks) + " html links and " + str( school.htmlLinksClicked) + " were clicked(" + str( round((school.htmlLinksClicked / school.htmlLinks) * 100, 3)) + "%)\n" ) except ZeroDivisionError: diagnosticsFile.write("This school had 0 html links \n") try: diagnosticsFile.write( "There were " + str(school.scriptLinks) + " JavaScript links and " + str( school.scriptLinksClicked) + " were clicked(" + str(round( (school.scriptLinksClicked / school.scriptLinks) * 100, 3)) + "%)\n" ) except ZeroDivisionError: diagnosticsFile.write("This school had 0 JavaScript Links \n") diagnosticsFile.write("It took " + str(round(schoolTimeElapsed / 60, 3)) + " minutes to click on the links for this school\n") except Exception as e: 'To general of a try-except here, only used to stop display from taking up server resources. 
' timeElapsed = time.time() - startTime diagnosticsFile.write("It took " + str(round(timeElapsed / 60, 3)) + " minutes to click all links") if platform.startswith("linux"): display.sendstop() driver.quit() traceback.print_exc(file=sys.stdout) sys.exit() timeElapsed = time.time() - startTime diagnosticsFile.write("Total number of links:" + str(totalNumberOfLinks) + "\n") diagnosticsFile.write("Number of Links Clicked:" + str(numberofLinksClicked) + "\n") try: diagnosticsFile.write( "% of links clicked:" + str(round((numberofLinksClicked / totalNumberOfLinks) * 100, 3)) + "\n") except ZeroDivisionError: diagnosticsFile.write("There were 0 Total Links\n") diagnosticsFile.write("Number of HTML Links" + str(htmlLinks) + "\n") try: diagnosticsFile.write("% of HTML Links Clicked" + str(round((htmlLinksClicked / htmlLinks) * 100, 3)) + "\n") except ZeroDivisionError: diagnosticsFile.write("There were 0 HTML links") diagnosticsFile.write("Number of JavaScript Links" + str(scriptLinks) + "\n") try: diagnosticsFile.write( "% of JavaScript Links Clicked" + str(round((scriptLinksClicked / scriptLinks) * 100, 3)) + "\n") except ZeroDivisionError: diagnosticsFile.write("There were 0 JavaScript Links") diagnosticsFile.write("It took " + str(round(timeElapsed / 60, 3)) + " minutes to click all links") diagnosticsFile.close() ```
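The diagnostics block above wraps every percentage in its own try/except ZeroDivisionError; a small helper of roughly this shape (the name `percent_clicked` is an assumption, not part of the original script) could factor out that repetition:

```python
# Sketch of a zero-safe percentage formatter; hypothetical, not from the original scraper.
def percent_clicked(clicked, total):
    """Format 'clicked of total (pct%)', guarding against a zero total."""
    if total == 0:
        return "0 links found"
    return "%d of %d clicked (%.3f%%)" % (clicked, total, 100.0 * clicked / total)

# e.g. diagnosticsFile.write(percent_clicked(htmlLinksClicked, htmlLinks) + "\n")
print(percent_clicked(42, 60))  # -> 42 of 60 clicked (70.000%)
print(percent_clicked(0, 0))    # -> 0 links found
```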
{ "source": "jhabikal21/tensorflow", "score": 3 }
#### File: kfac/examples/convnet.py ```python r"""Train a ConvNet on MNIST using K-FAC. This library fits a 5-layer ConvNet on MNIST using K-FAC. The model has the following structure, - Conv Layer: 5x5 kernel, 16 output channels. - Max Pool: 3x3 kernel, stride 2. - Conv Layer: 5x5 kernel, 16 output channels. - Max Pool: 3x3 kernel, stride 2. - Linear: 10 output dims. After 3k~6k steps, this should reach perfect accuracy on the training set. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np import tensorflow as tf from tensorflow.contrib.kfac.examples import mlp from tensorflow.contrib.kfac.examples import mnist lc = tf.contrib.kfac.layer_collection oq = tf.contrib.kfac.op_queue opt = tf.contrib.kfac.optimizer __all__ = [ "conv_layer", "max_pool_layer", "linear_layer", "build_model", "minimize_loss_single_machine", "minimize_loss_distributed", "train_mnist_single_machine", "train_mnist_distributed", ] def conv_layer(layer_id, inputs, kernel_size, out_channels): """Builds a convolutional layer with ReLU non-linearity. Args: layer_id: int. Integer ID for this layer's variables. inputs: Tensor of shape [num_examples, width, height, in_channels]. Each row corresponds to a single example. kernel_size: int. Width and height of the convolution kernel. The kernel is assumed to be square. out_channels: int. Number of output features per pixel. Returns: preactivations: Tensor of shape [num_examples, width, height, out_channels]. Values of the layer immediately before the activation function. activations: Tensor of shape [num_examples, width, height, out_channels]. Values of the layer immediately after the activation function. params: Tuple of (kernel, bias), parameters for this layer. """ # TODO (b/67004004): Delete this function and rely on tf.layers exclusively. id:1036 gh:1037 layer = tf.layers.Conv2D( out_channels, kernel_size=[kernel_size, kernel_size], kernel_initializer=tf.random_normal_initializer(stddev=0.01), padding="SAME", name="conv_%d" % layer_id) preactivations = layer(inputs) activations = tf.nn.relu(preactivations) # layer.weights is a list. This converts it a (hashable) tuple. return preactivations, activations, (layer.kernel, layer.bias) def max_pool_layer(layer_id, inputs, kernel_size, stride): """Build a max-pooling layer. Args: layer_id: int. Integer ID for this layer's variables. inputs: Tensor of shape [num_examples, width, height, in_channels]. Each row corresponds to a single example. kernel_size: int. Width and height to pool over per input channel. The kernel is assumed to be square. stride: int. Step size between pooling operations. Returns: Tensor of shape [num_examples, width/stride, height/stride, out_channels]. Result of applying max pooling to 'inputs'. """ # TODO (b/67004004): Delete this function and rely on tf.layers exclusively. id:893 gh:894 with tf.variable_scope("pool_%d" % layer_id): return tf.nn.max_pool( inputs, [1, kernel_size, kernel_size, 1], [1, stride, stride, 1], padding="SAME", name="pool") def linear_layer(layer_id, inputs, output_size): """Builds the final linear layer for an MNIST classification problem. Args: layer_id: int. Integer ID for this layer's variables. inputs: Tensor of shape [num_examples, width, height, in_channels]. Each row corresponds to a single example. output_size: int. Number of output dims per example. Returns: activations: Tensor of shape [num_examples, output_size]. Values of the layer immediately after the activation function. 
params: Tuple of (weights, bias), parameters for this layer. """ # TODO (b/67004004): Delete this function and rely on tf.layers exclusively. id:688 gh:689 pre, _, params = mlp.fc_layer(layer_id, inputs, output_size) return pre, params def build_model(examples, labels, num_labels, layer_collection): """Builds a ConvNet classification model. Args: examples: Tensor of shape [num_examples, num_features]. Represents inputs of model. labels: Tensor of shape [num_examples]. Contains integer IDs to be predicted by softmax for each example. num_labels: int. Number of distinct values 'labels' can take on. layer_collection: LayerCollection instance. Layers will be registered here. Returns: loss: 0-D Tensor representing loss to be minimized. accuracy: 0-D Tensor representing model's accuracy. """ # Build a ConvNet. For each layer with parameters, we'll keep track of the # preactivations, activations, weights, and bias. tf.logging.info("Building model.") pre0, act0, params0 = conv_layer( layer_id=0, inputs=examples, kernel_size=5, out_channels=16) act1 = max_pool_layer(layer_id=1, inputs=act0, kernel_size=3, stride=2) pre2, act2, params2 = conv_layer( layer_id=2, inputs=act1, kernel_size=5, out_channels=16) act3 = max_pool_layer(layer_id=3, inputs=act2, kernel_size=3, stride=2) flat_act3 = tf.reshape(act3, shape=[-1, int(np.prod(act3.shape[1:4]))]) logits, params4 = linear_layer( layer_id=4, inputs=flat_act3, output_size=num_labels) loss = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits( labels=labels, logits=logits)) accuracy = tf.reduce_mean( tf.cast(tf.equal(labels, tf.argmax(logits, axis=1)), dtype=tf.float32)) tf.summary.scalar("loss", loss) tf.summary.scalar("accuracy", accuracy) # Register parameters. K-FAC needs to know about the inputs, outputs, and # parameters of each conv/fully connected layer and the logits powering the # posterior probability over classes. tf.logging.info("Building LayerCollection.") layer_collection.register_conv2d(params0, (1, 1, 1, 1), "SAME", examples, pre0) layer_collection.register_conv2d(params2, (1, 1, 1, 1), "SAME", act1, pre2) layer_collection.register_fully_connected(params4, flat_act3, logits) layer_collection.register_categorical_predictive_distribution( logits, name="logits") return loss, accuracy def minimize_loss_single_machine(loss, accuracy, layer_collection, session_config=None): """Minimize loss with K-FAC on a single machine. A single Session is responsible for running all of K-FAC's ops. Args: loss: 0-D Tensor. Loss to be minimized. accuracy: 0-D Tensor. Accuracy of classifier on current minibatch. layer_collection: LayerCollection instance describing model architecture. Used by K-FAC to construct preconditioner. session_config: None or tf.ConfigProto. Configuration for tf.Session(). Returns: final value for 'accuracy'. """ # Train with K-FAC. 
global_step = tf.train.get_or_create_global_step() optimizer = opt.KfacOptimizer( learning_rate=0.0001, cov_ema_decay=0.95, damping=0.001, layer_collection=layer_collection, momentum=0.9) train_op = optimizer.minimize(loss, global_step=global_step) tf.logging.info("Starting training.") with tf.train.MonitoredTrainingSession(config=session_config) as sess: while not sess.should_stop(): global_step_, loss_, accuracy_, _, _ = sess.run( [global_step, loss, accuracy, train_op, optimizer.cov_update_op]) if global_step_ % 100 == 0: sess.run(optimizer.inv_update_op) if global_step_ % 100 == 0: tf.logging.info("global_step: %d | loss: %f | accuracy: %s", global_step_, loss_, accuracy_) return accuracy_ def _is_gradient_task(task_id, num_tasks): """Returns True if this task should update the weights.""" if num_tasks < 3: return True return 0 <= task_id < 0.6 * num_tasks def _is_cov_update_task(task_id, num_tasks): """Returns True if this task should update K-FAC's covariance matrices.""" if num_tasks < 3: return False return 0.6 * num_tasks <= task_id < num_tasks - 1 def _is_inv_update_task(task_id, num_tasks): """Returns True if this task should update K-FAC's preconditioner.""" if num_tasks < 3: return False return task_id == num_tasks - 1 def _num_gradient_tasks(num_tasks): """Number of tasks that will update weights.""" if num_tasks < 3: return num_tasks return int(np.ceil(0.6 * num_tasks)) def minimize_loss_distributed(task_id, num_worker_tasks, num_ps_tasks, master, checkpoint_dir, loss, accuracy, layer_collection): """Minimize loss with an synchronous implementation of K-FAC. Different tasks are responsible for different parts of K-FAC's Ops. The first 60% of tasks update weights; the next 20% accumulate covariance statistics; the last 20% invert the matrices used to precondition gradients. Args: task_id: int. Integer in [0, num_worker_tasks). ID for this worker. num_worker_tasks: int. Number of workers in this distributed training setup. num_ps_tasks: int. Number of parameter servers holding variables. If 0, parameter servers are not used. master: string. IP and port of TensorFlow runtime process. Set to empty string to run locally. checkpoint_dir: string or None. Path to store checkpoints under. loss: 0-D Tensor. Loss to be minimized. accuracy: dict mapping strings to 0-D Tensors. Additional accuracy to run with each step. layer_collection: LayerCollection instance describing model architecture. Used by K-FAC to construct preconditioner. Returns: final value for 'accuracy'. Raises: ValueError: if task_id >= num_worker_tasks. """ with tf.device(tf.train.replica_device_setter(num_ps_tasks)): global_step = tf.train.get_or_create_global_step() optimizer = opt.KfacOptimizer( learning_rate=0.0001, cov_ema_decay=0.95, damping=0.001, layer_collection=layer_collection, momentum=0.9) inv_update_queue = oq.OpQueue(optimizer.inv_updates_dict.values()) sync_optimizer = tf.train.SyncReplicasOptimizer( opt=optimizer, replicas_to_aggregate=_num_gradient_tasks(num_worker_tasks)) train_op = sync_optimizer.minimize(loss, global_step=global_step) tf.logging.info("Starting training.") is_chief = (task_id == 0) hooks = [sync_optimizer.make_session_run_hook(is_chief)] with tf.train.MonitoredTrainingSession( master=master, is_chief=is_chief, checkpoint_dir=checkpoint_dir, hooks=hooks, stop_grace_period_secs=0) as sess: while not sess.should_stop(): # Choose which op this task is responsible for running. 
if _is_gradient_task(task_id, num_worker_tasks): learning_op = train_op elif _is_cov_update_task(task_id, num_worker_tasks): learning_op = optimizer.cov_update_op elif _is_inv_update_task(task_id, num_worker_tasks): # TODO (duckworthd): Running this op before cov_update_op has been run a id:628 gh:629 # few times can result in "InvalidArgumentError: Cholesky decomposition # was not successful." Delay running this op until cov_update_op has # been run a few times. learning_op = inv_update_queue.next_op(sess) else: raise ValueError("Which op should task %d do?" % task_id) global_step_, loss_, accuracy_, _ = sess.run( [global_step, loss, accuracy, learning_op]) tf.logging.info("global_step: %d | loss: %f | accuracy: %s", global_step_, loss_, accuracy_) return accuracy_ def train_mnist_single_machine(data_dir, num_epochs, use_fake_data=False): """Train a ConvNet on MNIST. Args: data_dir: string. Directory to read MNIST examples from. num_epochs: int. Number of passes to make over the training set. use_fake_data: bool. If True, generate a synthetic dataset. Returns: accuracy of model on the final minibatch of training data. """ # Load a dataset. tf.logging.info("Loading MNIST into memory.") examples, labels = mnist.load_mnist( data_dir, num_epochs=num_epochs, batch_size=128, use_fake_data=use_fake_data, flatten_images=False) # Build a ConvNet. layer_collection = lc.LayerCollection() loss, accuracy = build_model( examples, labels, num_labels=10, layer_collection=layer_collection) # Fit model. return minimize_loss_single_machine(loss, accuracy, layer_collection) def train_mnist_multitower(data_dir, num_epochs, num_towers, use_fake_data=True): """Train a ConvNet on MNIST. Args: data_dir: string. Directory to read MNIST examples from. num_epochs: int. Number of passes to make over the training set. num_towers: int. Number of CPUs to split inference across. use_fake_data: bool. If True, generate a synthetic dataset. Returns: accuracy of model on the final minibatch of training data. """ # Load a dataset. tf.logging.info("Loading MNIST into memory.") tower_batch_size = 128 batch_size = tower_batch_size * num_towers tf.logging.info( ("Loading MNIST into memory. Using batch_size = %d = %d towers * %d " "tower batch size.") % (batch_size, num_towers, tower_batch_size)) examples, labels = mnist.load_mnist( data_dir, num_epochs=num_epochs, batch_size=batch_size, use_fake_data=use_fake_data, flatten_images=False) # Split minibatch across towers. examples = tf.split(examples, num_towers) labels = tf.split(labels, num_towers) # Build an MLP. Each tower's layers will be added to the LayerCollection. layer_collection = lc.LayerCollection() tower_results = [] for tower_id in range(num_towers): with tf.device("/cpu:%d" % tower_id): with tf.name_scope("tower%d" % tower_id): with tf.variable_scope(tf.get_variable_scope(), reuse=(tower_id > 0)): tf.logging.info("Building tower %d." % tower_id) tower_results.append( build_model(examples[tower_id], labels[tower_id], 10, layer_collection)) losses, accuracies = zip(*tower_results) # Average across towers. loss = tf.reduce_mean(losses) accuracy = tf.reduce_mean(accuracies) # Fit model. session_config = tf.ConfigProto( allow_soft_placement=False, device_count={ "CPU": num_towers }) return minimize_loss_single_machine( loss, accuracy, layer_collection, session_config=session_config) def train_mnist_distributed(task_id, num_worker_tasks, num_ps_tasks, master, data_dir, num_epochs, use_fake_data=False): """Train a ConvNet on MNIST. Args: task_id: int. 
Integer in [0, num_worker_tasks). ID for this worker. num_worker_tasks: int. Number of workers in this distributed training setup. num_ps_tasks: int. Number of parameter servers holding variables. master: string. IP and port of TensorFlow runtime process. data_dir: string. Directory to read MNIST examples from. num_epochs: int. Number of passes to make over the training set. use_fake_data: bool. If True, generate a synthetic dataset. Returns: accuracy of model on the final minibatch of training data. """ # Load a dataset. tf.logging.info("Loading MNIST into memory.") examples, labels = mnist.load_mnist( data_dir, num_epochs=num_epochs, batch_size=128, use_fake_data=use_fake_data, flatten_images=False) # Build a ConvNet. layer_collection = lc.LayerCollection() with tf.device(tf.train.replica_device_setter(num_ps_tasks)): loss, accuracy = build_model( examples, labels, num_labels=10, layer_collection=layer_collection) # Fit model. checkpoint_dir = None if data_dir is None else os.path.join(data_dir, "kfac") return minimize_loss_distributed(task_id, num_worker_tasks, num_ps_tasks, master, checkpoint_dir, loss, accuracy, layer_collection) if __name__ == "__main__": tf.app.run() ``` #### File: python/kernel_tests/fisher_factors_test.py ```python from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import numpy.random as npr from tensorflow.contrib.kfac.python.ops import fisher_factors as ff from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops as tf_ops from tensorflow.python.framework import random_seed from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables as tf_variables from tensorflow.python.platform import test class MaybeColocateTest(test.TestCase): def setUp(self): self._colocate_cov_ops_with_inputs = ff.COLOCATE_COV_OPS_WITH_INPUTS def tearDown(self): ff.set_global_constants( colocate_cov_ops_with_inputs=self._colocate_cov_ops_with_inputs) def testFalse(self): ff.set_global_constants(colocate_cov_ops_with_inputs=False) with tf_ops.Graph().as_default(): a = constant_op.constant([2.0], name='a') with ff._maybe_colocate_with(a): b = constant_op.constant(3.0, name='b') self.assertEqual([b'loc:@a'], a.op.colocation_groups()) self.assertEqual([b'loc:@b'], b.op.colocation_groups()) def testTrue(self): ff.set_global_constants(colocate_cov_ops_with_inputs=True) with tf_ops.Graph().as_default(): a = constant_op.constant([2.0], name='a') with ff._maybe_colocate_with(a): b = constant_op.constant(3.0, name='b') self.assertEqual([b'loc:@a'], a.op.colocation_groups()) self.assertEqual([b'loc:@a'], b.op.colocation_groups()) class FisherFactorTestingDummy(ff.FisherFactor): """Dummy class to test the non-abstract methods on ff.FisherFactor.""" @property def _var_scope(self): return 'dummy/a_b_c' @property def _cov_shape(self): raise NotImplementedError @property def _num_sources(self): return 1 @property def _dtype(self): return dtypes.float32 def _compute_new_cov(self): raise NotImplementedError def instantiate_covariance(self): pass def make_inverse_update_ops(self): return [] class InverseProvidingFactorTestingDummy(ff.InverseProvidingFactor): """Dummy class to test the non-abstract methods on ff.InverseProvidingFactor. 
""" def __init__(self, shape): self._shape = shape super(InverseProvidingFactorTestingDummy, self).__init__() @property def _var_scope(self): return 'dummy/a_b_c' @property def _cov_shape(self): return self._shape @property def _num_sources(self): return 1 @property def _dtype(self): return dtypes.float32 def _compute_new_cov(self): raise NotImplementedError def instantiate_covariance(self): pass class NumericalUtilsTest(test.TestCase): def testComputeCovAgainstNumpy(self): with tf_ops.Graph().as_default(), self.test_session() as sess: npr.seed(0) random_seed.set_random_seed(200) x = npr.randn(100, 3) cov = ff._compute_cov(array_ops.constant(x)) np_cov = np.dot(x.T, x) / x.shape[0] self.assertAllClose(sess.run(cov), np_cov) def testComputeCovAgainstNumpyWithAlternativeNormalizer(self): with tf_ops.Graph().as_default(), self.test_session() as sess: npr.seed(0) random_seed.set_random_seed(200) normalizer = 10. x = npr.randn(100, 3) cov = ff._compute_cov(array_ops.constant(x), normalizer=normalizer) np_cov = np.dot(x.T, x) / normalizer self.assertAllClose(sess.run(cov), np_cov) def testAppendHomog(self): with tf_ops.Graph().as_default(), self.test_session() as sess: npr.seed(0) m, n = 3, 4 a = npr.randn(m, n) a_homog = ff._append_homog(array_ops.constant(a)) np_result = np.hstack([a, np.ones((m, 1))]) self.assertAllClose(sess.run(a_homog), np_result) class NameStringUtilFunctionTest(test.TestCase): def _make_tensor(self): x = array_ops.placeholder(dtypes.float64, (3, 1)) w = array_ops.constant(npr.RandomState(0).randn(3, 3)) y = math_ops.matmul(w, x) g = gradients_impl.gradients(y, x)[0] return g def testScopeStringFromParamsSingleTensor(self): with tf_ops.Graph().as_default(): g = self._make_tensor() scope_string = ff.scope_string_from_params(g) self.assertEqual('gradients_MatMul_grad_MatMul_1', scope_string) def testScopeStringFromParamsMultipleTensors(self): with tf_ops.Graph().as_default(): x = array_ops.constant(1,) y = array_ops.constant(2,) scope_string = ff.scope_string_from_params((x, y)) self.assertEqual('Const_Const_1', scope_string) def testScopeStringFromParamsMultipleTypes(self): with tf_ops.Graph().as_default(): x = array_ops.constant(1,) y = array_ops.constant(2,) scope_string = ff.scope_string_from_params([[1, 2, 3], 'foo', True, 4, (x, y)]) self.assertEqual('1-2-3_foo_True_4_Const__Const_1', scope_string) def testScopeStringFromParamsUnsupportedType(self): with tf_ops.Graph().as_default(): x = array_ops.constant(1,) y = array_ops.constant(2,) unsupported = 1.2 # Floats are not supported. 
with self.assertRaises(ValueError): ff.scope_string_from_params([[1, 2, 3], 'foo', True, 4, (x, y), unsupported]) def testScopeStringFromName(self): with tf_ops.Graph().as_default(): g = self._make_tensor() scope_string = ff.scope_string_from_name(g) self.assertEqual('gradients_MatMul_grad_MatMul_1', scope_string) def testScalarOrTensorToString(self): with tf_ops.Graph().as_default(): self.assertEqual(ff.scalar_or_tensor_to_string(5.), repr(5.)) g = self._make_tensor() scope_string = ff.scope_string_from_name(g) self.assertEqual(ff.scalar_or_tensor_to_string(g), scope_string) class FisherFactorTest(test.TestCase): def testMakeInverseUpdateOps(self): with tf_ops.Graph().as_default(): random_seed.set_random_seed(200) factor = FisherFactorTestingDummy() self.assertEqual(0, len(factor.make_inverse_update_ops())) class InverseProvidingFactorTest(test.TestCase): def testRegisterDampedInverse(self): with tf_ops.Graph().as_default(): random_seed.set_random_seed(200) shape = [2, 2] factor = InverseProvidingFactorTestingDummy(shape) factor_var_scope = 'dummy/a_b_c' dampings = 0.1, 1e-1, 0.00001, 1e-5 for damping in dampings: factor.register_damped_inverse(damping) self.assertEqual(set(dampings), set(factor._inverses_by_damping.keys())) inv = factor._inverses_by_damping[dampings[0]] self.assertEqual(inv, factor._inverses_by_damping[dampings[1]]) self.assertNotEqual(inv, factor._inverses_by_damping[dampings[2]]) self.assertEqual(factor._inverses_by_damping[dampings[2]], factor._inverses_by_damping[dampings[3]]) factor_vars = tf_ops.get_collection(tf_ops.GraphKeys.GLOBAL_VARIABLES, factor_var_scope) self.assertListEqual([inv, factor._inverses_by_damping[dampings[2]]], factor_vars) self.assertEqual(shape, inv.get_shape()) def testRegisterMatpower(self): with tf_ops.Graph().as_default(): random_seed.set_random_seed(200) shape = [3, 3] factor = InverseProvidingFactorTestingDummy(shape) factor_var_scope = 'dummy/a_b_c' factor.register_matpower(1, 0.5) factor.register_matpower(2, 0.5) self.assertEqual( set([(1, 0.5), (2, 0.5)]), set(factor._matpower_by_exp_and_damping.keys())) factor_vars = tf_ops.get_collection(tf_ops.GraphKeys.GLOBAL_VARIABLES, factor_var_scope) matpower1 = factor.get_matpower(1, 0.5) matpower2 = factor.get_matpower(2, 0.5) self.assertListEqual([matpower1, matpower2], factor_vars) self.assertEqual(shape, matpower1.get_shape()) self.assertEqual(shape, matpower2.get_shape()) def testMakeInverseUpdateOps(self): with tf_ops.Graph().as_default(): random_seed.set_random_seed(200) factor = FisherFactorTestingDummy() self.assertEqual(0, len(factor.make_inverse_update_ops())) def testMakeInverseUpdateOpsManyInversesEigenDecomp(self): with tf_ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) cov = np.array([[1., 2.], [3., 4.]]) factor = InverseProvidingFactorTestingDummy(cov.shape) factor._cov = array_ops.constant(cov, dtype=dtypes.float32) for i in range(1, ff.EIGENVALUE_DECOMPOSITION_THRESHOLD + 1): factor.register_damped_inverse(1. / i) ops = factor.make_inverse_update_ops() self.assertEqual(1, len(ops)) sess.run(tf_variables.global_variables_initializer()) new_invs = [] sess.run(ops) for i in range(1, ff.EIGENVALUE_DECOMPOSITION_THRESHOLD + 1): # The inverse op will assign the damped inverse of cov to the inv var. new_invs.append(sess.run(factor._inverses_by_damping[1. / i])) # We want to see that the new invs are all different from each other. for i in range(len(new_invs)): for j in range(i + 1, len(new_invs)): # Just check the first element. 
self.assertNotEqual(new_invs[i][0][0], new_invs[j][0][0]) def testMakeInverseUpdateOpsMatPowerEigenDecomp(self): with tf_ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) cov = np.array([[6., 2.], [2., 4.]]) factor = InverseProvidingFactorTestingDummy(cov.shape) factor._cov = array_ops.constant(cov, dtype=dtypes.float32) exp = 2 # NOTE (mattjj): must be int to test with np.linalg.matrix_power id:1041 gh:1042 damping = 0.5 factor.register_matpower(exp, damping) ops = factor.make_inverse_update_ops() self.assertEqual(1, len(ops)) sess.run(tf_variables.global_variables_initializer()) sess.run(ops[0]) matpower = sess.run(factor._matpower_by_exp_and_damping[(exp, damping)]) matpower_np = np.linalg.matrix_power(cov + np.eye(2) * damping, exp) self.assertAllClose(matpower, matpower_np) def testMakeInverseUpdateOpsNoEigenDecomp(self): with tf_ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) cov = np.array([[5., 2.], [2., 4.]]) # NOTE (mattjj): must be symmetric id:896 gh:897 factor = InverseProvidingFactorTestingDummy(cov.shape) factor._cov = array_ops.constant(cov, dtype=dtypes.float32) factor.register_damped_inverse(0) ops = factor.make_inverse_update_ops() self.assertEqual(1, len(ops)) sess.run(tf_variables.global_variables_initializer()) # The inverse op will assign the damped inverse of cov to the inv var. old_inv = sess.run(factor._inverses_by_damping[0]) self.assertAllClose( sess.run(ff.inverse_initializer(cov.shape, dtypes.float32)), old_inv) sess.run(ops) new_inv = sess.run(factor._inverses_by_damping[0]) self.assertAllClose(new_inv, np.linalg.inv(cov)) class FullFactorTest(test.TestCase): def testFullFactorInit(self): with tf_ops.Graph().as_default(): random_seed.set_random_seed(200) tensor = array_ops.ones((2, 3), name='a/b/c') factor = ff.FullFactor((tensor,), 32) self.assertEqual([6, 6], factor.get_cov().get_shape().as_list()) def testFullFactorInitFloat64(self): with tf_ops.Graph().as_default(): dtype = dtypes.float64_ref random_seed.set_random_seed(200) tensor = array_ops.ones((2, 3), dtype=dtype, name='a/b/c') factor = ff.FullFactor((tensor,), 32) cov = factor.get_cov() self.assertEqual(cov.dtype, dtype) self.assertEqual([6, 6], cov.get_shape().as_list()) def testMakeCovarianceUpdateOp(self): with tf_ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) tensor = array_ops.constant([1., 2.], name='a/b/c') factor = ff.FullFactor((tensor,), 2) sess.run(tf_variables.global_variables_initializer()) new_cov = sess.run(factor.make_covariance_update_op(.5)) self.assertAllClose([[0.75, 0.5], [0.5, 1.5]], new_cov) class NaiveDiagonalFactorTest(test.TestCase): def testNaiveDiagonalFactorInit(self): with tf_ops.Graph().as_default(): random_seed.set_random_seed(200) tensor = array_ops.ones((2, 3), name='a/b/c') factor = ff.NaiveDiagonalFactor((tensor,), 32) self.assertEqual([6, 1], factor.get_cov().get_shape().as_list()) def testNaiveDiagonalFactorInitFloat64(self): with tf_ops.Graph().as_default(): dtype = dtypes.float64_ref random_seed.set_random_seed(200) tensor = array_ops.ones((2, 3), dtype=dtype, name='a/b/c') factor = ff.NaiveDiagonalFactor((tensor,), 32) cov = factor.get_cov() self.assertEqual(cov.dtype, dtype) self.assertEqual([6, 1], cov.get_shape().as_list()) def testMakeCovarianceUpdateOp(self): with tf_ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) tensor = array_ops.constant([1., 2.], name='a/b/c') factor = 
ff.NaiveDiagonalFactor((tensor,), 2) sess.run(tf_variables.global_variables_initializer()) new_cov = sess.run(factor.make_covariance_update_op(.5)) self.assertAllClose([[0.75], [1.5]], new_cov) class FullyConnectedKroneckerFactorTest(test.TestCase): def _testFullyConnectedKroneckerFactorInit(self, has_bias, final_shape, dtype=dtypes.float32_ref): with tf_ops.Graph().as_default(): random_seed.set_random_seed(200) tensor = array_ops.ones((2, 3), dtype=dtype, name='a/b/c') factor = ff.FullyConnectedKroneckerFactor((tensor,), has_bias=has_bias) cov = factor.get_cov() self.assertEqual(cov.dtype, dtype) self.assertEqual(final_shape, cov.get_shape().as_list()) def testFullyConnectedKroneckerFactorInitNoBias(self): for dtype in (dtypes.float32_ref, dtypes.float64_ref): self._testFullyConnectedKroneckerFactorInit(False, [3, 3], dtype=dtype) def testFullyConnectedKroneckerFactorInitWithBias(self): for dtype in (dtypes.float32_ref, dtypes.float64_ref): self._testFullyConnectedKroneckerFactorInit(True, [4, 4], dtype=dtype) def testMakeCovarianceUpdateOpWithBias(self): with tf_ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) tensor = array_ops.constant([[1., 2.], [3., 4.]], name='a/b/c') factor = ff.FullyConnectedKroneckerFactor((tensor,), has_bias=True) sess.run(tf_variables.global_variables_initializer()) new_cov = sess.run(factor.make_covariance_update_op(.5)) self.assertAllClose([[3, 3.5, 1], [3.5, 5.5, 1.5], [1, 1.5, 1]], new_cov) def testMakeCovarianceUpdateOpNoBias(self): with tf_ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) tensor = array_ops.constant([[1., 2.], [3., 4.]], name='a/b/c') factor = ff.FullyConnectedKroneckerFactor((tensor,)) sess.run(tf_variables.global_variables_initializer()) new_cov = sess.run(factor.make_covariance_update_op(.5)) self.assertAllClose([[3, 3.5], [3.5, 5.5]], new_cov) class ConvInputKroneckerFactorTest(test.TestCase): def testConvInputKroneckerFactorInitNoBias(self): with tf_ops.Graph().as_default(): random_seed.set_random_seed(200) tensor = array_ops.ones((2, 3), name='a/b/c') factor = ff.ConvInputKroneckerFactor( tensor, (1, 2, 3, 4), 3, 2, has_bias=False) self.assertEqual([1 * 2 * 3, 1 * 2 * 3], factor.get_cov().get_shape().as_list()) def testConvInputKroneckerFactorInit(self): with tf_ops.Graph().as_default(): random_seed.set_random_seed(200) tensor = array_ops.ones((2, 3), name='a/b/c') factor = ff.ConvInputKroneckerFactor( tensor, (1, 2, 3, 4), 3, 2, has_bias=True) self.assertEqual([1 * 2 * 3 + 1, 1 * 2 * 3 + 1], factor.get_cov().get_shape().as_list()) def testConvInputKroneckerFactorInitFloat64(self): with tf_ops.Graph().as_default(): dtype = dtypes.float64_ref random_seed.set_random_seed(200) tensor = array_ops.ones((2, 3), dtype=dtype, name='a/b/c') factor = ff.ConvInputKroneckerFactor( tensor, (1, 2, 3, 4), 3, 2, has_bias=True) cov = factor.get_cov() self.assertEqual(cov.dtype, dtype) self.assertEqual([1 * 2 * 3 + 1, 1 * 2 * 3 + 1], cov.get_shape().as_list()) def testMakeCovarianceUpdateOpWithBias(self): with tf_ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) tensor = array_ops.constant( np.arange(1., 17.).reshape(2, 2, 2, 2), dtype=dtypes.float32) factor = ff.ConvInputKroneckerFactor( tensor, (1, 2, 1, 1), [1, 1, 1, 1], 'SAME', has_bias=True) sess.run(tf_variables.global_variables_initializer()) new_cov = sess.run(factor.make_covariance_update_op(.5)) self.assertAllClose([[34.375, 37, 3.125], [37, 41, 3.5], [3.125, 3.5, 1]], new_cov) 
def testMakeCovarianceUpdateOpNoBias(self): with tf_ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) tensor = array_ops.constant( np.arange(1., 17.).reshape(2, 2, 2, 2), dtype=dtypes.float32) factor = ff.ConvInputKroneckerFactor(tensor, (1, 2, 1, 1), [1, 1, 1, 1], 'SAME') sess.run(tf_variables.global_variables_initializer()) new_cov = sess.run(factor.make_covariance_update_op(.5)) self.assertAllClose([[34.375, 37], [37, 41]], new_cov) class ConvOutputKroneckerFactorTest(test.TestCase): def testConvOutputKroneckerFactorInit(self): with tf_ops.Graph().as_default(): random_seed.set_random_seed(200) tensor = array_ops.ones((2, 3, 4, 5), name='a/b/c') factor = ff.ConvOutputKroneckerFactor((tensor,)) self.assertEqual([5, 5], factor.get_cov().get_shape().as_list()) def testConvOutputKroneckerFactorInitFloat64(self): with tf_ops.Graph().as_default(): dtype = dtypes.float64_ref random_seed.set_random_seed(200) tensor = array_ops.ones((2, 3, 4, 5), dtype=dtype, name='a/b/c') factor = ff.ConvOutputKroneckerFactor((tensor,)) cov = factor.get_cov() self.assertEqual(cov.dtype, dtype) self.assertEqual([5, 5], cov.get_shape().as_list()) def testConvOutputKroneckerFactorInitNotEnoughDims(self): with tf_ops.Graph().as_default(): random_seed.set_random_seed(200) tensor = array_ops.ones((2, 3), name='a/b/c') with self.assertRaises(IndexError): ff.ConvOutputKroneckerFactor(tensor) def testMakeCovarianceUpdateOp(self): with tf_ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) tensor = np.arange(1, 17).reshape(2, 2, 2, 2).astype(np.float32) factor = ff.ConvOutputKroneckerFactor((array_ops.constant(tensor),)) sess.run(tf_variables.global_variables_initializer()) new_cov = sess.run(factor.make_covariance_update_op(.5)) self.assertAllClose([[43, 46.5], [46.5, 51.5]], new_cov) class FullyConnectedMultiKFTest(test.TestCase): def testFullyConnectedMultiKFInit(self): with tf_ops.Graph().as_default(): random_seed.set_random_seed(200) tensor = array_ops.ones((2, 3), name='a/b/c') tensor_list = [tensor] factor = ff.FullyConnectedMultiKF((tensor_list,), has_bias=False) self.assertEqual([3, 3], factor.get_cov().get_shape().as_list()) def testFullyConnectedMultiKFInitFloat64(self): with tf_ops.Graph().as_default(): dtype = dtypes.float64_ref random_seed.set_random_seed(200) tensor = array_ops.ones((2, 3), dtype=dtype, name='a/b/c') tensor_list = [tensor] factor = ff.FullyConnectedMultiKF((tensor_list,), has_bias=False) cov = factor.get_cov() self.assertEqual(cov.dtype, dtype) self.assertEqual([3, 3], cov.get_shape().as_list()) def testMakeCovarianceUpdateOpWithBias(self): with tf_ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) tensor = array_ops.constant([[1., 2.], [3., 4.]], name='a/b/c') tensor_list = [tensor] factor = ff.FullyConnectedMultiKF((tensor_list,), has_bias=True) sess.run(tf_variables.global_variables_initializer()) new_cov = sess.run(factor.make_covariance_update_op(.5)) self.assertAllClose([[3, 3.5, 1], [3.5, 5.5, 1.5], [1, 1.5, 1]], new_cov) def testMakeCovarianceUpdateOpNoBias(self): with tf_ops.Graph().as_default(), self.test_session() as sess: random_seed.set_random_seed(200) tensor = array_ops.constant([[1., 2.], [3., 4.]], name='a/b/c') tensor_list = [tensor] factor = ff.FullyConnectedMultiKF((tensor_list,)) sess.run(tf_variables.global_variables_initializer()) new_cov = sess.run(factor.make_covariance_update_op(.5)) self.assertAllClose([[3, 3.5], [3.5, 5.5]], new_cov) if __name__ == 
'__main__': test.main() ``` #### File: py2tf/pyct/anno.py ```python from __future__ import absolute_import from __future__ import division from __future__ import print_function def getanno(node, key, field_name='___pyct_anno'): return getattr(node, field_name)[key] def hasanno(node, key, field_name='___pyct_anno'): return hasattr(node, field_name) and key in getattr(node, field_name) def setanno(node, key, value, field_name='___pyct_anno'): annotations = getattr(node, field_name, {}) setattr(node, field_name, annotations) assert not hasanno(node, key, field_name), (node, key) annotations[key] = value # So that the annotations survive gast_to_ast() and ast_to_gast() if field_name not in node._fields: node._fields += (field_name,) ``` #### File: _impl/keras/estimator.py ```python from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.client import session from tensorflow.python.estimator import estimator as estimator_lib from tensorflow.python.estimator import export as export_lib from tensorflow.python.estimator import model_fn as model_fn_lib from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib from tensorflow.python.keras._impl.keras import backend as K from tensorflow.python.keras._impl.keras import models from tensorflow.python.keras._impl.keras.utils.generic_utils import CustomObjectScope from tensorflow.python.ops import math_ops from tensorflow.python.ops import metrics as metrics_module from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import signature_constants from tensorflow.python.training import saver as saver_lib from tensorflow.python.training import training_util _DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY def _cast_tensor_to_floatx(x): """Cast tensor to keras's floatx dtype if it is not already the same dtype.""" if x.dtype == K.floatx(): return x else: return math_ops.cast(x, K.floatx()) def _create_ordered_io(keras_model, estimator_io_dict, is_input=True): """Create a list of tensors from IO dictionary based on Keras IO order. Args: keras_model: an instance of compiled keras model. estimator_io_dict: features or labels dictionary from model_fn. is_input: True if dictionary is for inputs. Returns: a list of tensors based on Keras IO order. Raises: ValueError: if dictionary keys cannot be found in Keras model input_names or output_names. """ if is_input: keras_io_names = keras_model.input_names else: keras_io_names = keras_model.output_names for key in estimator_io_dict: if key not in keras_io_names: raise ValueError( 'Cannot find %s with name "%s" in Keras Model. It needs to match ' 'one of the following: %s' % ('input' if is_input else 'output', key, ', '.join(keras_io_names))) tensors = [] for io_name in keras_io_names: tensors.append(_cast_tensor_to_floatx(estimator_io_dict[io_name])) return tensors def _clone_and_build_model(mode, keras_model, custom_objects, features=None, labels=None): """Clone and build the given keras_model. Args: mode: training mode. keras_model: an instance of compiled keras model. custom_objects: Dictionary for custom objects. features: labels: Returns: The newly built model. """ # Set to True during training, False for inference. K.set_learning_phase(mode == model_fn_lib.ModeKeys.TRAIN) # Clone keras model. 
input_tensors = None if features is None else _create_ordered_io( keras_model, features) if custom_objects: with CustomObjectScope(custom_objects): model = models.clone_model(keras_model, input_tensors=input_tensors) else: model = models.clone_model(keras_model, input_tensors=input_tensors) # Compile/Build model if mode is model_fn_lib.ModeKeys.PREDICT and not model.built: model.build() else: optimizer_config = keras_model.optimizer.get_config() optimizer = keras_model.optimizer.__class__.from_config(optimizer_config) optimizer.iterations = training_util.get_or_create_global_step() # Get list of outputs. if labels is None: target_tensors = None elif isinstance(labels, dict): target_tensors = _create_ordered_io(keras_model, labels, is_input=False) else: target_tensors = [ _cast_tensor_to_floatx( sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(labels)) ] model.compile( optimizer, keras_model.loss, metrics=keras_model.metrics, loss_weights=keras_model.loss_weights, sample_weight_mode=keras_model.sample_weight_mode, weighted_metrics=keras_model.weighted_metrics, target_tensors=target_tensors) if isinstance(model, models.Sequential): model = model.model return model def _create_keras_model_fn(keras_model, custom_objects=None): """Creates model_fn for keras Estimator. Args: keras_model: an instance of compiled keras model. custom_objects: Dictionary for custom objects. Returns: The model_fn for a keras Estimator. """ def model_fn(features, labels, mode): """model_fn for keras Estimator.""" model = _clone_and_build_model(mode, keras_model, custom_objects, features, labels) # Get inputs to EstimatorSpec predictions = dict(zip(model.output_names, model.outputs)) loss = None train_op = None eval_metric_ops = None # Set loss and metric only during train and evaluate. if mode is not model_fn_lib.ModeKeys.PREDICT: model._make_train_function() # pylint: disable=protected-access loss = model.total_loss if model.metrics: eval_metric_ops = {} # When each metric maps to an output if isinstance(model.metrics, dict): for i, output_name in enumerate(model.metrics.keys()): metric_name = model.metrics[output_name] if callable(metric_name): metric_name = metric_name.__name__ # When some outputs use the same metric if list(model.metrics.values()).count(metric_name) > 1: metric_name += '_' + output_name eval_metric_ops[metric_name] = metrics_module.mean( model.metrics_tensors[i - len(model.metrics)]) else: for i, metric_name in enumerate(model.metrics): if callable(metric_name): metric_name = metric_name.__name__ eval_metric_ops[metric_name] = metrics_module.mean( model.metrics_tensors[i]) # Set train_op only during train. if mode is model_fn_lib.ModeKeys.TRAIN: train_op = model.train_function.updates_op return model_fn_lib.EstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=eval_metric_ops, export_outputs={ _DEFAULT_SERVING_KEY: export_lib.export_output.PredictOutput(predictions) }) return model_fn def _save_first_checkpoint(keras_model, estimator, custom_objects, keras_weights): """Save first checkpoint for the keras Estimator. Args: keras_model: an instance of compiled keras model. estimator: keras estimator. custom_objects: Dictionary for custom objects. keras_weights: A flat list of Numpy arrays for weights of given keras_model. Returns: The model_fn for a keras Estimator. 
""" with ops.Graph().as_default() as g, g.device(estimator._device_fn): random_seed.set_random_seed(estimator.config.tf_random_seed) training_util.create_global_step() model = _clone_and_build_model(model_fn_lib.ModeKeys.TRAIN, keras_model, custom_objects) if isinstance(model, models.Sequential): model = model.model # Load weights and save to checkpoint if there is no checkpoint latest_path = saver_lib.latest_checkpoint(estimator.model_dir) if not latest_path: with session.Session() as sess: model.set_weights(keras_weights) # Make update ops and initialize all variables. if not model.train_function: # pylint: disable=protected-access model._make_train_function() K._initialize_variables(sess) # pylint: enable=protected-access saver = saver_lib.Saver() saver.save(sess, os.path.join(estimator.model_dir, 'keras_model.ckpt')) def model_to_estimator(keras_model=None, keras_model_path=None, custom_objects=None, model_dir=None, config=None): """Constructs an `Estimator` instance from given keras model. For usage example, please see @{$programmers_guide/estimators$creating_estimators_from_keras_models}. Args: keras_model: Keras model in memory. keras_model_path: Directory to a keras model on disk. custom_objects: Dictionary for custom objects. model_dir: Directory to save Estimator model parameters, graph and etc. config: Configuration object. Returns: An Estimator from given keras model. Raises: ValueError: if neither keras_model nor keras_model_path was given. ValueError: if both keras_model and keras_model_path was given. ValueError: if the keras_model_path is a GCS URI. ValueError: if keras_model has not been compiled. """ if (not keras_model) and (not keras_model_path): raise ValueError( 'Either keras_model or keras_model_path needs to be provided.') if keras_model and keras_model_path: raise ValueError( 'Please specity either keras_model or keras_model_path but not both.') if not keras_model: if keras_model_path.startswith( 'gs://') or 'storage.googleapis.com' in keras_model_path: raise ValueError( '%s is not a local path. Please copy the model locally first.' % keras_model_path) logging.info('Loading models from %s', keras_model_path) keras_model = models.load_model(keras_model_path) else: logging.info('Using the Keras model from memory.') keras_model = keras_model if not hasattr(keras_model, 'optimizer'): raise ValueError( 'Given keras model has not been compiled yet. Please compile first ' 'before creating the estimator.') keras_weights = keras_model.get_weights() keras_model_fn = _create_keras_model_fn(keras_model, custom_objects) est = estimator_lib.Estimator( keras_model_fn, model_dir=model_dir, config=config) # TODO (yifeif): move checkpoint initialization to scaffold.init_fn id:2332 gh:2333 _save_first_checkpoint(keras_model, est, custom_objects, keras_weights) return est ```
{ "source": "jhabmc/git-gud", "score": 3 }
#### File: src/gitgud/gitgud.py ```python import argparse, pyfiglet, sys class git(object): @staticmethod def parse_args(cmdname): parser = argparse.ArgumentParser(description="Have you been told to 'get %s'? Now you can!" % cmdname) parser.add_argument("name", metavar="NAME", type=str, nargs="?", default=None, help="who is getting %s" % cmdname) parser.add_argument("-s", "--super", action="store_true", default=False, help="get super %s" % cmdname) args = parser.parse_args() return args @staticmethod def fig(text): fig = pyfiglet.Figlet() return fig.renderText(text) @staticmethod def gud(): args = git.parse_args("good") name = args.name or "You" sup = args.super text = "{name} {verb} now {qual} gud!".format(name=name, verb="is" if args.name else "are", qual="super" if sup else "so") if sup: text = git.fig(text) print(text) @staticmethod def rekt(): args = git.parse_args("rekt") name = args.name or "You" sup = args.super text = "{name} got {qual}#rekt!".format(name=name, qual="super " if sup else "") if sup: text = git.fig(text) print(text) @staticmethod def spooked(): args = git.parse_args("spooked") name = args.name or "You" sup = args.super text = "{name} got spooked by a scary skeleton!".format(name=name) if sup: text = git.fig(text) print(text) @staticmethod def job(): args = git.parse_args("job") name = args.name or "You" sup = args.super text = "{name} got a job in gitting #rekt!".format(name=name) if sup: text = git.fig(text) print(text) @staticmethod def money(): args = git.parse_args("money") name = args.name or "You" sup = args.super text = "{name} got money!".format(name=name) if sup: text = git.fig(text) print(text) ```
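The `git` class only defines static command handlers; how they are registered as console scripts is not shown in this file, so the invocation below is a hedged sketch that simulates the command line directly.

```python
# Hypothetical invocation sketch. argparse reads sys.argv, so in practice these
# handlers run as console commands (e.g. `git gud Alice --super`) once the
# package registers them as entry points; that wiring is not shown here.
import sys
from gitgud.gitgud import git

sys.argv = ['git-gud', 'Alice', '--super']  # simulate: git gud Alice --super
git.gud()  # prints a FIGlet-rendered "Alice is now super gud!"
```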
{ "source": "jhabriel/mixdim-estimates", "score": 3 }
#### File: paper_examples/ex51_validation2D/main.py ```python import numpy as np import porepy as pp import itertools from time import time from model import model #%% Functions def make_constrained_mesh(h=0.1): """ Creates unstructured mesh for a given target mesh size for the case of a single vertical fracture embedded in the domain Parameters ---------- h : float, optional Target mesh size. The default is 0.1. Returns ------- gb : PorePy Object Porepy grid bucket object. """ domain = {"xmin": 0, "xmax": 1, "ymin": 0, "ymax": 1} network_2d = pp.fracture_importer.network_2d_from_csv("network.csv", domain=domain) # Target lengths target_h_bound = h target_h_fract = h mesh_args = {"mesh_size_bound": target_h_bound, "mesh_size_frac": target_h_fract} # Construct grid bucket gb = network_2d.mesh(mesh_args, constraints=[1, 2]) return gb def create_non_matching_gridbucket(h_2d, h_1d, h_mortar): """ Generates a gridbucket containing non-matching grids Parameters ---------- h_2d : Float Mesh size of the higher-dimensional grid h_1d : Float Mesh size of the lower-dimensional grid h_mortar : Float Mesh size of the mortar grid Raises ------ Warning If the subdomain cells are smaller than the mortar cell Returns ------- gb : PorePy object Grid bucket """ # Sanity check if (h_2d > h_mortar) or (h_1d > h_mortar): warning_msg = "Subdomain cell are smaller than mortar cells " warning_msg += "and this may lead to inconsistent results." raise Warning(warning_msg) # NOTE: The easiest way to construct the non-matching gridbucket is to # replace the lower-dimensional grid and the mortar grids into the # higher-dimensional grid # Create a grid bucket using h_2d as target mesh size gb_h = make_constrained_mesh(h_2d) gl_old = gb_h.grids_of_dimension(1)[0] # extract 1d-grid mg_old = gb_h.get_mortar_grids()[0] # extract mortar-grid # Obtain fracture and mortar grids to be replaced into gl_new = make_constrained_mesh(h_1d).grids_of_dimension(1)[0] mg_new = make_constrained_mesh(h_mortar).get_mortar_grids()[0] # Create the mapping dictionaries g_map = {gl_old: gl_new} mg_map = {mg_old: mg_new.side_grids} # Replace grids gb = gb_h.copy() gb.replace_grids(g_map=g_map) gb.replace_grids(mg_map=mg_map) return gb #%% Defining numerical methods, and obtaining grid buckets num_methods = ["TPFA", "MPFA", "RT0", "MVEM"] levels = 5 # coarsening levels coarsening_factor = 2 h_2d_ref = 0.003125 # reference 2D mesh size h_1d_ref = h_2d_ref * 1.5 # reference 1D mesh size h_mortar_ref = h_2d_ref * 2.0 # reference mortar mesh size h_2d = coarsening_factor ** np.arange(levels) * h_2d_ref h_1d = coarsening_factor ** np.arange(levels) * h_1d_ref h_mortar = coarsening_factor ** np.arange(levels) * h_mortar_ref grid_buckets = [] tic = time() print("Assembling non-matching grid buckets...", end="") for counter in range(levels): grid_buckets.append( create_non_matching_gridbucket(h_2d[counter], h_1d[counter], h_mortar[counter]) ) grid_buckets = grid_buckets[::-1] print(f"\u2713 Time {time() - tic}\n") #%% Create dictionary and initialize fields d = {k: {} for k in num_methods} for method in num_methods: d[method] = { "mesh_size": [], "error_estimate_2d": [], "true_error_pressure_2d": [], "true_error_velocity_2d": [], "mesh_size_2d": [], "error_estimate_1d": [], "true_error_pressure_1d": [], "true_error_velocity_1d": [], "mesh_size_1d": [], "error_estimate_mortar": [], "true_error_pressure_mortar": [], "true_error_velocity_mortar": [], "mesh_size_mortar": [], "majorant": [], "true_error_pressure": [], "true_error_velocity": [], 
"I_eff_pressure": [], "I_eff_velocity": [], "I_eff_combined": [], } #%% Populate fields (NOTE: This loop may take considerable time) for i in itertools.product(num_methods, grid_buckets): # Print info in the console print("Solving with", i[0], "for refinement level", grid_buckets.index(i[1]) + 1) # Get hold of errors tic = time() ( h_max, error_estimate_2d, true_error_pressure_2d, true_error_velocity_2d, mesh_size_2d, error_estimate_1d, true_error_pressure_1d, true_error_velocity_1d, mesh_size_1d, error_estimates_mortar, true_error_pressure_mortar, true_error_velocity_mortar, mesh_size_mortar, majorant, true_error_pressure, true_error_velocity, I_eff_pressure, I_eff_velocity, I_eff_combined, ) = model(i[1], i[0]) print(f"Done. Time {time() - tic}\n") # Store errors in the dictionary d[i[0]]["mesh_size"].append(h_max) d[i[0]]["error_estimate_2d"].append(error_estimate_2d) d[i[0]]["true_error_pressure_2d"].append(true_error_pressure_2d) d[i[0]]["true_error_velocity_2d"].append(true_error_velocity_2d) d[i[0]]["mesh_size_2d"].append(mesh_size_2d) d[i[0]]["error_estimate_1d"].append(error_estimate_1d) d[i[0]]["true_error_pressure_1d"].append(true_error_pressure_1d) d[i[0]]["true_error_velocity_1d"].append(true_error_velocity_1d) d[i[0]]["mesh_size_1d"].append(mesh_size_1d) d[i[0]]["error_estimate_mortar"].append(error_estimates_mortar) d[i[0]]["true_error_pressure_mortar"].append(true_error_pressure_mortar) d[i[0]]["true_error_velocity_mortar"].append(true_error_velocity_mortar) d[i[0]]["mesh_size_mortar"].append(mesh_size_mortar) d[i[0]]["majorant"].append(majorant) d[i[0]]["true_error_pressure"].append(true_error_pressure) d[i[0]]["true_error_velocity"].append(true_error_velocity) d[i[0]]["I_eff_pressure"].append(I_eff_pressure) d[i[0]]["I_eff_velocity"].append(I_eff_velocity) d[i[0]]["I_eff_combined"].append(I_eff_combined) #%% Exporting # Permutations rows = len(num_methods) * len(grid_buckets) # Initialize lists num_method_name = [] diam_2d = [] diam_1d = [] diam_mortar = [] col_2d_estimate = [] col_1d_estimate = [] col_mortar_estimate = [] col_majorant = [] col_true_error_pressure = [] col_true_error_velocity = [] I_eff_pressure = [] I_eff_velocity = [] I_eff_combined = [] # Populate lists for i in itertools.product(num_methods, range(levels)): num_method_name.append(i[0]) diam_2d.append(d[i[0]]["mesh_size_2d"][i[1]]) diam_1d.append(d[i[0]]["mesh_size_1d"][i[1]]) diam_mortar.append(d[i[0]]["mesh_size_mortar"][i[1]]) col_2d_estimate.append(d[i[0]]["error_estimate_2d"][i[1]]) col_1d_estimate.append(d[i[0]]["error_estimate_1d"][i[1]]) col_mortar_estimate.append(d[i[0]]["error_estimate_mortar"][i[1]]) col_majorant.append(d[i[0]]["majorant"][i[1]]) col_true_error_pressure.append(d[i[0]]["true_error_pressure"][i[1]]) col_true_error_velocity.append(d[i[0]]["true_error_velocity"][i[1]]) I_eff_pressure.append(d[i[0]]["I_eff_pressure"][i[1]]) I_eff_velocity.append(d[i[0]]["I_eff_velocity"][i[1]]) I_eff_combined.append(d[i[0]]["I_eff_combined"][i[1]]) # Prepare for exporting export = np.zeros(rows, dtype=[ ('var2', 'U6'), ('var3', float), ('var4', float), ('var5', float), ('var6', float), ('var7', float), ('var8', float), ('var9', float), ('var10', float), ('var11', float), ('var12', float), ('var13', float), ('var14', float) ]) # Declaring column variables export['var2'] = num_method_name export['var3'] = diam_2d export['var4'] = diam_1d export['var5'] = diam_mortar export['var6'] = col_2d_estimate export['var7'] = col_1d_estimate export['var8'] = col_mortar_estimate export['var9'] = col_majorant 
export['var10'] = col_true_error_pressure export['var11'] = col_true_error_velocity export['var12'] = I_eff_pressure export['var13'] = I_eff_velocity export['var14'] = I_eff_combined # Formatting string fmt = "%6s %2.5f %2.5f %2.5f %2.2e %2.2e " fmt += "%2.2e %2.2e %2.2e %2.2e %2.2f %2.2f %2.2f" # Headers header = "num_method h_2d, h_1d, h_mortar, eta_2d eta_1d eta_mortar " header += "majorant true_error_p true_error_u I_eff_p I_eff_u I_eff_pu" # Writing into txt np.savetxt('validation2d.txt', export, delimiter=',', fmt=fmt, header=header) #%% Exporting to LaTeX # Permutations rows = len(num_methods) * len(grid_buckets) # Initialize lists ampersend = [] for i in range(rows): ampersend.append('&') num_method_name = [] diam_2d = [] diam_1d = [] diam_mortar = [] col_2d_estimate = [] col_1d_estimate = [] col_mortar_estimate = [] col_majorant = [] col_true_error_pressure = [] col_true_error_velocity = [] I_eff_pressure = [] I_eff_velocity = [] I_eff_combined = [] # Populate lists for i in itertools.product(num_methods, range(levels)): num_method_name.append(i[0]) diam_2d.append(d[i[0]]["mesh_size_2d"][i[1]]) diam_1d.append(d[i[0]]["mesh_size_1d"][i[1]]) diam_mortar.append(d[i[0]]["mesh_size_mortar"][i[1]]) col_2d_estimate.append(d[i[0]]["error_estimate_2d"][i[1]]) col_1d_estimate.append(d[i[0]]["error_estimate_1d"][i[1]]) col_mortar_estimate.append(d[i[0]]["error_estimate_mortar"][i[1]]) col_majorant.append(d[i[0]]["majorant"][i[1]]) col_true_error_pressure.append(d[i[0]]["true_error_pressure"][i[1]]) col_true_error_velocity.append(d[i[0]]["true_error_velocity"][i[1]]) I_eff_pressure.append(d[i[0]]["I_eff_pressure"][i[1]]) I_eff_velocity.append(d[i[0]]["I_eff_velocity"][i[1]]) I_eff_combined.append(d[i[0]]["I_eff_combined"][i[1]]) # Prepare for exporting export = np.zeros(rows, dtype=[ ('var2', 'U6'), ('var3', float), ('var4', float), ('var5', float), ('var6', float), ('amp1', 'U6'), ('var7', float), ('amp2', 'U6'), ('var8', float), ('amp3', 'U6'), ('var9', float), ('amp4', 'U6'), ('var10', float), ('amp5', 'U6'), ('var11', float), ('amp6', 'U6'), ('var12', float), ('amp7', 'U6'), ('var13', float), ('amp8', 'U6'), ('var14', float) ]) # Prepare for exporting export['var2'] = num_method_name export['var3'] = diam_2d export['var4'] = diam_1d export['var5'] = diam_mortar export['var6'] = col_2d_estimate export['amp1'] = ampersend export['var7'] = col_1d_estimate export['amp2'] = ampersend export['var8'] = col_mortar_estimate export['amp3'] = ampersend export['var9'] = col_majorant export['amp4'] = ampersend export['var10'] = col_true_error_pressure export['amp5'] = ampersend export['var11'] = col_true_error_velocity export['amp6'] = ampersend export['var12'] = I_eff_pressure export['amp7'] = ampersend export['var13'] = I_eff_velocity export['amp8'] = ampersend export['var14'] = I_eff_combined # Formatting string fmt = "%6s %2.5f %2.5f %2.5f %2.2e %1s %2.2e %1s %2.2e " fmt += "%1s %2.2e %1s %2.2e %1s %2.2e %1s %2.2f %1s %2.2f %1s %2.2f" # Headers header = "num_method h_2d h_1d h_mortar eta_2d & eta_1d & eta_mortar & " header += "majorant & true_error_p & true_error_u & I_eff_p & I_eff_u & I_eff_pu" np.savetxt('validation2d_tex.txt', export, delimiter=',', fmt=fmt, header=header ) ``` #### File: paper_examples/ex52_validation3D/main.py ```python import numpy as np import porepy as pp import itertools from time import time from model import model #%% Functions def make_constrained_mesh(mesh_size=0.2): """ Creates an unstructured 3D mesh for a given target mesh size for the case of a single 2D vertical 
fracture embedded in a 3D domain Parameters ---------- mesh_size : float, optional Target mesh size. The default is 0.2. Returns ------- gb : PorePy Object Porepy grid bucket object. """ # Load fracture network: Fracture + Ghost Fractures network_3d = pp.fracture_importer.network_3d_from_csv("network.csv") # Create mesh_arg dictionary mesh_args = { "mesh_size_frac": mesh_size, "mesh_size_bound": mesh_size, "mesh_size_min": mesh_size / 10, } # Construct grid bucket ghost_fracs = list(np.arange(1, 25)) # 1 to 24 gb = network_3d.mesh(mesh_args, constraints=ghost_fracs) return gb #%% Defining mesh targets, numerical methods, and dictionary fields mesh_targets = np.array([0.3, 0.15, 0.075, 0.0375]) num_methods = ["TPFA", "MPFA", "RT0", "MVEM"] #%% Obtain grid buckets for each mesh size print("Assembling grid buckets...", end="") tic = time() grid_buckets = [] for h in mesh_targets: grid_buckets.append(make_constrained_mesh(h)) print(f"\u2713 Time {time() - tic}.\n") #%% Create dictionary and initialize fields d = {k: {} for k in num_methods} for method in num_methods: d[method] = { "mesh_size": [], "error_estimate_3d": [], "true_error_pressure_3d": [], "true_error_velocity_3d": [], "num_cells_3d": [], "error_estimate_2d": [], "true_error_pressure_2d": [], "true_error_velocity_2d": [], "num_cells_2d": [], "error_estimate_mortar": [], "true_error_pressure_mortar": [], "true_error_velocity_mortar": [], "num_cells_mortar": [], "majorant": [], "true_error_pressure": [], "true_error_velocity": [], "I_eff_pressure": [], "I_eff_velocity": [], "I_eff_combined": [], } #%% Populate fields (Warning: This loop may take considerable time) for i in itertools.product(num_methods, grid_buckets): # Print info in the console print("Solving with", i[0], "for mesh size:", i[1].diameter()) # Get hold of errors tic = time() ( h_max, error_estimate_3d, true_error_pressure_3d, true_error_velocity_3d, num_cells_3d, error_estimate_2d, true_error_pressure_2d, true_error_velocity_2d, num_cells_2d, error_estimates_mortar, true_error_pressure_mortar, true_error_velocity_mortar, num_cells_mortar, majorant, true_error_pressure, true_error_velocity, I_eff_pressure, I_eff_velocity, I_eff_combined, ) = model(i[1], i[0]) print(f"Done. 
Time {time() - tic}\n") # Store errors in the dictionary d[i[0]]["mesh_size"].append(h_max) d[i[0]]["error_estimate_3d"].append(error_estimate_3d) d[i[0]]["true_error_pressure_3d"].append(true_error_pressure_3d) d[i[0]]["true_error_velocity_3d"].append(true_error_velocity_3d) d[i[0]]["num_cells_3d"].append(num_cells_3d) d[i[0]]["error_estimate_2d"].append(error_estimate_2d) d[i[0]]["true_error_pressure_2d"].append(true_error_pressure_2d) d[i[0]]["true_error_velocity_2d"].append(true_error_velocity_2d) d[i[0]]["num_cells_2d"].append(num_cells_2d) d[i[0]]["error_estimate_mortar"].append(error_estimates_mortar) d[i[0]]["true_error_pressure_mortar"].append(true_error_pressure_mortar) d[i[0]]["true_error_velocity_mortar"].append(true_error_velocity_mortar) d[i[0]]["num_cells_mortar"].append(num_cells_mortar) d[i[0]]["majorant"].append(majorant) d[i[0]]["true_error_pressure"].append(true_error_pressure) d[i[0]]["true_error_velocity"].append(true_error_velocity) d[i[0]]["I_eff_pressure"].append(I_eff_pressure) d[i[0]]["I_eff_velocity"].append(I_eff_velocity) d[i[0]]["I_eff_combined"].append(I_eff_combined) #%% Exporting # Permutations rows = len(num_methods) * len(mesh_targets) # Initialize lists num_method_name = [] h_max = [] col_3d_estimate = [] col_2d_estimate = [] col_mortar_estimate = [] col_majorant = [] col_true_error_pressure = [] col_true_error_velocity = [] I_eff_pressure = [] I_eff_velocity = [] I_eff_combined = [] # Populate lists for i in itertools.product(num_methods, range(len(grid_buckets))): num_method_name.append(i[0]) h_max.append(d[i[0]]["mesh_size"][i[1]]) col_3d_estimate.append(d[i[0]]["error_estimate_3d"][i[1]]) col_2d_estimate.append(d[i[0]]["error_estimate_2d"][i[1]]) col_mortar_estimate.append(d[i[0]]["error_estimate_mortar"][i[1]]) col_majorant.append(d[i[0]]["majorant"][i[1]]) col_true_error_pressure.append(d[i[0]]["true_error_pressure"][i[1]]) col_true_error_velocity.append(d[i[0]]["true_error_velocity"][i[1]]) I_eff_pressure.append(d[i[0]]["I_eff_pressure"][i[1]]) I_eff_velocity.append(d[i[0]]["I_eff_velocity"][i[1]]) I_eff_combined.append(d[i[0]]["I_eff_combined"][i[1]]) # Prepare for exporting export = np.zeros( rows, dtype=[ ("var2", "U6"), ("var3", float), ("var4", float), ("var5", float), ("var6", float), ("var7", float), ("var8", float), ("var9", float), ("var10", float), ("var11", float), ("var12", float), ], ) # Declare column variables export["var2"] = num_method_name export["var3"] = h_max export["var4"] = col_3d_estimate export["var5"] = col_2d_estimate export["var6"] = col_mortar_estimate export["var7"] = col_majorant export["var8"] = col_true_error_pressure export["var9"] = col_true_error_velocity export["var10"] = I_eff_pressure export["var11"] = I_eff_velocity export["var12"] = I_eff_combined # Formatting string fmt = "%6s %2.3f %2.2e %2.2e %2.2e %2.2e %2.2e %2.2e %2.2f %2.2f %2.2f" # Headers header = "num_method h_max eta_3d eta_2d eta_mortar majorant true_error_p " header += "true_error_u I_eff_p I_eff_u I_eff_pu" # Writing into txt np.savetxt("validation3d.txt", export, delimiter=",", fmt=fmt, header=header) #%% Exporting to LaTeX # Permutations rows = len(num_methods) * len(mesh_targets) # Intialize lists ampersend = [] for i in range(rows): ampersend.append('&') num_method_name = [] h_max = [] col_3d_estimate = [] col_2d_estimate = [] col_mortar_estimate = [] col_majorant = [] col_true_error_pressure = [] col_true_error_velocity = [] I_eff_pressure = [] I_eff_velocity = [] I_eff_combined = [] # Populate lists for i in 
itertools.product(num_methods, range(len(grid_buckets))): num_method_name.append(i[0]) h_max.append(d[i[0]]["mesh_size"][i[1]]) col_3d_estimate.append(d[i[0]]["error_estimate_3d"][i[1]]) col_2d_estimate.append(d[i[0]]["error_estimate_2d"][i[1]]) col_mortar_estimate.append(d[i[0]]["error_estimate_mortar"][i[1]]) col_majorant.append(d[i[0]]["majorant"][i[1]]) col_true_error_pressure.append(d[i[0]]["true_error_pressure"][i[1]]) col_true_error_velocity.append(d[i[0]]["true_error_velocity"][i[1]]) I_eff_pressure.append(d[i[0]]["I_eff_pressure"][i[1]]) I_eff_velocity.append(d[i[0]]["I_eff_velocity"][i[1]]) I_eff_combined.append(d[i[0]]["I_eff_combined"][i[1]]) exp = np.zeros(rows, dtype=[ ('var2', 'U6'), ('var3', float), ('var4', float), ('amp1', 'U6'), ('var5', float), ('amp2', 'U6'), ('var6', float), ('amp3', 'U6'), ('var7', float), ('amp4', 'U6'), ('var8', float), ('amp5', 'U6'), ('var9', float), ('amp6', 'U6'), ('var10', float), ('amp7', 'U6'), ('var11', float), ('amp8', 'U6'), ('var12', float)] ) # Declare column variables exp['var2'] = num_method_name exp['var3'] = h_max exp['var4'] = col_3d_estimate exp['amp1'] = ampersend exp['var5'] = col_2d_estimate exp['amp2'] = ampersend exp['var6'] = col_mortar_estimate exp['amp3'] = ampersend exp['var7'] = col_majorant exp['amp4'] = ampersend exp['var8'] = col_true_error_pressure exp['amp5'] = ampersend exp['var9'] = col_true_error_velocity exp['amp6'] = ampersend exp['var10'] = I_eff_pressure exp['amp7'] = ampersend exp['var11'] = I_eff_velocity exp['amp8'] = ampersend exp['var12'] = I_eff_combined # Formatting string fmt = "%6s %2.3f %2.2e %1s %2.2e %1s %2.2e %1s %2.2e " fmt += "%1s %2.2e %1s %2.2e %1s %2.2f %1s %2.2f %1s %2.2f" # Headers header = "num_method h_max eta_3d & eta_2d & eta_mortar & majorant " header += "& true_error_p & true_error_u & I_eff_p & I_eff_u & I_eff_pu" # Write into txt np.savetxt('validation3d_tex.txt', exp, delimiter=',', fmt=fmt, header=header, ) ``` #### File: paper_examples/ex61_benchmark2D/create_grid.py ```python import numpy as np import porepy as pp # Disclaimer: Script copied or partially modified from 10.5281/zenodo.3374624 def create_grid(mesh_size, is_coarse, refine_1d, tol): # load the network file_name = "network_split.csv" domain = {"xmin": 0, "xmax": 1, "ymin": 0, "ymax": 1} network = pp.fracture_importer.network_2d_from_csv(file_name, domain=domain) # in the case of coarsened grid consider a first finer grid if is_coarse: if mesh_size == 0.06: mesh_size = 0.6 * 0.06 elif mesh_size == 0.025: mesh_size = 0.7 * 0.025 else: # 0.0125 mesh_size = 0.7 * 0.0125 # create the mesh mesh_kwargs = {"mesh_size_frac": mesh_size, "mesh_size_min": mesh_size / 20} # assign the flag for the low permeable fractures gb = network.mesh(mesh_kwargs) # coarsened the grid if is_coarse: partition = pp.coarsening.create_aggregations(gb) partition = pp.coarsening.reorder_partition(partition) pp.coarsening.generate_coarse_grid(gb, partition) else: partition = None # refine the 1d grids if refine_1d: g_map = {} for g, _ in gb: if g.dim == 1: g_map[g] = pp.refinement.remesh_1d(g, g.num_nodes * 2) # gb = pp.mortars.replace_grids_in_bucket(gb, g_map, {}, tol) gb.replace_grids(g_map, {}, tol) gb.assign_node_ordering() # set the flag _set_flag(gb, tol) return gb, partition def _set_flag(gb, tol): # set the key for the low peremable fractures gb.add_node_props("is_low") gb.add_node_props("frac_num") for g, d in gb: d["is_low"] = False d["frac_num"] = -1 if g.dim == 1: f_0 = (g.nodes[0, :] - 0.05) / (0.2200 - 0.05) - ( g.nodes[1, :] - 0.4160 
) / (0.0624 - 0.4160) if np.sum(np.abs(f_0)) < tol: d["frac_num"] = 0 f_1 = (g.nodes[0, :] - 0.05) / (0.2500 - 0.05) - ( g.nodes[1, :] - 0.2750 ) / (0.1350 - 0.2750) if np.sum(np.abs(f_1)) < tol: d["frac_num"] = 1 f_2 = (g.nodes[0, :] - 0.15) / (0.4500 - 0.15) - ( g.nodes[1, :] - 0.6300 ) / (0.0900 - 0.6300) if np.sum(np.abs(f_2)) < tol: d["frac_num"] = 2 f_3 = (g.nodes[0, :] - 0.15) / (0.4 - 0.15) - (g.nodes[1, :] - 0.9167) / ( 0.5 - 0.9167 ) if np.sum(np.abs(f_3)) < tol: d["frac_num"] = 3 d["is_low"] = True f_4 = (g.nodes[0, :] - 0.65) / (0.849723 - 0.65) - ( g.nodes[1, :] - 0.8333 ) / (0.167625 - 0.8333) if np.sum(np.abs(f_4)) < tol: d["frac_num"] = 4 d["is_low"] = True f_5 = (g.nodes[0, :] - 0.70) / (0.849723 - 0.70) - ( g.nodes[1, :] - 0.2350 ) / (0.167625 - 0.2350) if np.sum(np.abs(f_5)) < tol: d["frac_num"] = 5 f_6 = (g.nodes[0, :] - 0.60) / (0.8500 - 0.60) - ( g.nodes[1, :] - 0.3800 ) / (0.2675 - 0.3800) if np.sum(np.abs(f_6)) < tol: d["frac_num"] = 6 f_7 = (g.nodes[0, :] - 0.35) / (0.8000 - 0.35) - ( g.nodes[1, :] - 0.9714 ) / (0.7143 - 0.9714) if np.sum(np.abs(f_7)) < tol: d["frac_num"] = 7 f_8 = (g.nodes[0, :] - 0.75) / (0.9500 - 0.75) - ( g.nodes[1, :] - 0.9574 ) / (0.8155 - 0.9574) if np.sum(np.abs(f_8)) < tol: d["frac_num"] = 8 f_9 = (g.nodes[0, :] - 0.15) / (0.4000 - 0.15) - ( g.nodes[1, :] - 0.8363 ) / (0.9727 - 0.8363) if np.sum(np.abs(f_9)) < tol: d["frac_num"] = 9 # we set know also the flag for the intersection, we need to go first through the # 0-dim grids and set there the is low and after to the edges gb.add_edge_props("is_low") for _, d in gb.edges(): d["is_low"] = False for e, d in gb.edges(): gl, gh = gb.nodes_of_edge(e) if gl.dim == 0 and gb.node_props(gh, "is_low"): gb.set_node_prop(gl, "is_low", True) # modify the key only for certain fractures for e, d in gb.edges(): gl, gh = gb.nodes_of_edge(e) if gl.dim == 1 and gb.node_props(gl, "is_low"): d["is_low"] = True ``` #### File: paper_examples/ex61_benchmark2D/solvers.py ```python import scipy.sparse as sps import numpy as np import porepy as pp # Disclaimer: Script copied or partially modified from 10.5281/zenodo.3374624 def run_flow(gb, partition, folder): grid_variable = "pressure" flux_variable = "flux" mortar_variable = "mortar_flux" # Identifier of the discretization operator on each grid diffusion_term = "diffusion" # Identifier of the discretization operator between grids coupling_operator_keyword = "coupling_operator" # Loop over the nodes in the GridBucket, define primary variables and discretization schemes for g, d in gb: # retrieve the scheme discr = d["discr"] # Assign primary variables on this grid. It has one degree of freedom per cell. d[pp.PRIMARY_VARIABLES] = {grid_variable: discr["dof"]} # Assign discretization operator for the variable. 
d[pp.DISCRETIZATION] = {grid_variable: {diffusion_term: discr["scheme"]}} # Loop over the edges in the GridBucket, define primary variables and discretizations for e, d in gb.edges(): # The mortar variable has one degree of freedom per cell in the mortar grid d[pp.PRIMARY_VARIABLES] = {mortar_variable: {"cells": 1}} # edge discretization discr1 = gb.node_props(e[0], pp.DISCRETIZATION)[grid_variable][diffusion_term] discr2 = gb.node_props(e[1], pp.DISCRETIZATION)[grid_variable][diffusion_term] edge_discretization = pp.RobinCoupling("flow", discr1, discr2) # The coupling discretization links an edge discretization with variables # and discretization operators on each neighboring grid d[pp.COUPLING_DISCRETIZATION] = { coupling_operator_keyword: { e[0]: (grid_variable, diffusion_term), e[1]: (grid_variable, diffusion_term), e: (mortar_variable, edge_discretization), } } assembler = pp.Assembler(gb) assembler.discretize() # Assemble the linear system, using the information stored in the GridBucket A, b = assembler.assemble_matrix_rhs() x = sps.linalg.spsolve(A, b) assembler.distribute_variable(x) for g, d in gb: discr = d[pp.DISCRETIZATION][grid_variable][diffusion_term] pressure = discr.extract_pressure(g, d[pp.STATE][grid_variable], d).copy() flux = discr.extract_flux(g, d[pp.STATE][grid_variable], d).copy() d[pp.STATE][grid_variable] = pressure d[pp.STATE][flux_variable] = flux # Uncomment to export to PARAVIEW # _export_flow(gb, partition, folder) def _export_flow(gb, partition, folder): for g, d in gb: d[pp.STATE]["is_low"] = d["is_low"] * np.ones(g.num_cells) d[pp.STATE]["frac_num"] = d["frac_num"] * np.ones(g.num_cells) # in the case of partition for g, d in gb: if g.dim == 2 and partition: g_old, subdiv = partition[g] d[pp.STATE]["pressure"] = d[pp.STATE]["pressure"][subdiv] d[pp.STATE]["is_low"] = d[pp.STATE]["is_low"][subdiv] d[pp.STATE]["frac_num"] = d[pp.STATE]["frac_num"][subdiv] gb.update_nodes({g: g_old}) break save = pp.Exporter(gb, "sol", folder_name=folder, binary=False) save.write_vtk(["pressure", "is_low", "frac_num"]) ``` #### File: src/mdestimates/_class.py ```python from typing import ( Any, Tuple, Dict, Generator, List, Iterable, Callable, Union, TypeVar, Generic, ) import numpy as np import porepy as pp class ErrorEstimate(): """ Parent class for computation of a posteriori error estimates for solutions of the incompressible flow in mixed-dimensional geometries. """ def __init__( self, gb, kw="flow", sd_operator_name="diffusion", p_name="pressure", flux_name="flux", lam_name="mortar_flux", estimates_kw="estimates" ): self.gb = gb self.kw = kw self.sd_operator_name = sd_operator_name self.p_name = p_name self.flux_name = flux_name self.lam_name = lam_name self.estimates_kw = estimates_kw def __str__(self): return "Error estimate object" def __repr__(self): return ( "Error estimate object with atributes: " + "\n" + " Model: " + self.kw + "\n" + " Subdomain operator: " + self.sd_operator_name + "\n" + " Subdomain variable: " + self.p_name + "\n" + " Flux variable: " + self.flux_name + "\n" + " Interface variable: " + self.lam_name + "\n" ) def _init_estimates_data_keyword(self): """ Private method that initializes the keyword [self.estimates_kw] inside the data dictionary for all nodes and edges of the entire grid bucket. Returns ------- None. 
""" # Loop through all the nodes for g, d in self.gb: d[self.estimates_kw] = {} # And, loop through all the edges for e, d_e in self.gb.edges(): d_e[self.estimates_kw] = {} return None def estimate_error(self): """ Main method to estimate the a posteriori errors. This method relies on other private methods (see below). - GENERAL ALGORITHM OVERVIEW - [1] Flux-related calculations # 1.1 Compute full flux for each node of the grid bucket, and store them in d["estimates"]["full_flux"] # 1.2 Perform reconstruction of the subdomain velocities using RT0 extension of the normal fluxes and store them in d["estimates"]["rec_u"] [2] Pressure-related calculations # 2.1 Reconstruct the pressure. Perform a P1 reconstruction of the subdomain pressures using the inverse of the local pressure gradient. The reconstructed pressure is stored in d['estimates']["rec_p"]. [3] Computation of the upper bounds and norms # 3.1 Compute errors for the entire grid bucket. The errors (squared) are stored element-wise under d[self.estimates_kw]["diffusive_error"] and d[self.estimates_kw]["residual_error"], respectivley. Returns ------- None. """ # Velocity reconstruction methods from mdestimates._velocity_reconstruction import ( compute_full_flux, reconstruct_velocity, ) # Pressure reconstruction methods from mdestimates._pressure_reconstruction import ( reconstruct_pressure, ) # Error evaluation methods from mdestimates._error_evaluation import compute_error_estimates # Populating data dicitionaries with self.estimates_kw self._init_estimates_data_keyword() print("Performing velocity reconstruction...", end="") # 1.1: Compute full flux compute_full_flux(self) # 1.2: Reconstruct velocity reconstruct_velocity(self) print("\u2713") print("Performing pressure reconstruction...", end="") # 2.1: Reconstruct pressure reconstruct_pressure(self) print("\u2713") print("Computing upper bounds...", end="") # 3.1 Evaluate norms and compute upper bounds compute_error_estimates(self) print("\u2713") def transfer_error_to_state(self): """ Transfers the results from d[self.estimates_kw] to d[pp.STATE] for each node and edge of the grid bucket. This method is especially useful for exporting the results to Paraview via pp.Exporter. Raises ------ ValueError If the errors have not been not been computed. Returns ------- None. """ errors = ["diffusive_error", "residual_error"] def transfer(d, error_type): if error_type in d[self.estimates_kw]: d[pp.STATE][error_type] = d[self.estimates_kw][error_type].copy() else: raise ValueError("Estimates must be computed first") # Transfer errors from subdomains for g, d in self.gb: if g.dim == 0: continue for error in errors: transfer(d, error) # Transfer error from interfaces for _, d_e in self.gb.edges(): for error in errors: if error == "diffusive_error": transfer(d_e, error) return None def get_majorant(self): """ Computes the majorant for the whole fracture network. Returns ------- majorant : Scalar Global error estimate. """ subdomain_diffusive_squared = 0 mortar_diffusive_squared = 0 # Errors associated to subdomains for g, d in self.gb: if g.dim != 0: subdomain_diffusive_squared += d[self.estimates_kw]["diffusive_error"].sum() # Errors associated to interfaces for _, d in self.gb.edges(): mortar_diffusive_squared += d[self.estimates_kw]["diffusive_error"].sum() # Obtaining the majorant majorant = np.sqrt(subdomain_diffusive_squared + mortar_diffusive_squared) return majorant def get_scaled_majorant(self): """ Get the permeability-scaled majorant for the whole fracture network. 
Returns ------- scaled_majorant : Scalar Scaled value of the majorant. """ # Determine the highest permeability in the fracture network scaling_factors = [] for g, d in self.gb: if g.dim != 0: perm = d[pp.PARAMETERS]["flow"]["second_order_tensor"].values perm = perm[0][0] scaling_factors.append(np.max(perm)) for e, d in self.gb.edges(): k_norm = d[pp.PARAMETERS]["flow"]['normal_diffusivity'] scaling_factors.append(np.max(k_norm)) scale_factor = np.max(scaling_factors) # Perform scaling scaled_majorant = scale_factor ** (-0.5) * self.get_majorant() return scaled_majorant def get_local_errors(self, g, d): """ Computes the sum of the scaled local errors of a subdomain or interface Parameters ---------- g : PorePy object Grid (for subdomains) or mortar grid (for interfaces) d : dictionary Data dictionary containing the estimates Raises ------ ValueError If the errors have not been computed. If there are any inconsistency in the grids dimensions Returns ------- local_error : Scalar Local error, i.e, sum of individual (element) squared errors. """ # Boolean variable to check if it is a mortar grid or not is_mortar = issubclass(type(g), pp.MortarGrid) # Check if the errors are stored in the data dictionary if "diffusive_error" not in d[self.estimates_kw]: raise ValueError("Errors must be computed first") # Check dimensions of subdomain and mortar grids if is_mortar and g.dim not in [0, 1, 2]: raise ValueError("Invalid dimension, expected 0D, 1D, or 2D.") elif not is_mortar and g.dim not in [1, 2, 3]: raise ValueError("Invalid dimension, expected 1D, 2D, or 3D") # Summing the errors diffusive_error = d[self.estimates_kw]["diffusive_error"].sum() return np.sqrt(diffusive_error) def get_scaled_local_errors(self, g, d): """ Computes the sum of the scaled local errors of a subdomain or interface Parameters ---------- g : PorePy object Grid (for subdomains) or mortar grid (for interfaces) d : dictionary Data dictionary containing the estimates Raises ------ ValueError If the errors have not been computed. If there are any inconsistency in the grids dimensions Returns ------- local_error : Scalar Local error, i.e, sum of individual (element) squared errors. """ # Boolean variable to check if it is a Mortar grid or not is_mortar = issubclass(type(g), pp.MortarGrid) # Check if the errors are stored in the data dictionary if "diffusive_error" not in d[self.estimates_kw]: raise ValueError("Errors must be computed first") # Check dimensions of subdomain and mortar grids if is_mortar and g.dim not in [0, 1, 2]: raise ValueError("Invalid dimension, expected 0D, 1D, or 2D.") elif not is_mortar and g.dim not in [1, 2, 3]: raise ValueError("Invalid dimension, expected 1D, 2D, or 3D") # Summing the errors if is_mortar: k_perp = d[pp.PARAMETERS][self.kw]['normal_diffusivity'] diffusive_error = np.sum((1 / k_perp) * d[self.estimates_kw]["diffusive_error"]) else: perm = d[pp.PARAMETERS]["flow"]["second_order_tensor"].values perm = perm[0][0] diffusive_error = np.sum((1 / perm) * d[self.estimates_kw]["diffusive_error"]) return np.sqrt(diffusive_error) def print_summary(self, scaled=True): """ Wrapper for printing a summary of the global and local errors for the whole fracture network classified by topological dimension. By default, the scaled version of the errors are printed. Parameters ---------- scaled: Bool Wheter the scaled version of the errors will be printed or not. The default is True. Returns ------- None. 
""" if scaled: self._print_summary_scaled() else: self._print_summary_original() return None def _print_summary_original(self): """ Prints summary of the global and local errors Returns ------- None. """ # Get hold of max and min dims dim_max = self.gb.dim_max() dim_min = self.gb.dim_min() # Obtain dimensions of subdomains and interfaces dims = np.arange(start=dim_min, stop=dim_max + 1) subdomain_dims = dims[::-1] if dim_min == 0: subdomain_dims = subdomain_dims[:subdomain_dims.size - 1] interface_dims = dims[::-1] # sort interface_dims = interface_dims[1::] # ignore first element # Get scaled majorant and print it majorant = self.get_majorant print("Majorant:", majorant) # Print summary of subdomain errors for dim in subdomain_dims: g_list = self.gb.grids_of_dimension(dim) error = 0 for g in g_list: d = self.gb.node_props(g) error += self.get_local_errors(g, d) print(f'{dim}D Subdomain error: {error}') # Print summary of interface errors for dim in interface_dims: error = 0 for _, d in self.gb.edges(): mg = d['mortar_grid'] if mg.dim == dim: error += self.get_local_errors(mg, d) print(f'{dim}D Interface error: {error}') return None def _print_summary_scaled(self): """ Prints summary of scaled global and local errors Returns ------- None. """ # Get hold of max and min dims dim_max = self.gb.dim_max() dim_min = self.gb.dim_min() # Obtain dimensions of subdomains and interfaces dims = np.arange(start=dim_min, stop=dim_max + 1) subdomain_dims = dims[::-1] if dim_min == 0: subdomain_dims = subdomain_dims[:subdomain_dims.size - 1] interface_dims = dims[::-1] # sort interface_dims = interface_dims[1::] # ignore first element # Get scaled majorant and print it scaled_majorant = self.get_scaled_majorant() print("Scaled majorant:", scaled_majorant) # Print summary of subdomain errors for dim in subdomain_dims: g_list = self.gb.grids_of_dimension(dim) error = 0 for g in g_list: d = self.gb.node_props(g) error += self.get_scaled_local_errors(g, d) print(f'{dim}D Subdomain scaled error: {error}') # Print summary of interface errors for dim in interface_dims: error = 0 for _, d in self.gb.edges(): mg = d['mortar_grid'] if mg.dim == dim: error += self.get_scaled_local_errors(mg, d) print(f'{dim}D Interface scaled error: {error}') return None ```
{ "source": "jhabr/segmentation-metrics", "score": 3 }
#### File: segmentation-metrics/segmentation_metrics/core.py ```python import numpy as np class BinarySegmentationMetrics: """ This class is responsible for calculating simple metrics for one pair of ground truth mask and its predicted mask. :param jaccard_threshold: float Threshold value for the jaccard index. Values below this value will be calculated as 0. TP (true positives): pixels correctly segmented as foreground TN (true negatives): pixels correctly detected as background FP (false positives): pixels falsely segmented as foreground FN (false negatives): pixels falsely detected as background """ def __init__(self, jaccard_threshold: float = 0.0): self.n_mask_pixels = 0 self.n_background_pixels = 0 self.tp = 0 self.tn = 0 self.fp = 0 self.fn = 0 self.jaccard_threshold = jaccard_threshold def calculate(self, mask: np.ndarray, predicted_mask: np.ndarray): """ Calculate pixel-wise tp, tn, fp and fn. :param mask: np.ndarray The ground truth mask. :param predicted_mask: np.ndarray The predicted mask. :return: BinarySegmentationMetrics Update instance of BinarySegmentationMetrics """ assert mask is not None and predicted_mask is not None, "Mask and predicted mask must not be None." self.__calculate_positives_negatives(mask, predicted_mask) def __calculate_positives_negatives(self, mask: np.ndarray, predicted_mask: np.ndarray): assert mask.shape == predicted_mask.shape assert len(mask.shape) == len(predicted_mask.shape) == 3 # assert binary mask assert mask.shape[-1] == 1 and predicted_mask.shape[-1] == 1 # reshape to only 2 dimensions mask = mask.squeeze() predicted_mask = predicted_mask.squeeze() self.n_mask_pixels = np.count_nonzero(mask == 1.0) self.n_background_pixels = np.count_nonzero(mask == 0.0) height, width = mask.shape tp, tn, fp, fn = 0, 0, 0, 0 for i in range(height): for j in range(width): mask_pixel_value = mask[i][j] predicted_mask_pixel_value = predicted_mask[i][j] if mask_pixel_value == predicted_mask_pixel_value: if mask_pixel_value == 1: tp += 1 else: tn += 1 else: if predicted_mask_pixel_value == 0: fn += 1 else: fp += 1 assert tp + tn + fp + fn == height * width, "Sum of all pixels is not equal to the resolutions of the image." self.tp = tp self.tn = tn self.fp = fp self.fn = fn @property def jaccard_similarity_index(self) -> float: denominator = (self.tp + self.fp + self.fn) if denominator == 0: return 0 return self.tp / denominator @property def threshold_jaccard_index(self) -> float: if self.jaccard_similarity_index >= self.jaccard_threshold: return self.jaccard_similarity_index else: return 0.0 @property def dice(self) -> float: denominator = (2 * self.tp + self.fn + self.fp) if denominator == 0: return 0 return (2 * self.tp) / denominator @property def sensitivity(self) -> float: denominator = (self.tp + self.fn) if denominator == 0: return 0 return self.tp / denominator @property def specificity(self) -> float: denominator = (self.tn + self.fp) if denominator == 0: return 0 return self.tn / denominator @property def accuracy(self) -> float: denominator = (self.tp + self.fp + self.tn + self.fn) if denominator == 0: return 0 return (self.tp + self.tn) / denominator ``` #### File: segmentation-metrics/segmentation_metrics/metrics.py ```python from typing import List import numpy as np from segmentation_metrics.core import BinarySegmentationMetrics def calculate(masks: List[np.ndarray], predicted_masks: List[np.ndarray], jaccard_threshold: float = 0.65) -> dict: """ Calculates the metrics. 
:param masks: list List of masks (ground truth) :param predicted_masks: List of predicted masks :param jaccard_threshold: Threshold Jaccard Index will return 0 for values below the threshold :return: dict Calculate metrics n_true_positives_%: percentage of true positives (out of all true positives) n_true_negatives_%: percentage of true negatives (out of all true negatives) """ assert masks is not None and predicted_masks is not None, "Masks and predicted masks should not be None." metrics = BinarySegmentationMetrics(jaccard_threshold=jaccard_threshold) mask_pixels, background_pixels, tp, tn, fp, fn = [], [], [], [], [], [] threshold_jaccard_indexes, jaccard_similarity_indexes, dice_scores = [], [], [] accuracies, sensitivities, specificities = [], [], [] for i in range(len(masks)): mask = masks[i] predicted_mask = predicted_masks[i] metrics.calculate(mask=mask, predicted_mask=predicted_mask) mask_pixels.append(metrics.n_mask_pixels) background_pixels.append(metrics.n_background_pixels) tp.append(metrics.tp) tn.append(metrics.tn) fp.append(metrics.fp) fn.append(metrics.fn) threshold_jaccard_indexes.append(metrics.threshold_jaccard_index) jaccard_similarity_indexes.append(metrics.jaccard_similarity_index) dice_scores.append(metrics.dice) sensitivities.append(metrics.sensitivity) specificities.append(metrics.specificity) accuracies.append(metrics.accuracy) return { "n_images": len(masks), "n_true_positives": sum(tp), "n_true_positives_%": sum(tp) / sum(mask_pixels), "n_true_negatives": sum(tn), "n_true_negatives_%": sum(tn) / sum(background_pixels), "n_false_positives": sum(fp), "n_false_negatives": sum(fn), "threshold_jaccard_index": np.mean(threshold_jaccard_indexes), "jaccard_similarity_index_(iou_score)": np.mean(jaccard_similarity_indexes), "dice_coefficient": np.mean(dice_scores), "sensitivity": np.mean(sensitivities), "specificity": np.mean(specificities), "accuracy": np.mean(accuracies) } ``` #### File: segmentation-metrics/tests/test_segmentation_metrics.py ```python import unittest import numpy as np import segmentation_metrics as sm class SegmentationMetricsTests(unittest.TestCase): @property def mask(self): mask = np.array( [[1., 1., 1., 1., 1.], [1., 1., 1., 1., 1.], [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]] ) mask = np.expand_dims(mask, axis=2) return mask @property def mask2(self): mask2 = np.array( [[0., 0., 0., 0., 0.], [0., 1., 1., 1., 0.], [0., 1., 1., 1., 0.], [0., 1., 1., 1., 0.], [0., 0., 0., 0., 0.]] ) mask2 = np.expand_dims(mask2, axis=2) return mask2 @property def predicted_mask(self): predicted_mask = np.array( [[1., 1., 1., 1., 1.], # 5 true positives [1., 0., 0., 0., 0.], # 1 true positive, 4 false negatives [0., 0., 0., 0., 0.], # 5 true negatives [0., 0., 0., 0., 0.], # 5 true negatives [1., 1., 1., 0., 0.]] # 3 false positives, 2 true negatives ) predicted_mask = np.expand_dims(predicted_mask, axis=2) return predicted_mask @property def predicted_mask2(self): predicted_mask2 = np.array( [[0., 0., 0., 0., 0.], # 5 true negatives [0., 1., 1., 1., 0.], # 2 true negatives, 3 true positives [0., 1., 1., 1., 0.], # 2 true negatives, 3 true positives [0., 1., 1., 1., 0.], # 2 true negatives, 3 true positives [0., 0., 0., 0., 0.]] # 5 true negatives ) predicted_mask2 = np.expand_dims(predicted_mask2, axis=2) return predicted_mask2 def test_segmentation_metrics(self): metrics = sm.calculate([self.mask], [self.predicted_mask]) self.assertEqual(metrics["n_images"], 1) self.assertEqual(metrics["n_true_positives"], 6) 
self.assertEqual(metrics["n_true_positives_%"], 0.6) self.assertEqual(metrics["n_true_negatives"], 12) self.assertEqual(metrics["n_true_negatives_%"], 0.8) self.assertEqual(metrics["n_false_positives"], 3) self.assertEqual(metrics["n_false_negatives"], 4) self.assertEqual(metrics["threshold_jaccard_index"], 0.0) self.assertEqual(metrics["jaccard_similarity_index_(iou_score)"], 0.46153846153846156) self.assertEqual(metrics["dice_coefficient"], 0.631578947368421) self.assertEqual(metrics["sensitivity"], 0.6) self.assertEqual(metrics["specificity"], 0.8) self.assertEqual(metrics["accuracy"], 0.72) def test_segmentation_metrics_2(self): metrics = sm.calculate([self.mask2], [self.predicted_mask2]) self.assertEqual(metrics["n_images"], 1) self.assertEqual(metrics["n_true_positives"], 9) self.assertEqual(metrics["n_true_positives_%"], 1.0) self.assertEqual(metrics["n_true_negatives"], 16) self.assertEqual(metrics["n_true_negatives_%"], 1.0) self.assertEqual(metrics["n_false_positives"], 0) self.assertEqual(metrics["n_false_negatives"], 0) self.assertEqual(metrics["threshold_jaccard_index"], 1.0) self.assertEqual(metrics["jaccard_similarity_index_(iou_score)"], 1.0) self.assertEqual(metrics["dice_coefficient"], 1.0) self.assertEqual(metrics["sensitivity"], 1.0) self.assertEqual(metrics["specificity"], 1.0) self.assertEqual(metrics["accuracy"], 1.0) def test_segmentation_metrics_batch(self): masks = [self.mask, self.mask2] predicted_masks = [self.predicted_mask, self.predicted_mask2] metrics = sm.calculate(masks, predicted_masks) self.assertEqual(metrics["n_images"], 2) self.assertEqual(metrics["n_true_positives"], 15) self.assertEqual(metrics["n_true_positives_%"], 0.7894736842105263) self.assertEqual(metrics["n_true_negatives"], 28) self.assertEqual(metrics["n_true_negatives_%"], 0.9032258064516129) self.assertEqual(metrics["n_false_positives"], 3) self.assertEqual(metrics["n_false_negatives"], 4) self.assertEqual(metrics["threshold_jaccard_index"], 0.5) self.assertEqual(metrics["jaccard_similarity_index_(iou_score)"], 0.7307692307692308) self.assertEqual(metrics["dice_coefficient"], 0.8157894736842105) self.assertEqual(metrics["sensitivity"], 0.8) self.assertEqual(metrics["specificity"], 0.9) self.assertEqual(metrics["accuracy"], 0.86) if __name__ == '__main__': unittest.main() ```
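The expected values in the first test can be recovered by hand from the pixel counts annotated in `predicted_mask`; the arithmetic below mirrors the formulas in `BinarySegmentationMetrics`.

```python
# Hand check of the first test case: 6 TP, 12 TN, 3 FP, 4 FN (see the mask comments).
tp, tn, fp, fn = 6, 12, 3, 4

jaccard = tp / (tp + fp + fn)                # 6 / 13  ~ 0.4615
dice = 2 * tp / (2 * tp + fn + fp)           # 12 / 19 ~ 0.6316
sensitivity = tp / (tp + fn)                 # 6 / 10  = 0.6
specificity = tn / (tn + fp)                 # 12 / 15 = 0.8
accuracy = (tp + tn) / (tp + tn + fp + fn)   # 18 / 25 = 0.72
# jaccard < 0.65 (the default threshold), so the threshold Jaccard index is 0.0
```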
{ "source": "jhadenfeldt/peek-a-bot", "score": 3 }
#### File: jhadenfeldt/peek-a-bot/app.py ```python import bot import json import hashlib import requests import os import asyncio import tinys3 from flask import Flask, request, make_response, render_template, send_from_directory, send_file from pyppeteer.launcher import launch from os.path import join, dirname from dotenv import load_dotenv from threading import Thread dotenv_path = join(dirname(__file__), '.env') load_dotenv(dotenv_path) pyBot = bot.Bot() slack = pyBot.client conn = tinys3.Connection(os.environ.get("S3_SECRET"), os.environ.get("S3_KEY"), tls=True, default_bucket='peek-a-bot') app = Flask(__name__, static_url_path='', static_folder='frontend/dist', template_folder='frontend/dist') def start_screenshot_worker(loop): asyncio.set_event_loop(loop) loop.run_forever() worker_loop = asyncio.new_event_loop() worker = Thread(target=start_screenshot_worker, args=(worker_loop,)) worker.start() async def create_screenshot(config, response_url): try: browser = await launch(headless=True, options={"ignoreHTTPSErrors": True}) page = await browser.newPage() await page.setViewport({ "width": config["width"], "height": config["height"] }); await page.goto(config["url"]) # Send the first response to let the user know the screenshot is being generated. # This is done after page.goto() to prevent multiple messages. payload = { "username": "Peek-a-Bot", "text": "Please wait a second while I'm generating the screenshot." } requests.post(response_url, data=json.dumps(payload)) filename = hashlib.sha256((os.environ.get("FILENAME_SALT") + config["url"] + str(config["width"]) + str(config["height"]) + str(config["fullPage"])).encode('utf-8')).hexdigest() filepath = 'data/{url}.jpg'.format(url=filename) await page.screenshot({'path': filepath, 'fullPage': config["fullPage"]}) await browser.close() # Upload the file to S3 and delete the local temp file file = open(filepath, 'rb') response = conn.upload(filepath, file) os.unlink(filepath) # Send the second response with the screenshot to the user payload = { "username": "Peek-a-Bot", "text": "Here you go:", "attachments": [{ "text": config["url"], "image_url": "{S3_INSTANCE}/{filepath}".format(S3_INSTANCE=os.environ.get("S3_INSTANCE"), filepath=filepath) }] } requests.post(response_url, data=json.dumps(payload)) except Exception: send_error(response_url) def send_error(response_url): payload = { "username": "Peek-a-Bot", "attachments": [{ "text": "Sorry, there seems to be an issue with your command.\nPlease make sure to pass the URL and the dimensions in the correct order:\n`/peek slack.com`, `/peek slack.com 1024 768` or `/peek slack.com all`", "color": "#cc0000" }] } requests.post(response_url, data=json.dumps(payload)) def parse_parameters(param_string, response_url): param_array = param_string.split() try: # Add http-protocol in case the user didn't provide one if (not param_array[0].startswith('http://') and not param_array[0].startswith('https://')): param_array[0] = 'http://' + param_array[0] parameters = { "url": param_array[0], "fullPage": False } # Set the correct size parameters depending on the user input parameters["width"] = 1920 if (len(param_array) > 1): if (param_array[1].isdigit()): parameters["width"] = int(param_array[1]) else: if (param_array[1] == "all"): parameters["fullPage"] = True if (len(param_array) > 2): parameters["height"] = int(param_array[2]) else: parameters["height"] = 1080 return parameters except (ValueError,IndexError): send_error(response_url) @app.route("/", methods=["GET"]) def pre_install(): client_id = 
pyBot.oauth["client_id"]
    scope = pyBot.oauth["scope"]
    return render_template("index.html", client_id=client_id, scope=scope)


@app.route("/thanks", methods=["GET", "POST"])
def thanks():
    code_arg = request.args.get('code')
    pyBot.auth(code_arg)
    return render_template("thanks.html")


@app.route("/listening", methods=["GET", "POST"])
def hears():
    slack_event = json.loads(request.data)

    if "challenge" in slack_event:
        return make_response(slack_event["challenge"], 200,
                             {"content_type": "application/json"})

    if pyBot.verification != slack_event.get("token"):
        message = "Invalid Slack verification token: %s \npyBot has: %s\n\n" \
            % (slack_event["token"], pyBot.verification)
        return make_response("[NO EVENT IN SLACK REQUEST]", 404,
                             {"X-Slack-No-Retry": 1})
    # NOTE: events that pass verification currently fall through without an
    # explicit response being returned here.


@app.route("/peek", methods=["GET", "POST"])
def peeks():
    if request.form:
        response_url = request.form["response_url"]
        screenshot_parameters = parse_parameters(request.form["text"], response_url)
        if screenshot_parameters:
            # asyncio.async was removed in Python 3.7; ensure_future is the
            # equivalent call for scheduling the coroutine on the worker loop.
            worker_loop.call_soon_threadsafe(asyncio.ensure_future,
                                             create_screenshot(screenshot_parameters,
                                                               response_url))
    return ('', 204)


if __name__ == '__main__':
    app.run(debug=False, ssl_context='adhoc')
```
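For reference, these are the dictionaries that `parse_parameters` builds for the command forms mentioned in `send_error`; the `response_url` argument is only used for error reporting.

```python
# Expected outputs of parse_parameters for typical /peek commands.
parse_parameters("slack.com 1024 768", response_url)
# -> {"url": "http://slack.com", "fullPage": False, "width": 1024, "height": 768}

parse_parameters("slack.com all", response_url)
# -> {"url": "http://slack.com", "fullPage": True, "width": 1920, "height": 1080}
```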
{ "source": "jhadjar/boutique", "score": 3 }
#### File: jhadjar/boutique/main.py ```python import os import jinja2 import webapp2 __title__ = 'boutique' __version__ = '1.0' __author__ = '<NAME>' __license__ = 'Apache 2.0' __copyright__ = 'Copyright 2016' PAGES = { 'main': 'main.html', 'product': 'product.html', '404': "404.html", } URLS = [ ('/.*', 'main.FrontHandler'), ] # Set execution environment: current directory, static files, etc. cwd = os.getcwd() templates = cwd + '/templates' jinja_env = jinja2.Environment( loader = jinja2.FileSystemLoader(templates), autoescape = True, ) class Handler(webapp2.RequestHandler): """Handler for rendering templates.""" def render(self, template, **data): """ Render `template` populated with `data`. Arguments: template: to render (ex: "page.html") data: key:values to populate template. Output: rendering. """ t = jinja_env.get_template(template) self.response.out.write(t.render(data)) class FrontHandler(Handler): """Home page handler""" def get(self): requested_path = self.request.path.lstrip('/') links = make_links(requested_path.encode('utf-8')) print links data = { 'categories': links, } try: self.render(PAGES['main'], **data) except TypeError: # The folder has no subfolders, it must be a product then. self.render(PAGES['product']) def make_links(directory): """ Return list of tuples [(link, name), ...] Example: 'category1' contains 'subcategory1', 'subcategory2'. This will return the following: [(/category1/subcategory1, subcategory1), (/category1/subcategory2, subcategory2)] It returns an empty string if directory has no subdirectories. """ try: directories = next(os.walk(os.path.join('products', directory)))[1] links = ['/' + os.path.join(directory, d) for d in directories] names = [os.path.basename(link) for link in links] return zip(links, names) if links else None except StopIteration as e: # Quick hack to handle nonexisting categories typed in the address bar. # Calling make_links with an empty string lists links in "products" return make_links('') def handle_404(request, response, exception): t = jinja_env.get_template('404.html') data = { 'exception': exception.status } response.out.write(t.render(data)) response.set_status(404) app = webapp2.WSGIApplication(URLS, debug=True) app.error_handlers[404] = handle_404 ```
{ "source": "jhadjar/dotdict", "score": 3 }
#### File: jhadjar/dotdict/dotdict.py
```python
messages = {
    'name_squatting': ('"{name}" is a reserved attribute name. '
                       'The original dictionary is stored there.'),
}


class DotDict(object):
    """Recursively turn a dictionary into a dot accessible object.

    Before::

        >>> dico = {'a': 5, 'b': 7, 'c': {'r': 8, 'd': 6}}
        >>> param = dico['c']['r']
        >>> param
        8

    After::

        >>> example = DotDict({'a': 5, 'b': 7, 'c': {'r': 8, 'd': 6}})
        >>> example
        DotDict({'a': 5, 'b': 7, 'c': {'r': 8, 'd': 6}})
        >>> example.a
        5
        >>> example.b
        7
        >>> example.c
        DotDict({'r': 8, 'd': 6})
        >>> example.c.r
        8
        >>> len(example)
        3
        >>> example2 = DotDict({'d': 6, 'r': 8})
        >>> example.c == example2
        True
    """

    def __init__(self, origin):
        self._origin = origin
        for key, value in origin.items():
            if key == '_origin':
                msg = messages['name_squatting']
                raise ValueError(msg.format(name='_origin'))
            if isinstance(value, dict):
                setattr(self, key, self.__class__(value))
            else:
                setattr(self, key, value)

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        setattr(self, key, value)

    def __iter__(self):
        for key, value in self.__dict__.items():
            if key != "_origin":
                yield key, value

    def __len__(self):
        return len(self._origin)

    def __eq__(self, other):
        try:
            return self._origin == other._origin
        except AttributeError:
            return False

    def __repr__(self):
        return "{self.__class__.__name__}({self._origin})".format(self=self)
```
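A short usage sketch for the class above; it assumes `DotDict` is in scope (for example, pasted into the same module), and the settings dictionary is an invented example.

```python
settings = {"db": {"host": "localhost", "port": 5432}, "debug": True}

config = DotDict(settings)
print(config.db.host)      # nested dicts become nested DotDicts -> "localhost"
print(config["db"].port)   # item access is routed through getattr -> 5432

config["debug"] = False    # __setitem__ is a thin wrapper around setattr
for key, value in config:  # iteration skips the internal _origin copy
    print(key, value)
```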
{ "source": "jhadjar/gae", "score": 3 }
#### File: jhadjar/gae/hello.py ```python import hmac import os import re import urllib2 import jinja2 import webapp2 from google.appengine.ext import db __title__ = 'gaefun' __version__ = '1.0' __author__ = '<NAME>' __license__ = 'Apache 2.0' __copyright__ = 'Copyright 2016' # Regular expressions for form input validation from CS253. Too limiting though. # TODO: Improve w/ https://www.owasp.org/index.php/Input_Validation_Cheat_Sheet. USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$") EMAIL_RE = re.compile(r"^[\S]+@[\S]+\.[\S]+$") SECRET = "tl;dr: This is the rythm of the night." PAGES = { 'main': "main.html", 'blog': "blog.html", 'lonepost': "lonepost.html", 'newpost': "newpost.html", 'signup': "signup.html", '404': "404.html", } URLS = [ # Using "lazy handlers" as per: http://webapp2.readthedocs.io/en/latest/ # guide/routing.html#guide-routing-lazy-handlers # This is normally done post handler definitions or it raises a NameError. # I want the URIs and their handlers to be at the top so a reader can have # see the big picture of what URI is handled by which handler as opposed to # having this at the bottom as in the usual way. One inconvenience of this # is that I had to prefix with 'hello.' because I couldn't figure out a way # to make forward declarations not ugly. # TODO: Find a better way to make it look like this ('/', 'MainPage') ('/', 'hello.MainPage'), ('/blog/?', 'hello.BlogHandler'), ('/blog/newpost', 'hello.NewPostHandler'), ('/blog/([0-9]+)', 'hello.PostHandler'), ('/signup', 'hello.SignupHandler'), ] # Set execution environment: current directory, static files, etc. patterns = os.getcwd() + '/patterns' jinja_env = jinja2.Environment( loader = jinja2.FileSystemLoader(patterns), autoescape = True, ) class Handler(webapp2.RequestHandler): """ Handler for rendering templates. """ def render(self, template, data={}): """ Render `template` populated with `data`. Arguments: template: to render (ex: "page.html") data: key:values to populate template. Output: rendering. """ t = jinja_env.get_template(template) self.response.headers.add_header( 'Cache-Control', 'public; max-age=6000', ) self.response.out.write(t.render(data)) def grab(self, *args): """ Grab request parameters and make a dictionary out of them. Why: - Usual way is to do this whenever we need to get params: username = self.request.get('username') password = self.request.get('password') - I want to make it simpler: data = grab('username', 'password') Arguments: parameter names Output: """ return {arg : self.request.get(arg, '') for arg in iter(args)} class MainPage(Handler): """ Home page handler """ def get(self): self.render(PAGES['main']) class PostHandler(Handler): def get(self, article_id): """ Display a single article. arguments: article_id: the post id in the datastore output: render single article """ data = { 'article': Article.get_by_id(int(article_id)), } self.render(PAGES['lonepost'], data) class BlogHandler(Handler): """ Display the blog in its entirety. """ def get(self): data = { 'articles': self.fetch_articles(), } # Forgive me Lord for what I am about to write.. 
self.response.headers.add_header( 'Set-Cookie', 'visits={}'.format(self.count_visits()) ) self.render(PAGES['blog'], data) def fetch_articles(self): return db.GqlQuery('SELECT * FROM Article ORDER BY created DESC') def parse_cookie(self): visits = 0 try: value, value_hash = self.request.cookies.get('visits').split('|') good_cookie = self.check_secure_val(value, value_hash) except ValueError: return 1 finally: visits += 1 return visits def count_visits(self): """ cookie format: visits=9|00df17ab7a4e013ea9811e0b9e2436b8 The number of visits before the pipe. The hash after the pipe. """ try: value, value_hash = self.request.cookies.get('visits').split('|') except ValueError: return 1 # cookie_value = self.request.cookies.get('visits', '0') def make_secure_val(self, value): return hmac.new(SECRET, value).hexdigest() def check_secure_val(self, value, value_hash): """ For a given pair value, hash provided by the user, check that the computed hash of this value correspond to the user provided hash. Arguments: - value: the value we're interested in. - value_hash: the hash of the value. Output: - Boolean of comparison between computed and provided hash. """ # Original is check_secure_val(h) and then splitting at the pipe '|' # I think it's better to avoid assumptions about format and stick to # what we know won't change: we're comparing two values. # I was going to do: # return (SECRET + value).hexdigest() == value_hash # But according to https://docs.python.org/2/library/hmac.html: # Warning: When comparing the output of hexdigest() to an # externally-supplied digest during a verification routine, it is # recommended to use the compare_digest() function instead of # the == operator to reduce the vulnerability to timing attacks. return hmac.compare_digest(value, value_hash) class SignupHandler(Handler): """ Handler for user signup/registration. """ def get(self): """ Render signup page. """ self.render(PAGES['signup']) def post(self): """ Handle signup form. """ data = self.grab( 'username', 'password', # 'verify', 'email', ) def verify(self, data): pass # TODO: Signup verifications not yet complete. # Toy a bit with that and App Engine's "users". class User(db.Model): username = db.StringProperty(required=True) password = db.StringProperty(required=True) email = db.EmailProperty() class NewPostHandler(Handler): """ Handler for new blog post submission page. Once the post is submitted, redirects to its permalink. """ def get(self): self.render(PAGES['newpost']) def post(self): data = self.grab('title', 'body') if len(data) != 2: data['error'] = 'Title *and* body' self.render(PAGES['newpost'], data) a = Article( title=data['title'], body=data['body'], ) a.put() permalink = str(a.key().id()) self.redirect('/blog/{}'.format(permalink)) class Article(db.Model): title = db.StringProperty(required=True) body = db.TextProperty(required=True) created = db.DateTimeProperty(auto_now_add=True) app = webapp2.WSGIApplication( URLS, debug=True, config={ 'hash_secret': SECRET, }) ```
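The `visits=<count>|<hash>` cookie scheme used by `make_secure_val` and `check_secure_val` above is HMAC signing of the cookie value with a server-side secret. A minimal Python 3 adaptation of the idea follows; the secret is a placeholder, and the explicit SHA-256 digest is an assumption on my part (the Python 2 code above relies on hmac's old default digest), so treat it as an illustration rather than the blog's exact scheme.

```python
import hashlib
import hmac

SECRET = b"replace-me-with-a-real-secret"


def make_secure_val(value):
    # Sign the raw cookie value and append the signature after a pipe.
    sig = hmac.new(SECRET, value.encode(), hashlib.sha256).hexdigest()
    return "{}|{}".format(value, sig)


def check_secure_val(cookie):
    value, _, sig = cookie.partition("|")
    expected = hmac.new(SECRET, value.encode(), hashlib.sha256).hexdigest()
    # compare_digest avoids leaking information through timing differences.
    return value if hmac.compare_digest(expected, sig) else None


cookie = make_secure_val("9")            # e.g. "9|3f5a..."
assert check_secure_val(cookie) == "9"
assert check_secure_val("10|" + "0" * 64) is None
```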
{ "source": "JHadley1406/Deployer", "score": 2 }
#### File: JHadley1406/Deployer/uwsgi_deploy.py
```python
import os


def uwsgi_deploy(project, project_directory, uwsgi_dir):
    uwsgi_file_data = "[uwsgi]\n" \
                      "uid = www-data\n" \
                      "gid = www-data\n" \
                      "pythonpath = /usr/local/lib/python2.7/dist-packages\n" \
                      "pythonpath = /usr/lib/python2.7\n" \
                      "project = {0}\n" \
                      "base = {1}\n\n" \
                      "chdir = %(base)\n" \
                      "home = %(base)\n" \
                      "wsgi-file = %(base)/%(project)/wsgi.py\n\n" \
                      "master = true\n" \
                      "processes = 2\n" \
                      "socket = %(base)/%(project).sock\n" \
                      "chmod-socket = 664\nvacuum = true\n" \
                      "no-site = true\n" \
                      "logto = /tmp/%(project).uwsgi"

    if not os.path.exists(uwsgi_dir):
        return "Could not find uwsgi directory, is uwsgi installed?\n"

    uwsgi_sites_dir = uwsgi_dir + 'sites/'
    os.makedirs(uwsgi_sites_dir, exist_ok=True)

    uwsgi_file_name = uwsgi_sites_dir + '{0}.ini'.format(project)
    if os.path.isfile(uwsgi_file_name):
        return "Uwsgi config file {0} already exists.\n".format(uwsgi_file_name)

    uwsgi_file = open(uwsgi_file_name, 'w')
    uwsgi_file.write(uwsgi_file_data.format(project, project_directory))
    uwsgi_file.close()

    return "Uwsgi configured for {0}\n".format(project)
```
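A hypothetical invocation of the helper above; the module name, project name and paths are examples only.

```python
from uwsgi_deploy import uwsgi_deploy

# Writes /etc/uwsgi/sites/mysite.ini (unless it already exists) and
# returns a human-readable status string either way.
message = uwsgi_deploy('mysite', '/srv/mysite', '/etc/uwsgi/')
print(message)
```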
{ "source": "jhaenchen/comment_roulette", "score": 3 }
#### File: jhaenchen/comment_roulette/commentroulette.py
```python
import praw
import random
from threading import Thread
import time
import requests
from pprint import pprint

r = praw.Reddit('comment_roulette'
                'Url:http://imtoopoorforaurl.com')
r.login()


def findNegCommentsAndDelete():
    while(1):
        comments = r.user.get_comments('new')
        for comment in comments:
            if(comment.score < 0):
                comment.delete()
        time.sleep(500)


thread = Thread(target=findNegCommentsAndDelete)
thread.start()

appendPhrase = '\n\n --------\n^[Huh?](https://www.reddit.com/r/comment_roulette/wiki/index) ^(I delete negative comments.)'

while True:
    try:
        print "checking...\n"
        # Check my messages
        for message in r.get_unread(unset_has_mail=True, update_user=True):
            if("/u/comment_roulette" in message.body.lower()):
                print "Got new message!"
                parent = r.get_info(thing_id=message.parent_id)
                file = open("responses.txt", 'r+')
                responseOptions = file.read().splitlines()
                message.reply(responseOptions[random.randrange(0, len(responseOptions))] + appendPhrase)
                if(parent.author.name == "agreeswithmebot"):
                    quote = 'Uh oh, we got ourselves a smart guy here! Try again :)'
                    newComment = message.reply(quote)
                    user = message.author
                    r.send_message(user.name, 'AgreesWithMeBot', 'Hey, you seem pretty clever. Maybe contribute to our [github](https://github.com/jhaenchen/agreeswithmebot)?')
                elif(isinstance(parent, praw.objects.Comment)):
                    messageText = parent.body
                    messageText = messageText.replace("/u/comment_roulette", "")
                    messageText = messageText.replace("\n", "\\n")
                    file.write(messageText + "\\n\\n -/u/" + message.author.name + "\n")
                    file.flush()
                file.close()
                message.mark_as_read()
        print "sleeping..."
        time.sleep(15)
    except requests.exceptions.ReadTimeout:
        print "Read timeout. Will try again."
    except praw.errors.Forbidden:
        print "Im banned from there."
        user = message.author
        message.mark_as_read()
        r.send_message(user.name, 'comment_roulette', 'Hey, I\'m banned from \\r\\' + message.subreddit.display_name + '. Sorry.')
    except praw.errors.HTTPException as e:
        pprint(vars(e))
        print(e)
        print "Http exception. Will try again."
    except praw.errors.RateLimitExceeded as error:
        print '\tSleeping for %d seconds' % error.sleep_time
        time.sleep(error.sleep_time)
    except requests.exceptions.ConnectionError:
        print "ConnectionError. Will try again."
    except praw.errors.APIException:
        print "API exception. Will try again."
    except (KeyboardInterrupt, SystemExit):
        print "Safe exit..."
        raise
    except:
        print "Unhandled exception, bail!"
        r.send_message('therealjakeh', 'comment_roulette', 'Just went down! Help! Exception: ')
        raise
```
{ "source": "Jhagrut/Sports-Bot", "score": 3 }
#### File: Code/No Longer in Use/getTweets.py ```python import twint import nest_asyncio import os import pandas as pd nest_asyncio.apply() def getTweets(): file = open('accountList.txt') text = file.readlines() file.close() userids = [userid.rstrip('\n') for userid in text] os.chdir(os.getcwd() + '\\TweetData') for usernames in userids: c = twint.Config() c.Username = usernames c.Limit = 100 c.Store_csv = True c.Output = usernames + ".csv" c.Hide_output=True twint.run.Search(c) os.chdir(os.getcwd()[:-10]) dataList = [pd.read_csv(os.getcwd() + '\\TweetData\\' + files) for files in os.listdir(os.getcwd() + '\\TweetData')] data = pd.read_csv('merged.csv') for i in range(len(dataList)): data = data.append(dataList[i]) data = data.drop_duplicates() data.to_csv('merged.csv', index=False) ``` #### File: Scraping/Code/scraping_tools.py ```python from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive import requests import time # for twint import twint import nest_asyncio def download_file_from_google_drive(id, destination): URL = "https://docs.google.com/uc?export=download" session = requests.Session() response = session.get(URL, params = { 'id' : id }, stream = True) token = get_confirm_token(response) if token: params = { 'id' : id, 'confirm' : token } response = session.get(URL, params = params, stream = True) save_response_content(response, destination) def get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith('download_warning'): return value return None def save_response_content(response, destination): CHUNK_SIZE = 32768 with open(destination, "wb") as f: for chunk in response.iter_content(CHUNK_SIZE): if chunk: # filter out keep-alive new chunks f.write(chunk) def download_files(): with open('download_ids_and_locations.csv') as file: ids_and_locations = [line.rstrip('\n').split(',') for line in file.readlines()] for i in range(len(ids_and_locations)): file_id = ids_and_locations[i][0] destination = ids_and_locations[i][1] download_file_from_google_drive(file_id, destination) def upload_files(): gauth = GoogleAuth() drive = GoogleDrive(gauth) with open('upload_ids_and_locations.csv') as file: ids_and_locations = [line.rstrip('\n').split(',') for line in file.readlines()] for i in range(len(ids_and_locations)): gfile = drive.CreateFile({'parents': [{'id': ids_and_locations[i][0]}], 'id': ids_and_locations[i][1]}) filename = ids_and_locations[i][2].split('/') filename = filename[len(filename)-1] gfile.SetContentFile(filename) gfile.Upload() time.sleep(5) def scrape_twitter(): nest_asyncio.apply() file = open('accountList.txt') text = file.readlines() file.close() userids = [userid.strip('\n') for userid in text] broken_ids = list() count=0 while count < len(userids) - 1: if count % 250 == 0: print(count, 'usernames reached.') try: c = twint.Config() c.Username = userids[count] c.Limit = 100 c.Store_csv = True c.Output = 'TweetData/' + userids[count] + ".csv" c.Hide_output = True twint.run.Search(c) del c time.sleep(15) count+=1 except ValueError: broken_ids.append(userids[count]) count+=1 ```
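The two CSV files read by `download_files` and `upload_files` above are not part of the snippet; from the indexing in the code, `download_ids_and_locations.csv` appears to hold `file_id,destination` rows and `upload_ids_and_locations.csv` appears to hold `parent_folder_id,file_id,local_path` rows. A sketch of producing them, with placeholder IDs and paths:

```python
import csv

download_rows = [
    # (Google Drive file id, local destination path)
    ("1AbCdEfGhIjKlMnOpQrStUvWxYz012345", "TweetData/example.csv"),
]

upload_rows = [
    # (Drive parent folder id, Drive file id, local path to upload)
    ("0FolderIdPlaceholder", "1FileIdPlaceholder", "TweetData/example.csv"),
]

with open("download_ids_and_locations.csv", "w", newline="") as f:
    csv.writer(f).writerows(download_rows)

with open("upload_ids_and_locations.csv", "w", newline="") as f:
    csv.writer(f).writerows(upload_rows)
```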
{ "source": "jhahitesh/myroductivitytool", "score": 2 }
#### File: myproductivitytool/common/services.py ```python import re import os import json import mimetypes import base64 import requests from myproductivitytool.common.models import * from myproductivitytool.common.serializers import * from myproductivitytool.common.utils import CommonUtils from django.utils import timezone from datetime import datetime, timedelta, date from django.conf.urls.static import static from django.http import HttpResponse from django.conf import settings from urllib.parse import urlparse from django.core.paginator import Paginator class ModelService(object): ALLOWED_ACTIONS_FOR_GET = [ 'view', 'count', 'get_context', 'view_multiple', 'get_filter_context' ] ALLOWED_ACTIONS_FOR_POST = [ 'add', 'update', 'delete' ] # It should never be used direclty from view, only service methods can use it. @classmethod def base_filter_service(cls, **kwargs): try: filter_query = kwargs.get('filter_query', dict()) exclude_query = kwargs.get('exclude_query', None) results = cls.entity.objects.filter(**filter_query) if exclude_query: results = results.exclude(**exclude_query) return results except Exception as e: print(e) return list() @classmethod def view_multiple(cls, **kwargs): try: results = cls.base_filter_service(**kwargs) page_query = kwargs.get('page_query', dict()) if not page_query: return {'success': True, 'results': cls.entity_serializer(results, many=True).data} service_response = cls.get_page_data(**{'page_query': page_query, 'results': results}) if not service_response.get('success', False): return service_response else: results = cls.entity_serializer(service_response.get('results'), many=True).data return service_response.update({ 'results': results }) except Exception as e: print(e) return {'success': False, 'message': 'We could not fetch {0}'.format(cls.entity_name)} @classmethod def view(cls ,**kwargs): try: results = cls.base_filter_service(**kwargs) if results: return {'success': True, 'result': cls.entity_serializer(results[0]).data} else: return {'success': False, 'message': 'We could not find the {0}'.format(cls.entity_name)} except Exception as e: print(e) return {'success': False, 'message': 'We could not fetch {0}'.format(cls.entity_name)} @classmethod def count(cls, **kwargs): try: count = cls.base_filter_service(**kwargs).count() return {'success': True, 'count': count} except Exception as e: print(e) return {'success': False, 'message': 'We could not count the number of {0}'.format(cls.entity_name)} @classmethod def get_context(cls, **kwargs): try: context = dict() instance_id = kwargs.get('instance_id', None) if instance_id: instance = cls.entity.objects.get(id=instance_id) instance = cls.entity_serializer(instance).data context.update({ 'instance': instance }) return {'success': True, 'context': context} except Exception as e: print(e) return {'success': False, 'message': 'We could not fetch context for {0}'.format(cls.entity_name)} @classmethod def get_filter_context(cls, **kwargs): try: context = dict() return {'success': True, 'context': context} except Exception as e: print(e) return {'success': False, 'message': 'We could not fetch filter context for {0}'.format(cls.entity_name)} @classmethod def construct_filter_query(cls, **kwargs): try: filter_query = dict() if not hasattr(cls, 'entity_field_to_query_map'): return {'success': True, 'filter_query': filter_query} for key, value in kwargs.items(): if not value: continue if cls.entity_field_to_query_map.get(key, None): query_data = cls.entity_field_to_query_map.get(key) if 
query_data.get('data_type') == 'boolean': if value == 'yes': value = True else: value = False elif query_data.get('data_type') == 'list': value = value.strip('[').strip(']').split(',') filter_query.update({query_data.get('field'): value}) return {'success': True, 'filter_query': filter_query} except Exception as e: print(e) return {'success': False, 'message': 'We could not filter {0}'.format(cls.entity_name)} @classmethod def construct_page_query(cls, **kwargs): try: page_query = dict() page_no = kwargs.get('pageNo', None) page_items = kwargs.get('pageItems', None) if page_no and page_items: page_query.update({ 'page_no': int(page_no), 'page_items': int(page_items) }) return {'success': True, 'page_query': page_query} except Exception as e: print(e) return {'success': False, 'message': 'We could not fetch {0}'.format(cls.entity_name)} @classmethod def validate_entity_identifiers(cls, **kwargs): try: error_messages = list() success = True instance = kwargs.get('instance') entity_data = kwargs.get('entity_data', dict()) if not cls.entity_identifiers: return {'success': True, 'message': '{0} validated'} for key, value in cls.entity_identifiers.items(): if entity_data[key] != getattr(instance, key): error_messages.append('{0} can not be updated for existing {1}'.format(value, cls.entity_name)) if error_messages: success = False return {'success': success, 'messages': error_messages} except Exception as e: print(e) return {'success': False, 'messages': ['We could not validate the entity identifiers']} @classmethod def get_page_data(cls, **kwargs): try: results = kwargs.get('results', list()) page_query = kwargs.get('page_query', dict()) page_no = int(page_query.get('pageNo', 1)) if not page_query.get('pageItems', None): return {'success': False, 'message': 'Number of items per page is missing'} page_items = int(page_query.get('pageItems')) paginator = Paginator(results, page_items) total_pages = paginator.num_pages page = paginator.page(page_no) page_data = page.object_list return {'success': True, 'results': page_data, 'total_pages': total_pages, 'page_no': page_no} except Exception as e: print(e) return {'success': False, 'message': 'We could not paginate {0} data'.format(cls.entity_name)} class BaseModelService(ModelService): ALLOWED_ACTIONS_FOR_GET = [ 'view', 'get_context', 'count_active', 'view_active_multiple', 'count_deleted', 'view_deleted_multiple', 'get_filter_context' ] ALLOWED_ACTIONS_FOR_POST = [ 'add', 'update', 'delete' ] entity_field_to_query_map = { 'isDeleted': {'field': 'is_deleted', 'data_type': 'boolean'} } @classmethod def count_active(cls, **kwargs): filter_query = kwargs.get('filter_query', dict()) filter_query.update({ 'is_deleted': False }) kwargs.update({ 'filter_query': filter_query }) return cls.count(**kwargs) @classmethod def view_active_multiple(cls, **kwargs): filter_query = kwargs.get('filter_query', dict()) filter_query.update({ 'is_deleted': False }) kwargs.update({ 'filter_query': filter_query }) return cls.view_multiple(**kwargs) @classmethod def count_deleted(cls, **kwargs): filter_query = kwargs.get('filter_query', dict()) filter_query.update({ 'is_deleted': True }) kwargs.update({ 'filter_query': filter_query }) return cls.count(**kwargs) @classmethod def view_deleted_multiple(cls, **kwargs): filter_query = kwargs.get('filter_query', dict()) filter_query.update({ 'is_deleted': True }) kwargs.update({ 'filter_query': filter_query }) return cls.view_multiple(**kwargs) @classmethod def is_deletable(cls, **kwargs): try: instance = kwargs.get('instance') # 
Do something return {'success': True, 'message': 'This {0} can be deleted'.format(cls.entity_name)} except Exception as e: print(e) return {'success': False, 'message': 'This {0} can not be deleted'.format(cls.entity_name)} @classmethod def delete(cls,**kwargs): try: print(kwargs) requestor = kwargs.get('requestor') instance_id = kwargs.get('instance_id') if not cls.entity.objects.filter(id=instance_id).exists(): return {'success': False, 'message': 'We could not find the {0} you are trying to delete'.format(cls.entity_name)} instance = cls.entity.objects.get(id=instance_id) validation_data = cls.is_deletable(**{'instance': instance}) if not validation_data.get('success'): return validation_data instance.is_deleted = True instance.deleted_on = timezone.now() instance.deleted_by = requestor instance.save() return {'success': True, 'message': '{0} deleted successfully'.format(cls.entity_name)} except Exception as e: print(e) return {'success': False, 'message': 'We could not delete the {0}'.format(cls.entity_name)} ``` #### File: myproductivitytool/common/utils.py ```python import base64 import time import requests import json import re import pytz import pprint from datetime import datetime from django.utils import timezone from urllib.parse import urlparse, urlencode, urlunparse, parse_qs from django.conf import settings from django.core.paginator import Paginator from django.utils.crypto import get_random_string from myproductivitytool.common.models import * class CommonUtils(object): @staticmethod def pprint(data, indent=4): pp = pprint.PrettyPrinter(indent=indent) pp.pprint(data) @staticmethod def get_time_data_from_seconds(total_seconds): try: seconds = total_seconds%60 total_minutes = int(total_seconds/60) minutes = total_minutes%60 total_hours = int(total_minutes/60) hours = total_hours%24 days = int(total_hours/24) return { 'seconds': seconds, 'minutes': minutes, 'hours': hours, 'days': days } except Exception as e: print(e) return None @staticmethod def validate_float_with_precision(number, precison): try: number = str(number) number_split = number.split('.') if len(number_split) > 1 and len(number_split[1]) > precison: return {'success': False} number = float(number) if number <= 0: return {'success': False} return {'success': True, 'number': number} except Exception as e: print(e) return {'success': False} ``` #### File: myproductivitytool/project/services.py ```python from myproductivitytool.common.services import * from myproductivitytool.project.models import * from myproductivitytool.project.serializers import * from django.db.models.functions import Concat from django.db.models import F, Value, CharField class BaseProjectEntityService(ModelService): entity = BaseProjectEntity entity_name = 'Base Project Entity' entity_serializer = BaseProjectEntitySerializer class TaskService(BaseModelService): entity = Task entity_name = 'Project Task' entity_serializer = TaskSerializer @classmethod def get_context(cls, **kwargs): try: context = dict() instance_id = kwargs.get('instance_id', None) projects = list(Project.objects.filter(is_deleted=False).annotate(key=F('id'), value=F('id'), text=F('name')).values('key','value', 'text')) context.update({ 'projects': projects }) if instance_id: instance = cls.entity.objects.get(id=instance_id) instance = cls.entity_serializer(instance).data context.update({ 'instance': instance }) print(context) return {'success': True, 'context': context} except Exception as e: print(e) return {'success': False, 'message': 'We could not fetch context for 
{0}'.format(cls.entity_name)} @classmethod def generate_task_number(cls, **kwargs): try: return {'success': True, 'task_number':Task.objects.count()+1} except Exception as e: print(e) return {'success': False, 'message': 'We could not generate task number'} class ProjectService(BaseModelService): entity = Project entity_name = 'Project' entity_serializer = ProjectSerializer @classmethod def delete(cls,**kwargs): try: requestor = kwargs.get('requestor') instance_id = kwargs.get('instance_id') if not cls.entity.objects.filter(id=instance_id).exists(): return {'success': False, 'message': 'We could not find the {0} you are trying to delete'.format(cls.entity_name)} instance = cls.entity.objects.get(id=instance_id) validation_data = cls.is_deletable(**{'instance': instance}) if not validation_data.get('success'): return validation_data # remove project from attached tasks Task.objects.filter(project=instance).update(project=None) instance.is_deleted = True instance.deleted_on = timezone.now() instance.deleted_by = requestor instance.save() return {'success': True, 'message': '{0} deleted successfully'.format(cls.entity_name)} except Exception as e: print(e) return {'success': False, 'message': 'We could not delete the {0}'.format(cls.entity_name)} class TaskCommentService(BaseModelService): entity = TaskComment entity_name = 'Task Comment' entity_serializer = TaskCommentSerializer class TaskCommentAttachmentService(BaseModelService): entity = TaskCommentAttachment entity_name = 'Task Comment Attachment' entity_serializer = TaskCommentAttachmentSerializer ``` #### File: myproductivitytool/project/views.py ```python from myproductivitytool.common.utils import * from myproductivitytool.common.models import * from myproductivitytool.common.responses import * from myproductivitytool.common.serializers import * from myproductivitytool.common.services import * from myproductivitytool.project.utils import * from myproductivitytool.project.models import * from myproductivitytool.project.serializers import * from myproductivitytool.project.services import * from django.conf import settings from django.shortcuts import render from rest_framework.views import APIView from rest_framework import authentication from django.contrib.auth.models import User from rest_framework import viewsets, mixins from rest_framework.response import Response from rest_framework_jwt.settings import api_settings from django.views.decorators.csrf import csrf_exempt from django.utils.decorators import method_decorator from django.views.generic import View, FormView, TemplateView class Statistics(APIView): def get(self, request, *args, **kwargs): try: statistics_data = { 'projects': ProjectService.count_active(**{'filter_query':{'created_by': request.user}}).get('count'), 'tasks': TaskService.count_active(**{'filter_query':{'created_by': request.user}}).get('count'), } return success(statistics_data) except Exception as e: print(e) return exception(message='We could not load Statistics for the dashboard') class Projects(APIView): SERVICE_OWNER = ProjectService def get(self, request, pid=None, action=None, *args, **kwargs): try: employee = request.user data = request.GET.dict() if action not in self.SERVICE_OWNER.ALLOWED_ACTIONS_FOR_GET: return bad_request(message='The requested action is not available on given resource') if action == 'get_context': service_keywords = dict() if pid: service_keywords.update({'instance_id': pid}) else: service_response = self.SERVICE_OWNER.construct_filter_query(**data) if not 
service_response.get('success'): return exception(message=service_response.get('message')) filter_query = service_response.get('filter_query') service_response = self.SERVICE_OWNER.construct_page_query(**data) if not service_response.get('success'): return exception(message=service_response.get('message')) page_query = service_response.get('page_query') if pid: filter_query.update({'id': pid}) # to make sure only user created data is sent filter_query.update({ 'created_by': request.user }) service_keywords = { 'page_query': page_query, 'filter_query': filter_query } service_response = getattr(self.SERVICE_OWNER, action)(**service_keywords) if not service_response.get('success'): return exception(message=service_response.get('message')) return Response(service_response) except Exception as e: print(e) return exception(message='We could not perform the requested action on the given resource') def post(self, request, pid=None, action=None, *args, **kwargs): try: employee = request.user data = request.POST.dict() if action not in self.SERVICE_OWNER.ALLOWED_ACTIONS_FOR_POST: return bad_request(message='The requested action is not available on given resource') # For delete Action if action in ['delete']: service_response = getattr(self.SERVICE_OWNER, action)(**{ 'instance_id': pid }) message = service_response.get('message') if service_response.get('success'): return success(service_response) else: return exception(message=message) # For Add Update Action if action == 'update' and not pid: return bad_request(message='We could not find the project for updation') if action == 'add': pid = None entity_data = dict() # Validate always mendatory fields mandatory_fields = [ ('name', 'Please provide a name'), ('startDate', 'Please provide a start date'), ('endDate', 'Please provide a end date'), ('description', 'Please provide a description'), ('status', 'Please provide a status for this project'), ] for field in mandatory_fields: value = data.get(field[0], None) if not value or not len(value.strip()): return bad_request(message=field[1]) start_date = datetime.strptime(data.get('startDate'), settings.DATE_FORMAT) end_date = datetime.strptime(data.get('endDate'), settings.DATE_FORMAT) if start_date > end_date: return exception(message='Start date can not be greater than end date') status = data.get('status') if status not in ['UPC', 'ONG', 'CMP']: return bad_request(message='The status selected is invalid') # Fetch always mendatory fields entity_data.update({ 'status': status, 'end_date': end_date, 'name': data.pop('name'), 'start_date': start_date, 'last_modified_by': employee, 'description': data.pop('description') }) overlapping_projects = Project.objects.filter(name=entity_data.get('name'), is_deleted=False) if pid: if overlapping_projects.exclude(id=pid).exists(): return exception(message='A project with same name already exists') instance = Project.objects.get(id=pid) if keep_previous_avatar == 'yes': entity_data.update({'avatar': instance.avatar}) for field_name, field_value in entity_data.items(): setattr(instance, field_name, field_value) instance.save() final_message = 'Project updated successfully' else: if overlapping_projects.exists(): return exception(message='A project with same name already exists') entity_data.update({'created_by': employee}) instance = Project.objects.create(**entity_data) final_message = 'Project created successfully' response_json = { 'message': final_message } return success(response_json) except Exception as e: print(e) return exception(message='We could not perform 
the requested action on the given resource') class Tasks(APIView): SERVICE_OWNER = TaskService def get(self, request, pid=None, tid=None, action=None, *args, **kwargs): try: employee = request.user data = request.GET.dict() if action not in self.SERVICE_OWNER.ALLOWED_ACTIONS_FOR_GET: return bad_request(message='The requested action is not available on given resource') if action == 'get_context': service_keywords = dict() if tid: service_keywords.update({'instance_id': tid}) else: service_response = self.SERVICE_OWNER.construct_filter_query(**data) if not service_response.get('success'): return exception(message=service_response.get('message')) filter_query = service_response.get('filter_query') service_response = self.SERVICE_OWNER.construct_page_query(**data) if not service_response.get('success'): return exception(message=service_response.get('message')) page_query = service_response.get('page_query') if tid: filter_query.update({'id': tid}) if pid: filter_query.update({'project__id': pid}) # to make sure only user created data is sent filter_query.update({ 'created_by': request.user }) service_keywords = { 'page_query': page_query, 'filter_query': filter_query } service_response = getattr(self.SERVICE_OWNER, action)(**service_keywords) if not service_response.get('success'): return exception(message=service_response.get('message')) return Response(service_response) except Exception as e: print(e) return exception(message='We could not perform the requested action on the given resource') def post(self, request, pid=None, tid=None, action=None, *args, **kwargs): try: employee = request.user data = request.POST.dict() if action not in self.SERVICE_OWNER.ALLOWED_ACTIONS_FOR_POST: return bad_request(message='The requested action is not available on given resource') # For delete Action if action in ['delete']: service_response = getattr(self.SERVICE_OWNER, action)(**{ 'instance_id': tid }) message = service_response.get('message') if service_response.get('success'): return success(service_response) else: return exception(message=message) # For Add Update Action if action == 'update' and not tid: return bad_request(message='We could not find the task for updation') if action == 'add': tid = None entity_data = dict() # Validate always mendatory fields mandatory_fields = [ ('name', 'Please provide a name'), ('startDate', 'Please provide a start date'), ('endDate', 'Please provide a end date'), ('description', 'Please provide a description'), ('status', 'Please provide a status'), ('priority', 'Please provide a priority') ] for field in mandatory_fields: value = data.get(field[0], None) if not value or not len(value.strip()): return bad_request(message=field[1]) start_date = datetime.strptime(data.get('startDate'), settings.DATE_FORMAT) end_date = datetime.strptime(data.get('endDate'), settings.DATE_FORMAT) if start_date > end_date: return exception(message='Start date can not be greater than end date') project = None if data.get('project', None): project = Project.objects.get(id=data.get('project')) status = data.pop('status') if status not in ['DRF', 'PRG', 'CMP']: return bad_request(message='The status provided is invalid') priority = data.pop('priority') if priority not in ['A', 'B', 'C']: return bad_request(message='The priority provided is invalid') # Fetch always mendatory fields entity_data.update({ 'status': status, 'project': project, 'priority': priority, 'end_date': end_date, 'name': data.pop('name'), 'start_date': start_date, 'last_modified_by': employee, 'description': 
data.pop('description') }) overlapping_tasks = Task.objects.filter(name=entity_data.get('name'), is_deleted=False) if project: overlapping_tasks = overlapping_tasks.filter(project=project) else: overlapping_tasks = overlapping_tasks.filter(project__isnull=True) if tid: if overlapping_tasks.exclude(id=tid).exists(): return exception(message='A task with same name already exists{0}'.format(' under the selected project' if project else '')) instance = Task.objects.get(id=tid) for field_name, field_value in entity_data.items(): setattr(instance, field_name, field_value) instance.save() final_message = 'Task updated successfully' else: if overlapping_tasks.exists(): return exception(message='A task with same name already exists{0}'.format(' under the selected project' if project else '')) service_response = self.SERVICE_OWNER.generate_task_number() if not service_response.get('success'): return exception(message=service_response.get('message')) entity_data.update({ 'created_by': employee, 'task_number': service_response.get('task_number') }) instance = Task.objects.create(**entity_data) final_message = 'Task created successfully' response_json = { 'message': final_message } return success(response_json) except Exception as e: print(e) return exception(message='We could not perform the requested action on the given resource') class TaskComments(APIView): SERVICE_OWNER = TaskCommentService def get(self, request, tid=None, tcid=None, action=None, *args, **kwargs): try: employee = request.user data = request.GET.dict() if action not in self.SERVICE_OWNER.ALLOWED_ACTIONS_FOR_GET: return bad_request(message='The requested action is not available on given resource') if action == 'get_context': service_keywords = dict() if tcid: service_keywords.update({'instance_id': tcid}) else: service_response = self.SERVICE_OWNER.construct_filter_query(**data) if not service_response.get('success'): return exception(message=service_response.get('message')) filter_query = service_response.get('filter_query') service_response = self.SERVICE_OWNER.construct_page_query(**data) if not service_response.get('success'): return exception(message=service_response.get('message')) page_query = service_response.get('page_query') if tcid: filter_query.update({'id': tcid}) if tid: filter_query.update({'task__id': tid}) # to make sure only user created data is sent filter_query.update({ 'created_by': request.user }) service_keywords = { 'page_query': page_query, 'filter_query': filter_query } service_response = getattr(self.SERVICE_OWNER, action)(**service_keywords) if not service_response.get('success'): return exception(message=service_response.get('message')) return Response(service_response) except Exception as e: print(e) return exception(message='We could not perform the requested action on the given resource') def post(self, request, tid=None, tcid=None, action=None, *args, **kwargs): try: employee = request.user data = request.POST.dict() if action not in self.SERVICE_OWNER.ALLOWED_ACTIONS_FOR_POST: return bad_request(message='The requested action is not available on given resource') # For delete Action if action in ['delete']: service_response = getattr(self.SERVICE_OWNER, action)(**{ 'instance_id': tcid }) message = service_response.get('message') if service_response.get('success'): return success(service_response) else: return exception(message=message) # For Add Update Action if action == 'update' and not tcid: return bad_request(message='We could not find the task comment for updation') if action == 'add': tcid = 
None entity_data = dict() # Validate always mendatory fields mandatory_fields = [ ('text', 'Please provide a comment text'), ('task', 'Please provide a task against you are adding comment') ] for field in mandatory_fields: value = data.get(field[0], None) if not value or not len(value.strip()): return bad_request(message=field[1]) task = Task.objects.get(id=data.get('task')) text = data.get('text').strip() # Fetch always mendatory fields entity_data.update({ 'text': text, 'task': task, 'last_modified_by': employee }) if tcid: instance = TaskComment.objects.get(id=tcid) for field_name, field_value in entity_data.items(): setattr(instance, field_name, field_value) instance.save() final_message = 'Task comment updated successfully' else: entity_data.update({ 'created_by': employee, }) instance = TaskComment.objects.create(**entity_data) final_message = 'Task comment added successfully' response_json = { 'message': final_message } return success(response_json) except Exception as e: print(e) return exception(message='We could not perform the requested action on the given resource') class TaskCommentAttachment(APIView): SERVICE_OWNER = TaskCommentAttachmentService def get(self, request, tcid=None, tcaid=None, action=None, *args, **kwargs): try: employee = request.user data = request.GET.dict() if action not in self.SERVICE_OWNER.ALLOWED_ACTIONS_FOR_GET: return bad_request(message='The requested action is not available on given resource') if action == 'get_context': service_keywords = dict() if tcaid: service_keywords.update({'instance_id': tcaid}) else: service_response = self.SERVICE_OWNER.construct_filter_query(**data) if not service_response.get('success'): return exception(message=service_response.get('message')) filter_query = service_response.get('filter_query') service_response = self.SERVICE_OWNER.construct_page_query(**data) if not service_response.get('success'): return exception(message=service_response.get('message')) page_query = service_response.get('page_query') if tcaid: filter_query.update({'id': tcaid}) if tcid: filter_query.update({'task_comment__id': tcid}) # to make sure only user created data is sent filter_query.update({ 'created_by': request.user }) service_keywords = { 'page_query': page_query, 'filter_query': filter_query } service_response = getattr(self.SERVICE_OWNER, action)(**service_keywords) if not service_response.get('success'): return exception(message=service_response.get('message')) return Response(service_response) except Exception as e: print(e) return exception(message='We could not perform the requested action on the given resource') def post(self, request, tcid=None, tcaid=None, action=None, *args, **kwargs): try: employee = request.user data = request.POST.dict() if action not in self.SERVICE_OWNER.ALLOWED_ACTIONS_FOR_POST: return bad_request(message='The requested action is not available on given resource') # For delete Action if action in ['delete']: service_response = getattr(self.SERVICE_OWNER, action)(**{ 'instance_id': tcaid }) message = service_response.get('message') if service_response.get('success'): return success(service_response) else: return exception(message=message) # For Add Update Action if action == 'update' and not tcaid: return bad_request(message='We could not find the task comment attachment for updation') if action == 'add': tcaid = None entity_data = dict() # Validate always mendatory fields mandatory_fields = [ ('taskComment', 'Please provide a task comment against you are adding attachment') ] for field in mandatory_fields: 
value = data.get(field[0], None) if not value or not len(value.strip()): return bad_request(message=field[1]) task_comment = TaskComment.objects.get(id=data.get('taskComment')) attachment = request.FILES.get('attachment') # Fetch always mendatory fields entity_data.update({ 'attachment': attachment, 'task_comment': task_comment, }) if tcaid: instance = Task.objects.get(id=tcaid) for field_name, field_value in entity_data.items(): setattr(instance, field_name, field_value) instance.save() final_message = 'Task comment attachment updated successfully' else: service_response = self.SERVICE_OWNER.generate_task_number() if not service_response.get('success'): return exception(message=service_response.get('message')) instance = Task.objects.create(**entity_data) final_message = 'Task comment attachment added successfully' response_json = { 'message': final_message } return success(response_json) except Exception as e: print(e) return exception(message='We could not perform the requested action on the given resource') ```
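The CRUD layer above wires a new entity in by subclassing `BaseModelService` with three class attributes, exactly as `TaskService` and `ProjectService` do. A sketch for a hypothetical `Note` entity follows; the `Note` model and `NoteSerializer` do not exist in the repository, so this illustrates the pattern rather than reproducing project code.

```python
from myproductivitytool.common.services import BaseModelService
# Hypothetical model and serializer, not present in the repository.
from myproductivitytool.note.models import Note
from myproductivitytool.note.serializers import NoteSerializer


class NoteService(BaseModelService):
    # The generic view/count/delete actions in BaseModelService only need
    # these three attributes to know what they operate on.
    entity = Note
    entity_name = 'Note'
    entity_serializer = NoteSerializer
```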
{ "source": "jha-hitesh/python-learnings", "score": 4 }
#### File: jha-hitesh/python-learnings/multi-threading.py
```python
import random
from threading import Thread
import time


class SafeThread(Thread):
    """SafeThread.

    A custom thread implementation that allows raising an exception
    to the caller thread.
    """

    def __init__(self, *args, **kwargs):
        """init."""
        super(SafeThread, self).__init__(*args, **kwargs)
        self.exception = None

    def run(self) -> None:
        """run."""
        try:
            super(SafeThread, self).run()
        except Exception as ex:
            self.exception = ex

    def join(self, *args, **kwargs) -> None:
        """join."""
        super(SafeThread, self).join(*args, **kwargs)
        if self.exception:
            raise self.exception


def step1_validation(*args, **kwargs):
    """step1_validation.

    This validation does some DB operation; it runs in parallel with the
    other threads because it is an I/O wait, so there is no guarantee
    when it will complete.
    """
    operation_time = random.randrange(5)
    time.sleep(operation_time)  # random sleep to simulate a db call
    if kwargs.get("fail_test") == "step1_validation":
        raise Exception(f"Step 1 validation failed in {operation_time} seconds")
    print(f"step 1 validation completed in {operation_time} seconds")


def step2_validation(*args, **kwargs):
    """step2_validation.

    This validation does an external API call; it runs in parallel with
    the other threads because it is a wait rather than code execution,
    so there is no guarantee when it will complete.
    """
    operation_time = random.randrange(5)
    time.sleep(operation_time)  # random sleep to simulate an external api call
    if kwargs.get("fail_test") == "step2_validation":
        raise Exception(f"Step 2 validation failed in {operation_time} seconds")
    print(f"step 2 validation completed in {operation_time} seconds")


def step3_validation(*args, **kwargs):
    """step3_validation.

    This validation does a basic code-level check; it does not overlap
    with the other threads because it is pure code execution with no
    wait. So if the other validations have no I/O, step 3 will finish
    last; otherwise it will be the first or second to complete.
    """
    if kwargs.get("fail_test") == "step3_validation":
        raise Exception("Step 3 validation failed without delay")
    print("step 3 validation completed without delay")


def step4_process(*args, **kwargs):
    """step4_process."""
    print("step4_process completed")


def step5_process(*args, **kwargs):
    """step5_process."""
    print("step5_process completed")


def step6_process(*args, **kwargs):
    """step6_process."""
    print("step6_process completed")


def step7_process(*args, **kwargs):
    """step7_process."""
    print("step7_process completed")
    return True, {}


def main_api_handler(*args, **kwargs):
    """main_api_handler.

    Accepts a fail_test keyword naming the validation function that
    should fail. This handler would be called from the main API post
    method.
    """
    try:
        # creating threads for validations
        validation1 = SafeThread(target=step1_validation, args=args, kwargs=kwargs)
        validation2 = SafeThread(target=step2_validation, args=args, kwargs=kwargs)
        validation3 = SafeThread(target=step3_validation, args=args, kwargs=kwargs)

        # starting validations concurrently
        validation1.start()
        validation2.start()
        validation3.start()

        # waiting for validation threads to conclude before moving ahead
        validation1.join()
        validation2.join()
        validation3.join()

        step4_process(*args, **kwargs)

        process5 = SafeThread(target=step5_process, args=args, kwargs=kwargs)
        process6 = SafeThread(target=step6_process, args=args, kwargs=kwargs)
        process5.start()
        process6.start()
        process5.join()
        process6.join()

        success, data = step7_process(*args, **kwargs)
        return {
            "success": success,
            "data": data
        }
    except Exception as e:
        return {"success": False, "message": str(e)}
```
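Because `main_api_handler` accepts an optional `fail_test` keyword naming the validation that should raise, both paths are easy to exercise. The file above has no `__main__` block, so the driver below is a hypothetical addition to the bottom of that module:

```python
if __name__ == "__main__":
    # Happy path: the three validations run concurrently, then the
    # sequential step 4, the parallel steps 5 and 6, and finally step 7.
    print(main_api_handler())  # -> {'success': True, 'data': {}}

    # Failure path: the named validation raises inside its thread,
    # SafeThread.join() re-raises it in the caller, and the handler
    # returns the error message instead of propagating the exception.
    print(main_api_handler(fail_test="step2_validation"))
```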
{ "source": "jhaiduce/defense_slides_www", "score": 3 }
#### File: pictures/substorms/latex_format_number.py ```python import unittest from math import log10,floor import re def latex_format_int(n): try: return str(int(n)) except: return '-' def guess_precision(n,uncert,uncert_precision=1): # Base-10 exponent of the value if abs(n)>0: exp_val=int(floor(log10(abs(n)))) else: exp_val=0 try: # Get the base-10 exponent of the error exp_err=int(floor(log10(abs(uncert)))) except: # Default precision p=3 else: # Calculate precision p=max(exp_val-exp_err+uncert_precision,1) return p def latex_format_number(n,uncert=None,exp_lower=1e-3,exp_upper=1e4,precision='auto',uncert_precision=1,show_uncert=True,overline=False,extra_digits=0): try: int(n) except (ValueError,TypeError): return '-' # Base-10 exponent of the value if abs(n)>0: exp_val=int(floor(log10(abs(n)))) else: exp_val=0 try: nuncert=len(uncert) except: nuncert=1 else: assert nuncert<=2 assert nuncert>0 if nuncert==2: uncert_max,uncert_min=uncert uncert=min(abs(uncert_min),abs(uncert_max)) else: uncert=uncert[0] if precision=='auto': p=guess_precision(n,uncert,uncert_precision) else: try: p=int(precision) except: raise ValueError('Precision must be a number or ''auto ''.') assert p>0 if uncert==None or show_uncert==False: # Determine whether format should be exponential or not, and compute number of decimal places if(abs(n)>=exp_upper or abs(n)<exp_lower and n!=0): format_type='e' d=p-1 else: format_type='f' # Determine number of decimal places d=max(p-exp_val+extra_digits-1,0) # Round off the insignificant digits n=round(n/pow(10,exp_val-(p+extra_digits)+1))*pow(10,exp_val-(p+extra_digits)+1) # Format the number as a string format_string='{{0:0.{0:d}{1}}}'.format(d,format_type) s=format_string.format(n) if overline: # Draw a bar over the last significant digit if extra_digits: s=s[:-extra_digits-1]+'\\overline{'+s[-extra_digits-1]+'}'+s[-extra_digits:] else: s=s[:-1]+'\\overline{'+s[-1]+'}' # Format exponential using LaTeX syntax matchstr=r'e(|-)?[+0]*([\d]+)' s=re.sub(matchstr,r'\\times10^{\1\2}',s) return s else: if nuncert==1: return "{0:s}\pm{1:s}".format(latex_format_number(n,exp_lower=exp_lower,exp_upper=exp_upper,precision=p,overline=overline,extra_digits=extra_digits),latex_format_number(uncert,exp_lower=exp_lower,exp_upper=exp_upper,precision=uncert_precision)) else: return "{0:s}[+{1:s},-{2:s}]".format( latex_format_number(n,exp_lower=exp_lower,exp_upper=exp_upper,precision=p,overline=overline,extra_digits=extra_digits), latex_format_number(uncert_max,exp_lower=exp_lower,exp_upper=exp_upper,precision=uncert_precision), latex_format_number(uncert_min,exp_lower=exp_lower,exp_upper=exp_upper,precision=uncert_precision), ) class test_latex_format_number(unittest.TestCase): def test_latex_format_number(self): self.assertEqual(latex_format_number(1,0.1),'1.0\\pm0.1') self.assertEqual(latex_format_number(1234,100),'1200\\pm100') self.assertEqual(latex_format_number(1e4,10),'1.000\\times10^{4}\\pm10') self.assertEqual(latex_format_number(1e5,1e4),'1.0\\times10^{5}\\pm1\\times10^{4}') self.assertEqual(latex_format_number(1e-3,1e-4),'0.0010\\pm1\\times10^{-4}') self.assertEqual(latex_format_number(1e-4,1e-5),'1.0\\times10^{-4}\\pm1\\times10^{-5}') self.assertEqual(latex_format_number(-1e-4,1e-5),'-1.0\\times10^{-4}\\pm1\\times10^{-5}') self.assertEqual(latex_format_number(-1e4,1e3),'-1.0\\times10^{4}\\pm1000') self.assertEqual(latex_format_number(1,0.11),'1.0\\pm0.1') self.assertEqual(latex_format_number(1,0.09),'1.00\\pm0.09') self.assertEqual(latex_format_number(1,2),'1\\pm2') 
self.assertEqual(latex_format_number(1,10),'1\\pm10') self.assertEqual(latex_format_number(0),'0.00') self.assertEqual(latex_format_number(0,(2,1)),'0[+2,-1]') self.assertEqual(latex_format_number(0.016,0.01,show_uncert=False,overline=True),'0.0\\overline{2}') self.assertEqual(latex_format_number(0.016,0.01,show_uncert=True,overline=True,extra_digits=1),'0.0\\overline{1}6\\pm0.01') if __name__=='__main__': unittest.main() ```
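The unit tests above double as a usage reference; a few of them pulled out as a plain script (assuming the functions above are importable or in scope):

```python
print(latex_format_number(1, 0.1))       # 1.0\pm0.1
print(latex_format_number(1234, 100))    # 1200\pm100
print(latex_format_number(1e5, 1e4))     # 1.0\times10^{5}\pm1\times10^{4}
print(latex_format_number(0, (2, 1)))    # 0[+2,-1]
print(latex_format_int('not a number'))  # -
```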
{ "source": "jhaigh0/abcunit-cmip5-stats", "score": 3 }
#### File: abcunit-cmip5-stats/output_handler/database_handler.py ```python import psycopg2 from .base_handler import BaseHandler class DataBaseHandler(BaseHandler): def __init__(self, connection_info, error_types, table_name='results'): """ Constructs an instace of the database handler. :param connection_info: (str) Connection string in the psycopg2 format, "dbname=<db_name> user=<user_name> password=<password>". :param error_types: (list) List of the string names of the types of errors that can occur. :param table_name: (str) Optional string for the name of the table created (default 'results') """ self.error_types = error_types self.conn = psycopg2.connect(connection_info) self.cur = self.conn.cursor() self.table_name = table_name self._create_table() def _create_table(self): """ Creates a table called <self.table_name> with primary key id varchar(255) and result varchar(255) """ self.cur.execute(f'CREATE TABLE IF NOT EXISTS {self.table_name}' \ '(id varchar(255) PRIMARY KEY, result varchar(255) NOT NULL);') self.conn.commit() def _delete_table(self): """ Drops the database table """ self.cur.execute(f"DROP TABLE {self.table_name};") self.conn.commit() def get_result(self, identifier): """ Selects the result of the job with the id passed and returns it :param identifier: (str) Id of the job result :return: String result of job """ query = f"SELECT result FROM {self.table_name} " \ f"WHERE id='{identifier}';" self.cur.execute(query) if self.cur.rowcount > 0: return self.cur.fetchone()[0] return None def get_all_results(self): """ :return: Dictionary with job ids as keys and results as values """ query = f"SELECT * FROM {self.table_name}" self.cur.execute(query) result_dict = {} for (name, result) in self.cur: result_dict[name] = result return result_dict def get_successful_runs(self): """ :return: List of job ids which ran successfully """ query = f"SELECT id FROM {self.table_name} " \ "WHERE result='success';" self.cur.execute(query) return [name[0] for name in self.cur] def get_failed_runs(self): """ :return: Dictionary with error types as keys and lists of job ids as values """ query = f"SELECT id, result FROM {self.table_name} " \ "WHERE result<>'success';" self.cur.execute(query) failures = dict([(key, []) for key in self.error_types]) for (name, result) in self.cur: failures[result].append(name) return failures def delete_result(self, identifier): """ Deletes job id and result from the database :param identifier: (str) Id of the job results """ query = f"DELETE FROM {self.table_name} " \ f"WHERE id='{identifier}';" self.cur.execute(query) self.conn.commit() def delete_all_results(self): """ Deletes all entries in the database """ self.cur.execute(f"DELETE FROM {self.table_name};") self.conn.commit() def ran_succesfully(self, identifier): """ :param identifier: (str) Id of the job result :return: Boolean on if job ran successfully """ query = f"SELECT result FROM {self.table_name} " \ f"WHERE id='{identifier}';" self.cur.execute(query) result = self.cur.fetchone() if result is not None: return result[0] == 'success' return False def count_results(self): """ :return: Int number of jobs that have been run """ self.cur.execute(f"SELECT COUNT(*) FROM {self.table_name};") return self.cur.fetchone()[0] def count_successes(self): """ :return: Int number of jobs that have ran successfully """ query = f"SELECT COUNT(*) FROM {self.table_name} " \ "WHERE result='success';" self.cur.execute(query) return self.cur.fetchone()[0] def count_failures(self): """ :return: Int number of jobs that have 
failed """ query = f"SELECT COUNT(*) FROM {self.table_name} " \ "WHERE result<>'success';" self.cur.execute(query) return self.cur.fetchone()[0] def insert_success(self, identifier): """ Inserts a value into the table with a given id and the result 'success' :param identifier: (str) Id of the job result """ query = f"INSERT INTO {self.table_name} " \ f"VALUES ('{identifier}', 'success');" self.cur.execute(query) self.conn.commit() def insert_failure(self, identifier, error_type): """ Inserts a value into the table with a given id and the result te given error type :param identifier: (str) Id of the job result :param error_type: (str) Erroneous result of the job, from the error_types list """ query = f"INSERT INTO {self.table_name} " \ f"VALUES ('{identifier}', '{error_type}');" self.cur.execute(query) self.conn.commit() def close(self): """ Close connection with the database """ self.cur.close() self.conn.close() ``` #### File: abcunit-cmip5-stats/test/test_database_handler.py ```python import pytest import os from output_handler.database_handler import DataBaseHandler CONNECTION_DETAILS = os.environ["ABCUNIT_DB_SETTINGS"] db_handler = None def setup_module(): global db_handler print("SETTING UP") db_handler = DataBaseHandler(CONNECTION_DETAILS, ['bad_data', 'bad_num', 'no_output'], 'test_results') db_handler._delete_table() db_handler._create_table() def teardown_module(): print("TEARING DOWN") db_handler._delete_table() db_handler.close() def test_success_inserted(): db_handler.insert_success('mean/MOHC/HadGEM2-ES/r1i1p1/cLeaf') result = db_handler.get_result('mean/MOHC/HadGEM2-ES/r1i1p1/cLeaf') assert(result == 'success') def test_ran_successfully(): db_handler.insert_success('mean/MOHC/HadGEM2-ES/r1i1p1/cWood') assert(db_handler.ran_succesfully('mean/MOHC/HadGEM2-ES/r1i1p1/cWood')) def test_failure_inserted(): db_handler.insert_failure('mean/MOHC/HadGEM2-ES/r1i1p1/burntArea', 'bad_data') result = db_handler.get_result('mean/MOHC/HadGEM2-ES/r1i1p1/burntArea') assert(result == 'bad_data') def test_deletion_of_entry(): db_handler.insert_success('mean/MOHC/HadGEM2-ES/r1i1p1/cSoil') db_handler.delete_result('mean/MOHC/HadGEM2-ES/r1i1p1/cSoil') result = db_handler.get_result('mean/MOHC/HadGEM2-ES/r1i1p1/cSoil') assert(result == None) def _unique_setup(): db_handler._delete_table() #reset table for this db_handler._create_table() db_handler.insert_success('min/CMCC/CMCC-CM/r2i1p1/fFire') db_handler.insert_success('min/CMCC/CMCC-CM/r2i1p1/cVeg') db_handler.insert_success('min/CMCC/CMCC-CM/r2i1p1/treeFracSecDec') db_handler.insert_failure('min/CMCC/CMCC-CM/r2i1p1/fGrazing', 'bad_data') db_handler.insert_failure('min/CMCC/CMCC-CM/r2i1p1/rGrowth', 'bad_num') def test_counting(): _unique_setup() total = db_handler.count_results() total_success = db_handler.count_successes() total_failures = db_handler.count_failures() assert((total == 5) and (total_success == 3) and (total_failures == 2)) def test_get_successful_names(): _unique_setup() success_results = ['min/CMCC/CMCC-CM/r2i1p1/fFire','min/CMCC/CMCC-CM/r2i1p1/cVeg', 'min/CMCC/CMCC-CM/r2i1p1/treeFracSecDec'] assert(db_handler.get_successful_runs() == success_results) def test_get_failed_names(): _unique_setup() failed_results = { "bad_data": ["min/CMCC/CMCC-CM/r2i1p1/fGrazing"], "bad_num": ["min/CMCC/CMCC-CM/r2i1p1/rGrowth"], "no_output": [] } assert(db_handler.get_failed_runs() == failed_results) def test_get_result_dict(): _unique_setup() correct_dict = { "min/CMCC/CMCC-CM/r2i1p1/fFire": "success", "min/CMCC/CMCC-CM/r2i1p1/cVeg": "success", 
"min/CMCC/CMCC-CM/r2i1p1/treeFracSecDec": "success", "min/CMCC/CMCC-CM/r2i1p1/fGrazing": "bad_data", "min/CMCC/CMCC-CM/r2i1p1/rGrowth": "bad_num" } assert(db_handler.get_all_results() == correct_dict) ```
{ "source": "jhaip/streaming-queue-viz", "score": 2 }
#### File: jhaip/streaming-queue-viz/consumer_db_log.py
```python
import pika
from threading import Thread
import logging
import time
import signal
import json
import sqlite3
import datetime
import sys

logging.basicConfig(level=logging.INFO)

connection = pika.BlockingConnection(pika.ConnectionParameters('rabbit'))
print('Connected to RabbitMQ host "rabbit"')
channel = connection.channel()

db_conn = None
db_c = None


def threaded_rmq():
    global db_conn
    global db_c
    # Open the SQLite log store and make sure the data table exists
    db_conn = sqlite3.connect('example.db')
    db_c = db_conn.cursor()
    db_c.execute('''CREATE TABLE IF NOT EXISTS data
                 (timestamp text, value text, source text)''')
    db_conn.commit()

    # Seed a default view row the first time the consumer runs
    any_views = False
    for row in db_c.execute("SELECT * FROM data WHERE source='view'"):
        any_views = True
        # logging.info(row)
    if not any_views:
        initial_view_params = {
            "start": None,
            "end": None,
            "subviews": [
                {
                    "func": None,
                    "type": "",
                    "sources": ["serial"]
                },
                {
                    "func": None,
                    "type": "code",
                    "sources": ["code"]
                },
                {
                    "func": None,
                    "type": "annotation",
                    "sources": ["annotation"]
                }
            ]
        }
        data = (datetime.datetime.utcnow().isoformat(),
                json.dumps(initial_view_params),
                'view')
        db_c.execute("INSERT INTO data VALUES (?,?,?)", data)
        db_conn.commit()

    # Bind to the fanout exchange and consume messages until interrupted
    channel.exchange_declare(exchange='logs', exchange_type='fanout')
    channel.queue_declare(queue="my_queue")
    channel.queue_declare(queue="database_log")
    channel.queue_bind(exchange='logs', queue='database_log')
    channel.basic_consume(consumer_callback, queue="database_log", no_ack=True)
    channel.start_consuming()


def consumer_callback(ch, method, properties, body):
    global db_conn
    global db_c
    body_str = str(body, 'utf-8')
    logging.info("[db consumer] Received %s" % (body_str,))
    if body_str:
        body_json = json.loads(body_str)
        logging.info(body_json)
        logging.info("******")
        data = (body_json["timestamp"], body_json["value"], body_json["source"])
        db_c.execute("INSERT INTO data VALUES (?,?,?)", data)
        db_conn.commit()


def signal_handler(sig, frame):
    print('You pressed Ctrl+C!')
    db_conn.close()
    connection.close()
    sys.exit(0)


if __name__ == "__main__":
    logging.info('Starting thread RabbitMQ in DATABASE CONSUMER')
    # Register the Ctrl+C handler before start_consuming() blocks,
    # otherwise it would never be installed.
    signal.signal(signal.SIGINT, signal_handler)
    # threadRMQ = Thread(target=threaded_rmq)
    # threadRMQ.start()
    threaded_rmq()
```
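For context, a minimal publisher-side sketch that emits the JSON shape this consumer expects (`timestamp`, `value`, `source`) to the same `logs` fanout exchange on the `rabbit` host; the payload values are invented and the publisher itself is not part of the original repository.

```python
# Hypothetical publisher; only the host, exchange name and message keys
# are taken from the consumer above.
import datetime
import json

import pika

connection = pika.BlockingConnection(pika.ConnectionParameters('rabbit'))
channel = connection.channel()
channel.exchange_declare(exchange='logs', exchange_type='fanout')

message = {
    "timestamp": datetime.datetime.utcnow().isoformat(),
    "value": "example reading",     # illustrative payload
    "source": "serial",
}
channel.basic_publish(exchange='logs', routing_key='', body=json.dumps(message))
connection.close()
```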
{ "source": "jhajagos/CommonDataModelMapper", "score": 2 }
#### File: CommonDataModelMapper/omop_cdm/rw_to_prepared_source.py ```python import logging import json import os import argparse import csv import hashlib import sys try: from mapping_classes import InputClass except ImportError: sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], os.path.pardir, "src"))) from mapping_classes import InputClass from mapping_classes import OutputClassCSVRealization, InputOutputMapperDirectory, OutputClassDirectory, \ CoderMapperJSONClass, TransformMapper, FunctionMapper, FilterHasKeyValueMapper, ChainMapper, CascadeKeyMapper, \ CascadeMapper, KeyTranslator, PassThroughFunctionMapper, CodeMapperDictClass, CodeMapperDictClass, ConstantMapper, \ ReplacementMapper, MapperClass from prepared_source_classes import SourcePersonObject, SourceCareSiteObject, SourceEncounterObject, \ SourceObservationPeriodObject, SourceEncounterCoverageObject, SourceResultObject, SourceConditionObject, \ SourceProcedureObject, SourceMedicationObject, SourceLocationObject, SourceEncounterDetailObject from source_to_cdm_functions import generate_mapper_obj from utility_functions import generate_observation_period from prepared_source_functions import build_name_lookup_csv, build_key_func_dict logging.basicConfig(level=logging.INFO) class PopulationDemographics(InputClass): def fields(self): return ["personid", "gender_code", "gender_code_oid", "gender_code_text", "birthsex_code", "birthsex_code_oid", "birthsex_code_text", "birthdate", "dateofdeath", "zip_code", "race_code", "race_code_oid", "race_code_text", "ethnicity_code", "ethnicity_code_oid", "ethnicity_code_text", "active", "tenant"] class PopulationEncounter(InputClass): def fields(self): return ["encounterid", "personid", "hospitalizationstartdate", "readmission", "dischargedate", "servicedate", "financialclass_code", "financialclass_code_oid", "financialclass_code_text", "hospitalservice_code", "hospitalservice_code_oid", "hospitalservice_code_text", "classfication_code", "classification_code_oid", "classification_code_text", "type_code", "type_code_oid", "type_code_text", "dischargedisposition_code", "dischargedisposition_code_oid", "dischargedisposition_code_text", "dischargetolocation_code", "dischargetolocation_code_oid", "dischargetolocation_code_text", "admissionsource_code", "admissionsource_code_oid", "admissionsource_code_text", "admissiontype_code", "admissiontype_code_oid", "admissiontype_code_text", "status_code", "status_code_oid", "status_code_text", "estimatedarrivaldate", "estimateddeparturedate", "actualarrivaldate", "source", "active", "tenant"] class PopulationCondition(InputClass): def fields(self): return ["conditionid", "personid", "encounterid", "condition_code", "condition_code_oid", "condition_code_text", "effectiveDate", "billingrank", "presentonadmission_code", "presentonadmission_code_oid", "presentonadmission_text", "type_primary_code", "type_primary_code_oid", "type_primary_text", "source", "tenant"] class PopulationProcedure(InputClass): def fields(self): return ["procedureid", "personid", "encounterid", "procedure_code", "procedure_code_oid", "procedure_code_display", "modifier_code", "modifier_oid", "modifier_text", "servicestartdate", "serviceenddate", "status_code", "status_oid", "active", "tenant"] class PopulationMedication(InputClass): def fields(self): return ["medicationid", "encounterid", "personid", "intendeddispenser", "startdate", "stopdate", "doseunit_code", "doseunit_code_oid", "doseunit_code_text", "category_id", "category_code_oid", "category_code_text", 
"frequency_id", "frequency_code_oid", "frequency_code_text", "status_code", "status_code_oid", "status_code_text", "route_code", "route_code_oid", "route_code_text", "drug_code", "drug_code_oid", "drug_code_text", "dosequantity", "source", "tenant"] class PopulationResult(InputClass): def fields(self): return ["resultid", "encounterid", "personid", "result_code", "result_code_oid", "result_code_text", "result_type", "servicedate", "value_text", "value_numeric", "value_numeric_modifier", "unit_code", "unit_code_oid", "unit_code_text", "value_codified_code", "value_codified_code_oid", "value_codified_code_text", "date", "interpretation_code", "interpretation_code_oid", "interpretation_code_text", "specimen_type_code", "specimen_type_code_oid", "specimen_type_code_text", "bodysite_code", "bodysite_code_oid", "bodysite_code_text", "specimen_collection_date", "specimen_received_date", "measurementmethod_code", "measurementmethod_code_oid", "measurementmethod_code_text", "recordertype", "issueddate", "tenant", "year"] class PopulationObservationPeriod(InputClass): def fields(self): return [] class PopulationCareSite(InputClass): def fields(self): return [] class DuplicateExcludeMapper(MapperClass): """Indicates that a row is a duplicate""" def __init__(self, id_field): self.id_field = id_field self.id_dict = {"i_exclude": ""} def map(self, input_dict): if self.id_field in input_dict: id_value = input_dict[self.id_field] if id_value in self.id_dict: return {"i_exclude": 1} else: self.id_dict[id_value] = 1 return {"i_exclude": ""} else: return {} def main(input_csv_directory, output_csv_directory, file_name_dict): output_class_obj = OutputClassDirectory() in_out_map_obj = InputOutputMapperDirectory() # TOOD: Add single digit zip code sec_fields = SourceLocationObject().fields() with open(os.path.join(output_csv_directory, "source_location.csv"), newline="", mode="w") as fw: cfw = csv.writer(fw) cfw.writerow(sec_fields) input_patient_file_name = os.path.join(input_csv_directory, file_name_dict["demographic"]) person_id_duplicate_mapper = DuplicateExcludeMapper("personid") population_patient_rules = [("personid", "s_person_id"), ("gender_code_text", "s_gender"), ("gender_code", "m_gender"), ("birthdate", "s_birth_datetime"), ("dateofdeath", "s_death_datetime"), ("race_code", "s_race"), ("race_code_text", "m_race"), ("ethnicity_code", "s_ethnicity"), ("ethnicity_code_text", "m_ethnicity"), ("personid", person_id_duplicate_mapper, {"i_exclude": "i_exclude"}) ] output_person_csv = os.path.join(output_csv_directory, "source_person.csv") source_person_runner_obj = generate_mapper_obj(input_patient_file_name, PopulationDemographics(), output_person_csv, SourcePersonObject(), population_patient_rules, output_class_obj, in_out_map_obj) source_person_runner_obj.run() # Run the mapper # Care site care_site_csv = os.path.join(input_csv_directory, "care_site.csv") md5_func = lambda x: hashlib.md5(x.encode("utf8")).hexdigest() key_care_site_mapper = build_name_lookup_csv(os.path.join(input_csv_directory, file_name_dict["encounter"]), care_site_csv, ["tenant", "hospitalservice_code_text"], ["tenant", "hospitalservice_code_text"], hashing_func=md5_func) care_site_name_mapper = FunctionMapper( build_key_func_dict(["tenant", "hospitalservice_code_text"], separator=" -- ")) care_site_rules = [("key_name", "k_care_site"), (("tenant", "hospitalservice_code_text"), care_site_name_mapper, {"mapped_value": "s_care_site_name"})] source_care_site_csv = os.path.join(output_csv_directory, "source_care_site.csv") 
care_site_runner_obj = generate_mapper_obj(care_site_csv, PopulationCareSite(), source_care_site_csv, SourceCareSiteObject(), care_site_rules, output_class_obj, in_out_map_obj) care_site_runner_obj.run() # Encounters # TODO: Add flag for duplicate encounters encounter_file_name = os.path.join(input_csv_directory, file_name_dict["encounter"]) encounter_id_duplicate_mapper = DuplicateExcludeMapper("encounterid") encounter_rules = [ ("encounterid", "s_encounter_id"), ("personid", "s_person_id"), ("servicedate", "s_visit_start_datetime"), ("dischargedate", "s_visit_end_datetime"), ("type_code_text", "s_visit_type"), ("classification_code_text", "m_visit_type"), ("dischargedisposition_code_text", "s_discharge_to"), ("dischargedisposition_code", "m_discharge_to"), ("admissionsource_code_text", "s_admitting_source"), ("admissionsource_code", "m_admitting_source"), (("tenant", "hospitalservice_code_text"), key_care_site_mapper, {"mapped_value": "k_care_site"}), ("encounterid", encounter_id_duplicate_mapper, {"i_exclude": "i_exclude"}) ] source_encounter_csv = os.path.join(output_csv_directory, "source_encounter.csv") # Generate care site combination of tenant and hospitalservice_code_text encounter_runner_obj = generate_mapper_obj(encounter_file_name, PopulationEncounter(), source_encounter_csv, SourceEncounterObject(), encounter_rules, output_class_obj, in_out_map_obj) encounter_runner_obj.run() observation_csv_file = os.path.join(input_csv_directory, "population_observation.csv") generate_observation_period(source_encounter_csv, observation_csv_file, "s_person_id", "s_visit_start_datetime", "s_visit_end_datetime") observation_period_rules = [("s_person_id", "s_person_id"), ("s_visit_start_datetime", "s_start_observation_datetime"), ("s_visit_end_datetime", "s_end_observation_datetime")] source_observation_period_csv = os.path.join(output_csv_directory, "source_observation_period.csv") observation_runner_obj = generate_mapper_obj(observation_csv_file, PopulationObservationPeriod(), source_observation_period_csv, SourceObservationPeriodObject(), observation_period_rules, output_class_obj, in_out_map_obj) observation_runner_obj.run() # Holder for source encounter coverage sec_fields = SourceEncounterDetailObject().fields() with open(os.path.join(output_csv_directory, "source_encounter_detail.csv"), newline="", mode="w") as fw: cfw = csv.writer(fw) cfw.writerow(sec_fields) # Encounter plan or insurance coverage source_encounter_coverage_csv = os.path.join(output_csv_directory, "source_encounter_coverage.csv") encounter_coverage_rules = [("personid", "s_person_id"), ("encounterid", "s_encounter_id"), ("servicedate", "s_start_payer_date"), ("dischargedate", "s_end_payer_date"), ("financialclass_code_text", "s_payer_name"), ("financialclass_code_text", "m_payer_name"), ("financialclass_code_text", "s_plan_name"), ("financialclass_code_text", "m_plan_name")] encounter_benefit_runner_obj = generate_mapper_obj(encounter_file_name, PopulationEncounter(), source_encounter_coverage_csv, SourceEncounterCoverageObject(), encounter_coverage_rules, output_class_obj, in_out_map_obj) encounter_benefit_runner_obj.run() def m_rank_func(input_dict): if input_dict["billingrank"] == "PRIMARY": return {"m_rank": "Primary"} elif input_dict["billingrank"] == "SECONDARY": return {"m_rank": "Secondary"} else: return {} condition_rules = [("personid", "s_person_id"), ("encounterid", "s_encounter_id"), ("effectiveDate", "s_start_condition_datetime"), ("condition_code", "s_condition_code"), ("condition_code_oid", 
"m_condition_code_oid"), ("billingrank", PassThroughFunctionMapper(m_rank_func), {"m_rank": "m_rank"}), ("source", "s_condition_type"), ("presentonadmission_code", "s_present_on_admission_indicator")] condition_csv = os.path.join(input_csv_directory, file_name_dict["condition"]) source_condition_csv = os.path.join(output_csv_directory, "source_condition.csv") condition_mapper_obj = generate_mapper_obj(condition_csv, PopulationCondition(), source_condition_csv, SourceConditionObject(), condition_rules, output_class_obj, in_out_map_obj) condition_mapper_obj.run() procedure_csv = os.path.join(input_csv_directory, file_name_dict["procedure"]) source_procedure_csv = os.path.join(output_csv_directory, "source_procedure.csv") procedure_rules = [("personid", "s_person_id"), ("encounterid", "s_encounter_id"), ("servicestartdate", "s_start_procedure_datetime"), ("serviceenddate", "s_end_procedure_datetime"), ("procedure_code", "s_procedure_code"), ("procedure_code_oid", "s_procedure_code_type"), ("procedure_code_oid", "m_procedure_code_oid") ] procedure_mapper_obj = generate_mapper_obj(procedure_csv, PopulationProcedure(), source_procedure_csv, SourceProcedureObject(), procedure_rules, output_class_obj, in_out_map_obj) procedure_mapper_obj.run() def active_medications(input_dict): if "status_code_text" in input_dict: if input_dict["status_code_text"] not in ('Complete', 'Discontinued', 'Active', 'Suspended'): return {"i_exclude": 1} else: return {} else: return {} ["medicationid", "encounterid", "personid", "intendeddispenser", "startdate", "stopdate", "doseunit_code", "doseunit_code_oid", "doseunit_code_text", "category_id", "category_code_oid", "category_code_text", "frequency_id", "frequency_code_oid", "frequency_code_text", "status_code", "status_code_oid", "status_code_text", "route_code", "route_code_oid", "route_code_text", "drug_code", "drug_code_oid", "drug_code_text", "dosequantity", "source", "tenant"] medication_rules = [("personid", "s_person_id"), ("encounterid", "s_encounter_id"), ("drug_code", "s_drug_code"), ("drug_code_oid", "m_drug_code_oid"), ("drug_code_text", "s_drug_text"), ("startdate", "s_start_medication_datetime"), ("stopdate", "s_end_medication_datetime"), ("route_code_text", "s_route"), ("route_code", "m_route"), ("dosequantity", "s_quantity"), ("doseunit_code_text", "s_dose_unit"), ("doseunit_code", "m_dose_unit"), ("intendeddispenser", "s_drug_type"), ("intendeddispenser", "m_drug_type"), ("status_code", "s_status"), ("status_code_text", PassThroughFunctionMapper(active_medications), {"i_exclude": "i_exclude"}) ] medication_csv = os.path.join(input_csv_directory, file_name_dict["medication"]) source_medication_csv = os.path.join(output_csv_directory, "source_medication.csv") medication_mapper_obj = generate_mapper_obj(medication_csv, PopulationMedication(), source_medication_csv, SourceMedicationObject(), medication_rules, output_class_obj, in_out_map_obj) medication_mapper_obj.run() result_csv = os.path.join(input_csv_directory, file_name_dict["result"]) source_result_csv = os.path.join(output_csv_directory, "source_result.csv") ["resultid", "encounterid", "personid", "result_code", "result_code_oid", "result_code_text", "result_type", "servicedate", "value_text", "value_numeric", "value_numeric_modifier", "unit_code", "unit_code_oid", "unit_code_text", "value_codified_code", "value_codified_code_oid", "value_codified_code_text", "date", "interpretation_code", "interpretation_code_oid", "interpretation_code_text", "specimen_type_code", "specimen_type_code_oid", 
"specimen_type_code_text", "bodysite_code", "bodysite_code_oid", "bodysite_code_text", "specimen_collection_date", "specimen_received_date", "measurementmethod_code", "measurementmethod_code_oid", "measurementmethod_code_text", "recordertype", "issueddate", "tenant", "year"] result_rules = [("personid", "s_person_id"), ("encounterid", "s_encounter_id"), ("servicedate", "s_obtained_datetime"), ("result_code_text", "s_name"), ("result_code", "s_code"), ("result_code_oid", "m_type_code_oid"), ("value_text", "s_result_text"), (("value_codified_code_text", "interpretation_code_text"), FilterHasKeyValueMapper(["value_codified_code_text", "interpretation_code_text"]), {"value_codified_code_text": "m_result_text", "interpretation_code_text": "m_result_text"}), ("value_numeric", "s_result_numeric"), ("date", "s_result_datetime"), ("value_codified_code", "s_result_code"), ("value_codified_code_oid", "m_result_code_oid"), ("unit_code", "s_result_unit"), ("unit_code", "s_result_unit_code"), ("unit_code_oid", "m_result_unit_code_oid") #("norm_unit_of_measure_code", "s_result_unit_code") #("norm_ref_range_low", "s_result_numeric_lower"), #("norm_ref_range_high", "s_result_numeric_upper") ] result_mapper_obj = generate_mapper_obj(result_csv, PopulationResult(), source_result_csv, SourceResultObject(), result_rules, output_class_obj, in_out_map_obj) result_mapper_obj.run() if __name__ == "__main__": arg_parse_obj = argparse.ArgumentParser(description="Mapping Realworld CSV files to Prepared source format for OHDSI mapping") arg_parse_obj.add_argument("-c", "--config-file-name", dest="config_file_name", help="JSON config file", default="rw_config.json") arg_obj = arg_parse_obj.parse_args() print("Reading config file '%s'" % arg_obj.config_file_name) with open(arg_obj.config_file_name, "r") as f: config_dict = json.load(f) file_name_dict = { "demographic": "population_demographics.csv", "encounter": "population_encounter.csv", "condition": "population_condition.csv", "measurement": "population_measurement.csv", "medication": "population_medication.csv", "procedure": "population_procedure.csv", "result": "population_results_2020.csv" } main(config_dict["csv_input_directory"], config_dict["csv_input_directory"], file_name_dict) ``` #### File: omop_cdm/utility_programs/load_concept_files_into_db.py ```python import argparse import json import sys import os try: from utility_functions import load_csv_files_into_db, generate_vocabulary_load except(ImportError): sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], os.path.pardir, os.path.pardir, "src"))) from utility_functions import load_csv_files_into_db, generate_vocabulary_load def main(vocab_directory, connection_string, schema, vocabularies=["CONCEPT"]): vocab_list = generate_vocabulary_load(vocab_directory, vocabularies) vocab_data_dict = {} for pair in vocab_list: vocab_data_dict[pair[1]] = pair[0] load_csv_files_into_db(connection_string, vocab_data_dict, schema_ddl=None, indices_ddl=None, i_print_update=1000, truncate=True, schema=schema, delimiter="\t") if __name__ == "__main__": arg_parse_obj = argparse.ArgumentParser(description="Load concept/vocabulary files into database") arg_parse_obj.add_argument("-c", "--config-file-name", dest="config_file_name", help="JSON config file", default="../hi_config.json") arg_parse_obj.add_argument("--connection-uri", dest="connection_uri", default=None) arg_parse_obj.add_argument("--schema", dest="schema", default=None) arg_parse_obj.add_argument("--load-concept_ancestor", default=False, 
action="store_true", dest="load_concept_ancestor") arg_parse_obj.add_argument("--full-concept-files", default=False, action="store_true", dest="load_full_concept_files") arg_obj = arg_parse_obj.parse_args() print("Reading config file '%s'" % arg_obj.config_file_name) with open(arg_obj.config_file_name) as f: config = json.load(f) if arg_obj.connection_uri is None: connection_uri = config["connection_uri"] else: connection_uri = arg_obj.connection_uri if arg_obj.schema is None: schema = config["schema"] else: schema = arg_obj.schema if arg_obj.load_full_concept_files: vocabularies_to_load = ["CONCEPT", "CONCEPT_ANCESTOR", "CONCEPT_CLASS", "CONCEPT_RELATIONSHIP", "CONCEPT_SYNONYM", "DOMAIN", "DRUG_STRENGTH", "RELATIONSHIP", "VOCABULARY"] elif arg_obj.load_concept_ancestor: vocabularies_to_load = ["CONCEPT", "CONCEPT_ANCESTOR"] else: vocabularies_to_load = ["CONCEPT"] main(config["json_map_directory"], connection_uri, schema, vocabularies=vocabularies_to_load) ``` #### File: utility_programs/rxnorm/generate_brand_name_to_csv_mappings.py ```python import sqlalchemy as sa import pandas as pd import json import os import argparse def main(directory, connection_uri): engine = sa.create_engine(connection_uri) with engine.connect() as connection: q1 = """ select tt.n_sbdf, ott.* from ( select bn_str, bn_rxcui, count(*) as n_sbdf from ( select distinct tr1.str as bn_str, tr1.rxcui as bn_rxcui, tr1.TTY as bn_tty, tr3.STR as sbdf_str, tr3.rxcui as sbdf_rxcui , tr3.TTY as sbdf_tty from ( select * from rxnconso rx1 where rx1.tty = 'BN' and rx1.SAB = 'RXNORM') tr1 join rxnrel r1 on r1.RXCUI1 = tr1.RXCUI and tr1.SAB = 'RXNORM' join rxnconso tr2 on tr2.rxcui = r1.RXCUI2 and tr2.SAB = 'RXNORM' and tr2.TTY = 'SBDG' join rxnrel r2 on tr2.RXCUI = r2.RXCUI1 and r2.SAB = 'RXNORM' join rxnconso tr3 on r2.RXCUI2 = tr3.RXCUI and tr3.SAB = 'RXNORM' and tr3.TTY = 'SBDF' order by tr1.str, tr3.str) t group by bn_str, bn_rxcui) tt join ( select distinct tr1.str as bn_str, tr1.rxcui as bn_rxcui, tr1.TTY as bn_tty, tr3.STR as sbdf_str, tr3.rxcui as sbdf_rxcui , tr3.TTY as sbdf_tty from ( select * from rxnconso rx1 where rx1.tty = 'BN' and rx1.SAB = 'RXNORM') tr1 join rxnrel r1 on r1.RXCUI1 = tr1.RXCUI and tr1.SAB = 'RXNORM' join rxnconso tr2 on tr2.rxcui = r1.RXCUI2 and tr2.SAB = 'RXNORM' and tr2.TTY = 'SBDG' join rxnrel r2 on tr2.RXCUI = r2.RXCUI1 and r2.SAB = 'RXNORM' join rxnconso tr3 on r2.RXCUI2 = tr3.RXCUI and tr3.SAB = 'RXNORM' and tr3.TTY = 'SBDF') ott on tt.bn_rxcui = ott.bn_rxcui where n_sbdf = 1 order by bn_str, sbdf_str """ q1_df = pd.read_sql(q1, connection) q1_df_csv_file_name = os.path.join(directory, "select_n_in__ot___from___select_bn_rxcui.csv") q1_df.to_csv(q1_df_csv_file_name, index=False) q2 = """ select n_in, ot.* from ( select bn_rxcui, count(*) as n_in from ( select distinct tr1.str as bn_str, tr1.rxcui as bn_rxcui, tr1.TTY as bn_tty, tr2.str as in_str, tr2.RXCUI as in_rxcui from ( select * from rxnconso rx1 where rx1.tty = 'BN' and rx1.SAB = 'RXNORM') tr1 join rxnrel r1 on r1.RXCUI1 = tr1.RXCUI and r1.SAB = 'RXNORM' join rxnconso tr2 on r1.RXCUI2 = tr2.rxcui and tr2.SAB = 'RXNORM' and tr2.TTY = 'IN') t group by bn_rxcui) tt join (select distinct tr1.str as bn_str, tr1.rxcui as bn_rxcui, tr1.TTY as bn_tty, tr2.str as in_str, tr2.RXCUI as in_rxcui from ( select * from rxnconso rx1 where rx1.tty = 'BN' and rx1.SAB = 'RXNORM') tr1 join rxnrel r1 on r1.RXCUI1 = tr1.RXCUI and r1.SAB = 'RXNORM' join rxnconso tr2 on r1.RXCUI2 = tr2.rxcui and tr2.SAB = 'RXNORM' and tr2.TTY = 'IN') ot on ot.bn_rxcui = 
tt.bn_rxcui where n_in = 1 order by bn_str, in_str; """ q2_df = pd.read_sql(q2, connection) q2_df_csv_file_name = os.path.join(directory, "select_tt_n_sbdf__ott___from___select_bn.csv") q2_df.to_csv(q2_df_csv_file_name, index=False) q3 = """ select t.* from ( select distinct r1.RXCUI, r1.TTY, r1.STR, r2.RXCUI as IN_RXCUI, r2.TTY as IN_TTY, r2.STR as IN_STR from RXNCONSO r1 join RXNREL rr on r1.RXCUI = rr.RXCUI1 join RXNCONSO r2 on r2.RXCUI = rr.RXCUI2 where r1.SAB = 'RXNORM' and r2.SAB = 'RXNORM' and r1.SUPPRESS = 'N' and r1.TTY = 'BN' and r2.TTY in ('IN') ) t join RXNSAT rs on rs.rxcui = t.rxcui and rs.sab = 'RXNORM' and rs.atn = 'RXN_BN_CARDINALITY' and rs.ATV = 'single' order by cast(t.RXCUI as int), TTY, RXCUI """ q3_df = pd.read_sql(q3, connection) q3_df_csv_file_name = os.path.join(directory, "select_bn_single_in.csv") q3_df.to_csv(q3_df_csv_file_name, index=False) if __name__ == "__main__": arg_parser_obj = argparse.ArgumentParser(description="Generate brand name mappings") arg_parser_obj.add_argument("-c", "--config-json-file-name", dest="config_json_file_name", default="./rxnorm.json") arg_parser_obj.add_argument("-f", "--path-to-sqlite-file-name", dest="path_to_sqlite_file_name", default="./rxnorm.db3") arg_obj = arg_parser_obj.parse_args() config_json_file_name = arg_obj.config_json_file_name with open(config_json_file_name, "r") as f: config = json.load(f) destination_directory = config["json_map_directory"] connection_uri = "sqlite:///" + arg_obj.path_to_sqlite_file_name main(destination_directory, connection_uri) ``` #### File: CommonDataModelMapper/src/test_generate_io_classes_file_from_source.py ```python import unittest import os from generate_io_classes_file_from_source import * from mapping_classes import OutputClass import sqlalchemy as sa class TestClassGeneration(unittest.TestCase): def setUp(self): if os.path.exists("./test/test.db3"): os.remove("./test/test.db3") with open("./test/create_test_tables.sql", "r") as f: create_table_sql = f.read() engine = sa.create_engine("sqlite:///./test/test.db3") connection = engine.connect() for statement in create_table_sql.split(";"): print(statement) connection.execute(statement) def test_class_generate(self): simple_class = input_output_class_generate_string("Object1", "OutputClass", ["id", "test_name", "code_name"], "my_table") exec(simple_class) self.assertTrue(len(simple_class)) def test_generate_from_db(self): generated_classes_py = generate_sql_from_connection_string("sqlite:///./test/test.db3") #print(generated_classes_py) self.assertTrue(len(generated_classes_py)) if __name__ == '__main__': unittest.main() ``` #### File: CommonDataModelMapper/src/utility_functions.py ```python import csv import sqlalchemy as sa import datetime import pprint import time import os import sqlparse def load_csv_files_into_db(connection_string, data_dict, schema_ddl=None, indices_ddl=None, schema=None, delimiter=",", lower_case_keys=True, i_print_update=10000, truncate=False, truncate_long_fields=True, conditions=None, null_flag=False ): db_engine = sa.create_engine(connection_string) db_connection = db_engine.connect() table_names = [] if schema_ddl is not None: split_sql = sqlparse.split(schema_ddl) for sql_statement in split_sql: db_connection.execute(sql_statement) for key in data_dict: table_name = data_dict[key] if table_name not in table_names: if schema: table_name = schema + "." 
+ table_name table_names += [table_name] if truncate: for table_name in table_names: truncate_sql = "truncate %s" % table_name db_connection.execute(truncate_sql) meta_data = sa.MetaData(db_connection, reflect=True, schema=schema) for data_file in data_dict: varchar_fields = {} table_name = data_dict[data_file] if schema: table_name = schema + "." + table_name table_obj = meta_data.tables[table_name] for column in table_obj.columns: column_name = column.name column_type = table_obj.c[column_name].type if "CHAR" in str(column_type): varchar_fields[column_name.lower()] = table_obj.c[column_name].type.length print("Loading %s" % table_name) db_transaction = db_connection.begin() try: with open(data_file, newline="", errors="replace") as f: dict_reader = csv.DictReader(f, delimiter=delimiter) start_time = time.time() elapsed_time = start_time i = 0 for dict_row in dict_reader: cleaned_dict = {} for key in dict_row: if len(dict_row[key]): try: if "date" in key or "DATE" in key: if "-" in dict_row[key]: if " " in dict_row[key]: cleaned_dict[key.lower()] = datetime.datetime.strptime(dict_row[key], "%Y-%m-%d %H:%M:%S") else: cleaned_dict[key.lower()] = datetime.datetime.strptime(dict_row[key], "%Y-%m-%d") else: cleaned_dict[key.lower()] = datetime.datetime.strptime(dict_row[key], "%Y%m%d") else: cleaned_dict[key.lower()] = dict_row[key] except ValueError: cleaned_dict[key.lower()] = datetime.datetime(1900, 1, 1, 0, 0) if lower_case_keys: temp_cleaned_dict = {} for key in cleaned_dict: if truncate_long_fields: if key.lower() in varchar_fields: field_length = varchar_fields[key.lower()] if len(cleaned_dict[key.lower()]) > field_length: print("Truncating: '%s'" % cleaned_dict[key.lower()]) cleaned_dict[key.lower()] = cleaned_dict[key.lower()][0:field_length] temp_cleaned_dict[key.lower()] = cleaned_dict[key] cleaned_dict = temp_cleaned_dict if conditions is None: insert_data = True else: insert_data = False for condition in conditions: field_key = condition[0] if field_key in cleaned_dict: field_value = cleaned_dict[field_key] if field_value in condition[1]: insert_data = True for field_key in cleaned_dict: field_value = cleaned_dict[field_key] if field_value.__class__ == u"".__class__: if field_value.upper() == "NULL" and null_flag: cleaned_dict[field_key] = None if insert_data: s = table_obj.insert(cleaned_dict) try: db_connection.execute(s) except: pprint.pprint(cleaned_dict) raise if i > 0 and i % i_print_update == 0: current_time = time.time() time_difference = current_time - elapsed_time print("Loaded %s total rows at %s seconds per %s rows" % (i, time_difference, i_print_update)) elapsed_time = time.time() i += 1 db_transaction.commit() current_time = time.time() total_time_difference = current_time - start_time print("Loaded %s total row in %s seconds" % (i, total_time_difference)) except: db_transaction.rollback() raise if indices_ddl is not None: split_sql = sqlparse.split(indices_ddl) for sql_statement in split_sql: try: db_connection.execute(sql_statement) except(sa.exc.OperationalError): print("Skipping: '%s'" % sql_statement) def generate_db_dict(output_directory=None, load_pairs=None): if load_pairs is None: load_pairs = [ ("condition_occurrence", "condition_occurrence_dx_cdm.csv"), ("location", "location_cdm.csv"), ("person", "person_cdm.csv"), ("visit_occurrence", "visit_occurrence_cdm.csv"), ("visit_detail", "visit_detail_cdm.csv"), ("procedure_occurrence", "procedure_cdm.csv"), ("procedure_occurrence", "procedure_dx_cdm.csv"), ("measurement", "measurement_encounter_cdm.csv"), 
("measurement", "measurement_dx_cdm.csv"), ("measurement", "measurement_proc_cdm.csv"), ("drug_exposure", "drug_exposure_cdm.csv"), ("drug_exposure", "drug_exposure_proc_cdm.csv"), ("death", "death_cdm.csv"), ("observation", "observation_dx_cdm.csv"), ("observation", "observation_measurement_encounter_cdm.csv"), ("observation", "observation_proc_cdm.csv"), ("observation_period", "observation_period_cdm.csv"), ("care_site", "care_site_cdm.csv"), ("payer_plan_period", "payer_plan_period_cdm.csv") ] data_dict = {} for pair in load_pairs: if output_directory is not None: data_dict[os.path.join(output_directory, pair[1])] = pair[0] else: data_dict[pair[1]] = pair[0] return data_dict def generate_vocabulary_load(vocabulary_directory, vocabularies=["CONCEPT", "CONCEPT_ANCESTOR", "CONCEPT_CLASS", "CONCEPT_RELATIONSHIP", "CONCEPT_SYNONYM", "DOMAIN", "DRUG_STRENGTH", "RELATIONSHIP", "VOCABULARY"]): load_pairs = [] for vocabulary in vocabularies: load_pairs += [(vocabulary.lower(), os.path.join(vocabulary_directory, vocabulary + ".csv"))] return load_pairs def generate_observation_period(encounter_csv_file_name, source_period_observation_csv_file_name, id_field_name, start_date_field_name, end_date_field_name): with open(encounter_csv_file_name, newline="", encoding="utf8", errors="replace") as f: dict_reader = csv.DictReader(f) observation_period_dict = {} for row_dict in dict_reader: start_date_value = row_dict[start_date_field_name] end_date_value = row_dict[end_date_field_name] if len(end_date_value) == 0: end_date_value = start_date_value id_value = row_dict[id_field_name] if id_value in observation_period_dict: past_start_date_value, past_end_date_value = observation_period_dict[id_value] if start_date_value < past_start_date_value: set_start_date_value = start_date_value else: set_start_date_value = past_start_date_value if end_date_value > past_end_date_value: set_end_date_value = end_date_value else: set_end_date_value = past_end_date_value observation_period_dict[id_value] = (set_start_date_value, set_end_date_value) else: observation_period_dict[id_value] = (start_date_value, end_date_value) with open(source_period_observation_csv_file_name, "w", newline="") as fw: csv_writer = csv.writer(fw) csv_writer.writerow([id_field_name, start_date_field_name, end_date_field_name]) for id_value in observation_period_dict: start_date_value, end_date_value = observation_period_dict[id_value] if start_date_value == "": start_date_value = end_date_value row_to_write = [id_value, start_date_value, end_date_value] csv_writer.writerow(row_to_write) ```
{ "source": "jhakonen/wotdisttools", "score": 3 }
#### File: examples/helloworld-mod/mod_helloworld.py ```python from helloworld import resources def init(): """ Mod initialization function. Called by World of Tanks when the game starts up. """ name = resources.read_file('mods/johndoe.helloworld/data/name.txt') # Print statements end up to python.log in game's root directory print 'Hello %s!' % name def fini(): """ Mod deinitialization function. Called by World of Tanks when the game shuts down. """ name = resources.read_file('mods/johndoe.helloworld/data/name.txt') print 'Bye bye %s!' % name ``` #### File: wotdisttools/setuptools_wotmod/bdist_wotmod.py ```python from distutils import log from distutils.dir_util import mkpath, remove_tree from distutils.file_util import copy_file import distutils.util from setuptools import Command from setuptools.extern import packaging from contextlib import contextmanager from functools import partial import os import posixpath import re import struct from tempfile import NamedTemporaryFile import warnings import xml.etree.ElementTree as ET from xml.dom import minidom import zipfile class bdist_wotmod(Command): description = 'create .wotmod mod package for World of Tanks' # List of option tuples: long name, short name (None if no short # name), and help string. user_options = [ ('bdist-dir=', 'd', "temporary directory for creating the distribution"), ('dist-dir=', 'd', "directory to put final built distributions in"), ('author-id=', None, "developer's nickname or website (f.e. com.example) [default: setup author or maintainer]"), ('mod-id=', None, "modification identifier [default: setup name]"), ('mod-version=', None, "modification version [default: setup version]"), ('mod-description=', None, "modification description [default: setup description]"), ('version-padding=', None, "number of zeros to use to pad each version fragment [default: 2]"), ('install-lib=', None, "installation directory for module distributions [default: 'res/scripts/client/gui/mods']"), ('install-data=', None, "installation directory for data files [default: 'res/mods/<author_id>.<mod_id>']"), ('python27=', None, "Path to Python 2.7 executable (required when command is executed with non-2.7 Python interpreter) " "[default: BDIST_WOTMOD_PYTHON27 environment variable]"), ] def initialize_options(self): self.bdist_dir = None self.dist_dir = None self.author_id = None self.mod_id = None self.mod_version = None self.mod_description = None self.version_padding = None self.install_lib = None self.install_data = None self.python27 = None def finalize_options(self): # Resolve install directory if self.bdist_dir is None: bdist_base = self.get_finalized_command('bdist').bdist_base self.bdist_dir = os.path.join(bdist_base, 'wotmod') # Resolve version_padding if self.version_padding is None: self.version_padding = 2 # Resolve author_id if self.author_id is None: self.author_id = self.distribution.get_author() if self.author_id == 'UNKNOWN': self.author_id = self.distribution.get_maintainer() self.author_id = re.sub(r'[^\w.]', '', self.author_id) # Resolve mod_id if self.mod_id is None: self.mod_id = self.distribution.get_name() self.mod_id = re.sub(r'[^\w.]', '', self.mod_id) # Resolve mod_version and pad each version fragment with zeros if self.mod_version is None: self.mod_version = self.distribution.get_version() # Try to pick only major, minor and patch parts from the input version. # Using any of the other parts (e.g. rc1, -alpha, -beta) might lead to # issues when author leaves them out at time of creating a release # version. 
As WoT determines the wotmod file to load using strcmp() it # is not safe to add optional parts at the end of the version string as # those prerelease wotmod packages would be loaded instead of the # release version. version = self.distribution.get_version() version_obj = packaging.version.Version(version) release_parts = version_obj._version.release release_str = '.'.join(str(x) for x in release_parts) if release_str != version: warnings.warn( 'bdist_wotmod: Using only release part %s of the version %s to form ' 'the wotmod package version' % (repr(release_str), repr(version)) ) parts = release_parts if len(parts) == 1: warnings.warn('bdist_wotmod: Minor part of the version is missing, setting it to zero') parts += (0,) if len(parts) == 2: warnings.warn('bdist_wotmod: Patch part of the version is missing, setting it to zero') parts += (0,) self.mod_version = '.'.join(str(part).rjust(self.version_padding, '0') for part in parts) # Resolve mod_description if self.mod_description is None: self.mod_description = self.distribution.get_description() self.mod_description = re.sub(r'\W', '', self.mod_description) # Resolve where py/pyc files should be placed in if self.install_lib is None: self.install_lib = 'res/scripts/client/gui/mods' # Resolve where data files should be placed in if self.install_data is None: self.install_data = 'res/mods/%s.%s' % (self.author_id, self.mod_id) if self.python27 is None and 'BDIST_WOTMOD_PYTHON27' in os.environ: self.python27 = os.environ['BDIST_WOTMOD_PYTHON27'] self.set_undefined_options('bdist', ('dist_dir', 'dist_dir')) def run(self): self.distribution.get_command_obj('install_data').warn_dir = 0 self.build_files() self.verify_pyc_files() self.install_files() self.create_metaxml() self.include_other_documents() self.mkpath(self.dist_dir) package_path = self.create_wotmod_package() self.distribution.dist_files.append(('bdist_wotmod', 'any', package_path)) if os.path.isdir(self.bdist_dir): remove_tree(self.bdist_dir) def build_files(self): """ Compiles .py files. """ # By default build_py just copies already built pyc files from # a cache. Set compile=1 to force recreation of those files. build = self.reinitialize_command('build_py', reinit_subcommands=1) build.compile=1 if self.python27: # Monkey patch byte_compile() with custom function that delegates # the byte compilation to a separate Python 2.7 interpreter. # This is required when setuptools-wotmod is executed with Python # 3.x. The pyc files must still be Python 2.7 based for them to # succesfully load into World of Tanks's embedded Python interpreter. with patch_func(distutils.util, 'byte_compile', partial(python27_byte_compile, self.python27)): self.run_command('build') else: self.run_command('build') def verify_pyc_files(self): """ Ensures that compiled pyc files are loadable by Python 2.7. """ for root, dirs, files in os.walk(self.get_finalized_command('build_py').build_lib): for filename in files: if os.path.splitext(filename)[1] == '.pyc': filepath = os.path.join(root, filename) assert is_python27_pyc_file(filepath), \ 'File "%s" is not valid Python 2.7 byte-compiled ' \ 'file, ensure that command line argument --python27 ' \ 'or env variable BDIST_WOTMOD_PYTHON27 points to ' \ 'Python 2.7 interpreter' % filepath def install_files(self): """ Installs files defined in setup.py to bdist-dir. 
""" install = self.reinitialize_command('install', reinit_subcommands=1) install.root = self.bdist_dir install.install_lib = self.install_lib install.install_data = self.install_data install.warn_dir = 0 # Compiling is already done in build-step, no need to recompile. This # doesn't even work with Python 3 if source files contain Python 2 # spesific syntax install.compile = 0 # No need for egg metadata and executable scripts in wotmod package install.sub_commands = [cmd for cmd in install.sub_commands if cmd[0] != 'install_egg_info'] install.sub_commands = [cmd for cmd in install.sub_commands if cmd[0] != 'install_scripts'] log.info("installing to %s" % self.bdist_dir) self.run_command('install') def create_metaxml(self): """ Creates meta.xml file to root of bdist-dir. """ metaxml_path = os.path.join(self.bdist_dir, 'meta.xml') log.info("Writing %s", metaxml_path) with open(metaxml_path, 'wb') as metaxml_file: root = ET.Element('root') id = ET.SubElement(root, 'id') id.text = '%s.%s' % (self.author_id, self.mod_id) version = ET.SubElement(root, 'version') version.text = self.mod_version name = ET.SubElement(root, 'name') name.text = self.distribution.get_name() description = ET.SubElement(root, 'description') description.text = self.distribution.get_description() xml_contents = ET.tostring(root, encoding='utf-8') xml_contents = minidom.parseString(xml_contents).toprettyxml(encoding='utf-8') metaxml_file.write(xml_contents) def include_other_documents(self): """ Copies other documents (license, changelog, readme) files to root of bdist-dir. """ patterns = ['readme', 'license', 'changes'] entries = os.listdir('.') entries = filter(os.path.isfile, entries) matches = filter(lambda e: any(p in e.lower() for p in patterns), entries) for match in matches: copy_file(match, self.bdist_dir) def create_wotmod_package(self): """ Inserts files from bdist-dir to .wotmod package and stores it to dist-dir. :return: path to wotmod package file """ zip_filename = self.get_output_file_path() mkpath(os.path.dirname(zip_filename)) log.info("creating '%s' and adding '%s' to it", zip_filename, self.bdist_dir) archive_root = to_posix_separators(self.bdist_dir) with zipfile.ZipFile(zip_filename, 'w') as zip: for dirpath, dirnames, filenames in os.walk(archive_root): dirpath = to_posix_separators(dirpath) # Build relative path from bdist_dir forward archive_dirpath = dirpath.replace(posixpath.commonprefix( [dirpath, archive_root]), '').strip('/') # Create files for name in filenames: archive_path = posixpath.join(archive_dirpath, name) path = posixpath.normpath(posixpath.join(dirpath, name)) if posixpath.isfile(path): log.info("adding '%s'" % archive_path) zip.write(path, archive_path) # Set correct flags for directories for name in dirnames: archive_path = posixpath.join(archive_dirpath, name) + '/' log.info("adding '%s'" % archive_path) zip.writestr(archive_path, '') return zip_filename def get_output_file_path(self): """ Returns path to the wotmod file. This method can be called either before or after running the command. When executed before the returned file path hasn't been created yet. 
:return: path to the wotmod package file """ zip_filename = "%s.%s_%s.wotmod" % ( self.author_id, self.mod_id, self.mod_version) return os.path.abspath(os.path.join(self.dist_dir, zip_filename)) def to_posix_separators(win_path): return win_path.replace('\\', '/') if os.sep == '\\' else win_path @contextmanager def patch_func(module, function_name, replacement): """ Helper function that patches a function in a module with a replacement function. The returned value must be used in with-statement where the patching will happen upon entering the inner block and the patching is undone when execution leaves the block. :param module: target module to modify :param function_name: target function name to modify :param replacement: replacement function :return: context manager """ original = getattr(module, function_name) setattr(module, function_name, replacement) try: yield finally: setattr(module, function_name, original) def python27_byte_compile(python27, files, optimize, force, prefix, dry_run): """ Replacement function for distutils.util.byte_compile() which delegates call to external Python interpreter, given as the first argument. """ with NamedTemporaryFile(suffix='.py', delete=False) as script_file: script_file.write('\n'.join([ 'from distutils.util import byte_compile', 'files = [', ',\n'.join(map(repr, files)), ']', 'byte_compile(files, optimize=%r, force=%r, prefix=%r)' % (optimize, force, prefix) ]).encode('utf-8')) try: distutils.util.spawn([python27, script_file.name], dry_run=dry_run) finally: distutils.util.execute(os.remove, (script_file.name,), "removing %s" % script_file.name, dry_run=dry_run) def is_python27_pyc_file(filepath): with open(filepath, 'rb') as pyc_file: magic_number = struct.unpack('BBBB', pyc_file.read(4)) return magic_number == (0x03, 0xf3, 0x0d, 0x0a) return False ```
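A sketch of how a mod project such as the hello-world example above might wire `bdist_wotmod` into its `setup.py`; the package metadata is invented, and registering through `cmdclass` is just one explicit way to hook the command in.

```python
# Hypothetical setup.py for the johndoe.helloworld example mod.
from setuptools import setup, find_packages

from setuptools_wotmod.bdist_wotmod import bdist_wotmod

setup(
    name='helloworld',                       # becomes the mod id
    version='1.0.0',                         # padded to 01.00.00 in the package name
    author='johndoe',                        # becomes the author id
    description='Example hello world mod',
    packages=find_packages(),
    cmdclass={'bdist_wotmod': bdist_wotmod},
)

# Running "python setup.py bdist_wotmod --python27=/path/to/python2.7" would
# then produce dist/johndoe.helloworld_01.00.00.wotmod
```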
{ "source": "jhalan6/learn-and-think", "score": 2 }
#### File: learn_think/src/think.py
```python
import alfred, sys, json, commands


def main():
    # Build a grep pipeline: search the knowledge directory for "key" lines,
    # then narrow the hits with one extra grep per query word.
    knowledge_dir = '~/Knowledges'
    query = sys.argv[1:]
    result = "grep -r key %s" % knowledge_dir
    for argu in query:
        if argu:
            result += "| grep " + argu
    (status, output) = commands.getstatusoutput(result)

    # Turn each "file:match" line into an Alfred result item.
    result = []
    i = 1
    for line in output.split('\n'):
        if line:
            file_name = line.split(':')[0]
            key_list = line.split(':')[1].decode('utf8')
            preview = commands.getoutput("cat %s | grep title" % file_name).decode('utf8')
            result.append(alfred.Item({"uid": alfred.uid(i), 'arg': file_name}, key_list, preview, None))
            i += 1
    if not result:
        result.append(alfred.Item({"uid": alfred.uid(i)}, "nothing found", "", None))
    alfred.write(alfred.xml(result))


if __name__ == "__main__":
    main()
```
{ "source": "jhale1805/github-projects-burndown-chart", "score": 3 }
#### File: src/github_projects_burndown_chart/main.py
```python
import argparse

from chart.burndown import *
from config import config
from discord import webhook
from gh.api_wrapper import get_organization_project, get_repository_project
from gh.project import Project
from util import calculators, colors
from util.stats import *
from util.calculators import *


def parse_cli_args():
    parser = argparse.ArgumentParser(
        description='Generate a burndown chart for a GitHub project.')
    parser.add_argument("project_type", choices=['repository', 'organization'],
                        help="The type of project to generate a burndown chart for. Can be either 'organization' or 'repository'.")
    parser.add_argument("project_name",
                        help="The name of the project as it appears in the config.json")
    parser.add_argument("--discord", action='store_true',
                        help="If present, posts the burndown chart to the configured webhook")
    return parser.parse_args()


def download_project_data(args):
    if args.project_type == 'repository':
        project: Project = get_repository_project()
    elif args.project_type == 'organization':
        project: Project = get_organization_project()
    return project


def prepare_chart_data(stats: ProjectStats):
    color = colors()
    data = BurndownChartData(
        sprint_name=stats.project.name,
        utc_chart_start=config.utc_sprint_start(),
        utc_chart_end=config.utc_chart_end() or config.utc_sprint_end(),
        utc_sprint_start=config.utc_sprint_start(),
        utc_sprint_end=config.utc_sprint_end(),
        total_points=stats.total_points,
        series=[
            BurndownChartDataSeries(
                name=pts_type,
                data=stats.remaining_points_by_date(
                    calculators(stats.project)[pts_type]),
                format=dict(color=next(color))
            ) for pts_type in config['settings'].get('calculators', ['closed'])
        ],
        points_label=f"Outstanding {'Points' if config['settings']['points_label'] else 'Issues'}"
    )
    return data


if __name__ == '__main__':
    args = parse_cli_args()
    config.set_project(args.project_type, args.project_name)
    project = download_project_data(args)
    stats = ProjectStats(project,
                         config.utc_sprint_start(),
                         config.utc_chart_end() or config.utc_sprint_end())
    # Generate the burndown chart
    burndown_chart = BurndownChart(prepare_chart_data(stats))
    if args.discord:
        chart_path = "./tmp/chart.png"
        burndown_chart.generate_chart(chart_path)
        webhook.post_burndown_chart(chart_path)
    else:
        burndown_chart.render()
    print('Done')
```
#### File: github_projects_burndown_chart/gh/test_project.py
```python
import unittest


class TestProject(unittest.TestCase):

    def test_dummy(self):
        self.assertTrue(True)
```
{ "source": "jhale1805/tones", "score": 4 }
#### File: tones/tones/_utils.py
```python
import math

import tones


def _fade_up(data, start, end, istep=1, astep=0.005):
    amp = 0.0
    for i in range(start, end, istep):
        if amp >= 1.0:
            break
        data[i] *= amp
        amp += astep


def _translate(value, inmin, inmax, outmin, outmax):
    scaled = float(value - inmin) / float(inmax - inmin)
    return outmin + (scaled * (outmax - outmin))


def _sine_sample(amp, freq, rate, i) -> float:
    """
    Generates a single audio sample taken at the given sampling rate on a
    sine wave oscillating at the given frequency at the given amplitude.

    :param float amp The amplitude of the sine wave to sample
    :param float freq The frequency of the sine wave to sample
    :param int rate The sampling rate
    :param int i The index of the sample to pull
    :return float The audio sample as described above
    """
    return float(amp) * math.sin(2.0 * math.pi * float(freq) * (float(i) / float(rate)))
```
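A short sketch exercising `_sine_sample` and `_translate` from the helpers above; the rate, frequency and output range are arbitrary example values.

```python
# One second of a 440 Hz tone at 44.1 kHz, rescaled from [-1.0, 1.0]
# to an 8-bit range with the helpers above.
from tones._utils import _sine_sample, _translate

RATE = 44100
samples = [_sine_sample(amp=0.8, freq=440.0, rate=RATE, i=i) for i in range(RATE)]
eight_bit = [int(_translate(s, -1.0, 1.0, 0, 255)) for s in samples]
print(eight_bit[:5])
```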
{ "source": "jhale/computational-workflows", "score": 3 }
#### File: unit_testing/wallet/wallet.py
```python
class InsufficientAmount(Exception):
    pass


class Wallet(object):

    def __init__(self, initial_amount=0):
        self.balance = initial_amount

    def spend_cash(self, amount):
        if amount > self.balance:
            raise InsufficientAmount
        self.balance -= amount

    def add_cash(self, amount):
        self.balance += amount
```
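Since `Wallet` is the worked example for the unit-testing material, here are a few pytest-style tests of the kind it is meant to support; the file name `test_wallet.py` is a suggestion only.

```python
# Hypothetical test_wallet.py; assumes pytest is installed and wallet.py
# is importable from the same directory.
import pytest

from wallet import Wallet, InsufficientAmount


def test_default_initial_amount_is_zero():
    assert Wallet().balance == 0


def test_add_then_spend_updates_balance():
    wallet = Wallet(20)
    wallet.add_cash(80)
    wallet.spend_cash(30)
    assert wallet.balance == 70


def test_overspending_raises_insufficient_amount():
    with pytest.raises(InsufficientAmount):
        Wallet(5).spend_cash(10)
```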
{ "source": "J-HaleOf76/rnvimr", "score": 3 }
#### File: ranger/plugins/client.py ```python import os from . import rutil class Client(): """ Ranger client for RPC """ def __init__(self): self.nvim = None def notify(self, msg, level=2): """ Notify message. :param msg str: message be sent. :param level int: level for message. """ self.nvim.call('rnvimr#rpc#notify', msg, level, async_=True) def attach_nvim(self): """ Attach neovim session by socket path. """ server_name = os.getenv('NVIM_LISTEN_ADDRESS') self.nvim = rutil.attach_nvim(server_name) def get_window_info(self): """ Get the floating window info. """ return self.nvim.call('rnvimr#rpc#get_window_info') def hide_window(self): """ Hide the floating window. """ self.nvim.call('rnvimr#rpc#enable_attach_file', async_=True) self.nvim.request('nvim_win_close', 0, 1, async_=True) def set_winhl(self, winhl): """ Set the floating window highlight. :param winhl str: variable in ranger buffer """ self.nvim.call('rnvimr#rpc#set_winhl', winhl, async_=True) def list_buf_name_nr(self): """ List buffers in a dict, with name as key and number as val. """ return self.nvim.call('rnvimr#rpc#list_buf_name_nr') def do_saveas(self, bufnr, target_name): """ Use bufnr to save buffer as target_name. target_name must be existed before saving buffer, otherwise nothing happens. :param bufnr int: buffer number in neovim :param target_name str: absolute path of a target name """ if not os.path.exists(target_name): return try: self.nvim.call('rnvimr#rpc#do_saveas', bufnr, target_name, async_=False) except Exception: # pylint: disable=broad-except pass def move_buf(self, src, dst): """ Move the buffer from src to dst for saving information of loaded buffers included in src. :param src str: absolute path of source :param dst str: absolute path of destination """ buf_name_nr = self.list_buf_name_nr() isdir = os.path.isdir(dst) if isdir: ncwd = self.get_cwd() self.set_cwd('', noautocmd=True) for name, num in buf_name_nr.items(): if isdir: if rutil.is_subpath(src, name): real_dst = os.path.join(dst, os.path.relpath(name, src)) self.do_saveas(num, real_dst) elif name == src: self.do_saveas(num, dst) break if isdir: self.set_cwd(ncwd, noautocmd=True) def get_cb(self): """ Get current buffer of neovim. """ return self.nvim.command_output('echo expand("#:p")') def get_cwd(self): """ Get current work directory of neovim. """ return self.nvim.command_output('pwd') def set_cwd(self, path, noautocmd=False): """ Set current work directory of neovim. :param path str: absolute path :param noautocmd bool: whether use noautocmd command """ self.nvim.command('noautocmd wincmd p') noa = 'noautocmd' if noautocmd else '' self.nvim.command(f'sil {noa} cd {path}') self.nvim.command('noautocmd wincmd p') self.nvim.command('startinsert') def rpc_edit(self, files, edit=None, picker=None): """ Edit ranger target files in neovim though RPC. :param files list: list of file name :param edit str: neovim edit command :param picker bool: whether to become a picker """ args = [edit if edit else 'edit'] if not files: return args.append([str(file) for file in files]) if picker is not None: args.append(picker) self.nvim.call('rnvimr#rpc#edit', *args, async_=True) ``` #### File: plugins/patch/action.py ```python import os from ranger.core.actions import Actions def enhance_rename(client): """ Enhance low-level rename method to save information in loaded buffers. 
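A brief interactive sketch of the `Client` above; it only makes sense inside a ranger process spawned by rnvimr, where `NVIM_LISTEN_ADDRESS` points at the owning Neovim session, and the import is written as it would look from within the plugins package.

```python
# Assumes a live rnvimr session; outside of it attach_nvim() has no
# NVIM_LISTEN_ADDRESS to connect to.
from .client import Client

client = Client()
client.attach_nvim()

client.notify('hello from ranger')        # echoes a message in Neovim
print(client.get_cwd())                   # Neovim's current working directory
print(client.list_buf_name_nr())          # {buffer name: buffer number}
client.hide_window()                      # close the floating window
```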
:param client object: Object of attached neovim session """ def rename(self, src, dest): if hasattr(src, 'path'): src = src.path try: os.makedirs(os.path.dirname(dest)) except OSError: pass try: os.rename(src, dest) except OSError as err: self.notify(err) return False else: dst = os.path.abspath(dest) client.move_buf(src, dst) return True Actions.rename = rename ``` #### File: plugins/patch/ccommands.py ```python import os import tempfile import shlex def enhance_quit(commands, client): """ Make ranger pretend to quit. :param commands dict: command name as key, command class as val :param client object: Object of attached neovim session """ quit_cls = commands.get_command('quit') if not quit_cls: return def execute(self): if len(self.fm.tabs) >= 2: self.fm.tab_close() else: client.hide_window() self.fm.execute_console('ClearImage') quit_cls.execute = execute def enhance_bulkrename(commands, client): """ Bulkrename need a block workflow, so restore the raw editor to edit file name. Enhance bulkrename to save information in loaded buffers. :param commands dict: command name as key, command class as val :param client object: Object of attached neovim session """ bulkrename_cls = commands.get_command('bulkrename') if not bulkrename_cls: return def _parse_cmd_and_move_buf(content): for line in content.decode('utf-8').splitlines(): cmd = shlex.split(line, comments=True) if cmd: try: src, dst = cmd[-2:] except ValueError: pass else: client.move_buf(os.path.abspath(src), os.path.abspath(dst)) def execute(self): # pylint: disable=too-many-locals from ranger.container.file import File # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel from ranger.ext.shell_escape import shell_escape as esc editor = os.getenv('EDITOR') if not editor: editor = 'nvim' # Create and edit the file list filenames = [f.relative_path for f in self.fm.thistab.get_selection()] with tempfile.NamedTemporaryFile(delete=False) as listfile: listpath = listfile.name listfile.write('\n'.join(filenames).encode(encoding='utf-8', errors='surrogateescape')) self.fm.execute_file([File(listpath)], app=editor) with open(listpath, 'r', encoding='utf-8', errors='surrogateescape') as listfile: new_filenames = listfile.read().split("\n") os.unlink(listpath) if all(a == b for a, b in zip(filenames, new_filenames)): self.fm.notify('No renaming to be done!') return # Generate script with tempfile.NamedTemporaryFile() as cmdfile: script_lines = [] script_lines.append('# This file will be executed when you close the editor.') script_lines.append('# Please double-check everything, clear the file to abort.') new_dirs = [] for old, new in zip(filenames, new_filenames): if old != new: basepath, _ = os.path.split(new) if (basepath and basepath not in new_dirs and not os.path.isdir(basepath)): basepath = esc(basepath) script_lines.append(f'mkdir -vp -- {basepath}') new_dirs.append(basepath) old, new = esc(old), esc(new) script_lines.append(f'mv -vi -- {old} {new}') # Make sure not to forget the ending newline script_content = '\n'.join(script_lines) + '\n' cmdfile.write(script_content.encode(encoding='utf-8', errors='surrogateescape')) cmdfile.flush() # Open the script and let the user review it, then check if the # script was modified by the user self.fm.execute_file([File(cmdfile.name)], app=editor) cmdfile.seek(0) new_content = cmdfile.read() script_was_edited = (script_content != new_content) # Do the renaming self.fm.run(['/bin/sh', cmdfile.name], flags='w') _parse_cmd_and_move_buf(new_content) # Retag the files, but 
only if the script wasn't changed during review, # because only then we know which are the source and destination files. if not script_was_edited: tags_changed = False for old, new in zip(filenames, new_filenames): if old != new: oldpath = self.fm.thisdir.path + '/' + old newpath = self.fm.thisdir.path + '/' + new if oldpath in self.fm.tags: old_tag = self.fm.tags.tags[oldpath] self.fm.tags.remove(oldpath) self.fm.tags.tags[newpath] = old_tag tags_changed = True if tags_changed: self.fm.tags.dump() else: self.fm.notify('files have not been retagged') bulkrename_cls.execute = execute ``` #### File: plugins/patch/statusbar.py ```python from ranger.gui.widgets.statusbar import StatusBar def __init__(self, win, column=None): """ https://github.com/ranger/ranger/pull/2005 hijack the StatusBar's __init__ """ raw_init(self, win, column) for opt in ('hidden_filter', 'show_hidden'): self.settings.signal_bind('setopt.' + opt, self.request_redraw, weak=True) raw_init = StatusBar.__init__ StatusBar.__init__ = __init__ ``` #### File: ranger/plugins/rcommand.py ```python import os from ranger.api.commands import Command from ranger.core.loader import Loadable class NvimEdit(Command): """ A command of ranger to use neovim's command to edit file. """ def execute(self): last_arg = self.args[-1] if last_arg.lower() in ('true', 'false'): picker_enabled = self.args.pop().lower() == 'true' else: picker_enabled = None action = ' '.join(self.args[1:]) if not self.fm.thisfile.is_file or not action: return self.fm.client.rpc_edit([self.fm.thisfile], edit=action, picker=picker_enabled) class JumpNvimCwd(Command): """ A command of ranger to jump into the cwd of neovim. """ def execute(self): path = None if self.fm.client: path = self.fm.client.get_cwd() self.fm.cd(path) class EmitRangerCwd(Command): """ A command of ranger to emit cwd of ranger to neovim. """ def execute(self): if self.fm.client: self.fm.client.set_cwd(self.fm.thisdir.path) self.fm.client.notify("CWD has been changed to " + self.fm.thisdir.path) class ClearImage(Command): """A command of ranger to clear image""" def execute(self): columns = self.fm.ui.browser.columns if len(columns) > 1: columns[-1].clear_image(force=True) class AttachFile(Command): """ A command of ranger to attach file. 
""" resolve_macros = False def execute(self): path = self.rest(1) if not path and self.fm.client: path = self.fm.client.get_cb() if os.path.isdir(path): dirname = path elif os.path.isfile(path): self.fm.attached_file = path dirname = os.path.dirname(path) else: return if self.fm.thisdir.path == dirname or self.fm.enter_dir(dirname): if os.path.isfile(path): self.fm.thisdir.refilter() self.fm.thisdir.move_to_obj(path) descr = 'Redraw manually after attach event' loadable = Loadable(self.redraw_status(), descr) self.fm.loader.add(loadable, append=True) def redraw_status(self): """ Redraw statusbar cause by generator of Dictionary """ self.fm.ui.status.request_redraw() yield ``` #### File: ranger/plugins/rutil.py ```python import os import sys from importlib import util import pynvim def attach_nvim(server_name): """ A wrapper of Pynvim attach :param server_name str: server_name, maybe socket or tcp """ nvim = None if server_name: try: nvim = pynvim.attach('socket', path=server_name) except FileNotFoundError: try: addr, port = server_name.split(':') nvim = pynvim.attach('tcp', address=addr, port=port) except ValueError: pass return nvim def find_git_root(path): """ find a git root directory :param path str: absolute path """ while True: if os.path.basename(path) == '.git': return None repodir = os.path.join(path, '.git') if os.path.exists(repodir): return path path_o = path path = os.path.dirname(path) if path == path_o: return None def is_subpath(spath, lpath): """ check the short path is a subpath of long path :param spath str: short path :param lpath str: long path """ if lpath.startswith(spath): slen, llen = len(spath), len(lpath) return True if slen == llen else lpath[slen] == '/' return False def dynamic_import(name, path): """ import single moudle dynamically :param name str: module name :param path str: module path """ if name in sys.modules: return None spec = util.spec_from_file_location(name, path) module = util.module_from_spec(spec) sys.modules[name] = module spec.loader.exec_module(module) return module ```
{ "source": "JHalili/KidneySegQuicknat", "score": 2 }
#### File: JHalili/KidneySegQuicknat/run.py ```python import argparse import os import torch from settings import Settings as stng import utils.evaluator as eu from quicknat import QuickNat from solver import Solver #utility imports import utils.data_utils as du from utils.data_loader import ToTensor, NiftiData from utils.log_utils import LogWriter import shutil import ast from torchvision import transforms from polyaxon_client.tracking import Experiment, get_data_paths import polyaxon_helper torch.set_default_tensor_type('torch.FloatTensor') transform_train = transforms.Compose([ ToTensor(), ]) transform_val = transforms.Compose([ ToTensor(), ]) def train(train_params, common_params, data_params, net_params): du.filter_and_split_data(data_params) train_files = du.load_volume_paths_from_case_file(data_params["data_dir"], data_params["train_data_file"]) val_files = du.load_volume_paths_from_case_file(data_params["data_dir"], data_params["val_data_file"]) train_data = NiftiData(train_files, data_params, mode='train') val_data = NiftiData(val_files, data_params, mode='val') train_loader = torch.utils.data.DataLoader(train_data, batch_size=train_params['batch_step_size'], shuffle=True, num_workers=4, pin_memory=True) val_loader = torch.utils.data.DataLoader(val_data, batch_size=train_params['batch_step_size'], shuffle=True, num_workers=4, pin_memory=True) if train_params['use_pre_trained']: quicknat_model = QuickNat(net_params) quicknat_model.load_state_dict(torch.load(train_params['pre_trained_path'])) else: quicknat_model = QuickNat(net_params) solver = Solver(quicknat_model, exp_name=train_params['exp_name'], device=common_params['device'], num_class=net_params['num_class'], optim_args={"lr": train_params['learning_rate'], "betas": train_params['optim_betas'], "eps": train_params['optim_eps'], "weight_decay": train_params['optim_weight_decay']}, model_name=common_params['model_name'], labels=data_params['labels'], log_nth=train_params['log_nth'], num_epochs=train_params['num_epochs'], lr_scheduler_step_size=train_params['lr_scheduler_step_size'], lr_scheduler_gamma=train_params['lr_scheduler_gamma'], use_last_checkpoint=train_params['use_last_checkpoint'], log_dir=common_params['log_dir'], exp_dir=common_params['exp_dir'], train_batch_size=train_params['train_batch_size'], val_batch_size=train_params['val_batch_size']) solver.train(train_loader, val_loader) final_model_path = os.path.join(common_params['save_model_dir'], train_params['final_model_file']) solver.save_best_model(final_model_path) print("final model saved @ " + str(final_model_path)) def evaluate(eval_params, net_params, data_params, common_params, train_params): eval_model_path = eval_params['eval_model_path'] save_predictions_dir = eval_params['save_predictions_dir'] # go to evaluator, remap_labels and add an option "do nothing", because you don't need to remap anything device = common_params['device'] log_dir = common_params['log_dir'] exp_dir = common_params['exp_dir'] exp_name = train_params['exp_name'] prediction_path = os.path.join(exp_dir, exp_name, save_predictions_dir) print("Loading pretrained model") #Load trained model print(eval_params['eval_model_path']) model_path = eval_params['eval_model_path'] quicknat_model= QuickNat(net_params) if torch.cuda.is_available(): torch.cuda.empty_cache() quicknat_model.cuda(device) if not device: #quicknat_model.load_state_dict(torch.load(model_path)) #quicknat_model.to(device) quicknat_model = torch.load(eval_params['eval_model_path']) else: #checkpoint = torch.load(model_path) 
#quicknat_model.load_state_dict(checkpoint['state_dict']) #quicknat_model.load_state_dict(torch.load(model_path, map_location=device)) print("Load from eval path") quicknat_model = torch.load(eval_params['eval_model_path'], map_location = device) # Load test data print("Loading test data") eval_files = du.load_volume_paths_from_case_file(data_params["data_dir"], data_params["val_data_file"]) eval_data = NiftiData(eval_files, data_params, mode='eval') eval_loader = torch.utils.data.DataLoader(eval_data, batch_size=train_params['batch_step_size'], shuffle=False, num_workers=4, pin_memory=True) #Evaluate and log model results logWriter = LogWriter(log_dir, exp_name, labels=data_params['labels']) dice_score = eu.evaluate_dice_score(model=quicknat_model, data_loader=eval_loader, device=device, logWriter=logWriter) print("Average test dice score: ", dice_score) logWriter.close() def delete_contents(folder): for the_file in os.listdir(folder): file_path = os.path.join(folder, the_file) try: if os.path.isfile(file_path): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(e) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--mode', '-m', required=True, help='run mode, valid values are train and eval') parser.add_argument('--settings', '-s', required=True, help='which settings file to use, valid values are local and cluster') parser.add_argument('--param_name', '-pm', required=False, help='learning rate for experiment groups', default=None) parser.add_argument('--param_value', '-pv', required=False, help='learning rate for experiment groups', default=None) args = parser.parse_args() if args.settings == 'local': print("running local config"); settings_file = 'E:\quicknat-master\settings_local.ini' elif args.settings == 'cluster': settings_file = 'settings.ini' settings_dictionary = stng(settings_file).settings_dict common_params, data_params, net_params, train_params, eval_params = settings_dictionary['COMMON'], settings_dictionary['DATA'], settings_dictionary[ 'NETWORK'], settings_dictionary['TRAINING'], settings_dictionary['EVAL'] if args.settings == 'cluster': # override some of the common_params in order to get the correct polyaxon paths common_params['log_dir'] = polyaxon_helper.get_outputs_path() common_params['save_model_dir'] = polyaxon_helper.get_outputs_path() common_params['exp_dir'] = polyaxon_helper.get_outputs_path() # override training vaues for experimant groups if args.param_name and args.param_value: print("Before: ", train_params[args.param_name]) print("Adjusting experiment to run with learning rate: ", ast.literal_eval(args.param_value)) train_params[args.param_name] = ast.literal_eval(args.param_value) print("After: ", train_params[args.param_name]) print("Running with configuration:") print("COMMON_PARAMS") print(common_params) print("NET_PARAMS") print(net_params) print("DATA_PARAMS") print(data_params) print("TRAIN_PARAMS") print(train_params) if args.mode == 'train': train(train_params, common_params, data_params, net_params) elif args.mode == 'eval': evaluate(eval_params, net_params, data_params, common_params, train_params) elif args.mode == 'clear': shutil.rmtree(os.path.join(common_params['exp_dir'], train_params['exp_name'])) print("Cleared current experiment directory successfully!!") shutil.rmtree(os.path.join(common_params['log_dir'], train_params['exp_name'])) print("Cleared current log directory successfully!!") elif args.mode == 'clear-all': 
delete_contents(common_params['exp_dir']) print("Cleared experiments directory successfully!!") delete_contents(common_params['log_dir']) print("Cleared logs directory successfully!!") else: raise ValueError('Invalid value for mode. only support values are train, eval and clear') ``` #### File: KidneySegQuicknat/utils/data_utils.py ```python import os from pathlib import Path import nibabel as nb import numpy as np import utils.preprocessor as preprocessor import random from scipy.ndimage.interpolation import map_coordinates from scipy import interpolate as ipol from sklearn.utils import shuffle import utils.kits_data_utils as kutils from skimage.transform import resize def square_and_resize_volume(volume, targetResolution, nearestNeighbor=False, debug=False): if debug: print(' Resizing from ' + str(volume.shape[0]) + 'x' + str(volume.shape[1]) + 'x' + str(volume.shape[2]) + ' to ' + str(volume.shape[0]) + 'x' + str(targetResolution) + 'x' + str(targetResolution)) if nearestNeighbor: volume = resize(volume, (volume.shape[0], targetResolution, targetResolution), mode='constant', cval=0, clip=True, preserve_range=True, anti_aliasing=False, order=0) else: volume = resize(volume, (volume.shape[0], targetResolution, targetResolution), mode='constant', cval=0, clip=True, preserve_range=True, anti_aliasing=False) if debug: print("Done resizing") return volume def reduce_black_slices_in_volume(volume, label, threshold=10): slicesToDelete = [] for i in range(label.shape[0]): slice = label[i, :, :] if slice.max() == 0: remove = True for j in range(max([0, i - threshold]), min([i + threshold, label.shape[0]])): neighboringSlice = label[j, :, :] if neighboringSlice.max() == 1: remove = False break if remove: slicesToDelete.append(i) return np.delete(volume, slicesToDelete, axis=0), np.delete(label, slicesToDelete, axis=0) def load_nft_volumes(file_path, load_params): print("Loading vol: %s with label: %s and resolution %d" % (file_path[0], file_path[1], load_params['target_resolution'])) volume = np.squeeze(nb.load(file_path[0]).get_fdata()) labelmap = np.squeeze(nb.load(file_path[1]).get_fdata()) print("Volume shape: ", volume.shape) print("Lable shape: ", labelmap.shape) volume, labelmap = preprocessor.rotate_orientation(volume, labelmap, load_params['orientation']) volume = square_and_resize_volume(volume, load_params['target_resolution'], nearestNeighbor=False) labelmap = square_and_resize_volume(labelmap, load_params['target_resolution'], nearestNeighbor=False) # shuffle volume and label slices volume, labelmap = shuffle(volume, labelmap) return volume, labelmap def load_volume_paths_from_case_file(data_dir, file_path): data_dir = Path(data_dir) volumes_to_use = kutils.get_case_numbers_from_file(file_path) vol_files = [ [os.path.join(kutils.get_case_path(data_dir, case), 'imaging.nii.gz'), os.path.join(kutils.get_case_path(data_dir, case), 'segmentation.nii.gz')] for case in volumes_to_use] return vol_files def filter_and_split_data(data_params): data_skip_file , test_data_file = data_params["data_skip"], data_params["test_data"] train_file, val_file = data_params["train_data_file"], data_params["val_data_file"], data_split, data_dir = data_params["data_split"], data_params["data_dir"] data_skip = kutils.get_case_numbers_from_file(data_skip_file) test_data = kutils.get_case_numbers_from_file(test_data_file) case_numbers = kutils.filter_case_numbers(data_skip, test_data, data_dir) print("Total no of volumes to process : %d" % len(case_numbers)) train_ratio, test_ratio = data_split.split(",") 
train_len = int((int(train_ratio) / 100) * len(case_numbers)) train_idx = np.random.choice(len(case_numbers), train_len, replace=False) val_idx = np.array([i for i in range(len(case_numbers)) if i not in train_idx]) train_cases = [case_numbers[i] for i in train_idx] val_cases = [case_numbers[i] for i in val_idx] train_data = {} train_data['cases'] = train_cases kutils.write_to_file(train_file, train_data) val_data = {} val_data['cases'] = val_cases kutils.write_to_file(val_file, val_data) return ```
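For reference, `filter_and_split_data` and `load_volume_paths_from_case_file` above only read a handful of keys. A hypothetical `data_params` dict wiring them together could look like the sketch below; every path, file name, and the 80/20 ratio are placeholders, not values taken from the project's settings files.

```python
# Hypothetical data_params dict matching the keys read by the functions above.
# All paths are placeholders; the case-file format is whatever
# kits_data_utils.get_case_numbers_from_file / write_to_file expect.
data_params = {
    "data_dir": "/data/kits19",                   # root containing the case folders
    "data_skip": "/data/skip_cases.json",         # cases excluded entirely
    "test_data": "/data/test_cases.json",         # held-out test cases
    "train_data_file": "/data/train_cases.json",  # written by filter_and_split_data
    "val_data_file": "/data/val_cases.json",      # written by filter_and_split_data
    "data_split": "80,20",                        # "train,val" percentage split
}

# filter_and_split_data(data_params) writes the train/val case files, after
# which load_volume_paths_from_case_file(data_params["data_dir"],
# data_params["train_data_file"]) yields an [imaging.nii.gz, segmentation.nii.gz]
# path pair for each selected case.
```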
{ "source": "jhall11/sts", "score": 2 }
#### File: sts/config/nox_routing.py ```python from config.experiment_config_lib import ControllerConfig from sts.control_flow.fuzzer import Fuzzer from sts.input_traces.input_logger import InputLogger from sts.invariant_checker import InvariantChecker from sts.simulation_state import SimulationConfig from sts.topology import MeshTopology from sts.util.convenience import backtick def get_additional_metadata(): path = "nox_classic/build/src" return { 'commit' : backtick("git rev-parse HEAD", cwd=path), 'branch' : backtick("git rev-parse --abbrev-ref HEAD", cwd=path), 'remote' : backtick("git remote show origin", cwd=path), } # Use NOX as our controller start_cmd = "./nox_core -v -i ptcp:6633 sample_routing" controllers = [ControllerConfig(start_cmd, cwd="nox_classic/build/src", address="127.0.0.1", port=6633)] topology_class = MeshTopology topology_params = "num_switches=4" dataplane_trace = "dataplane_traces/ping_pong_same_subnet_4_switches.trace" simulation_config = SimulationConfig(controller_configs=controllers, topology_class=topology_class, topology_params=topology_params, dataplane_trace=dataplane_trace) # Use a Fuzzer (already the default) control_flow = Fuzzer(simulation_config, input_logger=InputLogger(), check_interval=80, invariant_check_name="InvariantChecker.python_check_connectivity") ``` #### File: sts/control_flow/fuzzer_new.py ```python import logging from sts.traffic_generator import TrafficGenerator from sts.replay_event import BlockControllerPair from sts.replay_event import UnblockControllerPair from sts.replay_event import ControllerFailure from sts.replay_event import ControllerRecovery from sts.replay_event import LinkFailure from sts.replay_event import LinkRecovery from sts.replay_event import SwitchFailure from sts.replay_event import SwitchRecovery from sts.replay_event import TrafficInjection from sts.replay_event import AddIntent from sts.replay_event import RemoveIntent from sts.replay_event import PingEvent from sts.replay_event import NOPInput from sts.util.capability import check_capability import random import sys from sts.invariant_checker import ViolationTracker class Simulation(object): def __init__(self, topology): self.topology = topology self.violation_tracker = ViolationTracker() class FuzzerParams(object): def __init__(self): self.link_failure_rate = 0.1 self.link_recovery_rate = 0.1 self.switch_failure_rate = 0.1 self.switch_recovery_rate = 0.1 self.controller_crash_rate = 0.1 self.controller_recovery_rate = 0.1 self.traffic_generation_rate = 0.1 self.dataplane_drop_rate = 0 self.block_controllers_rate = 0 self.unblock_controllers_rate = 0 self.policy_change_rate = 0 @staticmethod def get_all_zero(): params = FuzzerParams() for field in params.__dict__.keys(): if field.endswith('_rate'): setattr(params, field, 0) return params class EventsGenerator(object): def generate_events(self, fuzzer): raise NotImplementedError() from itertools import count class IntentsGenerator(EventsGenerator): _intent_id = count(1) def __init__(self, add_intent_rate, remove_intent_rate, ping_rate, bidir=True): self.log = logging.getLogger(__name__ + '.IntentsGenerator') self.add_intent_rate = add_intent_rate self.remove_intent_rate = remove_intent_rate self.ping_rate = ping_rate self.bidir = bidir self.intents = {} def has_intent(self, intent): intent_id = intent.get('intent_id', None) if intent_id in self.intents: return True for local_intent in self.intents.itervalues(): intent['intent_id'] = local_intent['intent_id'] if intent == local_intent: # Restore intent ID if 
intent_id is not None: intent['intent_id'] = intent_id return True return False def _generate_intent(self, fuzzer): intent_id = IntentsGenerator._intent_id.next() src_host = fuzzer.random.choice( list(fuzzer.topology.hosts_manager.live_hosts)) dst_host = fuzzer.random.choice( list(fuzzer.topology.hosts_manager.live_hosts - set([src_host]))) src_iface = fuzzer.random.choice(src_host.interfaces) dst_iface = fuzzer.random.choice(dst_host.interfaces) src_switch, src_port = fuzzer.topology.patch_panel.get_other_side(src_host, src_iface) dst_switch, dst_port = fuzzer.topology.patch_panel.get_other_side(dst_host, dst_iface) tmp = fuzzer.topology.controllers_manager.up_controllers controllers = [c for c in fuzzer.topology.controllers_manager.live_controllers if not c.config.intent_ip is None] if not controllers: # no controller is alive self.log.info("No controllers are up to generate intents for") return None controller = fuzzer.random.choice(controllers) intent = {} intent['cid'] = controller.cid intent['intent_id'] = str(intent_id) intent['src_dpid'] = str(src_switch.dpid) intent['dst_dpid'] = str(dst_switch.dpid) intent['src_port'] = src_port.port_no intent['dst_port'] = dst_port.port_no intent['src_mac'] = str(src_iface.hw_addr) intent['dst_mac'] = str(dst_iface.hw_addr) # Fixed values from now intent['intent_type'] = 'SHORTEST_PATH' intent['static_path'] = False intent['intent_ip'] = controller.config.intent_ip intent['intent_port'] = controller.config.intent_port intent['intent_url'] = controller.config.intent_url self.intents[intent['intent_id']] = intent self.log.debug("Generated Intent: %s", intent) return intent def generate_remove_intent(self, fuzzer): if self.remove_intent_rate > 0: assert check_capability(fuzzer.topology.controllers_manager, 'can_remove_intent') events = [] for intent_id, intent in self.intents.iteritems(): if fuzzer.random.random() < self.remove_intent_rate: event = RemoveIntent(cid=intent['cid'], intent_id=intent['intent_id'], intent_ip=intent['intent_ip'], intent_port=intent['intent_port'], intent_url=intent['intent_url']) events.append(event) return events def generate_add_intent(self, fuzzer): assert check_capability(fuzzer.topology.controllers_manager, 'can_add_intent') events = [] if fuzzer.random.random() < self.add_intent_rate: intent = self._generate_intent(fuzzer) if intent is None: return events event = AddIntent(**intent) events.append(event) if self.bidir: reverse_intent = intent.copy() reverse_intent['intent_id'] = str(IntentsGenerator._intent_id.next()) reverse_intent['dst_dpid'] = intent['src_dpid'] reverse_intent['src_dpid'] = intent['dst_dpid'] reverse_intent['dst_port'] = intent['src_port'] reverse_intent['src_port'] = intent['dst_port'] reverse_intent['dst_mac'] = intent['src_mac'] reverse_intent['src_mac'] = intent['dst_mac'] self.intents[reverse_intent['intent_id']] = reverse_intent events.append(AddIntent(**reverse_intent)) return events def fuzz_ping(self, fuzzer): events = [] for _, intent in self.intents.iteritems(): src_host = [h for h in fuzzer.topology.hosts_manager.live_hosts if str(h.interfaces[0].hw_addr) == intent['src_mac']][0] dst_host = [h for h in fuzzer.topology.hosts_manager.live_hosts if str(h.interfaces[0].hw_addr) == intent['dst_mac']][0] if fuzzer.random.random() < self.ping_rate: events.append(PingEvent(src_host_id=src_host.name, dst_host_id=dst_host.name)) return events def generate_events(self, fuzzer): events = [] events.extend(self.fuzz_ping(fuzzer)) events.extend(self.generate_remove_intent(fuzzer)) 
events.extend(self.generate_add_intent(fuzzer)) return events class Fuzzer(object): def __init__(self, topology, params, random_seed=None, initialization_rounds=0, policy_generator=None): """ Args: - topology: sts.topology.base.Topology instance to fuzz on. - params: FuzzerParams instance - random_seed: optionally set the seed of the random number generator - initialization_rounds: if non-zero, will wait the specified rounds to let the controller discover the topology before injecting inputs. """ self.log = logging.getLogger(__name__ + '.Fuzzer') if random_seed is None: random_seed = random.randint(0, sys.maxint) self.random_seed = random_seed self.random = random.Random(random_seed) self.topology = topology self.traffic_generator = TrafficGenerator(self.random) self.traffic_generator.set_topology(self.topology) self.params = params self.logical_time = 0 self.initialization_rounds = initialization_rounds self.policy_generator = policy_generator self.blocked_controller_pairs = [] @property def unblocked_controller_pairs(self): c_mgm = self.topology.controllers_manager sorted_controllers = sorted(c_mgm.live_controllers, key=lambda c: c.cid) unblocked_pairs = [] for i in xrange(0, len(sorted_controllers)): for j in xrange(i + 1, len(sorted_controllers)): c1 = sorted_controllers[i] c2 = sorted_controllers[j] unblocked_pairs.append((c1.cid, c2.cid)) return unblocked_pairs def sever_network_links(self): """ Returns list of LinkFailure events. """ if self.params.link_failure_rate > 0: assert self.topology.patch_panel.capabilities.can_sever_network_link events = [] for link in self.topology.patch_panel.live_network_links: if self.random.random() < self.params.link_failure_rate: if hasattr(link, 'node1'): event = LinkFailure(link.node1.dpid, link.port1.port_no, link.node2.dpid, link.port2.port_no) else: event = LinkFailure(link.start_node.dpid, link.start_port.port_no, link.end_node.dpid, link.end_port.port_no) self.log.debug("Generated LinkFailure event: %s", event) events.append(event) return events def repair_network_links(self): """ Returns set of links to be failed. 
""" if self.params.link_recovery_rate > 0: assert self.topology.patch_panel.capabilities.can_repair_network_link events = [] for link in self.topology.patch_panel.cut_network_links: if self.random.random() < self.params.link_recovery_rate: if hasattr(link, 'node1'): event = LinkRecovery(link.node1.dpid, link.port1.port_no, link.node2.dpid, link.port2.port_no) else: event = LinkRecovery(link.start_node.dpid, link.start_port.port_no, link.end_node.dpid, link.end_port.port_no) self.log.debug("Generated RepairLink event: %s", event) events.append(event) return events def crash_switches(self): if self.params.switch_failure_rate > 0: assert self.topology.switches_manager.capabilities.can_crash_switch events = [] for software_switch in self.topology.switches_manager.live_switches: if self.random.random() < self.params.switch_failure_rate: event = SwitchFailure(software_switch.dpid) self.log.debug("Generated SwitchFailure event: %s", event) events.append(event) return events def recover_switches(self): if self.params.switch_recovery_rate > 0: assert self.topology.switches_manager.capabilities.can_recover_switch events = [] for software_switch in self.topology.switches_manager.failed_switches: if self.random.random() < self.params.switch_recovery_rate: event = SwitchRecovery(software_switch.dpid) self.log.debug("Generated SwitchFailure event: %s", event) events.append(event) return events def crash_controllers(self): if self.params.controller_crash_rate > 0: assert self.topology.controllers_manager.capabilities.can_crash_controller events = [] for controller in self.topology.controllers_manager.live_controllers: if self.random.random() < self.params.controller_crash_rate: events.append(ControllerFailure(controller.cid)) return events def recover_controllers(self): if self.params.controller_recovery_rate > 0: assert self.topology.controllers_manager.capabilities.can_recover_controller events = [] for controller in self.topology.controllers_manager.failed_controllers: if self.random.random() < self.params.controller_recovery_rate: event = ControllerRecovery(controller.cid) self.log.debug("Generated ControllerRecovery event: %s", event) events.append(event) return events def fuzz_traffic(self): events = [] for host in self.topology.hosts_manager.live_hosts: if self.random.random() < self.params.traffic_generation_rate: if len(host.interfaces) > 0: traffic_type = "icmp_ping" (dp_event, send) = self.traffic_generator.generate(traffic_type, host, send_to_self=True) event = TrafficInjection(dp_event=dp_event) self.log.debug("Generated TrafficInjection event: %s", event) events.append(event) return events def block_controllers(self): events = [] if self.params.controller_recovery_rate > 0: assert self.topology.controllers_manager.capabilities.can_block_peers if (len(self.unblocked_controller_pairs) > 0 and self.random.random() < self.params.block_controllers_rate): cid1, cid2 = self.random.choice(self.unblocked_controller_pairs) self.unblocked_controller_pairs.remove((cid1, cid2)) self.blocked_controller_pairs.append((cid1, cid2)) event = BlockControllerPair(cid1, cid2) self.log.debug("Generated BlockControllerPair event: %s", event) events.append(event) return events def unblock_controllers(self): events = [] if self.params.controller_recovery_rate > 0: assert self.topology.controllers_manager.capabilities.can_unblock_peers if (len(self.blocked_controller_pairs) > 0 and self.random.random() < self.params.unblock_controllers_rate): cid1, cid2 = self.random.choice(self.blocked_controller_pairs) 
self.blocked_controller_pairs.remove((cid1, cid2)) self.unblocked_controller_pairs.append((cid1, cid2)) event = UnblockControllerPair(cid1, cid2) self.log.debug("Generated UnblockControllerPair event: %s", event) events.append(event) return events def next_events(self, logical_time): events = [] self.logical_time = logical_time if self.logical_time < self.initialization_rounds: self.log.info("Still in initialization round, not generating " "any event: %s", self.logical_time) return [NOPInput(prunable=False)] #events.extend(self.check_dataplane()) events.extend(self.crash_switches()) events.extend(self.recover_switches()) events.extend(self.sever_network_links()) events.extend(self.repair_network_links()) if self.params.policy_change_rate > 0: events.extend(self.policy_generator.generate_events(self)) #events.extend(self.fuzz_traffic()) #events.extend(self.fuzz_ping()) events.extend(self.crash_controllers()) events.extend(self.recover_controllers()) events.extend(self.block_controllers()) events.extend(self.unblock_controllers()) return events """ #self.check_dataplane() self.check_tcp_connections() self.check_pending_messages() self.check_pending_commands() #self.check_switch_crashes() #self.check_link_failures() #self.fuzz_traffic() #self.check_controllers() self.check_migrations() #self.check_intracontroller_blocks() """ ``` #### File: sts/dataplane_traces/trace.py ```python import pickle from pox.lib.util import assert_type from pox.lib.packet.ethernet import * from sts.entities import HostInterface import base64 import logging log = logging.getLogger("dataplane_trace") class DataplaneEvent (object): ''' Encapsulates a packet injected at a (switch.dpid, port) pair in the network Used for trace generation or replay debugging ''' def __init__ (self, interface, packet): assert_type("interface", interface, HostInterface, none_ok=False) assert_type("packet", packet, ethernet, none_ok=False) self.interface = interface self.packet = packet def to_json(self): json_safe_packet = base64.b64encode(self.packet.pack()).replace("\n", "") return {'interface' : self.interface.to_json(), 'packet' : json_safe_packet} @staticmethod def from_json(json_hash): interface = HostInterface.from_json(json_hash['interface']) raw = base64.b64decode(json_hash['packet']) packet = ethernet(raw=raw) return DataplaneEvent(interface, packet) def __repr__(self): return "Interface:%s Packet:%s" % (str(self.interface), str(self.packet)) class Trace(object): '''Encapsulates a sequence of dataplane events to inject into a simulated network.''' def __init__(self, tracefile_path, topology=None): with file(tracefile_path, 'r') as tracefile: self.dataplane_trace = pickle.load(tracefile) if topology is not None: # Hashmap used to inject packets from the dataplane_trace self.interface2host = { interface: host for host in topology.hosts for interface in host.interfaces } self._type_check_dataplane_trace() def _type_check_dataplane_trace(self): for dp_event in self.dataplane_trace: if dp_event.interface not in self.interface2host: raise RuntimeError("Dataplane trace does not type check (%s)" % str(dp_event.interface)) def peek(self): if len(self.dataplane_trace) == 0: log.warn("No more trace inputs to inject!") return (None, None) dp_event = self.dataplane_trace[0] host = self.interface2host[dp_event.interface] return (dp_event, host) def inject_trace_event(self): if len(self.dataplane_trace) == 0: log.warn("No more trace inputs to inject!") return else: log.info("Injecting trace input") dp_event = self.dataplane_trace.pop(0) if 
dp_event.interface not in self.interface2host: log.warn("Interface %s not present" % str(dp_event.interface)) return host = self.interface2host[dp_event.interface] host.send(dp_event.interface, dp_event.packet) return (dp_event, host) ``` #### File: sts/entities/controllers.py ```python import abc import logging import os import re from pox.lib.util import connect_socket_with_backoff from pox.lib.util import parse_openflow_uri from sts.util.procutils import popen_filtered, kill_procs from sts.util.console import msg from sts.util.network_namespace import launch_namespace from sts.util.network_namespace import bind_pcap from sts.util.convenience import IPAddressSpace from sts.util.convenience import deprecated from sts.entities.base import SSHEntity from sts.entities.sts_entities import SnapshotPopen class ControllerState(object): """ Represents different states of a controller TODO: use enum package """ ALIVE = 0 STARTING = 1 DEAD = 2 class ControllerConfig(object): """ Initial controller's configuration state. Most configuration are static at the this stage, it's better to put dynamic aspects of configurations (like finding free port) somewhere else. See: config.experiment_config_lib.py for dynamic configuration. Args: start_cmd: command that starts the controller followed by a list of command line tokens as arguments. You may make use of two macros: __address__ expands to the address given in this constructor and __port__ expands to the port given in this constructor. kill_cmd: command that kills the controller followed by a list of command line tokens as arguments restart_cmd: define this if restart is not as simple as kill then start or to implement a soft restart command. address, port: controller socket info for listening to OpenFlow connections from switches. address may be specified as "auto" to automatically find a non-localhost IP address in the range 192.168.1.0/24, or "__address__" to use get_address_cmd to choose an address. sync: A URI where this controller should listen for a STSSyncProto connection. 
Example: "tcp:localhost:18899" cwd: the working directory for the controller cid: controller unique ID label: controller human readable label """ def __init__(self, start_cmd, kill_cmd, restart_cmd=None, check_cmd=None, address="127.0.0.1", port=6633, sync=None, cwd=None, cid=None, label=None): self._start_cmd = start_cmd self._kill_cmd = kill_cmd self._restart_cmd = restart_cmd self._check_cmd = check_cmd self._address = address self._port = port self._sync = sync self._cwd = cwd self._cid = cid self._label = label if self._label is None and self._cid is not None: self._label = "Controller(%s)" % self._cid @property def address(self): """The address of the controller""" return self._address @property def port(self): """The port (Openflow) that the controller is listening to""" return self._port @property def cid(self): """Controller's unique ID""" return self._cid @property def label(self): """Controller's human readable label.""" return self._label @property def start_cmd(self): """ The command to start the controller The specific format of the command is dependent on the controller type """ return self._start_cmd @property def kill_cmd(self): """ The command to kill the controller The specific format of the command is dependent on the controller type """ return self._kill_cmd @property def restart_cmd(self): """ The command to restart the controller The specific format of the command is dependent on the controller type """ return self._restart_cmd @property def check_cmd(self): """ The unix (bash) command to check the status the controller The specific format of the command is dependent on the controller type """ return self._check_cmd @property def expanded_start_cmd(self): """Start command with substituted variables and arguments as a list""" return self._expand_vars(self.start_cmd).split() @property def expanded_kill_cmd(self): """Kill command with substituted variables and arguments as a list""" return self._expand_vars(self.kill_cmd).split() @property def expanded_restart_cmd(self): """Restart command with substituted variables and arguments as a list""" return self._expand_vars(self.restart_cmd).split() @property def expanded_check_cmd(self): """ Check status command with substituted variables and arguments as a list """ return self._expand_vars(self.check_cmd).split() @property def sync(self): return self._sync @property def cwd(self): """Controller's working directory""" return self._cwd def _expand_vars(self, cmd): """ Utility method to substitute variables in strings. Looks for variables of the form __NAME__ in the string and then replace it with local attributes of the same name (if any). """ if cmd is None: return None for cstr in re.findall("__[^_]*__", cmd): attr = cstr.strip("__") if hasattr(self, attr) and getattr(self, attr, None) is not None: cmd = cmd.replace(cstr, str(getattr(self, attr))) return cmd class ControllerAbstractClass(object): """ Controller Representation """ __metaclass__ = abc.ABCMeta def __init__(self, controller_config, sync_connection_manager=None, snapshot_service=None): """ Init a controller entity Args: controller_config: controller specific configuration object sync_connection_manager: ??? snapshot_service: a SnapshotService instance (sts/snapshot.py) for checking extracting the controller's current view of the network state. 
""" self._config = controller_config self._state = ControllerState.DEAD self._sync_connection_manager = sync_connection_manager self._snapshot_service = snapshot_service @property def config(self): """Controller specific configuration object""" return self._config @property def label(self): """Human readable label for the controller""" return self.config.label @property def cid(self): """Controller unique ID""" return self.config.cid @property def state(self): """ The current controller state. See: ControllerState """ return self._state @state.setter def state(self, value): self._state = value @property def snapshot_service(self): return self._snapshot_service @property def sync_connection_manager(self): return self._sync_connection_manager @abc.abstractproperty def is_remote(self): """ Returns True if the controller is running on a different host that sts """ raise NotImplementedError() @abc.abstractproperty def blocked_peers(self): """Return a list of blocked peer controllers (if any)""" raise NotImplementedError() @abc.abstractmethod def start(self, multiplex_sockets=False): """Starts the controller""" raise NotImplementedError() @abc.abstractmethod def block_peer(self, peer_controller): """Ignore traffic to/from the given peer controller """ raise NotImplementedError() @abc.abstractmethod def unblock_peer(self, peer_controller): """Stop ignoring traffic to/from the given peer controller""" raise NotImplementedError() @abc.abstractmethod def check_status(self, simulation): """ Check whether the actual status of the controller coincides with self.state Returns a tuple of ControllerStatus and message entailing the details of the status. """ raise NotImplementedError() class Controller(ControllerAbstractClass): """ Encapsulates the state of a running controller This is a basic implementation, not intended to work with a real controller """ # set of processes that are currently running. # These are all killed upon signal reception _active_processes = set() def _register_proc(self, proc): """ Register a Popen instance that a controller is running in for the cleanup that happens when the simulator receives a signal. This method is idempotent """ self._active_processes.add(proc) def _unregister_proc(self, proc): """ Remove a process from the set of this to be killed when a signal is received. This is for use when the Controller process is stopped. 
This method is idempotent """ self._active_processes.discard(proc) def __del__(self): # if it fails in __init__, process may not have been assigned if hasattr(self, 'process') and self.process is not None: if self.process.poll(): # don't let this happen for shutdown self._unregister_proc(self.process) else: self.kill() # make sure it is killed if this was started errantly def __init__(self, controller_config, sync_connection_manager=None, snapshot_service=None): """ idx is the unique index for the controller used mostly for logging purposes """ super(Controller, self).__init__(controller_config, sync_connection_manager, snapshot_service) self.process = None self.sync_connection = None self.log = logging.getLogger("Controller") # For network namespaces only: self.guest_eth_addr = None self.host_device = None self.welcome_msg = " =====> Starting Controller <===== " self.snapshot_socket = None @property @deprecated def remote(self): """ Returns True if the controller is listening to some something other than localhost """ return self.config.address != "127.0.0.1" and \ self.config.address != "localhost" @property def is_remote(self): return self.remote @property def pid(self): """ Return the PID of the Popen instance the controller was started with """ return self.process.pid if self.process else -1 @property def label(self): """ Return the label of this controller. See ControllerConfig for more details """ return self.config.label @property def cid(self): """ Return the id of this controller. See ControllerConfig for more details """ return self.config.cid def kill(self): """ Kill the process the controller is running in """ if self.state != ControllerState.ALIVE: self.log.warn("Killing controller %s when it is not alive!" % self.label) return msg.event("Killing controller %s (pid %d)" % (self.cid, self.pid)) kill_procs([self.process]) if self.config.kill_cmd not in ["", None]: self.log.info("Killing controller %s: %s" % ( self.label, " ".join(self.config.expanded_kill_cmd))) popen_filtered( "[%s]" % self.label, self.config.expanded_kill_cmd, self.config.cwd) self._unregister_proc(self.process) self.process = None self.state = ControllerState.DEAD def _bind_pcap(self, host_device): filter_string = "(not tcp port %d)" % self.config.port if self.config.sync is not None and self.config.sync != "": # TODO(cs): this is not quite correct. The *listen* port is sync_port, # but the sync data connection will go over over an ephermeral port. # Luckily this mistake is not fatal -- the kernel copies all # packets sent to the pcap, and we'll just drop the copied packets when # we realize we don't know where to route them. (_, _, sync_port) = parse_openflow_uri(self.config.sync) filter_string += " and (not tcp port %d)" % sync_port return bind_pcap(host_device, filter_string=filter_string) def _check_snapshot_connect(self): if getattr(self.config, "snapshot_address", None): # N.B. snapshot_socket is intended to be blocking self.log.debug("Connecting snapshot socket") self.snapshot_socket = connect_socket_with_backoff( address=self.config.snapshot_address) def start(self, multiplex_sockets=False): """ Start a new controller process based on the config's start_cmd attribute. Registers the Popen member variable for deletion upon a SIG* received in the simulator process """ self.log.info(self.welcome_msg) if self.state != ControllerState.DEAD: self.log.warn("Starting controller %s when it is not dead!" 
% self.label) return if self.config.start_cmd == "": raise RuntimeError( "No command found to start controller %s!" % self.label) self.log.info( "Launching controller %s: %s" % ( self.label, " ".join(self.config.expanded_start_cmd))) # These configurations are specific to controllers launched in namespaces # Probably it should be factored somewhere else launch_in_network_namespace = getattr(self.config, 'launch_in_network_namespace', None) if launch_in_network_namespace: unclaimed_address = IPAddressSpace.find_unclaimed_address( ip_prefix=self.config.address) (self.process, self.guest_eth_addr, self.host_device) = \ launch_namespace(" ".join(self.config.expanded_start_cmd), self.config.address, self.cid, host_ip_addr_str=unclaimed_address) else: self.process = popen_filtered("[%s]" % self.label, self.config.expanded_start_cmd, self.config.cwd) self._register_proc(self.process) self._check_snapshot_connect() self.state = ControllerState.ALIVE def restart(self): """ Restart the controller """ if self.state != ControllerState.DEAD: self.log.warn( "Restarting controller %s when it is not dead!" % self.label) return self.start() def check_status(self, simulation): """ Returns the actual status of the controller. """ if not self.process: return ControllerState.DEAD rc = self.process.poll() if rc is not None: return ControllerState.DEAD return ControllerState.ALIVE def block_peer(self, peer_controller): """Ignore traffic to/from the given peer controller""" raise NotImplementedError("Peer blocking not yet supported") def unblock_peer(self, peer_controller): """Stop ignoring traffic to/from the given peer controller""" raise NotImplementedError("Peer blocking not yet supported") @property def blocked_peers(self): raise NotImplementedError() def snapshot(self): """ Causes the controller to fork() a (suspended) copy of itself. """ self.log.info("Initiating snapshot") self.snapshot_socket.send("SNAPSHOT") def snapshot_proceed(self): """ Tell the previously fork()ed controller process to wake up, and connect a new socket to the fork()ed controller's (OpenFlow) port. Also de-registers the old controller process and registers the new controller process. Note that it is the responsibility of the caller to kill the previously fork()ed controller process. This method may block if the fork()ed process is not ready to proceed. Returns: a new socket connected to the woken controller. Pre: snapshot() has been invoked """ self.log.info("Initiating snapshot proceed") # Check that the fork()ed controller is ready self.log.debug("Checking READY") # N.B. snapshot_socket is blocking response = self.snapshot_socket.recv(100) match = re.match(r"READY (?P<pid>\d+)", response) if not match: raise ValueError("Unknown response %s" % response) pid = int(match.group('pid')) # De-registers the old controller process and registers the new controller # process. self._unregister_proc(self.process) self.process = SnapshotPopen(pid) self._register_proc(self.process) # Send PROCEED self.log.debug("Sending PROCEED") self.snapshot_socket.send("PROCEED") # Reconnect self.log.debug("Connecting new mux socket") true_socket = connect_socket_with_backoff(address=self.config.address, port=self.config.port) true_socket.setblocking(0) self.log.debug("Finished snapshot proceed") return true_socket class POXController(Controller): """ N.B. controller-specific configuration is optional. The purpose of this class is to load POX's syncproto module, which helps us reduce non-determinism in POX. 
""" def __init__(self, controller_config, sync_connection_manager=None, snapshot_service=None): """ Controller Configs (in addition to the options defined in ControllerConfig) - launch_in_network_namespace: if true new network namespace is created """ super(POXController, self).__init__( controller_config, sync_connection_manager, snapshot_service) self.welcome_msg = " =====> Starting POX Controller <===== " def start(self, multiplex_sockets=False): """ Start a new POX controller process based on the config's start_cmd attribute. Registers the Popen member variable for deletion upon a SIG* received in the simulator process """ self.log.info(self.welcome_msg) if self.state != ControllerState.DEAD: self.log.warn( "Starting controller %s when controller is not dead!" % self.label) return msg.event("Starting POX controller %s" % (str(self.cid))) env = None if self.config.sync: # If a sync connection has been configured in the controller conf # launch the controller with environment variable 'sts_sync' set # to the appropriate listening port. This is quite a hack. env = os.environ.copy() port_match = re.search(r':(\d+)$', self.config.sync) if port_match is None: raise ValueError("sync: cannot find port in %s" % self.config.sync) port = port_match.group(1) env['sts_sync'] = "ptcp:0.0.0.0:%d" % (int(port),) if self.config.sync or multiplex_sockets: src_dir = os.path.join(os.path.dirname(__file__), "../../") pox_ext_dir = os.path.join(self.config.cwd, "ext") if os.path.exists(pox_ext_dir): for f in ("sts/util/io_master.py", "sts/syncproto/base.py", "sts/syncproto/pox_syncer.py", "sts/__init__.py", "sts/util/socket_mux/__init__.py", "sts/util/socket_mux/pox_monkeypatcher.py", "sts/util/socket_mux/base.py", "sts/util/socket_mux/server_socket_multiplexer.py"): src_path = os.path.join(src_dir, f) if not os.path.exists(src_path): raise ValueError( "Integrity violation: sts sync source path %s (abs: %s) " "does not exist" % (src_path, os.path.abspath(src_path))) dst_path = os.path.join(pox_ext_dir, f) dst_dir = os.path.dirname(dst_path) init_py = os.path.join(dst_dir, "__init__.py") if not os.path.exists(dst_dir): os.makedirs(dst_dir) if not os.path.exists(init_py): open(init_py, "a").close() if os.path.islink(dst_path): # Remove symlink and recreate os.remove(dst_path) if not os.path.exists(dst_path): rel_link = os.path.abspath(src_path) self.log.debug("Creating symlink %s -> %s", rel_link, dst_path) os.symlink(rel_link, dst_path) else: self.log.warn("Could not find pox ext dir in %s. " + "Cannot check/link in sync module" % pox_ext_dir) if self.config.start_cmd in ["", None]: raise RuntimeError( "No command found to start controller %s!" 
% self.label) start_cmd = getattr(self.config, "expanded_start_cmd", self.config.start_cmd) self.log.info( "Launching controller %s: %s" % (self.label, " ".join(start_cmd))) launch_in_network_namespace = getattr(self.config, "launch_in_network_namespace", False) if launch_in_network_namespace: (self.process, self.guest_eth_addr, self.host_device) = \ launch_namespace( " ".join(start_cmd), self.config.address, self.cid, host_ip_addr_str=IPAddressSpace.find_unclaimed_address( ip_prefix=self.config.address), cwd=self.config.cwd, env=env) else: self.process = popen_filtered("[%s]" % self.label, start_cmd, self.config.cwd, env) self._register_proc(self.process) if self.config.sync: self.sync_connection = self.sync_connection_manager.connect( self, self.config.sync) self._check_snapshot_connect() self.state = ControllerState.ALIVE def __repr__(self): return "POXController: %s (%s, %s)" % ( self.label, self.config.address, self.config.port) def __str__(self): return self.label class VMController(Controller): """Controllers that are run in virtual machines rather than processes""" __metaclass__ = abc.ABCMeta def __init__(self, controller_config, cmd_executor=None, sync_connection_manager=None, snapshot_service=None, username=None, password=None): """ Args: controller_config: see ControllerConfig cmd_executer: a class that has execute_command method. If not specified SSHEntity will be used See SSHEntity and LocalEntity username: overrides the username specified in controller_config (if any) password: overrides the password specified in controller_config (if any) """ Controller.__init__(self, controller_config, sync_connection_manager, snapshot_service) self.username = username self.password = password if self.username is None and hasattr(self.config, 'username'): self.username = self.config.username if self.password is None and hasattr(self.config, 'password'): self.password = self.config.password self.cmd_executor = cmd_executor if self.cmd_executor is None and hasattr(self.config, 'cmd_executor'): self.cmd_executor = self.config.cmd_executer if self.cmd_executor is None: key_filename = getattr(self.config, 'key_filename', None) self.cmd_executor = SSHEntity(controller_config.address, username=self.username, password=<PASSWORD>, key_filename=key_filename, cwd=getattr(self.config, "cwd", None), redirect_output=True, block=True) assert hasattr(self.cmd_executor, "execute_command") self.commands = {} self.populate_commands() self.welcome_msg = " =====> Starting VM Controller <===== " self.alive_status_string = "" # subclass dependent def populate_commands(self): if self.config.start_cmd == "": raise RuntimeError( "No command found to start controller %s!" % self.label) if self.config.kill_cmd == "": raise RuntimeError( "No command found to kill controller %s!" % self.label) if self.config.restart_cmd == "": raise RuntimeError( "No command found to restart controller %s!" % self.label) self.commands["start"] = " ".join(self.config.expanded_start_cmd) self.commands["kill"] = " ".join(self.config.expanded_kill_cmd) self.commands["restart"] = " ".join(self.config.expanded_restart_cmd) if hasattr(self.config, "expanded_check_cmd"): self.commands["check"] = " ".join(self.config.expanded_check_cmd) else: self.commands["check"] = getattr(self.config, "check_cmd", "") def kill(self): if self.state != ControllerState.ALIVE: self.log.warn( "Killing controller %s when controller is not alive!" 
% self.label) return kill_cmd = self.commands["kill"] self.log.info("Killing controller %s: %s" % (self.label, kill_cmd)) self.cmd_executor.execute_command(kill_cmd) self.state = ControllerState.DEAD def start(self, multiplex_sockets=False): self.log.info(self.welcome_msg) if self.state != ControllerState.DEAD: self.log.warn( "Starting controller %s when controller is not dead!" % self.label) return start_cmd = self.commands["start"] self.log.info("Launching controller %s: %s" % (self.label, start_cmd)) ret = self.cmd_executor.execute_command(start_cmd) if ret is not None: self.log.info(ret) self.state = ControllerState.ALIVE def restart(self): if self.state != ControllerState.DEAD: self.log.warn( "Restarting controller %s when controller is not dead!" % self.label) return restart_cmd = self.commands["restart"] self.log.info("Relaunching controller %s: %s" % (self.label, restart_cmd)) self.cmd_executor.execute_command(restart_cmd) self.state = ControllerState.ALIVE # SSH into the VM to check on controller process def check_status(self, simulation): check_cmd = self.commands["check"] self.log.info( "Checking status of controller %s: %s" % (self.label, check_cmd)) # By make sure the status cmd will return status rather than # printing it out on stdout. If the executor has redirect_output # set to True there is not way to check the return status because it's # printed out. old_redirect_status = self.cmd_executor.redirect_output self.cmd_executor.redirect_output = False # Execute the status command remote_status = self.cmd_executor.execute_command(check_cmd) # Restore to the old redirect status self.cmd_executor.redirect_output = old_redirect_status actual_state = ControllerState.DEAD # Alive means remote controller process exists if self.alive_status_string in remote_status: actual_state = ControllerState.ALIVE if (self.state == ControllerState.DEAD and actual_state == ControllerState.ALIVE): self.log.warn("%s is dead, but controller process found!" 
% self.label) self.state = ControllerState.ALIVE if (self.state == ControllerState.ALIVE and actual_state == ControllerState.DEAD): return (False, "Alive, but no controller process found!") return (True, "OK") def block_peer(self, peer_controller): for chain in ['INPUT', 'OUTPUT']: check_block_cmd = "sudo iptables -L %s | grep \"DROP.*%s\"" % ( chain, peer_controller.config.address) add_block_cmd = "sudo iptables -I %s 1 -s %s -j DROP" % ( chain, peer_controller.config.address) # If already blocked, do nothing if self.cmd_executor.execute_command(check_block_cmd) != "": continue self.cmd_executor.execute_command(add_block_cmd) def unblock_peer(self, peer_controller): for chain in ['INPUT', 'OUTPUT']: check_block_cmd = "sudo iptables -L %s | grep \"DROP.*%s\"" % ( chain, peer_controller.config.address) remove_block_cmd = "sudo iptables -D %s -s %s -j DROP" % ( chain, peer_controller.config.address) max_iterations = 10 while max_iterations > 0: # If already unblocked, do nothing if self.cmd_executor.execute_command(check_block_cmd) == "": break self.cmd_executor.execute_command(remove_block_cmd) max_iterations -= 1 class BigSwitchController(VMController): """BigSwitch Controller specific wrapper""" def __init__(self, controller_config, sync_connection_manager=None, snapshot_service=None, username="root", password=""): super(BigSwitchController, self).__init__(controller_config, sync_connection_manager, snapshot_service, username=username, password=password) self.welcome_msg = " =====> Starting BigSwitch Controller <===== " self.alive_status_string = "start/running" def populate_commands(self): if self.config.start_cmd == "": raise RuntimeError( "No command found to start controller %s!" % self.label) self.commands["start"] = " ".join(self.config.expanded_start_cmd) self.commands["kill"] = "service floodlight stop" self.commands["restart"] = "service floodlight start; initctl stop bscmon" self.commands["check"] = "service floodlight status" def start(self, multiplex_sockets=False): super(BigSwitchController, self).start() self.cmd_executor.execute_command(self.commands["restart"]) def kill(self): if self.state != ControllerState.ALIVE: self.log.warn( "Killing controller %s when controller is not alive!" % self.label) return kill_cmd = self.commands["kill"] self.log.info("Killing controller %s: %s" % (self.label, kill_cmd)) self.cmd_executor.execute_command(kill_cmd) self.state = ControllerState.DEAD def restart(self): if self.state != ControllerState.DEAD: self.log.warn( "Restarting controller %s when controller is not dead!" 
% self.label) return restart_cmd = self.commands["restart"] self.log.info("Relaunching controller %s: %s" % (self.label, restart_cmd)) self.cmd_executor.execute_command(restart_cmd) self.state = ControllerState.STARTING class ONOSController(VMController): """ONOS Specific wrapper""" def __init__(self, controller_config, cmd_executor=None, sync_connection_manager=None, snapshot_service=None, username="mininet", password="<PASSWORD>"): if not getattr(controller_config, "check_cmd", None): controller_config.check_cmd = "./start-onos.sh status" super(ONOSController, self).__init__(controller_config, cmd_executor, sync_connection_manager, snapshot_service, username=username, password=password) self.welcome_msg = " =====> Starting ONOS Controller <===== " self.alive_status_string = "1 instance of onos running" ``` #### File: sts/sts/event_dag.py ```python from sts.fingerprints.messages import * from sts.replay_event import * import logging import time from collections import defaultdict log = logging.getLogger("event_dag") def split_list(l, split_ways): ''' Split our inputs into split_ways separate lists ''' if split_ways < 1: raise ValueError("Split ways must be greater than 0") splits = [] split_interval = len(l) / split_ways # integer division = floor remainder = len(l) % split_ways # remainder is guaranteed to be less than splitways start_idx = 0 while len(splits) < split_ways: split_idx = start_idx + split_interval # the first 'remainder' chunks are made one element larger to chew # up the remaining elements (remainder < splitways) # note: len(l) = split_ways * split_interval + remainder if remainder > 0: split_idx += 1 remainder -= 1 splits.append(l[start_idx:split_idx]) start_idx = split_idx return splits class AtomicInput(object): def __init__(self, failure, recoveries): self.failure = failure self.recoveries = recoveries @property def label(self): return "a(%s,%s)" % (self.failure.label, [ r.label for r in self.recoveries ]) def __repr__(self): return "AtomicInput:%r%r" % (self.failure, self.recoveries) class EventDagView(object): def __init__(self, parent, events_list): ''' subset is a list ''' self._parent = parent self._events_list = list(events_list) self._events_set = set(self._events_list) @property def events(self): '''Return the events in the DAG''' return self._events_list @property def input_events(self): # TODO(cs): memoize? 
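    # Note: only prunable InputEvents make it into this list -- internal
    # events and non-prunable inputs are filtered out below.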
return [ e for e in self._events_list if isinstance(e, InputEvent) and e.prunable ] @property def atomic_input_events(self): return self._parent._atomic_input_events(self.input_events) def input_subset(self, subset): '''pre: subset must be a subset of only this view''' return self._parent.input_subset(subset) def atomic_input_subset(self, subset): '''pre: subset must be a subset of only this view''' return self._parent.atomic_input_subset(subset) def input_complement(self, subset): return self._parent.input_complement(subset, self._events_list) def insert_atomic_inputs(self, inputs): return self._parent.insert_atomic_inputs(inputs, events_list=self._events_list) def add_inputs(self, inputs): return self._parent.add_inputs(inputs, self._events_list) def next_state_change(self, index): return self._parent.next_state_change(index, events=self.events) def get_original_index_for_event(self, event): return self._parent.get_original_index_for_event(event) def get_last_invariant_violation(self): return self._parent.get_last_invariant_violation() def set_events_as_timed_out(self, timed_out_event_labels): return self._parent.set_events_as_timed_out(timed_out_event_labels) def filter_timeouts(self): return self._parent.filter_timeouts(events_list=self._events_list) def __len__(self): return len(self._events_list) # TODO(cs): move these somewhere else def migrations_per_host(events): host2migrations = defaultdict(list) for e in events: if type(e) == HostMigration: host2migrations[e.host_id].append(e) return host2migrations def replace_migration(replacee, old_location, new_location, event_list): # `replacee' is the migration to be replaced # Don't mutate replacee -- instead, replace it new_migration = HostMigration(old_location[0], old_location[1], new_location[0], new_location[1], host_id=replacee.host_id, time=replacee.time, label=replacee.label) # TODO(cs): O(n^2) index = event_list.index(replacee) event_list[index] = new_migration return new_migration class EventDag(object): '''A collection of Event objects. EventDags are primarily used to present a view of the underlying events with some subset of the input events pruned ''' # We peek ahead this many seconds after the timestamp of the subseqeunt # event # TODO(cs): be smarter about this -- peek() too far, and peek()'ing not far # enough can both have negative consequences _peek_seconds = 0.3 # If we prune a failure, make sure that the subsequent # recovery doesn't occur _failure_types = set([SwitchFailure, LinkFailure, ControllerFailure, ControlChannelBlock, BlockControllerPair]) # NOTE: we treat failure/recovery as an atomic pair, since it doesn't make # much sense to prune a recovery event _recovery_types = set([SwitchRecovery, LinkRecovery, ControllerRecovery, ControlChannelUnblock, UnblockControllerPair]) # ignoring these input types _ignored_input_types = set([WaitTime]) def __init__(self, events, prefix_trie=None): '''events is a list of EventWatcher objects. 
Refer to log_parser.parse to see how this is assembled.''' # TODO(cs): ugly that the superclass has to keep track of # PeekingEventDag's data self._prefix_trie = prefix_trie self._events_list = events self._events_set = set(self._events_list) self._label2event = { event.label : event for event in self._events_list } self._event2idx = { event : i for i, event in enumerate(self._events_list) } # TODO(cs): this should be moved to a dag transformer class self._host2initial_location = { host : migrations[0].old_location for host, migrations in migrations_per_host(self._events_list).iteritems() } self._last_violation = None @property def events(self): '''Return the events in the DAG''' return self._events_list @property def input_events(self): # TODO(cs): memoize? return [ e for e in self._events_list if isinstance(e, InputEvent) and e.prunable ] @property def atomic_input_events(self): return self._atomic_input_events(self.input_events) def _get_event(self, label): if label not in self._label2event: raise ValueError("Unknown label %s" % str(label)) return self._label2event[label] def _atomic_input_events(self, inputs): # TODO(cs): memoize? skipped_recoveries = set() atomic_inputs = [] for e in inputs: if e in skipped_recoveries: continue if type(e) in self._failure_types and e.dependent_labels != []: recoveries = [] for label in e.dependent_labels: recovery = self._label2event[label] skipped_recoveries.add(recovery) recoveries.append(recovery) atomic_inputs.append(AtomicInput(e, recoveries)) else: atomic_inputs.append(e) return atomic_inputs def _expand_atomics(self, atomic_inputs): inputs = [] for e in atomic_inputs: if type(e) == AtomicInput: inputs.append(e.failure) for recovery in e.recoveries: inputs.append(recovery) else: inputs.append(e) inputs.sort(key=lambda e: self._event2idx[e]) return inputs def filter_unsupported_input_types(self): return EventDagView(self, (e for e in self._events_list if type(e) not in self._ignored_input_types)) def compute_remaining_input_events(self, ignored_portion, events_list=None): ''' ignore all input events in ignored_inputs, as well all of their dependent input events''' if events_list is None: events_list = self.events remaining = [] for event in events_list: if event not in ignored_portion: remaining.append(event) else: # Add dependent to ignored_portion for label in event.dependent_labels: # Note that recoveries will be a dependent of preceding failures dependent_event = self._label2event[label] ignored_portion.add(dependent_event) # Update the migration locations in remaining self.update_migrations(remaining, ignored_portion, events_list) return remaining def update_migrations(self, remaining, ignored_portion, events_list): ''' Walk through remaining input events, and update the source location of the host migration. 
For example, if one host migrates twice: location A -> location B -> location C And the first migration is pruned, update the second HostMigration event to look like: location A -> location C Note: mutates remaining ''' # TODO(cs): this should be moved outside of EventDag # TODO(cs): this algorithm could be simplified substantially by invoking # migrations_per_host() # keep track of the most recent location of the host that did not involve # a pruned HostMigration event # location is: (ingress dpid, ingress port no) currentloc2unprunedloc = {} for m in [e for e in events_list if type(e) == HostMigration]: src = m.old_location dst = m.new_location if m in ignored_portion: if src in currentloc2unprunedloc: # There was a prior migration in ignored_portion # Update the new dst to point back to the unpruned location unprunedlocation = currentloc2unprunedloc[src] del currentloc2unprunedloc[src] currentloc2unprunedloc[dst] = unprunedlocation else: # We are the first migration for this host in ignored_portion # Point to our tail currentloc2unprunedloc[dst] = src else: # m in remaining if src in currentloc2unprunedloc: # There was a prior migration in ignored_portion # Replace this HostMigration with a new one, with source at the # last unpruned location unpruned_loc = currentloc2unprunedloc[src] del currentloc2unprunedloc[src] new_loc = dst replace_migration(m, unpruned_loc, new_loc, remaining) def _ignored_except_internals_and_recoveries(self, ignored_portion): # Note that dependent_labels only contains dependencies between input # events. Dependencies with internal events are inferred by EventScheduler. # Also note that we treat failure/recovery as an atomic pair, so we don't prune # recovery events on their own. return set(e for e in ignored_portion if (isinstance(e, InputEvent) and e.prunable and type(e) not in self._recovery_types)) def _ignored_except_internals(self, ignored_portion): return set(e for e in ignored_portion if isinstance(e, InputEvent) and e.prunable) def input_subset(self, subset): ''' Return a view of the dag with only the subset and subset dependents remaining''' ignored = self._events_set - set(subset) ignored = self._ignored_except_internals_and_recoveries(ignored) remaining_events = self.compute_remaining_input_events(ignored) return EventDagView(self, remaining_events) def atomic_input_subset(self, subset): ''' Return a view of the dag with only the subset remaining, where dependent input pairs remain together''' # Relatively simple: expand atomic pairs into individual inputs, take # all input events in result, and compute_remaining_input_events as normal subset = self._expand_atomics(subset) ignored = self._events_set - set(subset) ignored = self._ignored_except_internals(ignored) remaining_events = self.compute_remaining_input_events(ignored) return EventDagView(self, remaining_events) def input_complement(self, subset, events_list=None): ''' Return a view of the dag with everything except the subset and subset dependencies''' subset = self._ignored_except_internals_and_recoveries(subset) remaining_events = self.compute_remaining_input_events(subset, events_list) return EventDagView(self, remaining_events) def _straighten_inserted_migrations(self, remaining_events): ''' This is a bit hairy: when migrations are added back in, there may be gaps in host locations. We need to straighten out those gaps -- i.e. make the series of host migrations for any given host a line. 
Pre: remaining_events is sorted in the same relative order as the
    original trace
    '''
    host2migrations = migrations_per_host(remaining_events)
    for host, migrations in host2migrations.iteritems():
      # Prime the loop with the initial location
      previous_location = self._host2initial_location[host]
      for m in migrations:
        if m.old_location != previous_location:
          replacement = replace_migration(m, previous_location,
                                          m.new_location, remaining_events)
        else:
          replacement = m
        previous_location = replacement.new_location
    return remaining_events

  def insert_atomic_inputs(self, atomic_inputs, events_list=None):
    '''Insert inputs into events_list in the same relative order as the
    original events list. This method is needed because set union as used in
    delta debugging does not make sense for event sequences (events are
    ordered)'''
    # Note: events_list should never be None (I think), since it does not make
    # sense to insert inputs into the original sequence that are already present
    if events_list is None:
      raise ValueError("Shouldn't be adding inputs to the original trace")
    inputs = self._expand_atomics(atomic_inputs)
    if not all(e in self._event2idx for e in inputs):
      raise ValueError("Not all inputs present in original events list %s" %
                       [e for e in inputs if e not in self._event2idx])
    if not all(e in self._event2idx for e in events_list):
      raise ValueError("Not all events in original events list %s" %
                       [e for e in events_list if e not in self._event2idx])
    result = []
    for _, successor in enumerate(events_list):
      orig_successor_idx = self._event2idx[successor]
      while len(inputs) > 0 and orig_successor_idx > self._event2idx[inputs[0]]:
        # If the current successor did in fact come after the next input in the
        # original trace, insert next input here
        next_input = inputs.pop(0)
        result.append(next_input)
      result.append(successor)
    # Any remaining inputs should be appended at the end -- they had no
    # successors
    result += inputs
    # Deal with newly added host migrations
    result = self._straighten_inserted_migrations(result)
    return EventDagView(self, result)

  def mark_invalid_input_sequences(self):
    '''Fill in domain knowledge about valid input sequences (e.g. don't prune
       failure without pruning recovery.) Only do so if this isn't a view of
       a previously computed DAG'''
    # TODO(cs): should this be factored out?
    # Note: we treat each failure/recovery pair atomically, since it doesn't
    # make much sense to prune recovery events. Also note that we will
    # never see two failures (for a particular node) in a row without an
    # interleaving recovery event.
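    # Maps fingerprint (with the class name stripped) -> the most recent
    # failure event seen for that fingerprint, so the matching recovery can
    # be registered as one of the failure's dependent_labels.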
fingerprint2previousfailure = {} # NOTE: mutates the elements of self._events_list for event in self._events_list: if hasattr(event, 'fingerprint'): # Skip over the class name fingerprint = event.fingerprint[1:] if type(event) in self._failure_types: # Insert it into the previous failure hash fingerprint2previousfailure[fingerprint] = event elif type(event) in self._recovery_types: # Check if there were any failure predecessors if fingerprint in fingerprint2previousfailure: failure = fingerprint2previousfailure[fingerprint] failure.dependent_labels.append(event.label) #elif type(event) in self._ignored_input_types: # raise RuntimeError("No support for %s dependencies" % # type(event).__name__) def next_state_change(self, index, events=None): ''' Return the next ControllerStateChange that occurs at or after index.''' if events is None: events = self.events # TODO(cs): for now, assumes a single controller for event in events[index:]: if type(event) == ControllerStateChange: return event return None def get_original_index_for_event(self, event): return self._event2idx[event] def __len__(self): return len(self._events_list) def get_last_invariant_violation(self): if self._last_violation is not None: return self._last_violation for event in reversed(self._events_list): # Match on persistent violations in computing MCS if type(event) == InvariantViolation and event.persistent: self._last_violation = event return event return None def set_events_as_timed_out(self, timed_out_event_labels): for event in self._events_list: event.timed_out = False for label in timed_out_event_labels: self._get_event(label).timed_out = True def filter_timeouts(self, events_list=None): if events_list is None: events_list = self._events_list no_timeouts = [ e for e in events_list if not e.timed_out ] return EventDagView(self, no_timeouts) ``` #### File: sts/sts/__init__.py ```python import sys import os from datetime import date sys.path.append(os.path.join(os.path.dirname(__file__), "..", "pox")) sys.path.append(os.path.join(os.path.dirname(__file__), "hassel/hsa-python")) def check_sw_version(path, remote_branch): ''' Return whether the latest commit of the git repo located at the given path is the same as the remote repo's remote_branch. ''' def get_version(branch="HEAD"): return os.popen("git rev-parse %s" % branch).read() old_cwd = os.getcwd() try: os.chdir(path) if os.system("git fetch") != 0: raise IOError("Unable to fetch from origin for repo %s" % path) local_version = get_version() remote_version = get_version(branch=remote_branch) return local_version == remote_version except Exception as e: print >> sys.stderr, ('''Unable to check whether software versions are ''' '''up-to-date: %s''' % e) finally: os.chdir(old_cwd) def check_dependencies(): ''' Double check whether POX and Hassel are at the latest software versions. ''' print >> sys.stderr, "Checking software versions..." pox_path = os.path.join(os.path.dirname(__file__), "..", "pox") if not check_sw_version(pox_path, "remotes/origin/debugger"): print >> sys.stderr, ('''Warning: POX version not up-to-date. You should ''' '''probably run:\n $ (cd pox; git pull) ''') hassel_path = os.path.join(os.path.dirname(__file__), "hassel") if not os.path.exists(os.path.join(hassel_path, "LICENSE.txt")): print >> sys.stderr, "Warning: Hassel submodule not loaded." elif not check_sw_version(hassel_path, "remotes/origin/HEAD"): print >> sys.stderr, ('''Warning: Hassel version not up-to-date. 
You should ''' '''probably run:\n $ git submodule update ''') # We store the last date we checked software versions in sts/last-version-check. # The format of the file is: date.today().toordinal() timestamp_path = os.path.join(os.path.dirname(__file__), "last-version-check") def checked_recently(): ''' Return whether we have checked dependencies in the last day. ''' if not os.path.exists(timestamp_path): return False current_date = date.today() with open(timestamp_path) as timestamp_file: try: last_check_date = date.fromordinal(int(timestamp_file.read())) except: # Possible corruption. return False return last_check_date == current_date def write_new_timestamp(): with open(timestamp_path, "w") as timestamp_file: current_date = date.today() timestamp_file.write(str(current_date.toordinal())) # We only check dependencies once a day, since `git fetch` takes a fair amount of # time. if not checked_recently(): check_dependencies() write_new_timestamp() ``` #### File: sts/input_traces/log_parser.py ```python import json import sts.replay_event as event import logging log = logging.getLogger("superlog_parser") input_name_to_class = { klass.__name__ : klass for klass in event.all_input_events } internal_event_name_to_class = { klass.__name__ : klass for klass in event.all_internal_events } special_event_name_to_class = { klass.__name__ : klass for klass in event.all_special_events } def check_unique_label(event_label, existing_event_labels): '''Check to make sure that event_label is not in existing_event_labels. Throw an exception if this invariant does not hold. If the invariant does hold, add event_label to existing_event_labels.''' if event_label in existing_event_labels: raise RuntimeError("Event label %s already exists!" % event_label) existing_event_labels.add(event_label) def sanity_check_external_input_event(existing_event_labels, dependent_labels, json_hash): '''Takes an external event json hash and checks that no dependents have already occured. Raises an exception if any have, otherwise populates dependent_labels''' dependents = set(json_hash['dependent_labels']) # can't have dependents that have already happened! assert(dependents.isdisjoint(existing_event_labels)) dependent_labels.update(dependents) # External input events can be dependents too (e.g. link recoveries are # dependents of link failures) dependent_labels.discard(json_hash['label']) def sanity_check_internal_event(existing_event_labels, dependent_labels, json_hash): '''Takes an internal event json hash and removes it from the set of dependent labels that must be present before the end of the log. ''' dependent_labels.discard(json_hash['label']) def parse_path(logfile_path): '''Input: path to a logfile. Output: A list of all the internal and external events in the order in which they exist in the logfile. Each internal event is annotated with the set of source events that are necessary conditions for its occurence.''' with open(logfile_path) as logfile: return parse(logfile) def check_legacy_format(json_hash): if (hasattr(json_hash, 'controller_id') and type(json_hash.controller_id) == list): # TODO(cs): translate rather than throwing up raise ValueError("Legacy controller id. Should be a string label: %s" % json_hash.controller_id) if "logical_round" not in json_hash.keys(): # Insert a dummy logical_round number json_hash['logical_round'] = -1 def parse(logfile): '''Input: logfile. Output: A list of all the internal and external events in the order in which they exist in the logfile. 
Each internal event is annotated with the set of source events that are
  necessary conditions for its occurrence.'''
  # the return value of the parsed log
  trace = []
  # a set of all event labels
  event_labels = set()
  # dependent labels that must be present somewhere in the log.
  dependent_labels = set()
  for line in logfile:
    json_hash = json.loads(line.rstrip())
    check_unique_label(json_hash['label'], event_labels)
    check_legacy_format(json_hash)
    if json_hash['class'] in input_name_to_class:
      sanity_check_external_input_event(event_labels, dependent_labels,
                                        json_hash)
      event = input_name_to_class[json_hash['class']].from_json(json_hash)
    elif json_hash['class'] in internal_event_name_to_class:
      sanity_check_internal_event(event_labels, dependent_labels, json_hash)
      event = internal_event_name_to_class[json_hash['class']].from_json(json_hash)
    elif json_hash['class'] in special_event_name_to_class:
      event = special_event_name_to_class[json_hash['class']].from_json(json_hash)
    else:
      print "Warning: Unknown class type %s" % json_hash['class']
      continue
    trace.append(event)

  # all the forward dependencies should be satisfied!
  assert(len(dependent_labels) == 0)
  return trace
```
#### File: sts/sts/openflow_buffer.py
```python
from collections import defaultdict, namedtuple

from sts.fingerprints.messages import *
from pox.lib.revent import Event, EventMixin
from sts.syncproto.base import SyncTime
from sts.util.convenience import base64_encode
from sts.util.ordered_default_dict import OrderedDefaultDict

import logging
log = logging.getLogger("openflow_buffer")

class PendingMessage(Event):
  def __init__(self, pending_message, b64_packet, event_time=None,
               send_event=False):
    # TODO(cs): boolean flag is ugly. Should use subclasses, but EventMixin
    # doesn't support addListener() on super/subclasses.
    super(PendingMessage, self).__init__()
    self.event_time = event_time if event_time else SyncTime.now()
    self.pending_message = pending_message
    self.b64_packet = b64_packet
    self.send_event = send_event

class PendingQueue(object):
  '''Stores pending messages between switches and controllers'''
  ConnectionId = namedtuple('ConnectionId', ['dpid', 'controller_id'])

  def __init__(self):
    # { ConnectionId(dpid, controller_id) -> MessageId -> [conn_message1, conn_message2, ....]
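    # For example (hypothetical contents): a buffered flow_mod destined for
    # switch 1 from controller 'c1' would live at
    #   self.pending[ConnectionId(1, 'c1')][message_id] == [(conn, ofp_flow_mod)]
    # The inner OrderedDefaultDict preserves the order in which message ids
    # were first buffered.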
self.pending = defaultdict(lambda: OrderedDefaultDict(list)) def insert(self, message_id, conn_message): '''' message_id is a fingerprint named tuple, and conn_message is a ConnMessage named tuple''' conn_id = ConnectionId(dpid=message_id.dpid, controller_id=message_id.controller_id) self.pending[conn_id][message_id].append(conn_message) def has_message_id(self, message_id): conn_id = ConnectionId(dpid=message_id.dpid, controller_id=message_id.controller_id) return message_id in self.pending[conn_id] def get_all_by_message_id(self, message_id): conn_id = ConnectionId(dpid=message_id.dpid, controller_id=message_id.controller_id) return self.pending[conn_id][message_id] def pop_by_message_id(self, message_id): conn_id = ConnectionId(dpid=message_id.dpid, controller_id=message_id.controller_id) message_id_map = self.pending[conn_id] msg_list = message_id_map[message_id] if len(msg_list) == 0: raise ValueError("Empty queue for message_id %s" % str(message_id)) res = msg_list.pop(0) if len(msg_list) == 0: del message_id_map[message_id] if len(message_id_map) == 0: del self.pending[conn_id] return res def conn_ids(self): return self.pending.keys() def get_message_ids(self, dpid, controller_id): conn_id = ConnectionId(dpid=dpid, controller_id=controller_id) return self.pending[conn_id].keys() def __len__(self): return sum( len(msg_list) for message_id_map in self.pending.values() for msg_list in message_id_map.values() ) def __iter__(self): return (message_id for message_id_map in self.pending.values() for message_id in message_id_map.keys()) # TODO(cs): move me to another file? class OpenFlowBuffer(EventMixin): ''' Models asynchrony: chooses when switches get to process packets from controllers. Buffers packets until they are pulled off the buffer and chosen by god (control_flow.py) to be processed. ''' # Packet class matches that should be let through automatically if # self.allow_whitelisted_packets is True. whitelisted_packet_classes = [("class", "ofp_packet_out", ("data", ("class", "lldp", None))), ("class", "ofp_packet_in", ("data", ("class", "lldp", None))), ("class", "lldp", None), ("class", "ofp_echo_request", None), ("class", "ofp_echo_reply", None)] @staticmethod def in_whitelist(packet_fingerprint): for match in OpenFlowBuffer.whitelisted_packet_classes: if packet_fingerprint.check_match(match): return True return False _eventMixin_events = set([PendingMessage]) def __init__(self): # keep around a queue for each switch of pending openflow messages waiting to # arrive at the switches. # { ConnectionId(dpid, controller_id) -> pending receive -> [(connection, pending ofp)_1, (connection, pending ofp)_2, ...] } self.pending_receives = PendingQueue() # { ConnectionId(dpid, controller_id) -> pending send -> [(connection, pending ofp)_1, (connection, pending ofp)_2, ...] 
} self.pending_sends = PendingQueue() self._delegate_input_logger = None self.pass_through_whitelisted_packets = False self.pass_through_sends = False def _pass_through_handler(self, message_event): ''' handler for pass-through mode ''' # NOTE(aw): FIRST record event, then schedule execution to maintain causality # TODO(cs): figure out a better way to resolve circular dependency import sts.replay_event message_id = message_event.pending_message # Record if message_event.send_event: replay_event_class = sts.replay_event.ControlMessageSend else: replay_event_class = sts.replay_event.ControlMessageReceive replay_event = replay_event_class(dpid=message_id.dpid, controller_id=message_id.controller_id, fingerprint=message_id.fingerprint, b64_packet=message_event.b64_packet, event_time=message_event.event_time) if self._delegate_input_logger is not None: # TODO(cs): set event.round somehow? self._delegate_input_logger.log_input_event(replay_event) else: # TODO(cs): why is this an else:? self.passed_through_events.append(replay_event) # Pass through self.schedule(message_id) def set_pass_through(self, input_logger=None): ''' Cause all message receipts to pass through immediately without being buffered''' self.passed_through_events = [] self._delegate_input_logger = input_logger self.addListener(PendingMessage, self._pass_through_handler) def pass_through_sends_only(self): self.pass_through_sends = True def unset_pass_through(self): '''Unset pass through mode, and return any events that were passed through since pass through mode was set''' self.removeListener(self._pass_through_handler) passed_events = self.passed_through_events self.passed_through_events = [] return passed_events def message_receipt_waiting(self, message_id): ''' Return whether the pending message receive is available ''' return self.pending_receives.has_message_id(message_id) def message_send_waiting(self, message_id): ''' Return whether the pending send is available ''' return self.pending_sends.has_message_id(message_id) def get_message_receipt(self, message_id): # pending receives are (conn, message) pairs. We return the message. return self.pending_receives.get_all_by_message_id(message_id)[0][1] def get_message_send(self, message_id): # pending sends are (conn, message) pairs. We return the message. return self.pending_sends.get_all_by_message_id(message_id)[0][1] def schedule(self, message_id): ''' Cause the switch to process the pending message associated with the fingerprint and controller connection. ''' receive = type(message_id) == PendingReceive if receive: if not self.message_receipt_waiting(message_id): raise ValueError("No such pending message %s" % message_id) queue = self.pending_receives else: if not self.message_send_waiting(message_id): raise ValueError("No such pending message %s" % message_id) queue = self.pending_sends (forwarder, message) = queue.pop_by_message_id(message_id) if receive: forwarder.allow_message_receipt(message) else: forwarder.allow_message_send(message) return message # TODO(cs): make this a factory method that returns DeferredOFConnection objects # with bound openflow_buffer.insert() method. 
(much cleaner API + separation of concerns) def insert_pending_receipt(self, dpid, controller_id, ofp_message, conn): ''' Called by DeferredOFConnection to insert messages into our buffer ''' fingerprint = OFFingerprint.from_pkt(ofp_message) if self.pass_through_whitelisted_packets and self.in_whitelist(fingerprint): conn.allow_message_receipt(ofp_message) return conn_message = (conn, ofp_message) message_id = PendingReceive(dpid, controller_id, fingerprint) self.pending_receives.insert(message_id, conn_message) b64_packet = base64_encode(ofp_message) self.raiseEventNoErrors(PendingMessage(message_id, b64_packet)) return message_id # TODO(cs): make this a factory method that returns DeferredOFConnection objects # with bound openflow_buffer.insert() method. (much cleaner API + separation of concerns) def insert_pending_send(self, dpid, controller_id, ofp_message, conn): ''' Called by DeferredOFConnection to insert messages into our buffer ''' fingerprint = OFFingerprint.from_pkt(ofp_message) if (self.pass_through_sends or (self.pass_through_whitelisted_packets and self.in_whitelist(fingerprint))): conn.allow_message_send(ofp_message) return conn_message = (conn, ofp_message) message_id = PendingSend(dpid, controller_id, fingerprint) self.pending_sends.insert(message_id, conn_message) b64_packet = base64_encode(ofp_message) self.raiseEventNoErrors(PendingMessage(message_id, b64_packet, send_event=True)) return message_id def conns_with_pending_receives(self): ''' Return the named_tuples (dpid, controller_id) of connections that have receive messages pending ''' return self.pending_receives.conn_ids() def conns_with_pending_sends(self): ''' Return the named_tuples (dpid, controller_id) of connections that have receive messages pending ''' return self.pending_sends.conn_ids() def get_pending_receives(self, dpid, controller_id): ''' Return the message receipts (MessageIDs) that are waiting to be scheduled for conn, in order ''' return self.pending_receives.get_message_ids(dpid=dpid, controller_id=controller_id) def get_pending_sends(self, dpid, controller_id): ''' Return the message sends (MessageIDs) that are waiting to be scheduled for conn, in order ''' return self.pending_sends.get_message_ids(dpid=dpid, controller_id=controller_id) def flush(self): ''' Garbage collect any previous pending messages ''' num_pending_messages = (len(self.pending_receives) + len(self.pending_sends)) if num_pending_messages > 0: log.info("Flushing %d pending messages" % num_pending_messages) self.pending_receives = PendingQueue() self.pending_sends = PendingQueue() PendingReceive = namedtuple('PendingReceive', ['dpid', 'controller_id', 'fingerprint']) PendingSend = namedtuple('PendingSend', ['dpid', 'controller_id', 'fingerprint']) ConnectionId = namedtuple('ConnectionId', ['dpid', 'controller_id']) ``` #### File: sts/sts/snapshot.py ```python import urllib2 import logging import json import string import time from pox.lib.graph.util import NOMDecoder from pox.openflow.topology import OpenFlowSwitch from pox.openflow.flow_table import FlowTable, TableEntry from pox.openflow.libopenflow_01 import ofp_match, ofp_action_output from sts.entities import POXController, BigSwitchController log = logging.getLogger("Snapshot") class Snapshot(object): """ A Snapshot object is a description of the controllers' view of the network in terms that are meaningful to the debugger. 
Any snaphsot grabbed from any controller should be transformed into a Snapshot object in order to be fed to HSA """ def __int__(self): self.time = None self.switches = [] # The debugger doesn't use the next two (for now anyway) self.hosts = [] self.links = [] def __repr__(self): return "<Snapshot object: (%i switches)>"%len(self.switches) class SnapshotService(object): """ Controller-specific SnapshotServices take care of grabbing a snapshot from their controller in whatever format the controller exports it, and translating it into a Snaphot object that is meaningful to the debbuger """ def __init__(self): self.snapshot = Snapshot() def fetchSnapshot(self, controller): pass class FlexibleNOMDecoder: def __init__(self): self.pox_nom_decoder = NOMDecoder() def decode(self, json): if isinstance(json, (str, unicode)) and string.find(json, "__module__")>=0: return self.pox_nom_decoder.decode(json) else: return self.decode_switch(json) def decode_switch(self, json): flow_table = self.decode_flow_table(json["flow_table"] if "flow_table" in json else json["flowTable"]) switch = OpenFlowSwitch(json["dpid"], flow_table=flow_table) return switch def decode_flow_table(self, json): ft = FlowTable() for e in json["entries"]: ft.add_entry(self.decode_entry(e)) return ft def decode_entry(self, json): e = TableEntry() for (k, v) in json.iteritems(): if k == "match": e.match = self.decode_match(v) elif k == "actions": e.actions = [ self.decode_action(a) for a in v ] else: setattr(e, k, v) return e def decode_match(self, json): return ofp_match(**json) def decode_action(self, json): a = ofp_action_output(port = json['port']) return a class SyncProtoSnapshotService(SnapshotService): def __init__(self): SnapshotService.__init__(self) self.myNOMDecoder = FlexibleNOMDecoder() def fetchSnapshot(self, controller): jsonNOM = controller.sync_connection.get_nom_snapshot() # Update local Snapshot object self.snapshot.switches = [self.myNOMDecoder.decode(s) for s in jsonNOM["switches"]] self.snapshot.hosts = [self.myNOMDecoder.decode(h) for h in jsonNOM["hosts"]] self.snapshot.links = [self.myNOMDecoder.decode(l) for l in jsonNOM["links"]] self.snapshot.time = time.time() return self.snapshot class PoxSnapshotService(SnapshotService): def __init__(self): SnapshotService.__init__(self) self.port = 7790 self.myNOMDecoder = NOMDecoder() def fetchSnapshot(self, controller): from pox.lib.util import connect_socket_with_backoff import socket snapshotSocket = connect_socket_with_backoff('127.0.0.1', self.port) log.debug("Sending Request") snapshotSocket.send("{\"hello\":\"nommessenger\"}") snapshotSocket.send("{\"getnom\":0}", socket.MSG_WAITALL) log.debug("Receiving Results") jsonstr = "" while True: data = snapshotSocket.recv(1024) log.debug("%d byte packet received" % len(data)) if not data: break jsonstr += data if len(data) != 1024: break snapshotSocket.close() jsonNOM = json.loads(jsonstr) # (json string with the NOM) # Update local Snapshot object self.snapshot.switches = [self.myNOMDecoder.decode(s) for s in jsonNOM["switches"]] self.snapshot.hosts = [self.myNOMDecoder.decode(h) for h in jsonNOM["hosts"]] self.snapshot.links = [self.myNOMDecoder.decode(l) for l in jsonNOM["links"]] self.snapshot.time = time.time() return self.snapshot class BigSwitchSnapshotService(SnapshotService): def __init__(self): SnapshotService.__init__(self) def fetchSnapshot(self, controller): req = urllib2.Request('http://localhost:8080/wm/core/proact') response = urllib2.urlopen(req) json_data = response.read() l = json.loads(json_data) res 
= [] for m in l: res.append(Snapshot.from_json_map(m)) return res # Create local Snapshot object snapshot = Snapshot() self.snapshot = snapshot return self.snapshot def get_snapshotservice(controller_configs): '''Return a SnapshotService object determined by the name of the first controller in the controller_configs. For now, we only support a homogenous controller environment.''' # Read from config what controller we are using # TODO(cs): allow for heterogenous controllers? if controller_configs != [] and controller_configs[0].sync: snapshotService = SyncProtoSnapshotService() elif controller_configs != [] and controller_configs[0].controller_class == POXController: snapshotService = PoxSnapshotService() elif controller_configs != [] and controller_configs[0].controller_class == BigSwitchController: snapshotService = BigSwitchSnapshotService() else: # We default snapshotService to POX snapshotService = PoxSnapshotService() return snapshotService ``` #### File: sts/syncproto/sts_syncer.py ```python from sts.syncproto.base import SyncProtocolSpeaker, SyncMessage, SyncTime, SyncIODelegate from pox.lib.util import parse_openflow_uri, connect_socket_with_backoff import logging log = logging.getLogger("sts_sync_proto") class STSSyncProtocolSpeaker(SyncProtocolSpeaker): def __init__(self, controller, state_master, io_delegate): if state_master is None: raise ValueError("state_master is null") self.state_master = state_master self.controller = controller handlers = { ("ASYNC", "StateChange"): self._log_async_state_change, ("SYNC", "StateChange"): self._log_sync_state_change, ("REQUEST", "DeterministicValue"): self._get_deterministic_value } SyncProtocolSpeaker.__init__(self, handlers, io_delegate) def _log_async_state_change(self, message): self.state_master.state_change("ASYNC", message.xid, self.controller, message.time, message.fingerPrint, message.name, message.value) def _log_sync_state_change(self, message): # Note: control_flow needs to register a handler on state_master to ACK the # controller self.state_master.state_change("SYNC", message.xid, self.controller, message.time, message.fingerPrint, message.name, message.value) def _get_deterministic_value(self, message): self.state_master.get_deterministic_value(self.controller, message.name, message.xid) class STSSyncConnection(object): """ A connection to a controller with the sts sync protocol """ def __init__(self, controller, state_master, sync_uri): self.controller = controller (self.mode, self.host, self.port) = parse_openflow_uri(sync_uri) if state_master is None: raise ValueError("state_master is null") self.state_master = state_master self._on_disconnect = [] self.io_delegate = None self.speaker = None def on_disconnect(self, func): self._on_disconnect.append(func) def connect(self, io_master): if self.mode != "tcp": raise RuntimeError("only tcp (active) mode supported by now") socket = connect_socket_with_backoff(self.host, self.port) self.io_delegate = SyncIODelegate(io_master, socket) self.speaker = STSSyncProtocolSpeaker(controller=self.controller, state_master=self.state_master, io_delegate=self.io_delegate) def disconnect(self): self.io_delegate.close() for handler in self._on_disconnect: handler(self) def close(self): self.disconnect() def get_nom_snapshot(self): if self.speaker: return self.speaker.sync_request("NOMSnapshot", "", timeout=10) else: log.warn("STSSyncConnection: not connected. 
cannot handle requests") def send_link_notification(self, link_attrs): # Link attrs must be a list of the form: # [dpid1, port1, dpid2, port2] if self.speaker: msg = SyncMessage(type="ASYNC", messageClass="LinkDiscovery", value=link_attrs) return self.speaker.send(msg) else: log.warn("STSSyncConnection: not connected. cannot send link") def ack_sync_notification(self, messageClass, xid): if self.speaker: return self.speaker.ack_sync_notification(messageClass, xid) else: log.warn("STSSyncConnection: not connected. cannot ACK") def send_deterministic_value(self, xid, value): if self.speaker: msg = SyncMessage(type="RESPONSE", messageClass="DeterministicValue", time=value, xid=xid, value=value) return self.speaker.send(msg) else: log.warn("STSSyncConnection: not connected. cannot ACK") class STSSyncConnectionManager(object): """the connection manager for the STS sync protocols. TODO: finish""" def __init__(self, io_master, state_master): self.io_master = io_master self.sync_connections = [] if state_master is None: raise ValueError("state_master is null") self.state_master = state_master def connect(self, controller, sync_uri): s = STSSyncConnection(controller=controller, state_master=self.state_master, sync_uri=sync_uri) s.connect(self.io_master) s.on_disconnect(self.remove_connection) self.sync_connections.append(s) return s def remove_connection(self, connection): if connection in self.sync_connections: self.sync_connections.remove(connection) class STSSyncCallback(object): """ override with your favorite functionality """ def state_change(self, type, xid, controller, time, fingerprint, name, value): log.info("{}: controller: {} time: {} fingerprint: {} name: {} value: {}"\ .format(type, controller, time, fingerprint, name, value)) def get_deterministic_value(self, controller, name, xid): if name == "gettimeofday": return SyncTime.now() ``` #### File: sts/topology/dp_buffer.py ```python import itertools from collections import defaultdict from collections import Iterable import logging from pox.openflow.software_switch import DpPacketOut from pox.openflow.software_switch import SoftwareSwitch from pox.lib.revent import EventMixin from sts.entities.hosts import HostAbstractClass from sts.fingerprints.messages import DPFingerprint from sts.invariant_checker import InvariantChecker from sts.util.capability import Capabilities from sts.util.console import msg log = logging.getLogger("sts.topology.packet_delivery") class DataPathBufferCapabilities(Capabilities): def __init__(self, can_queue_dp_events=True, can_permit_dp_event=True, can_drop_dp_event=True): self._can_permit_dp_event = can_permit_dp_event self._can_drop_dp_event = can_drop_dp_event self._can_queue_dp_events = can_queue_dp_events @property def can_permit_dp_event(self): return self._can_permit_dp_event @property def can_drop_dp_event(self): return self._can_drop_dp_event @property def can_queue_dp_events(self): return self._can_queue_dp_events class DataPathBuffer(object): """ A Patch panel. Contains a bunch of wires to forward packets between switches. Listens to the SwitchDPPacketOut event on the switches. 
""" def __init__(self, switches=None, hosts=None, connected_port_mapping=None, capabilities=None): """ Constructor - switches: a list of the switches in the network - hosts: a list of hosts in the network - connected_port_mapping: a function which takes (switch_no, port_no, dpid2switch), and returns the adjacent (node, port) or None """ if capabilities is None: self.capabilities = DataPathBufferCapabilities(can_queue_dp_events=False, can_permit_dp_event=False, can_drop_dp_event=False) else: self.capabilities = capabilities if switches is None: switches = [] if not isinstance(switches, Iterable): switches = [switches] if hosts is None: hosts = [] if not isinstance(hosts, Iterable): hosts = [hosts] self.switches = [] self.hosts = [] self.get_connected_port = connected_port_mapping for switch in sorted(switches, key=lambda(sw): sw.dpid): self.add_switch(switch) for host in hosts: self.add_host(host) def add_switch(self, switch): switch.addListener(DpPacketOut, self.handle_DpPacketOut) self.switches.append(switch) def remove_switch(self, switch): switch.removeListener(self.handle_DpPacketOut) self.switches.remove(switch) def add_host(self, host): host.addListener(DpPacketOut, self.handle_DpPacketOut) self.hosts.append(host) def remove_host(self, host): host.removeListener(self.handle_DpPacketOut) self.hosts.remove(host) def register_interface_pair(self, event): (src_addr, dst_addr) = (event.packet.src, event.packet.dst) if src_addr is not None and dst_addr is not None: InvariantChecker.register_interface_pair(src_addr, dst_addr) def handle_DpPacketOut(self, event): self.register_interface_pair(event) try: (node, port) = self.get_connected_port(event.node, event.port) except ValueError: log.warn("no such port %s on node %s" % ( str(event.port), str(event.node))) return if isinstance(node, HostAbstractClass): self.deliver_packet(node, event.packet, port) else: self.forward_packet(node, event.packet, port) def forward_packet(self, next_switch, packet, next_port): """Forward the packet to the given port""" if type(next_port) != int: next_port = next_port.port_no next_switch.process_packet(packet, next_port) def deliver_packet(self, host, packet, host_interface): """Deliver the packet to its final destination""" host.receive(host_interface, packet) @property def queued_dataplane_events(self): assert self.capabilities.can_queue_dp_events raise NotImplementedError() def permit_dp_event(self, dp_event): assert self.capabilities.can_permit_dp_event raise NotImplementedError() def drop_dp_event(self, dp_event): assert self.capabilities.can_drop_dp_event raise NotImplementedError() class BufferedPatchPanel(DataPathBuffer, EventMixin): """ A Buffered Patch panel.Listens to SwitchDPPacketOut and HostDpPacketOut events, and re-raises them to listeners of this object. Does not traffic until given permission from a higher-level. 
""" _eventMixin_events = set([DpPacketOut]) def __init__(self, switches=None, hosts=None, connected_port_mapping=None, capabilities=None): if capabilities is None: self.capabilities = DataPathBufferCapabilities(can_queue_dp_events=True, can_permit_dp_event=True, can_drop_dp_event=True) else: self.capabilities = capabilities if switches is None: switches = [] if not isinstance(switches, Iterable): switches = [switches] if hosts is None: hosts = [] if not isinstance(hosts, Iterable): hosts = [hosts] self.switches = [] self.hosts = [] self.get_connected_port = connected_port_mapping # Buffered dp out events self.fingerprint2dp_outs = defaultdict(list) for switch in sorted(switches, key=lambda(sw): sw.dpid): self.add_switch(switch) for host in hosts: self.add_host(host) def _handle_DpPacketOut(self, event): fingerprint = (DPFingerprint.from_pkt(event.packet), event.node.dpid, event.port.port_no) # Monkey patch on a fingerprint for this event event.fingerprint = fingerprint self.fingerprint2dp_outs[fingerprint].append(event) self.raiseEvent(event) def add_switch(self, switch): switch.addListener(DpPacketOut, self._handle_DpPacketOut) self.switches.append(switch) def remove_switch(self, switch): switch.removeListener(self._handle_DpPacketOut) self.switches.remove(switch) def add_host(self, host): host.addListener(DpPacketOut, self._handle_DpPacketOut) self.hosts.append(host) def remove_host(self, host): host.removeListener(self._handle_DpPacketOut) self.hosts.remove(host) @property def queued_dataplane_events(self): assert self.capabilities.can_queue_dp_events list_of_lists = self.fingerprint2dp_outs.values() return list(itertools.chain(*list_of_lists)) def permit_dp_event(self, dp_event): """Given a SwitchDpPacketOut event, permit it to be forwarded""" assert self.capabilities.can_permit_dp_event # TODO(cs): self.forward_packet should not be externally visible! msg.event("Forwarding dataplane event") # Invoke superclass DpPacketOut handler self.handle_DpPacketOut(dp_event) self._remove_dp_event(dp_event) def drop_dp_event(self, dp_event): """ Given a SwitchDpPacketOut event, remove it from our buffer, and do not forward. Returns the dropped event. 
""" assert self.capabilities.can_drop_dp_event msg.event("Dropping dataplane event") self._remove_dp_event(dp_event) return dp_event def _remove_dp_event(self, dp_event): # Pre: dp_event.fingerprint in self.fingerprint2dp_outs self.fingerprint2dp_outs[dp_event.fingerprint].remove(dp_event) if self.fingerprint2dp_outs[dp_event.fingerprint] == []: del self.fingerprint2dp_outs[dp_event.fingerprint] def get_buffered_dp_event(self, fingerprint): if fingerprint in self.fingerprint2dp_outs: return self.fingerprint2dp_outs[fingerprint][0] return None def data_path_buffer_factory(topology): """ Given a pox.lib.graph.graph object with hosts, switches, and other things, produce an appropriate BufferedPatchPanel """ return BufferedPatchPanel( topology.find( is_a=SoftwareSwitch), topology.find(is_a=HostAbstractClass), lambda node, port: topology.port_for_node(node, port)) ``` #### File: sts/util/io_master.py ```python import errno import sys import logging import select import socket import time import threading from pox.lib.util import makePinger from pox.lib.ioworker.io_worker import IOWorker log = logging.getLogger("io_master") class STSIOWorker(IOWorker): """ An IOWorker that works with our IOMaster """ def __init__(self, socket, on_close): IOWorker.__init__(self) self.socket = socket self.closed = False # (on_close factory method hides details of the Select loop) self.on_close = on_close def fileno(self): """ Return the wrapped sockets' fileno """ return self.socket.fileno() def send(self, data): """ send data from the client side. fire and forget. """ return IOWorker.send(self, data) def close(self): """ Register this socket to be closed. fire and forget """ # (don't close until Select loop is ready) IOWorker.close(self) # on_close is a function not a method self.on_close(self) # Note that IOMaster is used as the main select loop in POX (debugger branch) class IOMaster(object): """ an IO handler that handles the select work for our IO worker """ _select_timeout = 5 _BUF_SIZE = 8192 def __init__ (self): self._workers = set() self.pinger = makePinger() self.closed = False self._close_requested = False self._in_select = 0 def create_worker_for_socket(self, socket): ''' Return an IOWorker wrapping the given socket. ''' # Called from external threads. # Does not register the IOWorker immediately with the select loop -- # rather, adds a command to the pending queue # Our callback for io_worker.close(): def on_close(worker): worker.socket.close() worker.closed = True self._workers.discard(worker) worker = STSIOWorker(socket, on_close=on_close) self._workers.add(worker) return worker def monkey_time_sleep(self): """monkey patches time.sleep to use this io_masters's time.sleep""" self.original_time_sleep = time.sleep # keep time._orig_sleep around for interrupt handler (procutils) time._orig_sleep = time.sleep time.sleep = self.sleep def raw_input(self, prompt): """ raw_input replacement that enables background IO to take place. NOTE: this migrates the IO to a specifically created BackgroundIOThread while readline's raw_input is running. raw_input must run in the main thread so the terminal is properly restored on CTRL-C. The Background IO thread is notified and terminates before the return of this function, so no concurrent IO takes place. 
""" _io_master = self class BackgroundIOThread(threading.Thread): def __init__(self): threading.Thread.__init__(self, name="BackgroundIOThread") self.done = False def run(self): while not self.done: # TODO(cs): I believe this may trigger race conditions whenever # asynchronous signals preempt the main thread! _io_master.select(None) # TODO(cs): why do we invoke this sleep? self.sleep(0.05) io_thread = BackgroundIOThread() io_thread.daemon = False io_thread.start() try: return raw_input(prompt) finally: """ make sure background IO is terminated gracefully before returning """ io_thread.done = True self._ping() io_thread.join() def _ping(self): if self.pinger: self.pinger.ping() def close_all(self): if self._in_select > 0: self._close_requested = True self._ping() else: self._do_close_all() def _do_close_all(self): for w in list(self._workers): try: w.close() except Exception as e: log.warn("Error closing IOWorker %s: %s (%d)", w, e.strerror, e.errno) if time.sleep is self.sleep: time.sleep = self.original_time_sleep if (self.pinger): self.pinger.ping() if hasattr(self.pinger, "close"): self.pinger.close() self.pinger = None self.closed = True def poll(self): self.select(0) def sleep(self, timeout): ''' invokes select.select continuously for exactly timeout seconds, then returns. ''' start = time.time() while not self.closed: elapsed = time.time() - start remaining = timeout - elapsed if remaining < 0.01: break self.select(remaining) def deschedule_worker(self, io_worker): self._workers.discard(io_worker) def reschedule_worker(self, io_worker): self._workers.add(io_worker) def grab_workers_rwe(self): # Now grab workers read_sockets = list(self._workers) + [ self.pinger ] write_sockets = [ worker for worker in self._workers if worker._ready_to_send ] exception_sockets = list(self._workers) return (read_sockets, write_sockets, exception_sockets) def select(self, timeout=0): ''' Waits up to timeout seconds, but may return before then if I/O is ready. ''' self._in_select += 1 try: read_sockets, write_sockets, exception_sockets = self.grab_workers_rwe() rlist, wlist, elist = select.select(read_sockets, write_sockets, exception_sockets, timeout) self.handle_workers_rwe(rlist, wlist, elist) except select.error: # TODO(cs): this is a hack: file descriptor is closed upon shut # down, and select throws up. sys.stderr.write("File Descriptor Closed\n") except TypeError: # Same behavior, error message is: # TypeError: argument must be an int, or have a fileno() method. 
sys.stderr.write("File Descriptor Closed\n") finally: self._in_select -= 1 if self._in_select == 0 and self._close_requested and not self.closed: self._do_close_all() def handle_workers_rwe(self, rlist, wlist, elist): if self.pinger in rlist: self.pinger.pongAll() rlist.remove(self.pinger) for worker in elist: worker.close() if worker in self._workers: self._workers.discard(worker) for worker in rlist: try: data = worker.socket.recv(self._BUF_SIZE) if data: worker._push_receive_data(data) else: log.warn("Closing socket due to empty read") worker.close() self._workers.discard(worker) except socket.error as (s_errno, strerror): log.error("Socket error: " + strerror) worker.close() self._workers.discard(worker) for worker in wlist: try: l = worker.socket.send(worker.send_buf) if l > 0: worker._consume_send_buf(l) except socket.error as (s_errno, strerror): if s_errno != errno.EAGAIN: log.error("Socket error: " + strerror) worker.close() self._workers.discard(worker) ``` #### File: sts/util/precompute_cache.py ```python from collections import defaultdict import itertools class PrecomputePowerSetCache(object): sequence_id = itertools.count(1) def __init__(self): self.element2id = defaultdict(lambda: set()) def already_done(self, input_sequence): return len(reduce(lambda left, new: left & new if not left is None else new, (self.element2id[elem] for elem in input_sequence )) ) != 0 def update(self, input_sequence): id = self.sequence_id.next() for elem in input_sequence: self.element2id[elem].add(id) class PrecomputeCache(object): def __init__(self): self.done_sequences = set() def already_done(self, input_sequence): return input_sequence in self.done_sequences def update(self, input_sequence): self.done_sequences.add(input_sequence) ``` #### File: util/socket_mux/base.py ```python from sts.util.io_master import IOMaster from pox.lib.ioworker.io_worker import JSONIOWorker, IOWorker import select import socket import logging import errno import threading import base64 log = logging.getLogger("sock_mux") # TODO(cs): what if the controller doesn't use a select loop? The demuxing can # still be achieved, it's just that all socket calls will be blocking. We # would also need to make sure that our code is thread-safe. # The wire protocol is fairly simple: # - all messages are wrapped in a json hash # - each hash has two fields: `id', and `type' # - `id' identifies a channel. The value of `id' is shared between the client # socket and the corresponding socket in the server. # - Upon connect(), tell the server that we've connected. `type' is set to # "SYN", and an additional `address' field tells the server the proper # address to return from accept(). # - Upon seeing the SYN for an id it has not observed before, the server # creates a MockSocket and stores it to be accept()'ed by the mock listener # socket. 
# - All data messages are of type `data', and include a `data' field class SocketDemultiplexer(object): ''' Each true socket is wrapped in a single SocketDemultiplexer, which Demultiplexes messages received on the true socket to MockSockets''' def __init__(self, true_io_worker): self.true_io_worker = true_io_worker self.client_info = true_io_worker.socket.getsockname() self.json_worker = JSONIOWorker(true_io_worker, on_json_received=self._on_receive) self.id2socket = {} self.log = logging.getLogger("sockdemux") def _on_receive(self, _, json_hash): if 'id' not in json_hash or 'type' not in json_hash: raise ValueError("Invalid json_hash %s" % str(json_hash)) pass class MockSocket(object): def __init__(self, protocol, sock_type, sock_id=-1, json_worker=None): self.protocol = protocol self.sock_type = sock_type self.sock_id = sock_id self.json_worker = json_worker self.pending_reads = [] def ready_to_read(self): return self.pending_reads != [] def send(self, data): # base 64 occasionally adds extraneous newlines: bit.ly/aRTmNu json_safe_data = base64.b64encode(data).replace("\n", "") wrapped = {'id' : self.sock_id, 'type' : 'data', 'data' : json_safe_data} self.json_worker.send(wrapped) # that just put it on a buffer. Now, actually send... # TODO(cs): this is hacky. Should really define our own IOWorker class buf = self.json_worker.io_worker.send_buf try: l = self.json_worker.io_worker.socket.send(buf) except socket.error as (s_errno, strerror): if s_errno != errno.EAGAIN: raise l = 0 # Note that if l != len(buf), the rest of the data will be sent on the # next select() [since true_io_worker._ready_to_send will still be True. # In this case our return value will be a lie, but there won't be any # negative consequences of this, since the client is a MockSocket, and we # filter them out of the select call anyway. self.json_worker.io_worker._consume_send_buf(l) return len(data) def recv(self, bufsize): if self.pending_reads == []: log.warn("recv() called with an empty buffer") # Never block return None # TODO(cs): don't ignore bufsize data = self.pending_reads.pop(0) return data def append_read(self, data): self.pending_reads.append(data) def fileno(self): return self.sock_id def setsockopt(self, *args, **kwargs): # TODO(cs): implement me pass def setblocking(self, _): # We never block anyway pass def getpeername(self): pass def close(self): # TODO(cs): implement me pass def is_mocked(sock_or_io_worker): if sock_or_io_worker is None: # Guard against None sockets upon exit return True return sock_or_io_worker.fileno() < 0 def sort_sockets(rl, wl, xl): for l in [rl, wl, xl]: l.sort(key=lambda s: s.fileno()) return (rl, wl, xl) class MultiplexedSelect(IOMaster): # Note that there will be *two* IOMasters running in the process. This one # runs below the normal IOMaster. MultiplexedSelect subclasses IOMaster only to # wrap its true socket(s) in an internal IOWorker. Also note that the normal # IOMaster's pinger sockets will in fact be MockSockets. We have the only # real pinger socket. # The caller may pass in classes that wrap our MockSockets. select() can # only rely on the fileno() to tell whether the socket is ready to read. # Therefore we keep a map fileno() -> MockSocket.ready_to_read. 
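  # Added sketch (an assumption, not lifted from the original code): the entry
  # for a given MockSocket is presumably registered when the mock is created,
  # roughly:
  #   MultiplexedSelect.fileno2ready_to_read[mock_sock.fileno()] = mock_sock.ready_to_read
  # so that ready_to_read() below can resolve the callback from the negative
  # fileno of whatever wrapper object it is handed.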
# TODO(cs): perhaps this shouldn't be a class variable fileno2ready_to_read = {} def __init__(self, *args, **kwargs): super(MultiplexedSelect, self).__init__(*args, **kwargs) self.log = logging.getLogger("mux_select") def ready_to_read(self, sock_or_io_worker): if sock_or_io_worker is None: return False fileno = sock_or_io_worker.fileno() if fileno >= 0: raise ValueError("Not a MockSocket!") if fileno not in self.fileno2ready_to_read: raise RuntimeError("Unknown mock fileno %d" % fileno) return self.fileno2ready_to_read[fileno]() def select(self, rl, wl, xl, timeout=0): ''' Note that this layer is *below* IOMaster's Select loop ''' # Always remove MockSockets or wrappers of MockSockets # (don't mess with other non-socket fds) mock_read_workers = [ s for s in rl if is_mocked(s) ] mock_write_workers = [ w for w in wl if is_mocked(w) ] # If this isn't the main thread, use normal select if mock_read_workers == [] and mock_write_workers == []: if hasattr(select, "_old_select"): return select._old_select(rl, wl, xl, timeout) else: return select.select(rl, wl, xl, timeout) (rl, wl, xl) = [ [s for s in l if not is_mocked(s) ] for l in [rl, wl, xl] ] # Grab the sock lists for our internal socket. These lists will contain # our true_io_worker(s), along with our pinger. (our_rl, our_wl, our_xl) = self.grab_workers_rwe() # If any of our mock sockets are ready to read, and our true_socket # doesn't have pending writes, return immediately ready_to_read_mock = [ s for s in mock_read_workers if self.ready_to_read(s) ] if (ready_to_read_mock != [] or mock_write_workers != []) and our_wl == []: return sort_sockets(ready_to_read_mock, mock_write_workers, []) if hasattr(select, "_old_select"): (rl, wl, xl) = select._old_select(rl+our_rl, wl+our_wl, xl+our_xl, timeout) else: (rl, wl, xl) = select.select(rl+our_rl, wl+our_wl, xl+our_xl, timeout) (rl, wl, xl) = self.handle_socks_rwe(rl, wl, xl, mock_read_workers, mock_write_workers) return (rl, wl, xl) def handle_socks_rwe(self, rl, wl, xl, mock_read_workers, mock_write_workers): if self.pinger in rl: self.pinger.pongAll() rl.remove(self.pinger) for true_io_worker in list(self._workers): if true_io_worker in xl: raise RuntimeError("Error in true socket") if true_io_worker in rl: rl.remove(true_io_worker) # Trigger self.true_io_worker.on_received try: data = true_io_worker.socket.recv(self._BUF_SIZE) if data: true_io_worker._push_receive_data(data) else: print "Closing true_io_worker after empty read" true_io_worker.close() self._workers.discard(true_io_worker) except socket.error as (s_errno, strerror): if s_errno != errno.EWOULDBLOCK: print ("Socket read error: " + strerror) true_io_worker.close() self._workers.discard(true_io_worker) if true_io_worker in wl: wl.remove(true_io_worker) try: l = true_io_worker.socket.send(true_io_worker.send_buf) if l > 0: true_io_worker._consume_send_buf(l) except socket.error as (s_errno, strerror): if s_errno != errno.EAGAIN and s_errno != errno.EWOULDBLOCK: print "Socket error: " + strerror true_io_worker.close() self._workers.discard(true_io_worker) # Now add MockSockets that are ready to read rl += [ s for s in mock_read_workers if self.ready_to_read(s) ] # As well as MockSockets that are ready to write. # This will cause the IOMaster above to flush the # io_worker's buffers into our true_io_worker. 
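    # Added clarification (editorial, not in the original): the mock write
    # workers are appended without any readiness check -- a MockSocket's
    # send() serializes into the shared true_io_worker's buffer (flushing what
    # it can immediately), so reporting it writable here simply lets the
    # IOMaster above drain its deferred buffers; any remainder goes out the
    # next time the true socket selects as writable.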
wl += mock_write_workers # Sort all sockets to ensure determinism return sort_sockets(rl, wl, xl) ``` #### File: sts/entities/hosts_test.py ```python import os import unittest from sts.entities.hosts import HostInterface from sts.entities.hosts import NamespaceHost from sts.util.io_master import IOMaster class NamespaceHostTest(unittest.TestCase): # TODO (AH): test send and receive def initialize_io_loop(self): io_master = IOMaster() return io_master @unittest.skipIf(os.geteuid() != 0, "Not running tests as root") def test_init(self): # Arrange name = "test-host" hid = 123 hw_addr_str = "0e:32:a4:91:e7:20" ip = "192.168.56.1" interfaces = [HostInterface(hw_addr_str, ip)] io_master = self.initialize_io_loop() # Act host = NamespaceHost(interfaces, io_master.create_worker_for_socket, name=name, hid=hid) # Assert self.assertEquals(host.interfaces, interfaces) def test_send(self): # TODO (AH): test send, better done when I figure out what to do with topo pass def test_receive(self): # TODO (AH): test receive, better done when I figure out what to do with # topology.py pass ``` #### File: integration/sts/mcs_finder_integration_test.py ```python import unittest import sys import os sys.path.append(os.path.dirname(__file__) + "/../../..") simple_cfg = ''' from config.experiment_config_lib import ControllerConfig from sts.control_flow.mcs_finder import MCSFinder from sts.simulation_state import SimulationConfig controllers = [ControllerConfig(start_cmd='./pox.py --verbose --no-cli openflow.of_01 --address=__address__ --port=__port__', address='127.0.0.1', port=8888, cwd='pox')] simulation_config = SimulationConfig(controller_configs=controllers) control_flow = MCSFinder(simulation_config, "%s", invariant_check_name="InvariantChecker.check_liveness") ''' class ReplayerTest(unittest.TestCase): tmpsuperlog = '/tmp/superlog.tmp' tmpcfg = 'config/mcs_simple_test.py' tmpcfgpyc = 'config/mcs_simple_test.pyc' tmpcfgmodule = 'config.mcs_simple_test' def write_simple_superlog(self): ''' Returns the file. Make sure to close afterwards! 
''' # MCS is {e2}, a single controller failure event superlog = open(self.tmpsuperlog, 'w') e1 = str('''{"dependent_labels": [], "start_dpid": 8, "class": "LinkFailure",''' ''' "start_port_no": 3, "end_dpid": 15, "end_port_no": 2, "label": "e1", "time": [0,0], "round": 0}''') superlog.write(e1 + '\n') e2 = str('''{"dependent_labels": [], "class": "ControllerFailure",''' ''' "controller_id": "c1", "label": "e2", "time": [0,0], "round": 0}''') superlog.write(e2 + '\n') e3 = str('''{"dependent_labels": [], "class": "InvariantViolation",''' ''' "violations": ["c1"], "persistent": true, "time": [0,0], "label": "e3"}''') superlog.write(e3 + '\n') superlog.close() def write_simple_cfg(self): cfg = open(self.tmpcfg, 'w') cfg.write(simple_cfg % self.tmpsuperlog) cfg.close() def basic_test(self): try: self.write_simple_superlog() self.write_simple_cfg() ret = os.system("./simulator.py -c %s" % self.tmpcfgmodule) self.assertEqual(0, ret) finally: os.unlink(self.tmpsuperlog) os.unlink(self.tmpcfg) if os.path.exists(self.tmpcfgpyc): os.unlink(self.tmpcfgpyc) if __name__ == '__main__': unittest.main() ``` #### File: integration/sts/replayer_integration_test.py ```python import unittest import sys import os sys.path.append(os.path.dirname(__file__) + "/../../..") simple_cfg = ''' from sts.control_flow.replayer import Replayer from sts.simulation_state import SimulationConfig simulation_config = SimulationConfig() control_flow = Replayer(simulation_config, "%s") ''' class ReplayerTest(unittest.TestCase): tmpsuperlog = '/tmp/superlog.tmp' tmpcfg = 'config/replayer_simple_test.py' tmpcfgpyc = 'config/replayer_simple_test.pyc' tmpcfgmodule = 'config.replayer_simple_test' def write_simple_superlog(self): ''' Returns the file. Make sure to close afterwards! ''' superlog = open(self.tmpsuperlog, 'w') e1 = str('''{"dependent_labels": ["e2"], "start_dpid": 8, "class": "LinkFailure",''' ''' "start_port_no": 3, "end_dpid": 15, "end_port_no": 2, "label": "e1", "time": [0,0], "round": 0}''') superlog.write(e1 + '\n') e2 = str('''{"dependent_labels": [], "start_dpid": 8, "class": "LinkRecovery",''' ''' "start_port_no": 3, "end_dpid": 15, "end_port_no": 2, "label": "e2", "time": [0,0], "round": 0}''') superlog.write(e2 + '\n') superlog.close() def write_simple_cfg(self): cfg = open(self.tmpcfg, 'w') cfg.write(simple_cfg % self.tmpsuperlog) cfg.close() def basic_test(self): try: self.write_simple_superlog() self.write_simple_cfg() ret = os.system("./simulator.py -c %s" % self.tmpcfgmodule) self.assertEqual(0, ret) finally: os.unlink(self.tmpsuperlog) os.unlink(self.tmpcfg) if os.path.exists(self.tmpcfgpyc): os.unlink(self.tmpcfgpyc) if __name__ == '__main__': unittest.main() ``` #### File: integration/sts/snapshot_test.py ```python import unittest import sys import os import time from config.experiment_config_lib import ControllerConfig from sts.topology import MeshTopology from sts.simulation_state import SimulationConfig from sts.control_flow import RecordingSyncCallback from sts.control_flow.snapshot_utils import Snapshotter from sts.entities import SnapshotPopen from sts.util.convenience import IPAddressSpace from sts.util.procutils import kill_procs sys.path.append(os.path.dirname(__file__) + "/../../..") class SnapshotTest(unittest.TestCase): def basic_test(self): simulation = None try: start_cmd = ('''./pox.py --verbose ''' '''openflow.discovery forwarding.l2_multi ''' '''sts.util.socket_mux.pox_monkeypatcher --snapshot_address=../snapshot_socket ''' '''openflow.of_01 --address=__address__ --port=__port__''') 
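      # Added note (editorial, not part of the original test): the
      # pox_monkeypatcher component's --snapshot_address=../snapshot_socket and
      # the snapshot_address="./snapshot_socket" passed to ControllerConfig
      # below appear to name the same unix socket -- the paths differ only
      # because POX is launched with cwd="pox" -- and that socket is what the
      # Snapshotter relies on to checkpoint and resume the controller later in
      # this test.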
IPAddressSpace._claimed_addresses.clear() ControllerConfig._controller_labels.clear() controllers = [ControllerConfig(start_cmd, cwd="pox", snapshot_address="./snapshot_socket")] topology_class = MeshTopology topology_params = "num_switches=2" simulation_config = SimulationConfig(controller_configs=controllers, topology_class=topology_class, topology_params=topology_params, multiplex_sockets=True) simulation = simulation_config.bootstrap(RecordingSyncCallback(None)) simulation.connect_to_controllers() c1 = simulation.controller_manager.controllers[0] c1_pid = c1.pid snapshotter = Snapshotter(simulation, c1) snapshotter.snapshot_controller() # TODO(cs): time.sleep() is a broken way to synchronize time.sleep(1) kill_procs([c1.process]) snapshotter.snapshot_proceed() self.assertEqual(1, len(simulation.controller_manager.controllers)) c2 = simulation.controller_manager.controllers[0] c2_pid = c2.pid self.assertTrue(c1_pid != c2_pid) # Controller object itself should not have changed self.assertTrue(c1 == c2) # snapshotting should work multiple times snapshotter = Snapshotter(simulation, c2) snapshotter.snapshot_controller() # TODO(cs): time.sleep() is a broken way to synchronize time.sleep(1) kill_procs([c2.process]) snapshotter.snapshot_proceed() self.assertEqual(1, len(simulation.controller_manager.controllers)) c3 = simulation.controller_manager.controllers[0] self.assertTrue(c2_pid != c3.pid) finally: try: if simulation is not None: simulation.clean_up() except Exception as e: print "SnapshotTest.test_basic: exception encountered in finally clause: %s" % e if __name__ == '__main__': unittest.main() ``` #### File: sts/topology/base_test.py ```python import functools import unittest from pox.openflow.libopenflow_01 import ofp_phy_port from pox.lib.util import connect_socket_with_backoff from sts.topology.graph import TopologyGraph from sts.topology.base import Topology, TopologyCapabilities from sts.topology.controllers_manager import ControllersManager from sts.entities.hosts import Host from sts.entities.hosts import HostInterface from sts.entities.sts_entities import AccessLink from sts.entities.sts_entities import Link from sts.entities.base import BiDirectionalLinkAbstractClass from sts.entities.sts_entities import FuzzSoftwareSwitch from sts.topology.sts_hosts_manager import STSHostsManager from sts.topology.sts_switches_manager import STSSwitchesManager from sts.topology.sts_patch_panel import STSPatchPanel from sts.topology.hosts_manager import mac_addresses_generator from sts.topology.hosts_manager import ip_addresses_generator from sts.topology.hosts_manager import interface_names_generator from sts.topology.dp_buffer import BufferedPatchPanel from sts.entities.sts_entities import DeferredOFConnection from sts.openflow_buffer import OpenFlowBuffer from sts.util.io_master import IOMaster from sts.util.deferred_io import DeferredIOWorker class TopologyTest(unittest.TestCase): def initialize_io_loop(self): io_master = IOMaster() return io_master def create_connection(self, controller_info, switch): """Connect switches to controllers. 
May raise a TimeoutError""" max_backoff_seconds=1024 socket_ctor = socket.socket sock = connect_socket_with_backoff(controller_info.config.address, controller_info.config.port, max_backoff_seconds=max_backoff_seconds, socket_ctor=socket_ctor) # Set non-blocking sock.setblocking(0) io_worker = DeferredIOWorker(self.io_master.create_worker_for_socket(sock)) connection = DeferredOFConnection(io_worker, controller_info.cid, switch.dpid, self.openflow_buffer) return connection def sts_topology_type_factory(self, is_host=None, is_switch=None, is_network_link=None, is_access_link=None, is_host_interface=None, is_port=None): """ Fills in the parameters needed for default behavior as STS topology. Returns Topology class init with some of the fields already filled in. """ is_host_lambda = lambda x: isinstance(x, Host) is_switch_lambda = lambda x: hasattr(x, 'dpid') is_network_link_lambda =lambda x: isinstance(x, Link) is_access_link_lambda = lambda x: isinstance(x, AccessLink) is_host_interface_lambda = lambda x: isinstance(x, HostInterface) is_port_lambda = lambda x: isinstance(x, ofp_phy_port) is_host = is_host or is_host_lambda is_switch = is_switch or is_switch_lambda is_network_link = is_network_link or is_network_link_lambda is_access_link = is_access_link or is_access_link_lambda is_host_interface = is_host_interface or is_host_interface_lambda is_port = is_port or is_port_lambda return functools.partial(Topology, hosts_manager=STSHostsManager(), switches_manager=STSSwitchesManager(self.create_connection), controllers_manager=ControllersManager(), dp_buffer=BufferedPatchPanel(), is_host=is_host, is_switch=is_switch, is_network_link=is_network_link, is_access_link=is_access_link, is_host_interface=is_host_interface, is_port=is_port) def setUp(self): self.io_master = self.initialize_io_loop() self.openflow_buffer = OpenFlowBuffer() @unittest.skip def test_build(self): # Arrange if1 = dict(hw_addr='00:00:00:00:00:01', ips='192.168.56.21') if2 = dict(hw_addr='00:00:00:00:00:02', ips='192.168.56.22') topo_cls = self.sts_topology_type_factory() topo = TopologyGraph() h1 = topo.add_host(interfaces=[if1, if2], name='h1') # Act net = topo_cls(topo_graph=topo, patch_panel=TestPatchPanel(), capabilities=TopologyCapabilities()) net.build() # Assert self.assertEquals(h1, 'h1') self.assertEquals(len(topo._g.vertices), 3) self.assertEquals(list(topo.hosts_iter()), [h1]) self.assertEquals(list(topo.interfaces_iter()), ['h1-eth0', 'h1-eth1']) self.assertEquals(len(topo.get_host_info(h1)['interfaces']), 2) self.assertEquals(topo.get_host_info(h1)['name'], h1) def test_create_interface(self): # Arrange topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) # Act iface1 = topo.create_interface(hw_addr="00:00:00:00:00:11", ip_or_ips="1.2.3.4", name="eth1") iface2 = topo.create_interface(hw_addr="00:00:00:00:00:12", ip_or_ips="1.2.3.5", name="eth2") # Assert self.assertIsNotNone(iface1) self.assertIsNotNone(iface2) def test_create_host(self): # Arrange h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1') h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.2') topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) # Act h1 = topo.create_host(1, "h1", h1_eth1) h2 = topo.create_host(2, "h2", h2_eth1) # Assert self.assertIsNotNone(h1) self.assertIsNotNone(h2) self.assertItemsEqual([h1_eth1], h1.interfaces) self.assertItemsEqual([h2_eth1], 
h2.interfaces) self.assertTrue(topo.graph.has_host(h1)) self.assertTrue(topo.graph.has_host(h2)) self.assertTrue(h1 in topo.hosts_manager.live_hosts) self.assertTrue(h2 in topo.hosts_manager.live_hosts) def test_create_host_with_interfaces(self): # Arrange mac_gen = mac_addresses_generator() ip_gen = ip_addresses_generator() name_gen = interface_names_generator() topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) # Act h1 = topo.create_host_with_interfaces(1, "h1", 2, mac_gen, ip_gen, name_gen) h2 = topo.create_host_with_interfaces(2, "h2", 3, mac_gen, ip_gen, name_gen) # Assert self.assertIsNotNone(h1) self.assertIsNotNone(h2) self.assertEquals(len(h1.interfaces), 2) self.assertEquals(len(h2.interfaces), 3) self.assertTrue(topo.graph.has_host(h1)) self.assertTrue(topo.graph.has_host(h2)) self.assertTrue(h1 in topo.hosts_manager.live_hosts) self.assertTrue(h2 in topo.hosts_manager.live_hosts) def test_add_host(self): # Arrange h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1') h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.2') h1 = Host(h1_eth1, hid=1) h2 = Host(h2_eth1, hid=2) topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) # Act topo.add_host(h1) topo.add_host(h2) duplicate_add = lambda: topo.add_host(h1) wrong_type = lambda: topo.add_host("Dummy") topo._can_add_hosts = False immutable_add = lambda: topo.add_host(h1) # Assert self.assertEquals(len(list(topo.graph.hosts_iter())), 2) self.assertRaises(AssertionError, duplicate_add) self.assertRaises(AssertionError, wrong_type) self.assertRaises(AssertionError, immutable_add) self.assertEquals(len(list(topo.graph.hosts_iter())), 2) self.assertTrue(topo.graph.has_host(h1)) self.assertTrue(topo.graph.has_host(h2)) self.assertTrue(topo.graph.has_host(h1.name)) def test_remove_host(self): # Arrange h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1') h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.2') h1 = Host(h1_eth1, hid=1) h2 = Host(h2_eth1, hid=2) topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) topo.add_host(h1) topo.add_host(h2) # Act topo.remove_host(h1) # Assert self.assertFalse(topo.graph.has_host(h1)) self.assertTrue(topo.graph.has_host(h2)) def test_create_switch(self): # Arrange topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) # Act switch = topo.create_switch(1, 2, True) # Assert self.assertTrue(topo.graph.has_switch(switch)) def test_add_switch(self): # Arrange s1 = FuzzSoftwareSwitch(1, 's1', ports=1) s2 = FuzzSoftwareSwitch(2, 's2', ports=1) topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) # Act topo.add_switch(s1) topo.add_switch(s2) duplicate_add = lambda: topo.add_switch(s1) wrong_type = lambda: topo.add_switch("Dummy") topo._can_add_hosts = False immutable_add = lambda: topo.add_switch(s1) # Assert self.assertEquals(len(list(topo.graph.switches_iter())), 2) self.assertRaises(AssertionError, duplicate_add) self.assertRaises(AssertionError, wrong_type) self.assertRaises(AssertionError, immutable_add) self.assertEquals(len(list(topo.graph.switches_iter())), 2) self.assertTrue(topo.graph.has_switch(s1)) self.assertTrue(topo.graph.has_switch(s2)) 
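    # Added observation (editorial): taken together with the surrounding
    # assertions, has_switch() apparently accepts either the switch object or
    # its string name (e.g. the 's3' check that follows), mirroring how
    # has_host() is exercised with h1.name in test_add_host above.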
self.assertFalse(topo.graph.has_switch('s3')) def test_remove_switch(self): # Arrange s1 = FuzzSoftwareSwitch(1, 's1', ports=1) s2 = FuzzSoftwareSwitch(2, 's2', ports=1) topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) topo.add_switch(s1) topo.add_switch(s2) # Act topo.remove_switch(s1) # Assert self.assertFalse(topo.graph.has_switch(s1)) self.assertTrue(topo.graph.has_switch(s2)) def test_create_network_link(self): # Arrange s1 = FuzzSoftwareSwitch(1, 's1', ports=1) s2 = FuzzSoftwareSwitch(2, 's2', ports=1) topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) topo.add_switch(s1) topo.add_switch(s2) # Act l1 = topo.create_network_link(s1, s1.ports[1], s2, s2.ports[1]) # Assert self.assertEquals(l1.start_node, s1) self.assertEquals(l1.start_port, s1.ports[1]) self.assertEquals(l1.end_node, s2) self.assertEquals(l1.end_port, s2.ports[1]) def test_add_network_link(self): # Arrange s1 = FuzzSoftwareSwitch(1, 's1', ports=1) s2 = FuzzSoftwareSwitch(2, 's2', ports=1) l1 = Link(s1, s1.ports[1], s2, s2.ports[1]) topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) topo.add_switch(s1) topo.add_switch(s2) # Act link = topo.add_network_link(l1) # Assert self.assertEquals(link, l1) self.assertTrue(topo.graph.has_link(link)) self.assertIn(l1, topo.patch_panel.network_links) def test_add_bidir_link(self): # Arrange s1 = FuzzSoftwareSwitch(1, 's1', ports=1) s2 = FuzzSoftwareSwitch(2, 's2', ports=1) l1 = BiDirectionalLinkAbstractClass(s1, s1.ports[1], s2, s2.ports[1]) topo_cls = self.sts_topology_type_factory( is_network_link=lambda x: isinstance(x, BiDirectionalLinkAbstractClass)) topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) #topo = Topology(patch_panel=TestPatchPanel(), # link_cls=BiDirectionalLinkAbstractClass) topo.add_switch(s1) topo.add_switch(s2) # Act link = topo.add_network_link(l1) # Assert self.assertEquals(link, l1) self.assertTrue(topo.graph.has_link(link)) def test_create_access_link(self): # Arrange s1 = FuzzSoftwareSwitch(1, 's1', ports=3) h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1') h1 = Host([h1_eth1], name='h1', hid=1) topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) topo.add_switch(s1) topo.add_host(h1) # Act l1 = topo.create_access_link(h1, h1_eth1, s1, s1.ports[1]) # Assert self.assertEquals(l1.host, h1) self.assertEquals(l1.interface, h1_eth1) self.assertEquals(l1.switch, s1) self.assertEquals(l1.switch_port, s1.ports[1]) def test_add_access_link(self): # Arrange s1 = FuzzSoftwareSwitch(1, 's1', ports=3) h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1') h1 = Host([h1_eth1], name='h1', hid=1) topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) topo.add_switch(s1) topo.add_host(h1) l1 = AccessLink(h1, h1_eth1, s1, s1.ports[1]) # Act l1 = topo.add_access_link(l1) # Assert self.assertIn(l1, topo.patch_panel.access_links) self.assertTrue(topo.graph.has_link(l1)) def test_remove_access_link(self): # Arrange s1 = FuzzSoftwareSwitch(1, 's1', ports=2) h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1') h1_eth2 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.2') h1 = Host([h1_eth1, h1_eth2], 
name='h1', hid=1) topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) topo.add_switch(s1) topo.add_host(h1) l1 = AccessLink(h1, h1_eth1, s1, s1.ports[1]) l2 = AccessLink(h1, h1_eth2, s1, s1.ports[2]) topo.add_network_link(l1) topo.add_network_link(l2) # Act topo.remove_access_link(l1) # Assert self.assertFalse(topo.graph.has_link(l1)) self.assertNotIn(l1, topo.patch_panel.access_links) self.assertTrue(topo.graph.has_link(l2)) self.assertIn(l2, topo.patch_panel.access_links) def test_remove_network_link(self): # Arrange s1 = FuzzSoftwareSwitch(1, 's1', ports=3) s2 = FuzzSoftwareSwitch(2, 's2', ports=3) l1 = Link(s1, s1.ports[1], s2, s2.ports[1]) l2 = Link(s1, s1.ports[2], s2, s2.ports[2]) l3 = Link(s1, s1.ports[3], s2, s2.ports[3]) topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) topo.add_switch(s1) topo.add_switch(s2) topo.add_network_link(l1) topo.add_network_link(l2) topo.add_network_link(l3) # Act topo.remove_network_link(l1) # Assert self.assertFalse(topo.graph.has_link(l1)) self.assertNotIn(l1, topo.patch_panel.network_links) self.assertTrue(topo.graph.has_link(l2)) self.assertIn(l2, topo.patch_panel.network_links) def test_crash_switch(self): # Arrange topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) s1 = FuzzSoftwareSwitch(1, 's1', ports=0) s2 = FuzzSoftwareSwitch(2, 's2', ports=0) topo.add_switch(s1) topo.add_switch(s2) # Act topo.switches_manager.crash_switch(s1) # Assert self.assertEquals(len(topo.switches_manager.failed_switches), 1) self.assertIn(s1, topo.switches_manager.failed_switches) self.assertEquals(topo.switches_manager.live_switches, set([s2])) def test_recover_switch(self): # Arrange topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) s1 = FuzzSoftwareSwitch(1, 's1', ports=0) s2 = FuzzSoftwareSwitch(2, 's2', ports=0) topo.add_switch(s1) topo.add_switch(s2) topo.switches_manager.crash_switch(s1) topo.switches_manager.crash_switch(s2) # Act topo.switches_manager.recover_switch(s1) # Assert self.assertEquals(len(topo.switches_manager.failed_switches), 1) self.assertIn(s2, topo.switches_manager.failed_switches) self.assertEquals(topo.switches_manager.live_switches, set([s1])) def test_live_edge_switches(self): # Arrange topo_cls = self.sts_topology_type_factory() topo = topo_cls(patch_panel=STSPatchPanel(), capabilities=TopologyCapabilities()) s1 = FuzzSoftwareSwitch(1, 's1', ports=0) s2 = FuzzSoftwareSwitch(2, 's2', ports=0) topo.add_switch(s1) topo.add_switch(s2) topo.switches_manager.crash_switch(s1) # Act live_edge = topo.switches_manager.live_edge_switches # Assert self.assertEquals(len(topo.switches_manager.failed_switches), 1) self.assertIn(s1, topo.switches_manager.failed_switches) self.assertEquals(topo.switches_manager.live_switches, set([s2])) self.assertItemsEqual(live_edge, [s2]) ``` #### File: sts/topology/graph_test.py ```python import unittest from pox.openflow.libopenflow_01 import ofp_phy_port from sts.entities.hosts import Host from sts.entities.hosts import HostInterface from sts.entities.sts_entities import AccessLink from sts.entities.sts_entities import Link from sts.entities.sts_entities import FuzzSoftwareSwitch from sts.topology.graph import Graph from sts.topology.graph import TopologyGraph class GraphTest(unittest.TestCase): """ Testing 
sts.topology.base.Graph """ def test_init(self): # Arrange vertices = {1: None, 2: {'a': 'b'}, 3: None} edges = {1: {1: {}, 2: {'a': 'b'}}, 3: {1: None}} # Act graph1 = Graph() graph2 = Graph(vertices, edges) # Assert self.assertEquals(len(graph1.vertices), 0) self.assertEquals(len(graph1.edges), 0) self.assertEquals(len(graph2.vertices), len(vertices)) self.assertEquals(len(graph2.edges), 3) self.assertEquals(graph2.vertices[1], {}) self.assertEquals(graph2.vertices[2], vertices[2]) self.assertEquals(graph2.vertices[3], {}) self.assertEquals(graph2.edges[0], (1, 1, {})) self.assertEquals(graph2.edges[1], (1, 2, edges[1][2])) self.assertEquals(graph2.edges[2], (3, 1, {})) def test_add_vertex(self): # Arrange vertices = {1: None, 2: {'a': 'b'}, 3: None} edges = {1: {1: {}, 2: {'a': 'b'}}, 3: {1: None}} # Act graph = Graph(vertices, edges) graph.add_vertex(4, c='d') graph.add_vertex(5) # Assert self.assertEquals(len(graph.vertices), len(vertices) + 2) self.assertEquals(len(graph.edges), 3) self.assertEquals(graph.vertices[1], {}) self.assertEquals(graph.vertices[2], vertices[2]) self.assertEquals(graph.vertices[3], {}) self.assertEquals(graph.vertices[4], {'c': 'd'}) self.assertEquals(graph.vertices[5], {}) self.assertTrue(graph.has_vertex(1)) self.assertTrue(graph.has_vertex(2)) self.assertTrue(graph.has_vertex(3)) self.assertTrue(graph.has_vertex(4)) self.assertTrue(graph.has_vertex(5)) self.assertFalse(graph.has_vertex(6)) def test_has_vertex(self): # Arrange vertices = {1: None, 2: {'a': 'b'}, 3: None} edges = {1: {1: {}, 2: {'a': 'b'}}, 3: {1: None}} # Act graph = Graph(vertices, edges) graph.add_vertex(4, c='d') graph.add_vertex(5) # Assert self.assertTrue(graph.has_vertex(1)) self.assertTrue(graph.has_vertex(2)) self.assertTrue(graph.has_vertex(3)) self.assertTrue(graph.has_vertex(4)) self.assertTrue(graph.has_vertex(5)) self.assertFalse(graph.has_vertex(6)) def test_edges_iter(self): # Arrange edges = {1: {1: {}, 2: {'a': 'b'}}, 3: {1: {}}} graph = Graph(vertices=None, edges=edges) # Act edges1 = list(graph.edges_iter(include_attrs=False)) edges2 = list(graph.edges_iter(include_attrs=True)) # Assert for edge in edges1: self.assertEquals(len(edge), 2) self.assertIn(edge[0], edges) self.assertIn(edge[1], edges[edge[0]]) for edge in edges2: self.assertIn(edge[0], edges) self.assertIn(edge[1], edges[edge[0]]) self.assertEquals(edges[edge[0]][edge[1]], edge[2]) def test_edges_iter_with_check(self): # Arrange edges = {1: {1: {}, 2: {'a': 'b'}}, 3: {1: {}}} graph = Graph(vertices=None, edges=edges) check = lambda v1, v2, attrs: attrs.get('a', None) is not None # Act edges1 = list(graph.edges_iter_with_check(check, include_attrs=False)) edges2 = list(graph.edges_iter_with_check(check, include_attrs=True)) # Assert self.assertEquals(edges1, [(1, 2)]) self.assertEquals(edges2, [(1, 2, {'a': 'b'})]) def test_vertices_iter(self): # Arrange vertices = {1: None, 2: {'a': 'b'}, 3: None, 4: None, 5: None} graph = Graph(vertices) # Act vertices1 = list(graph.vertices_iter(include_attrs=False)) vertices2 = list(graph.vertices_iter(include_attrs=True)) # Assert for vertex in vertices1: self.assertTrue(vertex in vertices) for vertex, value in vertices2: value = value if value != {} else None self.assertEquals(vertices[vertex], value) def test_vertices_iter_with_check(self): # Arrange vertices = {1: None, 2: {'a': 'b'}, 3: None, 4: None, 5: None} graph = Graph(vertices) check = lambda v, attrs: attrs.get('a', None) is not None # Act vertices1 = list(graph.vertices_iter_with_check(check, 
include_attrs=False)) vertices2 = list(graph.vertices_iter_with_check(check, include_attrs=True)) # Assert self.assertEquals(vertices1, [2]) self.assertEquals(vertices2, [(2, vertices[2])]) def test_add_edge(self): # Arrange vertices = {1: None, 2: {'a': 'b'}, 3: None} edges = {1: {1: {}, 2: {'a': 'b'}}, 3: {1: None}} expected = [(1, 1, {}), (1, 1, {}), (1, 2, edges[1][2]), (3, 1, {}), (1, 3, {}), (1, 4, {'c': 'd'})] # Act graph = Graph(vertices, edges) graph.add_edge(1, 3) graph.add_edge(1, 4, c='d') # Assert self.assertEquals(len(graph.vertices), len(vertices) + 1) self.assertEquals(len(graph.edges), 3 + 2) self.assertEquals(graph.vertices[1], {}) self.assertEquals(graph.vertices[2], vertices[2]) self.assertEquals(graph.vertices[3], {}) self.assertEquals(graph.vertices[4], {}) self.assertTrue(graph.has_edge(1, 2)) self.assertFalse(graph.has_edge(2, 4)) self.assertFalse(graph.has_edge(9, 6)) for edge in expected: self.assertTrue(edge in graph.edges) def test_remove_edge(self): # Arrange graph = Graph() edge1 = graph.add_edge(1, 2) edge2 = graph.add_edge(2, 3) edge3 = graph.add_edge(2, 4) # Act graph.remove_edge(*edge1) graph.remove_edge(*edge2) # Assert self.assertRaises(AssertionError, graph.remove_edge, 10, 20) self.assertFalse(graph.has_edge(*edge1)) self.assertFalse(graph.has_edge(*edge2)) self.assertTrue(graph.has_edge(*edge3)) def test_remove_vertex(self): # Arrange graph = Graph() v1, v2, v3, v4, v5, v6, v7 = 1, 2, 3, 4, 5, 6, 7 graph.add_vertex(v1) graph.add_vertex(v2) graph.add_vertex(v3) graph.add_vertex(v4) graph.add_vertex(v5) graph.add_vertex(v6) graph.add_vertex(v7) e1 = graph.add_edge(v1, v2) e2 = graph.add_edge(v3, v4) e3 = graph.add_edge(v3, v5) # Act graph.remove_vertex(v1, remove_edges=True) graph.remove_vertex(v6, remove_edges=False) self.assertRaises(AssertionError, graph.remove_vertex, v3, remove_edges=False) graph.remove_vertex(v3, remove_edges=True) # Assert self.assertFalse(graph.has_vertex(v1)) self.assertTrue(graph.has_vertex(v2)) self.assertFalse(graph.has_vertex(v3)) self.assertTrue(graph.has_vertex(v4)) self.assertTrue(graph.has_vertex(v5)) self.assertFalse(graph.has_vertex(v6)) self.assertTrue(graph.has_vertex(v7)) self.assertFalse(graph.has_edge(*e1)) self.assertFalse(graph.has_edge(*e2)) self.assertFalse(graph.has_edge(*e3)) def test_edges_src(self): # Arrange v1, v2, v3, v4 = 1, 2, 3, 4 g = Graph() e1 = g.add_edge(v1, v2) e2 = g.add_edge(v2, v3) e3 = g.add_edge(v2, v4) # Act v1_src = g.edges_src(v1) v2_src = g.edges_src(v2) v3_src = g.edges_src(v3) # Assert self.assertItemsEqual([e1], v1_src) self.assertItemsEqual([e2, e3], v2_src) self.assertItemsEqual([], v3_src) def test_edges_dst(self): # Arrange v1, v2, v3, v4 = 1, 2, 3, 4 g = Graph() e1 = g.add_edge(v1, v2) e2 = g.add_edge(v1, v3) e3 = g.add_edge(v2, v3) g.add_vertex(v4) # Act v1_dst = g.edges_dst(v1) v2_dst = g.edges_dst(v2) v3_dst = g.edges_dst(v3) v4_dst = g.edges_dst(v4) # Assert self.assertItemsEqual([], v1_dst) self.assertItemsEqual([e1], v2_dst) self.assertItemsEqual([e2, e3], v3_dst) self.assertEquals(v4_dst, []) class TopologyGraphTest(unittest.TestCase): def test_init(self): # Arrange h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1') h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.2') h1 = Host(h1_eth1, hid=1) h2 = Host(h2_eth1, hid=2) hosts = [h1, h2] interfaces = [h1_eth1, h2_eth1] s1 = FuzzSoftwareSwitch(1, 's1', ports=3) s2 = FuzzSoftwareSwitch(2, 's2', ports=3) s3 = FuzzSoftwareSwitch(3, 's3', ports=3) switches = [s1, s2, s3] ports = 
s1.ports.values() + s2.ports.values() + s3.ports.values() l1 = Link(s1, s1.ports[1], s2, s2.ports[1]) l2 = Link(s1, s1.ports[2], s2, s2.ports[2]) l3 = AccessLink(h1, h1_eth1, s1, s1.ports[3]) l4 = AccessLink(h2, h2_eth1, s1, s2.ports[3]) links = [l1, l2, l3, l4] # Act graph = TopologyGraph(hosts, switches, links) # Assert self.assertItemsEqual(hosts, graph.hosts) self.assertItemsEqual(switches, graph.switches) self.assertItemsEqual(links, graph.links) self.assertItemsEqual(interfaces, graph.interfaces) self.assertItemsEqual(ports, graph.ports) def test_add_host(self): # Arrange h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1') h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.2') h1 = Host(h1_eth1, hid=1) h2 = Host(h2_eth1, hid=2) h3 = Host(None, hid=3) graph = TopologyGraph() # Act graph.add_host(h1) graph.add_host(h2) # Assert self.assertItemsEqual([h1.name, h2.name], list(graph.hosts_iter(False))) self.assertTrue(graph.has_host(h1.name)) self.assertTrue(graph.has_host(h2.name)) self.assertFalse(graph.has_host(h3.name)) def test_remove_host(self): # Arrange h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1') h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.2') h1 = Host(h1_eth1, hid=1) h2 = Host(h2_eth1, hid=2) h3 = Host(None, hid=3) graph = TopologyGraph() graph.add_host(h1) graph.add_host(h2) # Act graph.remove_host(h1.name) graph.remove_host(h2.name) remove_h3 = lambda: graph.remove_host(h3.name) # Assert self.assertRaises(AssertionError, remove_h3) self.assertFalse(graph.hosts) self.assertFalse(graph.has_host(h1.name)) self.assertFalse(graph.has_host(h2.name)) self.assertFalse(graph.has_host(h3.name)) def test_add_switch(self): # Arrange s1 = FuzzSoftwareSwitch(1, 's1', ports=1) s2 = FuzzSoftwareSwitch(2, 's2', ports=1) s3 = FuzzSoftwareSwitch(3, 's3', ports=1) graph = TopologyGraph() # Act graph.add_switch(s1) graph.add_switch(s2) # Assert self.assertItemsEqual([s1.name, s2.name], list(graph.switches_iter(False))) self.assertTrue(graph.has_switch(s1.name)) self.assertTrue(graph.has_switch(s2.name)) self.assertFalse(graph.has_switch(s3.name)) def test_remove_switch(self): # Arrange s1 = FuzzSoftwareSwitch(1, 's1', ports=1) s2 = FuzzSoftwareSwitch(2, 's2', ports=1) s3 = FuzzSoftwareSwitch(3, 's3', ports=1) graph = TopologyGraph() graph.add_switch(s1) graph.add_switch(s2) # Act graph.remove_switch(s1.name) graph.remove_switch(s2) remove_s3 = lambda: graph.remove_switch(s3.dpid) # Assert self.assertRaises(AssertionError, remove_s3) self.assertFalse(graph.switches) self.assertFalse(graph.has_host(s1.dpid)) self.assertFalse(graph.has_host(s2.dpid)) self.assertFalse(graph.has_host(s3.dpid)) def test_add_link(self): # Arrange s1 = FuzzSoftwareSwitch(1, 's1', ports=2) s2 = FuzzSoftwareSwitch(2, 's2', ports=2) l1 = Link(s1, s1.ports[1], s2, s2.ports[1]) l2 = Link(s1, ofp_phy_port(), s2, s2.ports[2]) graph = TopologyGraph() graph.add_switch(s1) graph.add_switch( s2) # Act link = graph.add_link(l1) fail_add = lambda: graph.add_link(l2) # Assert self.assertEquals(link, l1) self.assertTrue(graph.has_link(l1)) self.assertFalse(graph.has_link(l2)) self.assertIsNotNone(graph.get_link('s1-1', 's2-1')) self.assertIsNone(graph.get_link('s1-2', 's2-2')) self.assertRaises(AssertionError, fail_add) def test_remove_link(self): # Arrange s1 = FuzzSoftwareSwitch(1, 's1', ports=4) s2 = FuzzSoftwareSwitch(2, 's2', ports=4) l1 = Link(s1, s1.ports[1], s2, s2.ports[1]) l2 = Link(s1, s1.ports[2], s2, s2.ports[2]) l3 = 
Link(s1, s1.ports[3], s2, s2.ports[3]) l4 = Link(s1, s1.ports[4], s2, s2.ports[4]) graph = TopologyGraph() graph.add_switch(s1) graph.add_switch(s2) graph.add_link(l1) graph.add_link(l2, bidir=l2) graph.add_link(l3) # Act graph.remove_link(l1) graph.remove_link(l2) fail_remove = lambda: graph.remove_link(l4) # Assert self.assertFalse(graph.has_link(l1)) self.assertFalse(graph.has_link(l2)) self.assertTrue(graph.has_link(l3)) self.assertIsNone(graph.get_link("s1-1", "s2-1")) self.assertIsNone(graph.get_link("s1-2", "s2-2")) self.assertIsNotNone(graph.get_link("s1-3", "s2-3")) self.assertRaises(AssertionError, fail_remove) def test_get_host_links(self): # Arrange h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1') h1_eth2 = HostInterface(hw_addr='11:22:33:44:55:67', ip_or_ips='10.0.0.2') h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.3') h1 = Host([h1_eth1, h1_eth2], hid=1) h2 = Host(h2_eth1, hid=2) s1 = FuzzSoftwareSwitch(1, 's1', ports=3) s2 = FuzzSoftwareSwitch(2, 's2', ports=3) l1 = AccessLink(h1, h1_eth1, s1, s1.ports[1]) l2 = AccessLink(h1, h1_eth2, s2, s2.ports[1]) l3 = AccessLink(h2, h2_eth1, s2, s2.ports[2]) l4 = Link(s1, s1.ports[3], s2, s2.ports[2]) graph = TopologyGraph() graph.add_switch(s1) graph.add_switch(s2) graph.add_host(h1) graph.add_host(h2) graph.add_link(l1) graph.add_link(l2) graph.add_link(l3) graph.add_link(l4) # Act h1_links = graph.get_host_links(h1) h2_links = graph.get_host_links(h2) # Assert self.assertItemsEqual([l1, l2], h1_links) self.assertItemsEqual([l3], h2_links) def test_get_switches_links(self): # Arrange h1_eth1 = HostInterface(hw_addr='11:22:33:44:55:66', ip_or_ips='10.0.0.1') h1_eth2 = HostInterface(hw_addr='11:22:33:44:55:67', ip_or_ips='10.0.0.2') h2_eth1 = HostInterface(hw_addr='11:22:33:44:55:77', ip_or_ips='10.0.0.3') h1 = Host([h1_eth1, h1_eth2], hid=1) h2 = Host(h2_eth1, hid=2) s1 = FuzzSoftwareSwitch(1, 's1', ports=3) s2 = FuzzSoftwareSwitch(2, 's2', ports=3) l1 = AccessLink(h1, h1_eth1, s1, s1.ports[1]) l2 = AccessLink(h1, h1_eth2, s2, s2.ports[1]) l3 = AccessLink(h2, h2_eth1, s2, s2.ports[2]) l4 = Link(s1, s1.ports[3], s2, s2.ports[2]) graph = TopologyGraph() graph.add_switch(s1) graph.add_switch(s2) graph.add_host(h1) graph.add_host(h2) graph.add_link(l1) graph.add_link(l2) graph.add_link(l3) graph.add_link(l4) # Act s1_links = graph.get_switch_links(s1) s2_links = graph.get_switch_links(s2) # Assert self.assertItemsEqual([l1, l4], s1_links) self.assertItemsEqual([l2, l3, l4], s2_links) ``` #### File: sts/topology/sts_topology_test.py ```python import unittest from pox.lib.ioworker.io_worker import RecocoIOLoop from sts.topology.sts_topology import Topology class TopologyTest(unittest.TestCase): _io_loop = RecocoIOLoop() _io_ctor = _io_loop.create_worker_for_socket def test_create_switch(self): # Arrange topo = Topology() s1_dpid = 1 s2_dpid = 2 s1_ports = 3 s2_ports = 3 # Act s1 = topo.create_switch(s1_dpid, s1_ports) s2 = topo.create_switch(s2_dpid, s2_ports) # Assert self.assertEqual(len(s1.ports), s1_ports) self.assertEqual(s1.dpid, s1_dpid) self.assertEqual(len(s2.ports), s2_ports) self.assertEqual(s2.dpid, s2_dpid) self.assertItemsEqual([s1, s2], topo.switches) self.assertNotEqual(s2.ports[1].hw_addr, s1.ports[1].hw_addr) ``` #### File: unit/sts/deferred_io_worker_test.py ```python import itertools import os.path import sys import unittest sys.path.append(os.path.join(os.path.dirname(__file__), *itertools.repeat("..", 3))) from pox.lib.ioworker.io_worker import IOWorker from 
sts.util.deferred_io import DeferredIOWorker class DeferredIOWorkerTest(unittest.TestCase): @staticmethod def call_later(func): # call now! func() def test_not_sent_until_permitted(self): i = DeferredIOWorker(IOWorker()) i.set_receive_handler(self.call_later) i.block() i.send("foo") self.assertFalse(i._io_worker._ready_to_send) self.assertFalse(i._send_queue.empty()) i.unblock() self.assertTrue(i._send_queue.empty()) i._io_worker._consume_send_buf(3) self.assertFalse(i._io_worker._ready_to_send) def test_not_received_until_permitted(self): i = DeferredIOWorker(IOWorker()) i.set_receive_handler(self.call_later) i.block() self.data = None def d(worker): self.data = worker.peek_receive_buf() i.set_receive_handler(d) i._io_worker._push_receive_data("bar") self.assertEqual(self.data, None) i.unblock() self.assertEqual(self.data, "bar") # Now if unblocked, should go through immediately # Note: d does not consume the data i._io_worker._push_receive_data("hepp") self.assertEqual(self.data, "barhepp") def test_receive_consume(self): i = DeferredIOWorker(IOWorker()) i.set_receive_handler(self.call_later) self.data = None def consume(worker): self.data = worker.peek_receive_buf() worker.consume_receive_buf(len(self.data)) i.set_receive_handler(consume) i.block() i._io_worker._push_receive_data("bar") self.assertEqual(self.data, None) i.unblock() self.assertEqual(self.data, "bar") # data has been consumed i._io_worker._push_receive_data("hepp") self.assertEqual(self.data, "hepp") ``` #### File: sts/entities/sts_entities_test.py ```python import unittest import mock from pox.lib.addresses import EthAddr from pox.lib.addresses import IPAddr from pox.openflow.libopenflow_01 import ofp_phy_port from sts.entities.sts_entities import AccessLink from sts.entities.sts_entities import Link from sts.entities.hosts import Host from sts.entities.hosts import HostInterface class LinkTest(unittest.TestCase): @mock.patch('sts.entities.sts_entities.FuzzSoftwareSwitch') def test_init(self, SwitchCls): sw1 = mock.MagicMock() sw1.dpid = 1 sw2 = SwitchCls() sw2.dpid = 2 # It's really hard to mock this, because of using assert_type p1 = ofp_phy_port(port_no=1) p2 = ofp_phy_port(port_no=2) sw1.ports = [p1] sw2.ports = [p2] link = Link(sw1, p1, sw2, p2) self.assertEquals(sw1.dpid, link.start_software_switch.dpid) self.assertEquals(sw2.dpid, link.end_software_switch.dpid) self.assertEquals(p1, link.start_port) self.assertEquals(p2, link.end_port) @mock.patch('sts.entities.sts_entities.FuzzSoftwareSwitch') def test_eq(self, SwitchCls): sw1 = mock.MagicMock() sw1.dpid = 1 sw2 = SwitchCls() sw2.dpid = 2 # It's really hard to mock this, because of using assert_type p1 = ofp_phy_port(port_no=1) p2 = ofp_phy_port(port_no=2) sw1.ports = [p1] sw2.ports = [p2] link1 = Link(sw1, p1, sw2, p2) link2 = Link(sw2, p2, sw1, p1) self.assertEquals(link1, link1) self.assertNotEquals(link1, link2) @mock.patch('sts.entities.sts_entities.FuzzSoftwareSwitch') def test_reversed_link(self, SwitchCls): sw1 = mock.MagicMock() sw1.dpid = 1 sw2 = SwitchCls() sw2.dpid = 2 # It's really hard to mock this, because of using assert_type p1 = ofp_phy_port(port_no=1) p2 = ofp_phy_port(port_no=2) sw1.ports = [p1] sw2.ports = [p2] link1 = Link(sw1, p1, sw2, p2) link2 = link1.reversed_link() self.assertNotEquals(link1, link2) self.assertEquals(sw2.dpid, link2.start_software_switch.dpid) self.assertEquals(sw1.dpid, link2.end_software_switch.dpid) self.assertEquals(p2, link2.start_port) self.assertEquals(p1, link2.end_port) def test_to_json(self): # Arrange sw1 = 
mock.Mock() sw1.dpid = 1 sw1.to_json.return_value = 1 sw2 = mock.Mock() sw2.dpid = 2 sw2.to_json.return_value = 2 # It's really hard to mock this, because of using assert_type p1 = ofp_phy_port(port_no=1) p2 = ofp_phy_port(port_no=2) sw1.ports = [p1] sw2.ports = [p2] link = Link(sw1, p1, sw2, p2) # Act json_dict = link.to_json() # Assert self.assertEquals(json_dict['start_node'], 1) self.assertEquals(json_dict['start_port']['port_no'], 1) self.assertEquals(json_dict['end_node'], 2) self.assertEquals(json_dict['end_port']['port_no'], 2) def test_from_json(self): # Arrange sw1 = mock.Mock() sw1.dpid = 1 sw1.to_json.return_value = 1 sw2 = mock.Mock() sw2.dpid = 2 sw2.to_json.return_value = 2 # It's really hard to mock this, because of using assert_type p1 = ofp_phy_port(port_no=1) p2 = ofp_phy_port(port_no=2) sw1.ports = [p1] sw2.ports = [p2] json_dict = {'start_port': {'hw_addr': '00:00:00:00:00:00', 'curr': 0, 'name': '', 'supported': 0, '__type__': 'pox.openflow.libopenflow_01.ofp_phy_port', 'state': 0, 'advertised': 0, 'peer': 0, 'config': 0, 'port_no': 1}, 'start_node': 1, 'end_port': {'hw_addr': '00:00:00:00:00:00', 'curr': 0, 'name': '', 'supported': 0, '__type__': 'pox.openflow.libopenflow_01.ofp_phy_port', 'state': 0, 'advertised': 0, 'peer': 0, 'config': 0, 'port_no': 2}, '__type__': 'sts.entities.sts_entities.Link', 'end_node': 2} # Act link = Link.from_json(json_dict) # Assert self.assertEquals(link.start_node, json_dict['start_node']) self.assertEquals(link.start_port.port_no, json_dict['start_port']['port_no']) self.assertEquals(link.end_node, json_dict['end_node']) self.assertEquals(link.end_port.port_no, json_dict['end_port']['port_no']) class AccessLinkTest(unittest.TestCase): @mock.patch('sts.entities.sts_entities.FuzzSoftwareSwitch') def test_init(self, SwitchCls): # Arrange sw1 = SwitchCls() sw1.dpid = 1 # It's really hard to mock this, because of using assert_type p1 = ofp_phy_port(port_no=1) hw_addr_str = "11:22:33:44:55:66" hw_addr = EthAddr(hw_addr_str) ip_str = "127.0.0.1" ip = IPAddr(ip_str) ifname = "eth0" interface = HostInterface(hw_addr, ip, name=ifname) hname = "h1" hid = 1 host = Host(interface, name=hname, hid=hid) # Act link = AccessLink(host, interface, sw1, p1) # Assert self.assertEquals(link.host, host) self.assertEquals(link.interface, interface) @mock.patch('sts.entities.sts_entities.FuzzSoftwareSwitch') def test_to_jsont(self, SwitchCls): # Arrange sw1 = SwitchCls() sw1.dpid = 1 sw1.to_json.return_value = 1 # It's really hard to mock this, because of using assert_type p1 = ofp_phy_port(port_no=1) hw_addr_str = "11:22:33:44:55:66" hw_addr = EthAddr(hw_addr_str) ip_str = "127.0.0.1" ip = IPAddr(ip_str) ifname = "eth0" interface = HostInterface(hw_addr, ip, name=ifname) hname = "h1" hid = 1 host = Host(interface, name=hname, hid=hid) link = AccessLink(host, interface, sw1, p1) # Act json_dict = link.to_json() # Assert self.assertEquals(json_dict['node1'], host.to_json()) self.assertEquals(json_dict['port1'], interface.to_json()) self.assertEquals(json_dict['node2'], 1) self.assertEquals(json_dict['port2'], p1.to_json()) @mock.patch('sts.entities.sts_entities.FuzzSoftwareSwitch') def test_to_jsont(self, SwitchCls): # Arrange json_dict = { "node1": { "hid": 1, "interfaces": [ { "hw_addr": "11:22:33:44:55:66", "ips": [ "127.0.0.1" ], "__type__": "sts.entities.hosts.HostInterface", "name": "eth0" } ], "__type__": "sts.entities.hosts.Host", "name": "h1" }, "port2": { "hw_addr": "00:00:00:00:00:00", "curr": 0, "name": "", "supported": 0, "__type__": 
"pox.openflow.libopenflow_01.ofp_phy_port", "state": 0, "advertised": 0, "peer": 0, "config": 0, "port_no": 1 }, "node2": 1, "__type__": "sts.entities.sts_entities.AccessLink", "port1": { "hw_addr": "11:22:33:44:55:66", "ips": [ "127.0.0.1" ], "__type__": "sts.entities.hosts.HostInterface", "name": "eth0" } } sw1 = SwitchCls() sw1.dpid = 1 sw1.to_json.return_value = 1 # It's really hard to mock this, because of using assert_type p1 = ofp_phy_port(port_no=1) hw_addr_str = "11:22:33:44:55:66" hw_addr = EthAddr(hw_addr_str) ip_str = "127.0.0.1" ip = IPAddr(ip_str) ifname = "eth0" interface = HostInterface(hw_addr, ip, name=ifname) hname = "h1" hid = 1 host = Host(interface, name=hname, hid=hid) # Act link = AccessLink.from_json(json_dict) # Assert self.assertEquals(link.host.to_json(), host.to_json()) self.assertEquals(link.interface.to_json(), interface.to_json()) self.assertEquals(link.switch, 1) self.assertEquals(link.switch_port.to_json(), p1.to_json()) ``` #### File: unit/sts/event_dag_test.py ```python import unittest import sys import os.path sys.path.append(os.path.dirname(__file__) + "/../../..") from sts.replay_event import * from sts.event_dag import * class MockEvent(InputEvent): def proceed(self, simulation): pass class MockInternalEvent(InternalEvent): def __init__(self, fingerprint, label=None): InternalEvent.__init__(self, label) self.timed_out = False self._fingerprint = fingerprint @property def fingerprint(self): return self._fingerprint def proceed(self, simulation): pass class MockInputEvent(InputEvent): def proceed(self, simulation): pass class event_dag_test(unittest.TestCase): def test_split_basic(self): events = [MockEvent(), MockEvent(), MockEvent(), MockEvent()] dag = EventDag(events) splits = split_list(dag.input_events, 2) self.assertEqual(2, len(splits)) self.assertEqual(2, len(splits[0])) self.assertEqual(2, len(splits[1])) splits = split_list(dag.input_events, 4) self.assertEqual(4, len(splits)) self.assertEqual(1, len(splits[0])) self.assertEqual(1, len(splits[1])) self.assertEqual(1, len(splits[2])) self.assertEqual(1, len(splits[3])) splits = split_list(dag.input_events, 3) self.assertEqual(3, len(splits)) self.assertEqual(4, len(splits[0]) + len(splits[1]) + len(splits[2])) def test_split_single(self): events = [MockEvent()] dag = EventDag(events) splits = split_list(dag.input_events, 1) self.assertEqual(1, len(splits)) self.assertEqual(1, len(splits[0])) def test_split_zero(self): events = [] dag = EventDag(events) splits = split_list(dag.input_events, 1) self.assertEqual(1, len(splits)) self.assertEqual(0, len(splits[0])) def test_split_odd(self): events = [MockEvent(), MockEvent(), MockEvent()] dag = EventDag(events) splits = split_list(dag.input_events, 2) self.assertEqual(2, len(splits)) self.assertEqual(3, len(splits[0]) + len(splits[1])) splits = split_list(dag.input_events, 3) self.assertEqual(3, len(splits)) self.assertEqual(3, len(splits[0]) + len(splits[1]) + len(splits[2])) def test_event_dag(self): event_dag = EventDag( [ MockInternalEvent("a"), MockInputEvent() ]) self.assertEqual(2, len(event_dag)) self.assertEqual(2, len(event_dag.filter_unsupported_input_types())) event_dag.mark_invalid_input_sequences() self.assertEqual(2, len(event_dag)) def test_event_dag_subset(self): mockInputEvent = MockInputEvent() mockInputEvent2 = MockInputEvent() events = [ MockInternalEvent('a'), mockInputEvent, mockInputEvent2 ] event_dag = EventDag(events) # subset of (full set) is a noop self.assertEqual( event_dag.events, 
event_dag.input_subset(events).events) # subset of () empty retains the internal event self.assertEqual( event_dag.events[0:1], event_dag.input_subset([]).events) sub_graph = event_dag.input_subset([mockInputEvent]) self.assertEqual( event_dag.events[0:2], sub_graph.events) def test_event_dag_complement(self): mockInputEvent = MockInputEvent() mockInputEvent2 = MockInputEvent() events = [ MockInternalEvent('a'), mockInputEvent, mockInputEvent2 ] event_dag = EventDag(events) # complement of (nothing) is full set self.assertEqual( event_dag.events, event_dag.input_complement([]).events) # complement of (all elements) retains only the internal event self.assertEqual( event_dag.events[0:1], event_dag.input_complement(events).events) sub_graph = event_dag.input_complement([mockInputEvent]) self.assertEqual( [ e for (i, e) in enumerate(event_dag.events) if i==0 or i==2 ], sub_graph.events) def test_migration_simple(self): events = [ MockInternalEvent('a'), HostMigration(1,1,2,2,"host1"), MockInternalEvent('b'), HostMigration(2,2,3,3,"host1"), MockInputEvent() ] # Don't prune anything event_dag = EventDag(events) new_dag = event_dag.input_subset(events) self.assertEqual(events, new_dag.events) def test_migration_prune_1(self): events = [ MockInternalEvent('a'), HostMigration(1,1,2,2,"host1"), MockInternalEvent('b'), HostMigration(2,2,3,3,"host1"), MockInputEvent() ] # Prune the first migration subset = [events[1]] event_dag = EventDag(events) new_dag = event_dag.input_complement(subset) fingerprint = ('HostMigration',1,1,3,3,"host1") self.assertEqual(fingerprint, new_dag.events[2].fingerprint) def test_migration_prune_2(self): events = [ MockInternalEvent('a'), HostMigration(1,1,2,2,"host1"), MockInternalEvent('b'), HostMigration(2,2,3,3,"host1"), MockInputEvent(), HostMigration(3,3,4,4,"host1"), HostMigration(4,4,5,5,"host1") ] # Prune the seconds and third migration subset = [events[3], events[4], events[5]] event_dag = EventDag(events) new_dag = event_dag.input_complement(subset) fingerprint = ('HostMigration',2,2,5,5,"host1") self.assertEqual(fingerprint, new_dag.events[3].fingerprint) def test_migration_prune_last(self): events = [ MockInternalEvent('a'), HostMigration(1,1,2,2,"host1"), MockInternalEvent('b'), HostMigration(2,2,3,3,"host1"), MockInputEvent() ] # Prune the last migration subset = [events[3]] event_dag = EventDag(events) new_dag = event_dag.input_complement(subset) fingerprint = ('HostMigration',1,1,2,2,"host1") self.assertEqual(fingerprint, new_dag.events[1].fingerprint) if __name__ == '__main__': unittest.main() ``` #### File: unit/sts/socket_multiplexer_test.py ```python import unittest import sys import os import time from threading import Thread import logging log = logging.getLogger() from sts.util.socket_mux.server_socket_multiplexer import * from sts.util.socket_mux.sts_socket_multiplexer import * sys.path.append(os.path.dirname(__file__) + "/../../..") class MultiplexerTest(unittest.TestCase): client_messages = [ "foo", "bar", "baz" ] def setup_server(self, address): import socket mux_select = ServerMultiplexedSelect() ServerMockSocket.bind_called = False listener = ServerMockSocket(socket.AF_UNIX, socket.SOCK_STREAM, set_true_listen_socket=mux_select.set_true_listen_socket) listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) listener.bind(address) listener.listen(16) return (mux_select, listener) def setup_client(self, num_socks, address): try: from pox.lib.util import connect_socket_with_backoff io_master = MultiplexedSelect() socket = 
connect_socket_with_backoff(address=address) io_worker = io_master.create_worker_for_socket(socket) # TODO(cs): unused variable demux demux = STSSocketDemultiplexer(io_worker, address) mock_socks = [] for i in xrange(num_socks): mock_socket = STSMockSocket(None, None) mock_socket.connect(address) mock_socket.send(self.client_messages[i]) mock_socks.append(mock_socket) # Now flush messages while [ m for m in mock_socks if m.json_worker.io_worker._ready_to_send ] != []: io_master.select(mock_socks, mock_socks, []) except Exception as e: log.critical("Client died: %s" % e) raise e def wait_for_next_accept(self, listener, mux_select): log.info("waiting for next accept") rl = [] while listener not in rl: (rl, _, _) = mux_select.select([listener], [], [], 0.1) def test_basic(self): address = "basic_pipe" try: t = Thread(target=self.setup_client, args=(1,address,), name="MainThread") t.start() (mux_select, listener) = self.setup_server(address) # wait for client to connect self.wait_for_next_accept(listener, mux_select) mock_sock = listener.accept()[0] # now read client message (rl, _, _) = mux_select.select([mock_sock], [], []) start = last = time.time() while mock_sock not in rl: time.sleep(0.05) if time.time() - start > 5: self.fail("Did not find socket in rl in 5 seconds") elif time.time() - last > 1: log.debug("waiting for socket %s in rl %s..." % ( str(mock_sock), repr(rl))) last = time.time() (rl, _, _) = mux_select.select([mock_sock], [], []) d = mock_sock.recv(2048) self.assertEqual(self.client_messages[0], d) finally: if ServerSocketDemultiplexer.instance is not None: ServerSocketDemultiplexer.instance = None try: os.unlink(address) except OSError: if os.path.exists(address): raise RuntimeError("can't remove PIPE socket %s" % str(address)) def test_three_incoming(self): address = "three_pipe" try: t = Thread(target=self.setup_client, args=(3,address,), name="MainThread") t.start() (mux_select, listener) = self.setup_server(address) for i in xrange(len(self.client_messages)): self.wait_for_next_accept(listener, mux_select) mock_sock = listener.accept()[0] (rl, _, _) = mux_select.select([mock_sock], [], []) start = last = time.time() while mock_sock not in rl: if time.time() - start > 5: self.fail("Did not find socket in rl in 5 seconds") elif time.time() - last > 1: log.debug("waiting for socket %s in rl %s..." 
% ( str(mock_sock), repr(rl))) last = time.time() (rl, _, _) = mux_select.select([mock_sock], [], []) time.sleep(0.05) d = mock_sock.recv(2048) # order should be deterministic self.assertEqual(self.client_messages[i], d) finally: if ServerSocketDemultiplexer.instance is not None: ServerSocketDemultiplexer.instance = None try: os.unlink(address) except OSError: if os.path.exists(address): raise RuntimeError("can't remove PIPE socket %s" % str(address)) ``` #### File: sts/topology/teston_switches_manager_test.py ```python import mock import unittest from pox.lib.addresses import EthAddr from pox.lib.addresses import IPAddr from sts.topology.teston_switches_manager import TestONSwitchesManager class TestONSwitchesManagerTest(unittest.TestCase): def _mock_teston(self): mininet_net = """mininet> net h1 h1-eth0:s2-eth1 h2 h2-eth0:s2-eth2 h3 h3-eth0:s2-eth3 h4 h4-eth0:s3-eth1 h5 h5-eth0:s3-eth2 h6 h6-eth0:s3-eth3 h7 h7-eth0:s4-eth1 h8 h8-eth0:s4-eth2 h9 h9-eth0:s4-eth3 s1 lo: s1-eth1:s2-eth4 s1-eth2:s3-eth4 s1-eth3:s4-eth4 s2 lo: s2-eth1:h1-eth0 s2-eth2:h2-eth0 s2-eth3:h3-eth0 s2-eth4:s1-eth1 s3 lo: s3-eth1:h4-eth0 s3-eth2:h5-eth0 s3-eth3:h6-eth0 s3-eth4:s1-eth2 s4 lo: s4-eth1:h7-eth0 s4-eth2:h8-eth0 s4-eth3:h9-eth0 s4-eth4:s1-eth3 """ mininet_dump = """<Host h1: h1-eth0:10.0.0.1 pid=26370> <Host h2: h2-eth0:10.0.0.2 pid=26371> <Host h3: h3-eth0:10.0.0.3 pid=26372> <Host h4: h4-eth0:10.0.0.4 pid=26373> <Host h5: h5-eth0:10.0.0.5 pid=26374> <Host h6: h6-eth0:10.0.0.6 pid=26375> <Host h7: h7-eth0:10.0.0.7 pid=26376> <Host h8: h8-eth0:10.0.0.8 pid=26377> <Host h9: h9-eth0:10.0.0.9 pid=26378> <OVSSwitch s1: lo:127.0.0.1,s1-eth1:None,s1-eth2:None,s1-eth3:None pid=26381> <OVSSwitch s2: lo:127.0.0.1,s2-eth1:None,s2-eth2:None,s2-eth3:None,s2-eth4:None pid=26386> <OVSSwitch s3: lo:127.0.0.1,s3-eth1:None,s3-eth2:None,s3-eth3:None,s3-eth4:None pid=26391> <OVSSwitch s4: lo:127.0.0.1,s4-eth1:None,s4-eth2:None,s4-eth3:None,s4-eth4:None pid=26396> <RemoteController c0: 127.0.0.1:6633 pid=26363> """ s1_ports = """ , i.isUp()) for i in s1.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=lo,mac=None,ip=127.0.0.1,isUp=True name=s1-eth1,mac=ce:c5:1e:ee:36:b4,ip=None,isUp=True name=s1-eth2,mac=de:29:d4:1c:4d:a1,ip=None,isUp=True name=s1-eth3,mac=b6:2e:aa:c3:2e:0d,ip=None,isUp=True mininet> """ s2_ports = """ , i.isUp()) for i in s2.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=lo,mac=None,ip=127.0.0.1,isUp=True name=s2-eth1,mac=3e:cd:cd:bc:d0:bc,ip=None,isUp=True name=s2-eth2,mac=76:ea:fc:0c:dd:f2,ip=None,isUp=True name=s2-eth3,mac=5e:2b:cc:f5:a2:e5,ip=None,isUp=True name=s2-eth4,mac=42:b2:02:de:49:5c,ip=None,isUp=True mininet> """ s3_ports = """ , i.isUp()) for i in s3.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=lo,mac=None,ip=127.0.0.1,isUp=True name=s3-eth1,mac=66:c8:b8:3a:d5:c0,ip=None,isUp=True name=s3-eth2,mac=16:97:73:d7:43:8a,ip=None,isUp=True name=s3-eth3,mac=96:46:1e:cc:26:36,ip=None,isUp=True name=s3-eth4,mac=2a:d3:7e:8a:22:72,ip=None,isUp=True mininet> """ s4_ports = """ , i.isUp()) for i in s4.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=lo,mac=None,ip=127.0.0.1,isUp=True name=s4-eth1,mac=4a:82:af:b3:dd:bf,ip=None,isUp=True name=s4-eth2,mac=fa:30:f4:61:c7:c2,ip=None,isUp=True name=s4-eth3,mac=c2:8f:63:d1:27:f9,ip=None,isUp=True name=s4-eth4,mac=5e:c8:ae:c9:2c:fc,ip=None,isUp=True mininet> """ def getInterfaces(name): if name == 's1': return s1_ports elif name == 's2': return s2_ports elif name == 's3': return s3_ports elif name == 's4': return s4_ports else: raise 
ValueError("No ports were mocked for switch: %s" % name) def getSwitchDPID(name): if name == 's1': return 1 elif name == 's2': return 2 elif name == 's3': return 3 elif name == 's4': return 4 else: raise ValueError("No DPID mocked for switch: %s" % name) mn_driver = mock.Mock(name='TestONMininetDriver') mn_driver.dump.return_value = mininet_dump mn_driver.net.return_value = mininet_net mn_driver.getInterfaces.side_effect = getInterfaces mn_driver.getSwitchDPID.side_effect = getSwitchDPID return mn_driver def test_read(self): # Arrange mn_driver = self._mock_teston() # Act sw_mgm = TestONSwitchesManager(mn_driver) # Assert self.assertEquals(len(sw_mgm.switches), 4) for switch in sw_mgm.switches: if switch.name == 's1': self.assertEquals(len(switch.ports), 4, switch.ports) else: self.assertEquals(len(switch.ports), 5, switch.ports) s4 = sw_mgm.get_switch('s4') self.assertIn(0xfffe, s4.ports) # Local interface self.assertIn(1, s4.ports) self.assertIn(2, s4.ports) self.assertIn(3, s4.ports) self.assertIn(4, s4.ports) self.assertEquals(s4.ports[0xfffe].name, 'lo') self.assertEquals(s4.ports[0xfffe].hw_addr, None) self.assertEquals(s4.ports[0xfffe].ips, [IPAddr('127.0.0.1')]) self.assertEquals(s4.ports[1].name, 's4-eth1') self.assertEquals(s4.ports[1].hw_addr, EthAddr('4a:82:af:b3:dd:bf')) self.assertEquals(s4.ports[1].ips, []) self.assertEquals(s4.ports[2].name, 's4-eth2') self.assertEquals(s4.ports[2].hw_addr, EthAddr('fa:30:f4:61:c7:c2')) self.assertEquals(s4.ports[2].ips, []) self.assertEquals(s4.ports[3].name, 's4-eth3') self.assertEquals(s4.ports[3].hw_addr, EthAddr('c2:8f:63:d1:27:f9')) self.assertEquals(s4.ports[3].ips, []) self.assertEquals(s4.ports[4].name, 's4-eth4') self.assertEquals(s4.ports[4].hw_addr, EthAddr('5e:c8:ae:c9:2c:fc')) self.assertEquals(s4.ports[4].ips, []) def test_connect_to_controllers(self): # Arrange mn_driver = self._mock_teston() sw_mgm = TestONSwitchesManager(mn_driver) c1 = mock.Mock(name='c1') c1.config.address = '192.168.5.11' c1.config.port = 6633 c2 = mock.Mock(name='c2') c2.config.address = '192.168.5.12' c2.config.port = 6633 s1 = sw_mgm.get_switch('s1') # Act sw_mgm.connect_to_controllers(s1, [c1, c2]) # Assert mn_driver.assign_sw_controller.assert_called_with( sw='1', COUNT=2, ip1='192.168.5.11', port1=6633, ip2='192.168.5.12', port2=6633) def test_disconnect_controllers(self): # Arrange mn_driver = self._mock_teston() sw_mgm = TestONSwitchesManager(mn_driver) s1 = sw_mgm.get_switch('s1') # Act sw_mgm.disconnect_controllers(s1) # Assert mn_driver.delete_sw_controller.assert_called_with('s1') def test_get_connected_controllers(self): # Arrange mn_driver = self._mock_teston() mn_driver.get_sw_controller.return_value = """sh ovs-vsctl get-controller s1 ptcp:6634 tcp:192.168.5.11:6633 tcp:192.168.5.12:6633 tcp:192.168.5.13:6633 mininet>""" sw_mgm = TestONSwitchesManager(mn_driver) c1 = mock.Mock(name='c1') c1.config.address = '192.168.5.11' c1.config.port = 6633 c2 = mock.Mock(name='c2') c2.config.address = '192.168.5.12' c2.config.port = 6633 s1 = sw_mgm.get_switch('s1') c_mgm = mock.Mock(name='ControllersManager') c_mgm.controllers = set([c1, c2]) # Act controllers = sw_mgm.get_connected_controllers(s1, c_mgm) # Assert self.assertItemsEqual([c1, c2], controllers) def test_get_switch(self): # Arrange mn_driver = self._mock_teston() # Act sw_mgm = TestONSwitchesManager(mn_driver) # Assert get_s1 = sw_mgm.get_switch('s1') get_s2 = sw_mgm.get_switch('s2') get_s20 = sw_mgm.get_switch('s20') # Assert self.assertEquals(get_s1.name, 's1') 
self.assertEquals(get_s2.name, 's2') self.assertIsNone(get_s20) def test_has_switch(self): # Arrange mn_driver = self._mock_teston() # Act sw_mgm = TestONSwitchesManager(mn_driver) # Assert has_s1 = sw_mgm.has_switch('s1') has_s2 = sw_mgm.has_switch('s2') has_s20 = sw_mgm.has_switch('s20') # Assert self.assertTrue(has_s1) self.assertTrue(has_s2) self.assertFalse(has_s20) def test_get_switch_dpid(self): # Arrange mn_driver = self._mock_teston() # Act sw_mgm = TestONSwitchesManager(mn_driver) # Assert get_s1 = sw_mgm.get_switch_dpid(1) get_s2 = sw_mgm.get_switch_dpid(2) get_s20 = sw_mgm.get_switch_dpid(20) # Assert self.assertEquals(get_s1.name, 's1') self.assertEquals(get_s2.name, 's2') self.assertIsNone(get_s20) def test_edge_switches(self): # Arrange mn_driver = self._mock_teston() # Act sw_mgm = TestONSwitchesManager(mn_driver) s2 = sw_mgm.get_switch('s2') s3 = sw_mgm.get_switch('s3') s4 = sw_mgm.get_switch('s4') # Assert edge_switches = sw_mgm.edge_switches live_edge_switches = sw_mgm.live_edge_switches # Assert self.assertEquals(edge_switches, live_edge_switches) self.assertEquals(len(edge_switches), 3) self.assertItemsEqual([s2, s3, s4], edge_switches) ``` #### File: sts/topology/teston_topology_test.py ```python import mock import unittest from sts.topology.teston_topology import TestONTopology class TestONSwitchesManagerTest(unittest.TestCase): def _mock_teston(self): mininet_dump = """<Host h1: h1-eth0:10.0.0.1 pid=26370> <Host h2: h2-eth0:10.0.0.2 pid=26371> <Host h3: h3-eth0:10.0.0.3 pid=26372> <Host h4: h4-eth0:10.0.0.4 pid=26373> <Host h5: h5-eth0:10.0.0.5 pid=26374> <Host h6: h6-eth0:10.0.0.6 pid=26375> <Host h7: h7-eth0:10.0.0.7 pid=26376> <Host h8: h8-eth0:10.0.0.8 pid=26377> <Host h9: h9-eth0:10.0.0.9 pid=26378> <OVSSwitch s1: lo:127.0.0.1,s1-eth1:None,s1-eth2:None,s1-eth3:None pid=26381> <OVSSwitch s2: lo:127.0.0.1,s2-eth1:None,s2-eth2:None,s2-eth3:None,s2-eth4:None pid=26386> <OVSSwitch s3: lo:127.0.0.1,s3-eth1:None,s3-eth2:None,s3-eth3:None,s3-eth4:None pid=26391> <OVSSwitch s4: lo:127.0.0.1,s4-eth1:None,s4-eth2:None,s4-eth3:None,s4-eth4:None pid=26396> <RemoteController c0: 127.0.0.1:6633 pid=26363> """ h1_eth0 = """, i.isUp()) for i in h1.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=h1-eth0,mac=00:00:00:00:00:01,ip=10.0.0.1,isUp=True mininet> """ h2_eth0 = """, i.isUp()) for i in h2.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=h2-eth0,mac=00:00:00:00:00:02,ip=10.0.0.2,isUp=True mininet> """ h3_eth0 = """, i.isUp()) for i in h3.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=h3-eth0,mac=00:00:00:00:00:03,ip=10.0.0.3,isUp=True mininet> """ h4_eth0 = """, i.isUp()) for i in h4.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=h4-eth0,mac=00:00:00:00:00:04,ip=10.0.0.4,isUp=True mininet> """ h5_eth0 = """, i.isUp()) for i in h5.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=h5-eth0,mac=00:00:00:00:00:05,ip=10.0.0.5,isUp=True mininet> """ h6_eth0 = """, i.isUp()) for i in h6.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=h6-eth0,mac=00:00:00:00:00:06,ip=10.0.0.6,isUp=True mininet> """ h7_eth0 = """, i.isUp()) for i in h7.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=h7-eth0,mac=00:00:00:00:00:07,ip=10.0.0.7,isUp=True mininet> """ h8_eth0 = """, i.isUp()) for i in h8.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=h8-eth0,mac=00:00:00:00:00:08,ip=10.0.0.8,isUp=True mininet> """ h9_eth0 = """, i.isUp()) for i in h9.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=h9-eth0,mac=00:00:00:00:00:09,ip=10.0.0.9,isUp=True 
mininet> """ s1_ports = """ , i.isUp()) for i in s1.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=lo,mac=None,ip=127.0.0.1,isUp=True name=s1-eth1,mac=ce:c5:1e:ee:36:b4,ip=None,isUp=True name=s1-eth2,mac=de:29:d4:1c:4d:a1,ip=None,isUp=True name=s1-eth3,mac=b6:2e:aa:c3:2e:0d,ip=None,isUp=True mininet> """ s2_ports = """ , i.isUp()) for i in s2.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=lo,mac=None,ip=127.0.0.1,isUp=True name=s2-eth1,mac=3e:cd:cd:bc:d0:bc,ip=None,isUp=True name=s2-eth2,mac=76:ea:fc:0c:dd:f2,ip=None,isUp=True name=s2-eth3,mac=5e:2b:cc:f5:a2:e5,ip=None,isUp=True name=s2-eth4,mac=42:b2:02:de:49:5c,ip=None,isUp=True mininet> """ s3_ports = """ , i.isUp()) for i in s3.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=lo,mac=None,ip=127.0.0.1,isUp=True name=s3-eth1,mac=66:c8:b8:3a:d5:c0,ip=None,isUp=True name=s3-eth2,mac=16:97:73:d7:43:8a,ip=None,isUp=True name=s3-eth3,mac=96:46:1e:cc:26:36,ip=None,isUp=True name=s3-eth4,mac=2a:d3:7e:8a:22:72,ip=None,isUp=True mininet> """ s4_ports = """ , i.isUp()) for i in s4.intfs.values()])p=%s" % (i.name, i.MAC(), i.IP() name=lo,mac=None,ip=127.0.0.1,isUp=True name=s4-eth1,mac=4a:82:af:b3:dd:bf,ip=None,isUp=True name=s4-eth2,mac=fa:30:f4:61:c7:c2,ip=None,isUp=True name=s4-eth3,mac=c2:8f:63:d1:27:f9,ip=None,isUp=True name=s4-eth4,mac=5e:c8:ae:c9:2c:fc,ip=None,isUp=True mininet> """ def getInterfaces(name): if name == 'h1': return h1_eth0 elif name == 'h2': return h2_eth0 elif name == 'h3': return h3_eth0 elif name == 'h4': return h4_eth0 elif name == 'h5': return h5_eth0 elif name == 'h6': return h6_eth0 elif name == 'h7': return h7_eth0 elif name == 'h8': return h8_eth0 elif name == 'h9': return h9_eth0 if name == 's1': return s1_ports elif name == 's2': return s2_ports elif name == 's3': return s3_ports elif name == 's4': return s4_ports else: raise ValueError("No ports were mocked for node: %s" % name) mininet_net = """mininet> net h1 h1-eth0:s2-eth1 h2 h2-eth0:s2-eth2 h3 h3-eth0:s2-eth3 h4 h4-eth0:s3-eth1 h5 h5-eth0:s3-eth2 h6 h6-eth0:s3-eth3 h7 h7-eth0:s4-eth1 h8 h8-eth0:s4-eth2 h9 h9-eth0:s4-eth3 s1 lo: s1-eth1:s2-eth4 s1-eth2:s3-eth4 s1-eth3:s4-eth4 s2 lo: s2-eth1:h1-eth0 s2-eth2:h2-eth0 s2-eth3:h3-eth0 s2-eth4:s1-eth1 s3 lo: s3-eth1:h4-eth0 s3-eth2:h5-eth0 s3-eth3:h6-eth0 s3-eth4:s1-eth2 s4 lo: s4-eth1:h7-eth0 s4-eth2:h8-eth0 s4-eth3:h9-eth0 s4-eth4:s1-eth3 """ def getSwitchDPID(name): if name == 's1': return 1 elif name == 's2': return 2 elif name == 's3': return 3 elif name == 's4': return 4 else: raise ValueError("No DPID mocked for switch: %s" % name) mn_driver = mock.Mock(name='TestONMininetDriver') mn_driver.net.return_value = mininet_net mn_driver.dump.return_value = mininet_dump mn_driver.getInterfaces.side_effect = getInterfaces mn_driver.getSwitchDPID.side_effect = getSwitchDPID return mn_driver def test_read_topology(self): # Arrange mn_driver = self._mock_teston() # Act topo = TestONTopology(mn_driver, None) # Assert ``` #### File: sts/tools/trace_utils.py ```python import sys import json import os sys.path.append(os.path.join(os.path.dirname(__file__), "..")) import sts.replay_event as replay_events from sts.input_traces.log_parser import parse from sts.util.tabular import Tabular from sts.event_dag import EventDag from collections import Counter def parse_json(subsequence_violations_path): with open(subsequence_violations_path) as json_data: d = json.load(json_data) # Convert strings to integers for k,v in d.iteritems(): if type(k) != int: del d[k] d[int(k)] = v return d def 
parse_event_trace(trace_path): with open(trace_path) as input_file: return EventDag(parse(input_file)) class Stats(object): def __init__(self): self.input_events = Counter() self.internal_events = Counter() self.message_receives = Counter() self.message_sends = Counter() def update(self, event): if isinstance(event, replay_events.InputEvent): event_name = str(event.__class__.__name__) self.input_events[event_name] += 1 else: event_name = str(event.__class__.__name__) self.internal_events[event_name] += 1 if event_name == "ControlMessageReceive": pkt_class = event.get_packet().__class__.__name__ self.message_receives[pkt_class] += 1 elif event_name == "ControlMessageSend": pkt_class = event.get_packet().__class__.__name__ self.message_sends[pkt_class] += 1 @property def input_event_count(self): input_count = 0 for count in self.input_events.values(): input_count += count return input_count @property def internal_event_count(self): internal_count = 0 for count in self.internal_events.values(): internal_count += count return internal_count @property def total_event_count(self): return self.input_event_count + self.internal_event_count def __str__(self): s = "Events: %d total (%d input, %d internal).\n" % (self.total_event_count, self.input_event_count, self.internal_event_count) if len(self.input_events) > 0: s += "\n\tInput events:\n" for event_name, count in self.input_events.iteritems(): s += "\t %s : %d\n" % (event_name, count) if len(self.internal_events) > 0: s += "\n\tInternal events:\n" for event_name, count in self.internal_events.iteritems(): s += "\t %s : %d\n" % (event_name, count) if event_name == "ControlMessageReceive": for pkt_class, c in self.message_receives.iteritems(): s += "\t\t %s : %d\n" % (pkt_class, c) if event_name == "ControlMessageSend": for pkt_class, c in self.message_sends.iteritems(): s += "\t\t %s : %d\n" % (pkt_class, c) return s ```
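The `Stats` helper above is essentially a thin wrapper around `collections.Counter`. Below is a minimal, self-contained sketch of the same counting pattern; the `FakeInput`/`FakeInternal` classes are stand-ins for the `sts.replay_event` types, which are assumed not to be installed here.

```python
# Stand-in illustration of the event-counting pattern used by Stats above.
# FakeInput and FakeInternal replace the real sts.replay_event classes.
from collections import Counter


class FakeInput(object):
    pass


class FakeInternal(object):
    pass


input_events = Counter()
internal_events = Counter()

for event in [FakeInput(), FakeInput(), FakeInternal()]:
    name = event.__class__.__name__
    if isinstance(event, FakeInput):
        input_events[name] += 1
    else:
        internal_events[name] += 1

print(input_events)     # Counter({'FakeInput': 2})
print(internal_events)  # Counter({'FakeInternal': 1})
```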
{ "source": "Jhall1990/podcaster", "score": 3 }
#### File: Jhall1990/podcaster/episode.py ```python import os import re import config import pytube import threading import feedparser def get_episodes(rss_link, episode_re): """ Get's all the episodes from the provided rss_link. Only episodes titles that match the provided episode_re will be returned. """ episodes_ = [] feed = feedparser.parse(rss_link) for entry in feed.entries: match = re.search(episode_re, entry.title) if match: title_ = match.group(1) number_ = match.group(2) link_ = entry.links[0].href print("Found episode {}...".format(title_)) episodes_.append(Episode(title_, number_, link_)) return episodes_ def download_episodes(episodes): """ Starts a thread for each download, waits for them to compelete, then renames each of the episodes. """ threads = [] # Start all the threads. for episode in episodes: threads.append(episode.download()) # Wait for all the threads. for thread in threads: thread.join() # Rename each downloaded file. for episode in episodes: episode.rename() class Episode(object): """ An episode object holds all the information about a given youtube video episode. """ def __init__(self, title, number=None, yt_link=None): self.title = title self.number = number self.yt_link = yt_link self.file_name = "{}.mp4".format(self.title.replace(" ", "_").replace(",", "").lower()) self.file_location = os.path.join(config.EPISODE_LOCATION, self.file_name) self.local_link = "{}{}".format(config.EPISODE_URL_PREFIX, self.file_name) self.download_thread = None def __hash__(self): """ Used when converting a list of episodes into a set. Which is done to diff episode lists. """ return self.title.__hash__() def __eq__(self, other): """ Consider an episode object equal to another if it has the same title. Epsiode objects can have different data depending on whether it's created from a newly downloaded episode or an existing episode. """ if not isinstance(other, Episode): return False if self.title != other.title: return False return True def __ne__(self, other): return not self == other def size(self): """ The podcast rss file requires the filesize, this gets the file's size. """ if os.path.exists(self.file_location): return os.path.getsize(self.file_location) else: return 0 def download(self): """ Handles creating the download thread and starting it. Returns the thread so that it can be joined later. """ yt = pytube.YouTube(self.yt_link) stream = yt.streams.filter(only_audio=True, file_extension="mp4").first() self.download_thread = DownloadThread(stream) print("Starting download of {}...".format(self.title)) self.download_thread.start() return self.download_thread def rename(self): """ Move the downloaded episode from wherever it was downloaded to location specified in the config file. """ os.rename(self.download_thread.location, self.file_location) class DownloadThread(threading.Thread): def __init__(self, stream): super(DownloadThread, self).__init__() self.stream = stream self.done = False self.error = "" self.location = "" def run(self): """ Try to download the episode specified in the thread, mark it done when it finishes. I know, bare except is gross, but this was really only a personal project so cut me some slack :). """ try: self.location = self.stream.download(config.EPISODE_LOCATION) except Exception as e: self.error = e self.done = True ```
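A small, self-contained sketch of the title and episode-number extraction that `get_episodes` performs on each feed entry. The `episode_re` pattern and the titles below are made up for illustration; they are not from any real feed.

```python
# Hypothetical pattern and titles; group(1) is the title and group(2) the episode
# number, mirroring how get_episodes builds Episode objects from feed entries.
import re

episode_re = r"^(Example Show Ep\. (\d+))"
titles = ["Example Show Ep. 12", "Unrelated upload"]

for title in titles:
    match = re.search(episode_re, title)
    if match:
        print(match.group(1), match.group(2))  # "Example Show Ep. 12" "12"
```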
{ "source": "jhallard/libgvm", "score": 3 }
#### File: jhallard/libgvm/remove_snapshot.py ```python import sys import subprocess from sys import stdin from util import * SCOPE1 = "https://www.googleapis.com/auth/devstorage.read_write" SCOPE2 = "https://www.googleapis.com/auth/logging.write" def print_help() : print "This deletes an existing snapshot" print "Usage : remove_snapshot.py [-h -d] snapshot_name project_name" print "-h : prints this help message" print "-d : uses whatever default values have been stored by config.sh" if __name__ == "__main__": instance_name = "" snapshot_name = "" zone = "" machine_type = "" project = "" opts = get_opts(sys.argv) defs = {} args = sys.argv[len(opts):] if "help" in opts : print_help() sys.exit(0) if "default" in opts : defs = load_defaults() # if they only have one def. project set if len(defs["projects"]) == 1 : project = defs["projects"][0] if len(args) >= 2 : snapshot_name = args[1] names = get_snapshot_names() if snapshot_name not in names : print "Snapshot Name Does Not Exists (argv[1]) \n" print "Current Names are : \n" for x in names : print x sys.exit(1) else : snapshot_name = get_snapshot_name() # get project name if project == "" and len(args) >= 3 : project = args[2] elif project == "" : project = select_project_name() try : ret = subprocess.check_output(['gcloud','compute','snapshots', 'delete', snapshot_name, '--project', project ]) update_snapshot_list() except subprocess.CalledProcessError, e: print "Error : Failed to Delete Snapshot \n" + str(e) ```
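The script above shells out to the gcloud CLI via `subprocess.check_output` and catches `CalledProcessError`. The sketch below shows the same call/except pattern with a harmless Python subprocess standing in for gcloud, so it runs without the Cloud SDK installed.

```python
# Same check_output / CalledProcessError pattern as remove_snapshot.py, using a
# stand-in command instead of the real gcloud CLI.
import subprocess
import sys

try:
    out = subprocess.check_output([sys.executable, "-c", 'print("snapshot deleted")'])
    print(out.decode().strip())
except subprocess.CalledProcessError as e:
    print("Error : command failed\n" + str(e))
```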
{ "source": "jhallard/Praxyk-Clients", "score": 2 }
#### File: praxyk/pod/pod_ocr.py ```python import os, sys, json, requests import subprocess, argparse, getpass import datetime as dt from praxyk_exception import PraxykException from base import PraxykBase from pod_base import PODBase from results import Results # @info - this class is used to access the optical-character-recognition route of the POD service. # It allows users to upload files to the API to perform OCR upon. class POD_OCR(PODBase) : def __init__(self, file_names=[], *args, **kwargs) : super(POD_OCR, self).__init__(*args, **kwargs) self.file_names = file_names self.transaction = None def post(self, file_names=None, **kwargs) : if file_names : self.file_names = file_names files = {} try : files = [] for name in self.file_names : file_struct = self.load_file(name) files.append(file_struct) payload = {'token' : self.auth_token} # PODBase super class automatically turns the result from the API into a praxyk.Transaction # object for us to use new_trans = super(POD_OCR, self).post(self.POD_OCR_ROUTE, payload, files=files, **kwargs) if new_trans : self.transaction = new_trans return self.transaction return None except Exception as e : raise e return None def load_file(self, fn) : return ('files', (open(fn, 'rb'))) def to_dict(self) : try: base_dict = super(Transaction, self).to_dict() updated = { "file_names " : self.file_names } base_dict.update(updated) return base_dict except Exception as e: raise PraxykException('Error converting transaction to dictionary in call to \'to_dict\'', errors=self) ``` #### File: python/praxyk/result.py ```python import os, sys, json, requests import subprocess, argparse, getpass import datetime as dt from praxyk_exception import PraxykException from base import PraxykBase # @info - This class represents a single result from a request to a Praxyk service. It encapsulates a group # of meta-data about the result and the actual prediction data that is returned from the service that # the request was made to. Read the API docs for more info on Results. 
class Result(PraxykBase) : def __init__(self, trans_id=None, user_id=None, status=None, created_at=None, finished_at=None, item_name=None, item_number=None, size_KB=None, prediction=None, *args, **kwargs) : super(Result, self).__init__(*args, **kwargs) self.trans_id = trans_id self.user_id = user_id self.status = status self.created_at = created_at self.finished_at = finished_at self.size_KB = size_KB self.item_name = item_name self.item_number = item_number, self.prediction = prediction def get(self) : payload = {'token' : self.auth_token, 'page_size' : 1, 'page' : self.item_number} try : response = super(Result, self).get(self.RESULTS_ROUTE+str(self.trans_id), payload) if response : print 'RESPOINSE',response,'\n' self.result = response.get('transaction', None) if len(self.result) >= 1 : self.page = response.get('page', None) self.service = self.result.get('service', None) self.status = self.result.get('status', None) self.created_at = self.result.get('created_at', None) self.finished_at = self.result.get('finished_at', None) self.size_KB = self.result.get('size_total_KB', None) self.item_name = self.result.get('item_name', None) self.item_number = self.result.get('item_number', None) return self except Exception, e : sys.stderr.write(str(e)) raise e return None def to_dict(self) : base_dict = super(Result, self).to_dict() result_dict = { 'item_name' : self.item_name, 'service' : self.service, 'status' : self.status, 'trans_id' : self.trans_id, 'user_id' : self.user_id, 'created_at' : self.created_at, 'finished_at' : self.finished_at, 'size_KB' : self.size_KB, 'item_number' : self.item_number, 'prediction' : self.page } base_dict.update(result_dict) return base_dict ``` #### File: python/praxyk/transactions.py ```python import os, sys, json, requests import subprocess, argparse, getpass import datetime as dt from praxyk_exception import PraxykException from base import PraxykBase from paginated import Paginated from transaction import Transaction # @info - This class represents a group of Transactions as returned through the /transactions/ route. # This class encapsulates the pagination behavior by exposing functions like next_page(), first_page(), # and last_page() that will change the contents of the container of transactions contained inside to fit # the appropriate page. class Transactions(Paginated) : def __init__(self, user_id=None, **kwargs) : super(Transactions, self).__init__(**kwargs) self.transactions = [] self.transactions_raw = "" self.user_id = user_id # @info - standard wrapper around the GET /transactions/ route. Takes the standard pagination-related parameters, # if those don't exist it uses the ones defined as member variables for the class. 
def get(self, user_id = None, **kwargs) : payload = {'token' : self.auth_token} if user_id : self.user_id = user_id if self.user_id : payload['user_id'] = self.user_id try : response = super(Transactions, self).get(url=self.TRANSACTIONS_ROUTE, payload=payload, **kwargs) if response : self.transactions = [] if response.get('page', None) : self.transactions_raw = response['page'].get('transactions', None) else : self.transactions_raw = response.get('transactions', None) if not self.transactions_raw : return None for trans in self.transactions_raw : self.transactions.append(Transaction(auth_token=self.auth_token, caller=self.caller, local=self.local, port=self.port, **trans).get().to_dict()) return self # return self.transactions_raw except Exception as e : print str(e) raise e return None def to_json(self) : tdict = self.to_dict() # turn the Transaction object into serializable dictionaries to be jsonified tdict['transactions'] = [t.to_dict() for t in tdict['transactions']] return json.dumps(tdict) def to_dict(self) : base_dict = super(Transactions, self).to_dict() transaction_dict = { 'user_id' : self.user_id, 'transactions' : self.transactions, } base_dict.update(transaction_dict) return base_dict ``` #### File: jhallard/Praxyk-Clients/praxyk_client.py ```python import requests import sys, os import argparse import datetime import json import getpass import subprocess import ConfigParser import traceback from libs.python.praxyk import Praxyk from libs.python.praxyk.pod import pod_ocr, pod_bayes_spam, pod_face_detect from libs.python.praxyk import result from libs.python.praxyk import results from os.path import expanduser global CONFIG_DIR global CLIENT_CONFIG_FILE global USER_AUTH global USER_EMAIL global USER_PASS global PRAXYK global SCRIPTING CONFIG_DIR = str(expanduser("~"))+'/.praxyk_client/' CLIENT_CONFIG_FILE = CONFIG_DIR + 'config' PROMPT = '=> ' GREETING = '\nWelcome to the Praxyk command line client!\nPlease enter a command. (help displays a list of commands)\n' +\ 'Type ^C at any time to quit.' #BASE_URL = 'http://127.0.0.1:5000/' BASE_URL = 'http://api.praxyk.com' DESCRIPTION = """ Documentation for this script is available here: https://github.com/Praxyk/Praxyk-Clients/wiki/Command-Line-Utility """ def set_up_env() : if not os.path.exists(CONFIG_DIR) : os.makedirs(CONFIG_DIR) # @info - parse command line args into useable dictionary # right now we only take a config file as an argument def parse_args(argv) : parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('--root', action='store_true', help="This flag will cause the program to look for a different" +\ " config file, one that contains a root token. 
If you don't have the root token, giving this flag will only " +\ "cause everything to fail, depending on what you are doing") parser.add_argument('--script', action='store_true', help='This flag will tell the client script that you want to run ' +\ 'it in \'scripting\' mode, where this program will expect only commands to come from the standard input, so as not to ' +\ 'present the script with choices.') return parser.parse_args() def get_input(desc, default=None) : if desc == '' : desc = PROMPT desc = desc + ("" if not default else " default : (%s)" % str(default)) inp = raw_input(desc).strip() inp = inp if inp else default while not inp : inp = raw_input(desc).strip() return inp def get_passwd(desc = None) : if desc: print desc inp = "" while not inp : inp = getpass.getpass().strip() return inp def get_input_choices(desc, choices) : print desc + " : " # print "Select One of the Following (by number) : " count = 1 for x in choices : print str(count) + ".) " + str(x) count += 1 inp = None while not inp or not inp.isdigit() or (int(inp) <= 0 or int(inp) > len(choices)) : inp = sys.stdin.readline().strip() if not inp or not inp.isdigit() or (int(inp) <= 0 or int(inp) > len(choices)) : print "Incorrect Choice. Select Again." return int(inp)-1 # @info - gets a yes/no input from the user, returns true if user chose yes # else returns false def get_yes_no(desc) : inp = "" print desc + " (Y/n)" while inp not in ['y', 'Y', 'n', 'N', 'yes', 'Yes', 'No', 'no'] : inp = sys.stdin.readline().strip() return inp in ['y', 'Y', 'yes', 'Yes'] # @info - looks at the raw response and prints relevant error messages if necessary def check_return(r) : if not r or not r.text : if "404" in r.text : sys.stderr.write("Content Not Found. Double check all content-IDs are correct (username, instance id, etc).\n") elif "401" in r.text : sys.stderr.write("Request Could not be Authorized. If you haven't logged in today, do so. If error persists, contact John.\n") elif "500" in r.text : sys.stderr.write("The Server had a Hiccup, do you mind forwarding this stack trace to John?\n") sys.stderr.write(str(80*'-'+'\n'+r.text+80*'-'+'\n')) else : sys.stderr.write("Request Could not be Fufilled.\nDetails : %s\n"%r.text) return False else : return True # @info - this attempts to load the user's login info from a local config file, or allows them to login/register # if such a file does not exist. def load_user() : answer = True if not SCRIPTING: if not os.path.isfile(CLIENT_CONFIG_FILE): answer = get_yes_no('Welcome to the Praxyk client script, we couldn\'t detect a configuration file, ' +\ 'do you have a Praxyk account already?') if not answer: print 'No problem, we will have you up and running in no time!' register_user() else: login_user() return else: answer = get_yes_no('Would you like to load user data from the Praxyk config file?') if answer or SCRIPTING: try: config = ConfigParser.ConfigParser() configfile = open(CLIENT_CONFIG_FILE, 'r') config.readfp(configfile) #print 'CONFIG: ',config.get('default_section','email') answer = get_yes_no('Would you like to use the credentials for the account associated with the email %s?' % config.get('default_section','email')) if answer : USER_AUTH = config.get('default_section', 'auth_tok') if not PRAXYK.login(auth_token=USER_AUTH): # @TODO add support for changing the config section to load print 'Unable to log in using the default credentials in config file, please log in with fresh credentials, ' +\ 'or type ^C to exit.' 
login_user() return else : print 'Successfully logged in using credentials from config file.' else : login_user return except Exception: sys.stderr.write('Unable to open the local configuration file.\n') if SCRIPTING: sys.stderr.write('Program is being run in scripting mode with invalid or nonexistant config file, cannot continue.\n') sys.exit(1) else: login_user() return else: login_user() return # @info - this logs the user into the API service by submitting their username and password in return for a temporary access # token. This token is stored in a hidden directory and can be loaded automatically when the user makes future requests. def login_user(argv=None) : print 'Please enter your Praxyk login credentials' USER_EMAIL = get_input('Email: ') USER_PASS = <PASSWORD>() while not PRAXYK.login(email=USER_EMAIL, password=<PASSWORD>): print 'Invalid username/password combination, please check your credentials and try again, or type ^C to exit' USER_EMAIL = get_input('Email: ') USER_PASS = <PASSWORD>_<PASSWORD>() print 'Login successful!' user = PRAXYK.user().get() config = ConfigParser.ConfigParser() configfile = open(CLIENT_CONFIG_FILE, 'w+') config.add_section('default_section') config.set('default_section', 'auth_tok', '%s' % user.auth_token) config.set('default_section', 'email', '%s' % user.email) config.write(configfile) def register_user(argv=None) : print 'Welcome to Praxyk!' print 'You are registering a new user account, if you do not wish to continue,' +\ '\ntype ^C at any time to exit.' user_name = get_input('What is your name?') user_email = get_input('Please enter your email.') user_pass1 = get_passwd('Please enter your password.') user_pass2 = get_passwd('Please confirm your password.') while (user_pass1 != user_pass2) : print 'The passwords do not match, please enter matching passwords.' user_pass1 = get_passwd('Please enter your password.') user_pass2 = get_passwd('Please confirm your password.') if (not get_yes_no('Have you read and accepted our terms and conditions?\n' +\ '(%s/terms_and_conditions.html)' % BASE_URL)) : print 'Feel free to come back if you change your mind.' exit_session() else : user = PRAXYK.user(name=user_name,email=user_email).post() if user : print 'Welcome, %s!' % user_name print 'You will need to confirm your account through your email before using our services.' exit_session() else : print 'Registration failed :(' # @TODO : put a reason why the registration failed if get_yes_no ('Would you like to attempt to register again?') : register_user() else : exit_session() def exit_session(argv=None) : print 'Thanks for using the Praxyk client script!' sys.exit(0) def change_password(user=None) : new_pass1 = get_passwd('Please enter your new password.') new_pass2 = get_passwd('Please confirm your new password.') while (user_pass1 != user_pass2) : print 'The passwords do not match, please enter matching passwords.' new_pass1 = get_passwd('Please enter your new password.') new_pass2 = get_passwd('Please confirm your new password.') response = PRAXYK.user().post(new_pass1) if not response : print 'Unable to complete your request to change password.\n' return else : print 'Password changed successfully.' return def change_email(user=None) : print 'This is not currently supported through our API, sorry about that!' return def update_user(argv=None) : PRAXYK.user().get() return def switch_user(argv=None) : return def display_users(argv=None) : print 'This action is only supported for admin accounts, and ' +\ 'is not even supported yet.' 
return def display_user(argv=None) : user_dict = PRAXYK.user().to_dict() user_auth = user_dict.get('auth_token') user_dict = user_dict.get('caller') #print 'USER_DICT: ',user_dict print '\n\tUser information for %s:' % user_dict.get('name') print_div() print_dict(user_dict) print_div() pass def apply_coupon(argv=None) : print 'Sorry, coupons are not currently supported through the Praxyk client script.' """ if not argv : coupon_id = get_input('Please specify the coupon code to apply') else : coupon_id = argv """ return SERVICES = ['ocr', 'face_detect', 'spam'] def begin_transaction(argv=None) : try : service = '' query = argv.split(' ') if len(query) < 2 or str(query[0]).lower() not in SERVICES : print 'To begin a transaction you need to specify a service (ocr, face_detect, spam) and a file or series of files to upload.' while (query[0].lower() not in SERVICES or len(query) < 2) : query = get_input('Please enter the service you would like to use followed by at least one file to upload: ').split(' ') service = query.pop(0) if service == 'ocr' : p = pod_ocr.POD_OCR(PRAXYK) elif service == 'face_detect' : p = pod_face_detect.POD_FaceDetect(PRAXYK) elif service == 'spam' : p = pod_bayes_spam.POD_BayesSpam(PRAXYK) res = p.post(file_names=query) return except Exception as e : print e return def cancel_transaction(argv=None) : trans_id = argv if trans_id is None : trans_id = get_input('Enter the id of the transaction you would like to cancel: ') result = PRAXYK.transaction(trans_id=trans_id).put(cancel=True) print 'RESULT: ',result if not result : print 'SOMETHING BAD HAPPENED' return def display_transactions(argv=None) : response = True transactions = PRAXYK.user().transactions().get() if len(transactions.to_dict()) > 0 : print '\n\tTransactions:' for transaction in transactions.to_dict().get('transactions') : print_div() print_dict(transaction) print_div() else : print 'No transactions to display, try starting a transaction now!' return def display_transaction(argv=None) : trans_id = argv if trans_id is None : trans_id = get_input('Please enter the id of the transaction you would like to view: ') try : transaction = PRAXYK.user().transaction(trans_id=trans_id) if not transaction.get() : print 'The transaction with id \'%s\' does not exist' % trans_id trans_dict = transaction.to_dict() if len(trans_dict) > 0 : print '\n\tTransaction:' print_div() print_dict(trans_dict) print_div() else : print 'The transaction ',transaction_id,' does not exist.' return except : print 'Error retreiving information about the transaction with id \'%s\'.' % trans_id print 'It is possible this is a transaction that does not belong to you or is not a valid id.' return def display_results(argv=None) : return def display_result(argv) : try : if argv == '' : trans_id = get_input('Please enter the transaction id of the result you wish to view: ') else : trans_id = argv res = PRAXYK.result(trans_id=trans_id).get() if res is None : print 'Unable to get the results of transaction with id %s, check to make sure the transaction ' +\ 'id you have entered is correct.' % trans_id else : print '\tResult for transaction #%s:' % trans_id print_div() print_dict(res.to_dict()) print_div() return except Exception as e : print e return # @info - this attempts to parse a command the user has typed in by matching it with the ACTION_MAP # dictionary, and calls the appropriate function if the user's command is valid. 
def parse_command(command) : if command == '' : return command_list = command.split(' ',2) num_args = len(command_list) action=noun=specifics='' #print 'command_list: ',command_list,' length {',num_args,'}' action = command_list[0] #print 'action: ',action if num_args > 1: noun = command_list[1] #print 'noun: ',noun if num_args > 2: specifics = command_list[2] #print 'specifics: ',specifics if action not in ACTIONS : sys.stderr.write('Must include a valid action (%s)\n' % ACTIONS) return if noun not in NOUNS : print '' in NOUNS print not '' sys.stderr.write('Must include a valid noun (%s)\n' % NOUNS) return action_func = ACTION_MAP.get(action).get(noun, None) if not action_func : sys.stderr.write('It looks like your input of \'%s\' is invalid or not yet implemented.' +\ ' If you feel this is an error, please contact the Praxyk team at %s\n' % ((action+' '+noun+' '+\ +' '+specifics), BASE_URL)) return res = action_func(argv=specifics) def print_div(char='-') : print str(char) * 80 def print_dict(dictionary) : for key,value in dictionary.items() : if key != 'auth_token' : if type(value) is dict and len(value) > 0: print '\t$',str(key).ljust(20) print_dict(value) else : print '\t-',str(key).ljust(20),': ',str(value).ljust(20) def print_help(argv=None) : print 'Valid actions followed by valid nouns to be used in conjunction with them:' print_div() for key,value in ACTION_MAP.items() : print 'Action: ',key for key in value.keys() : print '\t\tNoun: ',key print_div() return ACTION_MAP = { 'help' : { "" : print_help }, 'login' : { "" : login_user }, # 'register' : { "" : register_user }, # 'exit' : { "" : exit_session }, # 'switch' : { "user" : switch_user }, 'change' : { "email" : change_email, # "password" : <PASSWORD> }, # 'apply' : { "coupon": apply_coupon }, # 'begin' : { "transaction": begin_transaction }, # 'cancel' : { "transaction" : cancel_transaction }, # 'display' : { "users" : display_users, # "user" : display_user, # "transactions" : display_transactions, # "transaction" : display_transaction, # "results" : display_results, "result" : display_result } } ACTIONS = ACTION_MAP.keys() NOUNS = [] # @info - main function, loops to get user input and calls # appropriate functions as per the user's command if __name__ == "__main__" : try: set_up_env() for noun_func_pair in ACTION_MAP.values(): keys = noun_func_pair.keys() for key in keys: if key not in NOUNS: NOUNS.append(key) PRAXYK = Praxyk() SCRIPTING = False args = parse_args(sys.argv) if args.root : CLIENT_CONFIG_FILE = CONFIG_DIR + 'root.config' load_user() print GREETING while (True): command = get_input('') parse_command(command) except KeyboardInterrupt: print '\n^C received, exiting the Praxyk client script.' sys.exit(0) except Exception as e: print 'Something bad happened...\nPlease take the time to forward the following trace to <EMAIL>, thanks!' print e #traceback.print_exc() sys.exit(1) ```
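`parse_command` above dispatches through a two-level `ACTION_MAP` keyed by action and noun. The self-contained sketch below shows that dispatch shape with placeholder handlers; it illustrates the pattern only and does not use the real Praxyk client functions.

```python
# Two-level action/noun dispatch, in the style of ACTION_MAP and parse_command above.
# The handlers are placeholders for illustration only.
def show_help(argv=None):
    print("help requested")


def show_result(argv=None):
    print("showing result %s" % argv)


ACTION_MAP = {
    "help": {"": show_help},
    "display": {"result": show_result},
}


def dispatch(command):
    parts = command.split(" ", 2) + ["", ""]
    action, noun, specifics = parts[0], parts[1], parts[2]
    handler = ACTION_MAP.get(action, {}).get(noun)
    if handler is None:
        print("unknown command: %r" % command)
        return
    handler(argv=specifics)


dispatch("help")
dispatch("display result 42")
```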
{ "source": "JHaller27/des-sim", "score": 2 }
#### File: des-sim/src/function.py ```python from mybin import Bin import logging log = logging.getLogger('des-sim') E = \ [32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9, 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17, 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25, 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1] S_BOXES = [ # S_1 [ [14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7], [0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8], [4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0], [15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13] ], # S_2 [ [15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10], [ 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5], [ 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15], [13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9] ], # S_3 [ [10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8], [13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1], [13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7], [ 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12] ], # S_4 [ [ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15], [13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9], [10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4], [ 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14] ], # S_5 [ [ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9], [14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6], [ 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14], [11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3] ], # S_6 [ [12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11], [10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8], [ 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6], [ 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13] ], # S_7 [ [ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1], [13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6], [ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2], [ 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12] ], # S_8 [ [13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7], [ 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2], [ 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8], [ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11] ] ] P = \ [16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25] class Function: """ GoF Context class """ __slots__ = ['_encrypter', '_step', 'data', '_key_scheduler'] def __init__(self, encrypter: 'Encrypter', key_scheduler: 'KeyScheduler'): self._encrypter = encrypter self._step = None self.data = None self._key_scheduler = key_scheduler def get_result(self): self._step = Initialize(self) while self._step is not None: self._step = self._step.run() return self.data def set_new_data(self): self.data = self._encrypter.plaintext[1] def _get_key(self): key = self._key_scheduler.get_key() log.info(' key output {} ({} bits)'.format(key, len(key))) return key key = property(fget=_get_key) """==============================================================================""" class FunctionStep: """ GoF State super-class. 
""" __slots__ = ['_context'] def __init__(self, context: Function): self._context = context def run(self): raise NotImplementedError def _get_encrypter(self): return self._context._encrypter class Initialize(FunctionStep): def run(self): log.info(' Starting f function...') self._context.set_new_data() return Expansion(self._context) class Expansion(FunctionStep): OUTPUT_LEN = 48 def run(self): s = '' for new_bit_loc in range(self.OUTPUT_LEN): old_bit_loc = E[new_bit_loc] s += str(self._context.data[old_bit_loc - 1]) # Must subtract 1 b/c PC tables are 1-indexed self._context.data = Bin(self.OUTPUT_LEN, s, 2) log.debug(' E result {} ({} bits)'.format(self._context.data, len(self._context.data))) return Xor(self._context) class Xor(FunctionStep): def run(self): self._context.data ^= self._context.key log.debug(' Xor result {} ({} bits)'.format(self._context.data, len(self._context.data))) return Split(self._context) class Split(FunctionStep): def run(self): self._context.data = self._context.data.split(len(S_BOXES)) log.debug(' Split for S boxes:') i = 1 for s_in in self._context.data: log.debug(' [{}] {} ({} bits)'.format(i, s_in, len(s_in))) i += 1 return SBoxes(self._context) class SBoxes(FunctionStep): OUTPUT_LEN = 4 def run(self): log.debug(' S box results:') data_lst = self._context.data for s_box_idx in range(len(S_BOXES)): b = self.s_box_transformation(s_box_idx, data_lst[s_box_idx]) data_lst[s_box_idx] = b log.debug(' [{}] {} ({} bits)'.format(s_box_idx + 1, b, len(b))) self._context.data = tuple(data_lst) return Recombine(self._context) @staticmethod def s_box_transformation(s_box_idx: int, data: Bin) -> Bin: s_box = S_BOXES[s_box_idx] row_idx = int(data[0] + data[len(data) - 1], 2) col_idx = int(data[1:len(data) - 1], 2) return Bin(SBoxes.OUTPUT_LEN, s_box[row_idx][col_idx]) class Recombine(FunctionStep): def run(self): new_data = None for b in self._context.data: new_data = b if new_data is None else new_data + b self._context.data = new_data log.debug(' S box result {} ({} bits)'.format(self._context.data, len(self._context.data))) return Permutation(self._context) class Permutation(FunctionStep): OUTPUT_LEN = 32 def run(self): s = '' for new_bit_loc in range(self.OUTPUT_LEN): old_bit_loc = P[new_bit_loc] s += self._context.data[old_bit_loc - 1] # Must subtract 1 b/c PC tables are 1-indexed self._context.data = Bin(self.OUTPUT_LEN, s, 2) log.debug(' P result {} ({} bits)'.format(self._context.data, len(self._context.data))) return None from encrypter import * ```
{ "source": "JHaller27/ExpressionInterpreter", "score": 4 }
#### File: ExpressionInterpreter/src/ExpressionInterpreter.py ```python DIGITS = '0123456789.' OPERATORS = '+-*/^%' class Node: __slots__ = ['data', 'left', 'right'] def __init__(self, data=None, left=None, right=None): self.data = data self.left = left self.right = right def __str__(self): return format('(%s <- %s -> %s)' % (self.left, self.data, self.right)) def resolve(self): if self.data in OPERATORS: if self.left is None or self.right is None: raise SyntaxError("Operator must have two adjacent operands.") left = self.left.resolve() right = self.right.resolve() if self.data is '+': return left + right elif self.data is '-': return left - right elif self.data is '*': return left * right elif self.data is '/': result = left / right return int(result) if result == int(result) else result elif self.data is '^': return left ** right elif self.data is '%': return left % right else: if '.' in self.data: return float(self.data) else: return int(self.data) def tokenize(base): tokens = [] base = base.replace(' ', '') idx = 0 while idx < len(base): ch = base[idx] # Handle operator if ch in OPERATORS: tokens.append(ch) idx += 1 # Handle numbers elif ch in DIGITS: token = '' while ch in DIGITS: token += ch idx += 1 if idx < len(base): ch = base[idx] else: break tokens.append(token) # Handle parens elif ch is '(': paren_text = '' paren_num = 1 while paren_num > 0: idx += 1 ch = base[idx] if ch is '(': paren_num += 1 paren_text += ch elif ch is ')': paren_num -= 1 if paren_num is not 0: paren_text += ch else: paren_text += ch tokens.append(tokenize(paren_text)) idx += 1 return tokens def generate_tree(tokens: list) -> Node: # Priorities: +-=0, */=1, ^=2, ()=3, digit=4 priorities = [] # If only token is a list (ie parens), treat as own list if len(tokens) == 1 and type(tokens[0]) is list: tokens = tokens[0] # Assign priorities in parallel list for idx, val in enumerate(tokens): pri = 4 if type(val) is list: pri = 3 elif val in '^': pri = 2 elif val in '*/%': pri = 1 elif val in '+-': pri = 0 priorities.append(pri) # Determine first item with lowest priority least_priority = 4 least_idx = 0 for idx, pri in enumerate(priorities): if pri < least_priority: least_idx = idx least_priority = pri # Create node data = tokens[least_idx] left = generate_tree(tokens[:least_idx]) if len(tokens[:least_idx]) > 0 else None right = generate_tree(tokens[least_idx + 1:]) if len(tokens[least_idx + 1:]) > 0 else None return Node(data, left, right) def main(): s = input('Input expression >> ') tokens = tokenize(s) tree = generate_tree(tokens) result = tree.resolve() print(result) if __name__ == '__main__': main() ```
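A hypothetical usage of the interpreter above, assuming `src/ExpressionInterpreter.py` is on the import path; the expression is chosen so that both the parenthesised subexpression and the operator priorities come into play.

```python
# Assumes ExpressionInterpreter.py is importable (e.g. run from the src/ directory).
from ExpressionInterpreter import tokenize, generate_tree

tokens = tokenize("2 + 3 * (4 - 1)")
tree = generate_tree(tokens)
print(tree.resolve())  # 11
```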
{ "source": "JHaller27/PyMultiConsumer", "score": 4 }
#### File: JHaller27/PyMultiConsumer/multiconsumer.py ```python def consume(*args): """ Generator which consumes multiple iterables and yields a tuple containing one item from each iterable. Ends when any collection runs out of elements. :param args: Any number of iterables to consume :return: A tuple containing the next item from each iterable """ iters = tuple([iter(arg) for arg in args]) while True: yield tuple([next(itr) for itr in iters]) ```
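A hypothetical usage sketch, assuming `multiconsumer.py` is importable. One caveat: on Python 3.7+ (PEP 479) the bare `next()` inside the generator surfaces as `RuntimeError` once the shortest iterable is exhausted, so the loop below guards for that; on older interpreters the iteration simply stops.

```python
from multiconsumer import consume

pairs = consume([1, 2, 3], "ab")
try:
    for pair in pairs:
        print(pair)  # (1, 'a') then (2, 'b')
except RuntimeError:
    pass  # shortest iterable exhausted (Python 3.7+ behaviour)
```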
{ "source": "JHaller27/PyState", "score": 3 }
#### File: PyState/pystate/state.py ```python class State: """ Interface for encapsulating the behavior associated with a particular state of the Context. """ def __init__(self, context: 'Context' = None): """ :param context: Context object with data globally available to the state machine. """ self.context = context def run(self) -> 'State' or None: """ Perform this State's behavior. :return: The next State, or None if this is a final State. """ raise NotImplementedError ```
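A hypothetical pair of concrete states built on the interface above, assuming the package is importable as `pystate` (i.e. a `pystate/__init__.py` exists). A plain dict stands in for a real Context object.

```python
# Minimal two-state machine using the State interface above; the dict passed in
# stands in for a richer Context object.
from pystate.state import State


class Hello(State):
    def run(self):
        print("hello from", self.context["name"])
        return Done(self.context)


class Done(State):
    def run(self):
        return None  # final state: ends the machine


state = Hello({"name": "demo"})
while state is not None:
    state = state.run()
```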
{ "source": "JHaller27/TowerOfHanoi-Python", "score": 4 }
#### File: TowerOfHanoi-Python/src/TowerOfHanoiSolver.py ```python class Tower: __slots__ = 'rings', 'capacity' def __init__(self, cap: int): self.rings = list() self.capacity = cap def add(self, ring_size:int): if len(self.rings) >= self.capacity: raise IndexError('Tower already at max capacity') if (len(self.rings) > 0) and (ring_size >= self.rings[-1]): raise ValueError("Trying to add ring of size %d on top of ring of size %d" % (ring_size, self.rings[-1])) self.rings.append(ring_size) def pop(self) -> int: if len(self.rings) <= 0: raise IndexError('Tower empty') return self.rings.pop() def get(self, depth: int) -> int: return self.rings[-1 * (depth + 1)] def print_towers(towers: list, size=None): if size is None: size = towers[0].capacity # Pad tower lists with zeros tower_list = list() for tower in towers: tl = list(tower.rings) while len(tl) < size: tl.append(0) tower_list.append(tl) for row in range(size): for tower in tower_list: ring_size = tower[size - row - 1] whitespace = padding(size - ring_size + 1) ring_space = padding(ring_size, '-') print(whitespace + ring_space + '|' + ring_space + whitespace, end=' ') print() for idx in range(len(tower_list)): base = padding((size + 1), character='=') print("%s%d%s" % (base, idx + 1, base), end=' ') print("\n") def padding(width: int, character=' ') -> str: pad = '' for i in range(width): pad += character return pad def move_tower(towers: list, size: int, src: int, dest: int): if size == 1: towers[dest].add(towers[src].pop()) print("\nTower %d -> Tower %d" % (src + 1, dest + 1)) print_towers(towers) else: # Determine temp peg all_pegs = list(range(3)) all_pegs.remove(src) all_pegs.remove(dest) temp_peg = all_pegs[0] # Move top tower (size n-1) to temp peg move_tower(towers, size-1, src, temp_peg) # Move bottom ring to destination peg move_tower(towers, 1, src, dest) # Move rest of tower to destination peg move_tower(towers, size-1, temp_peg, dest) def main(): size = int(input("Input tower size... ")) towers = list() for i in range(3): towers.append(Tower(size)) for ring in range(size, 0, -1): towers[0].add(ring) print("\nInitial configuration") print_towers(towers) move_tower(towers, size, 0, 1) if __name__ == '__main__': main() ```
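A standalone sketch of the same recursive scheme `move_tower` uses, reduced to counting moves; it confirms the familiar 2**n - 1 move count without needing the `Tower` class.

```python
# Counting version of the move_tower recursion: move n-1 rings aside, move the
# largest ring, then move the n-1 rings back on top.
def count_moves(n):
    if n == 1:
        return 1
    return count_moves(n - 1) + 1 + count_moves(n - 1)


for n in range(1, 5):
    print(n, count_moves(n), 2 ** n - 1)  # the two counts agree: 1, 3, 7, 15
```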
{ "source": "jhalley/AdventOfCode2015", "score": 3 }
#### File: jhalley/AdventOfCode2015/day4.py ```python import hashlib def calc_lowest_zero_hash(input, num_zeroes): zero_matcher = '0'*num_zeroes curr_num = -1 while True: curr_num += 1 m = hashlib.md5() m.update(input + str(curr_num)) if (m.hexdigest()[:num_zeroes] == zero_matcher): return curr_num print calc_lowest_zero_hash('bgvyzdsv', 5) print calc_lowest_zero_hash('bgvyzdsv', 6) ```
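A quick standalone check of the same brute-force idea, written for Python 3 and using the Advent of Code sample key `abcdef`, whose published five-zero answer is 609043.

```python
# Brute-force the lowest suffix whose MD5 digest starts with the required zeroes,
# mirroring calc_lowest_zero_hash above.
import hashlib


def lowest_with_prefix(secret, zeroes):
    target = "0" * zeroes
    n = 0
    while True:
        digest = hashlib.md5((secret + str(n)).encode()).hexdigest()
        if digest.startswith(target):
            return n
        n += 1


print(lowest_with_prefix("abcdef", 5))  # 609043 (Advent of Code 2015, day 4 sample)
```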
{ "source": "jhalljhall/beiwe-backend", "score": 2 }
#### File: beiwe-backend/pages/forest_pages.py ```python import csv import datetime from collections import defaultdict from django.contrib import messages from django.http.response import FileResponse from django.shortcuts import redirect, render from django.utils import timezone from django.views.decorators.http import require_GET, require_http_methods, require_POST from authentication.admin_authentication import (authenticate_admin, authenticate_researcher_study_access, forest_enabled) from constants.data_access_api_constants import CHUNK_FIELDS from constants.forest_constants import ForestTaskStatus, ForestTree from database.data_access_models import ChunkRegistry from database.study_models import Study from database.tableau_api_models import ForestTask from database.user_models import Participant from forms.django_forms import CreateTasksForm from libs.http_utils import easy_url from libs.internal_types import ParticipantQuerySet, ResearcherRequest from libs.streaming_zip import zip_generator from libs.utils.date_utils import daterange from middleware.abort_middleware import abort from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer @require_GET @authenticate_researcher_study_access @forest_enabled def analysis_progress(request: ResearcherRequest, study_id=None): study: Study = Study.objects.get(pk=study_id) participants: ParticipantQuerySet = Participant.objects.filter(study=study_id) # generate chart of study analysis progress logs trackers = ForestTask.objects.filter(participant__in=participants).order_by("created_on") start_date = (study.get_earliest_data_time_bin() or study.created_on).date() end_date = (study.get_latest_data_time_bin() or timezone.now()).date() # this code simultaneously builds up the chart of most recent forest results for date ranges # by participant and tree, and tracks the metadata params = dict() results = defaultdict(lambda: "--") tracker: ForestTask for tracker in trackers: for date in daterange(tracker.data_date_start, tracker.data_date_end, inclusive=True): results[(tracker.participant_id, tracker.forest_tree, date)] = tracker.status if tracker.status == tracker.status.success: params[(tracker.participant_id, tracker.forest_tree, date)] = tracker.forest_param_id else: params[(tracker.participant_id, tracker.forest_tree, date)] = None # generate the date range for charting dates = list(daterange(start_date, end_date, inclusive=True)) chart = [] for participant in participants: for tree in ForestTree.values(): row = [participant.patient_id, tree] + \ [results[(participant.id, tree, date)] for date in dates] chart.append(row) # ensure that within each tree, only a single set of param values are used (only the most recent runs # are considered, and unsuccessful runs are assumed to invalidate old runs, clearing params) params_conflict = False for tree in set([k[1] for k in params.keys()]): if len(set([m for k, m in params.items() if m is not None and k[1] == tree])) > 1: params_conflict = True break return render( request, 'forest/analysis_progress.html', context=dict( study=study, chart_columns=["participant", "tree"] + dates, status_choices=ForestTaskStatus, params_conflict=params_conflict, start_date=start_date, end_date=end_date, chart=chart # this uses the jinja safe filter and should never involve user input ) ) @require_http_methods(['GET', 'POST']) @authenticate_admin @forest_enabled def create_tasks(request: ResearcherRequest, study_id=None): # Only a SITE admin can queue forest tasks if not 
request.session_researcher.site_admin: return abort(403) try: study = Study.objects.get(pk=study_id) except Study.DoesNotExist: return abort(404) # FIXME: remove this double endpoint pattern, it is bad. if request.method == "GET": return render_create_tasks(request, study) form = CreateTasksForm(data=request.POST, study=study) if not form.is_valid(): error_messages = [ f'"{field}": {message}' for field, messages in form.errors.items() for message in messages ] error_messages_string = "\n".join(error_messages) messages.warning(request, f"Errors:\n\n{error_messages_string}") return render_create_tasks(request, study) form.save() messages.success(request, "Forest tasks successfully queued!") return redirect(easy_url("forest_pages.task_log", study_id=study_id)) @require_GET @authenticate_researcher_study_access @forest_enabled def task_log(request: ResearcherRequest, study_id=None): study = Study.objects.get(pk=study_id) forest_tasks = ForestTask.objects.filter(participant__study_id=study_id).order_by("-created_on") return render( request, "forest/task_log.html", context=dict( study=study, is_site_admin=request.session_researcher.site_admin, status_choices=ForestTaskStatus, forest_log=ForestTaskSerializer(forest_tasks, many=True).data, ) ) @require_GET @authenticate_admin def download_task_log(request: ResearcherRequest): forest_tasks = ForestTask.objects.order_by("created_on") return FileResponse( stream_forest_task_log_csv(forest_tasks), content_type="text/csv", filename=f"forest_task_log_{timezone.now().isoformat()}.csv", as_attachment=True, ) @require_POST @authenticate_admin @forest_enabled def cancel_task(request: ResearcherRequest, study_id, forest_task_external_id): if not request.session_researcher.site_admin: return abort(403) number_updated = \ ForestTask.objects.filter( external_id=forest_task_external_id, status=ForestTaskStatus.queued ).update( status=ForestTaskStatus.cancelled, stacktrace=f"Canceled by {request.session_researcher.username} on {datetime.date.today()}", ) if number_updated > 0: messages.success(request, "Forest task successfully cancelled.") else: messages.warning(request, "Sorry, we were unable to find or cancel this Forest task.") return redirect(easy_url("forest_pages.task_log", study_id=study_id)) @require_GET @authenticate_admin @forest_enabled def download_task_data(request: ResearcherRequest, study_id, forest_task_external_id): try: tracker: ForestTask = ForestTask.objects.get( external_id=forest_task_external_id, participant__study_id=study_id ) except ForestTask.DoesNotExist: return abort(404) chunks = ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS) f = FileResponse( zip_generator(chunks), content_type="zip", as_attachment=True, filename=f"{tracker.get_slug()}.zip", ) f.set_headers(None) return f def stream_forest_task_log_csv(forest_tasks): buffer = CSVBuffer() writer = csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields) writer.writeheader() yield buffer.read() for forest_task in forest_tasks: writer.writerow(ForestTaskCsvSerializer(forest_task).data) yield buffer.read() def render_create_tasks(request: ResearcherRequest, study: Study): participants = Participant.objects.filter(study=study) try: start_date = ChunkRegistry.objects.filter(participant__in=participants).earliest("time_bin") end_date = ChunkRegistry.objects.filter(participant__in=participants).latest("time_bin") start_date = start_date.time_bin.date() end_date = end_date.time_bin.date() except ChunkRegistry.DoesNotExist: start_date = 
study.created_on.date() end_date = timezone.now().date() return render( request, "forest/create_tasks.html", context=dict( study=study, participants=list( study.participants.order_by("patient_id").values_list("patient_id", flat=True) ), trees=ForestTree.choices(), start_date=start_date.strftime('%Y-%m-%d'), end_date=end_date.strftime('%Y-%m-%d') ) ) class CSVBuffer: line = "" def read(self): return self.line def write(self, line): self.line = line ```
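The `CSVBuffer` class and `stream_forest_task_log_csv` above stream the task log one CSV row at a time instead of building the whole file in memory. Below is a minimal, framework-free sketch of that same one-slot-buffer pattern; the field names and row data are made up for illustration and are not part of the Beiwe codebase.

```python
# Standalone sketch of the CSVBuffer streaming trick: csv.DictWriter "writes"
# into a single-slot buffer whose latest contents are yielded after each row.
import csv


class OneLineBuffer:
    line = ""

    def write(self, line):
        self.line = line

    def read(self):
        return self.line


def stream_rows(rows, fieldnames):
    buffer = OneLineBuffer()
    writer = csv.DictWriter(buffer, fieldnames=fieldnames)
    writer.writeheader()
    yield buffer.read()
    for row in rows:
        writer.writerow(row)
        yield buffer.read()


if __name__ == "__main__":
    demo_rows = [{"participant": "abc123", "status": "success"}]  # hypothetical
    print("".join(stream_rows(demo_rows, ["participant", "status"])), end="")
```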
{ "source": "jhalljhall/forest", "score": 3 }
#### File: willow/tests/test_log_stats.py ```python import pandas as pd from forest.willow.log_stats import comm_logs_summaries ID = "6b38vskd" STAMP_START = 1453837206 STAMP_END = 1454634000 TZ_STR = "America/New_York" OPTION = "daily" def test_comm_log_summaries_with_empty_data(): text_data = pd.DataFrame.from_dict({}) call_data = pd.DataFrame.from_dict({}) stats_pdframe = comm_logs_summaries(ID, text_data, call_data, STAMP_START, STAMP_END, TZ_STR, OPTION) assert isinstance(stats_pdframe, pd.DataFrame) def test_comm_log_summaries_with_empty_text_data(): text_data = pd.DataFrame.from_dict({}) call_data = pd.DataFrame.from_dict( {'timestamp': {0: 1454428647649}, 'UTC time': {0: '2016-02-02T15:57:27.649'}, 'hashed phone number': {0: 'ZlGtb-SRRIgOcHLBD02d2_F049naF0YZbCx_CeP7jss='}, 'call type': {0: 'Missed Call'}, 'duration in seconds': {0: 0}} ) stats_pdframe = comm_logs_summaries(ID, text_data, call_data, STAMP_START, STAMP_END, TZ_STR, OPTION) assert isinstance(stats_pdframe, pd.DataFrame) ```
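For readers unfamiliar with the fixture layout used in `test_comm_log_summaries_with_empty_text_data` above, here is a standalone illustration of the `column -> {row index: value}` mapping that `pd.DataFrame.from_dict` consumes with its default `orient="columns"`; it only needs pandas and does not touch the forest package.

```python
# Rebuild a one-row call log the same way the test fixture does and confirm
# the resulting shape: outer keys become columns, inner keys become the index.
import pandas as pd

call_data = pd.DataFrame.from_dict(
    {
        "timestamp": {0: 1454428647649},
        "call type": {0: "Missed Call"},
        "duration in seconds": {0: 0},
    }
)

assert list(call_data.columns) == ["timestamp", "call type", "duration in seconds"]
assert call_data.loc[0, "call type"] == "Missed Call"
print(call_data)
```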
{ "source": "jhallman5/TEKsys_Python", "score": 3 }
#### File: jhallman5/TEKsys_Python/script_test.py ```python import io import os import sys import unittest from unittest.mock import patch from script import find_files current_dir = os.path.dirname(__file__) test_root_dir = os.path.join(current_dir, 'test-dir') test_user_input = '[a-z]+ample' expected_output = "{'.': 0, 'a': 1, 'a/b': 2, 'a/b/c': 3, 'd': 2, 'd/e': 0, 'd/e/f': 2}\n" class Find_Files(unittest.TestCase): @patch('script.get_root_dir', return_value=test_root_dir) @patch('script.get_user_input', return_value=test_user_input) def test_dict_generation(self,root_dir, user_input): capture_output = io.StringIO() sys.stdout = capture_output find_files() sys.stdout = sys.__stdout__ self.assertEqual(capture_output.getvalue() , expected_output) if __name__ == '__main__': unittest.main() ```
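The test above layers two techniques: `unittest.mock.patch` to stub the helpers and manual `sys.stdout` redirection to capture printed output. The sketch below isolates the redirection half with a throwaway `report()` function standing in for `script.find_files`, so the capture/restore dance can be read without the mocks; none of these names come from the original project.

```python
# Capture print() output by swapping sys.stdout for a StringIO, then restore it.
import io
import sys
import unittest


def report():
    print("value is 42")


class CaptureStdoutExample(unittest.TestCase):
    def test_captures_stdout(self):
        captured = io.StringIO()
        sys.stdout = captured              # same redirection used in script_test.py
        try:
            report()
        finally:
            sys.stdout = sys.__stdout__    # always restore the real stdout
        self.assertEqual(captured.getvalue(), "value is 42\n")


if __name__ == "__main__":
    unittest.main()
```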
{ "source": "jham20x6/turbo-octo-broccoli", "score": 4 }
#### File: action_tracker/test/TestActionTracker2.py ```python import simplejson as json import unittest from action_tracker.Tracker import ActionTracker class TestActionTracker2(unittest.TestCase): action_tracker = ActionTracker() def testAddActionWithFileInput(self): ''' Open the test input file for reading Call addAction for each line Verify that each addAction() was successful ''' with open("test2input.txt") as f: for line in f: result = self.action_tracker.addAction(json_string=line) self.assertEqual(result,None,"addAction() Failed") def testGetStatsWithFileInput(self): ''' Verify that the results from getStats() call matches line in the output file json.loads and json.dumps ensures the same format/whitespace ''' with open("test2output.txt") as f: self.assertEqual(json.dumps(json.loads(f.readline())), self.action_tracker.getStats(), "getStats() Failed") ``` #### File: action_tracker/test/TestActionTracker3.py ```python import simplejson as json import unittest import concurrent.futures from action_tracker.Tracker import ActionTracker class TestActionTracker3(unittest.TestCase): action_tracker = ActionTracker() def testAddActionWithThreads(self): ''' Open the test input file for reading Create a thread pool executor for handling threads and their output Starting threads for each input line in the input file Verify that each addAction() was successful using future.result() ''' with open("test3input.txt") as f: with concurrent.futures.ThreadPoolExecutor() as executor: future_add_actions = {executor.submit(self.action_tracker.addAction,line): line for line in f} for future in concurrent.futures.as_completed(future_add_actions): self.assertEqual(future.result(), None, "addAction() Failed") def testGetStatsWithThreads(self): ''' Open the test input file for reading Only one thread is needed, however ThreadPoolExecutor allows for returned value in .result() Starting threads for each input line in the input file Verify that the getStats() call was successful ''' with open("test3output.txt") as f: with concurrent.futures.ThreadPoolExecutor() as executor: future_add_actions = {executor.submit(self.action_tracker.getStats)} for future in concurrent.futures.as_completed(future_add_actions): formatted_line = json.dumps(json.loads(f.readline())) self.assertEqual(future.result(), formatted_line, "getStats() Failed") ```
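`TestActionTracker3` checks thread safety by pushing every input line through `concurrent.futures`. The snippet below is that same submit/`as_completed` pattern reduced to a self-contained form; `add_action` here is a dummy worker, not the real `ActionTracker.addAction`, and the sample lines are invented.

```python
# Submit one task per input line, then inspect each result as it completes.
import concurrent.futures


def add_action(json_string):
    # Stand-in worker: the real tracker would parse the JSON and record it.
    return None


lines = ['{"action": "jump", "time": 100}', '{"action": "run", "time": 75}']

with concurrent.futures.ThreadPoolExecutor() as executor:
    futures = {executor.submit(add_action, line): line for line in lines}
    for future in concurrent.futures.as_completed(futures):
        assert future.result() is None  # mirrors the test's success check
```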
{ "source": "jhamburg/capstone", "score": 3 }
#### File: foodblog_scraper/spiders/foodNetworkCountSpider.py ```python import scrapy import logging import re class foodNetworkSpider(scrapy.Spider): name = 'foodNetwork_totalRecipes' start_urls = ['http://www.foodnetwork.com/topics/'] def parse(self, response): # tmp = response.css('div.o-Capsule__m-Body li.m-PromoList__a-ListItem a::attr(href)') # tmp3 = tmp[:10] # for href in tmp3: for topicSelector in response.css('div.o-Capsule__m-Body li.m-PromoList__a-ListItem a'): topicName = topicSelector.xpath('./text()').extract_first() if topicName != 'Portobello Mushroom': href = topicSelector.css('a::attr(href)').extract_first() yield response.follow(href, self.parse_topic) def parse_topic(self, response): topicName = response.css('span.o-AssetTitle__a-HeadlineText::text').extract_first() numLinks = response.css('.o-SearchStatistics__a-MaxPage::text').extract_first() yield { 'topic': topicName, 'numLinks': re.search('(\d+)', numLinks).group(1) } ``` #### File: foodblog_scraper/spiders/foodNetworkSpider.py ```python import scrapy import logging import re class foodNetworkSpider(scrapy.Spider): name = 'foodNetwork' start_urls = ['http://www.foodnetwork.com/topics/'] def parse(self, response): topicSelectors = response.css('div.o-Capsule__m-Body li.m-PromoList__a-ListItem a') # topicSelectors = topicSelectors[:10] for topicSelector in topicSelectors: topicName = topicSelector.xpath('./text()').extract_first() # Portobello Mushroom has an issue --- contains over 100,000 recipes and # most are not mushroom recipesf if topicName != 'Portobello Mushroom': href = topicSelector.css('a::attr(href)').extract_first() yield response.follow(href, self.parse_topic) def parse_topic(self, response): """ On the topic page, navigate to actual recipe or if it is a video, navigate to the video page and then to the recipe page. Will also navigate the next button while it exists. """ # follow links to individual recipes. Specifically only chooses recipeResults # since other results may also exist (videos/article) recipeResults = response.css('.o-RecipeResult') videoResults = response.css('.o-SingleVideoResult') def getLinks(resultSelectors): return(resultSelectors.xpath('.//h3[@class="m-MediaBlock__a-Headline"]/a/@href')) for href in getLinks(recipeResults): yield response.follow(href, self.parse_recipe) for href in getLinks(videoResults): yield response.follow(href, self.parse_video_page) # # Topic link may go to a page with a video of the directions instead # # of the actual recipe. 
Need to continue past this middle page # if (href.extract().find('video') > 0): # yield response.follow(href, self.parse_video_page) # follow pagination links pageLink = response.css('a.o-Pagination__a-NextButton::attr(href)').extract_first() if (pageLink.find('www') > 0): yield response.follow(pageLink, self.parse_topic) def parse_video_page(self, response): """ Continue on to recipe page if possible """ # continue to actual recipe videoButtonLink = response.css('a.o-VideoMetadata__a-Button') buttonText = videoButtonLink.xpath('./text()').extract_first() # Run if button actually exists if buttonText: # Makes sure button is to a recipe and not another link if buttonText.lower().find('recipe') > 0: recipeLink = videoButtonLink.xpath('./@href').extract_first() yield response.follow(recipeLink, self.parse_recipe) def parse_recipe(self, response): """ Actually scrape recipe information from the final recipe webpage """ # Utility function for extracting first string and cleaning it def extract_with_xpath(selector, query): res = selector.xpath(query).extract_first() if res: return(res.strip()) return(None) # Utility function for extracting the time def extract_time(timeType, opts, vals): # In case there isn't a time presented if not opts: return(None) # Remove punctuation opts = [re.sub(r'\W+', '', txt.lower()) for txt in opts] # Will check for keyword "active" as well or will return None if not timeType in opts: if timeType == 'prep': timeType = 'active' if not timeType in opts: return(None) else: return(None) # Use pop to get the string instead of a list res = [i.strip() for i,j in zip(vals, opts) if j == timeType] return(res.pop()) # Get helper objects attributionSelector = response.xpath('//*[@data-module="recipe-lead"]') directions = response.xpath("//div[@class and contains(concat(' ', normalize-space(@class), ' '), ' method parbase section ')]//div[@class = 'o-Method__m-Body']/p/text()").extract() tags = response.xpath("//div[@class and contains(concat(' ', normalize-space(@class), ' '), ' parbase section tags')]").css('a.a-Tag::text').extract() timeDiv = attributionSelector.css('.o-Time') if timeDiv: timeDiv = timeDiv[0] timeOpts = timeDiv.xpath('.//dt/text()').extract() timeVals = timeDiv.xpath('.//dd/text()').extract() yield { 'name': extract_with_xpath(attributionSelector, './/*[@class="o-AssetTitle__a-HeadlineText"]/text()'), 'author': extract_with_xpath(attributionSelector, './/span[@class="o-Attribution__a-Name"]//a/text()'), 'totalTime': extract_time('total', timeOpts, timeVals), 'prepTime': extract_time('prep', timeOpts, timeVals), 'cookTime': extract_time('cook', timeOpts, timeVals), 'servings': extract_with_xpath(attributionSelector, './/*[@class="o-RecipeInfo__a-Description"]/text()'), 'ingredients': response.css('.o-Ingredients__a-ListItem label::text').extract(), 'tags': [tag.strip() for tag in tags], 'directions': [txt.strip() for txt in directions] } ```
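A hedged usage sketch for the spiders above: besides the `scrapy crawl` CLI, a spider class can be driven from Python with Scrapy's `CrawlerProcess`. The import path is only inferred from the `#### File:` headers, the `FEEDS` setting requires a reasonably recent Scrapy, and the output filename is arbitrary, so treat all three as assumptions rather than project facts.

```python
# Run the topic-count spider programmatically and dump its items to JSON.
from scrapy.crawler import CrawlerProcess

# Assumed import path based on the file layout shown above.
from foodblog_scraper.spiders.foodNetworkCountSpider import foodNetworkSpider

process = CrawlerProcess(settings={
    "FEEDS": {"topic_counts.json": {"format": "json"}},  # hypothetical output file
})
process.crawl(foodNetworkSpider)
process.start()  # blocks until the crawl finishes
```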
{ "source": "jhamidu1117/ClientRestApplication", "score": 2 }
#### File: ClientRestApplication/ClientApp/DuplicateApp.py ```python from PyQt5 import QtWidgets, QtGui, QtMultimedia, QtCore, QtNetwork from ClientApp.DuplicateView import Ui_MainWindow from datetime import datetime import dateutil.parser import re import sys import os import json class mywindow(QtWidgets.QMainWindow): def __init__(self): super(mywindow, self).__init__() self.ui = Ui_MainWindow() self.ui.setupUi(self) self.scriptDir = os.path.dirname(os.path.realpath(__file__)) url = QtCore.QUrl.fromLocalFile(self.scriptDir + os.path.sep + 'Resources/Alarm.mp3') self.rest_url = "http://127.0.0.1:8000/api/trg-list/" content = QtMultimedia.QMediaContent(url) self.player = QtMultimedia.QMediaPlayer() self.player.setMedia(content) self.setWindowIcon(QtGui.QIcon(self.scriptDir + os.path.sep + 'Resources/Logo2.ico')) self.setWindowTitle('TRG-Duplicate Tracker') self.ui.Enter.clicked.connect(self.clickMethod) self.ui.TrgLineEdit.returnPressed.connect(self.clickMethod) self.currentLocation = '' def clickMethod(self): line = self.ui.TrgLineEdit.text() if re.search('TRG.*', line): if self.currentLocation == '': self.player.play() self.ui.TrgLineEdit.clear() self.req = QtNetwork.QNetworkRequest(QtCore.QUrl(self.rest_url)) self.req.setRawHeader(QtCore.QByteArray(b"Authorization"), QtCore.QByteArray(b"Token <PASSWORD>")) # self.req.deleteLater() self.nam = QtNetwork.QNetworkAccessManager() self.nam.finished.connect(self.handle_response) self.nam.get(self.req) elif re.search('L.*', line): self.ui.LocationLabel.setText('Location: ' + line) self.currentLocation = line self.ui.TrgLineEdit.clear() def handle_response(self, reply): er = reply.error() reply.deleteLater() reply.downloadProgress.connect(self.addProgressbar) if er == QtNetwork.QNetworkReply.NoError: bytes_string = reply.readAll() string = str(bytes_string, 'utf-8') data = json.loads(str(bytes_string, 'utf-8')) print(data) i = 0 progressBar = 0 for v in data: dateformat = dateutil.parser.parse(str(v['timestamp'])) datetime_str = datetime.strftime(dateformat, '%H:%M:%S %m/%d/%y') self.ui.OutputTable.setRowCount(i + 1) self.ui.OutputTable.setItem(i, 0, QtWidgets.QTableWidgetItem(str(v['trg_id']))) self.ui.OutputTable.setItem(i, 1, QtWidgets.QTableWidgetItem(datetime_str)) self.ui.OutputTable.setItem(i, 2, QtWidgets.QTableWidgetItem(str(v['location']))) i += 1 progressBar += 25 self.ui.progressBar.setValue(progressBar) self.ui.TrgLineEdit.clear() self.ui.progressBar.setValue(0) else: print("Error occured: ", er) print(reply.errorString()) self.ui.TrgLineEdit.clear() def addProgressbar(self, bytesReceived, bytesTotal): while bytesReceived != bytesTotal: progress = bytesReceived/bytesTotal self.ui.progressBar.setValue(progress) app = QtWidgets.QApplication([]) application = mywindow() application.show() sys.exit(app.exec()) ```
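`clickMethod` above routes scanner input by prefix: strings containing `TRG` trigger the REST lookup and alarm, while strings matching the `L` pattern update the current location label. The dependency-free sketch below restates just that routing rule; the function name and return values are illustrative and not part of the application.

```python
# Classify a scanned line the same way clickMethod's regex checks do.
import re


def classify_scan(line):
    if re.search("TRG.*", line):
        return ("trg", line)
    if re.search("L.*", line):
        return ("location", line)
    return ("unknown", line)


assert classify_scan("TRG00123")[0] == "trg"
assert classify_scan("L-04-B")[0] == "location"
assert classify_scan("12345")[0] == "unknown"
```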
{ "source": "jhamman/esds-funnel", "score": 2 }
#### File: esds-funnel/funnel/cache.py ```python import enum import json import tempfile import typing import fsspec import pydantic from .metadata_db.schemas import Artifact from .registry import registry from .serializers import Serializer, pick_serializer class DuplicateKeyEnum(str, enum.Enum): skip = 'skip' overwrite = 'overwrite' check_collision = 'check_collision' raise_error = 'raise_error' @pydantic.dataclasses.dataclass class CacheStore: """Implements caching functionality. Support backends (in-memory, local, s3fs, etc...) scheme registered with fsspec. Some backends may require other dependencies. For example to work with S3 cache store, s3fs is required. Parameters ---------- path : str the path to the cache store storage_options : dict the storage options for the cache store readonly : bool if True, the cache store is readonly on_duplicate_key : DuplicateKeyEnum the behavior when a key is duplicated in the cache store """ path: str = tempfile.gettempdir() readonly: bool = False on_duplicate_key: DuplicateKeyEnum = 'skip' storage_options: typing.Dict = None def __post_init_post_parse__(self): self.storage_options = {} if self.storage_options is None else self.storage_options self.mapper = fsspec.get_mapper(self.path, **self.storage_options) self.fs = self.mapper.fs self.raw_path = self.fs._strip_protocol(self.path) self.protocol = self.fs.protocol self._ensure_dir(self.raw_path) def _ensure_dir(self, key: str) -> None: if not self.fs.exists(key): self.fs.makedirs(key, exist_ok=True) def _construct_item_path(self, key) -> str: return f'{self.path}/{key}' def get(self, key: str, serializer: str, **load_kwargs) -> typing.Any: """Returns the value for the key if the key is in the cache store. Parameters ---------- key : str serializer : str The name of the serializer you want to use. The built-in serializers are: - 'auto' (default): automatically choose the serializer based on the type of the value - 'xarray.netcdf': requires xarray and netCDF4 - 'xarray.zarr': requires xarray and zarr You can also register your own serializer via the @funnel.registry.serializers.register decorator. load_kwargs : dict Additional keyword arguments to pass to the serializer when loading artifact from the cache store. Returns ------- value : the value for the key if the key is in the cache store. """ if self.protocol == 'memory': data = self.mapper[key] return json.loads(data) else: serializer = registry.serializers.get(serializer)() return serializer.load(self._construct_item_path(key), **load_kwargs) def __contains__(self, key: str) -> bool: """Returns True if the key is in the cache store.""" return key in self.mapper def keys(self) -> typing.List[str]: """Returns a list of keys in the cache store.""" return list(self.mapper.keys()) def delete(self, key: str, **kwargs: typing.Dict) -> None: """Deletes the key from the cache store. Parameters ---------- key : str kwargs : dict """ self.fs.delete(key, **kwargs) def put( self, key: str, value: typing.Any, serializer: str = 'auto', dump_kwargs: typing.Dict = {}, custom_fields: typing.Dict = {}, ) -> Artifact: """Records and serializes key with its corresponding value in the cache store. Parameters ---------- key : str value : typing.Any serializer : str The name of the serializer you want to use. 
The built-in serializers are: - 'auto' (default): automatically choose the serializer based on the type of the value - 'xarray.netcdf': requires xarray and netCDF4 - 'xarray.zarr': requires xarray and zarr You can also register your own serializer via the @funnel.registry.serializers.register decorator. dump_kwargs : dict Additional keyword arguments to pass to the serializer when dumping artifact to the cache store. custom_fields : dict A dict with types that serialize to json. These fields can be used for searching artifacts in the metadata store. Returns ------- artifact : Artifact an `Artifact` object with corresping asset serialization information """ if not self.readonly: method = getattr(self, f'_put_{self.on_duplicate_key.value}') serializer_name = pick_serializer(value) if serializer == 'auto' else serializer serializer = registry.serializers.get(serializer_name)() artifact = Artifact( key=key, serializer=serializer_name, dump_kwargs=dump_kwargs, custom_fields=custom_fields, ) method(key, value, serializer, **dump_kwargs) return artifact def _put_skip(self, key, value, serializer: Serializer, **serializer_kwargs) -> None: if key not in self: self._put_overwrite(key, value, serializer, **serializer_kwargs) def _put_overwrite(self, key, value, serializer: Serializer, **serializer_kwargs) -> None: with self.fs.transaction: if self.protocol == 'memory': self.mapper[key] = json.dumps(value).encode('utf-8') else: serializer.dump(value, self._construct_item_path(key), **serializer_kwargs) ``` #### File: esds-funnel/tests/test_metadata.py ```python import pandas as pd import pytest import xarray as xr from funnel import CacheStore, MemoryMetadataStore, SQLMetadataStore ds = xr.tutorial.open_dataset('tiny') @pytest.mark.parametrize('metadata_store', [MemoryMetadataStore, SQLMetadataStore]) @pytest.mark.parametrize( 'key, value, serializer, dump_kwargs', [ ('test', {'a': [1, 2, 3], 'b': 'foo'}, 'auto', {}), ('tiny', ds, 'xarray.netcdf', {}), ('tiny_zarr', ds, 'xarray.zarr', {'mode': 'w'}), ], ) def test_memory_metadata_store(metadata_store, key, value, serializer, dump_kwargs): ms = metadata_store(CacheStore()) assert isinstance(ms.df, pd.DataFrame) ms.put(key, value, serializer, dump_kwargs=dump_kwargs) results = ms.get(key) assert type(results) == type(value) ```
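As a hedged usage sketch for `CacheStore` above (assuming the `funnel` package is installed and that its in-memory fsspec backend and `'auto'` serializer handle plain dicts the way the accompanying tests suggest), a round trip might look like this; the store path, key, and payload are all arbitrary.

```python
# Put a JSON-serializable value into an in-memory cache store and read it back.
from funnel import CacheStore

store = CacheStore(path="memory://funnel-demo")  # hypothetical store location
artifact = store.put("config", {"model": "CESM2", "members": [1, 2, 3]})

assert "config" in store
print(artifact.serializer)                       # name chosen by the 'auto' picker
print(store.get("config", artifact.serializer))  # the original dict
```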
{ "source": "jhamman/MetSim", "score": 3 }
#### File: MetSim/metsim/metsim.py ```python import os import time import numpy as np import pandas as pd from multiprocessing import Value, Process import metsim.io from metsim.disaggregate import disaggregate class MetSim(object): """ MetSim handles the distribution of jobs that write to a common file by launching muliple processes and queueing up their writeback so that work can be done while IO is happening. """ def __init__(self, analysis_method, job_list, n_processes): """ Constructor """ # Builds the infrastructure to keep track of jobs self.writable = Value('b', True, lock=False) # Set up the distribution of jobs and create process handles self.method = analysis_method.run n_jobs = len(job_list) job_size = int(n_jobs / min(n_processes, n_jobs)) self.run(self.method, job_list) #self.jobs = [job_list[i:i+job_size] for i in range(0, n_jobs, job_size)] #self.process_handles = [ # Process(target=self.run, args=(self.method, job_list)) # for job_list in self.jobs # ] self.process_handles = [] def run(self, method, job_list): """ Kicks off the disaggregation and queues up data for IO """ for job in job_list: dates = pd.date_range(metsim.start, metsim.stop) forcing = metsim.io.read(job, len(dates)) forcing = forcing.set_index(dates) forcing['day_of_year'] = dates.dayofyear metsim.n_days = len(forcing['day_of_year']) self.method(forcing) # Discard the daily data in favor of hourly data, then write #data = disaggregate(data) #metsim.io.sync_io(metsim.io.write_ascii, forcing, self.writable, # os.path.join(metsim.out_dir, os.path.basename(job))) def launch_processes(self): """ Launches all processes built in the constructor """ for p in self.process_handles: p.start() for p in self.process_handles: p.join() ```
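The commented-out block in `MetSim.__init__` partitions `job_list` into per-process chunks before handing them to worker processes. Since that slicing expression is the heart of the scheduling idea, here it is as a runnable, self-contained snippet; the job names are hypothetical.

```python
# Split a job list into contiguous chunks of roughly n_jobs / n_processes items.
job_list = [f"forcing_{i}.txt" for i in range(10)]  # hypothetical job files
n_processes = 3

n_jobs = len(job_list)
job_size = int(n_jobs / min(n_processes, n_jobs))
chunks = [job_list[i:i + job_size] for i in range(0, n_jobs, job_size)]

for chunk in chunks:
    print(chunk)  # each chunk would be handed to one worker process
```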
{ "source": "jhamman/ndpyramid", "score": 2 }
#### File: ndpyramid/tests/test_pyramids.py ```python import numpy as np import pytest import xarray as xr from zarr.storage import MemoryStore from ndpyramid import pyramid_coarsen, pyramid_regrid, pyramid_reproject from ndpyramid.regrid import make_grid_ds @pytest.fixture def temperature(): ds = xr.tutorial.open_dataset('air_temperature') ds['air'].encoding = {} return ds def test_xarray_coarsened_pyramid(temperature): print(temperature) factors = [4, 2, 1] pyramid = pyramid_coarsen(temperature, dims=('lat', 'lon'), factors=factors, boundary='trim') assert pyramid.ds.attrs['multiscales'] assert len(pyramid.ds.attrs['multiscales'][0]['datasets']) == len(factors) pyramid.to_zarr(MemoryStore()) def test_reprojected_pyramid(temperature): rioxarray = pytest.importorskip("rioxarray") # noqa: F841 levels = 2 temperature = temperature.rio.write_crs('EPSG:4326') pyramid = pyramid_reproject(temperature, levels=2) assert pyramid.ds.attrs['multiscales'] assert len(pyramid.ds.attrs['multiscales'][0]['datasets']) == levels pyramid.to_zarr(MemoryStore()) def test_regridded_pyramid(temperature): xesmf = pytest.importorskip("xesmf") # noqa: F841 pyramid = pyramid_regrid(temperature, levels=2) assert pyramid.ds.attrs['multiscales'] pyramid.to_zarr(MemoryStore()) def test_make_grid_ds(): grid = make_grid_ds(0, pixels_per_tile=8) lon_vals = grid.lon_b.values assert np.all((lon_vals[-1, :] - lon_vals[0, :]) < 0.001) ```
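A hedged usage sketch of `pyramid_coarsen` outside the test suite, assuming `ndpyramid`, `xarray`, and `numpy` are installed; a small synthetic field replaces the tutorial `air_temperature` dataset so nothing needs to be downloaded, and the grid values are arbitrary.

```python
# Build a three-level coarsened pyramid from a toy 2D field.
import numpy as np
import xarray as xr
from ndpyramid import pyramid_coarsen

ds = xr.Dataset(
    {"air": (("lat", "lon"), np.random.rand(16, 32))},
    coords={"lat": np.linspace(-75, 75, 16), "lon": np.linspace(0, 357.5, 32)},
)

pyramid = pyramid_coarsen(ds, dims=("lat", "lon"), factors=[4, 2, 1], boundary="trim")
print(pyramid.ds.attrs["multiscales"][0]["datasets"])  # one entry per factor
```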
{ "source": "jhamman/pangeo-forge", "score": 2 }
#### File: pangeo-forge/pangeo_forge/pipeline.py ```python import os from abc import ABC, abstractmethod from typing import List import fsspec import zarr from prefect import Flow, task from .utils import chunked_iterable class AbstractPipeline(ABC): @property @abstractmethod def sources(self) -> List[str]: pass @property @abstractmethod def targets(self) -> List[str]: pass @abstractmethod def run(self) -> None: pass class XarrayPrefectPipelineMixin: @task def download(self, source_url): target_url = os.path.join(self.cache_location, str(hash(source_url))) # there is probably a better way to do caching! try: fsspec.open(target_url).open() return target_url except FileNotFoundError: pass with fsspec.open(source_url, mode="rb") as source: with fsspec.open(target_url, mode="wb") as target: target.write(source.read()) return target_url @task def combine_and_write(self, sources, target, append_dim, first=True): import xarray as xr # while debugging this, I had itermittent fsspec / hdf5 read errors related to # "trying to read from a closed file" # but they seem to have gone away for now double_open_files = [fsspec.open(url).open() for url in sources] ds = xr.open_mfdataset(double_open_files, combine="nested", concat_dim=self.concat_dim) # by definition, this should be a contiguous chunk ds = ds.chunk({append_dim: len(sources)}) if first: kwargs = dict(mode="w") else: kwargs = dict(mode="a", append_dim=append_dim) mapper = fsspec.get_mapper(target) ds.to_zarr(mapper, **kwargs) @task def consolidate_metadata(self): mapper = fsspec.get_mapper(self.targets) zarr.consolidate_metadata(mapper) @property def flow(self) -> Flow: with Flow("Pangeo-Forge") as flow: cached_sources = [self.download(k) for k in self.sources] first = True write_tasks = [] for source_group in chunked_iterable(cached_sources, self.files_per_chunk): write_task = self.combine_and_write( source_group, self.targets, self.concat_dim, first=first ) write_tasks.append(write_task) first = False cm = self.consolidate_metadata(self.targets) # create dependencies in imperative mode for n in range(1, len(write_tasks)): write_tasks[n].set_upstream(write_tasks[n - 1], flow=flow) cm.set_upstream(write_tasks[-1], flow=flow) return flow def run(self): self.flow.run() ```
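The flow above groups cached files with `chunked_iterable` from `.utils`, which is not shown in this record. The snippet below is a standalone equivalent of that grouping idea, written fresh for illustration rather than copied from `pangeo_forge.utils`, so the actual library implementation may differ.

```python
# Yield fixed-size tuples from any iterable, with a shorter final chunk.
import itertools


def chunked_iterable(iterable, size):
    it = iter(iterable)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            break
        yield chunk


print(list(chunked_iterable(["a", "b", "c", "d", "e"], 2)))
# [('a', 'b'), ('c', 'd'), ('e',)]
```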
{ "source": "jhamman/pilot-hubs", "score": 3 }
#### File: pilot-hubs/deployer/utils.py ```python import os import json import tempfile import subprocess from ruamel.yaml import YAML from ruamel.yaml.scanner import ScannerError from contextlib import contextmanager yaml = YAML(typ='safe', pure=True) @contextmanager def decrypt_file(encrypted_path): """ Provide secure temporary decrypted contents of a given file If file isn't a sops encrypted file, we assume no encryption is used and return the current path. """ # We must first determine if the file is using sops # sops files are JSON/YAML with a `sops` key. So we first check # if the file is valid JSON/YAML, and then if it has a `sops` key with open(encrypted_path) as f: _, ext = os.path.splitext(encrypted_path) # Support the (clearly wrong) people who use .yml instead of .yaml if ext == '.yaml' or ext == '.yml': try: encrypted_data = yaml.load(f) except ScannerError: yield encrypted_path return elif ext == '.json': try: encrypted_data = json.load(f) except json.JSONDecodeError: yield encrypted_path return if 'sops' not in encrypted_data: yield encrypted_path return # If file has a `sops` key, we assume it's sops encrypted with tempfile.NamedTemporaryFile() as f: subprocess.check_call([ 'sops', '--output', f.name, '--decrypt', encrypted_path ]) yield f.name ```
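A hedged usage sketch for `decrypt_file` above: plain YAML/JSON files are yielded unchanged, while sops-encrypted ones are decrypted into a temporary file for the duration of the `with` block (which requires the `sops` binary on `PATH`). The import path and the secrets filename are assumptions for illustration.

```python
# Read a possibly sops-encrypted config without leaving plaintext on disk.
from deployer.utils import decrypt_file, yaml  # assumed module path

with decrypt_file("config/secrets/prod.secret.yaml") as decrypted_path:  # hypothetical file
    with open(decrypted_path) as f:
        config = yaml.load(f)

print(sorted(config))  # plaintext only exists inside the with block
```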
{ "source": "jhamman/rioxarray", "score": 2 }
#### File: rioxarray/rioxarray/rioxarray.py ```python import copy import math from uuid import uuid4 import numpy as np import pyproj import rasterio.warp import xarray from affine import Affine from rasterio.crs import CRS from rasterio.enums import Resampling from rasterio.features import geometry_mask from rasterio.windows import get_data_window from scipy.interpolate import griddata from rioxarray.crs import crs_to_wkt from rioxarray.exceptions import ( DimensionError, DimensionMissingCoordinateError, InvalidDimensionOrder, MissingCRS, NoDataInBounds, OneDimensionalRaster, RioXarrayError, TooManyDimensions, ) FILL_VALUE_NAMES = ("_FillValue", "missing_value", "fill_value", "nodata") UNWANTED_RIO_ATTRS = ("nodatavals", "crs", "is_tiled", "res") DEFAULT_GRID_MAP = "spatial_ref" def affine_to_coords(affine, width, height, x_dim="x", y_dim="y"): """Generate 1d pixel centered coordinates from affine. Based on code from the xarray rasterio backend. Parameters ---------- affine: :obj:`affine.Affine` The affine of the grid. width: int The width of the grid. height: int The height of the grid. x_dim: str, optional The name of the X dimension. Default is 'x'. y_dim: str, optional The name of the Y dimension. Default is 'y'. Returns ------- dict: x and y coordinate arrays. """ x_coords, _ = affine * (np.arange(width) + 0.5, np.zeros(width) + 0.5) _, y_coords = affine * (np.zeros(height) + 0.5, np.arange(height) + 0.5) return {y_dim: y_coords, x_dim: x_coords} def _get_grid_map_name(src_data_array): """Get the grid map name of the variable.""" try: return src_data_array.attrs["grid_mapping"] except KeyError: return DEFAULT_GRID_MAP def _generate_attrs(src_data_array, dst_affine, dst_nodata): # add original attributes new_attrs = copy.deepcopy(src_data_array.attrs) # remove all nodata information for unwanted_attr in FILL_VALUE_NAMES + UNWANTED_RIO_ATTRS: new_attrs.pop(unwanted_attr, None) # add nodata information fill_value = ( src_data_array.rio.nodata if src_data_array.rio.nodata is not None else dst_nodata ) if src_data_array.rio.encoded_nodata is None and fill_value is not None: new_attrs["_FillValue"] = fill_value # add raster spatial information new_attrs["transform"] = tuple(dst_affine)[:6] new_attrs["grid_mapping"] = _get_grid_map_name(src_data_array) return new_attrs def add_xy_grid_meta(coords): """Add x,y metadata to coordinates""" # add metadata to x,y coordinates if "x" in coords: x_coord_attrs = dict(coords["x"].attrs) x_coord_attrs["long_name"] = "x coordinate of projection" x_coord_attrs["standard_name"] = "projection_x_coordinate" coords["x"].attrs = x_coord_attrs elif "longitude" in coords: x_coord_attrs = dict(coords["longitude"].attrs) x_coord_attrs["long_name"] = "longitude" x_coord_attrs["standard_name"] = "longitude" coords["longitude"].attrs = x_coord_attrs if "y" in coords: y_coord_attrs = dict(coords["y"].attrs) y_coord_attrs["long_name"] = "y coordinate of projection" y_coord_attrs["standard_name"] = "projection_y_coordinate" coords["y"].attrs = y_coord_attrs elif "latitude" in coords: x_coord_attrs = dict(coords["latitude"].attrs) x_coord_attrs["long_name"] = "latitude" x_coord_attrs["standard_name"] = "latitude" coords["latitude"].attrs = x_coord_attrs return coords def add_spatial_ref(in_ds, dst_crs, grid_map_name): in_ds.rio.write_crs( input_crs=dst_crs, grid_mapping_name=grid_map_name, inplace=True ) return in_ds def _add_attrs_proj(new_data_array, src_data_array): """Make sure attributes and projection correct""" # make sure dimension information is preserved if 
new_data_array.rio._x_dim is None: new_data_array.rio._x_dim = src_data_array.rio.x_dim if new_data_array.rio._y_dim is None: new_data_array.rio._y_dim = src_data_array.rio.y_dim # make sure attributes preserved new_attrs = _generate_attrs( src_data_array, new_data_array.rio.transform(recalc=True), None ) # remove fill value if it already exists in the encoding # this is for data arrays pulling the encoding from a # source data array instead of being generated anew. if "_FillValue" in new_data_array.encoding: new_attrs.pop("_FillValue", None) new_data_array.rio.set_attrs(new_attrs, inplace=True) # make sure projection added add_xy_grid_meta(new_data_array.coords) new_data_array = add_spatial_ref( new_data_array, src_data_array.rio.crs, _get_grid_map_name(src_data_array) ) # make sure encoding added new_data_array.encoding = src_data_array.encoding.copy() return new_data_array def _warp_spatial_coords(data_array, affine, width, height): """get spatial coords in new transform""" new_spatial_coords = affine_to_coords(affine, width, height) return { "x": xarray.IndexVariable("x", new_spatial_coords["x"]), "y": xarray.IndexVariable("y", new_spatial_coords["y"]), } def _get_nonspatial_coords(src_data_array): coords = {} for coord in set(src_data_array.coords) - { src_data_array.rio.x_dim, src_data_array.rio.y_dim, DEFAULT_GRID_MAP, }: if src_data_array[coord].dims: coords[coord] = xarray.IndexVariable( src_data_array[coord].dims, src_data_array[coord].values, src_data_array[coord].attrs, ) else: coords[coord] = xarray.Variable( src_data_array[coord].dims, src_data_array[coord].values, src_data_array[coord].attrs, ) return coords def _make_coords(src_data_array, dst_affine, dst_width, dst_height, dst_crs): """Generate the coordinates of the new projected `xarray.DataArray`""" coords = _get_nonspatial_coords(src_data_array) new_coords = _warp_spatial_coords(src_data_array, dst_affine, dst_width, dst_height) new_coords.update(coords) return add_xy_grid_meta(new_coords) def _make_dst_affine( src_data_array, src_crs, dst_crs, dst_resolution=None, dst_shape=None ): """Determine the affine of the new projected `xarray.DataArray`""" src_bounds = src_data_array.rio.bounds() src_height, src_width = src_data_array.rio.shape dst_height, dst_width = dst_shape if dst_shape is not None else (None, None) resolution_or_width_height = { k: v for k, v in [ ("resolution", dst_resolution), ("dst_height", dst_height), ("dst_width", dst_width), ] if v is not None } dst_affine, dst_width, dst_height = rasterio.warp.calculate_default_transform( src_crs, dst_crs, src_width, src_height, *src_bounds, **resolution_or_width_height, ) return dst_affine, dst_width, dst_height def _write_metatata_to_raster(raster_handle, xarray_dataset, tags): """ Write the metadata stored in the xarray object to raster metadata """ tags = xarray_dataset.attrs if tags is None else {**xarray_dataset.attrs, **tags} # write scales and offsets try: raster_handle.scales = tags["scales"] except KeyError: try: raster_handle.scales = (tags["scale_factor"],) * raster_handle.count except KeyError: pass try: raster_handle.offsets = tags["offsets"] except KeyError: try: raster_handle.offsets = (tags["add_offset"],) * raster_handle.count except KeyError: pass # filter out attributes that should be written in a different location skip_tags = ( UNWANTED_RIO_ATTRS + FILL_VALUE_NAMES + ("transform", "scales", "scale_factor", "add_offset", "offsets") ) # this is for when multiple values are used # in this case, it will be stored in the raster description if not 
isinstance(tags.get("long_name"), str): skip_tags += ("long_name",) tags = {key: value for key, value in tags.items() if key not in skip_tags} raster_handle.update_tags(**tags) # write band name information long_name = xarray_dataset.attrs.get("long_name") if isinstance(long_name, (tuple, list)): if len(long_name) != raster_handle.count: raise RioXarrayError( "Number of names in the 'long_name' attribute does not equal " "the number of bands." ) for iii, band_description in enumerate(long_name): raster_handle.set_band_description(iii + 1, band_description) else: band_description = long_name or xarray_dataset.name if band_description: for iii in range(raster_handle.count): raster_handle.set_band_description(iii + 1, band_description) def _get_data_var_message(obj): """ Get message for named data variables. """ try: return f" Data variable: {obj.name}" if obj.name else "" except AttributeError: return "" class XRasterBase(object): """This is the base class for the GIS extensions for xarray""" def __init__(self, xarray_obj): self._obj = xarray_obj self._x_dim = None self._y_dim = None # Determine the spatial dimensions of the `xarray.DataArray` if "x" in self._obj.dims and "y" in self._obj.dims: self._x_dim = "x" self._y_dim = "y" elif "longitude" in self._obj.dims and "latitude" in self._obj.dims: self._x_dim = "longitude" self._y_dim = "latitude" # properties self._width = None self._height = None self._crs = None @property def crs(self): """:obj:`rasterio.crs.CRS`: Retrieve projection from `xarray.DataArray` or `xarray.Dataset` """ if self._crs is not None: return None if self._crs is False else self._crs try: # look in grid_mapping grid_mapping_coord = self._obj.attrs.get("grid_mapping", DEFAULT_GRID_MAP) try: self.set_crs( pyproj.CRS.from_cf(self._obj.coords[grid_mapping_coord].attrs), inplace=True, ) except pyproj.exceptions.CRSError: pass except KeyError: try: # look in attrs for 'crs' self.set_crs(self._obj.attrs["crs"], inplace=True) except KeyError: self._crs = False return None return self._crs def _get_obj(self, inplace): """ Get the object to modify. Parameters ---------- inplace: bool If True, returns self. Returns ------- xarray.Dataset or xarray.DataArray: """ if inplace: return self._obj obj_copy = self._obj.copy(deep=True) # preserve attribute information obj_copy.rio._x_dim = self._x_dim obj_copy.rio._y_dim = self._y_dim obj_copy.rio._width = self._width obj_copy.rio._height = self._height obj_copy.rio._crs = self._crs return obj_copy def set_crs(self, input_crs, inplace=True): """ Set the CRS value for the Dataset/DataArray without modifying the dataset/data array. Parameters ---------- input_crs: object Anything accepted by `rasterio.crs.CRS.from_user_input`. inplace: bool, optional If True, it will write to the existing dataset. Default is False. Returns ------- xarray.Dataset or xarray.DataArray: Dataset with crs attribute. """ crs = CRS.from_user_input(crs_to_wkt(input_crs)) obj = self._get_obj(inplace=inplace) obj.rio._crs = crs return obj def write_crs( self, input_crs=None, grid_mapping_name=DEFAULT_GRID_MAP, inplace=False ): """ Write the CRS to the dataset in a CF compliant manner. Parameters ---------- input_crs: object Anything accepted by `rasterio.crs.CRS.from_user_input`. grid_mapping_name: str, optional Name of the coordinate to store the CRS information in. inplace: bool, optional If True, it will write to the existing dataset. Default is False. Returns ------- xarray.Dataset or xarray.DataArray: Modified dataset with CF compliant CRS information. 
""" if input_crs is not None: data_obj = self.set_crs(input_crs, inplace=inplace) else: data_obj = self._get_obj(inplace=inplace) # remove old grid maping coordinate if exists try: del data_obj.coords[grid_mapping_name] except KeyError: pass if data_obj.rio.crs is None: raise MissingCRS( "CRS not found. Please set the CRS with 'set_crs()' or 'write_crs()'." ) # add grid mapping coordinate data_obj.coords[grid_mapping_name] = xarray.Variable((), 0) grid_map_attrs = pyproj.CRS.from_user_input(data_obj.rio.crs).to_cf() # spatial_ref is for compatibility with GDAL crs_wkt = crs_to_wkt(data_obj.rio.crs) grid_map_attrs["spatial_ref"] = crs_wkt grid_map_attrs["crs_wkt"] = crs_wkt data_obj.coords[grid_mapping_name].rio.set_attrs(grid_map_attrs, inplace=True) # add grid mapping attribute to variables if hasattr(data_obj, "data_vars"): for var in data_obj.data_vars: if ( self.x_dim in data_obj[var].dims and self.y_dim in data_obj[var].dims ): data_obj[var].rio.update_attrs( dict(grid_mapping=grid_mapping_name), inplace=True ).rio.set_spatial_dims( x_dim=self.x_dim, y_dim=self.y_dim, inplace=True ) return data_obj.rio.update_attrs( dict(grid_mapping=grid_mapping_name), inplace=True ) def set_attrs(self, new_attrs, inplace=False): """ Set the attributes of the dataset/dataarray and reset rioxarray properties to re-search for them. Parameters ---------- new_attrs: dict A dictionary of new attributes. inplace: bool, optional If True, it will write to the existing dataset. Default is False. Returns ------- xarray.Dataset or xarray.DataArray: Modified dataset with new attributes. """ data_obj = self._get_obj(inplace=inplace) # set the attributes data_obj.attrs = new_attrs # reset rioxarray properties depending # on attributes to be generated data_obj.rio._nodata = None data_obj.rio._crs = None return data_obj def update_attrs(self, new_attrs, inplace=False): """ Update the attributes of the dataset/dataarray and reset rioxarray properties to re-search for them. Parameters ---------- new_attrs: dict A dictionary of new attributes to update with. inplace: bool, optional If True, it will write to the existing dataset. Default is False. Returns ------- xarray.Dataset or xarray.DataArray: Modified dataset with updated attributes. """ data_attrs = dict(self._obj.attrs) data_attrs.update(**new_attrs) return self.set_attrs(data_attrs, inplace=inplace) def set_spatial_dims(self, x_dim, y_dim, inplace=True): """ This sets the spatial dimensions of the dataset. Parameters ---------- x_dim: str The name of the x dimension. y_dim: str The name of the y dimension. inplace: bool, optional If True, it will modify the dataframe in place. Otherwise it will return a modified copy. Returns ------- xarray.Dataset or xarray.DataArray: Dataset with spatial dimensions set. """ def set_dims(obj, in_x_dim, in_y_dim): if in_x_dim in obj.dims: obj.rio._x_dim = x_dim else: raise DimensionError( f"x dimension ({x_dim}) not found.{_get_data_var_message(obj)}" ) if y_dim in obj.dims: obj.rio._y_dim = y_dim else: raise DimensionError( f"y dimension ({x_dim}) not found.{_get_data_var_message(obj)}" ) data_obj = self._get_obj(inplace=inplace) set_dims(data_obj, x_dim, y_dim) return data_obj @property def x_dim(self): if self._x_dim is not None: return self._x_dim raise DimensionError( "x dimension not found. 'set_spatial_dims()' can address this." f"{_get_data_var_message(self._obj)}" ) @property def y_dim(self): if self._y_dim is not None: return self._y_dim raise DimensionError( "x dimension not found. 
'set_spatial_dims()' can address this." f"{_get_data_var_message(self._obj)}" ) @property def width(self): """int: Returns the width of the dataset (x dimension size)""" if self._width is not None: return self._width self._width = self._obj[self.x_dim].size return self._width @property def height(self): """int: Returns the height of the dataset (y dimension size)""" if self._height is not None: return self._height self._height = self._obj[self.y_dim].size return self._height @property def shape(self): """tuple(int, int): Returns the shape (height, width)""" return (self.height, self.width) def isel_window(self, window): """ Use a rasterio.window.Window to select a subset of the data. Parameters ---------- window: :class:`rasterio.window.Window` The window of the dataset to read. Returns ------- :obj:`xarray.Dataset` | :obj:`xarray.DataArray`: The data in the window. """ (row_start, row_stop), (col_start, col_stop) = window.toranges() row_slice = slice(int(math.floor(row_start)), int(math.ceil(row_stop))) col_slice = slice(int(math.floor(col_start)), int(math.ceil(col_stop))) return self._obj.isel( {self.y_dim: row_slice, self.x_dim: col_slice} ).rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True) @xarray.register_dataarray_accessor("rio") class RasterArray(XRasterBase): """This is the GIS extension for :class:`xarray.DataArray`""" def __init__(self, xarray_obj): super(RasterArray, self).__init__(xarray_obj) # properties self._nodata = None self._count = None def set_nodata(self, input_nodata, inplace=True): """ Set the nodata value for the DataArray without modifying the data array. Parameters ---------- input_nodata: object Valid nodata for dtype. inplace: bool, optional If True, it will write to the existing dataset. Default is False. Returns ------- xarray.DataArray: Dataset with nodata attribute set. """ obj = self._get_obj(inplace=inplace) obj.rio._nodata = input_nodata return obj def write_nodata(self, input_nodata, inplace=False): """ Write the nodata to the DataArray in a CF compliant manner. Parameters ---------- input_nodata: object Nodata value for the DataArray. If input_nodata is None, it will remove the _FillValue attribute. inplace: bool, optional If True, it will write to the existing DataArray. Default is False. Returns ------- xarray.DataArray: Modified DataArray with CF compliant nodata information. 
""" data_obj = self._get_obj(inplace=inplace) input_nodata = False if input_nodata is None else input_nodata if input_nodata is not False: data_obj.rio.update_attrs(dict(_FillValue=input_nodata), inplace=True) else: new_vars = dict(data_obj.attrs) new_vars.pop("_FillValue", None) data_obj.rio.set_attrs(new_vars, inplace=True) data_obj.rio.set_nodata(input_nodata, inplace=True) return data_obj @property def encoded_nodata(self): """Return the encoded nodata value for the dataset if encoded.""" return self._obj.encoding.get("_FillValue") @property def nodata(self): """Get the nodata value for the dataset.""" if self._nodata is not None: return None if self._nodata is False else self._nodata if self.encoded_nodata is not None: self._nodata = np.nan else: self._nodata = self._obj.attrs.get( "_FillValue", self._obj.attrs.get( "missing_value", self._obj.attrs.get("fill_value", self._obj.attrs.get("nodata")), ), ) # look in places used by `xarray.open_rasterio` if self._nodata is None: try: self._nodata = self._obj._file_obj.acquire().nodata except AttributeError: try: self._nodata = self._obj.attrs["nodatavals"][0] except (KeyError, IndexError): pass if self._nodata is None: self._nodata = False return None return self._nodata def _cached_transform(self): """ Get the transform from attrs or property. """ try: return Affine(*self._obj.attrs["transform"][:6]) except KeyError: pass return None def resolution(self, recalc=False): """Determine the resolution of the `xarray.DataArray` Parameters ---------- recalc: bool, optional Will force the resolution to be recalculated instead of using the transform attribute. """ transform = self._cached_transform() if ( not recalc or self.width == 1 or self.height == 1 ) and transform is not None: resolution_x = transform.a resolution_y = transform.e return resolution_x, resolution_y # if the coordinates of the spatial dimensions are missing # use the cached transform resolution try: left, bottom, right, top = self._internal_bounds() except DimensionMissingCoordinateError: if transform is None: raise resolution_x = transform.a resolution_y = transform.e return resolution_x, resolution_y if self.width == 1 or self.height == 1: raise OneDimensionalRaster( "Only 1 dimenional array found. Cannot calculate the resolution." f"{_get_data_var_message(self._obj)}" ) resolution_x = (right - left) / (self.width - 1) resolution_y = (bottom - top) / (self.height - 1) return resolution_x, resolution_y def _internal_bounds(self): """Determine the internal bounds of the `xarray.DataArray`""" if self.x_dim not in self._obj.coords: raise DimensionMissingCoordinateError(f"{self.x_dim} missing coordinates.") elif self.y_dim not in self._obj.coords: raise DimensionMissingCoordinateError(f"{self.y_dim} missing coordinates.") left = float(self._obj[self.x_dim][0]) right = float(self._obj[self.x_dim][-1]) top = float(self._obj[self.y_dim][0]) bottom = float(self._obj[self.y_dim][-1]) return left, bottom, right, top def _check_dimensions(self): """ This function validates that the dimensions 2D/3D and they are are in the proper order. Returns ------- str or None: Name extra dimension. """ extra_dims = list(set(list(self._obj.dims)) - set([self.x_dim, self.y_dim])) if len(extra_dims) > 1: raise TooManyDimensions( "Only 2D and 3D data arrays supported." f"{_get_data_var_message(self._obj)}" ) elif extra_dims and self._obj.dims != (extra_dims[0], self.y_dim, self.x_dim): raise InvalidDimensionOrder( "Invalid dimension order. Expected order: {0}. 
" "You can use `DataArray.transpose{0}`" " to reorder your dimensions.".format( (extra_dims[0], self.y_dim, self.x_dim) ) + f"{_get_data_var_message(self._obj)}" ) elif not extra_dims and self._obj.dims != (self.y_dim, self.x_dim): raise InvalidDimensionOrder( "Invalid dimension order. Expected order: {0}" "You can use `DataArray.transpose{0}` " "to reorder your dimensions.".format((self.y_dim, self.x_dim)) + f"{_get_data_var_message(self._obj)}" ) return extra_dims[0] if extra_dims else None @property def count(self): if self._count is not None: return self._count extra_dim = self._check_dimensions() self._count = 1 if extra_dim is not None: self._count = self._obj[extra_dim].size return self._count def bounds(self, recalc=False): """Determine the bounds of the `xarray.DataArray` Parameters ---------- recalc: bool, optional Will force the bounds to be recalculated instead of using the transform attribute. Returns ------- left, bottom, right, top: float Outermost coordinates. """ resolution_x, resolution_y = self.resolution(recalc=recalc) try: # attempt to get bounds from xarray coordinate values left, bottom, right, top = self._internal_bounds() left -= resolution_x / 2.0 right += resolution_x / 2.0 top -= resolution_y / 2.0 bottom += resolution_y / 2.0 except DimensionMissingCoordinateError: transform = self._cached_transform() left = transform.c top = transform.f right = left + resolution_x * self.width bottom = top + resolution_y * self.height return left, bottom, right, top def transform_bounds(self, dst_crs, densify_pts=21, recalc=False): """Transform bounds from src_crs to dst_crs. Optionally densifying the edges (to account for nonlinear transformations along these edges) and extracting the outermost bounds. Note: this does not account for the antimeridian. Parameters ---------- dst_crs: str, :obj:`rasterio.crs.CRS`, or dict Target coordinate reference system. densify_pts: uint, optional Number of points to add to each edge to account for nonlinear edges produced by the transform process. Large numbers will produce worse performance. Default: 21 (gdal default). recalc: bool, optional Will force the bounds to be recalculated instead of using the transform attribute. Returns ------- left, bottom, right, top: float Outermost coordinates in target coordinate reference system. """ return rasterio.warp.transform_bounds( self.crs, dst_crs, *self.bounds(recalc=recalc), densify_pts=densify_pts ) def transform(self, recalc=False): """Determine the affine of the `xarray.DataArray`""" src_left, _, _, src_top = self.bounds(recalc=recalc) src_resolution_x, src_resolution_y = self.resolution(recalc=recalc) return Affine.translation(src_left, src_top) * Affine.scale( src_resolution_x, src_resolution_y ) def reproject( self, dst_crs, resolution=None, shape=None, transform=None, resampling=Resampling.nearest, ): """ Reproject :class:`xarray.DataArray` objects Powered by `rasterio.warp.reproject` .. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported. Requires either a grid mapping variable with 'spatial_ref' or a 'crs' attribute to be set containing a valid CRS. If using a WKT (e.g. from spatiareference.org), make sure it is an OGC WKT. .. versionadded:: 0.0.27 shape .. versionadded:: 0.0.28 transform Parameters ---------- dst_crs: str OGC WKT string or Proj.4 string. resolution: float or tuple(float, float), optional Size of a destination pixel in destination projection units (e.g. degrees or metres). 
shape: tuple(int, int), optional Shape of the destination in pixels (dst_height, dst_width). Cannot be used together with resolution. transform, optional The destination transform. resampling: Resampling method, optional See rasterio.warp.reproject for more details. Returns ------- :class:`xarray.DataArray`: A reprojected DataArray. """ if resolution is not None and (shape is not None or transform is not None): raise RioXarrayError("resolution cannot be used with shape or transform.") if self.crs is None: raise MissingCRS( "CRS not found. Please set the CRS with 'set_crs()' or 'write_crs()'." f"{_get_data_var_message(self._obj)}" ) src_affine = self.transform(recalc=True) if transform is None: dst_affine, dst_width, dst_height = _make_dst_affine( self._obj, self.crs, dst_crs, resolution, shape ) else: dst_affine = transform if shape is not None: dst_height, dst_width = shape else: dst_height, dst_width = self.shape extra_dim = self._check_dimensions() if extra_dim: dst_data = np.zeros( (self._obj[extra_dim].size, dst_height, dst_width), dtype=self._obj.dtype.type, ) else: dst_data = np.zeros((dst_height, dst_width), dtype=self._obj.dtype.type) try: dst_nodata = self._obj.dtype.type( self.nodata if self.nodata is not None else -9999 ) except ValueError: # if integer, set nodata to -9999 dst_nodata = self._obj.dtype.type(-9999) src_nodata = self._obj.dtype.type( self.nodata if self.nodata is not None else dst_nodata ) rasterio.warp.reproject( source=np.copy(self._obj.load().data), destination=dst_data, src_transform=src_affine, src_crs=self.crs, src_nodata=src_nodata, dst_transform=dst_affine, dst_crs=dst_crs, dst_nodata=dst_nodata, resampling=resampling, ) # add necessary attributes new_attrs = _generate_attrs(self._obj, dst_affine, dst_nodata) # make sure dimensions with coordinates renamed to x,y dst_dims = [] for dim in self._obj.dims: if dim == self.x_dim: dst_dims.append("x") elif dim == self.y_dim: dst_dims.append("y") else: dst_dims.append(dim) xda = xarray.DataArray( name=self._obj.name, data=dst_data, coords=_make_coords(self._obj, dst_affine, dst_width, dst_height, dst_crs), dims=tuple(dst_dims), attrs=new_attrs, ) xda.encoding = self._obj.encoding return add_spatial_ref(xda, dst_crs, DEFAULT_GRID_MAP) def reproject_match(self, match_data_array, resampling=Resampling.nearest): """ Reproject a DataArray object to match the resolution, projection, and region of another DataArray. Powered by `rasterio.warp.reproject` .. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported. Requires either a grid mapping variable with 'spatial_ref' or a 'crs' attribute to be set containing a valid CRS. If using a WKT (e.g. from spatiareference.org), make sure it is an OGC WKT. Parameters ---------- match_data_array: :obj:`xarray.DataArray` DataArray of the target resolution and projection. resampling: Resampling method, optional See rasterio.warp.reproject for more details. Returns -------- :obj:`xarray.DataArray` Contains the data from the src_data_array, reprojected to match match_data_array. """ dst_crs = crs_to_wkt(match_data_array.rio.crs) return self.reproject( dst_crs, transform=match_data_array.rio.transform(recalc=True), shape=match_data_array.rio.shape, resampling=resampling, ) def slice_xy(self, minx, miny, maxx, maxy): """Slice the array by x,y bounds. Parameters ---------- minx: float Minimum bound for x coordinate. miny: float Minimum bound for y coordinate. maxx: float Maximum bound for x coordinate. maxy: float Maximum bound for y coordinate. 
Returns ------- DataArray: A sliced :class:`xarray.DataArray` object. """ left, bottom, right, top = self._internal_bounds() if top > bottom: y_slice = slice(maxy, miny) else: y_slice = slice(miny, maxy) if left > right: x_slice = slice(maxx, minx) else: x_slice = slice(minx, maxx) subset = self._obj.sel( {self.x_dim: x_slice, self.y_dim: y_slice} ).rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True) subset.attrs["transform"] = tuple(self.transform(recalc=True)) return subset def clip_box(self, minx, miny, maxx, maxy, auto_expand=False, auto_expand_limit=3): """Clip the :class:`xarray.DataArray` by a bounding box. Parameters ---------- minx: float Minimum bound for x coordinate. miny: float Minimum bound for y coordinate. maxx: float Maximum bound for x coordinate. maxy: float Maximum bound for y coordinate. auto_expand: bool If True, it will expand clip search if only 1D raster found with clip. auto_expand_limit: int maximum number of times the clip will be retried before raising an exception. Returns ------- DataArray: A clipped :class:`xarray.DataArray` object. """ if self.width == 1 or self.height == 1: raise OneDimensionalRaster( "At least one of the raster x,y coordinates has only one point." f"{_get_data_var_message(self._obj)}" ) resolution_x, resolution_y = self.resolution() clip_minx = minx - abs(resolution_x) / 2.0 clip_miny = miny - abs(resolution_y) / 2.0 clip_maxx = maxx + abs(resolution_x) / 2.0 clip_maxy = maxy + abs(resolution_y) / 2.0 cl_array = self.slice_xy(clip_minx, clip_miny, clip_maxx, clip_maxy) if cl_array.rio.width < 1 or cl_array.rio.height < 1: raise NoDataInBounds( f"No data found in bounds.{_get_data_var_message(self._obj)}" ) if cl_array.rio.width == 1 or cl_array.rio.height == 1: if auto_expand and auto_expand < auto_expand_limit: return self.clip_box( clip_minx, clip_miny, clip_maxx, clip_maxy, auto_expand=int(auto_expand) + 1, auto_expand_limit=auto_expand_limit, ) raise OneDimensionalRaster( "At least one of the clipped raster x,y coordinates" " has only one point." f"{_get_data_var_message(self._obj)}" ) # make sure correct attributes preserved & projection added _add_attrs_proj(cl_array, self._obj) return cl_array def clip(self, geometries, crs, all_touched=False, drop=True, invert=False): """ Crops a :class:`xarray.DataArray` by geojson like geometry dicts. Powered by `rasterio.features.geometry_mask`. Parameters ---------- geometries: list A list of geojson geometry dicts. crs: :obj:`rasterio.crs.CRS` The CRS of the input geometries. all_touched : bool, optional If True, all pixels touched by geometries will be burned in. If false, only pixels whose center is within the polygon or that are selected by Bresenham's line algorithm will be burned in. drop: bool, optional If True, drop the data outside of the extent of the mask geoemtries Otherwise, it will return the same raster with the data masked. Default is True. invert: boolean, optional If False, pixels that do not overlap shapes will be set as nodata. Otherwise, pixels that overlap the shapes will be set as nodata. False by default. Returns ------- DataArray: A clipped :class:`xarray.DataArray` object. Examples: >>> geometry = ''' {"type": "Polygon", ... "coordinates": [ ... [[-94.07955380199459, 41.69085871273774], ... [-94.06082436942204, 41.69103313774798], ... [-94.06063203899649, 41.67932439500822], ... [-94.07935807746362, 41.679150041277325], ... 
[-94.07955380199459, 41.69085871273774]]]}''' >>> cropping_geometries = [geojson.loads(geometry)] >>> xds = xarray.open_rasterio('cool_raster.tif') >>> cropped = xds.rio.clip(geometries=cropping_geometries, crs=4326) """ if self.crs is None: raise MissingCRS( "CRS not found. Please set the CRS with 'set_crs()' or 'write_crs()'." f"{_get_data_var_message(self._obj)}" ) dst_crs = CRS.from_user_input(crs_to_wkt(crs)) if self.crs != dst_crs: geometries = [ rasterio.warp.transform_geom(dst_crs, self.crs, geometry) for geometry in geometries ] clip_mask_arr = geometry_mask( geometries=geometries, out_shape=(int(self.height), int(self.width)), transform=self.transform(recalc=True), invert=not invert, all_touched=all_touched, ) clip_mask_xray = xarray.DataArray( clip_mask_arr, coords={ self.y_dim: self._obj.coords[self.y_dim], self.x_dim: self._obj.coords[self.x_dim], }, dims=(self.y_dim, self.x_dim), ) cropped_ds = self._obj.where(clip_mask_xray) if drop: cropped_ds.rio.set_spatial_dims( x_dim=self.x_dim, y_dim=self.y_dim, inplace=True ) cropped_ds = cropped_ds.rio.isel_window( get_data_window(np.ma.masked_array(clip_mask_arr, ~clip_mask_arr)) ) if self.nodata is not None and not np.isnan(self.nodata): cropped_ds = cropped_ds.fillna(self.nodata) cropped_ds = cropped_ds.astype(self._obj.dtype) if ( cropped_ds.coords[self.x_dim].size < 1 or cropped_ds.coords[self.y_dim].size < 1 ): raise NoDataInBounds( f"No data found in bounds.{_get_data_var_message(self._obj)}" ) # make sure correct attributes preserved & projection added _add_attrs_proj(cropped_ds, self._obj) return cropped_ds def _interpolate_na(self, src_data, method="nearest"): """ This method uses scipy.interpolate.griddata to interpolate missing data. Parameters ---------- method: {‘linear’, ‘nearest’, ‘cubic’}, optional The method to use for interpolation in `scipy.interpolate.griddata`. Returns ------- :class:`numpy.ndarray`: An interpolated :class:`numpy.ndarray`. """ src_data_flat = np.copy(src_data).flatten() try: data_isnan = np.isnan(self.nodata) except TypeError: data_isnan = False if not data_isnan: data_bool = src_data_flat != self.nodata else: data_bool = ~np.isnan(src_data_flat) if not data_bool.any(): return src_data x_coords, y_coords = np.meshgrid( self._obj.coords[self.x_dim].values, self._obj.coords[self.y_dim].values ) return griddata( points=(x_coords.flatten()[data_bool], y_coords.flatten()[data_bool]), values=src_data_flat[data_bool], xi=(x_coords, y_coords), method=method, fill_value=self.nodata, ) def interpolate_na(self, method="nearest"): """ This method uses scipy.interpolate.griddata to interpolate missing data. Parameters ---------- method: {‘linear’, ‘nearest’, ‘cubic’}, optional The method to use for interpolation in `scipy.interpolate.griddata`. Returns ------- :class:`xarray.DataArray`: An interpolated :class:`xarray.DataArray` object. 
""" extra_dim = self._check_dimensions() if extra_dim: interp_data = [] for _, sub_xds in self._obj.groupby(extra_dim): interp_data.append( self._interpolate_na(sub_xds.load().data, method=method) ) interp_data = np.array(interp_data) else: interp_data = self._interpolate_na(self._obj.load().data, method=method) interp_array = xarray.DataArray( name=self._obj.name, data=interp_data, coords=self._obj.coords, dims=self._obj.dims, attrs=self._obj.attrs, ) interp_array.encoding = self._obj.encoding # make sure correct attributes preserved & projection added _add_attrs_proj(interp_array, self._obj) return interp_array def to_raster( self, raster_path, driver="GTiff", dtype=None, tags=None, windowed=False, recalc_transform=True, **profile_kwargs, ): """ Export the DataArray to a raster file. Parameters ---------- raster_path: str The path to output the raster to. driver: str, optional The name of the GDAL/rasterio driver to use to export the raster. Default is "GTiff". dtype: str, optional The data type to write the raster to. Default is the datasets dtype. tags: dict, optional A dictionary of tags to write to the raster. windowed: bool, optional If True, it will write using the windows of the output raster. This only works if the output raster is tiled. As such, if you set this to True, the output raster will be tiled. Default is False. **profile_kwargs Additional keyword arguments to pass into writing the raster. The nodata, transform, crs, count, width, and height attributes are ignored. """ dtype = str(self._obj.dtype) if dtype is None else dtype # get the output profile from the rasterio object # if opened with xarray.open_rasterio() try: out_profile = self._obj._file_obj.acquire().profile except AttributeError: out_profile = {} out_profile.update(profile_kwargs) # filter out the generated attributes out_profile = { key: value for key, value in out_profile.items() if key not in ( "driver", "height", "width", "crs", "transform", "nodata", "count", "dtype", ) } with rasterio.open( raster_path, "w", driver=driver, height=int(self.height), width=int(self.width), count=int(self.count), dtype=dtype, crs=self.crs, transform=self.transform(recalc=recalc_transform), nodata=( self.encoded_nodata if self.encoded_nodata is not None else self.nodata ), **out_profile, ) as dst: _write_metatata_to_raster(dst, self._obj, tags) # write data to raster if windowed: window_iter = dst.block_windows(1) else: window_iter = [(None, None)] for _, window in window_iter: if window is not None: out_data = self.isel_window(window) else: out_data = self._obj if self.encoded_nodata is not None: out_data = out_data.fillna(self.encoded_nodata) data = out_data.astype(dtype).load().data if data.ndim == 2: dst.write(data, 1, window=window) else: dst.write(data, window=window) @xarray.register_dataset_accessor("rio") class RasterDataset(XRasterBase): """This is the GIS extension for :class:`xarray.Dataset`""" @property def vars(self): """list: Returns non-coordinate varibles""" return list(self._obj.data_vars) @property def crs(self): """:obj:`rasterio.crs.CRS`: Retrieve projection from `xarray.Dataset` """ if self._crs is not None: return None if self._crs is False else self._crs self._crs = super().crs if self._crs is not None: return self._crs for var in self.vars: crs = self._obj[var].rio.crs if crs is not None: self._crs = crs break else: self._crs = False return None return self._crs def reproject( self, dst_crs, resolution=None, shape=None, transform=None, resampling=Resampling.nearest, ): """ Reproject 
:class:`xarray.Dataset` objects .. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported. Requires either a grid mapping variable with 'spatial_ref' or a 'crs' attribute to be set containing a valid CRS. If using a WKT (e.g. from spatiareference.org), make sure it is an OGC WKT. .. versionadded:: 0.0.27 shape .. versionadded:: 0.0.28 transform Parameters ---------- dst_crs: str OGC WKT string or Proj.4 string. resolution: float or tuple(float, float), optional Size of a destination pixel in destination projection units (e.g. degrees or metres). shape: tuple(int, int), optional Shape of the destination in pixels (dst_height, dst_width). Cannot be used together with resolution. transform, optional The destination transform. resampling: Resampling method, optional See rasterio.warp.reproject for more details. Returns -------- :class:`xarray.Dataset`: A reprojected Dataset. """ resampled_dataset = xarray.Dataset(attrs=self._obj.attrs) for var in self.vars: resampled_dataset[var] = ( self._obj[var] .rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True) .rio.reproject( dst_crs, resolution=resolution, shape=shape, transform=transform, resampling=resampling, ) ) return resampled_dataset def reproject_match(self, match_data_array, resampling=Resampling.nearest): """ Reproject a Dataset object to match the resolution, projection, and region of another DataArray. .. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported. Requires either a grid mapping variable with 'spatial_ref' or a 'crs' attribute to be set containing a valid CRS. If using a WKT (e.g. from spatiareference.org), make sure it is an OGC WKT. Parameters ---------- match_data_array: :obj:`xarray.DataArray` DataArray of the target resolution and projection. resampling: Resampling method, optional See rasterio.warp.reproject for more details. Returns -------- :obj:`xarray.Dataset` Contains the data from the src_data_array, reprojected to match match_data_array. """ resampled_dataset = xarray.Dataset(attrs=self._obj.attrs) for var in self.vars: resampled_dataset[var] = ( self._obj[var] .rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True) .rio.reproject_match(match_data_array, resampling=resampling) ) return resampled_dataset.rio.set_spatial_dims( x_dim=self.x_dim, y_dim=self.y_dim, inplace=True ) def clip_box(self, minx, miny, maxx, maxy, auto_expand=False, auto_expand_limit=3): """Clip the :class:`xarray.Dataset` by a bounding box. .. warning:: Only works if all variables in the dataset have the same coordinates. Parameters ---------- minx: float Minimum bound for x coordinate. miny: float Minimum bound for y coordinate. maxx: float Maximum bound for x coordinate. maxy: float Maximum bound for y coordinate. auto_expand: bool If True, it will expand clip search if only 1D raster found with clip. auto_expand_limit: int maximum number of times the clip will be retried before raising an exception. Returns ------- DataArray: A clipped :class:`xarray.Dataset` object. 
""" clipped_dataset = xarray.Dataset(attrs=self._obj.attrs) for var in self.vars: clipped_dataset[var] = ( self._obj[var] .rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True) .rio.clip_box( minx, miny, maxx, maxy, auto_expand=auto_expand, auto_expand_limit=auto_expand_limit, ) ) return clipped_dataset.rio.set_spatial_dims( x_dim=self.x_dim, y_dim=self.y_dim, inplace=True ) def clip(self, geometries, crs, all_touched=False, drop=True, invert=False): """ Crops a :class:`xarray.Dataset` by geojson like geometry dicts. .. warning:: Only works if all variables in the dataset have the same coordinates. Powered by `rasterio.features.geometry_mask`. Parameters ---------- geometries: list A list of geojson geometry dicts. crs: :obj:`rasterio.crs.CRS` The CRS of the input geometries. all_touched : boolean, optional If True, all pixels touched by geometries will be burned in. If false, only pixels whose center is within the polygon or that are selected by Bresenham's line algorithm will be burned in. drop: bool, optional If True, drop the data outside of the extent of the mask geoemtries Otherwise, it will return the same raster with the data masked. Default is True. invert: boolean, optional If False, pixels that do not overlap shapes will be set as nodata. Otherwise, pixels that overlap the shapes will be set as nodata. False by default. Returns ------- Dataset: A clipped :class:`xarray.Dataset` object. Examples: >>> geometry = ''' {"type": "Polygon", ... "coordinates": [ ... [[-94.07955380199459, 41.69085871273774], ... [-94.06082436942204, 41.69103313774798], ... [-94.06063203899649, 41.67932439500822], ... [-94.07935807746362, 41.679150041277325], ... [-94.07955380199459, 41.69085871273774]]]}''' >>> cropping_geometries = [geojson.loads(geometry)] >>> xds = xarray.open_rasterio('cool_raster.tif') >>> cropped = xds.rio.clip(geometries=cropping_geometries, crs=4326) """ clipped_dataset = xarray.Dataset(attrs=self._obj.attrs) for var in self.vars: clipped_dataset[var] = ( self._obj[var] .rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True) .rio.clip( geometries, crs=crs, all_touched=all_touched, drop=drop, invert=invert, ) ) return clipped_dataset.rio.set_spatial_dims( x_dim=self.x_dim, y_dim=self.y_dim, inplace=True ) def interpolate_na(self, method="nearest"): """ This method uses `scipy.interpolate.griddata` to interpolate missing data. Parameters ---------- method: {‘linear’, ‘nearest’, ‘cubic’}, optional The method to use for interpolation in `scipy.interpolate.griddata`. Returns ------- :class:`xarray.DataArray`: An interpolated :class:`xarray.DataArray` object. """ interpolated_dataset = xarray.Dataset(attrs=self._obj.attrs) for var in self.vars: interpolated_dataset[var] = ( self._obj[var] .rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True) .rio.interpolate_na(method=method) ) return interpolated_dataset.rio.set_spatial_dims( x_dim=self.x_dim, y_dim=self.y_dim, inplace=True ) def to_raster( self, raster_path, driver="GTiff", dtype=None, tags=None, windowed=False, recalc_transform=True, **profile_kwargs, ): """ Export the Dataset to a raster file. Only works with 2D data. Parameters ---------- raster_path: str The path to output the raster to. driver: str, optional The name of the GDAL/rasterio driver to use to export the raster. Default is "GTiff". dtype: str, optional The data type to write the raster to. Default is the datasets dtype. tags: dict, optional A dictionary of tags to write to the raster. 
windowed: bool, optional If True, it will write using the windows of the output raster. This only works if the output raster is tiled. As such, if you set this to True, the output raster will be tiled. Default is False. **profile_kwargs Additional keyword arguments to pass into writing the raster. The nodata, transform, crs, count, width, and height attributes are ignored. """ variable_dim = "band_{}".format(uuid4()) data_array = self._obj.to_array(dim=variable_dim) # write data array names to raster data_array.attrs["long_name"] = data_array[variable_dim].values.tolist() # ensure raster metadata preserved scales = [] offsets = [] nodatavals = [] crs_list = [] for data_var in data_array[variable_dim].values: scales.append(self._obj[data_var].attrs.get("scale_factor", 1.0)) offsets.append(self._obj[data_var].attrs.get("add_offset", 0.0)) nodatavals.append(self._obj[data_var].rio.nodata) crs_list.append(self._obj[data_var].rio.crs) data_array.attrs["scales"] = scales data_array.attrs["offsets"] = offsets nodata = nodatavals[0] if ( all(nodataval == nodata for nodataval in nodatavals) or np.isnan(nodatavals).all() ): data_array.rio.write_nodata(nodata, inplace=True) else: raise RioXarrayError( "All nodata values must be the same when exporting to raster. " "Current values: {}".format(nodatavals) ) crs = crs_list[0] if all(crs_i == crs for crs_i in crs_list): data_array.rio.write_crs(crs, inplace=True) else: raise RioXarrayError( "All CRS must be the same when exporting to raster. " "Current values: {}".format(crs_list) ) # write it to a raster data_array.rio.to_raster( raster_path=raster_path, driver=driver, dtype=dtype, tags=tags, windowed=windowed, recalc_transform=recalc_transform, **profile_kwargs, ) ```
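The accessor methods defined above (`reproject`, `reproject_match`, `clip_box`, `clip`, `interpolate_na`, `to_raster`) are all reached through the `.rio` namespace once the module is imported. The snippet below is a minimal usage sketch rather than an excerpt from the project: the grid values and CRS strings are illustrative, and it assumes the accessor ships as the `rioxarray` package (with `rasterio` installed) and that the companion `write_crs()` helper mentioned in the error messages above is available to tag the input grid.

```python
import numpy as np
import xarray as xr
import rioxarray  # noqa: F401 -- registers the .rio accessor shown above

# Illustrative single-band raster on a regular lat/lon grid.
da = xr.DataArray(
    np.random.rand(4, 5).astype("float32"),
    dims=("y", "x"),
    coords={"y": np.linspace(41.70, 41.67, 4), "x": np.linspace(-94.08, -94.04, 5)},
    name="example",
)
da = da.rio.write_crs("EPSG:4326")  # assumed helper from the same accessor

# Reproject to a projected CRS (a Proj.4 string, per the docstring above) ...
utm15n = "+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs"
projected = da.rio.reproject(utm15n)

# ... then snap another array back onto the original lat/lon grid.
matched = projected.rio.reproject_match(da)
print(projected.rio.crs, matched.shape)
```

`clip_box` and `clip` follow the same pattern, taking either x/y bounds or geojson-like geometries together with the CRS those geometries are expressed in.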
{ "source": "jhamman/storylines_workflow", "score": 3 }
#### File: storylines_workflow/tools/prms.py
```python
from __future__ import print_function

import time as tm
from getpass import getuser

import pandas as pd
import xarray as xr
import numpy as np

now = tm.ctime(tm.time())
user = getuser()

attrs = {'pr': {'units': 'in', 'long_name': 'precipitation'},
         'tmin': {'units': 'F', 'long_name': 'minimum daily temperature'},
         'tmax': {'units': 'F', 'long_name': 'maximum daily temperature'},
         'sw': {'units': 'Langley d-1', 'long_name': 'shortwave flux'}}

encoding = {'pr': {'_FillValue': 0.},
            'tmin': {'_FillValue': -99.},
            'tmax': {'_FillValue': -99.},
            'sw': {'_FillValue': 0.}}


def read_grid_file(filename):
    '''Read the hru, lat, lon columns from a tab-delimited text file'''
    df = pd.read_csv(filename, sep='\t', header=None,
                     names=['hru', 'lat', 'lon'])
    return df


def extract_nc(ncin, grid_df, ncout,
               varnames=['prec', 't_max', 't_min', 'shortwave']):
    '''
    Parameters
    ----------
    ncin : str
        input netCDF
    grid_df : Pandas.DataFrame
        list of selected grid hru, lat, lon
    ncout : str
        output subset netCDF
    varnames : list
        variables to subset
    '''
    print('opening %s' % ncin)
    ds = xr.open_mfdataset(ncin)
    print(ds)

    print('subsetting and then loading')
    # subset the dataset now
    lats = xr.Variable('hru', grid_df['lat'])
    lons = xr.Variable('hru', grid_df['lon'])
    print(lats, lons, ds[varnames], flush=True)
    subset = ds[varnames].sel(lat=lats, lon=lons, method='nearest')
    subset.coords['hru'] = xr.Variable(
        'hru', np.arange(1, len(grid_df['hru']) + 1))
    subset['hru'].attrs = {'description': 'HRU ID'}

    print('unit conversion and masking')
    # unit conversions and some masking
    for vi, varname in enumerate(varnames):
        if varname == 'prec':
            # mm --> in
            subset['prec'] *= 0.0393701
            subset['prec'] = subset['prec'].where(subset['prec'] >= 0)
        elif varname in ['t_min', 't_max']:
            # C --> F
            subset[varname] = subset[varname] * 1.8 + 32.0
        elif varname == 'shortwave':
            # W m-2 --> Langley/day
            factor = 86400.0 / 41868.0
            subset['shortwave'] *= factor
            subset['shortwave'] = subset['shortwave'].where(
                subset['shortwave'] >= 0)
        else:
            raise ValueError('unknown varname: %s' % varname)

    # rename variables
    subset = subset.rename({'prec': 'pr', 't_min': 'tmin', 't_max': 'tmax',
                            'shortwave': 'sw'})

    # reorder dimensions
    subset = subset.transpose('time', 'hru')

    # drop some variables
    subset = subset.drop(['lat', 'lon'])

    subset['pr'] = subset['pr'].astype(np.float32)
    subset['tmax'] = subset['tmax'].astype(np.float32)
    subset['tmin'] = subset['tmin'].astype(np.float32)
    subset['sw'] = subset['sw'].astype(np.float32)

    for varname in subset.data_vars:
        subset[varname].attrs = attrs[varname]
        subset[varname].encoding = encoding[varname]
        subset[varname].encoding['dtype'] = 'f4'

    # Write subset
    print('writing %s' % ncout)
    subset.attrs['history'] += '\nSubset for PRMS: {0} by {1}'.format(
        now, user)
    subset.to_netcdf(ncout, format='NETCDF4', unlimited_dims=['time'])
```
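A minimal driver sketch for the two helpers above (not part of the original module): the file names are hypothetical placeholders, and it assumes a tab-separated HRU list plus a gridded forcing archive whose variable names match the defaults.

```python
# Hypothetical paths -- substitute the real HRU list and forcing files.
grid_df = read_grid_file("hru_latlon.txt")   # tab-separated columns: hru, lat, lon
extract_nc(
    ncin="forcings/gridded_met_19*.nc",      # any pattern xr.open_mfdataset accepts
    grid_df=grid_df,
    ncout="prms_forcing_subset.nc",
    varnames=["prec", "t_max", "t_min", "shortwave"],
)
```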
{ "source": "jhamman/VICpy", "score": 3 }
#### File: models/vic/soil_param_utils.py ```python from __future__ import print_function from .share import MMPERMETER # -------------------------------------------------------------------- # def calc_max_moist(depth, bulk_density, soil_density): """ calculate the maximum soil moisture of each layer """ porosity = 1.0 - bulk_density / soil_density max_moist = depth * porosity * MMPERMETER return max_moist # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # def nijssen2001_to_arno(d1, d2, d3, d4, max_moist): """ Convert parameters VIC Code: if(options.BASEFLOW == NIJSSEN2001) { layer = options.Nlayer-1; temp.dsmax = temp.dsmax * pow((double)(1./(temp.max_moist[layer]-temp.ws)), -temp.c) + temp.ds * temp.max_moist[layer]; temp.ds = temp.ds * temp.ws / temp.dsmax; temp.ws = temp.ws/temp.max_moist[layer]; d? - values corresponding to the 4 NIJSSEN2001 baseflow parameters max_moist - maximum moisture of the bottom soil layer """ dsmax = d2 * pow(1. / (max_moist - d3), -d4) + d1 * max_moist ds = d1 * d3 / dsmax ws = d3 / max_moist c = d4 return ds, dsmax, ws, c # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # def anro_to_nijssen2001(ds, dsmax, ws, c, max_moist): """ Convert parameters VIC Code: if(options.BASEFLOW == NIJSSEN2001) { layer = options.Nlayer-1; temp.dsmax = temp.dsmax * pow((double)(1./(temp.max_moist[layer]-temp.ws)), -temp.c) + temp.ds * temp.max_moist[layer]; temp.ds = temp.ds * temp.ws / temp.dsmax; temp.ws = temp.ws/temp.max_moist[layer]; d? - values corresponding to the 4 NIJSSEN2001 baseflow parameters max_moist - maximum moisture of the bottom soil layer """ d4 = c d3 = max_moist * ws d1 = ds * dsmax / d3 d2 = (dsmax - d1 * max_moist) / pow(max_moist - d3, d4) return d1, d2, d3, d4 # -------------------------------------------------------------------- # ``` #### File: models/vic/vic2netcdf.py ```python from __future__ import print_function from os import path from glob import glob from re import findall from collections import deque from bisect import bisect_left from getpass import getuser from datetime import datetime, timedelta from pandas import read_table, DataFrame from netCDF4 import Dataset, date2num, num2date, default_fillvals import socket import subprocess import dateutil.relativedelta as relativedelta import os import sys import numpy as np import time as tm from tonic.io import read_config, SafeConfigParser from tonic.tonic import calc_grid, get_grid_inds, NcVar from tonic.pycompat import pyzip, pyrange description = 'Convert a set of VIC ascii outputs to gridded netCDF' help = 'Convert a set of VIC ascii outputs to gridded netCDF' # -------------------------------------------------------------------- # SECSPERDAY = 86400.0 REFERENCE_STRING = '0001-01-01 00:00:00' TIMEUNITS = 'days since {0}'.format(REFERENCE_STRING) # (MUST BE DAYS)! 
TIMESTAMPFORM = '%Y-%m-%d-%H' # Precision NC_DOUBLE = 'f8' NC_FLOAT = 'f4' NC_INT = 'i4' # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # # Default configuration default_config = {'OPTIONS': {'out_file_format': 'NETCDF3_64BIT', 'precision': 'single', 'calendar': 'standard', 'time_segment': 'month', 'snow_bands': False, 'veg_tiles': False, 'soil_layers': False}, 'DOMAIN': {'longitude_var': 'longitude', 'latitude_var': 'latitude', 'y_x_dims': ['y', 'x']}} # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # class Point(object): '''Creates a point class for intellegently storing coordinate information''' def __init__(self, lat='', lon='', x='', y='', filename=''): '''Defines x and y variables''' self.lat = lat self.lon = lon self.x = x self.y = y self.filename = filename def _open_binary(self): print('opening binary file: {0}'.format(self.filename)) self.f = open(self.filename, 'rb') def _open_ascii(self): print('opening ascii file: {0}'.format(self.filename)) # return an iterator self._reader = read_table(self.filename, sep=self.delimeter, header=None, iterator=True, usecols=self.usecols, names=self.names) def _open_netcdf(self): print('opening netcdf file: {0}'.format(self.filename)) self.f = Dataset(self.filename, 'r') def _read_ascii(self, count=None): self.df = self._reader.get_chunk(count) return def _read_binary(self, count=-1): d = np.fromfile(self.f, dtype=self.dt, count=count) data = {} for i, name in enumerate(self.names): data[name] = np.array(d[name], dtype=self.dtypes[i], copy=True) / float(self.bin_mults[i]) self.df = DataFrame(data) return def _read_netcdf(self): data = {} for key in self.names: data[key] = np.squeeze(self.f.variables[key][:]) self.df = DataFrame(data) def close(self): print('closing file: {0}'.format(self.filename)) try: self.f.close() except: pass def __str__(self): return "Point({0},{1},{2},{3})".format(self.lat, self.lon, self.y, self.x) def __repr__(self): return "Point(lat={0}, lon={1}, \ y={2}, x={3}, \ filename={4})".format(self.lat, self.lon, self.y, self.x, self.filename) # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # class Plist(deque): '''List subclass that has a few helper methods for adding and obtaining coordinates''' def get_lons(self): return np.array([p.lon for p in self]) def get_lats(self): return np.array([p.lat for p in self]) def add_xs(self, xinds): for i in pyrange(len(self)): self[i].x = xinds[i] return def add_ys(self, yinds): for i in pyrange(len(self)): self[i].y = yinds[i] return def get_ys(self): return np.array([p.y for p in self]) def get_xs(self): return np.array([p.x for p in self]) def get_data(self, name, data_slice): return np.array([p.df[name].values[data_slice] for p in self]) def set_fileformat(self, fileformat): """sets and assigns fileformat specific attributes and methods""" if fileformat == 'ascii': delimeter = r'\t' # VIC ascii files are tab seperated else: delimeter = r',' # true csv for p in self: p.fileformat = fileformat if fileformat in ['ascii', 'csv']: p.open = p._open_ascii p.delimeter = delimeter p.read = p._read_ascii elif fileformat == 'binary': p.open = p._open_binary p.read = p._read_binary p.dt = np.dtype(list(pyzip(p.names, p.bin_dtypes))) elif fileformat == 'netcdf': p.open = p._open_netcdf p.read = p._read_netcdf 
else: raise ValueError('Unknown file format: {0}'.format(fileformat)) return def set_names(self, names): for p in self: p.names = names return def set_usecols(self, usecols): for p in self: p.usecols = usecols return def set_dtypes(self, dtypes): for p in self: p.dtypes = dtypes return def set_bin_dtypes(self, bin_dtypes): for p in self: p.bin_dtypes = bin_dtypes return def set_bin_mults(self, bin_mults): for p in self: p.bin_mults = bin_mults return # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # class Segment(object): def __init__(self, num, i0, i1, nc_format, filename, memory_mode='original'): '''Class used for holding segment information ''' self.num = num self.i0 = i0 self.i1 = i1 self.filename = filename self.fields = {} self.memory_mode = memory_mode self.nc_write(nc_format) # Set slice if memory_mode == 'original': self.slice = slice(None) else: self.slice = slice(i0, i1) def nc_globals(self, title='VIC netCDF file', history='Created: {0} by {1}'.format(tm.ctime(tm.time()), getuser()), institution='University of Washington', source=sys.argv[0], references=( 'Primary Historical Reference for VIC: Liang,' 'X., <NAME>, <NAME>, and <NAME>,' '1994: A Simple hydrologically Based Model of Land' 'Surface Water and Energy Fluxes for GSMs, J. Geophys.' 'Res., 99(D7), 14,415-14,428.'), comment=( 'Output from the Variable Infiltration Capacity' '(VIC) Macroscale Hydrologic Model'), conventions='CF-1.6', target_grid_file='unknown', username=None, hostname=None, version=None, **kwargs): self.f.title = title.encode() self.f.history = history.encode() self.f.institution = institution.encode() self.f.source = source.encode() self.f.references = references.encode() self.f.comment = comment.encode() self.f.conventions = conventions.encode() if hostname: self.f.hostname = hostname else: self.f.hostname = socket.gethostname() self.f.hostname = self.f.hostname.encode() if username: self.f.username = username else: self.f.username = getuser() self.f.username = self.f.username.encode() if version: self.f.version = version else: try: self.f.version = subprocess.check_output(["git", "describe"]).rstrip() except: self.f.version = 'unknown' self.f.version = self.f.version.encode() for attribute, value in kwargs.items(): if hasattr(self.f, attribute): print( 'WARNING: Attribute {0} already exists'.format(attribute)) print('Renaming to g_{0} to avoid ' 'overwriting.'.format(attribute)) attribute = 'g_{0}'.format(attribute) if isinstance(value, str): value = value.encode() setattr(self.f, attribute, value) return def __str__(self): return "Segment Object({0})".format(self.filename) def __repr__(self): return """ -------------------------- Segment {0} -------------------------- Filename: {1} Start Index: {2} End Index: {3} Start Date: {4} End Date: {5} ------------------------------------------------------------------ """.format(self.num, self.filename, self.i0, self.i1, self.startdate, self.enddate) def nc_time(self, t0, t1, times, calendar): """ define time dimension (and write data) """ self.f.createDimension('time', len(times[self.i0:self.i1])) time = self.f.createVariable('time', 'f8', ('time', )) time[:] = times[self.i0:self.i1] time.long_name = 'time'.encode() time.units = TIMEUNITS.encode() time.calendar = calendar.encode() self.count = len(time) self.startdate = t0 self.enddate = t1 def nc_domain(self, domain): """ define the coordinate dimension (and write data) """ # Setup dimensions dimensions = [] for 
name, ncvar in domain.items(): # Setup dimensions for dim in ncvar.dimensions: if dim not in dimensions: dimensions.append(dim) self.f.createDimension(dim, getattr(ncvar, dim)) # Create variable if "_FillValue" in ncvar.attributes: fill_val = ncvar.attributes['_FillValue'] del ncvar.attributes['_FillValue'] else: fill_val = None self.fields[name] = self.f.createVariable(name, NC_DOUBLE, ncvar.dimensions, fill_value=fill_val) # Apply the data self.fields[name][:] = ncvar # Add the attributes for key, val in ncvar.attributes.items(): if isinstance(val, str): val = val.encode() setattr(self.fields[name], key, val) return def nc_dimensions(self, snow_bands=False, veg_tiles=False, soil_layers=False): """ Define 4th dimensions """ if snow_bands: self.f.createDimension('snow_bands', snow_bands) if veg_tiles: self.f.createDimension('veg_tiles', veg_tiles) if soil_layers: self.f.createDimension('soil_layers', soil_layers) return def nc_fields(self, fields, y_x_dims, precision): """ define each field """ coords = ('time',) + tuple(y_x_dims) if precision == 'single': prec_global = NC_FLOAT elif precision == 'double': prec_global = NC_DOUBLE else: raise ValueError('Unkown value for OPTIONS[precision] \ field: {0}'.format(precision)) self.three_dim_vars = [] self.four_dim_vars = [] for name, field in fields.items(): write_out_var = True if 'write_out_var' in field: if not field['write_out_var']: write_out_var = False if write_out_var: if 'dim4' in field: ncols = len(self.f.dimensions[field['dim4']]) if len(field['column']) == ncols: # 4d var coords = ('time',) + tuple([field['dim4']]) \ + tuple(y_x_dims) self.four_dim_vars.append(name) else: raise ValueError('Number of columns for variable {0}' 'does not match the length ({1}) of ' 'the {2} dimension'.format( name, ncols, field['dim4'])) else: # standard 3d var coords = ('time',) + tuple(y_x_dims) self.three_dim_vars.append(name) if 'type' in field: prec = field['type'] else: prec = prec_global fill_val = default_fillvals[prec] self.fields[name] = self.f.createVariable(name, prec, coords, fill_value=fill_val, zlib=False) if 'units' in field: self.fields[name].long_name = name.encode() self.fields[name].coordinates = 'lon lat'.encode() for key, val in field.items(): if isinstance(val, str): val = val.encode() setattr(self.fields[name], key, val) else: raise ValueError('Field {0} missing units \ attribute'.format(name)) return def allocate(self): self.data = {} for name, field in self.fields.items(): self.data[name] = np.atleast_3d(np.zeros_like(field)) if hasattr(field, '_FillValue'): self.data[name][:] = field._FillValue def nc_add_data_to_array(self, point): for name in self.three_dim_vars: self.data[name][:, point.y, point.x] = \ point.df[name].values[self.slice] for name in self.four_dim_vars: varshape = self.f.variables[name].shape[1] for i in pyrange(varshape): subname = name + str(i) self.data[name][:, i, point.y, point.x] = point.df[subname].values[self.slice] def nc_add_data_standard(self, points): ys = points.get_ys() xs = points.get_xs() for p in points: for name in self.three_dim_vars: data = points.get_data(name, self.slice) self.f.variables[name][:, ys, xs] = data for name in self.four_dim_vars: varshape = self.f.variables[name].shape[1] for i in pyrange(varshape): sn = name + str(i) self.f.variables[name][:, i, ys, xs] = p.df[sn].values[self.slice] def nc_write_data_from_array(self): """ write completed data arrays to disk """ for name in self.three_dim_vars: self.f.variables[name][:, :, :] = self.data[name] for name in 
self.four_dim_vars: self.f.variables[name][:, :, :, :] = self.data[name] def nc_write(self, nc_format): self.f = Dataset(self.filename, mode="w", clobber=True, format=nc_format) self.f.set_fill_on() def nc_close(self): self.f.close() print('Closed: {0}'.format(self.filename)) # -------------------------------------------------------------------- # def _run(args): """Top level driver""" print('running now...') if args.create_batch: # ------------------------------------------------------------ # # Create batch files and exit batch(args.config_file, args.create_batch, args.batch_dir) # ------------------------------------------------------------ # else: # ------------------------------------------------------------ # # Read Configuration files config_dict = read_config(args.config_file, default_config=default_config) options = config_dict.pop('OPTIONS') global_atts = config_dict.pop('GLOBAL_ATTRIBUTES') if not options['regular_grid']: domain_dict = config_dict.pop('DOMAIN') else: domain_dict = None # set aside fields dict fields = config_dict vic2nc(options, global_atts, domain_dict, fields) # ------------------------------------------------------------ # return # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # def vic2nc(options, global_atts, domain_dict, fields): """ Convert ascii VIC files to netCDF format""" # determine run mode if (options['memory_mode'] == 'standard') \ and (options['chunksize'] in ['all', 'All', 'ALL', 0]): memory_mode = 'big_memory' else: memory_mode = options['memory_mode'] print("\n-------------------------------") print("Configuration File Options") print("-------------OPTIONS-------------") for pair in options.items(): print("{0}: {1}".format(*pair)) print('Fields: {0}'.format(", ".join(fields.keys()))) if domain_dict: print("-------------DOMAIN--------------") for pair in domain_dict.items(): print("{0}: {1}".format(*pair)) print("--------GLOBAL_ATTRIBUTES--------") for pair in global_atts.items(): print("{0}: {1}".format(*pair)) print("--------RUN MODE--------") print('Memory Mode: {0}'.format(memory_mode)) if memory_mode == 'standard': print('Chunksize={0}'.format(options['chunksize'])) print("---------------------------------\n") # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Make output directory if not os.path.exists(options['out_directory']): os.makedirs(options['out_directory']) # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Make pairs (i.e. 
find inds) files = glob(options['input_files']) points = get_file_coords(files) # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Get target grid information if domain_dict: domain = read_domain(domain_dict) target_grid_file = path.split(domain_dict['filename'])[1] global_atts['target_grid_file'] = target_grid_file else: # must be a regular grid, build from file names domain = calc_grid(points.get_lats(), points.get_lons()) target_grid_file = None domain_dict = {'y_x_dims': ['lat', 'lon']} # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Get grid index locations points = get_grid_inds(domain, points) # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Get timestamps if options['input_file_format'].lower() == 'ascii': if ('bin_start_date' in options and 'bin_end_date' in options and 'bin_dt_sec' in options): vic_datelist, vic_ordtime = make_dates( options['bin_start_date'], options['bin_end_date'], options['bin_dt_sec'], calendar=options['calendar']) else: vic_datelist = get_dates(files[0]) vic_ordtime = date2num(vic_datelist, TIMEUNITS, calendar=options['calendar']) elif options['input_file_format'].lower() in ['binary', 'netcdf']: vic_datelist, vic_ordtime = make_dates(options['bin_start_date'], options['bin_end_date'], options['bin_dt_sec'], calendar=options['calendar']) else: raise ValueError('Unknown input file format: {}. Valid options are \ ascii or binary'.format(options['input_file_format'])) # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Determine time segmentation if options['start_date']: start_date = datetime.strptime(options['start_date'], TIMESTAMPFORM) if start_date < vic_datelist[0]: print("WARNING: Start date in configuration file is before " "first date in file.") start_date = vic_datelist[0] print('WARNING: New start date is {0}'.format(start_date)) else: start_date = vic_datelist[0] if options['end_date']: end_date = datetime.strptime(options['end_date'], TIMESTAMPFORM) if end_date > vic_datelist[-1]: print("WARNING: End date in configuration file is after " "last date in file.") end_date = vic_datelist[-1] print('WARNING: New end date is {0}'.format(end_date)) else: end_date = vic_datelist[-1] # Ordinal Time start_ord = date2num(start_date, TIMEUNITS, calendar=options['calendar']) end_ord = date2num(end_date, TIMEUNITS, calendar=options['calendar']) print("netCDF Start Date: {0}".format(start_date)) print("netCDF End Date: {0}".format(end_date)) segment_dates = [] if options['time_segment'] == 'day': # calendar insensitive num_segments = np.ceil(end_ord - start_ord) if start_date.hour == 0: segment_dates = num2date(np.arange(start_ord, end_ord + 1, 1), TIMEUNITS, calendar=options['calendar']) else: # allow start at time other than 0 temp = [start_ord].append(np.arange(np.ceil(start_ord), end_ord + 1, 1)) segment_dates = num2date(temp, TIMEUNITS, calendar=options['calendar']) elif options['time_segment'] == 'month': num_segments = (end_date.year - start_date.year) * 12 \ + end_date.month - start_date.month + 1 month = start_date.month year = start_date.year for i in pyrange(num_segments + 1): segment_dates.append(datetime(year, month, 1)) month += 1 if month == 13: month = 1 year += 1 elif options['time_segment'] 
== 'year': num_segments = end_date.year - start_date.year + 1 year = start_date.year for i in pyrange(num_segments + 1): segment_dates.append(datetime(year, 1, 1)) year += 1 elif options['time_segment'] == 'decade': num_segments = (end_date.year - start_date.year) / 10 + 1 year = start_date.year for i in pyrange(num_segments + 1): segment_dates.append(datetime(year, 1, 1)) year += 10 elif options['time_segment'] == 'all': num_segments = 1 segment_dates = [start_date, end_date] else: raise ValueError('Unknown timesegment options \ {0}'.format(options['time_segment'])) print("Number of files: {0}".format(len(segment_dates) - 1)) assert len(segment_dates) == num_segments + 1 # Make sure the first and last dates are start/end_date segment_dates[0] = start_date segment_dates[-1] = end_date + timedelta(minutes=1) # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Setup Segments segments = deque() for num in pyrange(num_segments): # Segment time bounds t0 = segment_dates[num] t1 = segment_dates[num + 1] # Get segment inds i0 = bisect_left(vic_datelist, t0) i1 = bisect_left(vic_datelist, t1) # Make segment filename (with path) if options['time_segment'] == 'day': filename = "{0}.{1}.nc".format(options['out_file_prefix'], t0.strftime('%Y-%m-%d')) elif options['time_segment'] == 'month': filename = "{0}.{1}.nc".format(options['out_file_prefix'], t0.strftime('%Y-%m')) elif options['time_segment'] == 'year': filename = "{0}.{1}.nc".format(options['out_file_prefix'], t0.strftime('%Y')) elif options['time_segment'] == 'all': filename = "{0}.{1}-{2}.nc".format(options['out_file_prefix'], t0.strftime('%Y%m%d'), t1.strftime('%Y%m%d')) filename = path.join(options['out_directory'], filename) # Setup segment and initialize netcdf segment = Segment(num, i0, i1, options['out_file_format'], filename, memory_mode=memory_mode) segment.nc_globals(**global_atts) segment.nc_time(t0, t1, vic_ordtime, options['calendar']) segment.nc_dimensions(snow_bands=options['snow_bands'], veg_tiles=options['veg_tiles'], soil_layers=options['soil_layers']) segment.nc_domain(domain) segment.nc_fields(fields, domain_dict['y_x_dims'], options['precision']) print(repr(segment)) segments.append(segment) # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Get column numbers and names (will help speed up reading) names = [] usecols = [] dtypes = [] bin_dtypes = [] bin_mults = [] if options['precision'] == 'double': prec = NC_DOUBLE else: prec = NC_FLOAT for name, field in fields.items(): if not np.isscalar(field['column']): # multiple levels for i, col in enumerate(field['column']): names.append(name + str(i)) usecols.append(col) if 'type' in field: if type(field['type']) == list: dtypes.extend(field['type']) else: dtypes.extend([field['type']] * len(field['column'])) else: dtypes.append([prec] * len(field['column'])) if options['input_file_format'].lower() == 'binary': if 'bin_dtype' in field: if type(field['bin_dtype']) == list: bin_dtypes.extend(field['bin_dtype']) else: bin_dtypes.extend([field['bin_dtype']] * len(field['column'])) else: raise ValueError('bin_dtype not in field: {}'.format(name)) if 'bin_mult' in field: if type(field['bin_mult']) == list: bin_mults.extend(field['bin_mult']) else: bin_mults.extend([field['bin_mult']] * len(field['column'])) else: bin_mults.extend([1.0] * len(field['column'])) else: # no levels names.append(name) 
usecols.append(field['column']) if 'type' in field: dtypes.append(field['type']) else: dtypes.append(prec) if options['input_file_format'].lower() == 'binary': if 'bin_dtype' in field: bin_dtypes.append(field['bin_dtype']) else: raise ValueError('bin_dtype not in field: {}'.format(name)) if 'bin_mult' in field: bin_mults.append(field['bin_mult']) else: bin_mults.append(1.0) print('setting point attributes (fileformat, names, usecols, and dtypes)') # pandas.read_table does not 'honor' the order of the columns in usecols # it simply uses them in ascending order. So the names need to be sorted # the same way. For example, if the columns in the VIC file are: # 3: prcp; 4: evap; 5: runoff; 6; baseflow; 7: sm1; 8: sm2; 9: sm3; 10: swe # and this is parsed from the configuration file as # usecols = [3, 4, 5, 6, 10, 7, 8, 9] # names=['prcp', 'evap', 'runoff', 'baseflow', 'swe', 'sm1', 'sm2', 'sm3'] # then without sorting, the netcdf file will have the wrong variables: # nc_swe will contain sm1, nc_sm1 will contain sm2, nc_sm2: sm3 and # nc_swe: sm3 # the following will ensure that the names are sorted in increasing column # order. Note that sorted(usecols) is not strictly necessary, since # apparently that is done in read_table, but it keeps the names and columns # in the same order names = [x for (y, x) in sorted(pyzip(usecols, names))] usecols = sorted(usecols) points.set_names(names) points.set_usecols(usecols) points.set_dtypes(dtypes) # set binary attributes if options['input_file_format'].lower() == 'binary': points.set_bin_dtypes(bin_dtypes) points.set_bin_mults(bin_mults) points.set_fileformat(options['input_file_format']) print('done') # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # if memory_mode == 'big_memory': # ------------------------------------------------------------ # # run in big memory mode for i, segment in enumerate(segments): segments[i].allocate() while points: point = points.popleft() point.open() point.read() point.close() for segment in segments: segment.nc_add_data_to_array(point) for segment in segments: segment.nc_write_data_from_array() segment.nc_close() # ------------------------------------------------------------ # elif memory_mode == 'standard': # ------------------------------------------------------------ # # Open VIC files and put data into netcdfs chunk = Plist() while points: point = points.popleft() point.open() point.read() point.close() chunk.append(point) if len(chunk) > int(options['chunksize']) or len(points) == 0: for segment in segments: segment.nc_add_data_standard(chunk) chunk = Plist() del point # ------------------------------------------------------------ # # ------------------------------------------------------------ # # Close the netcdf files for segment in segments: segment.nc_close() # ------------------------------------------------------------ # elif memory_mode == 'original': # ------------------------------------------------------------ # # Run in original memory mode (a.k.a. 
vic2nc.c mode) # Open all files for point in points: point.open() while segments: segment = segments.popleft() segment.allocate() count = segment.count for point in points: point.read(count) segment.nc_add_data_to_array(point) segment.nc_write_data_from_array() segment.nc_close() for point in points: point.close() # ------------------------------------------------------------ # return # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # def get_file_coords(files): """ Get list of Point objects """ points = Plist() for i, filename in enumerate(files): # fname = path.split(f)[1][-16:] # just look at last 16 characters f = filename[-22:] # just look at last 16 characters lat, lon = list(map(float, findall(r"[-+]?\d*\.\d+|\d+", f)))[-2:] points.append(Point(lat=lat, lon=lon, filename=filename)) return points # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # def get_dates(file): """ Read the first file in the input directory and create a ordinal based timeseries. Also find the indicies to split the time series into months and years """ hours = (0, 1, 2, 3) days = (0, 1, 2) try: data = np.loadtxt(file, usecols=hours, dtype=int) datelist = [datetime(*d) for d in data] except (ValueError, TypeError): data = np.loadtxt(file, usecols=days, dtype=int) datelist = [datetime(*d) for d in data] # check to make sure we haven't used used daily by mistake # (creating a bunch of duplicate times) newlist = [] for i in datelist: if i not in newlist: newlist.append(i) else: raise ValueError('Found duplicate datetimes in datelist') print('VIC startdate: {0}'.format(datelist[0])) print('VIC enddate: {0}'.format(datelist[-1])) return datelist # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # def make_dates(start, end, dt, calendar='standard'): """ Return a list of datetime object from inputs of start - python date string (i.e. 1989-01-01-00) end - python date string (i.e. 
1989-01-01-23) dt - int or float timestep in seconds """ start = map(int, start.split('-')) end = map(int, end.split('-')) start_ord = date2num(datetime(*start), TIMEUNITS, calendar=calendar) end_ord = date2num(datetime(*end), TIMEUNITS, calendar=calendar) step = float(dt) / SECSPERDAY ordlist = np.arange(start_ord, end_ord + step, step) datelist = num2date(ordlist, TIMEUNITS, calendar=calendar) return datelist, ordlist # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # def read_domain(domain_dict): print('reading domain file: {0}'.format(domain_dict['filename'])) f = Dataset(domain_dict['filename']) domain = {'lon': NcVar(f, domain_dict['longitude_var']), 'lat': NcVar(f, domain_dict['latitude_var'])} if domain_dict['copy_vars']: for varname in domain_dict['copy_vars']: domain[varname] = NcVar(f, varname) f.close() return domain # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # def batch(config_file, create_batch, batch_dir): """Create a set of batch configuration files""" # Read Configuration files config_dict = read_config(config_file) options = config_dict.pop('OPTIONS') global_atts = config_dict.pop('GLOBAL_ATTRIBUTES') domain_dict = config_dict.pop('DOMAIN', None) fields = config_dict config = SafeConfigParser() config.optionxform = str # Figure out what to call the new files nameprefix = os.path.splitext(os.path.split(config_file)[1])[0] if create_batch == 'variables': # batch by variables # options section config.add_section('OPTIONS') for option, value in options.items(): if type(value) == list: try: value = ", ".join(value) except TypeError: value = ", ".join(repr(e) for e in value) elif type(value) != str: value = str(value) config.set('OPTIONS', option, str(value)) # global_atts section config.add_section('GLOBAL_ATTRIBUTES') for option, value in global_atts.items(): if type(value) == list: try: value = ", ".join(value) except TypeError: value = ", ".join(repr(e) for e in value) elif type(value) != str: value = str(value) config.set('GLOBAL_ATTRIBUTES', option, str(value)) # domain dict section if domain_dict: config.add_section('DOMAIN') for option, value in domain_dict.items(): if type(value) == list: try: value = ", ".join(value) except TypeError: value = ", ".join(repr(e) for e in value) elif type(value) != str: value = str(value) config.set('DOMAIN', option, value.strip("'")) for var, field in fields.items(): suffix = "_{0}.cfg".format(var) new_cfg_file = os.path.join(batch_dir, nameprefix + suffix) # this var config.add_section(var) for option, value in field.items(): if type(value) == list: try: value = ", ".join(value) except TypeError: value = ", ".join(repr(e) for e in value) elif type(value) != str: value = str(value) config.set(var, option, str(value)) # write that config with open(new_cfg_file, 'wb') as cf: config.write(cf) # clear the var section config.remove_section(var) else: # start with existing config config.read(config_file) # by time start_date = datetime.strptime(options['start_date'], TIMESTAMPFORM) end_date = datetime.strptime(options['end_date'], TIMESTAMPFORM) t0 = start_date if create_batch == 'years': td = relativedelta.relativedelta(years=1) t1 = datetime(t0.year, 12, 31, end_date.hour) elif create_batch == 'months': td = relativedelta.relativedelta(months=1) elif create_batch == 'days': # days option is only valid for gregorian calendar td = 
relativedelta.relativedelta(days=1) hour = relativedelta.relativedelta(hours=-1) i = 0 while t0 < end_date: i += 1 t1 = t0 + td if t1 > end_date: t1 = end_date else: t1 += hour suffix = '_{0}'.format(i) new_cfg_file = os.path.join(batch_dir, nameprefix + suffix) # Write config replacing start and end dates config.set('OPTIONS', 'start_date', t0.strftime(TIMESTAMPFORM)) config.set('OPTIONS', 'end_date', t1.strftime(TIMESTAMPFORM)) with open(new_cfg_file, 'wb') as cf: config.write(cf) t0 += td return # -------------------------------------------------------------------- # ``` #### File: VICpy/tonic/tonic.py ```python import numpy as np from scipy.spatial import cKDTree MMPERMETER = 1000. # -------------------------------------------------------------------- # class NcVar(np.ndarray): """ Subclass of numpy array to cary netcdf attributes""" def __new__(cls, f, varname): obj = np.asarray(f.variables[varname][:]).view(cls) # add the new attribute to the created instance obj.dimensions = f.variables[varname].dimensions obj.attributes = f.variables[varname].__dict__ for dim in obj.dimensions: setattr(obj, dim, len(f.dimensions[dim])) # Finally, we must return the newly created object: return obj def __array_finalize__(self, obj): if obj is None: return # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # class FakeNcVar(np.ndarray): """ Subclass of numpy array to carry netcdf attributes""" def __new__(cls, data, dimensions, attributes): obj = np.asarray(data).view(cls) # add the new attribute to the created instance obj.dimensions = dimensions obj.attributes = attributes shape = data.shape for i, dim in enumerate(obj.dimensions): setattr(obj, dim, shape[i]) # Finally, we must return the newly created object: return obj def __array_finalize__(self, obj): if obj is None: return # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # def latlon2yx(plats, plons, glats, glons): """find y x coordinates """ if glons.ndim == 1 or glats.ndim == 1: glons, glats = np.meshgrid(glons, glats) combined = np.dstack(([glats.ravel(), glons.ravel()]))[0] points = list(np.vstack((np.array(plats), np.array(plons))).transpose()) mytree = cKDTree(combined) dist, indexes = mytree.query(points, k=1) y, x = np.unravel_index(np.array(indexes), glons.shape) return y, x # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # def calc_grid(lats, lons, decimals=4): """ determine shape of regular grid from lons and lats""" print('Calculating grid size now...') target_grid = {} # get unique lats and lons lon = np.sort(np.unique(lons.round(decimals=decimals))) print('found {0} unique lons'.format(len(lon))) lat = np.sort(np.unique(lats.round(decimals=decimals))) print('found {0} unique lats'.format(len(lat))) y, x = latlon2yx(lats, lons, lat, lon) mask = np.zeros((len(lat), len(lon)), dtype=int) mask[y, x] = 1 # Create fake NcVar Types target_grid['lon'] = FakeNcVar(lon, ('lon', ), {'long_name': 'longitude coordinate', 'units': 'degrees_east'}) target_grid['lat'] = FakeNcVar(lat, ('lat', ), {'long_name': 'latitude coordinate', 'units': 'degrees_north'}) target_grid['mask'] = FakeNcVar(mask, ('lat', 'lon', ), {'long_name': 'domain mask', 'comment': '0 indicates grid cell is not \ active'}) print('Created a target grid based on the lats and lons in the ' 'input 
file names') print('Grid Size: {}'.format(mask.shape)) return target_grid # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # def get_grid_inds(domain, points): """ Find location of lat/lon points in 2d target grid. Uses cKdtree nearest neighbor mapping. """ lons = points.get_lons() lats = points.get_lats() if (lons.min() < 0) and (domain['lon'].min() >= 0): posinds = np.nonzero(lons < 0) lons[posinds] += 360 print('adjusted VIC lon minimum (+360 for negative lons)') # Make sure the longitude / latitude vars are 2d if domain['lat'].ndim == 1 or domain['lon'].ndim == 1: dlons, dlats = np.meshgrid(domain['lon'], domain['lat']) combined = np.dstack(([dlats.ravel(), dlons.ravel()]))[0] point_list = list(np.vstack((lats, lons)).transpose()) mytree = cKDTree(combined) dist, indexes = mytree.query(point_list, k=1) yinds, xinds = np.unravel_index(indexes, dlons.shape) points.add_xs(xinds) points.add_ys(yinds) return points # -------------------------------------------------------------------- # ```
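The converter above is normally driven end-to-end by `vic2nc()` with a parsed configuration file, but the smaller helpers can be exercised on their own. The sketch below is a hedged example, not project documentation: the import paths are assumptions inferred from the file names shown here (adjust them to the installed package layout), the coordinates and dates are illustrative, and `numpy`, `scipy`, and `netCDF4` are assumed to be installed.

```python
import numpy as np

# Import paths are assumptions inferred from the file names above.
from tonic.tonic import calc_grid
from tonic.models.vic.vic2netcdf import make_dates

# Cell centers as they would be parsed from VIC output file names
# (e.g. fluxes_48.3125_-120.4375); calc_grid infers the regular target grid.
lats = np.array([48.3125, 48.3125, 48.4375, 48.4375])
lons = np.array([-120.4375, -120.3125, -120.4375, -120.3125])
target_grid = calc_grid(lats, lons)
print(target_grid["mask"].shape)  # 2 x 2 grid, all cells active

# Build a 3-hourly time axis like the one vic2nc uses for binary inputs.
datelist, ordtime = make_dates("1949-01-01-00", "1949-01-01-21", 10800,
                               calendar="standard")
print(datelist[0], datelist[-1], len(datelist))
```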
{ "source": "jhamman/xarray-test-docs", "score": 2 }
#### File: xarray/tests/test_backends_locks.py ```python import threading from xarray.backends import locks def test_threaded_lock() -> None: lock1 = locks._get_threaded_lock("foo") assert isinstance(lock1, type(threading.Lock())) lock2 = locks._get_threaded_lock("foo") assert lock1 is lock2 lock3 = locks._get_threaded_lock("bar") assert lock1 is not lock3 ``` #### File: xarray/tests/test_concat.py ```python from copy import deepcopy from typing import List import numpy as np import pandas as pd import pytest from xarray import DataArray, Dataset, Variable, concat from xarray.core import dtypes, merge from . import ( InaccessibleArray, assert_array_equal, assert_equal, assert_identical, requires_dask, ) from .test_dataset import create_test_data def test_concat_compat() -> None: ds1 = Dataset( { "has_x_y": (("y", "x"), [[1, 2]]), "has_x": ("x", [1, 2]), "no_x_y": ("z", [1, 2]), }, coords={"x": [0, 1], "y": [0], "z": [-1, -2]}, ) ds2 = Dataset( { "has_x_y": (("y", "x"), [[3, 4]]), "has_x": ("x", [1, 2]), "no_x_y": (("q", "z"), [[1, 2]]), }, coords={"x": [0, 1], "y": [1], "z": [-1, -2], "q": [0]}, ) result = concat([ds1, ds2], dim="y", data_vars="minimal", compat="broadcast_equals") assert_equal(ds2.no_x_y, result.no_x_y.transpose()) for var in ["has_x", "no_x_y"]: assert "y" not in result[var].dims and "y" not in result[var].coords with pytest.raises( ValueError, match=r"coordinates in some datasets but not others" ): concat([ds1, ds2], dim="q") with pytest.raises(ValueError, match=r"'q' is not present in all datasets"): concat([ds2, ds1], dim="q") class TestConcatDataset: @pytest.fixture def data(self) -> Dataset: return create_test_data().drop_dims("dim3") def rectify_dim_order(self, data, dataset) -> Dataset: # return a new dataset with all variable dimensions transposed into # the order in which they are found in `data` return Dataset( {k: v.transpose(*data[k].dims) for k, v in dataset.data_vars.items()}, dataset.coords, attrs=dataset.attrs, ) @pytest.mark.parametrize("coords", ["different", "minimal"]) @pytest.mark.parametrize("dim", ["dim1", "dim2"]) def test_concat_simple(self, data, dim, coords) -> None: datasets = [g for _, g in data.groupby(dim, squeeze=False)] assert_identical(data, concat(datasets, dim, coords=coords)) def test_concat_merge_variables_present_in_some_datasets(self, data) -> None: # coordinates present in some datasets but not others ds1 = Dataset(data_vars={"a": ("y", [0.1])}, coords={"x": 0.1}) ds2 = Dataset(data_vars={"a": ("y", [0.2])}, coords={"z": 0.2}) actual = concat([ds1, ds2], dim="y", coords="minimal") expected = Dataset({"a": ("y", [0.1, 0.2])}, coords={"x": 0.1, "z": 0.2}) assert_identical(expected, actual) # data variables present in some datasets but not others split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))] data0, data1 = deepcopy(split_data) data1["foo"] = ("bar", np.random.randn(10)) actual = concat([data0, data1], "dim1") expected = data.copy().assign(foo=data1.foo) assert_identical(expected, actual) def test_concat_2(self, data) -> None: dim = "dim2" datasets = [g for _, g in data.groupby(dim, squeeze=True)] concat_over = [k for k, v in data.coords.items() if dim in v.dims and k != dim] actual = concat(datasets, data[dim], coords=concat_over) assert_identical(data, self.rectify_dim_order(data, actual)) @pytest.mark.parametrize("coords", ["different", "minimal", "all"]) @pytest.mark.parametrize("dim", ["dim1", "dim2"]) def test_concat_coords_kwarg(self, data, dim, coords) -> None: data = data.copy(deep=True) # make sure 
the coords argument behaves as expected data.coords["extra"] = ("dim4", np.arange(3)) datasets = [g for _, g in data.groupby(dim, squeeze=True)] actual = concat(datasets, data[dim], coords=coords) if coords == "all": expected = np.array([data["extra"].values for _ in range(data.dims[dim])]) assert_array_equal(actual["extra"].values, expected) else: assert_equal(data["extra"], actual["extra"]) def test_concat(self, data) -> None: split_data = [ data.isel(dim1=slice(3)), data.isel(dim1=3), data.isel(dim1=slice(4, None)), ] assert_identical(data, concat(split_data, "dim1")) def test_concat_dim_precedence(self, data) -> None: # verify that the dim argument takes precedence over # concatenating dataset variables of the same name dim = (2 * data["dim1"]).rename("dim1") datasets = [g for _, g in data.groupby("dim1", squeeze=False)] expected = data.copy() expected["dim1"] = dim assert_identical(expected, concat(datasets, dim)) def test_concat_data_vars_typing(self) -> None: # Testing typing, can be removed if the next function works with annotations. data = Dataset({"foo": ("x", np.random.randn(10))}) objs: List[Dataset] = [data.isel(x=slice(5)), data.isel(x=slice(5, None))] actual = concat(objs, dim="x", data_vars="minimal") assert_identical(data, actual) def test_concat_data_vars(self): # TODO: annotating this func fails data = Dataset({"foo": ("x", np.random.randn(10))}) objs: List[Dataset] = [data.isel(x=slice(5)), data.isel(x=slice(5, None))] for data_vars in ["minimal", "different", "all", [], ["foo"]]: actual = concat(objs, dim="x", data_vars=data_vars) assert_identical(data, actual) def test_concat_coords(self): # TODO: annotating this func fails data = Dataset({"foo": ("x", np.random.randn(10))}) expected = data.assign_coords(c=("x", [0] * 5 + [1] * 5)) objs = [ data.isel(x=slice(5)).assign_coords(c=0), data.isel(x=slice(5, None)).assign_coords(c=1), ] for coords in ["different", "all", ["c"]]: actual = concat(objs, dim="x", coords=coords) assert_identical(expected, actual) for coords in ["minimal", []]: with pytest.raises(merge.MergeError, match="conflicting values"): concat(objs, dim="x", coords=coords) def test_concat_constant_index(self): # TODO: annotating this func fails # GH425 ds1 = Dataset({"foo": 1.5}, {"y": 1}) ds2 = Dataset({"foo": 2.5}, {"y": 1}) expected = Dataset({"foo": ("y", [1.5, 2.5]), "y": [1, 1]}) for mode in ["different", "all", ["foo"]]: actual = concat([ds1, ds2], "y", data_vars=mode) assert_identical(expected, actual) with pytest.raises(merge.MergeError, match="conflicting values"): # previously dim="y", and raised error which makes no sense. # "foo" has dimension "y" so minimal should concatenate it? 
concat([ds1, ds2], "new_dim", data_vars="minimal") def test_concat_size0(self) -> None: data = create_test_data() split_data = [data.isel(dim1=slice(0, 0)), data] actual = concat(split_data, "dim1") assert_identical(data, actual) actual = concat(split_data[::-1], "dim1") assert_identical(data, actual) def test_concat_autoalign(self) -> None: ds1 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 2])])}) ds2 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 3])])}) actual = concat([ds1, ds2], "y") expected = Dataset( { "foo": DataArray( [[1, 2, np.nan], [1, np.nan, 2]], dims=["y", "x"], coords={"x": [1, 2, 3]}, ) } ) assert_identical(expected, actual) def test_concat_errors(self): # TODO: annotating this func fails data = create_test_data() split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))] with pytest.raises(ValueError, match=r"must supply at least one"): concat([], "dim1") with pytest.raises(ValueError, match=r"Cannot specify both .*='different'"): concat( [data, data], dim="concat_dim", data_vars="different", compat="override" ) with pytest.raises(ValueError, match=r"must supply at least one"): concat([], "dim1") with pytest.raises(ValueError, match=r"are not coordinates"): concat([data, data], "new_dim", coords=["not_found"]) with pytest.raises(ValueError, match=r"global attributes not"): data0, data1 = deepcopy(split_data) data1.attrs["foo"] = "bar" concat([data0, data1], "dim1", compat="identical") assert_identical(data, concat([data0, data1], "dim1", compat="equals")) with pytest.raises(ValueError, match=r"compat.* invalid"): concat(split_data, "dim1", compat="foobar") with pytest.raises(ValueError, match=r"unexpected value for"): concat([data, data], "new_dim", coords="foobar") with pytest.raises( ValueError, match=r"coordinate in some datasets but not others" ): concat([Dataset({"x": 0}), Dataset({"x": [1]})], dim="z") with pytest.raises( ValueError, match=r"coordinate in some datasets but not others" ): concat([Dataset({"x": 0}), Dataset({}, {"x": 1})], dim="z") def test_concat_join_kwarg(self) -> None: ds1 = Dataset({"a": (("x", "y"), [[0]])}, coords={"x": [0], "y": [0]}) ds2 = Dataset({"a": (("x", "y"), [[0]])}, coords={"x": [1], "y": [0.0001]}) expected = {} expected["outer"] = Dataset( {"a": (("x", "y"), [[0, np.nan], [np.nan, 0]])}, {"x": [0, 1], "y": [0, 0.0001]}, ) expected["inner"] = Dataset( {"a": (("x", "y"), [[], []])}, {"x": [0, 1], "y": []} ) expected["left"] = Dataset( {"a": (("x", "y"), np.array([0, np.nan], ndmin=2).T)}, coords={"x": [0, 1], "y": [0]}, ) expected["right"] = Dataset( {"a": (("x", "y"), np.array([np.nan, 0], ndmin=2).T)}, coords={"x": [0, 1], "y": [0.0001]}, ) expected["override"] = Dataset( {"a": (("x", "y"), np.array([0, 0], ndmin=2).T)}, coords={"x": [0, 1], "y": [0]}, ) with pytest.raises(ValueError, match=r"indexes along dimension 'y'"): actual = concat([ds1, ds2], join="exact", dim="x") for join in expected: actual = concat([ds1, ds2], join=join, dim="x") assert_equal(actual, expected[join]) # regression test for #3681 actual = concat( [ds1.drop_vars("x"), ds2.drop_vars("x")], join="override", dim="y" ) expected2 = Dataset( {"a": (("x", "y"), np.array([0, 0], ndmin=2))}, coords={"y": [0, 0.0001]} ) assert_identical(actual, expected2) @pytest.mark.parametrize( "combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception", [ ( "no_conflicts", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}, False, ), ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False), ("no_conflicts", {}, {"a": 1, 
"c": 3}, {"a": 1, "c": 3}, False), ( "no_conflicts", {"a": 1, "b": 2}, {"a": 4, "c": 3}, {"a": 1, "b": 2, "c": 3}, True, ), ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True), ( "override", {"a": 1, "b": 2}, {"a": 4, "b": 5, "c": 3}, {"a": 1, "b": 2}, False, ), ( "drop_conflicts", {"a": 41, "b": 42, "c": 43}, {"b": 2, "c": 43, "d": 44}, {"a": 41, "c": 43, "d": 44}, False, ), ( lambda attrs, context: {"a": -1, "b": 0, "c": 1} if any(attrs) else {}, {"a": 41, "b": 42, "c": 43}, {"b": 2, "c": 43, "d": 44}, {"a": -1, "b": 0, "c": 1}, False, ), ], ) def test_concat_combine_attrs_kwarg( self, combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception ): ds1 = Dataset({"a": ("x", [0])}, coords={"x": [0]}, attrs=var1_attrs) ds2 = Dataset({"a": ("x", [0])}, coords={"x": [1]}, attrs=var2_attrs) if expect_exception: with pytest.raises(ValueError, match=f"combine_attrs='{combine_attrs}'"): concat([ds1, ds2], dim="x", combine_attrs=combine_attrs) else: actual = concat([ds1, ds2], dim="x", combine_attrs=combine_attrs) expected = Dataset( {"a": ("x", [0, 0])}, {"x": [0, 1]}, attrs=expected_attrs ) assert_identical(actual, expected) @pytest.mark.parametrize( "combine_attrs, attrs1, attrs2, expected_attrs, expect_exception", [ ( "no_conflicts", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2, "c": 3}, False, ), ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False), ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False), ( "no_conflicts", {"a": 1, "b": 2}, {"a": 4, "c": 3}, {"a": 1, "b": 2, "c": 3}, True, ), ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False), ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True), ( "override", {"a": 1, "b": 2}, {"a": 4, "b": 5, "c": 3}, {"a": 1, "b": 2}, False, ), ( "drop_conflicts", {"a": 41, "b": 42, "c": 43}, {"b": 2, "c": 43, "d": 44}, {"a": 41, "c": 43, "d": 44}, False, ), ( lambda attrs, context: {"a": -1, "b": 0, "c": 1} if any(attrs) else {}, {"a": 41, "b": 42, "c": 43}, {"b": 2, "c": 43, "d": 44}, {"a": -1, "b": 0, "c": 1}, False, ), ], ) def test_concat_combine_attrs_kwarg_variables( self, combine_attrs, attrs1, attrs2, expected_attrs, expect_exception ): """check that combine_attrs is used on data variables and coords""" ds1 = Dataset({"a": ("x", [0], attrs1)}, coords={"x": ("x", [0], attrs1)}) ds2 = Dataset({"a": ("x", [0], attrs2)}, coords={"x": ("x", [1], attrs2)}) if expect_exception: with pytest.raises(ValueError, match=f"combine_attrs='{combine_attrs}'"): concat([ds1, ds2], dim="x", combine_attrs=combine_attrs) else: actual = concat([ds1, ds2], dim="x", combine_attrs=combine_attrs) expected = Dataset( {"a": ("x", [0, 0], expected_attrs)}, {"x": ("x", [0, 1], expected_attrs)}, ) assert_identical(actual, expected) def test_concat_promote_shape(self) -> None: # mixed dims within variables objs = [Dataset({}, {"x": 0}), Dataset({"x": [1]})] actual = concat(objs, "x") expected = Dataset({"x": [0, 1]}) assert_identical(actual, expected) objs = [Dataset({"x": [0]}), Dataset({}, {"x": 1})] actual = concat(objs, "x") assert_identical(actual, expected) # mixed dims between variables objs = [Dataset({"x": [2], "y": 3}), Dataset({"x": [4], "y": 5})] actual = concat(objs, "x") expected = Dataset({"x": [2, 4], "y": ("x", [3, 5])}) assert_identical(actual, expected) # mixed dims in 
coord variable objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1]}, {"y": ("x", [-2])})] actual = concat(objs, "x") expected = Dataset({"x": [0, 1]}, {"y": ("x", [-1, -2])}) assert_identical(actual, expected) # scalars with mixed lengths along concat dim -- values should repeat objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1, 2]}, {"y": -2})] actual = concat(objs, "x") expected = Dataset({"x": [0, 1, 2]}, {"y": ("x", [-1, -2, -2])}) assert_identical(actual, expected) # broadcast 1d x 1d -> 2d objs = [ Dataset({"z": ("x", [-1])}, {"x": [0], "y": [0]}), Dataset({"z": ("y", [1])}, {"x": [1], "y": [0]}), ] actual = concat(objs, "x") expected = Dataset({"z": (("x", "y"), [[-1], [1]])}, {"x": [0, 1], "y": [0]}) assert_identical(actual, expected) def test_concat_do_not_promote(self) -> None: # GH438 objs = [ Dataset({"y": ("t", [1])}, {"x": 1, "t": [0]}), Dataset({"y": ("t", [2])}, {"x": 1, "t": [0]}), ] expected = Dataset({"y": ("t", [1, 2])}, {"x": 1, "t": [0, 0]}) actual = concat(objs, "t") assert_identical(expected, actual) objs = [ Dataset({"y": ("t", [1])}, {"x": 1, "t": [0]}), Dataset({"y": ("t", [2])}, {"x": 2, "t": [0]}), ] with pytest.raises(ValueError): concat(objs, "t", coords="minimal") def test_concat_dim_is_variable(self) -> None: objs = [Dataset({"x": 0}), Dataset({"x": 1})] coord = Variable("y", [3, 4]) expected = Dataset({"x": ("y", [0, 1]), "y": [3, 4]}) actual = concat(objs, coord) assert_identical(actual, expected) def test_concat_multiindex(self) -> None: x = pd.MultiIndex.from_product([[1, 2, 3], ["a", "b"]]) expected = Dataset({"x": x}) actual = concat( [expected.isel(x=slice(2)), expected.isel(x=slice(2, None))], "x" ) assert expected.equals(actual) assert isinstance(actual.x.to_index(), pd.MultiIndex) @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"a": 2, "b": 1}]) def test_concat_fill_value(self, fill_value) -> None: datasets = [ Dataset({"a": ("x", [2, 3]), "b": ("x", [-2, 1]), "x": [1, 2]}), Dataset({"a": ("x", [1, 2]), "b": ("x", [3, -1]), "x": [0, 1]}), ] if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value_a = fill_value_b = np.nan elif isinstance(fill_value, dict): fill_value_a = fill_value["a"] fill_value_b = fill_value["b"] else: fill_value_a = fill_value_b = fill_value expected = Dataset( { "a": (("t", "x"), [[fill_value_a, 2, 3], [1, 2, fill_value_a]]), "b": (("t", "x"), [[fill_value_b, -2, 1], [3, -1, fill_value_b]]), }, {"x": [0, 1, 2]}, ) actual = concat(datasets, dim="t", fill_value=fill_value) assert_identical(actual, expected) @pytest.mark.parametrize("dtype", [str, bytes]) @pytest.mark.parametrize("dim", ["x1", "x2"]) def test_concat_str_dtype(self, dtype, dim) -> None: data = np.arange(4).reshape([2, 2]) da1 = Dataset( { "data": (["x1", "x2"], data), "x1": [0, 1], "x2": np.array(["a", "b"], dtype=dtype), } ) da2 = Dataset( { "data": (["x1", "x2"], data), "x1": np.array([1, 2]), "x2": np.array(["c", "d"], dtype=dtype), } ) actual = concat([da1, da2], dim=dim) assert np.issubdtype(actual.x2.dtype, dtype) class TestConcatDataArray: def test_concat(self) -> None: ds = Dataset( { "foo": (["x", "y"], np.random.random((2, 3))), "bar": (["x", "y"], np.random.random((2, 3))), }, {"x": [0, 1]}, ) foo = ds["foo"] bar = ds["bar"] # from dataset array: expected = DataArray( np.array([foo.values, bar.values]), dims=["w", "x", "y"], coords={"x": [0, 1]}, ) actual = concat([foo, bar], "w") assert_equal(expected, actual) # from iteration: grouped = [g for _, g in 
foo.groupby("x")] stacked = concat(grouped, ds["x"]) assert_identical(foo, stacked) # with an index as the 'dim' argument stacked = concat(grouped, pd.Index(ds["x"], name="x")) assert_identical(foo, stacked) actual2 = concat([foo[0], foo[1]], pd.Index([0, 1])).reset_coords(drop=True) expected = foo[:2].rename({"x": "concat_dim"}) assert_identical(expected, actual2) actual3 = concat([foo[0], foo[1]], [0, 1]).reset_coords(drop=True) expected = foo[:2].rename({"x": "concat_dim"}) assert_identical(expected, actual3) with pytest.raises(ValueError, match=r"not identical"): concat([foo, bar], dim="w", compat="identical") with pytest.raises(ValueError, match=r"not a valid argument"): concat([foo, bar], dim="w", data_vars="minimal") def test_concat_encoding(self) -> None: # Regression test for GH1297 ds = Dataset( { "foo": (["x", "y"], np.random.random((2, 3))), "bar": (["x", "y"], np.random.random((2, 3))), }, {"x": [0, 1]}, ) foo = ds["foo"] foo.encoding = {"complevel": 5} ds.encoding = {"unlimited_dims": "x"} assert concat([foo, foo], dim="x").encoding == foo.encoding assert concat([ds, ds], dim="x").encoding == ds.encoding @requires_dask def test_concat_lazy(self) -> None: import dask.array as da arrays = [ DataArray( da.from_array(InaccessibleArray(np.zeros((3, 3))), 3), dims=["x", "y"] ) for _ in range(2) ] # should not raise combined = concat(arrays, dim="z") assert combined.shape == (2, 3, 3) assert combined.dims == ("z", "x", "y") @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0]) def test_concat_fill_value(self, fill_value) -> None: foo = DataArray([1, 2], coords=[("x", [1, 2])]) bar = DataArray([1, 2], coords=[("x", [1, 3])]) if fill_value == dtypes.NA: # if we supply the default, we expect the missing value for a # float array fill_value = np.nan expected = DataArray( [[1, 2, fill_value], [1, fill_value, 2]], dims=["y", "x"], coords={"x": [1, 2, 3]}, ) actual = concat((foo, bar), dim="y", fill_value=fill_value) assert_identical(actual, expected) def test_concat_join_kwarg(self) -> None: ds1 = Dataset( {"a": (("x", "y"), [[0]])}, coords={"x": [0], "y": [0]} ).to_array() ds2 = Dataset( {"a": (("x", "y"), [[0]])}, coords={"x": [1], "y": [0.0001]} ).to_array() expected = {} expected["outer"] = Dataset( {"a": (("x", "y"), [[0, np.nan], [np.nan, 0]])}, {"x": [0, 1], "y": [0, 0.0001]}, ) expected["inner"] = Dataset( {"a": (("x", "y"), [[], []])}, {"x": [0, 1], "y": []} ) expected["left"] = Dataset( {"a": (("x", "y"), np.array([0, np.nan], ndmin=2).T)}, coords={"x": [0, 1], "y": [0]}, ) expected["right"] = Dataset( {"a": (("x", "y"), np.array([np.nan, 0], ndmin=2).T)}, coords={"x": [0, 1], "y": [0.0001]}, ) expected["override"] = Dataset( {"a": (("x", "y"), np.array([0, 0], ndmin=2).T)}, coords={"x": [0, 1], "y": [0]}, ) with pytest.raises(ValueError, match=r"indexes along dimension 'y'"): actual = concat([ds1, ds2], join="exact", dim="x") for join in expected: actual = concat([ds1, ds2], join=join, dim="x") assert_equal(actual, expected[join].to_array()) def test_concat_combine_attrs_kwarg(self) -> None: da1 = DataArray([0], coords=[("x", [0])], attrs={"b": 42}) da2 = DataArray([0], coords=[("x", [1])], attrs={"b": 42, "c": 43}) expected = {} expected["drop"] = DataArray([0, 0], coords=[("x", [0, 1])]) expected["no_conflicts"] = DataArray( [0, 0], coords=[("x", [0, 1])], attrs={"b": 42, "c": 43} ) expected["override"] = DataArray( [0, 0], coords=[("x", [0, 1])], attrs={"b": 42} ) with pytest.raises(ValueError, match=r"combine_attrs='identical'"): actual = concat([da1, da2], dim="x", 
combine_attrs="identical") with pytest.raises(ValueError, match=r"combine_attrs='no_conflicts'"): da3 = da2.copy(deep=True) da3.attrs["b"] = 44 actual = concat([da1, da3], dim="x", combine_attrs="no_conflicts") for combine_attrs in expected: actual = concat([da1, da2], dim="x", combine_attrs=combine_attrs) assert_identical(actual, expected[combine_attrs]) @pytest.mark.parametrize("dtype", [str, bytes]) @pytest.mark.parametrize("dim", ["x1", "x2"]) def test_concat_str_dtype(self, dtype, dim) -> None: data = np.arange(4).reshape([2, 2]) da1 = DataArray( data=data, dims=["x1", "x2"], coords={"x1": [0, 1], "x2": np.array(["a", "b"], dtype=dtype)}, ) da2 = DataArray( data=data, dims=["x1", "x2"], coords={"x1": np.array([1, 2]), "x2": np.array(["c", "d"], dtype=dtype)}, ) actual = concat([da1, da2], dim=dim) assert np.issubdtype(actual.x2.dtype, dtype) def test_concat_coord_name(self) -> None: da = DataArray([0], dims="a") da_concat = concat([da, da], dim=DataArray([0, 1], dims="b")) assert list(da_concat.coords) == ["b"] da_concat_std = concat([da, da], dim=DataArray([0, 1])) assert list(da_concat_std.coords) == ["dim_0"] @pytest.mark.parametrize("attr1", ({"a": {"meta": [10, 20, 30]}}, {"a": [1, 2, 3]}, {})) @pytest.mark.parametrize("attr2", ({"a": [1, 2, 3]}, {})) def test_concat_attrs_first_variable(attr1, attr2) -> None: arrs = [ DataArray([[1], [2]], dims=["x", "y"], attrs=attr1), DataArray([[3], [4]], dims=["x", "y"], attrs=attr2), ] concat_attrs = concat(arrs, "y").attrs assert concat_attrs == attr1 def test_concat_merge_single_non_dim_coord(): # TODO: annotating this func fails da1 = DataArray([1, 2, 3], dims="x", coords={"x": [1, 2, 3], "y": 1}) da2 = DataArray([4, 5, 6], dims="x", coords={"x": [4, 5, 6]}) expected = DataArray(range(1, 7), dims="x", coords={"x": range(1, 7), "y": 1}) for coords in ["different", "minimal"]: actual = concat([da1, da2], "x", coords=coords) assert_identical(actual, expected) with pytest.raises(ValueError, match=r"'y' is not present in all datasets."): concat([da1, da2], dim="x", coords="all") da1 = DataArray([1, 2, 3], dims="x", coords={"x": [1, 2, 3], "y": 1}) da2 = DataArray([4, 5, 6], dims="x", coords={"x": [4, 5, 6]}) da3 = DataArray([7, 8, 9], dims="x", coords={"x": [7, 8, 9], "y": 1}) for coords in ["different", "all"]: with pytest.raises(ValueError, match=r"'y' not present in all datasets"): concat([da1, da2, da3], dim="x") def test_concat_preserve_coordinate_order() -> None: x = np.arange(0, 5) y = np.arange(0, 10) time = np.arange(0, 4) data = np.zeros((4, 10, 5), dtype=bool) ds1 = Dataset( {"data": (["time", "y", "x"], data[0:2])}, coords={"time": time[0:2], "y": y, "x": x}, ) ds2 = Dataset( {"data": (["time", "y", "x"], data[2:4])}, coords={"time": time[2:4], "y": y, "x": x}, ) expected = Dataset( {"data": (["time", "y", "x"], data)}, coords={"time": time, "y": y, "x": x}, ) actual = concat([ds1, ds2], dim="time") # check dimension order for act, exp in zip(actual.dims, expected.dims): assert act == exp assert actual.dims[act] == expected.dims[exp] # check coordinate order for act, exp in zip(actual.coords, expected.coords): assert act == exp assert_identical(actual.coords[act], expected.coords[exp]) def test_concat_typing_check() -> None: ds = Dataset({"foo": 1}, {"bar": 2}) da = Dataset({"foo": 3}, {"bar": 4}).to_array(dim="foo") # concatenate a list of non-homogeneous types must raise TypeError with pytest.raises( TypeError, match="The elements in the input list need to be either all 'Dataset's or all 'DataArray's", ): concat([ds, da], 
dim="foo") with pytest.raises( TypeError, match="The elements in the input list need to be either all 'Dataset's or all 'DataArray's", ): concat([da, ds], dim="foo") ```
{ "source": "jhamman/xmap", "score": 3 }
#### File: xmap/xmap/utils.py ```python
import numpy as np
import xarray.ufuncs as xu


def lon_lat_to_cartesian(lon, lat, radius=1):
    """
    Calculates Cartesian (x, y, z) coordinates of points on a sphere of the
    given radius from their lon, lat coordinates.
    """
    # Unpack xarray objects into plain arrays
    if hasattr(lon, 'data'):
        lon = lon.data
    if hasattr(lat, 'data'):
        lat = lat.data

    if lon.ndim != lat.ndim:
        raise ValueError('coordinates must share the same number of dimensions')

    if lon.ndim == 1:
        lon, lat = np.meshgrid(lon, lat)

    lon_r = xu.radians(lon)
    lat_r = xu.radians(lat)

    x = radius * xu.cos(lat_r) * xu.cos(lon_r)
    y = radius * xu.cos(lat_r) * xu.sin(lon_r)
    z = radius * xu.sin(lat_r)

    return x.flatten(), y.flatten(), z.flatten()
```
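As a standalone sanity check of the same transform, the snippet below re-states the formula with plain numpy (no xmap or `xarray.ufuncs` import needed) and verifies that radius-1 points land on the unit sphere.

```python
# numpy-only re-statement of the lon/lat -> Cartesian formula used above,
# including the meshgrid step applied to 1-D inputs.
import numpy as np

lon = np.array([0.0, 90.0, 180.0, 270.0])
lat = np.array([-45.0, 0.0, 45.0])
lon2d, lat2d = np.meshgrid(lon, lat)

lon_r, lat_r = np.radians(lon2d), np.radians(lat2d)
x = np.cos(lat_r) * np.cos(lon_r)
y = np.cos(lat_r) * np.sin(lon_r)
z = np.sin(lat_r)

# with radius=1 every returned point must lie on the unit sphere
assert np.allclose(np.sqrt(x**2 + y**2 + z**2), 1.0)
```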
{ "source": "jhamman/xpublish", "score": 2 }
#### File: xpublish/tests/test_rest_api.py ```python import pytest import xarray as xr from starlette.testclient import TestClient import xpublish # noqa: F401 @pytest.fixture(scope='function') def airtemp_ds(): ds = xr.tutorial.open_dataset('air_temperature') return ds @pytest.fixture(scope='function') def airtemp_app(airtemp_ds): app_kws = dict( title='My Dataset', description='Dataset Description', version='1.0.0', openapi_url='/dataset.json', docs_url='/data-docs', ) client = TestClient(airtemp_ds.rest(app_kws=app_kws).app) yield client def test_rest_config(airtemp_ds): airtemp_ds.rest(cache_kws={'available_bytes': 999}) assert airtemp_ds.rest.cache.available_bytes == 999 def test_init_app(airtemp_ds): airtemp_ds.rest( app_kws=dict( title='My Dataset', description='Dataset Description', version='1.0.0', openapi_url='/dataset.json', docs_url='/data-docs', ) ) client = TestClient(airtemp_ds.rest.app) assert airtemp_ds.rest.app.title == 'My Dataset' assert airtemp_ds.rest.app.description == 'Dataset Description' assert airtemp_ds.rest.app.version == '1.0.0' response = client.get('/dataset.json') assert response.status_code == 200 response = client.get('/data-docs') assert response.status_code == 200 def test_keys(airtemp_ds, airtemp_app): response = airtemp_app.get('/keys') assert response.status_code == 200 assert response.json() == list(airtemp_ds.variables) def test_info(airtemp_ds, airtemp_app): response = airtemp_app.get('/info') assert response.status_code == 200 json_response = response.json() assert json_response['dimensions'] == airtemp_ds.dims assert list(json_response['variables'].keys()) == list(airtemp_ds.variables.keys()) def test_versions(airtemp_app): response = airtemp_app.get('/versions') assert response.status_code == 200 assert response.json()['xarray'] == xr.__version__ def test_repr(airtemp_ds, airtemp_app): response = airtemp_app.get('/') assert response.status_code == 200 def test_zmetadata(airtemp_ds, airtemp_app): response = airtemp_app.get('/.zmetadata') assert response.status_code == 200 assert response.json() == airtemp_ds.rest.zmetadata_json() def test_bad_key(airtemp_app): response = airtemp_app.get('/notakey') assert response.status_code == 404 def test_zarray(airtemp_app): response = airtemp_app.get('/air/.zarray') assert response.status_code == 200 def test_zattrs(airtemp_app): response = airtemp_app.get('/air/.zattrs') assert response.status_code == 200 response = airtemp_app.get('/.zattrs') assert response.status_code == 200 def test_get_chunk(airtemp_app): response = airtemp_app.get('/air/0.0.0') assert response.status_code == 200 def test_array_group_raises_404(airtemp_app): response = airtemp_app.get('/air/.zgroup') assert response.status_code == 404 def test_cache(airtemp_ds): rest = airtemp_ds.rest(cache_kws={'available_bytes': 1e9}) assert rest.cache.available_bytes == 1e9 client = TestClient(rest.app) response1 = client.get('/air/0.0.0') assert response1.status_code == 200 assert 'air/0.0.0' in airtemp_ds.rest.cache # test that we can retrieve response2 = client.get('/air/0.0.0') assert response2.status_code == 200 assert response1.content == response2.content ```
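Outside of pytest, the same accessor pattern serves any in-memory dataset. The sketch below mirrors the fixtures above but builds a tiny synthetic dataset instead of the tutorial file, so no network download is needed; it assumes xpublish and starlette are installed and that the `.rest` accessor behaves as in these tests.

```python
# Minimal serving sketch mirroring the test fixtures above, with a synthetic
# dataset in place of xr.tutorial.open_dataset (which requires network access).
import numpy as np
import xarray as xr
from starlette.testclient import TestClient
import xpublish  # noqa: F401  (registers the .rest accessor used below)

ds = xr.Dataset({"air": (("time", "x"), np.random.rand(4, 3))})
client = TestClient(ds.rest(app_kws={"title": "Tiny Dataset"}).app)

print(client.get("/keys").json())             # variable names, as in test_keys
print(client.get("/.zmetadata").status_code)  # 200, as in test_zmetadata
```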
{ "source": "jhamman/zarr", "score": 2 }
#### File: zarr/tests/test_util.py ```python from __future__ import absolute_import, print_function, division import numpy as np import pytest from zarr.util import (normalize_shape, normalize_chunks, is_total_slice, normalize_resize_args, human_readable_size, normalize_order, guess_chunks, info_html_report, info_text_report, normalize_fill_value) def test_normalize_shape(): assert (100,) == normalize_shape((100,)) assert (100,) == normalize_shape([100]) assert (100,) == normalize_shape(100) with pytest.raises(TypeError): normalize_shape(None) with pytest.raises(ValueError): normalize_shape('foo') def test_normalize_chunks(): assert (10,) == normalize_chunks((10,), (100,), 1) assert (10,) == normalize_chunks([10], (100,), 1) assert (10,) == normalize_chunks(10, (100,), 1) assert (10, 10) == normalize_chunks((10, 10), (100, 10), 1) assert (10, 10) == normalize_chunks(10, (100, 10), 1) assert (10, 10) == normalize_chunks((10, None), (100, 10), 1) assert (30, 20, 10) == normalize_chunks(30, (100, 20, 10), 1) assert (30, 20, 10) == normalize_chunks((30,), (100, 20, 10), 1) assert (30, 20, 10) == normalize_chunks((30, None), (100, 20, 10), 1) assert (30, 20, 10) == normalize_chunks((30, None, None), (100, 20, 10), 1) assert (30, 20, 10) == normalize_chunks((30, 20, None), (100, 20, 10), 1) assert (30, 20, 10) == normalize_chunks((30, 20, 10), (100, 20, 10), 1) with pytest.raises(ValueError): normalize_chunks('foo', (100,), 1) with pytest.raises(ValueError): normalize_chunks((100, 10), (100,), 1) # test auto-chunking chunks = normalize_chunks(None, (100,), 1) assert (100,) == chunks def test_is_total_slice(): # 1D assert is_total_slice(Ellipsis, (100,)) assert is_total_slice(slice(None), (100,)) assert is_total_slice(slice(0, 100), (100,)) assert not is_total_slice(slice(0, 50), (100,)) assert not is_total_slice(slice(0, 100, 2), (100,)) # 2D assert is_total_slice(Ellipsis, (100, 100)) assert is_total_slice(slice(None), (100, 100)) assert is_total_slice((slice(None), slice(None)), (100, 100)) assert is_total_slice((slice(0, 100), slice(0, 100)), (100, 100)) assert not is_total_slice((slice(0, 100), slice(0, 50)), (100, 100)) assert not is_total_slice((slice(0, 50), slice(0, 100)), (100, 100)) assert not is_total_slice((slice(0, 50), slice(0, 50)), (100, 100)) assert not is_total_slice((slice(0, 100, 2), slice(0, 100)), (100, 100)) with pytest.raises(TypeError): is_total_slice('foo', (100,)) def test_normalize_resize_args(): # 1D assert (200,) == normalize_resize_args((100,), 200) assert (200,) == normalize_resize_args((100,), (200,)) # 2D assert (200, 100) == normalize_resize_args((100, 100), (200, 100)) assert (200, 100) == normalize_resize_args((100, 100), (200, None)) assert (200, 100) == normalize_resize_args((100, 100), 200, 100) assert (200, 100) == normalize_resize_args((100, 100), 200, None) with pytest.raises(ValueError): normalize_resize_args((100,), (200, 100)) def test_human_readable_size(): assert '100' == human_readable_size(100) assert '1.0K' == human_readable_size(2**10) assert '1.0M' == human_readable_size(2**20) assert '1.0G' == human_readable_size(2**30) assert '1.0T' == human_readable_size(2**40) assert '1.0P' == human_readable_size(2**50) def test_normalize_order(): assert 'F' == normalize_order('F') assert 'C' == normalize_order('C') assert 'F' == normalize_order('f') assert 'C' == normalize_order('c') with pytest.raises(ValueError): normalize_order('foo') def test_normalize_fill_value(): assert b'' == normalize_fill_value(0, dtype=np.dtype('S1')) assert b'' == 
normalize_fill_value(0, dtype=np.dtype([('foo', 'i4'), ('bar', 'f8')])) assert '' == normalize_fill_value(0, dtype=np.dtype('U1')) def test_guess_chunks(): shapes = ( (100,), (100, 100), (1000000,), (1000000000,), (10000000000000000000000,), (10000, 10000), (10000000, 1000), (1000, 10000000), (10000000, 1000, 2), (1000, 10000000, 2), (10000, 10000, 10000), (100000, 100000, 100000), (1000000000, 1000000000, 1000000000), (0,), (0, 0), (10, 0), (0, 10), (1, 2, 0, 4, 5), ) for shape in shapes: chunks = guess_chunks(shape, 1) assert isinstance(chunks, tuple) assert len(chunks) == len(shape) # doesn't make any sense to allow chunks to have zero length dimension assert all([0 < c <= max(s, 1) for c, s in zip(chunks, shape)]) # ludicrous itemsize chunks = guess_chunks((1000000,), 40000000000) assert isinstance(chunks, tuple) assert (1,) == chunks def test_info_text_report(): items = [('foo', 'bar'), ('baz', 'qux')] expect = "foo : bar\nbaz : qux\n" assert expect == info_text_report(items) def test_info_html_report(): items = [('foo', 'bar'), ('baz', 'qux')] actual = info_html_report(items) assert '<table' == actual[:6] assert '</table>' == actual[-8:] ```
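For reference, the helpers exercised above can be called directly; the expected outputs in the comments follow the assertions in the tests, and only zarr and numpy are assumed installed.

```python
# Quick usage sketch of the zarr.util helpers tested above.
import numpy as np
from zarr.util import guess_chunks, human_readable_size, normalize_chunks

itemsize = np.dtype('f8').itemsize
print(human_readable_size(2 ** 30))                         # '1.0G'
print(normalize_chunks((100, None), (1000, 10), itemsize))  # (100, 10)
print(guess_chunks((10000, 10000), itemsize))               # auto-chosen chunk shape
```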
{ "source": "jhammelman/visual-attribution", "score": 2 }
#### File: jhammelman/visual-attribution/create_explainer.py ```python from explainer import backprop as bp from explainer import deeplift as df from explainer import gradcam as gc from explainer import patterns as pt from explainer import ebp from explainer import real_time as rt def get_explainer(model, name): methods = { 'vanilla_grad': bp.VanillaGradExplainer, 'grad_x_input': bp.GradxInputExplainer, 'saliency': bp.SaliencyExplainer, 'integrate_grad': bp.IntegrateGradExplainer, 'deconv': bp.DeconvExplainer, 'guided_backprop': bp.GuidedBackpropExplainer, 'deeplift_rescale': df.DeepLIFTRescaleExplainer, 'gradcam': gc.GradCAMExplainer, 'pattern_net': pt.PatternNetExplainer, 'pattern_lrp': pt.PatternLRPExplainer, 'excitation_backprop': ebp.ExcitationBackpropExplainer, 'contrastive_excitation_backprop': ebp.ContrastiveExcitationBackpropExplainer, 'real_time_saliency': rt.RealTimeSaliencyExplainer } if name == 'smooth_grad': base_explainer = methods['vanilla_grad'](model) explainer = bp.SmoothGradExplainer(base_explainer) elif name.find('pattern') != -1: explainer = methods[name]( model, params_file='./weights/imagenet_224_vgg_16.npz', pattern_file='./weights/imagenet_224_vgg_16.patterns.A_only.npz' ) elif name == 'gradcam': if model.__class__.__name__ == 'VGG': explainer = methods[name]( model, target_layer_name_keys=['features', '30'] # pool5 ) elif model.__class__.__name__ == 'GoogleNet': explainer = methods[name]( model, target_layer_name_keys=['pool5'], use_inp=True, ) elif model.__class__.__name__ == 'ResNet': explainer = methods[name]( model, target_layer_name_keys=['avgpool'], use_inp=True, ) elif name == 'excitation_backprop': if model.__class__.__name__ == 'VGG': # vgg16 explainer = methods[name]( model, output_layer_keys=['features', '23'] # pool4 ) elif model.__class__.__name__ == 'ResNet': # resnet50 explainer = methods[name]( model, output_layer_keys=['layer4', '1', 'conv1'] # res4a ) elif model.__class__.__name__ == 'GoogleNet': # googlent explainer = methods[name]( model, output_layer_keys=['pool2'] ) elif name == 'contrastive_excitation_backprop': if model.__class__.__name__ == 'VGG': # vgg16 explainer = methods[name]( model, intermediate_layer_keys=['features', '30'], # pool5 output_layer_keys=['features', '23'], # pool4 final_linear_keys=['classifier', '6'] # fc8 ) elif model.__class__.__name__ == 'ResNet': # resnet50 explainer = methods[name]( model, intermediate_layer_keys=['avgpool'], output_layer_keys=['layer4', '1', 'conv1'], # res4a final_linear_keys=['fc'] ) elif model.__class__.__name__ == 'GoogleNet': explainer = methods[name]( model, intermediate_layer_keys=['pool5'], output_layer_keys=['pool2'], final_linear_keys=['loss3.classifier'] ) elif name == 'real_time_saliency': explainer = methods[name]('./weights/model-1.ckpt') else: explainer = methods[name](model) return explainer def get_heatmap(saliency): saliency = saliency.squeeze() if len(saliency.size()) == 2: return saliency.abs().cpu().numpy() else: return saliency.abs().max(0)[0].cpu().numpy() ``` #### File: visual-attribution/explainer/backprop.py ```python import numpy as np from torch.autograd import Variable, Function import torch import types class VanillaGradExplainer(object): def __init__(self, model): self.model = model def _backprop(self, inp, ind): output = self.model(inp) if ind is None: ind = output.data.max(1)[1] grad_out = output.data.clone() grad_out.fill_(0.0) grad_out.scatter_(1, ind.unsqueeze(0).t(), 1.0) output.backward(grad_out) return inp.grad.data def explain(self, inp, ind=None): return 
self._backprop(inp, ind) class GradxInputExplainer(VanillaGradExplainer): def __init__(self, model): super(GradxInputExplainer, self).__init__(model) def explain(self, inp, ind=None): grad = self._backprop(inp, ind) return inp.data * grad class SaliencyExplainer(VanillaGradExplainer): def __init__(self, model): super(SaliencyExplainer, self).__init__(model) def explain(self, inp, ind=None): grad = self._backprop(inp, ind) return grad.abs() class IntegrateGradExplainer(VanillaGradExplainer): def __init__(self, model, steps=100): super(IntegrateGradExplainer, self).__init__(model) self.steps = steps def explain(self, inp, ind=None): grad = 0 inp_data = inp.data.clone() for alpha in np.arange(1 / self.steps, 1.0, 1 / self.steps): new_inp = Variable(inp_data * alpha, requires_grad=True) g = self._backprop(new_inp, ind) grad += g return grad * inp_data / self.steps class DeconvExplainer(VanillaGradExplainer): def __init__(self, model): super(DeconvExplainer, self).__init__(model) self._override_backward() def _override_backward(self): class _ReLU(Function): @staticmethod def forward(ctx, input): output = torch.clamp(input, min=0) return output @staticmethod def backward(ctx, grad_output): grad_inp = torch.clamp(grad_output, min=0) return grad_inp def new_forward(self, x): return _ReLU.apply(x) def replace(m): if m.__class__.__name__ == 'ReLU': m.forward = types.MethodType(new_forward, m) self.model.apply(replace) class GuidedBackpropExplainer(VanillaGradExplainer): def __init__(self, model): super(GuidedBackpropExplainer, self).__init__(model) self._override_backward() def _override_backward(self): class _ReLU(Function): @staticmethod def forward(ctx, input): output = torch.clamp(input, min=0) ctx.save_for_backward(output) return output @staticmethod def backward(ctx, grad_output): output, = ctx.saved_tensors mask1 = (output > 0).float() mask2 = (grad_output.data > 0).float() grad_inp = mask1 * mask2 * grad_output.data grad_output.data.copy_(grad_inp) return grad_output def new_forward(self, x): return _ReLU.apply(x) def replace(m): if m.__class__.__name__ == 'ReLU': m.forward = types.MethodType(new_forward, m) self.model.apply(replace) # modified from https://github.com/PAIR-code/saliency/blob/master/saliency/base.py#L80 class SmoothGradExplainer(object): def __init__(self, base_explainer, stdev_spread=0.15, nsamples=25, magnitude=True): self.base_explainer = base_explainer self.stdev_spread = stdev_spread self.nsamples = nsamples self.magnitude = magnitude def explain(self, inp, ind=None): stdev = self.stdev_spread * (inp.data.max() - inp.data.min()) total_gradients = 0 origin_inp_data = inp.data.clone() for i in range(self.nsamples): noise = torch.randn(inp.size()).cuda() * stdev inp.data.copy_(noise + origin_inp_data) grad = self.base_explainer.explain(inp, ind) if self.magnitude: total_gradients += grad ** 2 else: total_gradients += grad return total_gradients / self.nsamples ``` #### File: visual-attribution/explainer/patterns.py ```python import numpy as np import torch import torch.nn.functional as F def load_patterns(filename): f = np.load(filename) ret = {} for prefix in ["A", "r", "mu"]: l = sum([x.startswith(prefix) for x in f.keys()]) ret.update({prefix: [f["%s_%i" % (prefix, i)] for i in range(l)]}) return ret def load_params(filename): f = np.load(filename) weights = [] for i in range(32): if i in [26, 28, 30]: weights.append(f['arr_%d' % i].T) else: weights.append(f['arr_%d' % i]) return weights class PatternNetExplainer(object): def __init__(self, model, params_file=None, 
pattern_file=None): self.model = model self.weights = list(self.model.parameters()) self.np_weights = load_params(params_file) self.np_patterns = load_patterns(pattern_file)['A'] self._to_cuda() def _to_cuda(self): for i in range(len(self.np_weights)): self.np_weights[i] = torch.from_numpy(self.np_weights[i]).float().cuda() for i in range(len(self.np_patterns)): self.np_patterns[i] = torch.from_numpy(self.np_patterns[i]).float().cuda() def _fill_in_params(self): for i in range(32): self.weights[i].data.copy_(self.np_weights[i]) def _fill_in_patterns(self): for i in range(0, 26, 2): self.weights[i].data.copy_(self.np_patterns[int(i / 2)]) for i in range(26, 32, 2): self.weights[i].data.copy_(self.np_patterns[int(i / 2)].t()) def explain(self, inp, ind=None): self._fill_in_params() output = self.model(inp) prob = F.softmax(output) if ind is None: ind = output.data.max(1)[1] probvalue = prob.data.gather(1, ind.unsqueeze(0).t()) grad_out = output.data.clone() grad_out.fill_(0.0) grad_out.scatter_(1, ind.unsqueeze(0).t(), probvalue) self._fill_in_patterns() output.backward(grad_out) return inp.grad.data class PatternLRPExplainer(PatternNetExplainer): def __init__(self, model, params_file=None, pattern_file=None): super(PatternLRPExplainer, self).__init__(model, params_file, pattern_file) def _fill_in_patterns(self): for i in range(0, 26, 2): self.weights[i].data.copy_( self.weights[i].data * self.np_patterns[int(i / 2)] ) for i in range(26, 32, 2): self.weights[i].data.copy_( self.weights[i].data * self.np_patterns[int(i / 2)].t() ) ``` #### File: jhammelman/visual-attribution/utils.py ```python from torchvision import models from torch.autograd import Variable from torch._thnn import type2backend def load_model(arch): ''' Args: arch: (string) valid torchvision model name, recommendations 'vgg16' | 'googlenet' | 'resnet50' ''' if arch == 'googlenet': from googlenet import get_googlenet model = get_googlenet(pretrain=True) else: model = models.__dict__[arch](pretrained=True) model.eval() return model def cuda_var(tensor, requires_grad=False): return Variable(tensor.cuda(), requires_grad=requires_grad) def upsample(inp, size): ''' Args: inp: (Tensor) input size: (Tuple [int, int]) height x width ''' backend = type2backend[inp.type()] f = getattr(backend, 'SpatialUpSamplingBilinear_updateOutput') upsample_inp = inp.new() f(backend.library_state, inp, upsample_inp, size[0], size[1]) return upsample_inp ```
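Putting the modules above together, a typical call chain looks roughly like the sketch below. It is a sketch only: it assumes an older PyTorch with CUDA available (since `cuda_var` and the explainers work on GPU tensors and `Variable` gradients), and assumes the repository root is on the import path so `utils` and `create_explainer` resolve to the files shown above.

```python
# Rough end-to-end sketch using the helpers above; assumes an older PyTorch
# with CUDA available and the repo root on sys.path.
import torch
from utils import load_model, cuda_var              # utils.py shown above
from create_explainer import get_explainer, get_heatmap

model = load_model('vgg16').cuda()
explainer = get_explainer(model, 'vanilla_grad')

# dummy 224x224 RGB batch; requires_grad is needed because explain() reads inp.grad
inp = cuda_var(torch.randn(1, 3, 224, 224), requires_grad=True)
saliency = explainer.explain(inp)    # gradients w.r.t. the predicted class
heatmap = get_heatmap(saliency)      # (224, 224) numpy array
print(heatmap.shape)
```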
{ "source": "jhampson-dbre/addon_host_a_blog", "score": 2 }
#### File: host_a_blog/blog/models.py ```python import datetime from django.db import models from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator from django.utils.functional import cached_property from django.http import Http404 from modelcluster.fields import ParentalKey from modelcluster.tags import ClusterTaggableManager from taggit.models import Tag as TaggitTag from taggit.models import TaggedItemBase from wagtail.admin.edit_handlers import ( FieldPanel, FieldRowPanel, InlinePanel, MultiFieldPanel, PageChooserPanel, StreamFieldPanel, ) from wagtail.core.models import Page from wagtail.images.edit_handlers import ImageChooserPanel from wagtail.snippets.edit_handlers import SnippetChooserPanel from wagtail.snippets.models import register_snippet from wagtail.core.fields import StreamField from wagtail.contrib.routable_page.models import RoutablePageMixin, route from .blocks import BodyBlock class BlogPage(RoutablePageMixin, Page): description = models.CharField(max_length=255, blank=True,) content_panels = Page.content_panels + \ [FieldPanel("description", classname="full")] def get_context(self, request, *args, **kwargs): context = super().get_context(request, *args, **kwargs) context['blog_page'] = self paginator = Paginator(self.posts, 2) page = request.GET.get("page") try: posts = paginator.page(page) except PageNotAnInteger: posts = paginator.page(1) except EmptyPage: posts = paginator.object_list.none() context['posts'] = posts return context def get_posts(self): return PostPage.objects.descendant_of(self).live().order_by("-post_date") @route(r"^(\d{4})/$") @route(r"^(\d{4})/(\d{2})/$") @route(r"^(\d{4})/(\d{2})/(\d{2})/$") def post_by_date(self, request, year, month=None, day=None, *args, **kwargs): self.posts = self.get_posts().filter(post_date__year=year) if month: self.posts = self.posts.filter(post_date__month=month) if day: self.posts = self.posts.filter(post_date__day=day) return self.render(request) @route(r"^(\d{4})/(\d{2})/(\d{2})/(.+)/$") def post_by_date_slug(self, request, year, month, day, slug, *args, **kwargs): post_page = self.get_posts().filter(slug=slug).first() if not post_page: raise Http404 # here we render another page, so we call the serve method of the page instance return post_page.serve(request) @route(r'^tag/(?P<tag>[-\w]+)/$') def post_by_tag(self, request, tag, *args, **kwargs): self.posts = self.get_posts().filter(tags__slug=tag) return self.render(request) @route(r'^category/(?P<category>[-\w]+)/$') def post_by_category(self, request, category, *args, **kwargs): self.posts = self.get_posts().filter(categories__blog_category__slug=category) return self.render(request) @route(r'^$') def post_list(self, request, *args, **kwargs): self.posts = self.get_posts() return self.render(request) class PostPage(Page): header_image = models.ForeignKey( "wagtailimages.Image", null=True, blank=True, on_delete=models.SET_NULL, related_name="+", ) body = StreamField(BodyBlock(), blank=True) tags = ClusterTaggableManager(through="blog.PostPageTag", blank=True) content_panels = Page.content_panels + [ ImageChooserPanel("header_image"), InlinePanel("categories", label="category"), FieldPanel("tags"), StreamFieldPanel("body"), ] post_date = models.DateTimeField( verbose_name="Post date", default=datetime.datetime.today ) settings_panels = Page.settings_panels + [ FieldPanel("post_date"), ] def get_context(self, request, *args, **kwargs): context = super().get_context(request, *args, **kwargs) context['blog_page'] = self.blog_page return context 
@cached_property def blog_page(self): return self.get_parent().specific @cached_property def canonical_url(self): # we import here to avoid circular import from blog.templatetags.blogapp_tags import post_page_date_slug_url blog_page = self.get_parent().specific return post_page_date_slug_url(self, blog_page) class PostPageBlogCategory(models.Model): page = ParentalKey( "blog.PostPage", on_delete=models.CASCADE, related_name="categories" ) blog_category = models.ForeignKey( "blog.BlogCategory", on_delete=models.CASCADE, related_name="post_pages" ) panels = [ SnippetChooserPanel("blog_category"), ] class Meta: unique_together = ("page", "blog_category") @register_snippet class BlogCategory(models.Model): name = models.CharField(max_length=255) slug = models.SlugField(unique=True, max_length=80) panels = [ FieldPanel("name"), FieldPanel("slug"), ] def __str__(self): return self.name class Meta: verbose_name = "Category" verbose_name_plural = "Categories" class PostPageTag(TaggedItemBase): content_object = ParentalKey("PostPage", related_name="post_tags") @register_snippet class Tag(TaggitTag): class Meta: proxy = True ```
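To make the `@route` patterns above easier to read, the snippet below checks a few example URL suffixes against the same regular expressions with the standard library `re` module; it is illustrative only and does not touch Django or Wagtail.

```python
# Illustrative only: which BlogPage routes above would match a given URL suffix.
import re

routes = {
    "post_by_date": r"^(\d{4})/(\d{2})/(\d{2})/$",
    "post_by_date_slug": r"^(\d{4})/(\d{2})/(\d{2})/(.+)/$",
    "post_by_tag": r"^tag/(?P<tag>[-\w]+)/$",
    "post_by_category": r"^category/(?P<category>[-\w]+)/$",
}

for url in ["2024/01/31/", "2024/01/31/hello-world/", "tag/python/", "category/django/"]:
    matched = [name for name, pattern in routes.items() if re.match(pattern, url)]
    print(url, "->", matched)
```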
{ "source": "jhamrak/instrument-recognition-lab", "score": 3 }
#### File: jhamrak/instrument-recognition-lab/model_builder.py ```python import keras from keras.utils import np_utils from keras import layers from keras import models import os from keras.models import Sequential from keras.layers import Dense, Conv2D, MaxPool2D , Flatten, Dropout from keras.preprocessing.image import ImageDataGenerator import numpy as np def build_vgg(in_shape): model = models.Sequential() model.name = 'VGG' model.add(Conv2D(filters=64 , kernel_size=(3,3), padding="same", activation="relu", data_format="channels_first", input_shape=in_shape)) model.add(Conv2D(filters=64 , kernel_size=(3,3), padding="same", activation="relu", data_format="channels_first")) model.add(MaxPool2D(pool_size=(3,3), strides=(2,2), data_format="channels_first")) model.add(Dropout(0.2)) model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu", data_format="channels_first")) model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu", data_format="channels_first")) model.add(MaxPool2D(pool_size=(3,3), strides=(2,2), data_format="channels_first")) model.add(Dropout(0.2)) model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu", data_format="channels_first")) model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu", data_format="channels_first")) model.add(MaxPool2D(pool_size=(3,3), strides=(2,2), data_format="channels_first")) model.add(Dropout(0.2)) model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu", data_format="channels_first")) model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu", data_format="channels_first")) model.add(MaxPool2D(pool_size=(3,3), strides=(2,2), data_format="channels_first")) model.add(layers.Flatten()) model.add(layers.Dense(units=4096, activation='relu')) model.add(layers.Dense(units=2048, activation='relu')) model.add(layers.Dense(units=1, activation='sigmoid')) return model def build_small(in_shape): model = models.Sequential() model.name = 'VGG small' model.add(Conv2D(filters=64 , kernel_size=(3,3), padding="same", activation="relu", data_format="channels_first", input_shape=in_shape)) model.add(Conv2D(filters=64 , kernel_size=(3,3), padding="same", activation="relu", data_format="channels_first")) model.add(MaxPool2D(pool_size=(3,3), strides=(2,2), data_format="channels_first")) model.add(Dropout(0.2)) model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu", data_format="channels_first")) model.add(MaxPool2D(pool_size=(3,3), strides=(2,2), data_format="channels_first")) model.add(Dropout(0.2)) model.add(layers.Flatten()) model.add(layers.Dense(units=128, activation='relu')) model.add(layers.Dense(units=1, activation='sigmoid')) return model ``` #### File: jhamrak/instrument-recognition-lab/utils.py ```python import os import librosa as lb from datetime import datetime import numpy as np def undersample(X,Y,inst_coords,inst_num, threshold): X_inst = X[inst_coords] Y_inst = Y[inst_coords, inst_num] >= threshold count = np.sum(Y_inst) i = 0 undersampled_coords = [] for i in range(len(Y_inst)) : if Y_inst[i] == 1: undersampled_coords.append(i) elif count > 0: undersampled_coords.append(i) count -= 1 return X_inst[undersampled_coords], Y_inst[undersampled_coords] def get_instrument_arrays(X, Y, mask, inst_num, threshold): inst_coords = mask[:, inst_num] X, Y = undersample(X,Y,inst_coords,inst_num, threshold) X = get_transformed_array(X) return X, Y def get_instrument_arrays_ml(X, Y, mask, 
inst_num, threshold): inst_coords = mask[:, inst_num] X, Y = undersample(X,Y,inst_coords,inst_num, threshold) X = get_normalized_array(X) return X, Y def get_transformed_array(X_old): X = X_old shape = X.shape X = X.astype('float16') X = X.reshape(shape[0],1, shape[1], shape[2]) X = lb.util.normalize(X) return X def get_normalized_array(X_old): X = X_old X = X.astype('float16') X = lb.util.normalize(X) return X def create_dir(mode, data): dir_name = "logs/" + datetime.now().strftime("%m%d%H%M%S") + "-" + mode + "-" + data os.mkdir(dir_name) return dir_name ```
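A hypothetical way to wire the builders above into a trainable model is sketched below; the channels-first input shape (1 channel x 128 mel bands x 130 frames), the compile settings, and the `model_builder` import path are assumptions for illustration, not taken from the repository.

```python
# Hypothetical usage of build_small() above; input shape and compile settings
# are assumptions chosen to match the channels_first, single-sigmoid-output design.
import numpy as np
from model_builder import build_small

model = build_small(in_shape=(1, 128, 130))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()

# tiny fake batch of spectrogram patches with binary "instrument present" labels
X = np.random.rand(8, 1, 128, 130).astype('float16')
y = np.random.randint(0, 2, size=(8,))
model.fit(X, y, epochs=1, batch_size=4)
```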
{ "source": "jhamrick/bayesian-quadrature", "score": 2 }
#### File: bayesian_quadrature/tests/test_bq_object.py ```python import numpy as np import pytest import matplotlib.pyplot as plt from gp import GP from .. import BQ from . import util import logging logger = logging.getLogger("bayesian_quadrature") logger.setLevel("DEBUG") DTYPE = util.DTYPE options = util.options def test_init(): util.npseed() x, y = util.make_xy() bq = BQ(x, y, **options) assert (x == bq.x_s).all() assert (y == bq.l_s).all() assert (np.log(y) == bq.tl_s).all() assert (x.shape[0] == bq.ns) assert not bq.initialized assert bq.gp_log_l is None assert bq.gp_l is None assert bq.x_c is None assert bq.l_c is None assert bq.nc is None assert bq.x_sc is None assert bq.l_sc is None assert bq.nsc is None assert bq._approx_x is None assert bq._approx_px is None util.init_bq(bq) assert (x == bq.x_s).all() assert (y == bq.l_s).all() assert (np.log(y) == bq.tl_s).all() assert (x.shape[0] == bq.ns) assert bq.initialized assert bq.gp_log_l is not None assert hasattr(bq.gp_log_l, 'jitter') assert bq.gp_l is not None assert hasattr(bq.gp_l, 'jitter') assert bq.x_c is not None assert bq.l_c is not None assert bq.nc is not None assert bq.x_sc is not None assert bq.l_sc is not None assert bq.nsc is not None assert bq._approx_x is not None assert bq._approx_px is not None def test_bad_init(): util.npseed() x, y = util.make_xy() with pytest.raises(ValueError): BQ(x[:, None], y, **options) with pytest.raises(ValueError): BQ(x, y[:, None], **options) with pytest.raises(ValueError): BQ(x[:-1], y, **options) with pytest.raises(ValueError): BQ(x, y[:-1], **options) with pytest.raises(ValueError): BQ(x, -y, **options) def test_choose_candidates(): util.npseed() bq = util.make_bq(nc=1000) assert bq.x_c.ndim == 1 assert bq.x_sc.size >= bq.x_s.size diff = np.abs(bq.x_sc[:, None] - bq.x_c[None]) thresh = bq.options['candidate_thresh'] assert ((diff > thresh) | (diff == 0)).all() def test_l_mean(): util.npseed() bq = util.make_bq() l = bq.l_mean(bq.x_s) assert np.allclose(l, bq.l_s, atol=1e-4) def test_Z_mean(): util.npseed() bq = util.make_bq() xo = util.make_xo() approx_Z = bq._approx_Z_mean(xo) calc_Z = bq._exact_Z_mean() assert np.allclose(approx_Z, calc_Z, atol=1e-5) def test_Z_mean_same(): util.npseed() bq = util.make_bq() means = np.empty(100) for i in xrange(100): means[i] = bq.Z_mean() assert (means[0] == means).all() @pytest.mark.xfail(reason="https://github.com/numpy/numpy/issues/661") def test_Z_var_same(): util.npseed() bq = util.make_bq() vars = np.empty(100) for i in xrange(100): vars[i] = bq.Z_var() assert (vars[0] == vars).all() def test_Z_var_close(): util.npseed() bq = util.make_bq() vars = np.empty(100) for i in xrange(100): vars[i] = bq.Z_var() assert np.allclose(vars[0], vars) def test_Z_var(): # int int m_l(x) m_l(x') C_tl(x, x') dx dx' util.npseed() bq = util.make_bq() xo = util.make_xo() approx_var = bq._approx_Z_var(xo) calc_var = bq._exact_Z_var() assert np.allclose(approx_var, calc_var, atol=1e-4) def test_expected_Z_var_close(): util.npseed() bq = util.make_bq() Z_var = bq.Z_var() E_Z_var = bq.expected_Z_var(bq.x_s) assert np.allclose(E_Z_var, Z_var, atol=1e-4) def test_expected_squared_mean_valid(): util.npseed() bq = util.make_bq() x_a = np.random.uniform(-10, 10, 10) esm = bq.expected_squared_mean(x_a) assert (esm >= 0).all() def test_expected_squared_mean_params(): util.npseed() bq = util.make_bq() with pytest.raises(ValueError): bq.expected_squared_mean(np.array([np.nan])) with pytest.raises(ValueError): bq.expected_squared_mean(np.array([np.inf])) with 
pytest.raises(ValueError): bq.expected_squared_mean(np.array([-np.inf])) def test_expected_squared_mean(): util.npseed() bq = util.make_bq() x_a = np.random.uniform(-10, 10, 20) esm = bq.expected_squared_mean(x_a) bq.options['use_approx'] = True approx = bq.expected_squared_mean(x_a) assert np.allclose(approx, esm, rtol=1) def test_plot_gp_log_l(): util.npseed() bq = util.make_bq() fig, ax = plt.subplots() bq.plot_gp_log_l(ax) ax.cla() bq.plot_gp_log_l(ax, f_l=lambda x: np.log(util.f_x(x))) ax.cla() bq.plot_gp_log_l(ax, xmin=-10, xmax=10) ax.cla() plt.close('all') def test_plot_gp_l(): util.npseed() bq = util.make_bq() fig, ax = plt.subplots() bq.plot_gp_l(ax) ax.cla() bq.plot_gp_l(ax, f_l=util.f_x) ax.cla() bq.plot_gp_l(ax, xmin=-10, xmax=10) ax.cla() plt.close('all') def test_plot_l(): util.npseed() bq = util.make_bq() fig, ax = plt.subplots() bq.plot_l(ax) ax.cla() bq.plot_l(ax, f_l=util.f_x) ax.cla() bq.plot_l(ax, xmin=-10, xmax=10) ax.cla() plt.close('all') def test_plot(): util.npseed() bq = util.make_bq() bq.plot() plt.close('all') bq.plot(f_l=util.f_x) plt.close('all') bq.plot(xmin=-10, xmax=10) plt.close('all') def test_plot_expected_variance(): util.npseed() bq = util.make_bq() fig, ax = plt.subplots() bq.plot_expected_variance(ax) ax.cla() bq.plot_expected_variance(ax, xmin=-10, xmax=10) ax.cla() plt.close('all') def test_plot_expected_squared_mean(): util.npseed() bq = util.make_bq() fig, ax = plt.subplots() bq.plot_expected_squared_mean(ax) ax.cla() bq.plot_expected_squared_mean(ax, xmin=-10, xmax=10) ax.cla() plt.close('all') def test_l(): util.npseed() bq = util.make_bq() assert (np.log(bq.l_s) == bq.tl_s).all() assert (bq.l_s == bq.l_sc[:bq.ns]).all() assert (bq.l_sc[bq.ns:] == np.exp(bq.gp_log_l.mean(bq.x_c))).all() def test_expected_squared_mean_1(): util.npseed() X = np.linspace(-5, 5, 20)[:, None] for x in X: bq = util.make_bq(x=x, nc=0) m2 = bq.Z_mean() ** 2 E_m2 = bq.expected_squared_mean(x) assert np.allclose(m2, E_m2, atol=1e-4) E_m2_close = bq.expected_squared_mean(x - 1e-10) assert np.allclose(m2, E_m2_close, atol=1e-4) E_m2_close = bq.expected_squared_mean(x - 1e-8) assert np.allclose(m2, E_m2_close, atol=1e-4) def test_periodic(): util.npseed() bq = util.make_periodic_bq() x = np.linspace(-np.pi, np.pi, 1000) y = util.f_xp(x) assert np.allclose(bq.l_mean(x), y, atol=1e-3) def test_periodic_z_mean(): util.npseed() bq = util.make_periodic_bq() x = np.linspace(-np.pi, np.pi, 1000) l = bq.l_mean(x) p_x = bq._make_approx_px(x) approx_z = np.trapz(l * p_x, x) assert np.allclose(bq.Z_mean(), approx_z) def test_periodic_z_var(): util.npseed() bq = util.make_periodic_bq() x = np.linspace(-np.pi, np.pi, 1000) l = bq.l_mean(x) C = bq.gp_log_l.cov(x) p_x = bq._make_approx_px(x) approx_z = np.trapz(np.trapz(C * l * p_x, x) * l * p_x, x) assert np.allclose(bq.Z_var(), approx_z) @pytest.mark.xfail(reason="poorly conditioned matrix") def test_periodic_expected_squared_mean(): util.npseed() bq = util.make_periodic_bq(nc=0) x_a = np.random.uniform(-np.pi, np.pi, 20)[:, None] x = np.linspace(-np.pi, np.pi, 1000) for xa in x_a: esm = bq.expected_squared_mean(xa) approx = bq._approx_expected_squared_mean(xa, x) assert np.allclose(esm, approx) def test_periodic_expected_squared_mean_1(): util.npseed() X = np.linspace(-np.pi, np.pi, 20)[:, None] for x in X: bq = util.make_periodic_bq(x=x, nc=0) m2 = bq.Z_mean() ** 2 E_m2 = bq.expected_squared_mean(x) assert np.allclose(m2, E_m2, atol=1e-4) E_m2_close = bq.expected_squared_mean(x - 1e-10) assert np.allclose(m2, E_m2_close, atol=1e-4) 
E_m2_close = bq.expected_squared_mean(x - 1e-8) assert np.allclose(m2, E_m2_close, atol=1e-4) def test_add_observation(): util.npseed() bq = util.make_bq() x = bq.x_s.copy() l = bq.l_s.copy() tl = bq.tl_s.copy() x_a = 20 l_a = util.f_x(x_a) tl_a = np.log(l_a) bq.add_observation(x_a, l_a) assert (bq.x_s == np.append(x, x_a)).all() assert (bq.l_s == np.append(l, l_a)).all() assert (bq.tl_s == np.append(tl, tl_a)).all() assert (bq.x_s == bq.x_sc[:bq.ns]).all() assert (bq.l_s == bq.l_sc[:bq.ns]).all() old_x_s = bq.x_s.copy() old_l_s = bq.l_s.copy() bq.add_observation(x[0], l[0]) assert (old_x_s == bq.x_s).all() assert (old_l_s == bq.l_s).all() def test_approx_add_observation(): util.npseed() bq = util.make_periodic_bq(np.linspace(-np.pi, 0, 4)) x = bq.x_s.copy() l = bq.l_s.copy() tl = bq.tl_s.copy() x_a = np.pi / 2. l_a = util.f_x(x_a) tl_a = np.log(l_a) bq.add_observation(x_a, l_a) assert (bq.x_s == np.append(x, x_a)).all() assert (bq.l_s == np.append(l, l_a)).all() assert (bq.tl_s == np.append(tl, tl_a)).all() assert (bq.x_s == bq.x_sc[:bq.ns]).all() assert (bq.l_s == bq.l_sc[:bq.ns]).all() old_x_s = bq.x_s.copy() old_l_s = bq.l_s.copy() bq.add_observation(x[0], l[0]) assert (old_x_s == bq.x_s).all() assert (old_l_s == bq.l_s).all() def test_getstate(): util.npseed() bq = util.make_bq(init=False) # uninitialized state = bq.__getstate__() assert (state['x_s'] == bq.x_s).all() assert (state['l_s'] == bq.l_s).all() assert (state['tl_s'] == bq.tl_s).all() assert state['options'] == bq.options assert state['initialized'] == bq.initialized assert sorted(state.keys()) == sorted( ['x_s', 'l_s', 'tl_s', 'options', 'initialized']) util.init_bq(bq) state = bq.__getstate__() assert (state['x_s'] == bq.x_s).all() assert (state['l_s'] == bq.l_s).all() assert (state['tl_s'] == bq.tl_s).all() assert state['options'] == bq.options assert state['initialized'] == bq.initialized assert state['gp_log_l'] == bq.gp_log_l assert (state['gp_log_l_jitter'] == bq.gp_log_l.jitter).all() assert state['gp_l'] == bq.gp_l assert (state['gp_l_jitter'] == bq.gp_l.jitter).all() assert sorted(state.keys()) == sorted( ['x_s', 'l_s', 'tl_s', 'options', 'initialized', 'gp_log_l', 'gp_log_l_jitter', 'gp_l', 'gp_l_jitter', '_approx_x', '_approx_px']) def test_copy(): util.npseed() bq1 = util.make_bq(init=False) bq2 = bq1.copy(deep=False) assert bq1 is not bq2 state1 = bq1.__getstate__() state2 = bq2.__getstate__() assert sorted(state1.keys()) == sorted(state2.keys()) for key in state1.keys(): if isinstance(state1[key], np.ndarray): assert (state1[key] == state2[key]).all() elif not isinstance(state1[key], GP): assert state1[key] == state2[key] if not isinstance(state1[key], bool): assert state1[key] is state2[key] util.init_bq(bq1) assert bq1.initialized assert not bq2.initialized state1 = bq1.__getstate__() state2 = bq2.__getstate__() assert sorted(state1.keys()) != sorted(state2.keys()) for key in state1.keys(): if key == 'initialized': continue if key not in state2: continue if isinstance(state1[key], np.ndarray): assert (state1[key] == state2[key]).all() elif not isinstance(state1[key], GP): assert state1[key] == state2[key] if not isinstance(state1[key], bool): assert state1[key] is state2[key] bq1 = util.make_bq() bq2 = bq1.copy(deep=False) state1 = bq1.__getstate__() state2 = bq2.__getstate__() assert sorted(state1.keys()) == sorted(state2.keys()) for key in state1.keys(): if isinstance(state1[key], np.ndarray): assert (state1[key] == state2[key]).all() elif not isinstance(state1[key], GP): assert state1[key] == state2[key] 
if not isinstance(state1[key], bool): assert state1[key] is state2[key] def test_deepcopy(): util.npseed() bq1 = util.make_bq(init=False) bq2 = bq1.copy(deep=True) assert bq1 is not bq2 state1 = bq1.__getstate__() state2 = bq2.__getstate__() assert sorted(state1.keys()) == sorted(state2.keys()) for key in state1.keys(): if isinstance(state1[key], np.ndarray): assert (state1[key] == state2[key]).all() elif not isinstance(state1[key], GP): assert state1[key] == state2[key] if not isinstance(state1[key], bool): assert state1[key] is not state2[key] util.init_bq(bq1) assert bq1.initialized assert not bq2.initialized state1 = bq1.__getstate__() state2 = bq2.__getstate__() assert sorted(state1.keys()) != sorted(state2.keys()) for key in state1.keys(): if key == 'initialized': continue if key not in state2: continue if isinstance(state1[key], np.ndarray): assert (state1[key] == state2[key]).all() elif not isinstance(state1[key], GP): assert state1[key] == state2[key] if not isinstance(state1[key], bool): assert state1[key] is not state2[key] bq1 = util.make_bq() bq2 = bq1.copy(deep=True) state1 = bq1.__getstate__() state2 = bq2.__getstate__() assert sorted(state1.keys()) == sorted(state2.keys()) for key in state1.keys(): if isinstance(state1[key], np.ndarray): assert (state1[key] == state2[key]).all() elif not isinstance(state1[key], GP): assert state1[key] == state2[key] if not isinstance(state1[key], bool): assert state1[key] is not state2[key] def test_set_params(): util.npseed() bq = util.make_bq() params_tl = bq.gp_log_l.params params_l = bq.gp_l.params x_sc = bq.x_sc.copy() l_sc = bq.l_sc.copy() bq._set_gp_log_l_params(dict(h=10, w=3.0, s=0.01)) assert (bq.gp_log_l.params != params_tl).all() assert (bq.gp_l.params == params_l).all() assert (bq.gp_log_l.jitter == 0).all() assert (bq.gp_l.jitter == 0).all() assert (bq.x_sc == x_sc).all() assert not (bq.l_sc == l_sc).all() params_tl = bq.gp_log_l.params bq._set_gp_l_params(dict(h=0.3, w=1.4, s=0.01)) assert (bq.gp_log_l.params == params_tl).all() assert (bq.gp_l.params != params_l).all() assert (bq.gp_log_l.jitter == 0).all() assert (bq.gp_l.jitter == 0).all() def test_fit_hypers(): util.npseed() bq = util.make_bq() llh = bq.gp_log_l.log_lh + bq.gp_l.log_lh bq.fit_hypers(['h', 'w']) new_llh = bq.gp_log_l.log_lh + bq.gp_l.log_lh assert new_llh >= llh def test_sample_hypers(): util.npseed() bq = util.make_bq() params = ['h', 'w'] params_tl = {p: bq.gp_log_l.get_param(p) for p in params} params_l = {p: bq.gp_l.get_param(p) for p in params} bq.sample_hypers(params) assert not np.isinf(bq.gp_log_l.log_lh) assert not np.isinf(bq.gp_l.log_lh) for p in params: assert bq.gp_log_l.get_param(p) != params_tl[p] assert bq.gp_l.get_param(p) != params_l[p] bq = util.make_bq(init=False) bq.init(params_tl=(15, 2, 0), params_l=(0.2, 1.3, 0)) bq.sample_hypers(['h', 'w']) assert not np.isinf(bq.gp_log_l.log_lh) assert not np.isinf(bq.gp_l.log_lh) bq = util.make_bq(init=False) bq.init(params_tl=(15, 2, 0), params_l=(0.00002, 1.3, 0)) bq.sample_hypers(['h']) assert not np.isinf(bq.gp_log_l.log_lh) assert not np.isinf(bq.gp_l.log_lh) bq = util.make_bq(init=False) bq.init(params_tl=(15, 2, 0), params_l=(0.2, 5, 0)) bq.sample_hypers(['w']) assert not np.isinf(bq.gp_log_l.log_lh) assert not np.isinf(bq.gp_l.log_lh) bq = util.make_bq(init=False) bq.init(params_tl=(15, 2, 0), params_l=(0.00000002, 1.3, 0)) with pytest.raises(RuntimeError): bq.sample_hypers(['w']) def test_marginal_mean(): util.npseed() bq = util.make_bq() # marginal mean values = bq.marginalize( 
[bq.Z_mean], 20, ['h', 'w']) assert len(values) == 1 assert values[0].shape == (20,) def test_marginal_mean_and_variance(): util.npseed() bq = util.make_bq() # marginal mean and variance values = bq.marginalize( [bq.Z_mean, bq.Z_var], 20, ['h', 'w']) assert len(values) == 2 assert values[0].shape == (20,) assert values[1].shape == (20,) def test_marginal_loss(): util.npseed() bq = util.make_bq() x_a = np.random.uniform(-10, 10, 5) # setting params llh = bq.gp_log_l.log_lh + bq.gp_l.log_lh f = lambda: bq.expected_squared_mean(x_a) values = bq.marginalize([f], 20, ['h', 'w']) assert len(values) == 1 assert values[0].shape == (20, 5) def test_choose_next(): util.npseed() bq = util.make_bq() x_a = np.random.uniform(-10, 10, 5) bq.choose_next(x_a, 20, ['h', 'w']) bq.choose_next(x_a, 20, ['h', 'w'], plot=True) ``` #### File: bayesian_quadrature/tests/test_gauss_c.py ```python import numpy as np import scipy.stats import pytest from .. import gauss_c from .. import linalg_c as la from . import util import logging logger = logging.getLogger("bayesian_quadrature") logger.setLevel("DEBUG") DTYPE = util.DTYPE options = util.options def test_mvn_logpdf(): util.npseed() x_mean = options['x_mean'] x_var = options['x_var'] mu = np.array([x_mean], order='F') cov = np.array([[x_var]], order='F') la.cho_factor(cov, cov) logdet = la.logdet(cov) n = 20 x = np.array(np.random.uniform(-10, 10, n)[None], order='F') y = np.log(np.array( scipy.stats.norm.pdf(x, x_mean, np.sqrt(x_var)), order='F')) pdf = np.empty(n, order='F') for i in xrange(n): pdf[i] = gauss_c.mvn_logpdf(x[:, i], mu, cov, logdet) assert np.allclose(y, pdf) def test_mvn_logpdf_same(): util.npseed() mu = np.array([options['x_mean']], order='F') cov = np.array([[options['x_var']]], order='F') la.cho_factor(cov, cov) logdet = la.logdet(cov) n = 20 m = 20 x = np.array(np.random.uniform(-10, 10, n)[None], order='F') pdf = np.empty((m, n), order='F') for i in xrange(m): for j in xrange(n): pdf[i, j] = gauss_c.mvn_logpdf(x[:, j], mu, cov, logdet) assert (pdf[0] == pdf).all() def test_int_exp_norm(): def approx_int_exp_norm(xo, c, m, S): e = np.exp(xo * c) p = scipy.stats.norm.pdf(xo, m, np.sqrt(S)) return np.trapz(e * p, xo) xo = np.linspace(-20, 20, 1000) approx = approx_int_exp_norm(xo, 2, 0, 1) calc = gauss_c.int_exp_norm(2, 0, 1) assert np.allclose(approx, calc) approx = approx_int_exp_norm(xo, 1, 0, 1) calc = gauss_c.int_exp_norm(1, 0, 1) assert np.allclose(approx, calc) approx = approx_int_exp_norm(xo, 2, 1, 1) calc = gauss_c.int_exp_norm(2, 1, 1) assert np.allclose(approx, calc) approx = approx_int_exp_norm(xo, 2, 1, 2) calc = gauss_c.int_exp_norm(2, 1, 2) assert np.allclose(approx, calc) def test_int_K(): util.npseed() bq = util.make_bq() xo = util.make_xo() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] Kxxo = np.array(bq.gp_l.Kxxo(xo), order='F') approx_int = np.empty(bq.gp_l.x.shape[0], order='F') gauss_c.approx_int_K( approx_int, np.array(xo[None], order='F'), Kxxo, x_mean, x_cov) calc_int = np.empty(bq.gp_l.x.shape[0], order='F') gauss_c.int_K( calc_int, np.array(bq.gp_l.x[None], order='F'), bq.gp_l.K.h, np.array([bq.gp_l.K.w]), x_mean, x_cov) assert np.allclose(calc_int, approx_int, atol=1e-5) def test_int_K_same(): util.npseed() bq = util.make_bq() xo = util.make_xo() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] vals = np.empty((bq.gp_l.x.shape[0], 20), order='F') for i in xrange(20): gauss_c.int_K( vals[:, i], np.array(bq.gp_l.x[None], order='F'), bq.gp_l.K.h, np.array([bq.gp_l.K.w]), x_mean, x_cov) assert 
(vals[:, [0]] == vals).all() def test_approx_int_K_same(): util.npseed() bq = util.make_bq() xo = util.make_xo() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] Kxxo = np.array(bq.gp_l.Kxxo(xo), order='F') vals = np.empty((bq.gp_l.x.shape[0], 20), order='F') xo = np.array(xo[None], order='F') for i in xrange(20): gauss_c.approx_int_K( vals[:, i], xo, np.array(Kxxo, order='F'), x_mean, x_cov) assert (vals[:, [0]] == vals).all() def test_int_K1_K2(): util.npseed() bq = util.make_bq() xo = util.make_xo() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] K1xxo = np.array(bq.gp_l.Kxxo(xo), order='F') K2xxo = np.array(bq.gp_log_l.Kxxo(xo), order='F') approx_int = np.empty((bq.gp_l.x.shape[0], bq.gp_log_l.x.shape[0]), order='F') gauss_c.approx_int_K1_K2( approx_int, np.array(xo[None], order='F'), K1xxo, K2xxo, x_mean, x_cov) calc_int = np.empty((bq.gp_l.x.shape[0], bq.gp_log_l.x.shape[0]), order='F') gauss_c.int_K1_K2( calc_int, np.array(bq.gp_l.x[None], order='F'), np.array(bq.gp_log_l.x[None], order='F'), bq.gp_l.K.h, np.array([bq.gp_l.K.w], order='F'), bq.gp_log_l.K.h, np.array([bq.gp_log_l.K.w], order='F'), x_mean, x_cov) assert np.allclose(calc_int, approx_int, atol=1e-3) def test_int_K1_K2_same(): util.npseed() bq = util.make_bq() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] vals = np.empty((bq.gp_l.x.shape[0], bq.gp_log_l.x.shape[0], 20), order='F') for i in xrange(vals.shape[-1]): gauss_c.int_K1_K2( vals[:, :, i], np.array(bq.gp_l.x[None], order='F'), np.array(bq.gp_log_l.x[None], order='F'), bq.gp_l.K.h, np.array([bq.gp_l.K.w], order='F'), bq.gp_log_l.K.h, np.array([bq.gp_log_l.K.w], order='F'), x_mean, x_cov) assert (vals[:, :, [0]] == vals).all() def test_approx_int_K1_K2_same(): util.npseed() bq = util.make_bq() xo = util.make_xo() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] K1xxo = np.array(bq.gp_l.Kxxo(xo), order='F') K2xxo = np.array(bq.gp_log_l.Kxxo(xo), order='F') vals = np.empty((bq.gp_l.x.shape[0], bq.gp_log_l.x.shape[0], 20), order='F') for i in xrange(vals.shape[-1]): gauss_c.approx_int_K1_K2( vals[:, :, i], np.array(xo[None], order='F'), K1xxo, K2xxo, x_mean, x_cov) assert (vals[:, :, [0]] == vals).all() def test_int_int_K1_K2_K1(): util.npseed() bq = util.make_bq() xo = util.make_xo() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] K1xxo = np.array(bq.gp_l.Kxxo(xo), order='F') K2xoxo = np.array(bq.gp_log_l.Kxoxo(xo), order='F') approx_int = np.empty((bq.gp_l.x.shape[0], bq.gp_l.x.shape[0]), order='F') gauss_c.approx_int_int_K1_K2_K1( approx_int, np.array(xo[None], order='F'), K1xxo, K2xoxo, x_mean, x_cov) calc_int = np.empty((bq.gp_l.x.shape[0], bq.gp_l.x.shape[0]), order='F') gauss_c.int_int_K1_K2_K1( calc_int, np.array(bq.gp_l.x[None], order='F'), bq.gp_l.K.h, np.array([bq.gp_l.K.w]), bq.gp_log_l.K.h, np.array([bq.gp_log_l.K.w]), x_mean, x_cov) assert np.allclose(calc_int, approx_int, atol=1e-5) def test_int_int_K1_K2_K1_same(): util.npseed() bq = util.make_bq() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] vals = np.empty((bq.gp_l.x.shape[0], bq.gp_l.x.shape[0], 20), order='F') for i in xrange(vals.shape[-1]): gauss_c.int_int_K1_K2_K1( vals[:, :, i], np.array(bq.gp_l.x[None], order='F'), bq.gp_l.K.h, np.array([bq.gp_l.K.w]), bq.gp_log_l.K.h, np.array([bq.gp_log_l.K.w]), x_mean, x_cov) assert (vals[:, :, [0]] == vals).all() def test_approx_int_int_K1_K2_K1_same(): util.npseed() bq = util.make_bq() xo = util.make_xo() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] K1xxo = 
np.array(bq.gp_l.Kxxo(xo), order='F') K2xoxo = np.array(bq.gp_log_l.Kxoxo(xo), order='F') vals = np.empty((bq.gp_l.x.shape[0], bq.gp_l.x.shape[0], 20), order='F') for i in xrange(vals.shape[-1]): gauss_c.approx_int_int_K1_K2_K1( vals[:, :, i], np.array(xo[None], order='F'), K1xxo, K2xoxo, x_mean, x_cov) assert (vals[:, :, [0]] == vals).all() def test_int_int_K1_K2(): util.npseed() bq = util.make_bq() xo = util.make_xo() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] K1xoxo = np.array(bq.gp_l.Kxoxo(xo), order='F') K2xxo = np.array(bq.gp_log_l.Kxxo(xo), order='F') approx_int = np.empty(bq.gp_log_l.x.shape[0], order='F') gauss_c.approx_int_int_K1_K2( approx_int, np.array(xo[None], order='F'), K1xoxo, K2xxo, x_mean, x_cov) calc_int = np.empty(bq.gp_log_l.x.shape[0], order='F') gauss_c.int_int_K1_K2( calc_int, np.array(bq.gp_log_l.x[None], order='F'), bq.gp_l.K.h, np.array([bq.gp_l.K.w]), bq.gp_log_l.K.h, np.array([bq.gp_log_l.K.w]), x_mean, x_cov) assert np.allclose(calc_int, approx_int, atol=1e-5) def test_int_int_K1_K2_same(): util.npseed() bq = util.make_bq() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] vals = np.empty((bq.gp_log_l.x.shape[0], 20), order='F') for i in xrange(vals.shape[-1]): gauss_c.int_int_K1_K2( vals[:, i], np.array(bq.gp_log_l.x[None], order='F'), bq.gp_l.K.h, np.array([bq.gp_l.K.w]), bq.gp_log_l.K.h, np.array([bq.gp_log_l.K.w]), x_mean, x_cov) assert (vals[:, [0]] == vals).all() def test_approx_int_int_K1_K2_same(): util.npseed() bq = util.make_bq() xo = util.make_xo() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] K1xoxo = np.array(bq.gp_l.Kxoxo(xo), order='F') K2xxo = np.array(bq.gp_log_l.Kxxo(xo), order='F') vals = np.empty((bq.gp_log_l.x.shape[0], 20), order='F') for i in xrange(vals.shape[-1]): gauss_c.approx_int_int_K1_K2( vals[:, i], np.array(xo[None], order='F'), K1xoxo, K2xxo, x_mean, x_cov) assert (vals[:, [0]] == vals).all() def test_int_int_K(): util.npseed() bq = util.make_bq() xo = util.make_xo() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] Kxoxo = np.array(bq.gp_l.Kxoxo(xo), order='F') approx_int = gauss_c.approx_int_int_K( np.array(xo[None], order='F'), Kxoxo, x_mean, x_cov) calc_int = gauss_c.int_int_K( 1, bq.gp_l.K.h, np.array([bq.gp_l.K.w]), x_mean, x_cov) assert np.allclose(calc_int, approx_int, atol=1e-6) def test_int_int_K_same(): util.npseed() bq = util.make_bq() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] vals = np.empty(20) for i in xrange(vals.shape[-1]): vals[i] = gauss_c.int_int_K( 1, bq.gp_l.K.h, np.array([bq.gp_l.K.w]), x_mean, x_cov) assert (vals[0] == vals).all() def test_approx_int_int_K_same(): util.npseed() bq = util.make_bq() xo = util.make_xo() x_mean = bq.options['x_mean'] x_cov = bq.options['x_cov'] Kxoxo = np.array(bq.gp_l.Kxoxo(xo), order='F') vals = np.empty(20) for i in xrange(vals.shape[-1]): vals[i] = gauss_c.approx_int_int_K( np.array(xo[None], order='F'), Kxoxo, x_mean, x_cov) assert (vals[0] == vals).all() ``` #### File: bayesian_quadrature/tests/test_util.py ```python import matplotlib.pyplot as plt import numpy as np import scipy.stats import logging from bayesian_quadrature import util from . 
import util as tutil logger = logging.getLogger("bayesian_quadrature.util") logger.setLevel("INFO") def test_set_scientific(): fig, ax = plt.subplots() util.set_scientific(ax, -5, 4, axis=None) util.set_scientific(ax, -5, 4, axis='x') util.set_scientific(ax, -5, 4, axis='y') plt.close('all') def test_slice_sample_normal(): tutil.npseed() def logpdf(x): return (-(x ** 2) / 2.) - 0.5 * np.log(2 * np.pi) w = np.array([1.0]) x0 = np.array([0.0]) samples = util.slice_sample(logpdf, 10000, w, xval=x0, nburn=10, freq=1) hist, bins = np.histogram(samples, bins=10, normed=True) centers = (bins[:-1] + bins[1:]) / 2. bin_pdf = np.exp(logpdf(centers)) assert (np.abs(bin_pdf - hist) < 0.02).all() def test_slice_sample_uniform(): tutil.npseed() def logpdf(x): if x > 1 or x < 0: return -np.inf return 0 w = np.array([0.5]) x0 = np.array([0.0]) samples = util.slice_sample(logpdf, 10000, w, xval=x0, nburn=10, freq=1) hist, bins = np.histogram(samples, bins=5, normed=True, range=[0, 1]) assert (np.abs(hist - 1) < 0.05).all() ``` #### File: bayesian-quadrature/bayesian_quadrature/util.py ```python import matplotlib.pyplot as plt import numpy as np import logging import scipy.optimize as optim from . import bq_c from .util_c import slice_sample as _slice_sample logger = logging.getLogger("bayesian_quadrature.util") DTYPE = np.dtype('float64') PREC = np.finfo(DTYPE).precision MIN = np.log(np.exp2(np.float64(np.finfo(np.float64).minexp + 4))) def set_scientific(ax, low, high, axis=None): """Set the axes or axis specified by `axis` to use scientific notation for ticklabels, if the value is <10**low or >10**high. Parameters ---------- ax : axis object The matplotlib axis object to use low : int Lower exponent bound for non-scientific notation high : int Upper exponent bound for non-scientific notation axis : str (default=None) Which axis to format ('x', 'y', or None for both) """ # create the tick label formatter fmt = plt.ScalarFormatter() fmt.set_scientific(True) fmt.set_powerlimits((low, high)) # format the x axis if axis is None or axis == 'x': ax.get_xaxis().set_major_formatter(fmt) # format the y axis if axis is None or axis == 'y': ax.get_yaxis().set_major_formatter(fmt) def slice_sample(logpdf, niter, w, xval, nburn=1, freq=1): """Draws samples from 'logpdf', optionally starting from 'xval'. The pdf should return log values. Parameters ---------- logpdf : function Target distribution. logpdf(xval) should return ln(Pr(xval)) niter : int Number of iterations to run w : np.ndarray The step by which to adjust the window size. xval : numpy.ndarray The initial starting value.
nburn : int (default 1) Number of samples to skip at the beginning freq : int (default 1) How often to record samples """ samples = np.empty((niter, xval.size)) samples[0] = xval # zero means unset, so we don't want to log in that case verbose = (logger.level != 0) and (logger.level < 10) _slice_sample(samples, logpdf, xval, w, verbose) # don't return burnin samples or inbetween samples out = samples[nburn:][::freq] return out def vlines(ax, x, **kwargs): ymin, ymax = ax.get_ylim() ax.vlines(x, ymin, ymax, **kwargs) ax.set_ylim(ymin, ymax) def hlines(ax, y, **kwargs): xmin, xmax = ax.get_xlim() ax.hlines(y, xmin, xmax, **kwargs) ax.set_xlim(xmin, xmax) def improve_conditioning(gp): Kxx = gp.Kxx cond = np.linalg.cond(Kxx) logger.debug("Kxx conditioning number is %s", cond) if hasattr(gp, "jitter"): jitter = gp.jitter else: jitter = np.zeros(Kxx.shape[0], dtype=DTYPE) gp.jitter = jitter # the conditioning is really bad -- just increase the variance # a little for all the elements until it's less bad idx = np.arange(Kxx.shape[0]) while np.log10(cond) > (PREC / 2.0): logger.debug("Adding jitter to all elements") bq_c.improve_covariance_conditioning(Kxx, jitter, idx=idx) cond = np.linalg.cond(Kxx) logger.debug("Kxx conditioning number is now %s", cond) # now improve just for those elements which result in a # negative variance, until there are no more negative elements # in the diagonal gp._memoized = {'Kxx': Kxx} var = np.diag(gp.cov(gp._x)) while (var < 0).any(): idx = np.nonzero(var < 0)[0] logger.debug("Adding jitter to indices %s", idx) bq_c.improve_covariance_conditioning(Kxx, jitter, idx=idx) Kxx = gp.Kxx gp._memoized = {'Kxx': Kxx} var = np.diag(gp.cov(gp._x)) cond = np.linalg.cond(Kxx) logger.debug("Kxx conditioning number is now %s", cond) def improve_tail_covariance(gp): Kxx = gp.Kxx gp._memoized = {'Kxx': Kxx} max_jitter = np.diag(Kxx).max() * 1e-2 new_jitter = np.clip(-gp.x * 1e-4, 0, max_jitter) Kxx += np.eye(gp.x.size) * new_jitter gp.jitter += new_jitter def _anneal(*args, **kwargs): """Hack, because sometimes scipy's anneal function throws a TypeError for no particular reason. So just try again until it works. """ while True: try: res = optim.minimize(*args, **kwargs) except TypeError: pass else: break return res def find_good_parameters(logpdf, x0, method, ntry=10): logger.debug("Trying to find good parameters...") for i in xrange(ntry): logger.debug("Attempt #%d with %s", i+1, method) res = optim.minimize( fun=lambda x: -logpdf(x), x0=x0, method=method) logger.debug(res) p = logpdf(res['x']) if p > MIN: return res['x'] if logpdf(x0) < p: x0 = res['x'] return None ```
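A short usage sketch for the two public helpers in `util.py`, mirroring the calls made by `test_util.py` above; the sample count and histogram settings are illustrative assumptions, and running it requires the package's compiled `util_c` extension.

```python
# Minimal sketch of slice_sample and set_scientific usage; assumes the
# bayesian_quadrature package (and its compiled util_c extension) is installed.
import numpy as np
import matplotlib.pyplot as plt
from bayesian_quadrature import util

def logpdf(x):
    # standard normal log-density
    return -(x ** 2) / 2. - 0.5 * np.log(2 * np.pi)

w = np.array([1.0])    # step for adjusting the slice window
x0 = np.array([0.0])   # starting value
samples = util.slice_sample(logpdf, 10000, w, xval=x0, nburn=10, freq=1)
print samples.mean()   # should be close to 0 for a standard normal target

fig, ax = plt.subplots()
ax.hist(samples.ravel(), bins=50)
util.set_scientific(ax, -5, 4, axis='y')
plt.close(fig)
```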
{ "source": "jhamrick/cogsci-proceedings-analysis", "score": 3 }
#### File: jhamrick/cogsci-proceedings-analysis/cleaning.py ```python import re import difflib import pandas as pd import numpy as np from nameparser import HumanName from nameparser.config import CONSTANTS CONSTANTS.titles.remove("gen") CONSTANTS.titles.remove("prin") def parse_paper_type(section_name): section_name = section_name.strip().lower() if section_name == '': paper_type = None elif re.match('.*workshop.*', section_name): paper_type = 'workshop' elif re.match('.*symposi.*', section_name): paper_type = 'symposium' elif re.match('.*poster.*', section_name): paper_type = 'poster' elif re.match('.*tutorial.*', section_name): paper_type = 'workshop' elif re.match('.*abstract.*', section_name): paper_type = 'poster' elif re.match('.*addenda.*', section_name): paper_type = 'other' else: paper_type = 'talk' return paper_type def clean_authors(authors): cleaned_authors = [] authors = authors.lower() # get rid of commas where there are suffixes, like Jr. or III authors = authors.replace(", jr.", " jr.") authors = authors.replace(", iii", " iii") authors = authors.replace(", ph.d", "") # special cases authors = authors.replace("organizer:", "") authors = authors.replace("roel m,", "roel m.") if authors == '<NAME>, <NAME>, t.': author_list = ['<NAME>', '<NAME>, t.'] else: author_list = authors.split(",") for author in author_list: author = HumanName(author.lower()) if author.first == '' or author.last == '': raise ValueError("invalid author name: {}".format(author)) author.capitalize() author.string_format = u"{last}, {title} {first} {middle}, {suffix}" cleaned_authors.append(unicode(author)) return cleaned_authors def extract_authors(papers): author_papers = [] for i, paper in papers.iterrows(): authors = clean_authors(paper['authors']) for author in authors: entry = paper.copy().drop('authors') entry['author'] = author author_papers.append(entry) author_papers = pd.DataFrame(author_papers) return author_papers def fix_author_misspellings(papers, G): authors = np.sort(papers['author'].unique()) for i in xrange(len(authors)): window = 20 lower = i + 1 upper = min(i + 1 + window, len(authors) - 1) for j in xrange(len(authors[lower:upper])): author1 = authors[i] author2 = authors[lower + j] if author1 == author2: continue author1_hn = HumanName(author1) author2_hn = HumanName(author2) same_first = author1_hn.first == author2_hn.first same_last = author1_hn.last == author2_hn.last if same_first and same_last: replace = True else: ratio = difflib.SequenceMatcher(None, author1, author2).ratio() if ratio > 0.9: coauthors = set(G[author1].keys()) & set(G[author2].keys()) if len(coauthors) > 0: replace = True else: print u"\nPossible match: '{}' vs '{}' (r={})".format( author1, author2, ratio) print sorted(G[author1].keys()) print sorted(G[author2].keys()) accept = "" while accept not in ("y", "n"): accept = raw_input("Accept? 
(y/n) ") replace = accept == "y" else: replace = False if replace: num1 = len(papers.groupby('author').get_group(author1)) num2 = len(papers.groupby('author').get_group(author2)) if num1 > num2: oldname = author2 newname = author1 else: oldname = author1 newname = author2 print u"Replacing '{}' with '{}'".format(oldname, newname) papers.loc[papers['author'] == oldname, 'author'] = newname authors[authors == oldname] = newname for neighbor in G[oldname]: if neighbor not in G[newname]: G.add_edge(newname, neighbor) G[newname][neighbor]['weight'] = 0 weight = G[oldname][neighbor]['weight'] G[newname][neighbor]['weight'] += weight G.remove_node(oldname) return papers, G if __name__ == "__main__": import graph papers = pd.read_csv("cogsci_proceedings_raw.csv") papers['type'] = papers['section'].apply(parse_paper_type) papers = extract_authors(papers) G = graph.make_author_graph(papers) papers, G = fix_author_misspellings(papers, G) papers.to_csv("cogsci_proceedings.csv", encoding='utf-8') ``` #### File: jhamrick/cogsci-proceedings-analysis/scraper.py ```python import urllib2 import pandas as pd from bs4 import BeautifulSoup, element def load_html(url): response = urllib2.urlopen(url) html = response.read().replace("&nbsp;", "") return html def get_papers_table(year): url = "https://mindmodeling.org/cogsci{}/".format(year) soup = BeautifulSoup(load_html(url)) tables = soup.find_all("table") tds = tables[5].find_all("td") tds = [td for td in tds if len(td.contents) > 0] paper_type = None papers = [] paper = {} for td in tds: elem = td.contents[0] if isinstance(elem, element.NavigableString): paper['authors'] = unicode(elem) paper['year'] = year paper['section'] = paper_type papers.append(paper) paper = {} elif elem.name == 'a': href = url + elem.attrs['href'] title = "".join(elem.contents) paper['url'] = href paper['title'] = title elif elem.name == 'h2': section_name, = elem.contents paper_type = section_name.strip() return pd.DataFrame(papers) def get_papers_list(year): url = "https://mindmodeling.org/cogsci{}/".format(year) html = load_html(url) html = html.replace("<li>", "").replace("<li id=session>", "") soup = BeautifulSoup(html) papers = [] paper = {} paper_type = None for elem in soup.findAll("a"): if not isinstance(elem.contents[0], element.NavigableString): continue sibling = elem.findNextSibling() if not hasattr(sibling, "name"): continue if sibling.name != "ul": continue toplevel = elem.findParent().findParent() break for section in toplevel.contents: if isinstance(section, element.NavigableString): paper_type = section.strip() continue for elem in section.find_all("a"): href = url + elem.attrs['href'] try: title = "".join(elem.contents) except TypeError: continue paper = {} paper['year'] = year paper['url'] = href paper['title'] = title paper['section'] = paper_type sibling = elem.findNextSibling() authors, = sibling.contents paper['authors'] = unicode(authors) papers.append(paper) return pd.DataFrame(papers) def get_papers(): papers = pd.concat([ get_papers_table(2014), get_papers_list(2013), get_papers_list(2012), get_papers_list(2011), get_papers_list(2010) ]) papers = papers\ .set_index('url')\ .sort() if papers.isnull().any().any(): raise RuntimeError("some entries are null") return papers if __name__ == "__main__": pathname = "cogsci_proceedings_raw.csv" papers = get_papers() papers.to_csv(pathname, encoding='utf-8') ```
{ "source": "jhamrick/Dallinger", "score": 3 }
#### File: demos/concentration/experiment.py ```python import ConfigParser import dallinger as dlgr class ConcentrationGame(dlgr.experiments.Experiment): """Define the structure of the experiment.""" def __init__(self, session): """Initialize the experiment.""" config = ConfigParser.ConfigParser() config.read("config.txt") super(ConcentrationGame, self).__init__(session) self.experiment_repeats = 1 N = config.get("Experiment Configuration", "num_participants") self.initial_recruitment_size = N self.setup() ``` #### File: demos/snake/experiment.py ```python import ConfigParser import dallinger class SnakeGame(dallinger.experiments.Experiment): """Define the structure of the experiment.""" def __init__(self, session): """Initialize the experiment.""" config = ConfigParser.ConfigParser() config.read("config.txt") super(SnakeGame, self).__init__(session) self.experiment_repeats = 1 N = config.get("Experiment Configuration", "num_participants") self.initial_recruitment_size = N self.setup() ``` #### File: Dallinger/tests/test_command_line.py ```python import os import subprocess class TestCommandLine(object): def setup(self): """Set up the environment by moving to the demos directory.""" os.chdir("demos") def teardown(self): os.chdir("..") def add(self, *args): self.db.add_all(args) self.db.commit() def test_dallinger_help(self): output = subprocess.check_output("dallinger", shell=True) assert("Usage: dallinger [OPTIONS] COMMAND [ARGS]" in output) ```
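Both demo experiments above pull `num_participants` out of a `config.txt` section named "Experiment Configuration". A minimal sketch of that lookup; the file contents written here are an assumption for illustration only, not taken from the Dallinger demos.

```python
# Sketch of the ConfigParser lookup the demos perform; config.txt contents
# below are hypothetical.
import ConfigParser

with open("config.txt", "w") as fh:
    fh.write("[Experiment Configuration]\nnum_participants = 10\n")

config = ConfigParser.ConfigParser()
config.read("config.txt")
# .get() returns a string; .getint() is the call to use when an integer
# participant count is needed
N = config.getint("Experiment Configuration", "num_participants")
assert N == 10
```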
{ "source": "jhamrick/dbtools", "score": 3 }
#### File: dbtools/tests/table_base.py ```python import numpy as np import os from nose.tools import raises from sqlite3 import OperationalError from dbtools import Table from . import RewriteDocstringMeta try: xrange except NameError: xrange = range class TestTable(object): __metaclass__ = RewriteDocstringMeta dtypes = ( ('id', int), ('name', str), ('age', int), ('height', float) ) idata = np.array([ [2, '<NAME>', 25, 66.25], [4, '<NAME>', 24, 70.1], [6, '<NAME>', 26, 68.0], [8, '<NAME>', 29, 67.42] ], dtype='object') def setup(self): self.tbl = Table.create( ':memory:', "Foo", self.dtypes, verbose=True) def insert(self): self.tbl.insert(self.idata) def check_data(self, indata, outdata): out = True if not (indata == outdata.as_matrix()).all(): out = False return out def check_index(self, indata, outdata): out = True if not (np.arange(0, indata.shape[0]) == np.array(outdata.index)).all(): out = False return out def check(self, indata, outdata): data = self.check_data(indata, outdata) index = self.check_index(indata, outdata) return data and index def test_create_name(self): """Check for correct table name""" assert self.tbl.name == "Foo" def test_create_columns(self): """Check that the columns are named correctly""" assert self.tbl.columns == list(zip(*self.dtypes))[0] def test_create_primary_key(self): """Check that the primary key is not set""" assert self.tbl.primary_key is None def test_create_autoincrement(self): """Check that autoincrement is not set""" assert not self.tbl.autoincrement def test_create_from_dataframe(self): """Create a table from a dataframe""" self.insert() data = self.tbl.select() tbl = Table.create(':memory:', "Foo_2", data, verbose=True) self.check(self.idata, tbl.select()) def test_create_from_dicts(self): """Create a table from dictionaries""" cols = list(zip(*self.dtypes))[0] dicts = [dict([(cols[i], d[i]) for i in xrange(len(d))]) for d in self.idata] tbl = Table.create(':memory:', "Bar", dicts, verbose=True) self.check_index(self.idata, tbl.select()) for idx, col in enumerate(cols): self.check_data(self.idata[:, [idx]], tbl[col]) @raises(OperationalError) def test_drop(self): """Drop table""" self.tbl.drop() self.tbl.select() def test_insert_null(self): """Insert a null entry""" self.tbl.insert({}) @raises(ValueError) def test_insert_string(self): """Insert just a string""" self.tbl.insert('<NAME>') @raises(ValueError) def test_insert_int(self): """Insert just an integer""" self.tbl.insert(25) @raises(ValueError) def test_insert_float(self): """Insert just a float""" self.tbl.insert(66.25) @raises(ValueError) def test_insert_shortlist(self): """Insert a list that's too short""" self.tbl.insert(['<NAME>', 25]) @raises(ValueError) def test_insert_longlist(self): """Insert a list of lists that are too short""" self.tbl.insert([1, 2, '<NAME>', 25, 66.25]) def test_insert_list(self): """Insert a list""" self.tbl.insert([1, '<NAME>', 25, 66.25]) data = self.tbl.select() idata = np.array([[1, '<NAME>', 25, 66.25]], dtype='object') assert self.check(idata, data) def test_insert_lists(self): """Insert a list of lists""" self.insert() data = self.tbl.select() assert self.check(self.idata, data) def test_select_columns(self): """Make sure columns of selected data are correct""" self.insert() data = self.tbl.select() assert tuple(data.columns) == self.tbl.columns def test_select_where_args(self): """Check where selection with one argument""" self.insert() data = self.tbl.select(where=("age=?", 25)) assert self.check(self.idata[[0]], data) def 
test_select_where_args2(self): """Check where selection with list of arguments""" self.insert() data = self.tbl.select(where=("age=?", (25,))) assert self.check(self.idata[[0]], data) def test_select_where_no_args(self): """Check where selection with no arguments""" self.insert() data = self.tbl.select(where="age=25") assert self.check(self.idata[[0]], data) def test_insert_dict(self): """Insert a dictionary""" self.tbl.insert({ 'id': 1, 'name': '<NAME>', 'age': 25, 'height': 66.25 }) data = self.tbl.select() idata = np.array([[1, '<NAME>', 25, 66.25]], dtype='object') assert self.check(idata, data) def test_insert_dictlist(self): """Insert a list of dictionaries""" self.tbl.insert([ { 'id': 1, 'name': '<NAME>', 'age': 25, 'height': 66.25 }, { 'id': 2, 'name': '<NAME>', 'age': 24, 'height': 70.1 }]) data = self.tbl.select() idata = np.array([ [1, '<NAME>', 25, 66.25], [2, '<NAME>', 24, 70.1]], dtype='object') assert self.check(idata, data) def test_slice_name(self): """Slice the 'name' column""" self.insert() data = self.tbl['name'] assert self.check(self.idata[:, [1]], data) def test_slice_name_age(self): """Slice the 'name' and 'age' columns""" self.insert() data = self.tbl['name', 'age'] assert self.check(self.idata[:, [1, 2]], data) def test_slice_name_height(self): """Slice the 'name' and 'height' columns""" self.insert() data = self.tbl['name', 'height'] assert self.check(self.idata[:, [1, 3]], data) def test_slice_all(self): """Slice all the data""" self.insert() data = self.tbl[:] assert self.check(self.idata, data) def test_update(self): """Update a single value""" self.insert() self.tbl.update({'name': '<NAME>'}, where="name='<NAME>'") data = np.array(self.tbl.select()['name']) assert data[0] == '<NAME>' def test_update_multiple(self): """Update multiple values""" self.insert() self.tbl.update({'name': '<NAME>', 'age': 26}, where="name='<NAME>'") data = self.tbl.select() assert np.array(data['name'])[0] == '<NAME>' assert np.array(data['age'])[0] == 26 def test_update_arg(self): """Update a value using a WHERE argument""" self.insert() self.tbl.update({'name': '<NAME>'}, where=("name=?", "<NAME>")) data = self.tbl.select() assert np.array(data['name'])[0] == '<NAME>' def test_update_args(self): """Update a value using multiple WHERE arguments""" self.insert() self.tbl.update({'name': '<NAME>'}, where=("name=? AND age=?", ("<NAME>", 25))) data = self.tbl.select() assert np.array(data['name'])[0] == '<NAME>' def test_update_no_filter(self): """Update an entire column""" self.insert() self.tbl.update({'age': 0}) data = self.tbl.select() assert (np.array(data['age']) == 0).all() @raises(ValueError) def test_update_fail(self): """Update with invalid values""" self.insert() self.tbl.update('name') def test_delete_row(self): """Delete a row""" self.insert() self.tbl.delete(where="age=25") data = self.tbl.select() assert self.check_data(self.idata[1:], data) def test_delete_row_arg(self): """Delete a row with an argument""" self.insert() self.tbl.delete(where=("age=?", 25)) data = self.tbl.select() assert self.check_data(self.idata[1:], data) def test_delete_rows(self): """Delete multiple rows""" self.insert() self.tbl.delete(where="age>25") data = self.tbl.select() assert self.check_data(self.idata[:2], data) def test_delete_rows_args(self): """Delete multiple rows with multiple arguments""" self.insert() self.tbl.delete(where=("age=? 
OR height>?", (25, 70))) data = self.tbl.select() assert self.check_data(self.idata[2:], data) def test_delete_all(self): """Delete all rows""" self.insert() self.tbl.delete() data = self.tbl.select() assert self.check_data(self.idata[:0], data) def test_csv(self): """Write a csv file""" self.insert() self.tbl.save_csv("test.csv") os.remove("test.csv") ```
{ "source": "jhamrick/gaussian_processes", "score": 3 }
#### File: gp/kernels/periodic.py ```python __all__ = ['PeriodicKernel'] import numpy as np import sympy as sym from functools import wraps from gp.ext import periodic_c from . import Kernel DTYPE = np.float64 EPS = np.finfo(DTYPE).eps class PeriodicKernel(Kernel): r""" Periodic kernel function. Parameters ---------- h : float Output scale kernel parameter w : float Input scale kernel parameter p : float Period kernel parameter Notes ----- The periodic kernel is defined by Equation 4.31 of [RW06]_: .. math:: K(x_1, x_2) = h^2\exp\left(\frac{-2\sin^2\left(\frac{x_1-x_2}{2p}\right)}{w^2}\right) where :math:`w` is the input scale parameter (equivalent to the standard deviation of the Gaussian), :math:`h` is the output scale parameter, and :math:`p` is the period kernel parameter. """ def __init__(self, h, w, p): self.h = None #: Output scale kernel parameter self.w = None #: Input scale kernel parameter self.p = None #: Period kernel parameter self.set_param('h', h) self.set_param('w', w) self.set_param('p', p) @property def params(self): r""" Kernel parameters. Returns ------- params : numpy.ndarray ``(h, w, p)`` """ return np.array([self.h, self.w, self.p], dtype=DTYPE) @params.setter def params(self, val): self.set_param('h', val[0]) self.set_param('w', val[1]) self.set_param('p', val[2]) def set_param(self, name, val): if name == 'h': if val < EPS: raise ValueError("invalid value for h: %s" % val) self.h = DTYPE(val) elif name == 'w': if val < EPS: raise ValueError("invalid value for w: %s" % val) self.w = DTYPE(val) elif name == 'p': if val < EPS: raise ValueError("invalid value for p: %s" % val) self.p = DTYPE(val) else: raise ValueError("unknown parameter: %s" % name) @property @wraps(Kernel.sym_K) def sym_K(self): h = sym.Symbol('h') w = sym.Symbol('w') p = sym.Symbol('p') d = sym.Symbol('d') h2 = h ** 2 w2 = w ** 2 f = h2 * sym.exp(-2. * (sym.sin(d / (2. 
* p)) ** 2) / w2) return f @wraps(Kernel.K) def K(self, x1, x2, out=None): if out is None: out = np.empty((x1.size, x2.size), dtype=DTYPE) periodic_c.K(out, x1, x2, self.h, self.w, self.p) return out @wraps(Kernel.jacobian) def jacobian(self, x1, x2, out=None): if out is None: out = np.empty((3, x1.size, x2.size), dtype=DTYPE) periodic_c.jacobian(out, x1, x2, self.h, self.w, self.p) return out @wraps(Kernel.hessian) def hessian(self, x1, x2, out=None): if out is None: out = np.empty((3, 3, x1.size, x2.size), dtype=DTYPE) periodic_c.hessian(out, x1, x2, self.h, self.w, self.p) return out def dK_dh(self, x1, x2, out=None): if out is None: out = np.empty((x1.size, x2.size), dtype=DTYPE) periodic_c.dK_dh(out, x1, x2, self.h, self.w, self.p) return out def dK_dw(self, x1, x2, out=None): if out is None: out = np.empty((x1.size, x2.size), dtype=DTYPE) periodic_c.dK_dw(out, x1, x2, self.h, self.w, self.p) return out def dK_dp(self, x1, x2, out=None): if out is None: out = np.empty((x1.size, x2.size), dtype=DTYPE) periodic_c.dK_dp(out, x1, x2, self.h, self.w, self.p) return out def d2K_dhdh(self, x1, x2, out=None): if out is None: out = np.empty((x1.size, x2.size), dtype=DTYPE) periodic_c.d2K_dhdh(out, x1, x2, self.h, self.w, self.p) return out def d2K_dhdw(self, x1, x2, out=None): if out is None: out = np.empty((x1.size, x2.size), dtype=DTYPE) periodic_c.d2K_dhdw(out, x1, x2, self.h, self.w, self.p) return out def d2K_dhdp(self, x1, x2, out=None): if out is None: out = np.empty((x1.size, x2.size), dtype=DTYPE) periodic_c.d2K_dhdp(out, x1, x2, self.h, self.w, self.p) return out def d2K_dwdh(self, x1, x2, out=None): if out is None: out = np.empty((x1.size, x2.size), dtype=DTYPE) periodic_c.d2K_dwdh(out, x1, x2, self.h, self.w, self.p) return out def d2K_dwdw(self, x1, x2, out=None): if out is None: out = np.empty((x1.size, x2.size), dtype=DTYPE) periodic_c.d2K_dwdw(out, x1, x2, self.h, self.w, self.p) return out def d2K_dwdp(self, x1, x2, out=None): if out is None: out = np.empty((x1.size, x2.size), dtype=DTYPE) periodic_c.d2K_dwdp(out, x1, x2, self.h, self.w, self.p) return out def d2K_dpdh(self, x1, x2, out=None): if out is None: out = np.empty((x1.size, x2.size), dtype=DTYPE) periodic_c.d2K_dpdh(out, x1, x2, self.h, self.w, self.p) return out def d2K_dpdw(self, x1, x2, out=None): if out is None: out = np.empty((x1.size, x2.size), dtype=DTYPE) periodic_c.d2K_dpdw(out, x1, x2, self.h, self.w, self.p) return out def d2K_dpdp(self, x1, x2, out=None): if out is None: out = np.empty((x1.size, x2.size), dtype=DTYPE) periodic_c.d2K_dpdp(out, x1, x2, self.h, self.w, self.p) return out ``` #### File: gp/tests/test_kernels.py ```python import numpy as np import pytest from .util import opt, approx_deriv, allclose EPS = np.finfo(float).eps DTHETA = opt['dtheta'] ###################################################################### def check_params(kernel, params): k = kernel(*params) assert (k.params == np.array(params)).all() k.params = params assert (k.params == np.array(params)).all() def check_invalid_params(kernel, good_params, bad_params): with pytest.raises(ValueError): kernel(*bad_params) k = kernel(*good_params) with pytest.raises(ValueError): k.params = bad_params def check_jacobian(k, x): kernel = type(k) params = k.params.copy() jac1 = k.jacobian(x, x) jac2 = np.empty_like(jac1) k.jacobian(x, x, out=jac2) approx_jac = np.empty(jac1.shape) for i in xrange(len(params)): p0 = list(params) p0[i] -= DTHETA p1 = list(params) p1[i] += DTHETA k0 = kernel(*p0)(x, x) k1 = kernel(*p1)(x, x) approx_jac[i] = 
approx_deriv(k0, k1, DTHETA) assert allclose(jac1, approx_jac) assert allclose(jac2, approx_jac) assert allclose(jac1, jac2) def check_dK_dtheta(k, x, p, i): kernel = type(k) params = k.params.copy() f = getattr(k, "dK_d%s" % p) dK_dtheta1 = f(x, x) dK_dtheta2 = np.empty_like(dK_dtheta1) f(x, x, out=dK_dtheta2) params0 = list(params) params0[i] -= DTHETA params1 = list(params) params1[i] += DTHETA k0 = kernel(*params0)(x, x) k1 = kernel(*params1)(x, x) approx_dK_dtheta = approx_deriv(k0, k1, DTHETA) assert allclose(dK_dtheta1, approx_dK_dtheta) assert allclose(dK_dtheta2, approx_dK_dtheta) assert allclose(dK_dtheta1, dK_dtheta2) def check_hessian(k, x): kernel = type(k) params = k.params.copy() hess1 = k.hessian(x, x) hess2 = np.empty_like(hess1) k.hessian(x, x, out=hess2) approx_hess = np.empty(hess1.shape) for i in xrange(len(params)): p0 = list(params) p1 = list(params) p0[i] -= DTHETA p1[i] += DTHETA jac0 = kernel(*p0).jacobian(x, x) jac1 = kernel(*p1).jacobian(x, x) approx_hess[:, i] = approx_deriv(jac0, jac1, DTHETA) assert allclose(hess1, approx_hess) assert allclose(hess2, approx_hess) assert allclose(hess1, hess2) def check_d2K_dtheta2(k, x, p1, p2, i): kernel = type(k) params = k.params.copy() f = getattr(k, "d2K_d%sd%s" % (p1, p2)) d2K_dtheta21 = f(x, x) d2K_dtheta22 = np.empty_like(d2K_dtheta21) f(x, x, out=d2K_dtheta22) params0 = list(params) params1 = list(params) params0[i] -= DTHETA params1[i] += DTHETA dK_dtheta0 = getattr(kernel(*params0), "dK_d%s" % p1)(x, x) dK_dtheta1 = getattr(kernel(*params1), "dK_d%s" % p1)(x, x) approx_d2K_dtheta2 = approx_deriv(dK_dtheta0, dK_dtheta1, DTHETA) assert allclose(d2K_dtheta21, approx_d2K_dtheta2) assert allclose(d2K_dtheta22, approx_d2K_dtheta2) assert allclose(d2K_dtheta21, d2K_dtheta22) ```
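As a sanity check on the formula in the `PeriodicKernel` docstring, a small script (not part of the package's test suite) comparing `K` against the closed form written directly in NumPy, assuming the compiled `gp.ext` extensions are built:

```python
# Numerical cross-check that PeriodicKernel.K matches
#   K(x1, x2) = h**2 * exp(-2 * sin((x1 - x2) / (2 * p))**2 / w**2)
import numpy as np
from gp.kernels.periodic import PeriodicKernel

h, w, p = 1.5, 2.0, 0.5
k = PeriodicKernel(h, w, p)

x1 = np.linspace(-1, 1, 5)
x2 = np.linspace(-1, 1, 7)
K = k.K(x1, x2)

d = x1[:, None] - x2[None, :]
K_direct = h ** 2 * np.exp(-2. * np.sin(d / (2. * p)) ** 2 / w ** 2)
assert np.allclose(K, K_direct)
```

The analytic `jacobian` and `hessian` methods can be checked the same way against finite differences, which is exactly what `check_jacobian` and `check_hessian` above do.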
{ "source": "jhamrick/plotchecker", "score": 3 }
#### File: plotchecker/plotchecker/barplot.py ```python import numpy as np from .base import PlotChecker, InvalidPlotError class BarPlotChecker(PlotChecker): """A plot checker for bar plots. Parameters ---------- axis : ``matplotlib.axes.Axes`` object A set of matplotlib axes (e.g. obtained through ``plt.gca()``) """ def __init__(self, axis): """Initialize the bar plot checker.""" super(BarPlotChecker, self).__init__(axis) self._patches = np.array(self.axis.patches) self._patches = self._patches[np.argsort([p.get_x() for p in self._patches])] if len(self._patches) == 0: raise InvalidPlotError("no data found") def _parse_expected_attr(self, attr_name, attr_val): """Ensure that the given expected attribute values are in the right shape.""" if attr_name in ('colors', 'edgecolors'): # if it's a color, first check if it's just a single color -- if it's # not a single color, this command will throw an error and we can try # iterating over the multiple colors that were given try: attr_val = np.array([self._color2rgb(attr_val)]) except (ValueError, TypeError): attr_val = np.array([self._color2rgb(x) for x in attr_val]) elif not hasattr(attr_val, '__iter__'): # if it's not a color, then just make sure we have an array attr_val = np.array([attr_val]) # tile the given values if we've only been given one, so it's the same # shape as the data if len(attr_val) == 1: attr_val = self._tile_or_trim(self.centers, attr_val) return attr_val def assert_num_bars(self, num_bars): """Assert that the plot has the given number of bars. Parameters ---------- num_bars : int """ if num_bars != len(self._patches): raise AssertionError( "Plot has incorrect number of bars: {} (expected {})".format( len(self._patches), num_bars)) @property def centers(self): """The centers of the plotted bars.""" return np.array([p.get_x() + (p.get_width() / 2) for p in self._patches]) def assert_centers_equal(self, centers): """Assert that the given centers are equivalent to the plotted :attr:`~plotchecker.BarPlotChecker.centers`. Parameters ---------- centers : 1-D array-like The expected centers. The number of elements should be equal to the (expected) number of plotted bars, or just a single value (which will then be applied to all bars). """ np.testing.assert_equal( self.centers, self._parse_expected_attr("centers", centers)) def assert_centers_allclose(self, centers, **kwargs): """Assert that the given centers are almost equal to the plotted :attr:`~plotchecker.BarPlotChecker.centers`. Parameters ---------- centers : 1-D array-like The expected centers. The number of elements should be equal to the (expected) number of plotted bars, or just a single value (which will then be applied to all bars). kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose( self.centers, self._parse_expected_attr("centers", centers), **kwargs) @property def heights(self): """The heights of the plotted bars.""" return np.array([p.get_height() for p in self._patches]) def assert_heights_equal(self, heights): """Assert that the given heights are equivalent to the plotted :attr:`~plotchecker.BarPlotChecker.heights`. Parameters ---------- heights : 1-D array-like The expected heights. The number of elements should be equal to the (expected) number of plotted bars, or just a single value (which will then be applied to all bars). 
""" np.testing.assert_equal( self.heights, self._parse_expected_attr("heights", heights)) def assert_heights_allclose(self, heights, **kwargs): """Assert that the given heights are almost equal to the plotted :attr:`~plotchecker.BarPlotChecker.heights`. Parameters ---------- heights : 1-D array-like The expected heights. The number of elements should be equal to the (expected) number of plotted bars, or just a single value (which will then be applied to all bars). kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose( self.heights, self._parse_expected_attr("heights", heights), **kwargs) @property def widths(self): """The widths of the plotted bars.""" return np.array([p.get_width() for p in self._patches]) def assert_widths_equal(self, widths): """Assert that the given widths are equivalent to the plotted :attr:`~plotchecker.BarPlotChecker.widths`. Parameters ---------- widths : 1-D array-like The expected widths. The number of elements should be equal to the (expected) number of plotted bars, or just a single value (which will then be applied to all bars). """ np.testing.assert_equal( self.widths, self._parse_expected_attr("widths", widths)) def assert_widths_allclose(self, widths, **kwargs): """Assert that the given widths are almost equal to the plotted :attr:`~plotchecker.BarPlotChecker.widths`. Parameters ---------- widths : 1-D array-like The expected widths. The number of elements should be equal to the (expected) number of plotted bars, or just a single value (which will then be applied to all bars). kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose( self.widths, self._parse_expected_attr("widths", widths), **kwargs) @property def bottoms(self): """The y-coordinates of the bottoms of the plotted bars.""" return np.array([p.get_y() for p in self._patches]) def assert_bottoms_equal(self, bottoms): """Assert that the given bottoms are equivalent to the plotted :attr:`~plotchecker.BarPlotChecker.bottoms`. Parameters ---------- bottoms : 1-D array-like The expected bottoms. The number of elements should be equal to the (expected) number of plotted bars, or just a single value (which will then be applied to all bars). """ np.testing.assert_equal( self.bottoms, self._parse_expected_attr("bottoms", bottoms)) def assert_bottoms_allclose(self, bottoms, **kwargs): """Assert that the given bottoms are almost equal to the plotted :attr:`~plotchecker.BarPlotChecker.bottoms`. Parameters ---------- bottoms : 1-D array-like The expected bottoms. The number of elements should be equal to the (expected) number of plotted bars, or just a single value (which will then be applied to all bars). kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose( self.bottoms, self._parse_expected_attr("bottoms", bottoms), **kwargs) @property def colors(self): """The colors of the plotted bars.""" return np.array([self._color2rgb(p.get_facecolor()) for p in self._patches]) def assert_colors_equal(self, colors): """Assert that the given colors are equivalent to the plotted :attr:`~plotchecker.BarPlotChecker.colors`. Parameters ---------- colors : single color, or list of expected colors Each color can be either a matplotlib color name (e.g. ``'r'`` or ``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or a 4-tuple RGBA color. 
""" np.testing.assert_equal( self.colors, self._parse_expected_attr("colors", colors)) def assert_colors_allclose(self, colors, **kwargs): """Assert that the given colors are almost equal to the plotted :attr:`~plotchecker.BarPlotChecker.colors`. Parameters ---------- colors : single color, or list of expected edge colors Each color can be either a matplotlib color name (e.g. ``'r'`` or ``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or a 4-tuple RGBA color. kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose( self.colors, self._parse_expected_attr("colors", colors), **kwargs) @property def edgecolors(self): """The edge colors of the plotted bars.""" return np.array([self._color2rgb(p.get_edgecolor()) for p in self._patches]) def assert_edgecolors_equal(self, edgecolors): """Assert that the given edgecolors are equivalent to the plotted :attr:`~plotchecker.BarPlotChecker.edgecolors`. Parameters ---------- edgecolors : single color, or list of expected edge colors Each color can be either a matplotlib color name (e.g. ``'r'`` or ``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or a 4-tuple RGBA color. """ np.testing.assert_equal( self.edgecolors, self._parse_expected_attr("edgecolors", edgecolors)) def assert_edgecolors_allclose(self, edgecolors, **kwargs): """Assert that the given edgecolors are almost equal to the plotted :attr:`~plotchecker.BarPlotChecker.edgecolors`. Parameters ---------- edgecolors : single color, or list of expected edge colors Each color can be either a matplotlib color name (e.g. ``'r'`` or ``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or a 4-tuple RGBA color. kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose( self.edgecolors, self._parse_expected_attr("edgecolors", edgecolors), **kwargs) @property def alphas(self): """The alpha values of the plotted bars.""" all_alphas = [] for p in self._patches: if p.get_alpha() is None: alpha = self._color2alpha(p.get_facecolor()) else: alpha = p.get_alpha() all_alphas.append(alpha) return np.array(all_alphas) def assert_alphas_equal(self, alphas): """Assert that the given alphas are equivalent to the plotted :attr:`~plotchecker.BarPlotChecker.alphas`. Parameters ---------- alphas : 1-D array-like The expected alphas. The number of elements should be equal to the (expected) number of plotted bars, or just a single value (which will then be applied to all bars). """ np.testing.assert_equal( self.alphas, self._parse_expected_attr("alphas", alphas)) def assert_alphas_allclose(self, alphas, **kwargs): """Assert that the given alphas are almost equal to the plotted :attr:`~plotchecker.BarPlotChecker.alphas`. Parameters ---------- alphas : 1-D array-like The expected alphas. The number of elements should be equal to the (expected) number of plotted bars, or just a single value (which will then be applied to all bars). kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose( self.alphas, self._parse_expected_attr("alphas", alphas), **kwargs) @property def linewidths(self): """The line widths of the plotted bars.""" return np.array([p.get_linewidth() for p in self._patches]) def assert_linewidths_equal(self, linewidths): """Assert that the given linewidths are equivalent to the plotted :attr:`~plotchecker.BarPlotChecker.linewidths`. 
Parameters ---------- linewidths : 1-D array-like The expected linewidths. The number of elements should be equal to the (expected) number of plotted bars, or just a single value (which will then be applied to all bars). """ np.testing.assert_equal( self.linewidths, self._parse_expected_attr("linewidths", linewidths)) def assert_linewidths_allclose(self, linewidths, **kwargs): """Assert that the given linewidths are almost equal to the plotted :attr:`~plotchecker.BarPlotChecker.linewidths`. Parameters ---------- linewidths : 1-D array-like The expected linewidths. The number of elements should be equal to the (expected) number of plotted bars, or just a single value (which will then be applied to all bars). kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose( self.linewidths, self._parse_expected_attr("linewidths", linewidths), **kwargs) ``` #### File: plotchecker/plotchecker/base.py ```python from __future__ import division import matplotlib import matplotlib.colors import matplotlib.markers import numpy as np import six import warnings try: _named_colors = matplotlib.colors.ColorConverter.colors.copy() for colorname, hexcode in matplotlib.colors.cnames.items(): _named_colors[colorname] = matplotlib.colors.hex2color(hexcode) except: # pragma: no cover warnings.warn("Could not get matplotlib colors, named colors will not be available") _named_colors = {} class InvalidPlotError(Exception): pass class PlotChecker(object): """A generic object to test plots. Parameters ---------- axis : ``matplotlib.axes.Axes`` object A set of matplotlib axes (e.g. obtained through ``plt.gca()``) """ _named_colors = _named_colors def __init__(self, axis): """Initialize the PlotChecker object.""" self.axis = axis @classmethod def _color2rgb(cls, color): """Converts the given color to a 3-tuple RGB color. Parameters ---------- color : Either a matplotlib color name (e.g. ``'r'`` or ``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or a 4-tuple RGBA color. Returns ------- rgb : 3-tuple RGB color """ if isinstance(color, six.string_types): if color in cls._named_colors: return tuple(cls._named_colors[color]) else: return tuple(matplotlib.colors.hex2color(color)) elif hasattr(color, '__iter__') and len(color) == 3: return tuple(float(x) for x in color) elif hasattr(color, '__iter__') and len(color) == 4: return tuple(float(x) for x in color[:3]) else: raise ValueError("Invalid color: {}".format(color)) @classmethod def _color2alpha(cls, color): """Converts the given color to an alpha value. For all cases except RGBA colors, this value will be 1.0. Parameters ---------- color : Either a matplotlib color name (e.g. ``'r'`` or ``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or a 4-tuple RGBA color. Returns ------- alpha : float """ if isinstance(color, six.string_types): return 1.0 elif hasattr(color, '__iter__') and len(color) == 3: return 1.0 elif hasattr(color, '__iter__') and len(color) == 4: return float(color[3]) else: raise ValueError("Invalid color: {}".format(color)) @classmethod def _parse_marker(cls, marker): """Converts the given marker to a consistent marker type. In practice, this is basically just making sure all null markers (``''``, ``'None'``, ``None``) get converted to empty strings. 
Parameters ---------- marker : string The marker type Returns ------- marker : string """ if marker is None or marker == 'None': return '' return marker @classmethod def _tile_or_trim(cls, x, y): """Tiles or trims the first dimension of ``y`` so that ``x.shape[0]`` == ``y.shape[0]``. Parameters ---------- x : array-like A numpy array with any number of dimensions. y : array-like A numpy array with any number of dimensions. """ xn = x.shape[0] yn = y.shape[0] if xn > yn: numrep = int(np.ceil(xn / yn)) y = np.tile(y, (numrep,) + (1,) * (y.ndim - 1)) yn = y.shape[0] if xn < yn: y = y[:xn] return y @property def title(self): """The title of the matplotlib plot, stripped of whitespace.""" return self.axis.get_title().strip() def assert_title_equal(self, title): """Asserts that the given title is the same as the plotted :attr:`~plotchecker.PlotChecker.title`. Parameters ---------- title : string The expected title """ title = title.strip() if self.title != title: raise AssertionError( "title is incorrect: '{}'' (expected '{}')".format( self.title, title)) def assert_title_exists(self): """Asserts that the plotted :attr:`~plotchecker.PlotChecker.title` is non-empty. """ if self.title == '': raise AssertionError("no title") @property def xlabel(self): """The xlabel of the matplotlib plot, stripped of whitespace.""" return self.axis.get_xlabel().strip() def assert_xlabel_equal(self, xlabel): """Asserts that the given xlabel is the same as the plotted :attr:`~plotchecker.PlotChecker.xlabel`. Parameters ---------- xlabel : string The expected xlabel """ xlabel = xlabel.strip() if self.xlabel != xlabel: raise AssertionError( "xlabel is incorrect: '{}'' (expected '{}')".format( self.xlabel, xlabel)) def assert_xlabel_exists(self): """Asserts that the plotted :attr:`~plotchecker.PlotChecker.xlabel` is non-empty. """ if self.xlabel == '': raise AssertionError("no xlabel") @property def ylabel(self): """The ylabel of the matplotlib plot, stripped of whitespace.""" return self.axis.get_ylabel().strip() def assert_ylabel_equal(self, ylabel): """Asserts that the given ylabel is the same as the plotted :attr:`~plotchecker.PlotChecker.ylabel`. Parameters ---------- ylabel : string The expected ylabel """ ylabel = ylabel.strip() if self.ylabel != ylabel: raise AssertionError( "ylabel is incorrect: '{}'' (expected '{}')".format( self.ylabel, ylabel)) def assert_ylabel_exists(self): """Asserts that the plotted :attr:`~plotchecker.PlotChecker.ylabel` is non-empty. """ if self.ylabel == '': raise AssertionError("no ylabel") @property def xlim(self): """The x-axis limits of the matplotlib plot.""" return self.axis.get_xlim() def assert_xlim_equal(self, xlim): """Asserts that the given xlim is the same as the plot's :attr:`~plotchecker.PlotChecker.xlim`. Parameters ---------- xlim : 2-tuple The expected xlim """ if self.xlim != xlim: raise AssertionError( "xlim is incorrect: {} (expected {})".format( self.xlim, xlim)) @property def ylim(self): """The y-axis limits of the matplotlib plot.""" return self.axis.get_ylim() def assert_ylim_equal(self, ylim): """Asserts that the given ylim is the same as the plot's :attr:`~plotchecker.PlotChecker.ylim`. 
Parameters ---------- ylim : 2-tuple The expected ylim """ if self.ylim != ylim: raise AssertionError( "ylim is incorrect: {} (expected {})".format( self.ylim, ylim)) @property def xticks(self): """The tick locations along the plot's x-axis.""" return self.axis.get_xticks() def assert_xticks_equal(self, xticks): """Asserts that the given xticks are the same as the plot's :attr:`~plotchecker.PlotChecker.xticks`. Parameters ---------- xticks : list The expected tick locations on the x-axis """ np.testing.assert_equal(self.xticks, xticks) @property def yticks(self): """The tick locations along the plot's y-axis.""" return self.axis.get_yticks() def assert_yticks_equal(self, yticks): """Asserts that the given yticks are the same as the plot's :attr:`~plotchecker.PlotChecker.yticks`. Parameters ---------- yticks : list The expected tick locations on the y-axis """ np.testing.assert_equal(self.yticks, yticks) @property def xticklabels(self): """The tick labels along the plot's x-axis, stripped of whitespace.""" return [x.get_text().strip() for x in self.axis.get_xticklabels()] def assert_xticklabels_equal(self, xticklabels): """Asserts that the given xticklabels are the same as the plot's :attr:`~plotchecker.PlotChecker.xticklabels`. Parameters ---------- xticklabels : list The expected tick labels on the x-axis """ xticklabels = [x.strip() for x in xticklabels] np.testing.assert_equal(self.xticklabels, xticklabels) @property def yticklabels(self): """The tick labels along the plot's y-axis, stripped of whitespace.""" return [x.get_text().strip() for x in self.axis.get_yticklabels()] def assert_yticklabels_equal(self, yticklabels): """Asserts that the given yticklabels are the same as the plot's :attr:`~plotchecker.PlotChecker.yticklabels`. Parameters ---------- yticklabels : list The expected tick labels on the y-axis """ yticklabels = [y.strip() for y in yticklabels] np.testing.assert_equal(self.yticklabels, yticklabels) @property def _texts(self): """All ``matplotlib.text.Text`` objects in the plot, excluding titles.""" texts = [] for x in self.axis.get_children(): if not isinstance(x, matplotlib.text.Text): continue if x == self.axis.title: continue if x == getattr(self.axis, '_left_title', None): continue if x == getattr(self.axis, '_right_title', None): continue texts.append(x) return texts @property def textlabels(self): """The labels of all ``matplotlib.text.Text`` objects in the plot, excluding titles.""" return [x.get_text().strip() for x in self._texts] def assert_textlabels_equal(self, textlabels): """Asserts that the given textlabels are the same as the plot's :attr:`~plotchecker.PlotChecker.textlabels`. Parameters ---------- textlabels : list The expected text labels on the plot """ textlabels = [x.strip() for x in textlabels] np.testing.assert_equal(self.textlabels, textlabels) @property def textpoints(self): """The locations of all ``matplotlib.text.Text`` objects in the plot, excluding titles.""" return np.vstack([x.get_position() for x in self._texts]) def assert_textpoints_equal(self, textpoints): """Asserts that the given locations of the text objects are the same as the plot's :attr:`~plotchecker.PlotChecker.textpoints`. Parameters ---------- textpoints : array-like, N-by-2 The expected text locations on the plot, where the first column corresponds to the x-values, and the second column corresponds to the y-values. 
""" np.testing.assert_equal(self.textpoints, textpoints) def assert_textpoints_allclose(self, textpoints, **kwargs): """Asserts that the given locations of the text objects are almost the same as the plot's :attr:`~plotchecker.PlotChecker.textpoints`. Parameters ---------- textpoints : array-like, N-by-2 The expected text locations on the plot, where the first column corresponds to the x-values, and the second column corresponds to the y-values. kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose(self.textpoints, textpoints, **kwargs) ``` #### File: plotchecker/plotchecker/scatterplot.py ```python import numpy as np from .base import PlotChecker, InvalidPlotError class ScatterPlotChecker(PlotChecker): """A plot checker for scatter plots. Parameters ---------- axis : ``matplotlib.axes.Axes`` object A set of matplotlib axes (e.g. obtained through ``plt.gca()``) """ def __init__(self, axis): """Initialize the scatter plot checker.""" super(ScatterPlotChecker, self).__init__(axis) self.lines = self.axis.get_lines() self.collections = self.axis.collections # check that there are only lines or collections, not both if len(self.lines) == 0 and len(self.collections) == 0: raise InvalidPlotError("No data found") # check that if there are lines, linestyle is '' and markers are not '' for x in self.lines: if len(x.get_xydata()) > 1 and x.get_linestyle() != 'None': raise InvalidPlotError("This is supposed to be a scatter plot, but it has lines!") if self._parse_marker(x.get_marker()) == '': raise InvalidPlotError("This is supposed to be a scatter plot, but there are no markers!") def _parse_expected_attr(self, attr_name, attr_val): """Ensure that the given expected attribute values are in the right shape.""" if attr_name in ('colors', 'edgecolors'): # if it's a color, first check if it's just a single color -- if it's # not a single color, this command will throw an error and we can try # iterating over the multiple colors that were given try: attr_val = np.array([self._color2rgb(attr_val)]) except (ValueError, TypeError): attr_val = np.array([self._color2rgb(x) for x in attr_val]) elif not hasattr(attr_val, '__iter__'): # if it's not a color, then just make sure we have an array attr_val = np.array([attr_val]) # tile the given values if we've only been given one, so it's the same # shape as the data if len(attr_val) == 1: attr_val = self._tile_or_trim(self.x_data, attr_val) return attr_val def assert_num_points(self, num_points): """Assert that the plot has the given number of points. Parameters ---------- num_points : int """ if num_points != len(self.x_data): raise AssertionError( "Plot has incorrect number of points: {} (expected {})".format( len(self.x_data), num_points)) @property def x_data(self): """The x-values of the plotted data (1-D array).""" all_x_data = [] if len(self.lines) > 0: all_x_data.append(np.concatenate([x.get_xydata()[:, 0] for x in self.lines])) if len(self.collections) > 0: all_x_data.append(np.concatenate([x.get_offsets()[:, 0] for x in self.collections])) return np.concatenate(all_x_data, axis=0) def assert_x_data_equal(self, x_data): """Assert that the given x-data is equivalent to the plotted :attr:`~plotchecker.ScatterPlotChecker.x_data`. Parameters ---------- x_data : 1-D array-like The expected x-data. The number of elements should be equal to the (expected) number of plotted points. 
""" np.testing.assert_equal(self.x_data, x_data) def assert_x_data_allclose(self, x_data, **kwargs): """Assert that the given x-data is almost equal to the plotted :attr:`~plotchecker.ScatterPlotChecker.x_data`. Parameters ---------- x_data : 1-D array-like The expected x-data. The number of elements should be equal to the (expected) number of plotted points. kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose(self.x_data, x_data, **kwargs) @property def y_data(self): """The y-values of the plotted data (1-D array).""" all_y_data = [] if len(self.lines) > 0: all_y_data.append(np.concatenate([x.get_xydata()[:, 1] for x in self.lines])) if len(self.collections) > 0: all_y_data.append(np.concatenate([x.get_offsets()[:, 1] for x in self.collections])) return np.concatenate(all_y_data, axis=0) def assert_y_data_equal(self, y_data): """Assert that the given y-data is equivalent to the plotted :attr:`~plotchecker.ScatterPlotChecker.y_data`. Parameters ---------- y_data : 1-D array-like The expected y-data. The number of elements should be equal to the (expected) number of plotted points. """ np.testing.assert_equal(self.y_data, y_data) def assert_y_data_allclose(self, y_data, **kwargs): """Assert that the given y-data is almost equal to the plotted :attr:`~plotchecker.ScatterPlotChecker.y_data`. Parameters ---------- y_data : 1-D array-like The expected y-data. The number of elements should be equal to the (expected) number of plotted points. kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose(self.y_data, y_data, **kwargs) @property def colors(self): """The colors of the plotted points. Columns correspond to RGB values.""" all_colors = [] if len(self.lines) > 0: for x in self.lines: points = x.get_xydata() colors = np.array([self._color2rgb(x.get_markerfacecolor())]) all_colors.append(self._tile_or_trim(points, colors)) if len(self.collections) > 0: for x in self.collections: points = x.get_offsets() colors = np.array([self._color2rgb(i) for i in x.get_facecolors()]) all_colors.append(self._tile_or_trim(points, colors)) return np.concatenate(all_colors, axis=0) def assert_colors_equal(self, colors): """Assert that the given colors are equivalent to the plotted :attr:`~plotchecker.ScatterPlotChecker.colors`. Parameters ---------- colors : single color, or list of expected line colors Each color can be either a matplotlib color name (e.g. ``'r'`` or ``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or a 4-tuple RGBA color. """ np.testing.assert_equal( self.colors, self._parse_expected_attr("colors", colors)) def assert_colors_allclose(self, colors, **kwargs): """Assert that the given colors are almost equal to the plotted :attr:`~plotchecker.ScatterPlotChecker.colors`. Parameters ---------- colors : single color, or list of expected line colors Each color can be either a matplotlib color name (e.g. ``'r'`` or ``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or a 4-tuple RGBA color. 
kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose( self.colors, self._parse_expected_attr("colors", colors), **kwargs) @property def alphas(self): """The alpha values of the plotted points.""" all_alphas = [] if len(self.lines) > 0: for x in self.lines: points = x.get_xydata() if x.get_alpha() is None: alpha = np.array([self._color2alpha(x.get_markerfacecolor())]) else: alpha = np.array([x.get_alpha()]) all_alphas.append(self._tile_or_trim(points, alpha)) if len(self.collections) > 0: for x in self.collections: points = x.get_offsets() if x.get_alpha() is None: alpha = np.array([self._color2alpha(i) for i in x.get_facecolors()]) else: alpha = np.array([x.get_alpha()]) all_alphas.append(self._tile_or_trim(points, alpha)) return np.concatenate(all_alphas) def assert_alphas_equal(self, alphas): """Assert that the given alpha values are equivalent to the plotted :attr:`~plotchecker.ScatterPlotChecker.alphas`. Parameters ---------- alphas : The expected alpha values. This should either be a single number (which will apply to all the points) or an array with size equal to the number of (expected) points. """ np.testing.assert_equal( self.alphas, self._parse_expected_attr("alphas", alphas)) def assert_alphas_allclose(self, alphas, **kwargs): """Assert that the given alpha values are almost equal to the plotted :attr:`~plotchecker.ScatterPlotChecker.alphas`. Parameters ---------- alphas : The expected alpha values. This should either be a single number (which will apply to all the points) or an array with size equal to the number of (expected) points. kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose( self.alphas, self._parse_expected_attr("alphas", alphas), **kwargs) @property def edgecolors(self): """The edge colors of the plotted points. Columns correspond to RGB values.""" all_colors = [] if len(self.lines) > 0: for x in self.lines: points = x.get_xydata() colors = np.array([self._color2rgb(x.get_markeredgecolor())]) all_colors.append(self._tile_or_trim(points, colors)) if len(self.collections) > 0: for x in self.collections: points = x.get_offsets() colors = np.array([self._color2rgb(i) for i in x.get_edgecolors()]) all_colors.append(self._tile_or_trim(points, colors)) return np.concatenate(all_colors, axis=0) def assert_edgecolors_equal(self, edgecolors): """Assert that the given edge colors are equivalent to the plotted :attr:`~plotchecker.ScatterPlotChecker.edgecolors`. Parameters ---------- edgecolors : single color, or list of expected edge colors Each color can be either a matplotlib color name (e.g. ``'r'`` or ``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or a 4-tuple RGBA color. """ np.testing.assert_equal( self.edgecolors, self._parse_expected_attr("edgecolors", edgecolors)) def assert_edgecolors_allclose(self, edgecolors, **kwargs): """Assert that the given edge colors are almost equal to the plotted :attr:`~plotchecker.ScatterPlotChecker.edgecolors`. Parameters ---------- edgecolors : single color, or list of expected edge colors Each color can be either a matplotlib color name (e.g. ``'r'`` or ``'red'``), a hexcode (e.g. ``"#FF0000"``), a 3-tuple RGB color, or a 4-tuple RGBA color. 
kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose( self.edgecolors, self._parse_expected_attr("edgecolors", edgecolors), **kwargs) @property def edgewidths(self): """The edge widths of the plotted points.""" all_colors = [] if len(self.lines) > 0: for x in self.lines: points = x.get_xydata() colors = np.array([x.get_markeredgewidth()]) all_colors.append(self._tile_or_trim(points, colors)) if len(self.collections) > 0: for x in self.collections: points = x.get_offsets() colors = np.array(x.get_linewidths()) all_colors.append(self._tile_or_trim(points, colors)) return np.concatenate(all_colors, axis=0) def assert_edgewidths_equal(self, edgewidths): """Assert that the given edge widths are equivalent to the plotted :attr:`~plotchecker.ScatterPlotChecker.edgewidths`. Parameters ---------- edgewidths : The expected edge widths. This should either be a single number (which will apply to all the points) or an array with size equal to the number of (expected) points. """ np.testing.assert_equal( self.edgewidths, self._parse_expected_attr("edgewidths", edgewidths)) def assert_edgewidths_allclose(self, edgewidths, **kwargs): """Assert that the given edge widths are almost equal to the plotted :attr:`~plotchecker.ScatterPlotChecker.edgewidths`. Parameters ---------- edgewidths : The expected edge widths. This should either be a single number (which will apply to all the points) or an array with size equal to the number of (expected) points. kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose( self.edgewidths, self._parse_expected_attr("edgewidths", edgewidths), **kwargs) @property def sizes(self): """The size of the plotted points. This is the square of :attr:`~plotchecker.ScatterPlotChecker.markersizes`. """ all_sizes = [] if len(self.lines) > 0: for x in self.lines: points = x.get_xydata() sizes = np.array([x.get_markersize() ** 2]) all_sizes.append(self._tile_or_trim(points, sizes)) if len(self.collections) > 0: for x in self.collections: points = x.get_offsets() sizes = x.get_sizes() all_sizes.append(self._tile_or_trim(points, sizes)) return np.concatenate(all_sizes, axis=0) def assert_sizes_equal(self, sizes): """Assert that the given point sizes are equivalent to the plotted :attr:`~plotchecker.ScatterPlotChecker.sizes`. Parameters ---------- sizes : The expected point sizes. This should either be a single number (which will apply to all the points) or an array with size equal to the number of (expected) points. """ np.testing.assert_equal( self.sizes, self._parse_expected_attr("sizes", sizes)) def assert_sizes_allclose(self, sizes, **kwargs): """Assert that the given point sizes are almost equal to the plotted :attr:`~plotchecker.ScatterPlotChecker.sizes`. Parameters ---------- sizes : The expected point sizes. This should either be a single number (which will apply to all the points) or an array with size equal to the number of (expected) points. kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose( self.sizes, self._parse_expected_attr("sizes", sizes), **kwargs) @property def markersizes(self): """The marker size of the plotted points. This is the square root of :attr:`~plotchecker.ScatterPlotChecker.sizes`. """ return np.sqrt(self.sizes) def assert_markersizes_equal(self, markersizes): """Assert that the given marker sizes are equivalent to the plotted :attr:`~plotchecker.ScatterPlotChecker.markersizes`. 
Parameters ---------- markersizes : The expected marker sizes. This should either be a single number (which will apply to all the points) or an array with size equal to the number of (expected) points. """ np.testing.assert_equal( self.markersizes, self._parse_expected_attr("markersizes", markersizes)) def assert_markersizes_allclose(self, markersizes, **kwargs): """Assert that the given marker sizes are almost equal to the plotted :attr:`~plotchecker.ScatterPlotChecker.markersizes`. Parameters ---------- markersizes : The expected marker sizes. This should either be a single number (which will apply to all the points) or an array with size equal to the number of (expected) points. kwargs : Additional keyword arguments to pass to ``numpy.testing.assert_allclose`` """ np.testing.assert_allclose( self.markersizes, self._parse_expected_attr("markersizes", markersizes), **kwargs) @property def markers(self): """The marker styles of the plotted points. Unfortunately, this information is currently unrecoverable from matplotlib, and so this attribute is not actually implemented. """ raise NotImplementedError("markers are unrecoverable for scatter plots") def assert_markers_equal(self, markers): """Assert that the given marker styles are equivalent to the plotted :attr:`~plotchecker.ScatterPlotChecker.markers`. Note: information about marker style is currently unrecoverable from collections in matplotlib, so this method is not actually implemented. Parameters ---------- markers : The expected marker styles. This should either be a single style (which will apply to all the points) or an array with size equal to the number of (expected) points. """ np.testing.assert_equal( self.markers, self._parse_expected_attr("markers", markers)) ``` #### File: plotchecker/tests/test_lineplot.py ```python import pytest import numpy as np from .. 
import LinePlotChecker, InvalidPlotError def test_empty_plot(axis): """Is an error thrown when there is nothing plotted?""" with pytest.raises(InvalidPlotError): LinePlotChecker(axis) def test_num_lines(axis): """Are the number of lines correct?""" # first just try for a single line x0 = [1, 2.17, 3.3, 4] y0 = [2.5, 3.25, 4.4, 5] axis.plot(x0, y0) pc = LinePlotChecker(axis) pc.assert_num_lines(1) # now plot another line x1 = [2, 3.17, 4.3, 5, 6] y1 = [1.5, 2.25, 3.4, 4, 7] axis.plot(x1, y1) pc = LinePlotChecker(axis) pc.assert_num_lines(2) # do a line without x values y2 = [10, 20, 30] axis.plot(y2) pc = LinePlotChecker(axis) pc.assert_num_lines(3) # and now two more lines, plotted at the same time x3 = np.array([[4.3, 5, 6], [5.3, 6, 7]]) y3 = np.array([[3.4, 4, 7], [10.2, 9, 8]]) axis.plot(x3.T, y3.T) pc = LinePlotChecker(axis) pc.assert_num_lines(5) with pytest.raises(AssertionError): pc.assert_num_lines(6) def test_data(axis): """Are the x and y values correct?""" # first just try for a single line x0 = [1, 2.17, 3.3, 4] y0 = [2.5, 3.25, 4.4, 5] axis.plot(x0, y0) pc = LinePlotChecker(axis) pc.assert_x_data_equal([x0]) pc.assert_y_data_equal([y0]) # now plot another line x1 = [2, 3.17, 4.3, 5, 6] y1 = [1.5, 2.25, 3.4, 4, 7] axis.plot(x1, y1) pc = LinePlotChecker(axis) pc.assert_x_data_equal([x0, x1]) pc.assert_y_data_equal([y0, y1]) # do a line without x values x2 = [0, 1, 2] y2 = [10, 20, 30] axis.plot(y2) pc = LinePlotChecker(axis) pc.assert_x_data_equal([x0, x1, x2]) pc.assert_y_data_equal([y0, y1, y2]) # and now two more lines, plotted at the same time x3 = np.array([[4.3, 5, 6], [5.3, 6, 7]]) y3 = np.array([[3.4, 4, 7], [10.2, 9, 8]]) axis.plot(x3.T, y3.T) pc = LinePlotChecker(axis) pc.assert_x_data_equal([x0, x1, x2] + list(x3)) pc.assert_y_data_equal([y0, y1, y2] + list(y3)) def test_data_allclose(axis): """Are the x and y values almost correct?""" err = 1e-12 x0 = np.array([1, 2.17, 3.3, 4]) y0 = np.array([2.5, 3.25, 4.4, 5]) axis.plot(x0 + err, y0 + err) pc = LinePlotChecker(axis) with pytest.raises(AssertionError): pc.assert_x_data_equal([x0]) with pytest.raises(AssertionError): pc.assert_y_data_equal([y0]) with pytest.raises(AssertionError): pc.assert_x_data_allclose([x0], rtol=1e-13) with pytest.raises(AssertionError): pc.assert_y_data_allclose([y0], rtol=1e-13) pc.assert_x_data_allclose([x0]) pc.assert_y_data_allclose([y0]) def test_colors(axis): """Are the colors correct?""" # first just try for a single line using rgb axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], color=[0, 1, 1]) pc = LinePlotChecker(axis) pc.assert_colors_equal([[0, 1, 1]]) # add another line, using hex values axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], color='#FF0000') pc = LinePlotChecker(axis) pc.assert_colors_equal([[0, 1, 1], '#FF0000']) # add another line, using matplotlib colors axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], color='g') pc = LinePlotChecker(axis) pc.assert_colors_equal([[0, 1, 1], '#FF0000', 'g']) # add another line, using full matplotlib color names axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], color='magenta') pc = LinePlotChecker(axis) pc.assert_colors_equal([[0, 1, 1], '#FF0000', 'g', 'magenta']) # and now two more lines, plotted at the same time x = np.array([[4.3, 5, 6], [5.3, 6, 7]]) y = np.array([[3.4, 4, 7], [10.2, 9, 8]]) axis.plot(x.T, y.T, color='k') pc = LinePlotChecker(axis) pc.assert_colors_equal([[0, 1, 1], '#FF0000', 'g', 'magenta', 'k', 'k']) def test_colors_allclose(axis): """Are the colors almost correct?""" err = 1e-12 color = np.array([0.1, 1, 
1]) axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], color=color - err) pc = LinePlotChecker(axis) with pytest.raises(AssertionError): pc.assert_colors_equal([color]) with pytest.raises(AssertionError): pc.assert_colors_allclose([color], rtol=1e-13) pc.assert_colors_allclose([color]) def test_linewidths(axis): """Are the linewidths correct?""" # first just try for a single line axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], linewidth=1) pc = LinePlotChecker(axis) pc.assert_linewidths_equal([1]) # add another line axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], linewidth=2) pc = LinePlotChecker(axis) pc.assert_linewidths_equal([1, 2]) # and now two more lines, plotted at the same time x = np.array([[4.3, 5, 6], [5.3, 6, 7]]) y = np.array([[3.4, 4, 7], [10.2, 9, 8]]) axis.plot(x.T, y.T, linewidth=4) pc = LinePlotChecker(axis) pc.assert_linewidths_equal([1, 2, 4, 4]) def test_linewidths_allclose(axis): """Are the linewidths almost correct?""" err = 1e-12 lw = 1 axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], lw=lw + err) pc = LinePlotChecker(axis) with pytest.raises(AssertionError): pc.assert_linewidths_equal([lw]) with pytest.raises(AssertionError): pc.assert_linewidths_allclose([lw], rtol=1e-13) pc.assert_linewidths_allclose([lw]) def test_markerfacecolors(axis): """Are the marker face colors correct?""" # inherit the color from the line axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o', color='c') pc = LinePlotChecker(axis) pc.assert_markerfacecolors_equal(['c']) # add another line, using rgb axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o', markerfacecolor=[0, 1, 1]) pc = LinePlotChecker(axis) pc.assert_markerfacecolors_equal(['c', [0, 1, 1]]) # add another line, using hex values axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o', markerfacecolor='#FF0000') pc = LinePlotChecker(axis) pc.assert_markerfacecolors_equal(['c', [0, 1, 1], '#FF0000']) # add another line, using matplotlib colors axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o', markerfacecolor='g') pc = LinePlotChecker(axis) pc.assert_markerfacecolors_equal(['c', [0, 1, 1], '#FF0000', 'g']) # add another line, using full matplotlib color names axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o', markerfacecolor='magenta') pc = LinePlotChecker(axis) pc.assert_markerfacecolors_equal(['c', [0, 1, 1], '#FF0000', 'g', 'magenta']) # and now two more lines, plotted at the same time x = np.array([[4.3, 5, 6], [5.3, 6, 7]]) y = np.array([[3.4, 4, 7], [10.2, 9, 8]]) axis.plot(x.T, y.T, marker='o', markerfacecolor='k') pc = LinePlotChecker(axis) pc.assert_markerfacecolors_equal(['c', [0, 1, 1], '#FF0000', 'g', 'magenta', 'k', 'k']) def test_markerfacecolors_allclose(axis): """Are the markerfacecolors almost correct?""" err = 1e-12 markerfacecolor = np.array([0.1, 1, 1]) axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], markerfacecolor=list(markerfacecolor + err)) pc = LinePlotChecker(axis) with pytest.raises(AssertionError): pc.assert_markerfacecolors_equal([markerfacecolor]) with pytest.raises(AssertionError): pc.assert_markerfacecolors_allclose([markerfacecolor], rtol=1e-13) pc.assert_markerfacecolors_allclose([markerfacecolor]) def test_markeredgecolors(axis): """Are the marker edge colors correct?""" # inherit the color from the line -- this should actually be the default (grey) axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o', color='c') pc = LinePlotChecker(axis) pc.assert_markeredgecolors_equal([[0, 0.75, 0.75]]) # add another line, using rgb axis.plot([1, 
2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o', markeredgecolor=[0, 1, 1]) pc = LinePlotChecker(axis) pc.assert_markeredgecolors_equal([[0, 0.75, 0.75], [0, 1, 1]]) # add another line, using hex values axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o', markeredgecolor='#FF0000') pc = LinePlotChecker(axis) pc.assert_markeredgecolors_equal([[0, 0.75, 0.75], [0, 1, 1], '#FF0000']) # add another line, using matplotlib colors axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o', markeredgecolor='g') pc = LinePlotChecker(axis) pc.assert_markeredgecolors_equal([[0, 0.75, 0.75], [0, 1, 1], '#FF0000', 'g']) # add another line, using full matplotlib color names axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o', markeredgecolor='magenta') pc = LinePlotChecker(axis) pc.assert_markeredgecolors_equal([[0, 0.75, 0.75], [0, 1, 1], '#FF0000', 'g', 'magenta']) # and now two more lines, plotted at the same time x = np.array([[4.3, 5, 6], [5.3, 6, 7]]) y = np.array([[3.4, 4, 7], [10.2, 9, 8]]) axis.plot(x.T, y.T, marker='o', markeredgecolor='k') pc = LinePlotChecker(axis) pc.assert_markeredgecolors_equal([[0, 0.75, 0.75], [0, 1, 1], '#FF0000', 'g', 'magenta', 'k', 'k']) def test_markeredgecolors_allclose(axis): """Are the markeredgecolors almost correct?""" err = 1e-12 markeredgecolor = np.array([0.1, 1, 1]) axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], markeredgecolor=list(markeredgecolor + err)) pc = LinePlotChecker(axis) with pytest.raises(AssertionError): pc.assert_markeredgecolors_equal([markeredgecolor]) with pytest.raises(AssertionError): pc.assert_markeredgecolors_allclose([markeredgecolor], rtol=1e-13) pc.assert_markeredgecolors_allclose([markeredgecolor]) def test_markeredgewidths(axis): """Are the markeredgewidths correct?""" # first just try for a single line axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o', markeredgewidth=1) pc = LinePlotChecker(axis) pc.assert_markeredgewidths_equal([1]) # add another line axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o', markeredgewidth=2) pc = LinePlotChecker(axis) pc.assert_markeredgewidths_equal([1, 2]) # and now two more lines, plotted at the same time x = np.array([[4.3, 5, 6], [5.3, 6, 7]]) y = np.array([[3.4, 4, 7], [10.2, 9, 8]]) axis.plot(x.T, y.T, marker='o', markeredgewidth=4) pc = LinePlotChecker(axis) pc.assert_markeredgewidths_equal([1, 2, 4, 4]) def test_markeredgewidths_allclose(axis): """Are the markeredgewidths almost correct?""" err = 1e-12 markeredgewidth = 1 axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], markeredgewidth=markeredgewidth + err) pc = LinePlotChecker(axis) with pytest.raises(AssertionError): pc.assert_markeredgewidths_equal([markeredgewidth]) with pytest.raises(AssertionError): pc.assert_markeredgewidths_allclose([markeredgewidth], rtol=1e-13) pc.assert_markeredgewidths_allclose([markeredgewidth]) def test_markersizes(axis): """Are the markersizes correct?""" # first just try for a single line axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o', markersize=1) pc = LinePlotChecker(axis) pc.assert_markersizes_equal([1]) # add another line axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o', markersize=2) pc = LinePlotChecker(axis) pc.assert_markersizes_equal([1, 2]) # and now two more lines, plotted at the same time x = np.array([[4.3, 5, 6], [5.3, 6, 7]]) y = np.array([[3.4, 4, 7], [10.2, 9, 8]]) axis.plot(x.T, y.T, marker='o', markersize=4) pc = LinePlotChecker(axis) pc.assert_markersizes_equal([1, 2, 4, 4]) def test_markersizes_allclose(axis): 
"""Are the markersizes almost correct?""" err = 1e-12 markersize = 1 axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], markersize=markersize + err) pc = LinePlotChecker(axis) with pytest.raises(AssertionError): pc.assert_markersizes_equal([markersize]) with pytest.raises(AssertionError): pc.assert_markersizes_allclose([markersize], rtol=1e-13) pc.assert_markersizes_allclose([markersize]) def test_markers(axis): """Are the markers correct?""" # first just try for a single line with no markers axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5]) pc = LinePlotChecker(axis) pc.assert_markers_equal(['']) # now use an empty marker axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='') pc = LinePlotChecker(axis) pc.assert_markers_equal(['', '']) # now use the o marker axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='o') pc = LinePlotChecker(axis) pc.assert_markers_equal(['', '', 'o']) # add another line with the . marker axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], marker='.') pc = LinePlotChecker(axis) pc.assert_markers_equal(['', '', 'o', '.']) # and now two more lines, plotted at the same time, with the D marker x = np.array([[4.3, 5, 6], [5.3, 6, 7]]) y = np.array([[3.4, 4, 7], [10.2, 9, 8]]) axis.plot(x.T, y.T, marker='D') pc = LinePlotChecker(axis) pc.assert_markers_equal(['', '', 'o', '.', 'D', 'D']) def test_kwarg_labels(axis): """Are the legend labels correct when given as kwargs?""" axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], label='foo') axis.plot([2.17, 3.3, 4], [3.25, 4.4, 5], label='bar') axis.plot([1, 2.17, 3.3], [2.5, 3.25, 4.4], label='baz') # make sure it fails before the legend is created pc = LinePlotChecker(axis) with pytest.raises(AssertionError): pc.assert_labels_equal(['foo', 'bar', 'baz']) axis.legend() pc = LinePlotChecker(axis) pc.assert_labels_equal(['foo', 'bar', 'baz']) def test_legend_labels(axis): """Are the legend labels correct when they are passed into the legend call?""" axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5]) axis.plot([2.17, 3.3, 4], [3.25, 4.4, 5]) axis.plot([1, 2.17, 3.3], [2.5, 3.25, 4.4]) # make sure it fails before the legend is created pc = LinePlotChecker(axis) with pytest.raises(AssertionError): pc.assert_labels_equal(['foo', 'bar', 'baz']) axis.legend(['foo', 'bar', 'baz']) pc = LinePlotChecker(axis) pc.assert_labels_equal(['foo', 'bar', 'baz']) def test_legend_handles_and_labels(axis): """Are the legend labels correct when they are passed into the legend call with the corresponding handle?""" l0, = axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5]) l1, = axis.plot([2.17, 3.3, 4], [3.25, 4.4, 5]) l2, = axis.plot([1, 2.17, 3.3], [2.5, 3.25, 4.4]) # make sure it fails before the legend is created pc = LinePlotChecker(axis) with pytest.raises(AssertionError): pc.assert_labels_equal(['foo', 'bar', 'baz']) axis.legend([l0, l1, l2], ['foo', 'bar', 'baz']) pc = LinePlotChecker(axis) pc.assert_labels_equal(['foo', 'bar', 'baz']) def test_alphas(axis): """Are the alphas correct?""" # first just try for a single line using rgb axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], color=[0, 1, 1]) pc = LinePlotChecker(axis) pc.assert_alphas_equal([1]) # get the alpha value from rgba axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], color=[0, 1, 1, 0.5]) pc = LinePlotChecker(axis) pc.assert_alphas_equal([1, 0.5]) # specify the alpha value explicitly x = np.array([[4.3, 5, 6], [5.3, 6, 7]]) y = np.array([[3.4, 4, 7], [10.2, 9, 8]]) axis.plot(x.T, y.T, alpha=0.2) pc = LinePlotChecker(axis) pc.assert_alphas_equal([1, 0.5, 0.2, 0.2]) def 
test_alphas_allclose(axis): """Are the alphas almost correct?""" err = 1e-12 alpha = 0.5 axis.plot([1, 2.17, 3.3, 4], [2.5, 3.25, 4.4, 5], alpha=alpha + err) pc = LinePlotChecker(axis) with pytest.raises(AssertionError): pc.assert_alphas_equal([alpha]) with pytest.raises(AssertionError): pc.assert_alphas_allclose([alpha], rtol=1e-13) pc.assert_alphas_allclose([alpha]) def test_permutations(axis): x = np.linspace(0, 1, 20)[None] * np.ones((3, 20)) y = x ** np.array([1, 2, 3])[:, None] colors = ['r', 'g', 'b'] markers = ['o', 's', 'D'] labels = ['Line A', 'Line B', 'Line C'] # plot lines in a different order from the values for i in [2, 0, 1]: axis.plot(x[i], y[i], color=colors[i], marker=markers[i], label=labels[i], alpha=0.5) axis.legend() # do the permutation based off of colors pc = LinePlotChecker(axis) pc.assert_num_lines(3) pc.find_permutation('colors', colors) pc.assert_x_data_equal(x) pc.assert_y_data_equal(y) pc.assert_colors_equal(colors) pc.assert_markers_equal(markers) pc.assert_labels_equal(labels) pc.assert_alphas_equal([0.5, 0.5, 0.5]) # do the permutation based off of markers pc = LinePlotChecker(axis) pc.assert_num_lines(3) pc.find_permutation('markers', markers) pc.assert_x_data_equal(x) pc.assert_y_data_equal(y) pc.assert_colors_equal(colors) pc.assert_markers_equal(markers) pc.assert_labels_equal(labels) pc.assert_alphas_equal([0.5, 0.5, 0.5]) # do the permutation based off of labels pc = LinePlotChecker(axis) pc.assert_num_lines(3) pc.find_permutation('labels', labels) pc.assert_x_data_equal(x) pc.assert_y_data_equal(y) pc.assert_colors_equal(colors) pc.assert_markers_equal(markers) pc.assert_labels_equal(labels) pc.assert_alphas_equal([0.5, 0.5, 0.5]) with pytest.raises(AssertionError): pc.find_permutation('labels', labels[:-1]) with pytest.raises(AssertionError): pc.find_permutation('labels', [x + 'a' for x in labels]) ```
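The plotchecker modules and tests above are normally driven from a grading or test script: draw on a matplotlib axis, wrap the axis in a checker, and call its `assert_*` methods. The sketch below uses only methods that appear in the files above; the sample data, the `plt.subplots()` scaffolding, and the top-level `from plotchecker import ...` path (suggested by the tests' `from .. import`) are assumptions rather than repository code.

```python
import matplotlib.pyplot as plt
from plotchecker import LinePlotChecker, ScatterPlotChecker  # assumed top-level exports

x = [0, 1, 2, 3]
y = [0, 1, 4, 9]

# Check a line plot: data, color, and legend label.
_, axis = plt.subplots()
axis.plot(x, y, color='r', label='quadratic')
axis.legend()
lpc = LinePlotChecker(axis)
lpc.assert_num_lines(1)
lpc.assert_x_data_equal([x])
lpc.assert_y_data_equal([y])
lpc.assert_colors_equal(['r'])
lpc.assert_labels_equal(['quadratic'])

# Check a scatter plot on a separate axis (ScatterPlotChecker rejects axes
# that also contain solid lines, per its __init__ above).
_, axis = plt.subplots()
axis.scatter(x, y, c='b', s=25)
spc = ScatterPlotChecker(axis)
spc.assert_num_points(4)
spc.assert_x_data_equal(x)
spc.assert_y_data_equal(y)
spc.assert_colors_equal('b')
spc.assert_sizes_equal(25)
```

Each assertion raises `AssertionError` (or `InvalidPlotError` at construction time) when the plot does not match, which is what makes the checkers convenient inside pytest or an autograder.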
{ "source": "jhamrick/scenesim", "score": 2 }
#### File: scenesim/display/simviewer.py ```python from libpanda import AntialiasAttrib, BitMask32 from pandac.PandaModules import (AmbientLight, NodePath, PerspectiveLens, Spotlight) from scenesim.display.lightbase import LightBase class SimViewer(LightBase): """ Visualizes simulation.""" def __init__(self): super(SimViewer, self).__init__() # Make a window. size = (700, 520) self.create_output(size, "SimViewer") self.output.setClearColor((0.0, 0.0, 0.0, 1.0)) # Lights node self.lights = NodePath('lights') # Create a spotlight slight = Spotlight('slight') slight.setScene(self.root) slight.setShadowCaster(True, 2 ** 11, 2 ** 11) # Set shadow mask, so we can exclude objects from casting shadows self.shadow_mask = BitMask32.bit(2) slight.setCameraMask(self.shadow_mask) c = 1.4 slight.setColor((c, c, c, 1.0)) slight.getLens().setNearFar(4, 100) slight.getLens().setFov(45) slnp = self.lights.attachNewNode(slight) slnp.setPos((7, 10, 40)) slnp.lookAt(2, 0, 1.5) self.root.setLight(slnp) # Create an ambient light. alight = AmbientLight('alight') c = 0.6 alight.setColor((c, c, c, 1.0)) alnp = self.lights.attachNewNode(alight) self.root.setLight(alnp) self.lights.reparentTo(self.root) # Set auto shading for shadows. self.root.setShaderAuto() # Set antialiasing on. self.root.setAntialias(AntialiasAttrib.MAuto) # Camera. lens = PerspectiveLens() self.lens = lens self.lens.setNearFar(0.1, 1000.) self.lens.setFov((40, 30)) self.cameras = self.root.attachNewNode('cameras') self.camera = self.make_camera(self.output, lens=self.lens) self.camera.setPos(15, 44, 3.) self.camera.setPos(15, 35, 15.) self.camera.lookAt(0, 0, 1.) def create_output(self, size, name): self.output = self.make_window(size=size, name=name) self.render_frame() self.render_frame() class SimImager(SimViewer): """ Make images of simulation scenes.""" def create_output(self, size, name): self.output, tex = self.make_texture_buffer( size=size, name=name, mode="RTMCopyRam") self.render_frame() self.render_frame() ``` #### File: scenesim/objects/pso.py ```python from abc import ABCMeta, abstractmethod from collections import Iterable, OrderedDict from contextlib import contextmanager from ctypes import c_float from functools import wraps from itertools import izip import re ## from libpanda import Mat4, Point3, TransformState, Vec3 import numpy as np import panda3d.bullet as p3b from panda3d.bullet import BulletBodyNode, BulletGhostNode, BulletRigidBodyNode from panda3d.core import NodePathCollection ## from scenesim.objects.sso import SSO ## from pdb import set_trace as BP def cast_c_float(func): """ Decorator for casting a function's return value to ctypes.f32.""" def func_c_float(*args, **kwargs): return c_float(func(*args, **kwargs)).value return func_c_float class BaseShape(list): """ Base class for shape objects.""" __metaclass__ = ABCMeta _type_rx = re.compile("Bullet(.+)Shape") def __new__(cls, *args, **kwargs): """ Set cls._bshape by reading from bases of derived class.""" obj = super(BaseShape, cls).__new__(cls) cls.name = cls.read_name(cls.bshape) return obj @classmethod def _fix_args(cls, args): """ Standardize the BulletShape arguments.""" args1 = cls.args0.copy() if isinstance(args, dict): args1.update(args) else: for key, prm in izip(args1, args): args1[key] = prm return args1.values() @staticmethod def _fix_xform(T): """ Converts T into a valid xform. Returns None on fail.""" # If T has "flat" attribute, it is an ndarray and that should # be returned. Otherwise just return T. 
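# Illustrative usage sketch (not part of the original scenesim source): the
# classes below describe Bullet collision shapes declaratively so that a shape
# spec can be stored on a node as a tag and rebuilt later. Roughly, and
# assuming the SSO base class from scenesim.objects.sso behaves like a NodePath:
#
#     body = RBSO("block")      # wraps a BulletRigidBodyNode
#     body.set_mass(1.0)
#     body.set_shape("Box")     # stores the spec only; nothing is built yet
#     body.create_shape()       # builds a BulletBoxShape with default half extents
#
# ShapeManager/ShapeList (defined below) turn specs such as "Box" or
# ("Box", (Vec3(1., 1., 1.),), None) into Bullet*Shape objects plus their
# TransformStates via ShapeList(...).init().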
if not T: xform = TransformState.makeIdentity() else: if not isinstance(T, TransformState): mat = Mat4(*T.flat) if hasattr(T, "flat") else Mat4(T) xform = TransformState.makeMat(mat) else: xform = T return xform @classmethod def _fix_prms(cls, p): """ Standardizes parameters.""" if not isinstance(p, Iterable) or isinstance(p, str) or len(p) > 2: raise ValueError("Bad input: %s" % p) elif len(p) == 0: prms = [(), None] elif len(p) == 1: prms = [p[0], None] else: prms = list(p) # Make prms into list with fixed args and xform. prms = [cls._fix_args(prms[0]), cls._fix_xform(prms[1])] return prms def __init__(self, *prms): """ Initialize.""" prms = self._fix_prms(prms) super(BaseShape, self).__init__(prms) def init(self): """ Return the initialized Bullet*Shape and the xform.""" args, xform = self bshape = self.bshape(*args) return bshape, xform @classmethod def read_name(cls, bshape): return cls._type_rx.match(bshape.__name__).group(1) @classmethod def read_params(cls, bshape): return [getattr(bshape, "get%s" % key)() for key in cls.args0] @abstractmethod def scale(self, scale): """ Scales shape arguments.""" pass def shift(self, pos=(0, 0, 0), quat=(1, 0, 0, 0)): """ Translate and rotate shape's transform.""" ones = Vec3(1, 1, 1) T = TransformState.makePosQuatScale(pos, quat, ones) xform = T.compose(self[1]) self[1] = xform def transform(self, node, other=None): """ Shift and scale the shape by node's transform.""" if other is None: other = node.getParent() scale = node.get_scale(other) pos = node.get_pos(other) quat = node.get_quat(other) self.scale(scale) self.shift(pos=pos, quat=quat) class BoxShape(BaseShape): """ BulletBoxShape.""" bshape = p3b.BulletBoxShape args0 = OrderedDict((("HalfExtentsWithMargin", Vec3(0.5, 0.5, 0.5)),)) def scale(self, scale): args = (Vec3(*[s * a for s, a in izip(scale, self[0][0])]),) self[0] = args class CapsuleShape(BaseShape): """ BulletCapsuleShape.""" bshape = p3b.BulletCapsuleShape args0 = OrderedDict((("Radius", 0.5), ("HalfHeight", 0.5))) def scale(self, scale): if scale[0] != scale[1]: raise ValueError("%s does not support anisotropic x,y scaling" % self.bshape) self[0] = (scale[0] * self[0][0], scale[2] * self[0][1]) class ConeShape(BaseShape): """ BulletConeShape.""" bshape = p3b.BulletConeShape args0 = OrderedDict((("Radius", 0.5), ("Height", 1.))) def scale(self, scale): if scale[0] != scale[1]: raise ValueError("%s does not support anisotropic x,y scaling" % self.bshape) self[0] = (scale[0] * self[0][0], scale[2] * self[0][1]) class CylinderShape(BaseShape): """ BulletCylinderShape.""" bshape = p3b.BulletCylinderShape args0 = OrderedDict((("Radius", 0.5), ("Height", 1.))) def scale(self, scale): if scale[0] != scale[1]: raise ValueError("%s does not support anisotropic x,y scaling" % self.bshape) self[0] = (scale[0] * self[0][0], scale[2] * self[0][1]) class PlaneShape(BaseShape): """ BulletPlaneShape.""" bshape = p3b.BulletPlaneShape args0 = OrderedDict((("PlaneNormal", Vec3(0, 0, 1)), ("PlaneConstant", 0))) def scale(self, scale): s = self[0][0].dot(Vec3(*scale)) / self[0][0].length() args = (self[0][0], self[0][1] * s) self[0] = args class SphereShape(BaseShape): """ BulletSphereShape.""" bshape = p3b.BulletSphereShape args0 = OrderedDict((("Radius", 0.5),)) def scale(self, scale): if (scale[0] != scale[1]) or (scale[0] != scale[2]): raise ValueError("%s does not support anisotropic x,y,z scaling" % self.bshape) self[0] = (scale[0] * self[0][0],) class ShapeManager(object): """ Utility class for making *Shape objects and reading 
BulletShapes.""" _name2shape = { "Box": BoxShape, "Capsule": CapsuleShape, "Cone": ConeShape, "Cylinder": CylinderShape, "Plane": PlaneShape, "Sphere": SphereShape} @classmethod def make1(cls, val): """ Initialize a *Shape object from input.""" if isinstance(val, str): type_ = cls._name2shape[val] prms = [(), None] else: type_ = cls._name2shape[val[0]] prms = val[1:] return type_(*prms) @classmethod def make(cls, vals): """ Standardizes list of vals.""" if isinstance(vals, str): vals = [[vals, (), None]] elif isinstance(vals[0], str): vals = [vals] # Verify that vals contains valid names and xform matrices. shapes = [cls.make1(val) for val in vals] return shapes @classmethod def read(cls, node): """ Get shape list from Bullet*Shape(s).""" # Get valid node. try: node = node.node() except AttributeError: pass # Get shapes. n_shapes = node.getNumShapes() if n_shapes == 0: # For no shape, return "". shapes = [] else: # For 1+ shapes. parent = node.getParent() node.detachNode() shapes = [] for i in xrange(n_shapes): # Get shape and shape's matrix. bshape = node.getShape(i) # Get name. name = cls.read_name(bshape) # Get params. Shape = cls._name2shape[name] params = Shape.read_params(bshape) # Get xform. xform = TransformState.makeMat(node.getShapeMat(i)) # Store. shapes.append((name, params, xform)) node.reparentTo(parent) return shapes class ShapeList(list): """ List of *Shapes.""" def __init__(self, val=""): super(ShapeList, self).__init__(ShapeManager.make(val)) def __setitem__(self, key, val): super(ShapeList, self).__setitem__(key, ShapeManager.make1(val)) def __add__(self, val): super(ShapeList, self).__add__(ShapeManager.make(val)) def __iadd__(self, val): super(ShapeList, self).__iadd__(ShapeManager.make(val)) def append(self, val): super(ShapeList, self).append(ShapeManager.make1(val)) def extend(self, val): super(ShapeList, self).extend(ShapeManager.make(val)) def insert(self, key, val): super(ShapeList, self).insert(key, ShapeManager.make1(val)) def init(self): """ Build the BulletShapes (bshapes) and TransformStates (xforms).""" bshapes = [] xforms = [] for shape in self: bshape, xform = shape.init() bshapes.append(bshape) xforms.append(xform) return bshapes, xforms class PSO(SSO): """ Bullet physics object.""" type_ = BulletBodyNode _prop_tags = ("friction", "restitution", "shape", "deactivation_enabled") _res_tags = ("shape",) def __init__(self, *args, **kwargs): # Converts args so they're appropriate for self.type_. if len(args) == 0: args = ("",) if isinstance(args[0], str): args = (self.type_(args[0]),) + args[1:] tag = self.__class__ else: tag = None ## Using super fails, probably because NodePath is a C++ class. 
# super(PSO, self).__init__(self, *new_args, **kwargs) SSO.__init__(self, *args, **kwargs) if tag: self.setPythonTag("sso", tag) @wraps(type_.set_friction, assigned=("__name__", "__doc__")) def set_friction(self, friction): self.node().set_friction(friction) @cast_c_float @wraps(type_.get_friction, assigned=("__name__", "__doc__")) def get_friction(self): return self.node().get_friction() @wraps(type_.set_restitution, assigned=("__name__", "__doc__")) def set_restitution(self, restitution): self.node().set_restitution(restitution) @cast_c_float @wraps(type_.get_restitution, assigned=("__name__", "__doc__")) def get_restitution(self): return self.node().get_restitution() def set_shape(self, shape): self.setPythonTag("shape", shape) def get_shape(self): if "shape" not in self.getPythonTagKeys(): self.set_shape("") return self.getPythonTag("shape") def add_shape(self, shapes): """ Adds the shape.""" # Construct the BulletShapes. bshapes, xforms = shapes.init() for bshape, xform in izip(bshapes, xforms): # Add each to the BulletBodyNode. self.node().addShape(bshape, xform) def create_shape(self): """ Initializes a BulletShape(s).""" shapes = ShapeList(self.get_shape()) self.add_shape(shapes) self.setTag("resource", "shape") def delete_shape(self): """ Destroys the BulletShape(s).""" shapes = self.node().getShapes() for shape in shapes: self.node().removeShape(shape) self.clearTag("resource") @wraps(type_.set_deactivation_enabled, assigned=("__name__", "__doc__")) def set_deactivation_enabled(self, is_enabled): return self.node().set_deactivation_enabled(is_enabled) @wraps(type_.is_deactivation_enabled, assigned=("__name__", "__doc__")) def get_deactivation_enabled(self): return self.node().is_deactivation_enabled() class RBSO(PSO): type_ = BulletRigidBodyNode _prop_tags = ("linear_velocity", "angular_velocity", "mass", "gravity") _res_tags = () @wraps(type_.set_mass, assigned=("__name__", "__doc__")) def set_mass(self, mass): self.node().set_mass(mass) @cast_c_float @wraps(type_.get_mass, assigned=("__name__", "__doc__")) def get_mass(self): return self.node().get_mass() @wraps(type_.set_linear_velocity, assigned=("__name__", "__doc__")) def set_linear_velocity(self, linear_velocity): self.node().set_linear_velocity(linear_velocity) @wraps(type_.get_linear_velocity, assigned=("__name__", "__doc__")) def get_linear_velocity(self): return self.node().get_linear_velocity() @wraps(type_.set_angular_velocity, assigned=("__name__", "__doc__")) def set_angular_velocity(self, angular_velocity): self.node().set_angular_velocity(angular_velocity) @wraps(type_.get_angular_velocity, assigned=("__name__", "__doc__")) def get_angular_velocity(self): return self.node().get_angular_velocity() @wraps(type_.set_gravity, assigned=("__name__", "__doc__")) def set_gravity(self, grav): return self.node().set_gravity(grav) @wraps(type_.get_gravity, assigned=("__name__", "__doc__")) def get_gravity(self): return self.node().get_gravity() class CPSO(RBSO): """ Bullet physics object, specialized for compound shapes.""" def __init__(self, *args, **kwargs): if len(args) == 0: args = ("compound",) super(CPSO, self).__init__(*args, **kwargs) @property def components(self): return self.descendants(depths=[1], type_=PSO) @contextmanager def _preserve_child_tranforms(self): """ Remember transforms of existing children to avoid center-of-mass shift.""" parent = self.getParent() descendants = self.descendants(depths=[1]) # Remember child transforms. 
mats = [child.get_mat(parent) for child in descendants] yield parent # Update transforms of existing children. for descendant, mat in izip(descendants, mats): descendant.set_mat(parent, mat) def _compute_shapes(self): """ Computes shapes from self.components.""" # Compute mass and center-of-mass. masses = [] poses = [] psos = self.descendants(depths=[1], type_=PSO) parent = self.getParent() for pso in psos: mass = pso.get_mass() pos = pso.get_pos(parent) if mass == 0.: com = pos break poses.append(pos) masses.append(mass) else: mass = np.sum(masses) com = Point3(*(np.sum(np.array(poses).T * masses, axis=-1) / mass)) self.set_mass(mass) with self._preserve_child_tranforms() as parent: self.set_pos(parent, com) # Add shapes from PSOs. vals = [] for pso in psos: shapes0 = ShapeList(pso.get_shape()) for shape0 in shapes0: name = shape0.name args0, xform0 = shape0 if name != "Box": print("Can't handle that shape: %s" % name) BP() shape = ShapeManager.make1((name, args0, xform0)) shape.transform(pso, other=self) # scale = pso.get_scale(self) # pos = pso.get_pos(self) # quat = pso.get_quat(self) # shape.scale(scale) # shape.shift(pos, quat) val = (name, shape[0], shape[1]) vals.append(val) # Set compound object's shapes tag. self.set_shape(vals) def add(self, psos): """ Add sequence of PSOs to compound object.""" NodePathCollection(psos).wrtReparentTo(self) self._compute_shapes() def remove(self, psos): """ Remove sequence of PSOs from compound object.""" with self._preserve_child_tranforms(): NodePathCollection(psos).detach() if self.getNumChildren() > 0: self._compute_shapes() def destroy_component_shapes(self): """ Destroys the shape resources of the component PSOs.""" for pso in self.components: pso.destroy_resources(tags=("shape",)) def remove_component_bodies(self, bbase): """ Destroys the shape resources of the component PSOs.""" bbase.remove(self.components) def init_tree(self, tags=None): """ Overrides parent's init_tree() so that components' shapes are not initialized.""" super(CPSO, self).init_tree(tags=tags) # if tags is None or "shape" in tags: self.destroy_component_shapes() class GHSO(PSO): """ PSO subclass for `BulletGhostNode`s.""" type_ = BulletGhostNode _prop_tags = ("num_overlapping_nodes", "overlapping_nodes") _res_tags = () @wraps(type_.get_num_overlapping_nodes, assigned=("__name__", "__doc__")) def get_num_overlapping_nodes(self): return self.node().get_num_overlapping_nodes() @wraps(type_.get_overlapping_nodes, assigned=("__name__", "__doc__")) def get_overlapping_nodes(self): return self.node().get_overlapping_nodes() ## TODO # # class A(object): # def __init__(self): # print "A.__init__()" # class AA(A): # def __init__(self): # print "AA.__init__()" # class B(AA): # def __init__(self): # print "B.__init__()" # super(B, self).__init__() # class C(AA): # def __init__(self): # print "C.__init__()" # super(C, self).__init__() # class D(C, B): # pass # d = D() # print D.mro() ```
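Taken together, the two scenesim modules above cover display (`SimViewer`/`SimImager`) and physics objects (`PSO` and its subclasses). A rough end-to-end sketch follows; `LightBase` and `SSO` come from modules not shown here, so the `reparentTo` call and the import paths are assumptions based on how the classes above use `self.root` and Panda3D's `NodePath` API, while `render_frame` is assumed to exist because `SimViewer.create_output` calls it.

```python
from scenesim.display.simviewer import SimViewer
from scenesim.objects.pso import RBSO

# Open a window with the lights and camera configured in SimViewer.__init__.
viewer = SimViewer()

# Describe a unit box rigid body declaratively, then build its Bullet shape.
box = RBSO("box")
box.set_mass(1.0)
box.set_shape("Box")   # stored as a tag; nothing is built yet
box.create_shape()     # instantiates the BulletBoxShape on the node

# Attach it under the viewer's scene graph and draw one frame.
box.reparentTo(viewer.root)   # NodePath method, assumed available via SSO
viewer.render_frame()

# A real simulation would also register the body with a BulletWorld and give
# it visible geometry; both are outside the files shown above.
```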
{ "source": "jhance/mypy", "score": 3 }
#### File: mypy/codec/register.py ```python from __future__ import absolute_import import codecs import encodings import sys def search_function(encoding): if encoding != 'mypy': return None # Assume utf8 encoding utf8 = encodings.search_function('utf8') if sys.version_info[0] == 3: # Python 3 return utf8 else: # Python 2 from .mypy_codec import mypy_decode, MyPyIncrementalDecoder, MyPyStreamReader return codecs.CodecInfo(name='mypy', encode=utf8.encode, decode=mypy_decode, incrementalencoder=utf8.incrementalencoder, incrementaldecoder=MyPyIncrementalDecoder, streamreader=MyPyStreamReader, streamwriter=utf8.streamwriter) codecs.register(search_function) def main(): fn = sys.argv[1] with open(fn) as fp: data = fp.read() print(codecs.decode(data, 'mypy')) if __name__ == '__main__': main() ``` #### File: mypy/mypy/semanal.py ```python from typing import ( List, Dict, Set, Tuple, cast, Any, overload, TypeVar, Union, Optional ) from mypy.nodes import ( MypyFile, TypeInfo, Node, AssignmentStmt, FuncDef, OverloadedFuncDef, ClassDef, Var, GDEF, MODULE_REF, FuncItem, Import, ImportFrom, ImportAll, Block, LDEF, NameExpr, MemberExpr, IndexExpr, TupleExpr, ListExpr, ExpressionStmt, ReturnStmt, RaiseStmt, YieldStmt, AssertStmt, OperatorAssignmentStmt, WhileStmt, ForStmt, BreakStmt, ContinueStmt, IfStmt, TryStmt, WithStmt, DelStmt, GlobalDecl, SuperExpr, DictExpr, CallExpr, RefExpr, OpExpr, UnaryExpr, SliceExpr, CastExpr, TypeApplication, Context, SymbolTable, SymbolTableNode, BOUND_TVAR, UNBOUND_TVAR, ListComprehension, GeneratorExpr, FuncExpr, MDEF, FuncBase, Decorator, SetExpr, TypeVarExpr, StrExpr, PrintStmt, ConditionalExpr, PromoteExpr, ComparisonExpr, StarExpr, ARG_POS, ARG_NAMED, MroError, type_aliases, YieldFromStmt, YieldFromExpr, NamedTupleExpr, NonlocalDecl, SetComprehension, DictionaryComprehension, TYPE_ALIAS, TypeAliasExpr, YieldExpr, ExecStmt, COVARIANT, CONTRAVARIANT, INVARIANT ) from mypy.visitor import NodeVisitor from mypy.traverser import TraverserVisitor from mypy.errors import Errors from mypy.types import ( NoneTyp, CallableType, Overloaded, Instance, Type, TypeVarType, AnyType, FunctionLike, UnboundType, TypeList, ErrorType, TypeVarDef, replace_leading_arg_type, TupleType, UnionType, StarType, EllipsisType ) from mypy.nodes import function_type, implicit_module_attrs from mypy.typeanal import TypeAnalyser, TypeAnalyserPass3, analyze_type_alias from mypy.exprtotype import expr_to_unanalyzed_type, TypeTranslationError from mypy.lex import lex from mypy.parsetype import parse_type from mypy.sametypes import is_same_type from mypy import defaults T = TypeVar('T') # Inferred value of an expression. ALWAYS_TRUE = 0 ALWAYS_FALSE = 1 TRUTH_VALUE_UNKNOWN = 2 # Map from obsolete name to the current spelling. obsolete_name_mapping = { 'typing.Function': 'typing.Callable', 'typing.typevar': 'typing.TypeVar', } # Hard coded type promotions (shared between all Python versions). # These add extra ad-hoc edges to the subtyping relation. For example, # int is considered a subtype of float, even though there is no # subclass relationship. TYPE_PROMOTIONS = { 'builtins.int': 'builtins.float', 'builtins.float': 'builtins.complex', } # Hard coded type promotions for Python 3. # # Note that the bytearray -> bytes promotion is a little unsafe # as some functions only accept bytes objects. Here convenience # trumps safety. TYPE_PROMOTIONS_PYTHON3 = TYPE_PROMOTIONS.copy() TYPE_PROMOTIONS_PYTHON3.update({ 'builtins.bytearray': 'builtins.bytes', }) # Hard coded type promotions for Python 2. 
# # These promotions are unsafe, but we are doing them anyway # for convenience and also for Python 3 compatibility # (bytearray -> str). TYPE_PROMOTIONS_PYTHON2 = TYPE_PROMOTIONS.copy() TYPE_PROMOTIONS_PYTHON2.update({ 'builtins.str': 'builtins.unicode', 'builtins.bytearray': 'builtins.str', }) class SemanticAnalyzer(NodeVisitor): """Semantically analyze parsed mypy files. The analyzer binds names and does various consistency checks for a parse tree. Note that type checking is performed as a separate pass. This is the second phase of semantic analysis. """ # Library search paths lib_path = None # type: List[str] # Module name space modules = None # type: Dict[str, MypyFile] # Global name space for current module globals = None # type: SymbolTable # Names declared using "global" (separate set for each scope) global_decls = None # type: List[Set[str]] # Names declated using "nonlocal" (separate set for each scope) nonlocal_decls = None # type: List[Set[str]] # Local names of function scopes; None for non-function scopes. locals = None # type: List[SymbolTable] # Nested block depths of scopes block_depth = None # type: List[int] # TypeInfo of directly enclosing class (or None) type = None # type: TypeInfo # Stack of outer classes (the second tuple item contains tvars). type_stack = None # type: List[TypeInfo] # Type variables that are bound by the directly enclosing class bound_tvars = None # type: List[SymbolTableNode] # Stack of type varialbes that were bound by outer classess tvar_stack = None # type: List[List[SymbolTableNode]] # Stack of functions being analyzed function_stack = None # type: List[FuncItem] loop_depth = 0 # Depth of breakable loops cur_mod_id = '' # Current module id (or None) (phase 2) imports = None # type: Set[str] # Imported modules (during phase 2 analysis) errors = None # type: Errors # Keeps track of generated errors def __init__(self, lib_path: List[str], errors: Errors, pyversion: Tuple[int, int] = defaults.PYTHON3_VERSION) -> None: """Construct semantic analyzer. Use lib_path to search for modules, and report analysis errors using the Errors instance. 
""" self.locals = [None] self.imports = set() self.type = None self.type_stack = [] self.bound_tvars = None self.tvar_stack = [] self.function_stack = [] self.block_depth = [0] self.loop_depth = 0 self.lib_path = lib_path self.errors = errors self.modules = {} self.pyversion = pyversion def visit_file(self, file_node: MypyFile, fnam: str) -> None: self.errors.set_file(fnam) self.errors.set_ignored_lines(file_node.ignored_lines) self.cur_mod_node = file_node self.cur_mod_id = file_node.fullname() self.is_stub_file = fnam.lower().endswith('.pyi') self.globals = file_node.names if 'builtins' in self.modules: self.globals['__builtins__'] = SymbolTableNode( MODULE_REF, self.modules['builtins'], self.cur_mod_id) for name in implicit_module_attrs: v = self.globals[name].node if isinstance(v, Var): v.type = self.anal_type(v.type) v.is_ready = True defs = file_node.defs for d in defs: d.accept(self) if self.cur_mod_id == 'builtins': remove_imported_names_from_symtable(self.globals, 'builtins') self.errors.set_ignored_lines(set()) def visit_func_def(self, defn: FuncDef) -> None: self.errors.push_function(defn.name()) self.update_function_type_variables(defn) self.errors.pop_function() if self.is_class_scope(): # Method definition defn.is_conditional = self.block_depth[-1] > 0 defn.info = self.type if not defn.is_decorated: if not defn.is_overload: if defn.name() in self.type.names: n = self.type.names[defn.name()].node if self.is_conditional_func(n, defn): defn.original_def = cast(FuncDef, n) else: self.name_already_defined(defn.name(), defn) self.type.names[defn.name()] = SymbolTableNode(MDEF, defn) if not defn.is_static: if not defn.args: self.fail('Method must have at least one argument', defn) elif defn.type: sig = cast(FunctionLike, defn.type) # TODO: A classmethod's first argument should be more # precisely typed than Any. leading_type = AnyType() if defn.is_class else self_type(self.type) defn.type = replace_implicit_first_type(sig, leading_type) if self.is_func_scope() and (not defn.is_decorated and not defn.is_overload): self.add_local_func(defn, defn) defn._fullname = defn.name() self.errors.push_function(defn.name()) self.analyze_function(defn) self.errors.pop_function() def is_conditional_func(self, n: Node, defn: FuncDef) -> bool: return (isinstance(n, FuncDef) and cast(FuncDef, n).is_conditional and defn.is_conditional) def update_function_type_variables(self, defn: FuncDef) -> None: """Make any type variables in the signature of defn explicit. Update the signature of defn to contain type variable definitions if defn is generic. """ if defn.type: functype = cast(CallableType, defn.type) typevars = self.infer_type_variables(functype) # Do not define a new type variable if already defined in scope. 
typevars = [(name, tvar) for name, tvar in typevars if not self.is_defined_type_var(name, defn)] if typevars: defs = [TypeVarDef(tvar[0], -i - 1, tvar[1].values, self.object_type(), tvar[1].variance) for i, tvar in enumerate(typevars)] functype.variables = defs def infer_type_variables(self, type: CallableType) -> List[Tuple[str, TypeVarExpr]]: """Return list of unique type variables referred to in a callable.""" names = [] # type: List[str] tvars = [] # type: List[TypeVarExpr] for arg in type.arg_types + [type.ret_type]: for name, tvar_expr in self.find_type_variables_in_type(arg): if name not in names: names.append(name) tvars.append(tvar_expr) return list(zip(names, tvars)) def find_type_variables_in_type( self, type: Type) -> List[Tuple[str, TypeVarExpr]]: """Return a list of all unique type variable references in type. This effectively does partial name binding, results of which are mostly thrown away. """ result = [] # type: List[Tuple[str, TypeVarExpr]] if isinstance(type, UnboundType): name = type.name node = self.lookup_qualified(name, type) if node and node.kind == UNBOUND_TVAR: result.append((name, cast(TypeVarExpr, node.node))) for arg in type.args: result.extend(self.find_type_variables_in_type(arg)) elif isinstance(type, TypeList): for item in type.items: result.extend(self.find_type_variables_in_type(item)) elif isinstance(type, UnionType): for item in type.items: result.extend(self.find_type_variables_in_type(item)) elif isinstance(type, AnyType): pass elif isinstance(type, EllipsisType) or isinstance(type, TupleType): pass else: assert False, 'Unsupported type %s' % type return result def is_defined_type_var(self, tvar: str, context: Node) -> bool: return self.lookup_qualified(tvar, context).kind == BOUND_TVAR def visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None: t = [] # type: List[CallableType] for i, item in enumerate(defn.items): # TODO support decorated overloaded functions properly item.is_overload = True item.func.is_overload = True item.accept(self) t.append(cast(CallableType, function_type(item.func, self.builtin_type('builtins.function')))) if item.func.is_property and i == 0: # This defines a property, probably with a setter and/or deleter. self.analyze_property_with_multi_part_definition(defn) break if not [dec for dec in item.decorators if refers_to_fullname(dec, 'typing.overload')]: self.fail("'overload' decorator expected", item) defn.type = Overloaded(t) defn.type.line = defn.line if self.is_class_scope(): self.type.names[defn.name()] = SymbolTableNode(MDEF, defn, typ=defn.type) defn.info = self.type elif self.is_func_scope(): self.add_local_func(defn, defn) def analyze_property_with_multi_part_definition(self, defn: OverloadedFuncDef) -> None: """Analyze a propery defined using multiple methods (e.g., using @x.setter). Assume that the first method (@property) has already been analyzed. """ defn.is_property = True items = defn.items for item in items[1:]: if len(item.decorators) == 1: node = item.decorators[0] if isinstance(node, MemberExpr): if node.name == 'setter': # The first item represents the entire property. defn.items[0].var.is_settable_property = True else: self.fail("Decorated property not supported", item) item.func.accept(self) def analyze_function(self, defn: FuncItem) -> None: is_method = self.is_class_scope() tvarnodes = self.add_func_type_variables_to_symbol_table(defn) if defn.type: # Signature must be analyzed in the surrounding scope so that # class-level imported names and type variables are in scope. 
defn.type = self.anal_type(defn.type) self.check_function_signature(defn) if isinstance(defn, FuncDef): defn.info = self.type defn.type = set_callable_name(defn.type, defn) for init in defn.init: if init: init.rvalue.accept(self) self.function_stack.append(defn) self.enter() for v in defn.args: self.add_local(v, defn) for init_ in defn.init: if init_: init_.lvalues[0].accept(self) # The first argument of a non-static, non-class method is like 'self' # (though the name could be different), having the enclosing class's # instance type. if is_method and not defn.is_static and not defn.is_class and defn.args: defn.args[0].is_self = True defn.body.accept(self) disable_typevars(tvarnodes) self.leave() self.function_stack.pop() def add_func_type_variables_to_symbol_table( self, defn: FuncItem) -> List[SymbolTableNode]: nodes = [] # type: List[SymbolTableNode] if defn.type: tt = defn.type names = self.type_var_names() items = cast(CallableType, tt).variables for i, item in enumerate(items): name = item.name if name in names: self.name_already_defined(name, defn) node = self.bind_type_var(name, -i - 1, defn) nodes.append(node) names.add(name) return nodes def type_var_names(self) -> Set[str]: if not self.type: return set() else: return set(self.type.type_vars) def bind_type_var(self, fullname: str, id: int, context: Context) -> SymbolTableNode: node = self.lookup_qualified(fullname, context) node.kind = BOUND_TVAR node.tvar_id = id return node def check_function_signature(self, fdef: FuncItem) -> None: sig = cast(CallableType, fdef.type) if len(sig.arg_types) < len(fdef.args): self.fail('Type signature has too few arguments', fdef) elif len(sig.arg_types) > len(fdef.args): self.fail('Type signature has too many arguments', fdef) def visit_class_def(self, defn: ClassDef) -> None: self.clean_up_bases_and_infer_type_variables(defn) self.setup_class_def_analysis(defn) self.bind_class_type_vars(defn) self.analyze_base_classes(defn) self.analyze_metaclass(defn) for decorator in defn.decorators: self.analyze_class_decorator(defn, decorator) self.enter_class(defn) self.setup_is_builtinclass(defn) # Analyze class body. defn.defs.accept(self) self.calculate_abstract_status(defn.info) self.setup_type_promotion(defn) self.leave_class() self.unbind_class_type_vars() def enter_class(self, defn: ClassDef) -> None: # Remember previous active class self.type_stack.append(self.type) self.locals.append(None) # Add class scope self.block_depth.append(-1) # The class body increments this to 0 self.type = defn.info def leave_class(self) -> None: """ Restore analyzer state. """ self.block_depth.pop() self.locals.pop() self.type = self.type_stack.pop() def bind_class_type_vars(self, defn: ClassDef) -> None: """ Unbind type variables of previously active class and bind the type variables for the active class. """ if self.bound_tvars: disable_typevars(self.bound_tvars) self.tvar_stack.append(self.bound_tvars) self.bound_tvars = self.bind_class_type_variables_in_symbol_table(defn.info) def unbind_class_type_vars(self) -> None: """ Unbind the active class' type vars and rebind the type vars of the previously active class. 
""" disable_typevars(self.bound_tvars) self.bound_tvars = self.tvar_stack.pop() if self.bound_tvars: enable_typevars(self.bound_tvars) def analyze_class_decorator(self, defn: ClassDef, decorator: Node) -> None: decorator.accept(self) def setup_is_builtinclass(self, defn: ClassDef): for decorator in defn.decorators: if refers_to_fullname(decorator, 'typing.builtinclass'): defn.is_builtinclass = True if defn.fullname == 'builtins.object': # Only 'object' is marked as a built-in class, as otherwise things elsewhere # would break. We need a better way of dealing with built-in classes. defn.is_builtinclass = True def calculate_abstract_status(self, typ: TypeInfo) -> None: """Calculate abstract status of a class. Set is_abstract of the type to True if the type has an unimplemented abstract attribute. Also compute a list of abstract attributes. """ concrete = set() # type: Set[str] abstract = [] # type: List[str] for base in typ.mro: for name, symnode in base.names.items(): node = symnode.node if isinstance(node, OverloadedFuncDef): # Unwrap an overloaded function definition. We can just # check arbitrarily the first overload item. If the # different items have a different abstract status, there # should be an error reported elsewhere. func = node.items[0] # type: Node else: func = node if isinstance(func, Decorator): fdef = func.func if fdef.is_abstract and name not in concrete: typ.is_abstract = True abstract.append(name) concrete.add(name) typ.abstract_attributes = sorted(abstract) def setup_type_promotion(self, defn: ClassDef) -> None: """Setup extra, ad-hoc subtyping relationships between classes (promotion). This includes things like 'int' being compatible with 'float'. """ promote_target = None # type: Type for decorator in defn.decorators: if isinstance(decorator, CallExpr): analyzed = decorator.analyzed if isinstance(analyzed, PromoteExpr): # _promote class decorator (undocumented faeture). promote_target = analyzed.type if not promote_target: promotions = (TYPE_PROMOTIONS_PYTHON3 if self.pyversion[0] >= 3 else TYPE_PROMOTIONS_PYTHON2) if defn.fullname in promotions: promote_target = self.named_type_or_none(promotions[defn.fullname]) defn.info._promote = promote_target def clean_up_bases_and_infer_type_variables(self, defn: ClassDef) -> None: """Remove extra base classes such as Generic and infer type vars. For example, consider this class: . class Foo(Bar, Generic[T]): ... Now we will remove Generic[T] from bases of Foo and infer that the type variable 'T' is a type argument of Foo. Note that this is performed *before* semantic analysis. """ removed = [] # type: List[int] type_vars = [] # type: List[TypeVarDef] for i, base_expr in enumerate(defn.base_type_exprs): try: base = expr_to_unanalyzed_type(base_expr) except TypeTranslationError: # This error will be caught later. 
continue tvars = self.analyze_typevar_declaration(base) if tvars is not None: if type_vars: self.fail('Duplicate Generic in bases', defn) removed.append(i) for j, (name, tvar_expr) in enumerate(tvars): type_vars.append(TypeVarDef(name, j + 1, tvar_expr.values, self.object_type(), tvar_expr.variance)) if type_vars: defn.type_vars = type_vars if defn.info: defn.info.type_vars = [tv.name for tv in type_vars] for i in reversed(removed): del defn.base_type_exprs[i] def analyze_typevar_declaration(self, t: Type) -> List[Tuple[str, TypeVarExpr]]: if not isinstance(t, UnboundType): return None unbound = cast(UnboundType, t) sym = self.lookup_qualified(unbound.name, unbound) if sym is None: return None if sym.node.fullname() == 'typing.Generic': tvars = [] # type: List[Tuple[str, TypeVarExpr]] for arg in unbound.args: tvar = self.analyze_unbound_tvar(arg) if tvar: tvars.append(tvar) else: self.fail('Free type variable expected in %s[...]' % sym.node.name(), t) return tvars return None def analyze_unbound_tvar(self, t: Type) -> Tuple[str, TypeVarExpr]: if not isinstance(t, UnboundType): return None unbound = cast(UnboundType, t) sym = self.lookup_qualified(unbound.name, unbound) if sym is not None and sym.kind == UNBOUND_TVAR: return unbound.name, cast(TypeVarExpr, sym.node) return None def setup_class_def_analysis(self, defn: ClassDef) -> None: """Prepare for the analysis of a class definition.""" if not defn.info: defn.info = TypeInfo(SymbolTable(), defn) defn.info._fullname = defn.info.name() if self.is_func_scope() or self.type: kind = MDEF if self.is_func_scope(): kind = LDEF self.add_symbol(defn.name, SymbolTableNode(kind, defn.info), defn) def analyze_base_classes(self, defn: ClassDef) -> None: """Analyze and set up base classes.""" for base_expr in defn.base_type_exprs: # The base class is originallly an expression; convert it to a type. try: base = self.expr_to_analyzed_type(base_expr) except TypeTranslationError: self.fail('Invalid base class', base_expr) return if isinstance(base, TupleType): if defn.info.tuple_type: self.fail("Class has two incompatible bases derived from tuple", defn) defn.info.tuple_type = base base = base.fallback if (not self.is_stub_file and not defn.info.is_named_tuple and base.type.fullname() == 'builtins.tuple'): self.fail("Tuple[...] not supported as a base class outside a stub file", defn) if isinstance(base, Instance) or isinstance(base, TupleType): defn.base_types.append(base) elif not isinstance(base, UnboundType): self.fail('Invalid base class', base_expr) if isinstance(base, Instance): defn.info.is_enum = base.type.fullname() == 'enum.Enum' # Add 'object' as implicit base if there is no other base class. if (not defn.base_types and defn.fullname != 'builtins.object'): obj = self.object_type() defn.base_types.insert(0, obj) defn.info.bases = defn.base_types if not self.verify_base_classes(defn): return try: defn.info.calculate_mro() except MroError: self.fail("Cannot determine consistent method resolution order " '(MRO) for "%s"' % defn.name, defn) else: # If there are cyclic imports, we may be missing 'object' in # the MRO. Fix MRO if needed. if defn.info.mro[-1].fullname() != 'builtins.object': defn.info.mro.append(self.object_type().type) def expr_to_analyzed_type(self, expr: Node) -> Type: if isinstance(expr, CallExpr): expr.accept(self) info = self.check_namedtuple(expr) if info is None: # Some form of namedtuple is the only valid type that looks like a call # expression. This isn't a valid type. 
raise TypeTranslationError() fallback = Instance(info, []) return TupleType(info.tuple_type.items, fallback=fallback) typ = expr_to_unanalyzed_type(expr) return self.anal_type(typ) def verify_base_classes(self, defn: ClassDef) -> bool: info = defn.info for base in info.bases: baseinfo = base.type if self.is_base_class(info, baseinfo): self.fail('Cycle in inheritance hierarchy', defn) # Clear bases to forcefully get rid of the cycle. info.bases = [] if baseinfo.fullname() == 'builtins.bool': self.fail("'%s' is not a valid base class" % baseinfo.name(), defn) return False dup = find_duplicate(info.direct_base_classes()) if dup: self.fail('Duplicate base class "%s"' % dup.name(), defn) return False return True def is_base_class(self, t: TypeInfo, s: TypeInfo) -> bool: """Determine if t is a base class of s (but do not use mro).""" # Search the base class graph for t, starting from s. worklist = [s] visited = {s} while worklist: nxt = worklist.pop() if nxt == t: return True for base in nxt.bases: if base.type not in visited: worklist.append(base.type) visited.add(base.type) return False def analyze_metaclass(self, defn: ClassDef) -> None: if defn.metaclass: sym = self.lookup_qualified(defn.metaclass, defn) if sym is not None and not isinstance(sym.node, TypeInfo): self.fail("Invalid metaclass '%s'" % defn.metaclass, defn) def object_type(self) -> Instance: return self.named_type('__builtins__.object') def named_type(self, qualified_name: str, args: List[Type] = None) -> Instance: sym = self.lookup_qualified(qualified_name, None) return Instance(cast(TypeInfo, sym.node), args or []) def named_type_or_none(self, qualified_name: str) -> Instance: sym = self.lookup_fully_qualified_or_none(qualified_name) if not sym: return None return Instance(cast(TypeInfo, sym.node), []) def is_instance_type(self, t: Type) -> bool: return isinstance(t, Instance) def bind_class_type_variables_in_symbol_table( self, info: TypeInfo) -> List[SymbolTableNode]: vars = info.type_vars nodes = [] # type: List[SymbolTableNode] for index, var in enumerate(vars, 1): node = self.bind_type_var(var, index, info) nodes.append(node) return nodes def visit_import(self, i: Import) -> None: for id, as_id in i.ids: if as_id != id: self.add_module_symbol(id, as_id, i) else: base = id.split('.')[0] self.add_module_symbol(base, base, i) def add_module_symbol(self, id: str, as_id: str, context: Context) -> None: if id in self.modules: m = self.modules[id] self.add_symbol(as_id, SymbolTableNode(MODULE_REF, m, self.cur_mod_id), context) else: self.add_unknown_symbol(as_id, context) def visit_import_from(self, i: ImportFrom) -> None: i_id = self.correct_relative_import(i) if i_id in self.modules: m = self.modules[i_id] for id, as_id in i.names: node = m.names.get(id, None) if node: node = self.normalize_type_alias(node, i) if not node: return symbol = SymbolTableNode(node.kind, node.node, self.cur_mod_id, node.type_override) self.add_symbol(as_id, symbol, i) else: message = "Module has no attribute '{}'".format(id) extra = self.undefined_name_extra_info('{}.{}'.format(i_id, id)) if extra: message += " {}".format(extra) self.fail(message, i) else: for id, as_id in i.names: self.add_unknown_symbol(as_id, i) def normalize_type_alias(self, node: SymbolTableNode, ctx: Context) -> SymbolTableNode: if node.fullname in type_aliases: # Node refers to an aliased type such as typing.List; normalize. 
node = self.lookup_qualified(type_aliases[node.fullname], ctx) return node def correct_relative_import(self, node: Union[ImportFrom, ImportAll]) -> str: if node.relative == 0: return node.id parts = self.cur_mod_id.split(".") cur_mod_id = self.cur_mod_id rel = node.relative if self.cur_mod_node.is_package_init_file(): rel -= 1 if len(parts) < rel: self.fail("Relative import climbs too many namespaces", node) if rel != 0: cur_mod_id = ".".join(parts[:-rel]) return cur_mod_id + (("." + node.id) if node.id else "") def visit_import_all(self, i: ImportAll) -> None: i_id = self.correct_relative_import(i) if i_id in self.modules: m = self.modules[i_id] for name, node in m.names.items(): node = self.normalize_type_alias(node, i) if not name.startswith('_'): self.add_symbol(name, SymbolTableNode(node.kind, node.node, self.cur_mod_id), i) else: # Don't add any dummy symbols for 'from x import *' if 'x' is unknown. pass def add_unknown_symbol(self, name: str, context: Context) -> None: var = Var(name) var._fullname = self.qualified_name(name) var.is_ready = True var.type = AnyType() self.add_symbol(name, SymbolTableNode(GDEF, var, self.cur_mod_id), context) # # Statements # def visit_block(self, b: Block) -> None: if b.is_unreachable: return self.block_depth[-1] += 1 for s in b.body: s.accept(self) self.block_depth[-1] -= 1 def visit_block_maybe(self, b: Block) -> None: if b: self.visit_block(b) def anal_type(self, t: Type, allow_tuple_literal: bool = False) -> Type: if t: if allow_tuple_literal: # Types such as (t1, t2, ...) only allowed in assignment statements. They'll # generate errors elsewhere, and Tuple[t1, t2, ...] must be used instead. if isinstance(t, TupleType): # Unlike TypeAnalyser, also allow implicit tuple types (without Tuple[...]). star_count = sum(1 for item in t.items if isinstance(item, StarType)) if star_count > 1: self.fail('At most one star type allowed in a tuple', t) return None items = [self.anal_type(item, True) for item in t.items] return TupleType(items, self.builtin_type('builtins.tuple'), t.line) a = TypeAnalyser(self.lookup_qualified, self.lookup_fully_qualified, self.fail) return t.accept(a) else: return None def visit_assignment_stmt(self, s: AssignmentStmt) -> None: for lval in s.lvalues: self.analyze_lvalue(lval, explicit_type=s.type is not None) s.rvalue.accept(self) if s.type: allow_tuple_literal = isinstance(s.lvalues[-1], (TupleExpr, ListExpr)) s.type = self.anal_type(s.type, allow_tuple_literal) else: # For simple assignments, allow binding type aliases. if (s.type is None and len(s.lvalues) == 1 and isinstance(s.lvalues[0], NameExpr)): res = analyze_type_alias(s.rvalue, self.lookup_qualified, self.lookup_fully_qualified, self.fail) if res and (not isinstance(res, Instance) or cast(Instance, res).args): # TODO: What if this gets reassigned? name = cast(NameExpr, s.lvalues[0]) node = self.lookup(name.name, name) node.kind = TYPE_ALIAS node.type_override = res if isinstance(s.rvalue, IndexExpr): s.rvalue.analyzed = TypeAliasExpr(res) if s.type: # Store type into nodes. for lvalue in s.lvalues: self.store_declared_types(lvalue, s.type) self.check_and_set_up_type_alias(s) self.process_typevar_declaration(s) self.process_namedtuple_definition(s) def check_and_set_up_type_alias(self, s: AssignmentStmt) -> None: """Check if assignment creates a type alias and set it up as needed.""" # For now, type aliases only work at the top level of a module. 
if (len(s.lvalues) == 1 and not self.is_func_scope() and not self.type and not s.type): lvalue = s.lvalues[0] if isinstance(lvalue, NameExpr): if not lvalue.is_def: # Only a definition can create a type alias, not regular assignment. return rvalue = s.rvalue if isinstance(rvalue, RefExpr): node = rvalue.node if isinstance(node, TypeInfo): # TODO: We should record the fact that this is a variable # that refers to a type, rather than making this # just an alias for the type. self.globals[lvalue.name].node = node def analyze_lvalue(self, lval: Node, nested: bool = False, add_global: bool = False, explicit_type: bool = False) -> None: """Analyze an lvalue or assignment target. Only if add_global is True, add name to globals table. If nested is true, the lvalue is within a tuple or list lvalue expression. """ if isinstance(lval, NameExpr): nested_global = (not self.is_func_scope() and self.block_depth[-1] > 0 and not self.type) if (add_global or nested_global) and lval.name not in self.globals: # Define new global name. v = Var(lval.name) v._fullname = self.qualified_name(lval.name) v.is_ready = False # Type not inferred yet lval.node = v lval.is_def = True lval.kind = GDEF lval.fullname = v._fullname self.globals[lval.name] = SymbolTableNode(GDEF, v, self.cur_mod_id) elif isinstance(lval.node, Var) and lval.is_def: # Since the is_def flag is set, this must have been analyzed # already in the first pass and added to the symbol table. v = cast(Var, lval.node) assert v.name() in self.globals elif (self.is_func_scope() and lval.name not in self.locals[-1] and lval.name not in self.global_decls[-1] and lval.name not in self.nonlocal_decls[-1]): # Define new local name. v = Var(lval.name) lval.node = v lval.is_def = True lval.kind = LDEF lval.fullname = lval.name self.add_local(v, lval) elif not self.is_func_scope() and (self.type and lval.name not in self.type.names): # Define a new attribute within class body. v = Var(lval.name) v.info = self.type v.is_initialized_in_class = True lval.node = v lval.is_def = True lval.kind = MDEF lval.fullname = lval.name self.type.names[lval.name] = SymbolTableNode(MDEF, v) else: # Bind to an existing name. 
if explicit_type: self.name_already_defined(lval.name, lval) lval.accept(self) self.check_lvalue_validity(lval.node, lval) elif isinstance(lval, MemberExpr): if not add_global: self.analyze_member_lvalue(lval) if explicit_type and not self.is_self_member_ref(lval): self.fail('Type cannot be declared in assignment to non-self ' 'attribute', lval) elif isinstance(lval, IndexExpr): if explicit_type: self.fail('Unexpected type declaration', lval) if not add_global: lval.accept(self) elif (isinstance(lval, TupleExpr) or isinstance(lval, ListExpr)): items = cast(Any, lval).items if len(items) == 0 and isinstance(lval, TupleExpr): self.fail("Can't assign to ()", lval) self.analyze_tuple_or_list_lvalue(cast(Union[ListExpr, TupleExpr], lval), add_global, explicit_type) elif isinstance(lval, StarExpr): if nested: self.analyze_lvalue(lval.expr, nested, add_global, explicit_type) else: self.fail('Starred assignment target must be in a list or tuple', lval) else: self.fail('Invalid assignment target', lval) def analyze_tuple_or_list_lvalue(self, lval: Union[ListExpr, TupleExpr], add_global: bool = False, explicit_type: bool = False) -> None: """Analyze an lvalue or assignment target that is a list or tuple.""" items = lval.items star_exprs = [cast(StarExpr, item) for item in items if isinstance(item, StarExpr)] if len(star_exprs) > 1: self.fail('Two starred expressions in assignment', lval) else: if len(star_exprs) == 1: star_exprs[0].valid = True for i in items: self.analyze_lvalue(i, nested=True, add_global=add_global, explicit_type = explicit_type) def analyze_member_lvalue(self, lval: MemberExpr) -> None: lval.accept(self) if (self.is_self_member_ref(lval) and self.type.get(lval.name) is None): # Implicit attribute definition in __init__. lval.is_def = True v = Var(lval.name) v.info = self.type v.is_ready = False lval.def_var = v lval.node = v self.type.names[lval.name] = SymbolTableNode(MDEF, v) self.check_lvalue_validity(lval.node, lval) def is_self_member_ref(self, memberexpr: MemberExpr) -> bool: """Does memberexpr to refer to an attribute of self?""" if not isinstance(memberexpr.expr, NameExpr): return False node = (cast(NameExpr, memberexpr.expr)).node return isinstance(node, Var) and (cast(Var, node)).is_self def check_lvalue_validity(self, node: Node, ctx: Context) -> None: if isinstance(node, (FuncDef, TypeInfo, TypeVarExpr)): self.fail('Invalid assignment target', ctx) def store_declared_types(self, lvalue: Node, typ: Type) -> None: if isinstance(typ, StarType) and not isinstance(lvalue, StarExpr): self.fail('Star type only allowed for starred expressions', lvalue) if isinstance(lvalue, RefExpr): lvalue.is_def = False if isinstance(lvalue.node, Var): var = cast(Var, lvalue.node) var.type = typ var.is_ready = True # If node is not a variable, we'll catch it elsewhere. elif isinstance(lvalue, TupleExpr): if isinstance(typ, TupleType): if len(lvalue.items) != len(typ.items): self.fail('Incompatible number of tuple items', lvalue) return for item, itemtype in zip(lvalue.items, typ.items): self.store_declared_types(item, itemtype) else: self.fail('Tuple type expected for multiple variables', lvalue) elif isinstance(lvalue, StarExpr): if isinstance(typ, StarType): self.store_declared_types(lvalue.expr, typ.type) else: self.fail('Star type expected for starred expression', lvalue) else: # This has been flagged elsewhere as an error, so just ignore here. 
pass def process_typevar_declaration(self, s: AssignmentStmt) -> None: """Check if s declares a TypeVar; it yes, store it in symbol table.""" call = self.get_typevar_declaration(s) if not call: return lvalue = cast(NameExpr, s.lvalues[0]) name = lvalue.name if not lvalue.is_def: if s.type: self.fail("Cannot declare the type of a type variable", s) else: self.fail("Cannot redefine '%s' as a type variable" % name, s) return if not self.check_typevar_name(call, name, s): return # Constraining types n_values = call.arg_kinds[1:].count(ARG_POS) values = self.analyze_types(call.args[1:1 + n_values]) variance = self.process_typevar_parameters(call.args[1 + n_values:], call.arg_names[1 + n_values:], call.arg_kinds[1 + n_values:], s) if variance is None: return # Yes, it's a valid type variable definition! Add it to the symbol table. node = self.lookup(name, s) node.kind = UNBOUND_TVAR TypeVar = TypeVarExpr(name, node.fullname, values, variance) TypeVar.line = call.line call.analyzed = TypeVar node.node = TypeVar def check_typevar_name(self, call: CallExpr, name: str, context: Context) -> bool: if len(call.args) < 1: self.fail("Too few arguments for TypeVar()", context) return False if not isinstance(call.args[0], StrExpr) or not call.arg_kinds[0] == ARG_POS: self.fail("TypeVar() expects a string literal as first argument", context) return False if cast(StrExpr, call.args[0]).value != name: self.fail("Unexpected TypeVar() argument value", context) return False return True def get_typevar_declaration(self, s: AssignmentStmt) -> Optional[CallExpr]: """Returns the TypeVar() call expression if `s` is a type var declaration or None otherwise. """ if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], NameExpr): return None if not isinstance(s.rvalue, CallExpr): return None call = cast(CallExpr, s.rvalue) if not isinstance(call.callee, RefExpr): return None callee = cast(RefExpr, call.callee) if callee.fullname != 'typing.TypeVar': return None return call def process_typevar_parameters(self, args: List[Node], names: List[Optional[str]], kinds: List[int], context: Context) -> Optional[int]: covariant = False contravariant = False for param_value, param_name, param_kind in zip(args, names, kinds): if not param_kind == ARG_NAMED: self.fail("Unexpected argument to TypeVar()", context) return None if param_name == 'covariant': if isinstance(param_value, NameExpr): if param_value.name == 'True': covariant = True else: self.fail("TypeVar 'covariant' may only be 'True'", context) return None else: self.fail("TypeVar 'covariant' may only be 'True'", context) return None elif param_name == 'contravariant': if isinstance(param_value, NameExpr): if param_value.name == 'True': contravariant = True else: self.fail("TypeVar 'contravariant' may only be 'True'", context) return None else: self.fail("TypeVar 'contravariant' may only be 'True'", context) return None elif param_name == 'bound': self.fail("TypeVar 'bound' argument not supported yet", context) return None elif param_name == 'values': # Probably using obsolete syntax with values=(...). Explain the current syntax. self.fail("TypeVar 'values' argument not supported", context) self.fail("Use TypeVar('T', t, ...) 
instead of TypeVar('T', values=(t, ...))", context) return None else: self.fail("Unexpected argument to TypeVar(): {}".format(param_name), context) return None if covariant and contravariant: self.fail("TypeVar cannot be both covariant and contravariant", context) return None elif covariant: return COVARIANT elif contravariant: return CONTRAVARIANT else: return INVARIANT def process_namedtuple_definition(self, s: AssignmentStmt) -> None: """Check if s defines a namedtuple; if yes, store the definition in symbol table.""" if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], NameExpr): return named_tuple = self.check_namedtuple(s.rvalue) if named_tuple is None: return # Yes, it's a valid namedtuple definition. Add it to the symbol table. lvalue = cast(NameExpr, s.lvalues[0]) name = lvalue.name node = self.lookup(name, s) node.kind = GDEF # TODO locally defined namedtuple # TODO call.analyzed node.node = named_tuple def check_namedtuple(self, node: Node) -> TypeInfo: """Check if a call defines a namedtuple. If it does, return the corresponding TypeInfo. Return None otherwise. If the definition is invalid but looks like a namedtuple, report errors but return (some) TypeInfo. """ if not isinstance(node, CallExpr): return None call = cast(CallExpr, node) if not isinstance(call.callee, RefExpr): return None callee = cast(RefExpr, call.callee) fullname = callee.fullname if fullname not in ('collections.namedtuple', 'typing.NamedTuple'): return None items, types = self.parse_namedtuple_args(call, fullname) if not items: # Error. Construct dummy return value. return self.build_namedtuple_typeinfo('namedtuple', [], []) else: name = cast(StrExpr, call.args[0]).value info = self.build_namedtuple_typeinfo(name, items, types) call.analyzed = NamedTupleExpr(info).set_line(call.line) return info def parse_namedtuple_args(self, call: CallExpr, fullname: str) -> Tuple[List[str], List[Type]]: # TODO Share code with check_argument_count in checkexpr.py? args = call.args if len(args) < 2: return self.fail_namedtuple_arg("Too few arguments for namedtuple()", call) if len(args) > 2: return self.fail_namedtuple_arg("Too many arguments for namedtuple()", call) if call.arg_kinds != [ARG_POS, ARG_POS]: return self.fail_namedtuple_arg("Unexpected arguments to namedtuple()", call) if not isinstance(args[0], StrExpr): return self.fail_namedtuple_arg( "namedtuple() expects a string literal as the first argument", call) types = [] # type: List[Type] if not isinstance(args[1], ListExpr): if fullname == 'collections.namedtuple' and isinstance(args[1], StrExpr): str_expr = cast(StrExpr, args[1]) items = str_expr.value.split() else: return self.fail_namedtuple_arg( "List literal expected as the second argument to namedtuple()", call) else: listexpr = cast(ListExpr, args[1]) if fullname == 'collections.namedtuple': # The fields argument contains just names, with implicit Any types. if any(not isinstance(item, StrExpr) for item in listexpr.items): return self.fail_namedtuple_arg("String literal expected as namedtuple() item", call) items = [cast(StrExpr, item).value for item in listexpr.items] else: # The fields argument contains (name, type) tuples. 
items, types = self.parse_namedtuple_fields_with_types(listexpr.items, call) if not types: types = [AnyType() for _ in items] return items, types def parse_namedtuple_fields_with_types(self, nodes: List[Node], context: Context) -> Tuple[List[str], List[Type]]: items = [] # type: List[str] types = [] # type: List[Type] for item in nodes: if isinstance(item, TupleExpr): if len(item.items) != 2: return self.fail_namedtuple_arg("Invalid NamedTuple field definition", item) name, type_node = item.items if isinstance(name, StrExpr): items.append(name.value) else: return self.fail_namedtuple_arg("Invalid NamedTuple() field name", item) try: type = expr_to_unanalyzed_type(type_node) except TypeTranslationError: return self.fail_namedtuple_arg('Invalid field type', type_node) types.append(self.anal_type(type)) else: return self.fail_namedtuple_arg("Tuple expected as NamedTuple() field", item) return items, types def fail_namedtuple_arg(self, message: str, context: Context) -> Tuple[List[str], List[Type]]: self.fail(message, context) return [], [] def build_namedtuple_typeinfo(self, name: str, items: List[str], types: List[Type]) -> TypeInfo: symbols = SymbolTable() class_def = ClassDef(name, Block([])) class_def.fullname = self.qualified_name(name) info = TypeInfo(symbols, class_def) # Add named tuple items as attributes. # TODO: Make them read-only. for item, typ in zip(items, types): var = Var(item) var.info = info var.type = typ symbols[item] = SymbolTableNode(MDEF, var) # Add a __init__ method. init = self.make_namedtuple_init(info, items, types) symbols['__init__'] = SymbolTableNode(MDEF, init) info.tuple_type = TupleType(types, self.named_type('__builtins__.tuple', [AnyType()])) info.is_named_tuple = True info.mro = [info] + info.tuple_type.fallback.type.mro info.bases = [info.tuple_type.fallback] return info def make_namedtuple_init(self, info: TypeInfo, items: List[str], types: List[Type]) -> FuncDef: args = [Var(item) for item in items] for arg, type in zip(args, types): arg.type = type # TODO: Make sure that the self argument name is not visible? 
args = [Var('__self')] + args arg_kinds = [ARG_POS] * (len(items) + 1) signature = CallableType([cast(Type, None)] + types, arg_kinds, ['__self'] + items, NoneTyp(), self.named_type('__builtins__.function'), name=info.name()) return FuncDef('__init__', args, arg_kinds, [None] * (len(items) + 1), Block([]), typ=signature) def analyze_types(self, items: List[Node]) -> List[Type]: result = [] # type: List[Type] for node in items: try: result.append(self.anal_type(expr_to_unanalyzed_type(node))) except TypeTranslationError: self.fail('Type expected', node) result.append(AnyType()) return result def visit_decorator(self, dec: Decorator) -> None: for d in dec.decorators: d.accept(self) removed = [] # type: List[int] no_type_check = False for i, d in enumerate(dec.decorators): if refers_to_fullname(d, 'abc.abstractmethod'): removed.append(i) dec.func.is_abstract = True self.check_decorated_function_is_method('abstractmethod', dec) elif refers_to_fullname(d, 'asyncio.tasks.coroutine'): removed.append(i) dec.func.is_coroutine = True elif refers_to_fullname(d, 'builtins.staticmethod'): removed.append(i) dec.func.is_static = True dec.var.is_staticmethod = True self.check_decorated_function_is_method('staticmethod', dec) elif refers_to_fullname(d, 'builtins.classmethod'): removed.append(i) dec.func.is_class = True dec.var.is_classmethod = True self.check_decorated_function_is_method('classmethod', dec) elif refers_to_fullname(d, 'builtins.property'): removed.append(i) dec.func.is_property = True dec.var.is_property = True self.check_decorated_function_is_method('property', dec) if len(dec.func.args) > 1: self.fail('Too many arguments', dec.func) elif refers_to_fullname(d, 'typing.no_type_check'): dec.var.type = AnyType() no_type_check = True for i in reversed(removed): del dec.decorators[i] if not dec.is_overload or dec.var.is_property: if self.is_func_scope(): self.add_symbol(dec.var.name(), SymbolTableNode(LDEF, dec), dec) elif self.type: dec.var.info = self.type dec.var.is_initialized_in_class = True self.add_symbol(dec.var.name(), SymbolTableNode(MDEF, dec), dec) if dec.decorators and dec.var.is_property: self.fail('Decorated property not supported', dec) if not no_type_check: dec.func.accept(self) if not dec.decorators and not dec.var.is_property: # No non-special decorators left. We can trivially infer the type # of the function here. 
dec.var.type = dec.func.type def check_decorated_function_is_method(self, decorator: str, context: Context) -> None: if not self.type or self.is_func_scope(): self.fail("'%s' used with a non-method" % decorator, context) def visit_expression_stmt(self, s: ExpressionStmt) -> None: s.expr.accept(self) def visit_return_stmt(self, s: ReturnStmt) -> None: if not self.is_func_scope(): self.fail("'return' outside function", s) if s.expr: s.expr.accept(self) def visit_raise_stmt(self, s: RaiseStmt) -> None: if s.expr: s.expr.accept(self) if s.from_expr: s.from_expr.accept(self) def visit_yield_stmt(self, s: YieldStmt) -> None: if not self.is_func_scope(): self.fail("'yield' outside function", s) else: self.function_stack[-1].is_generator = True if s.expr: s.expr.accept(self) def visit_yield_from_stmt(self, s: YieldFromStmt) -> None: if not self.is_func_scope(): self.fail("'yield from' outside function", s) if s.expr: s.expr.accept(self) def visit_assert_stmt(self, s: AssertStmt) -> None: if s.expr: s.expr.accept(self) def visit_operator_assignment_stmt(self, s: OperatorAssignmentStmt) -> None: s.lvalue.accept(self) s.rvalue.accept(self) def visit_while_stmt(self, s: WhileStmt) -> None: s.expr.accept(self) self.loop_depth += 1 s.body.accept(self) self.loop_depth -= 1 self.visit_block_maybe(s.else_body) def visit_for_stmt(self, s: ForStmt) -> None: s.expr.accept(self) # Bind index variables and check if they define new names. self.analyze_lvalue(s.index) self.loop_depth += 1 self.visit_block(s.body) self.loop_depth -= 1 self.visit_block_maybe(s.else_body) def visit_break_stmt(self, s: BreakStmt) -> None: if self.loop_depth == 0: self.fail("'break' outside loop", s) def visit_continue_stmt(self, s: ContinueStmt) -> None: if self.loop_depth == 0: self.fail("'continue' outside loop", s) def visit_if_stmt(self, s: IfStmt) -> None: infer_reachability_of_if_statement(s, pyversion=self.pyversion) for i in range(len(s.expr)): s.expr[i].accept(self) self.visit_block(s.body[i]) self.visit_block_maybe(s.else_body) def visit_try_stmt(self, s: TryStmt) -> None: self.analyze_try_stmt(s, self) def analyze_try_stmt(self, s: TryStmt, visitor: NodeVisitor, add_global: bool = False) -> None: s.body.accept(visitor) for type, var, handler in zip(s.types, s.vars, s.handlers): if type: type.accept(visitor) if var: self.analyze_lvalue(var, add_global=add_global) handler.accept(visitor) if s.else_body: s.else_body.accept(visitor) if s.finally_body: s.finally_body.accept(visitor) def visit_with_stmt(self, s: WithStmt) -> None: for e in s.expr: e.accept(self) for n in s.target: if n: self.analyze_lvalue(n) self.visit_block(s.body) def visit_del_stmt(self, s: DelStmt) -> None: s.expr.accept(self) if not isinstance(s.expr, (IndexExpr, NameExpr, MemberExpr)): self.fail('Invalid delete target', s) def visit_global_decl(self, g: GlobalDecl) -> None: for name in g.names: if name in self.nonlocal_decls[-1]: self.fail("Name '{}' is nonlocal and global".format(name), g) self.global_decls[-1].add(name) def visit_nonlocal_decl(self, d: NonlocalDecl) -> None: if not self.is_func_scope(): self.fail("nonlocal declaration not allowed at module level", d) else: for name in d.names: for table in reversed(self.locals[:-1]): if table is not None and name in table: break else: self.fail("No binding for nonlocal '{}' found".format(name), d) if self.locals[-1] is not None and name in self.locals[-1]: self.fail("Name '{}' is already defined in local " "scope before nonlocal declaration".format(name), d) if name in self.global_decls[-1]: 
self.fail("Name '{}' is nonlocal and global".format(name), d) self.nonlocal_decls[-1].add(name) def visit_print_stmt(self, s: PrintStmt) -> None: for arg in s.args: arg.accept(self) if s.target: s.target.accept(self) def visit_exec_stmt(self, s: ExecStmt) -> None: s.expr.accept(self) if s.variables1: s.variables1.accept(self) if s.variables2: s.variables2.accept(self) # # Expressions # def visit_name_expr(self, expr: NameExpr) -> None: n = self.lookup(expr.name, expr) if n: if n.kind == BOUND_TVAR: self.fail("'{}' is a type variable and only valid in type " "context".format(expr.name), expr) else: expr.kind = n.kind expr.node = (cast(Node, n.node)) expr.fullname = n.fullname def visit_super_expr(self, expr: SuperExpr) -> None: if not self.type: self.fail('"super" used outside class', expr) return expr.info = self.type def visit_tuple_expr(self, expr: TupleExpr) -> None: for item in expr.items: item.accept(self) def visit_list_expr(self, expr: ListExpr) -> None: for item in expr.items: item.accept(self) def visit_set_expr(self, expr: SetExpr) -> None: for item in expr.items: item.accept(self) def visit_dict_expr(self, expr: DictExpr) -> None: for key, value in expr.items: key.accept(self) value.accept(self) def visit_star_expr(self, expr: StarExpr) -> None: if not expr.valid: self.fail('Can use starred expression only as assignment target', expr) else: expr.expr.accept(self) def visit_yield_from_expr(self, e: YieldFromExpr) -> None: if not self.is_func_scope(): # not sure self.fail("'yield from' outside function", e) if e.expr: e.expr.accept(self) def visit_call_expr(self, expr: CallExpr) -> None: """Analyze a call expression. Some call expressions are recognized as special forms, including cast(...) and Any(...). """ expr.callee.accept(self) if refers_to_fullname(expr.callee, 'typing.cast'): # Special form cast(...). if not self.check_fixed_args(expr, 2, 'cast'): return # Translate first argument to an unanalyzed type. try: target = expr_to_unanalyzed_type(expr.args[0]) except TypeTranslationError: self.fail('Cast target is not a type', expr) return # Piggyback CastExpr object to the CallExpr object; it takes # precedence over the CallExpr semantics. expr.analyzed = CastExpr(expr.args[1], target) expr.analyzed.line = expr.line expr.analyzed.accept(self) elif refers_to_fullname(expr.callee, 'typing.Any'): # Special form Any(...). if not self.check_fixed_args(expr, 1, 'Any'): return expr.analyzed = CastExpr(expr.args[0], AnyType()) expr.analyzed.line = expr.line expr.analyzed.accept(self) elif refers_to_fullname(expr.callee, 'typing._promote'): # Special form _promote(...). if not self.check_fixed_args(expr, 1, '_promote'): return # Translate first argument to an unanalyzed type. try: target = expr_to_unanalyzed_type(expr.args[0]) except TypeTranslationError: self.fail('Argument 1 to _promote is not a type', expr) return expr.analyzed = PromoteExpr(target) expr.analyzed.line = expr.line expr.analyzed.accept(self) else: # Normal call expression. for a in expr.args: a.accept(self) def check_fixed_args(self, expr: CallExpr, numargs: int, name: str) -> bool: """Verify that expr has specified number of positional args. Return True if the arguments are valid. 
""" s = 's' if numargs == 1: s = '' if len(expr.args) != numargs: self.fail("'%s' expects %d argument%s" % (name, numargs, s), expr) return False if expr.arg_kinds != [ARG_POS] * numargs: self.fail("'%s' must be called with %s positional argument%s" % (name, numargs, s), expr) return False return True def visit_member_expr(self, expr: MemberExpr) -> None: base = expr.expr base.accept(self) # Bind references to module attributes. if isinstance(base, RefExpr) and cast(RefExpr, base).kind == MODULE_REF: file = cast(MypyFile, cast(RefExpr, base).node) names = file.names n = names.get(expr.name, None) if n: n = self.normalize_type_alias(n, expr) if not n: return expr.kind = n.kind expr.fullname = n.fullname expr.node = n.node else: # We only catch some errors here; the rest will be # catched during type checking. # # This way we can report a larger number of errors in # one type checker run. If we reported errors here, # the build would terminate after semantic analysis # and we wouldn't be able to report any type errors. full_name = '%s.%s' % (file.fullname(), expr.name) if full_name in obsolete_name_mapping: self.fail("Module has no attribute %r (it's now called %r)" % ( expr.name, obsolete_name_mapping[full_name]), expr) def visit_op_expr(self, expr: OpExpr) -> None: expr.left.accept(self) expr.right.accept(self) def visit_comparison_expr(self, expr: ComparisonExpr) -> None: for operand in expr.operands: operand.accept(self) def visit_unary_expr(self, expr: UnaryExpr) -> None: expr.expr.accept(self) def visit_index_expr(self, expr: IndexExpr) -> None: expr.base.accept(self) if refers_to_class_or_function(expr.base): # Special form -- type application. # Translate index to an unanalyzed type. types = [] # type: List[Type] if isinstance(expr.index, TupleExpr): items = (cast(TupleExpr, expr.index)).items else: items = [expr.index] for item in items: try: typearg = expr_to_unanalyzed_type(item) except TypeTranslationError: self.fail('Type expected within [...]', expr) return typearg = self.anal_type(typearg) types.append(typearg) expr.analyzed = TypeApplication(expr.base, types) expr.analyzed.line = expr.line else: expr.index.accept(self) def visit_slice_expr(self, expr: SliceExpr) -> None: if expr.begin_index: expr.begin_index.accept(self) if expr.end_index: expr.end_index.accept(self) if expr.stride: expr.stride.accept(self) def visit_cast_expr(self, expr: CastExpr) -> None: expr.expr.accept(self) expr.type = self.anal_type(expr.type) def visit_type_application(self, expr: TypeApplication) -> None: expr.expr.accept(self) for i in range(len(expr.types)): expr.types[i] = self.anal_type(expr.types[i]) def visit_list_comprehension(self, expr: ListComprehension) -> None: expr.generator.accept(self) def visit_set_comprehension(self, expr: SetComprehension) -> None: expr.generator.accept(self) def visit_dictionary_comprehension(self, expr: DictionaryComprehension) -> None: self.enter() self.analyze_comp_for(expr) expr.key.accept(self) expr.value.accept(self) self.leave() def visit_generator_expr(self, expr: GeneratorExpr) -> None: self.enter() self.analyze_comp_for(expr) expr.left_expr.accept(self) self.leave() def analyze_comp_for(self, expr: Union[GeneratorExpr, DictionaryComprehension]) -> None: """Analyses the 'comp_for' part of comprehensions. That is the part after 'for' in (x for x in l if p) """ for index, sequence, conditions in zip(expr.indices, expr.sequences, expr.condlists): sequence.accept(self) # Bind index variables. 
self.analyze_lvalue(index) for cond in conditions: cond.accept(self) def visit_func_expr(self, expr: FuncExpr) -> None: self.analyze_function(expr) def visit_conditional_expr(self, expr: ConditionalExpr) -> None: expr.if_expr.accept(self) expr.cond.accept(self) expr.else_expr.accept(self) def visit__promote_expr(self, expr: PromoteExpr) -> None: expr.type = self.anal_type(expr.type) def visit_yield_expr(self, expr: YieldExpr) -> None: expr.expr.accept(self) # # Helpers # def lookup(self, name: str, ctx: Context) -> SymbolTableNode: """Look up an unqualified name in all active namespaces.""" # 1a. Name declared using 'global x' takes precedence if name in self.global_decls[-1]: if name in self.globals: return self.globals[name] else: self.name_not_defined(name, ctx) return None # 1b. Name declared using 'nonlocal x' takes precedence if name in self.nonlocal_decls[-1]: for table in reversed(self.locals[:-1]): if table is not None and name in table: return table[name] else: self.name_not_defined(name, ctx) return None # 2. Class attributes (if within class definition) if self.is_class_scope() and name in self.type.names: return self.type[name] # 3. Local (function) scopes for table in reversed(self.locals): if table is not None and name in table: return table[name] # 4. Current file global scope if name in self.globals: return self.globals[name] # 5. Builtins b = self.globals.get('__builtins__', None) if b: table = cast(MypyFile, b.node).names if name in table: if name[0] == "_" and name[1] != "_": self.name_not_defined(name, ctx) return None node = table[name] return node # Give up. self.name_not_defined(name, ctx) self.check_for_obsolete_short_name(name, ctx) return None def check_for_obsolete_short_name(self, name: str, ctx: Context) -> None: matches = [obsolete_name for obsolete_name in obsolete_name_mapping if obsolete_name.rsplit('.', 1)[-1] == name] if len(matches) == 1: self.fail("(Did you mean '{}'?)".format(obsolete_name_mapping[matches[0]]), ctx) def lookup_qualified(self, name: str, ctx: Context) -> SymbolTableNode: if '.' not in name: return self.lookup(name, ctx) else: parts = name.split('.') n = self.lookup(parts[0], ctx) # type: SymbolTableNode if n: for i in range(1, len(parts)): if isinstance(n.node, TypeInfo): result = cast(TypeInfo, n.node).get(parts[i]) if not result: # Fall back to direct lookup from the class. This can be important # when we have a forward reference of a nested class that is being # bound before the outer class has been fully semantically analyzed. # # A better approach would be to introduce a new analysis pass or # to move things around between passes, but this unblocks a common # use case even though this is a little limited in case there is # inheritance involved, for example. result = cast(TypeInfo, n.node).names.get(parts[i]) n = result elif isinstance(n.node, MypyFile): n = cast(MypyFile, n.node).names.get(parts[i], None) if not n: self.name_not_defined(name, ctx) break if n: n = self.normalize_type_alias(n, ctx) return n def builtin_type(self, fully_qualified_name: str) -> Instance: node = self.lookup_fully_qualified(fully_qualified_name) info = cast(TypeInfo, node.node) return Instance(info, []) def lookup_fully_qualified(self, name: str) -> SymbolTableNode: """Lookup a fully qualified name. Assume that the name is defined. This happens in the global namespace -- the local module namespace is ignored. """ assert '.' 
in name parts = name.split('.') n = self.modules[parts[0]] for i in range(1, len(parts) - 1): n = cast(MypyFile, n.names[parts[i]].node) return n.names[parts[-1]] def lookup_fully_qualified_or_none(self, name: str) -> SymbolTableNode: """Lookup a fully qualified name. Assume that the name is defined. This happens in the global namespace -- the local module namespace is ignored. """ assert '.' in name parts = name.split('.') n = self.modules[parts[0]] for i in range(1, len(parts) - 1): next_sym = n.names.get(parts[i]) if not next_sym: return None n = cast(MypyFile, next_sym.node) return n.names.get(parts[-1]) def qualified_name(self, n: str) -> str: return self.cur_mod_id + '.' + n def enter(self) -> None: self.locals.append(SymbolTable()) self.global_decls.append(set()) self.nonlocal_decls.append(set()) def leave(self) -> None: self.locals.pop() self.global_decls.pop() self.nonlocal_decls.pop() def is_func_scope(self) -> bool: return self.locals[-1] is not None def is_class_scope(self) -> bool: return self.type is not None and not self.is_func_scope() def add_symbol(self, name: str, node: SymbolTableNode, context: Context) -> None: if self.is_func_scope(): if name in self.locals[-1]: # Flag redefinition unless this is a reimport of a module. if not (node.kind == MODULE_REF and self.locals[-1][name].node == node.node): self.name_already_defined(name, context) self.locals[-1][name] = node elif self.type: self.type.names[name] = node else: existing = self.globals.get(name) if existing and (not isinstance(node.node, MypyFile) or existing.node != node.node): # Modules can be imported multiple times to support import # of multiple submodules of a package (e.g. a.x and a.y). if not (existing.type and node.type and is_same_type(existing.type, node.type)): # Only report an error if the symbol collision provides a different type. 
self.name_already_defined(name, context) self.globals[name] = node def add_var(self, v: Var, ctx: Context) -> None: if self.is_func_scope(): self.add_local(v, ctx) else: self.globals[v.name()] = SymbolTableNode(GDEF, v, self.cur_mod_id) v._fullname = self.qualified_name(v.name()) def add_local(self, v: Var, ctx: Context) -> None: if v.name() in self.locals[-1]: self.name_already_defined(v.name(), ctx) v._fullname = v.name() self.locals[-1][v.name()] = SymbolTableNode(LDEF, v) def add_local_func(self, defn: FuncBase, ctx: Context) -> None: # TODO combine with above if defn.name() in self.locals[-1]: self.name_already_defined(defn.name(), ctx) self.locals[-1][defn.name()] = SymbolTableNode(LDEF, defn) def check_no_global(self, n: str, ctx: Context, is_func: bool = False) -> None: if n in self.globals: if is_func and isinstance(self.globals[n].node, FuncDef): self.fail(("Name '{}' already defined (overload variants " "must be next to each other)").format(n), ctx) else: self.name_already_defined(n, ctx) def name_not_defined(self, name: str, ctx: Context) -> None: message = "Name '{}' is not defined".format(name) extra = self.undefined_name_extra_info(name) if extra: message += ' {}'.format(extra) self.fail(message, ctx) def name_already_defined(self, name: str, ctx: Context) -> None: self.fail("Name '{}' already defined".format(name), ctx) def fail(self, msg: str, ctx: Context) -> None: self.errors.report(ctx.get_line(), msg) def undefined_name_extra_info(self, fullname: str) -> Optional[str]: if fullname in obsolete_name_mapping: return "(it's now called '{}')".format(obsolete_name_mapping[fullname]) else: return None class FirstPass(NodeVisitor): """First phase of semantic analysis. See docstring of 'analyze' for a description of what this does. """ def __init__(self, sem: SemanticAnalyzer) -> None: self.sem = sem self.pyversion = sem.pyversion def analyze(self, file: MypyFile, fnam: str, mod_id: str) -> None: """Perform the first analysis pass. Resolve the full names of definitions not nested within functions and construct type info structures, but do not resolve inter-definition references such as base classes. Also add implicit definitions such as __name__. """ sem = self.sem sem.cur_mod_id = mod_id sem.errors.set_file(fnam) sem.globals = SymbolTable() sem.global_decls = [set()] sem.nonlocal_decls = [set()] sem.block_depth = [0] defs = file.defs # Add implicit definitions of module '__name__' etc. for name, t in implicit_module_attrs.items(): v = Var(name, UnboundType(t)) v._fullname = self.sem.qualified_name(name) self.sem.globals[name] = SymbolTableNode(GDEF, v, self.sem.cur_mod_id) for d in defs: d.accept(self) # Add implicit definition of 'None' to builtins, as we cannot define a # variable with a None type explicitly. if mod_id == 'builtins': v = Var('None', NoneTyp()) v._fullname = self.sem.qualified_name('None') self.sem.globals['None'] = SymbolTableNode(GDEF, v, self.sem.cur_mod_id) def visit_block(self, b: Block) -> None: if b.is_unreachable: return self.sem.block_depth[-1] += 1 for node in b.body: node.accept(self) self.sem.block_depth[-1] -= 1 def visit_assignment_stmt(self, s: AssignmentStmt) -> None: for lval in s.lvalues: self.sem.analyze_lvalue(lval, add_global=True, explicit_type=s.type is not None) def visit_func_def(self, d: FuncDef) -> None: sem = self.sem d.is_conditional = sem.block_depth[-1] > 0 if d.name() in sem.globals: n = sem.globals[d.name()].node if sem.is_conditional_func(n, d): # Conditional function definition -- multiple defs are ok. 
d.original_def = cast(FuncDef, n) else: sem.check_no_global(d.name(), d, True) d._fullname = sem.qualified_name(d.name()) sem.globals[d.name()] = SymbolTableNode(GDEF, d, sem.cur_mod_id) def visit_overloaded_func_def(self, d: OverloadedFuncDef) -> None: self.sem.check_no_global(d.name(), d) d._fullname = self.sem.qualified_name(d.name()) self.sem.globals[d.name()] = SymbolTableNode(GDEF, d, self.sem.cur_mod_id) def visit_class_def(self, d: ClassDef) -> None: self.sem.check_no_global(d.name, d) d.fullname = self.sem.qualified_name(d.name) info = TypeInfo(SymbolTable(), d) info.set_line(d.line) d.info = info self.sem.globals[d.name] = SymbolTableNode(GDEF, info, self.sem.cur_mod_id) self.process_nested_classes(d) def process_nested_classes(self, outer_def: ClassDef) -> None: for node in outer_def.defs.body: if isinstance(node, ClassDef): node.info = TypeInfo(SymbolTable(), node) node.info._fullname = node.info.name() symbol = SymbolTableNode(MDEF, node.info) outer_def.info.names[node.name] = symbol self.process_nested_classes(node) def visit_for_stmt(self, s: ForStmt) -> None: self.sem.analyze_lvalue(s.index, add_global=True) def visit_with_stmt(self, s: WithStmt) -> None: for n in s.target: if n: self.sem.analyze_lvalue(n, add_global=True) def visit_decorator(self, d: Decorator) -> None: d.var._fullname = self.sem.qualified_name(d.var.name()) self.sem.add_symbol(d.var.name(), SymbolTableNode(GDEF, d.var), d) def visit_if_stmt(self, s: IfStmt) -> None: infer_reachability_of_if_statement(s, pyversion=self.pyversion) for node in s.body: node.accept(self) if s.else_body: s.else_body.accept(self) def visit_try_stmt(self, s: TryStmt) -> None: self.sem.analyze_try_stmt(s, self, add_global=True) class ThirdPass(TraverserVisitor[None]): """The third and final pass of semantic analysis. Check type argument counts and values of generic types. """ def __init__(self, errors: Errors) -> None: self.errors = errors def visit_file(self, file_node: MypyFile, fnam: str) -> None: self.errors.set_file(fnam) file_node.accept(self) def visit_func_def(self, fdef: FuncDef) -> None: self.errors.push_function(fdef.name()) self.analyze(fdef.type) super().visit_func_def(fdef) self.errors.pop_function() def visit_class_def(self, tdef: ClassDef) -> None: for type in tdef.info.bases: self.analyze(type) super().visit_class_def(tdef) def visit_assignment_stmt(self, s: AssignmentStmt) -> None: self.analyze(s.type) super().visit_assignment_stmt(s) def visit_cast_expr(self, e: CastExpr) -> None: self.analyze(e.type) super().visit_cast_expr(e) def visit_type_application(self, e: TypeApplication) -> None: for type in e.types: self.analyze(type) super().visit_type_application(e) def analyze(self, type: Type) -> None: if type: analyzer = TypeAnalyserPass3(self.fail) type.accept(analyzer) def fail(self, msg: str, ctx: Context) -> None: self.errors.report(ctx.get_line(), msg) def self_type(typ: TypeInfo) -> Union[Instance, TupleType]: """For a non-generic type, return instance type representing the type. For a generic G type with parameters T1, .., Tn, return G[T1, ..., Tn]. 
""" tv = [] # type: List[Type] for i in range(len(typ.type_vars)): tv.append(TypeVarType(typ.type_vars[i], i + 1, typ.defn.type_vars[i].values, typ.defn.type_vars[i].upper_bound, typ.defn.type_vars[i].variance)) inst = Instance(typ, tv) if typ.tuple_type is None: return inst else: return TupleType(typ.tuple_type.items, inst) def replace_implicit_first_type(sig: FunctionLike, new: Type) -> FunctionLike: if isinstance(sig, CallableType): return replace_leading_arg_type(sig, new) else: sig = cast(Overloaded, sig) return Overloaded([cast(CallableType, replace_implicit_first_type(i, new)) for i in sig.items()]) def set_callable_name(sig: Type, fdef: FuncDef) -> Type: if isinstance(sig, FunctionLike): if fdef.info: return sig.with_name( '"{}" of "{}"'.format(fdef.name(), fdef.info.name())) else: return sig.with_name('"{}"'.format(fdef.name())) else: return sig def refers_to_fullname(node: Node, fullname: str) -> bool: """Is node a name or member expression with the given full name?""" return isinstance(node, RefExpr) and cast(RefExpr, node).fullname == fullname def refers_to_class_or_function(node: Node) -> bool: """Does semantically analyzed node refer to a class?""" return (isinstance(node, RefExpr) and isinstance(cast(RefExpr, node).node, (TypeInfo, FuncDef, OverloadedFuncDef))) def find_duplicate(list: List[T]) -> T: """If the list has duplicates, return one of the duplicates. Otherwise, return None. """ for i in range(1, len(list)): if list[i] in list[:i]: return list[i] return None def disable_typevars(nodes: List[SymbolTableNode]) -> None: for node in nodes: assert node.kind in (BOUND_TVAR, UNBOUND_TVAR) node.kind = UNBOUND_TVAR def enable_typevars(nodes: List[SymbolTableNode]) -> None: for node in nodes: assert node.kind in (BOUND_TVAR, UNBOUND_TVAR) node.kind = BOUND_TVAR def remove_imported_names_from_symtable(names: SymbolTable, module: str) -> None: """Remove all imported names from the symbol table of a module.""" removed = [] # type: List[str] for name, node in names.items(): fullname = node.node.fullname() prefix = fullname[:fullname.rfind('.')] if prefix != module: removed.append(name) for name in removed: del names[name] def infer_reachability_of_if_statement(s: IfStmt, pyversion: Tuple[int, int]) -> None: for i in range(len(s.expr)): result = infer_if_condition_value(s.expr[i], pyversion) if result == ALWAYS_FALSE: # The condition is always false, so we skip the if/elif body. mark_block_unreachable(s.body[i]) elif result == ALWAYS_TRUE: # This condition is always true, so all of the remaining # elif/else bodies will never be executed. for body in s.body[i + 1:]: mark_block_unreachable(s.body[i]) if s.else_body: mark_block_unreachable(s.else_body) break def infer_if_condition_value(expr: Node, pyversion: Tuple[int, int]) -> int: """Infer whether if condition is always true/false. Return ALWAYS_TRUE if always true, ALWAYS_FALSE if always false, and TRUTH_VALUE_UNKNOWN otherwise. 
""" name = '' negated = False alias = expr if isinstance(alias, UnaryExpr): if alias.op == 'not': expr = alias.expr negated = True if isinstance(expr, NameExpr): name = expr.name elif isinstance(expr, MemberExpr): name = expr.name result = TRUTH_VALUE_UNKNOWN if name == 'PY2': result = ALWAYS_TRUE if pyversion[0] == 2 else ALWAYS_FALSE elif name == 'PY3': result = ALWAYS_TRUE if pyversion[0] == 3 else ALWAYS_FALSE elif name == 'MYPY': result = ALWAYS_TRUE if negated: if result == ALWAYS_TRUE: result = ALWAYS_FALSE elif result == ALWAYS_FALSE: result = ALWAYS_TRUE return result def mark_block_unreachable(block: Block) -> None: block.is_unreachable = True block.accept(MarkImportsUnreachableVisitor()) class MarkImportsUnreachableVisitor(TraverserVisitor): """Visitor that flags all imports nested within a node as unreachable.""" def visit_import(self, node: Import) -> None: node.is_unreachable = True def visit_import_from(self, node: ImportFrom) -> None: node.is_unreachable = True def visit_import_all(self, node: ImportAll) -> None: node.is_unreachable = True ``` #### File: data/fixtures/alias.py ```python class object: def __init__(self) -> None: pass class type: def __init__(self, x) -> None: pass class int: pass class str: pass class function: pass bytes = str ``` #### File: data/fixtures/transform.py ```python class object: def __init__(self) -> None: pass class type: pass # str is handy for debugging; allows outputting messages. class str: pass # Primitive types int/float have special coercion behaviour (they may have # a different representation from ordinary values). class int: pass class float: pass # The functions below are special functions used in test cases; their # implementations are actually in the __dynchk module, but they are defined # here so that the semantic analyzer and the type checker are happy without # having to analyze the entire __dynchk module all the time. # # The transformation implementation has special case handling for these # functions; it's a bit ugly but it works for now. def __print(a1=None, a2=None, a3=None, a4=None): # Do not use *args since this would require list and break many test # cases. pass ``` #### File: mypy/pinfer/test_pinfer3.py ```python import unittest import pinfer # Include all of the shared unit tests from test_pinfer import TestInfer class TestInfer3(unittest.TestCase): def test_infer_keyword_only_args(self): # decorators break the parsing def f(x, *, y=0): pass f = pinfer.infer_signature(f) f(1, y='x') self.assert_infer_state( 'def f(x: int, *, y: str = 0) -> None') def f(*, x=None, y=None): pass f = pinfer.infer_signature(f) f(y='x') self.assert_infer_state( 'def f(*, x: None = None, y: str = None) -> None') def assert_infer_state(self, expected): state = pinfer.format_state() self.assertEqual(state, expected) pinfer.reset() if __name__ == '__main__': unittest.main() ```
{ "source": "jhancock1975/GEL", "score": 2 }
#### File: jhancock1975/GEL/cpir_gel(beta).py ```python import numpy as np import pandas as pd import itertools from functools import partial from scipy.linalg import block_diag def get_diag_index(d_, l): idx = d_[d_.Class == d_.Class.value_counts().index[l]].index return idx def row_feature_rep(rows_, features_): r_1 = rows_.mean(axis=1).values f_1 = features_.mean(axis=0).values r_0 = 1 - r_1 f_0 = 1 - f_1 f = np.array([f_0, f_1]) r = np.array([r_0, r_1]) Q_ = np.matmul(f.transpose(), r) return Q_ def get_upper_Fs(d_, ccm, i): ret = row_feature_rep(rows_= d_[d_.Class == ccm.cj[i]] .drop("Class", axis=1), features_=d_[(d_.Class == ccm.ci[i]) | (d_.Class == ccm.cj[i])] .drop("Class", axis=1)) return ret def get_lower_Fs(d_, ccm, i): ret = row_feature_rep(rows_ = d_[d_.Class == ccm.ci[i]] .drop("Class", axis=1), features_ = d_[(d_.Class == ccm.ci[i]) | (d_.Class == ccm.cj[i])] .drop("Class", axis=1)) return ret def get_diag(d_, diag_idx_, i): ret = row_feature_rep(rows_ = d_.iloc[diag_idx_[i]] .drop("Class", axis=1), features_ = d_.iloc[diag_idx_[i]] .drop("Class", axis=1)) return ret def makeMat(k_, which_diag, ccm, d_, Fs, D_): if which_diag == 'upper': l = ccm[ccm.ci == d_.Class.value_counts().index[k_]].index else: l = ccm[ccm.cj == d_.Class.value_counts().index[k_]].index if (len(l) == 0 and which_diag == 'upper'): Q_ = D_[len(d_.Class.unique()) - 1] elif (len(l) == 0 and which_diag == 'lower'): Q_ = D_[0] else: q = [Fs[l] for l in l.values] q_ = np.concatenate(q, axis=1) if (which_diag == 'upper'): Q_ = np.concatenate([D_[k_], q_], axis=1) else: Q_ = np.concatenate([q_, D_[k_]], axis=1) if (Q_.shape[1] == d_.shape[0]): Q = Q_ else: if (which_diag == 'upper'): Q = np.concatenate([np.zeros((d_.shape[1] - 1, d_.shape[0] - Q_.shape[1])), Q_], axis=1) else: Q = np.concatenate([Q_, np.zeros((d_.shape[1] - 1, d_.shape[0] - Q_.shape[1]))], axis=1) return Q def cpir_gel(source_data_, k = 10, learning_method = "unsupervised", class_var = None): ''' Args: source_data_: a one-hot encoded dataframe k: number of eigenvectors to use for new embedding, if 'max' dim(source_data_) = dim(emb) learning_method: 'unsupervised' indicates no class label, otherwise 'supervised' Returns: emb: new embedded space mb: one-hot data source_data_: original data frame ''' if learning_method == 'supervised': source_data_ = source_data_.rename(columns = {class_var: "Class"}) mb_ = source_data_.drop("Class", axis = 1) mb_['Class'] = pd.Categorical(source_data_.Class, categories=source_data_.Class.value_counts() .keys() .tolist(), ordered=True) mb = mb_.sort_values(by='Class') class_combs = pd.DataFrame( list(itertools.combinations( mb.Class.value_counts().index, 2)), columns=['ci', 'cj'] ) # diag_idx = map(partial(get_diag_index, mb), range(len(mb.Class.unique()))) diag_idx = [get_diag_index(mb, l) for l in range(len(mb.Class.unique()))] D = [get_diag(mb, diag_idx, x) for x in range(len(mb.Class.unique()))] upper_Fs = [get_upper_Fs(mb, class_combs, x) for x in range(len(class_combs))] lower_Fs = [get_lower_Fs(mb, class_combs, x) for x in range(len(class_combs))] upper_block = np.concatenate( list(map(partial(makeMat, which_diag="upper", ccm=class_combs, d_=mb, Fs=upper_Fs, D_=D), range(len(diag_idx)))) ) lower_block = np.concatenate( list(map(partial(makeMat, which_diag="lower", ccm=class_combs, d_=mb, Fs=lower_Fs, D_=D), range(len(diag_idx))))) b = block_diag(*map(lambda x: np.full(D[x].shape, .5), range(len(D)))) b[b == 0] = 1 A = block_diag(*map(lambda x: np.full((mb.Class.value_counts().values[x], 
mb.Class.value_counts().values[x]), mb.Class.value_counts(normalize=True).values[x]), range(len(D))) ) Q_ = (upper_block + lower_block) * b Q = np.matmul(Q_.transpose(), Q_) S_ = np.matmul(np.divide(Q, np.max(Q)) * A, mb.drop("Class", axis=1).values) U, s, V = np.linalg.svd(S_) else: mb = source_data_ u = row_feature_rep(rows_= mb, features_= mb) Q = np.matmul(u.transpose(), u) S_ = np.matmul(np.divide(Q, np.max(Q)), mb.values) U, s, V = np.linalg.svd(S_) if k == 'max': v_t = V.transpose() else: v_t = V.transpose()[:, 0:k] if learning_method == 'supervised': emb = np.matmul(mb.drop("Class", axis=1).values, v_t) else: emb = np.matmul(mb.values, v_t) return emb, v_t, mb, source_data_.rename(columns = {"Class" : class_var}) ```
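`cpir_gel` above returns the low-dimensional embedding, the truncated right singular vectors, the (possibly re-ordered) one-hot frame and the original frame. A minimal unsupervised call might look like the sketch below; the toy frame and its column names are invented, `k` has to stay at or below the number of one-hot columns, and the function is assumed to be importable from the module above.

```python
import pandas as pd

# Hypothetical toy input: six rows of four binary indicator columns.
toy = pd.DataFrame({'a': [1, 0, 1, 0, 1, 0],
                    'b': [0, 1, 0, 1, 0, 1],
                    'c': [1, 1, 0, 0, 1, 1],
                    'd': [0, 0, 1, 1, 0, 0]})

emb, v_t, mb, original = cpir_gel(toy, k=2, learning_method='unsupervised')
print(emb.shape)   # (6, 2): one 2-dimensional embedding per input row
print(v_t.shape)   # (4, 2): the projection applied to the one-hot columns

# Supervised use additionally names the label column, e.g.
# emb, v_t, mb, original = cpir_gel(df, k=2, learning_method='supervised', class_var='label')
```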
{ "source": "jhancock1975/kaggle-titanic", "score": 3 }
#### File: kaggle-titanic/baseline/titanic-rf.py ```python import numpy as np import pandas as pd from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split import logging import argparse from sklearn.metrics import accuracy_score import constant class Classifier(object): def __init__(self, train_csv_name, test_csv_name): """ constructor :param train_csv_name: path to, and name of training data :param test_csv_name: path to, and name of test data """ logger.debug('created %s classifier object' % self) self.train_csv_name = train_csv_name self.test_csv_name = test_csv_name logger.debug('training csv file name: %s' % train_csv_name) logger.debug('validation data file name: %s' % test_csv_name) self.trained_model=None class GenderClassifier(Classifier): def train_and_eval(self): """ uses gender as predictor for survival :return: """ df = pd.read_csv(self.train_csv_name) logger.debug("gender classifier accuracy: %s" % accuracy_score(df.Survived, df.Sex=='female')) class TitanicRf(Classifier): def clean_data(self, df): """ clean data before training, for this simple case we drop any non-numeric columns, except for the target value, and we drop any NaN values so we can train with random forest :param df: :return: dataframe with cleaned data """ for column_name in df.columns: if not np.issubdtype(df[column_name], np.number) and (column_name != 'Survived'): df = df.drop([column_name], axis=1) df.dropna(inplace=True) return df def train_and_eval(self): """ trains and tests a random forest classifier, for use as the baseline classifier for this project :return: """ logger.debug('starting model fitting') train_csv = pd.read_csv(self.train_csv_name) train_csv = self.clean_data(train_csv) X = train_csv.drop(['Survived'], axis=1) # the ...values.ravel() is to suppress warning # titanic-rf.py:67: DataConversionWarning: A column-vector y was passed when a 1d array # was expected. Please change the shape of y to (n_samples,), for example using ravel(). 
# solution from https://stackoverflow.com/posts/36120015/revisions # StackOverflow.com user: <NAME>, # edited by StackOverflow user: <NAME> # accessed December 24th, 2018 y = train_csv[['Survived']].values.ravel() X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1, random_state=constant.RANDOM_STATE) logger.debug("X_train dimensions: %s", X_train.shape) logger.debug("X_val dimensions: %s", X_val.shape) logger.debug("y_train length: %s", len(y_train)) logger.debug("y_val length: %s", len(y_val)) rf = RandomForestClassifier(random_state=constant.RANDOM_STATE, n_estimators=10) logger.debug('fitting classifier') rf.fit(X_train, y_train) self.trained_model=rf logger.debug('starting predictions') predictions = rf.predict(X_val) logger.debug("random forest accuracy: %s" % accuracy_score(y_val, predictions)) def test(self): """ evaluates accuracy of trained model on test data """ logger.debug('starting test predictions') test_csv = pd.read_csv(self.test_csv_name) test_csv = self.clean_data(test_csv) #test_csv = test_csv.concat(self.trained_model.predict,axis=1) logger.debug(test_csv.head()) # parse command line arguments # this program expects the user # to supply the file path, and name of # test and training data parser = argparse.ArgumentParser() parser.add_argument("train_data", metavar="train-data", help="path and name of csv file containing training data") parser.add_argument("test_data", metavar="test-data", help="path and name of csv file containing test data") args = parser.parse_args() # logging setup logger = logging.getLogger(__name__) c_handler = logging.StreamHandler() c_format = logging.Formatter('%(asctime)s %(name)s - %(levelname)s - %(message)s') c_handler.setFormatter(c_format) logger.addHandler(c_handler) logger.setLevel(logging.DEBUG) if __name__ == "__main__": # show all the columns when printing pd.set_option('display.max_columns', None) logger.debug('starting up') titanicRf = TitanicRf(args.train_data, args.test_data) genderClassifier = GenderClassifier(args.train_data, args.test_data) for clf in [titanicRf, genderClassifier]: clf.train_and_eval() titanicRf.test() ```
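The script above is meant to be driven from the command line, so a short usage sketch may help; the CSV file names below are placeholders for the usual Kaggle Titanic training and test files, and the classes are assumed to be importable from the module above.

```python
# Command-line use:
#   python titanic-rf.py train.csv test.csv
#
# Equivalent programmatic use of the two classifiers defined above:
baseline = GenderClassifier('train.csv', 'test.csv')
baseline.train_and_eval()      # logs accuracy of the "females survive" rule

forest = TitanicRf('train.csv', 'test.csv')
forest.train_and_eval()        # fits the random forest and logs hold-out accuracy
forest.test()                  # loads and cleans the unlabeled test file
```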
{ "source": "jhancock1975/research_experimenter", "score": 3 }
#### File: jhancock1975/research_experimenter/find_missing.py ```python import os import argparse import dill as pickle import sys import subprocess def find_missing(pickle_file, results_dir): command = subprocess.run(['squeue', '-u', f'{os.environ["USER"]}', '-h', f'-o "%j|%k" '], capture_output=True) running_jobs = [] std_out_str = command.stdout.decode('utf-8') for j in std_out_str.split('\n'): if 'mate-terminal' in j: continue if len(j.strip())==0: continue else: running_jobs.append(j.split('|')[1].replace('"', '').strip()) with open(pickle_file, 'rb') as f: d = pickle.loads(f.read()) exp_names = [] for exp_name in d.keys(): found = False for r_name in os.listdir(results_dir): # experiment name should equal file name without .json if r_name[:-5] == exp_name: found = True break for r_name in running_jobs: if r_name == exp_name: found = True break if not found: exp_names.append(exp_name) return exp_names if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--pickle_file', '-p' , help='experiments pickled dictionary') parser.add_argument('--results_dir', '-r', help='directory containing results files') args = parser.parse_args() if len(sys.argv)==1: parser.print_help(sys.stderr) sys.exit(1) to_run_exps = find_missing(args.pickle_file, args.results_dir) for exp_name in to_run_exps: print(exp_name) ``` #### File: jhancock1975/research_experimenter/tabulate_one_parallel_results.py ```python import numpy as np import json import copy import re import os def capitalize(s:str) -> str: """ make first letter of s upper case :param s: string :return: s with first letter capitalized """ return s[0].upper()+s[1:] def space_underscores(s): """ replace underscores with spaces separate function to keep f strings from getting too long :param s: a string :return: s with underscores replaced with spaces """ return s.replace("_", " ") def escape_underscores(s): """ escape underscores with back slashes separate function to keep f strings from getting too long :param s: a string :return: s with underscores prefixed with back slashes """ return s.replace("_", "\_") def proper_form(s:str)->str: """ return properly formed string for metric to be consistent, for example a_prc->AUPRC since in papers & other official documents we abbreviate "area under the precison recall curve" as AUPRC :parm s: any string :return: proper form of input value if we know it, the input value unchanged if not """ if s == "a_prc": return "\\ac{AUPRC}" elif s == "auc": return "\\ac{AUC}" elif s == "acc": return "accuracy" else: return s def englishify(l:object)->str: """ convert list to string separating elements by commas except for between the last two elements which are separated by the word and """ ll = copy.deepcopy(l) for i in range(len(ll)): ll[i] = proper_form(ll[i]) return ', '.join(list(map(str, ll)))[::-1].replace(',', ' dna ', 1)[::-1] def acc_from_cm(cm: object)->float: """ calculate accuracy from confusion matrix :param cm: confusion matrix :return: accuracy """ tp = cm[0][0] fp = cm[0][1] fn = cm[1][0] tn = cm[1][1] return (tp + tn)/(tp + tn + fp + fn) def format_number(x): """ return in LaTeX scientific notation if very small or very close to one :param x: number to format :return: string """ # caught this issue formatting # results from sample data assert not np.isnan(x), 'encountered NaN metric' def py_sci_to_latex(x): arr = x.split("E") return f'${arr[0]} \\times 10^{{{arr[1]}}}$' if (x < 1E-5): return py_sci_to_latex(f'{x:.5E}') else: return f'{round(x, 5):.5f}' def 
cross_validation_results_table(results_dict: object, metrics_list: object, exp_title=None, shorten_func=None, exp_subtitle=None, long_exp_names=False)->None: """ print mean values of results_dict in LaTeX format :param results_dict: dictionary of dictionary of results :param metrics_list: list of metrics to tabulate, currently supported: 'auc' for accuracy, 'a_prc' for area under precision recall curve, 'acc' for accuracy :return: string holding contents of LaTeX table """ if not shorten_func: # define an identity function if # shorten_func not specified def shorten_func(s): return s for clf_name, results_files in results_dict.items(): # LaTeX table boilerplate table_str = '\\bgroup' table_str += '\\begin{table}[H]\n' table_str += '\t\\centering\n' table_str += f'\t\\caption{{{exp_title if exp_title else ""}; \n' table_str += f'\tMean and standard deviations of {englishify(metrics_list)}, \n' table_str += '\t(10 iterations of 5-fold cross-validation)}\n' # wrap long exp names if long_exp_names == True: first_col = 'p{2in}' else: first_col = 'l' alignment_chars = 'c'*(2*len(metrics_list)) table_str += f'\t\\begin{{tabular}}{{{first_col}{alignment_chars}}} \\toprule\n' # table header table_str += '\tExperiment Name' for metric in metrics_list: table_str += ' & Mean & \\ac{SD}' table_str += '\\\\ \n' for metric in metrics_list: table_str += f' & {proper_form(metric)} & {proper_form(metric)}' table_str += '\\\\ \\midrule\n' # compute mean values of metrics and put them # on rows for file_name in results_files: with open(file_name, 'r') as f: j = json.loads(f.read()) metrics_dict = {} for exp_name, exp_data in j.items(): for metric in metrics_list: metrics_dict[metric] = [] for iter_num, iter_data in exp_data['results_data'].items(): for fold_num, fold_data in iter_data.items(): for metric in metrics_list: if metric == 'acc': # calculate accuracy from confusion matrix metrics_dict[metric].append(acc_from_cm(fold_data['cm'])) else: metrics_dict[metric].append(fold_data[metric]) if len(metrics_dict[metric]) != 50: # something is not right if we get more than 50 measurements in a file raise Exception(f'found results with more than 50 measurements in {file_name}') table_str += f'\t\t{space_underscores(shorten_func(exp_name))}' for metric in metrics_list: table_str += f'& {format_number(np.mean(metrics_dict[metric]))} & {format_number(np.std(metrics_dict[metric]))}' table_str += '\\\\ \\midrule\n' table_str += '\t\\end{tabular}\n' table_str += f'\t\label{{tab:mean_{metric}_for_{clf_name.replace(" ","_")}_exp}} \n' if exp_subtitle: table_str += f"\\flushleft{exp_subtitle}" table_str += '\\end{table}\n' table_str += '\\egroup' return table_str def find_max_metric_by_clf(dir_name, metric_name, clf_names, output_file_name, file_name_pattern=None): """ return list of files that has max metric by key in max_by_key_dict :param dir_name: directory of results files :param metric_name: name of metric to search for max valuee of, e.g. auc au_prc :param file_name_pattern: filter files to search, e.g. 
*default*none*agree* :param max_by_key_dict: keys to search for results for, for example mlp, cp, dt (classifier names :result: list of files that can be passed to merge_results.merge_dicts and then cross_validation_results_table """ if file_name_pattern: file_list = [f for f in os.listdir(dir_name) if file_name_pattern.match(f)] else: file_list = [f for f in os.listdir(dir_name)] max_by_key_dict = {clf_name: None for clf_name in clf_names} for search_key in max_by_key_dict.keys(): for f_name in file_list: with open(os.path.join(dir_name, f_name), 'r') as f: d = json.loads(f.read()) for k, v in d.items(): # we have been saving results in json file where # top level format has key that is experiment name # that is the file name without the .json extension # and the value is the results data if search_key in f_name: results_data = v['results_data'] metric_mean = np.mean([results_data[str(i)][str(j)][metric_name] for i in range(1, len(results_data) + 1) for j in range(1, len(results_data[str(i)]) + 1)]) try: if metric_mean > max_by_key_dict[search_key]['metric_mean']: max_by_key_dict[search_key]['metric_mean'] = metric_mean max_by_key_dict[search_key]['file_name'] = f_name except TypeError as te: max_by_key_dict[search_key] = {'metric_mean' :metric_mean, 'file_name': f_name} res_dict = { output_file_name: []} for k,v in max_by_key_dict.items(): res_dict[output_file_name].append(os.path.join(dir_name, v['file_name'])) return res_dict ```
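`cross_validation_results_table` above expects a mapping from a classifier label to a list of JSON result files, each holding `results_data[iteration][fold]` entries for the metrics named in `metrics_list`. A hedged sketch of a call follows; the classifier label, file path and experiment title are invented.

```python
# Hypothetical layout: one JSON file of 10x5-fold metrics for one classifier.
results = {'CatBoost': ['results/catboost_default.json']}

table = cross_validation_results_table(
    results,
    metrics_list=['auc', 'a_prc'],
    exp_title='Hypothetical baseline runs',
)
print(table)   # LaTeX tabular with one row per experiment found in the file
```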
{ "source": "jhancock1975/slurm_experimenter", "score": 2 }
#### File: jhancock1975/slurm_experimenter/run_exp_list.py ```python import sys import os import subprocess import argparse import json import dill as pickle # we *want* that to throw a key error if the n_jobs environment variable is not set # one_parallel_with_resume.py does some of its own multithreading; however, # we will remove all that, let it run sequentially and have multithreading # in libraries n_jobs=os.environ['n_jobs'] def run_exp(exp_name, pickle_file_name, node_name, ram=None): """ runs experiment with sbatch command TODO: how to handle requested ram? move to exp def? :param exp_name: name of experiment, should be key to experiment in pickled dictionary in file named args.pickle_file_name """ if not ram: # "Go big or go home' ram = '164G' if not os.path.isdir('./logs'): os.makedirs('./logs') with open(pickle_file_name, 'rb') as f: exp_dict = pickle.loads(f.read())[exp_name] sub_proc_arr = ['sbatch', f'--nodelist={node_name}', '-o' , f'./logs/{exp_name}.log', '--comment', exp_name, f'--cpus-per-task={n_jobs}', f'--mem={ram}'] if exp_dict['slurm options'] and len(exp_dict['slurm options']) > 0: sub_proc_arr.append(exp_dict['slurm options']) sub_proc_arr += ['-p', 'longq7-mri', f'{os.environ["SLEX_HOME"]}/one_parallel_with_resume.py', '-f' f'{pickle_file_name}', '-e', exp_name] command = subprocess.run(sub_proc_arr, capture_output=True) return json.dumps( {'stdout': command.stdout.decode('utf8'), 'stderr': command.stderr.decode('utf8')}, indent=2) if __name__ == "__main__": parser = argparse.ArgumentParser(description = 'Run slurm experiments, we find slurm sends too many jobs to the same node so we send them to all nodes uniformly, this program will create a directory named logs that will hold log files for all experiments in the same directory it is invoked from.') parser.add_argument('-e', '--exp_name_list_file', help='file of experiment names, one per line') parser.add_argument('-p', '--pickle_file_name', help='pickle file name containing experiment definitions') parser.add_argument('-n', '--node_list', help='space separated list of last two characters (digits) of nodes to run experiments on, e.g. 01 05 16', nargs='+') if len(sys.argv)==1: parser.print_help(sys.stderr) sys.exit(1) args = parser.parse_args() # list of nodes to submit jobs to base_node_name = 'nodenviv1000' node_numbers = args.node_list os.makedirs('./logs', exist_ok=True) nodes = [f'{base_node_name}{node_numbers[i]}' for i in range(len(node_numbers))] with open(args.exp_name_list_file, 'r') as f: exp_names = f.read().splitlines() i = 0 for exp_name in exp_names: i += 1 run_exp(exp_name, args.pickle_file_name, node_name=nodes[i % len(nodes)], ram='164G') ```
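The script above reads `n_jobs` from the environment at import time, uses `SLEX_HOME` to locate `one_parallel_with_resume.py`, and submits one `sbatch` job per experiment name. The sketch below shows the intended shell invocation and a direct call to `run_exp`; the experiment name, pickle file, node number and RAM figure are placeholders, and a working Slurm installation is assumed.

```python
# Typical shell invocation (both environment variables must be exported first):
#   export n_jobs=16 SLEX_HOME=$HOME/slurm_experimenter
#   python run_exp_list.py -e exps_to_run.txt -p experiments.pkl -n 01 05 16
#
# run_exp() can also be called directly; it returns sbatch's stdout/stderr as JSON:
out = run_exp('my_experiment', 'experiments.pkl',
              node_name='nodenviv100001', ram='64G')
print(out)
```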
{ "source": "jhancuch/insiderTrades", "score": 3 }
#### File: insiderTrades/utils/formFilterDerivativeHoldings.py ```python import re def formFilterDerivativeHoldings(filing, footnoteKeywords = None, issuerKeywords = None, issuerTradingSymbol = None, rptOwnerKeywords = None): """ This function checks if any transactions on the form contain any of the keywords Parameters: filing (string): the text file of the filing footnoteKeywords (list): list containing desired keywords located in footnotes issuerKeywords (list): list containing desired keywords located in the issuer's section issuerTradingSymbol (list): list containing desired trading symbols rptOwnerKeywords (list): list containing desired keywords located in the reporting owner's section Returns: keyCount (int): value of the number of keywords found within the filing """ # This section checks if any transactions on the form contain any of the footnote keywords. For each key word identified, a value of 1 is added to the score. Any form with a # score above 1 is then examined by the rest of the script. # Extract the section of the form with the transactions and also extract the footnote section if footnoteKeywords is not None: footnoteKeywordsCount = 0 filingTransactionSection = re.search(r'<derivativeTable>.*?</derivativeTable>', filing, flags = re.MULTILINE | re.DOTALL)[0] filingFootnoteSection = re.search(r'<footnotes>.*?</footnotes>', filing, flags = re.MULTILINE | re.DOTALL)[0] # Check if a transaction has a footnote for footnote in range(1,30): footnote = str(footnote) footnoteIdText = 'footnoteId id="F' + footnote + '"' footnoteCitationIdentified = len(re.findall(footnoteIdText, filingTransactionSection, flags = re.MULTILINE | re.DOTALL)) # if a footnote is identified with the transaction, we then check the footnote section for what the footnote contains. If the footnote contains a key word, the number # is added the tempValue. # this clause is for if there are more than 1 key words if footnoteCitationIdentified > 0 and len(footnoteKeywords) > 1: footnoteKeywordsJoined = "|".join(footnoteKeywords) specificFootnote = re.search(r'<footnote id="F' + footnote + '"' + '>.*?</footnote>', filingFootnoteSection, flags = re.MULTILINE | re.DOTALL | re.IGNORECASE)[0] tempValue = len(re.findall(footnoteKeywordsJoined, specificFootnote, flags = re.MULTILINE | re.DOTALL | re.IGNORECASE)) # this clause is for if there is only 1 key word elif footnoteCitationIdentified > 0 and len(footnoteKeywords) == 1: footnoteKeywordsJoined = "".join(footnoteKeywords) specificFootnote = re.search(r'<footnote id="F' + footnote + '"' + '>.*?</footnote>', filingFootnoteSection, flags = re.MULTILINE | re.DOTALL | re.IGNORECASE)[0] tempValue = len(re.findall(footnoteKeywordsJoined, specificFootnote, flags = re.MULTILINE | re.DOTALL | re.IGNORECASE)) # If the footnote doesn't contain a key word, we set the value as zero. 
else: tempValue = 0 footnoteKeywordsCount += tempValue if footnoteKeywords is None: footnoteKeywordsCount = 0 # this section checks if any transaction on the form contain any of the keywords in the issuer section if issuerKeywords is not None: issuerKeywordsCount = 0 # Extract the issuer section of the form issuerSection = re.search('<issuer>.*?</issuer>', filing, flags = re.MULTILINE | re.DOTALL)[0] # we check the issuer section to see if any of our keywords appear if len(issuerKeywords) > 1: issuerKeywordsJoined = "|".join(issuerKeywords) tempValue = len(re.findall(issuerKeywordsJoined, issuerSection, flags = re.MULTILINE | re.DOTALL | re.IGNORECASE)) # this clause is for if there is only 1 key word elif len(issuerKeywords) == 1: issuerKeywordsJoined = "".join(issuerKeywords) tempValue = len(re.findall(issuerKeywordsJoined, issuerSection, flags = re.MULTILINE | re.DOTALL | re.IGNORECASE)) issuerKeywordsCount += tempValue if issuerKeywords is None: issuerKeywordsCount = 0 # this section checks if the issuer trading symbol section contains any of the chosen trading symbols if issuerTradingSymbol is not None: issuerTradingSymbolCount = 0 # Extract the issuer trading symbol section issuerTradingSection = re.search('<issuerTradingSymbol>.*?</issuerTradingSymbol>', filing, flags = re.MULTILINE | re.DOTALL)[0] # we check the issuer trading section to see if any of our keywords appear if len(issuerTradingSymbol) > 1: issuerTradingSymbolJoined = "|".join(issuerTradingSymbol) tempValue = len(re.findall(issuerTradingSymbolJoined, issuerTradingSection, flags = re.MULTILINE | re.DOTALL | re.IGNORECASE)) # this clause is for if there is only 1 key word elif len(issuerTradingSymbol) == 1: issuerTradingSymbolJoined = "".join(issuerTradingSymbol) tempValue = len(re.findall(issuerTradingSymbolJoined, issuerTradingSection, flags = re.MULTILINE | re.DOTALL | re.IGNORECASE)) issuerTradingSymbolCount += tempValue if issuerTradingSymbol is None: issuerTradingSymbolCount = 0 # this section checks if any of the rptOwners contain any of the keywords if rptOwnerKeywords is not None: rptOwnerKeywordsCount = 0 # Extract the reporting owner section rptOwnerSection = re.search('<issuer>.*?<derivativeTable>', filing, flags = re.MULTILINE | re.DOTALL)[0] # we check the reporting owner section to see if any of our keywords appear if len(rptOwnerKeywords) > 1: rptOwnerKeywordsJoined = "|".join(rptOwnerKeywords) tempValue = len(re.findall(rptOwnerKeywordsJoined, rptOwnerSection, flags = re.MULTILINE | re.DOTALL | re.IGNORECASE)) # this clause is for if there is only 1 key word elif len(rptOwnerKeywords) == 1: rptOwnerKeywordsJoined = "".join(rptOwnerKeywords) tempValue = len(re.findall(rptOwnerKeywordsJoined, rptOwnerSection, flags = re.MULTILINE | re.DOTALL | re.IGNORECASE)) rptOwnerKeywordsCount += tempValue if rptOwnerKeywords is None: rptOwnerKeywordsCount = 0 # Sum up all the keywords identified in a single form keyCount = footnoteKeywordsCount + issuerKeywordsCount + issuerTradingSymbolCount + rptOwnerKeywordsCount # Return the sum of all the keywords identified in a single form return(keyCount) ```
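Because the filter above simply returns a keyword count, callers usually keep a filing only when the count is positive. The sketch below assumes `form4.txt` is a Form 4 filing already downloaded from EDGAR that contains the `<derivativeTable>`, `<footnotes>` and issuer sections the function searches; the keywords and ticker are invented.

```python
with open('form4.txt') as fh:
    filing = fh.read()

hits = formFilterDerivativeHoldings(
    filing,
    footnoteKeywords=['gift', '10b5-1'],
    issuerTradingSymbol=['ABC'],
)
if hits > 0:
    print('filing matches -- keep it for the derivative-holdings extraction step')
```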
{ "source": "J-hanks/django-bootstrap-datepicker-plus", "score": 3 }
#### File: django-bootstrap-datepicker-plus/bootstrap_datepicker_plus/_helpers.py ```python def get_base_input(test=False): """ Return DateTimeBaseInput class from django.forms.widgets module Return _compatibility.DateTimeBaseInput class for older django versions. """ from django.forms.widgets import DateTimeBaseInput if "get_context" in dir(DateTimeBaseInput) and not test: # django version 1.11 and above base_input = DateTimeBaseInput else: # django version below 1.11 from bootstrap_datepicker_plus._compatibility import CompatibleDateTimeBaseInput base_input = CompatibleDateTimeBaseInput return base_input class DatePickerDictionary: """Keeps track of all date-picker input classes.""" _i = 0 items = dict() @classmethod def generate_id(cls): """Return a unique ID for each date-picker input class.""" cls._i += 1 return "dp_%s" % cls._i ``` #### File: django-bootstrap-datepicker-plus/tests/test_compatibility_patch.py ```python from django.test import SimpleTestCase from bootstrap_datepicker_plus._compatibility import BaseRenderer from bootstrap_datepicker_plus._helpers import get_base_input class CustomCompatibleDatePickerInput(get_base_input(True)): template_name = "myapp/custom_input/date-picker.html" class TestCompatibilityPatch(SimpleTestCase): def setUp(self): self.CompatibleDatePickerInput = get_base_input(True) self.dp_input = self.CompatibleDatePickerInput() def test_raise_on_get_template(self): self.assertRaises( NotImplementedError, lambda: BaseRenderer().get_template("test") ) def test_format_value_method(self): self.assertEqual(self.dp_input.format_value(""), None) def test_get_context(self): context = self.dp_input.get_context("input_name", "2018-04-12", {}) self.assertEqual(context["widget"]["name"], "input_name") self.assertEqual(context["widget"]["value"], "2018-04-12") def test_compatible_input_render(self): html = self.dp_input.render("input_name", "2018-04-12", {}) self.assertGreater(len(html), 0) def test_compatible_custom_input_render(self): dp_input = CustomCompatibleDatePickerInput() html = dp_input.render("input_name", "2018-04-12", {}) self.assertGreater(len(html), 0) ```
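A brief sketch of how the two helpers above are meant to be combined: `get_base_input()` picks the right widget base class for the installed Django version, and `DatePickerDictionary` hands out unique ids under which picker classes can be registered. The subclass name and template path are invented, and an installed Django is assumed.

```python
BaseInput = get_base_input()         # DateTimeBaseInput, or the pre-1.11 compatibility shim

class MyDatePickerInput(BaseInput):  # hypothetical widget subclass
    template_name = 'myapp/date-picker.html'

uid = DatePickerDictionary.generate_id()        # 'dp_1', 'dp_2', ... one per picker class
DatePickerDictionary.items[uid] = MyDatePickerInput
```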
{ "source": "jhanley634/changepoint", "score": 3 }
#### File: changepoint/auto/fit_pnomial.py ```python from numpy.polynomial import Polynomial from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error from sklearn.preprocessing import PolynomialFeatures import matplotlib.pyplot as plt import numpy as np import pandas as pd def arr(a): """Input should be a 1-dimensional vector.""" return np.array(a).reshape(-1, 1) # This is a 2-D array, with single column. def pnomial(x, a=0, b=3, c=4, d=5): return a * x**3 + b * x**2 + c * x + d def get_curve(lo=0, hi=100, n_samples=1000, sigma=2e3): df = pd.DataFrame(dict(x=np.linspace(lo, hi, n_samples))) df['y'] = df.x.apply(pnomial) + sigma * np.random.standard_normal(n_samples) return df def main(): # training (fitting) df = get_curve() np_model = Polynomial.fit(df.x, df.y, 2) # np stmt 1 print(type(np_model), np_model) poly_features = PolynomialFeatures(degree=2, include_bias=False) x_poly = poly_features.fit_transform(arr(df.x)) lin_reg = LinearRegression() lin_reg.fit(x_poly, df.y) print(lin_reg.intercept_, lin_reg.coef_) # inference df = get_curve(-300, 500) np_pred = list(map(np_model, df.x)) # np stmt 2 x = arr(df.x) x = poly_features.transform(x) pred = lin_reg.predict(x) mae = mean_absolute_error(arr(df.y), pred) print("MAE:", mae) print(type(pred)) df['pred'] = np.array(pred) df['delta'] = df.y - df.pred print(df) fig, ax = plt.subplots() ax.plot(df.x, df.y, label='signal') ax.plot(df.x, pred, label='predicted') # ax.plot(df.x, np_pred, label='predicted by NP') # overwritten by pred pixels diff = max(map(abs, np_pred - pred)) assert diff < 1e-6, diff ax.legend(loc='upper left') plt.show() if __name__ == '__main__': main() ``` #### File: changepoint/ch4_covid/view_cases.py ```python from pathlib import Path import matplotlib.pyplot as plt import numpy as np import pandas as pd import streamlit as st from ch2_adjustable_detector.view_all_det import rpt_algorithms def get_covid_repo(): nyt_repo = Path(__file__ + '/../../../covid-19-data').resolve() assert nyt_repo.exists(), nyt_repo return nyt_repo class CaseDetector: """A changepoint detector for covid case rates. """ @staticmethod def _get_deltas(s: pd.Series): # cf diff(): https://numpy.org/doc/stable/reference/generated/numpy.diff.html prev = s[0] for val in s: yield float(val - prev) prev = val @classmethod def demo5(cls): kind = st.radio('kind', ['raw', 'delta']) df = pd.read_csv(get_covid_repo() / 'us.csv') daily_cases = np.array(list(cls._get_deltas(df.cases))) df['daily_cases'] = daily_cases daily_deaths = np.array(list(cls._get_deltas(df.deaths))) df['daily_deaths'] = daily_deaths rate = np.array(list(cls._get_deltas(df.daily_deaths))) df['rate'] = rate results = [] # detection for algo in rpt_algorithms: bkpt_result = algo(model='rbf').fit(daily_cases).predict(pen=10) bkpt_result += [0] * 10 # In case we miss the occasional breakpoint. d = dict(name=algo.__name__) for i in range(8): d[f'b{i}'] = bkpt_result[i] results.append(d) results = pd.DataFrame(results) st.write(results) # display fig, ax = plt.subplots() if kind == 'raw': df['date'] = pd.to_datetime(df.date, format='%Y-%m-%d').copy() ax.plot(df.date, df.cases, label='cases') ax.plot(df.date, df.deaths, label='deaths') else: df['date'] = list(range(len(df.date))) ax.plot(df.date, df.daily_cases, label='daily cases') ax.plot(df.date, df.daily_deaths, label='daily deaths') ax.plot(df.date, df.rate, label='2nd derivative') ax.legend(loc='upper right') st.pyplot(fig) st.write(df) if __name__ == '__main__': CaseDetector.demo5() ```
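A quick, noise-free check of the idea in `fit_pnomial.py` above: with `sigma=0` the quadratic fit should recover the coefficients that `pnomial` uses almost exactly. The snippet assumes `get_curve` is importable from the module above.

```python
from numpy.polynomial import Polynomial

df = get_curve(lo=0, hi=10, n_samples=50, sigma=0.0)   # noiseless sample of 3x^2 + 4x + 5
model = Polynomial.fit(df.x, df.y, 2)

# .convert() maps the fit back to the unscaled domain; coefficients run from low degree up.
print(model.convert().coef)   # expected to be close to [5, 4, 3]
```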
{ "source": "jhanley634/grid-map", "score": 2 }
#### File: grid_map/bin/show_grid.py ```python import streamlit as st def main(): st.write('hello there') if __name__ == '__main__': main() ```
{ "source": "jhanley634/mapping", "score": 3 }
#### File: us/zip/po.py ```python import pandas as pd import uszipcode.search class PostalMapper: def __init__(self): self.search = uszipcode.search.SearchEngine() self.conn = self.search.ses.connection() def get_big_cities(self, min_pop=1e5): select = '''SELECT zipcode, population, lat, lng, major_city FROM simple_zipcode WHERE population >= :min_pop ORDER BY population DESC ''' params = dict(min_pop=min_pop) df = pd.read_sql_query(select, self.conn, params=params) return df if __name__ == '__main__': print(PostalMapper().get_big_cities()) ```
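`PostalMapper` above is a thin wrapper over `uszipcode`, which downloads its bundled SQLite database on first use. A short interactive sketch; the population threshold is just an example.

```python
mapper = PostalMapper()
big = mapper.get_big_cities(min_pop=5e5)   # ZIP codes with at least 500,000 residents
print(big[['zipcode', 'major_city', 'population']].head())
```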
{ "source": "jhanley634/problems", "score": 2 }
#### File: jutland/bin/report.py ```python from autoPyTorch import AutoNetClassification from jutland.dataset import Dataset from numpy import nan import pandas as pd import pyarrow as pa import pyarrow.parquet as pq import sklearn.datasets import sklearn.metrics import sklearn.model_selection def _get_northern_subset() -> pd.DataFrame: cache = Dataset.TMP / 'northern_subset.parquet' if not cache.exists(): df = Dataset.get_df().reset_index(drop=True) pq.write_table(pa.Table.from_pandas(df), cache) return pq.read_table(cache).to_pandas() def _train_and_test(ground_truth: pd.DataFrame): train = ground_truth.copy() train = train[train.index % 2 == 0] assert 12716 == len(train) test = ground_truth.copy() test = test[test.index % 2 == 1] test['osm_id'] = nan test.osm_id = nan return train, test def find_clusters(): ground_truth = _get_northern_subset() X = ground_truth.copy().drop(columns=['osm_id']) y = ground_truth.copy()[['osm_id']] X_train, X_test, y_train, y_test = ( sklearn.model_selection.train_test_split(X, y, random_state=1)) autonet = AutoNetClassification( 'tiny_cs', budget_type='epochs', min_budget=1, max_budget=9, num_iterations=1, log_level='debug', use_pynisher=False) res = autonet.fit(X_train=X_train, Y_train=y_train, cross_validator='k_fold', cross_validator_args={'n_splits': 3}) print(res) print('Score:', autonet.score(X_test=X_train, Y_test=y_train)) if __name__ == '__main__': find_clusters() ``` #### File: cluster/jutland/dataset.py ```python from collections import Counter from pathlib import Path import re from pandas_profiling import ProfileReport import pandas as pd import pyarrow as pa import pyarrow.parquet as pq class Dataset: TMP = Path('/tmp') SPATIAL = TMP / '3D_spatial_network.txt' @classmethod def get_df(cls) -> pd.DataFrame: """Densifies (filters) the somewhat sparse UCI roadway dataset.""" base = re.sub(r'\.txt$', '', f'{cls.SPATIAL}') cache = Path(f'{base}.parquet') if not cache.exists(): cols = 'osm_id lon lat alt' # Open Street Map ID, deg, deg, meters df = pd.read_csv(cls.SPATIAL, names=cols.split()) assert (df.alt < 135).all() # All mentioned roads are near sea level. assert (df.lat > 56.58).all() assert (df.lat < 57.76).all() assert (df.lon > 8.14).all() assert (df.lon < 11.20).all() assert 434874 == len(df), len(df) df = cls.filter_short_segments(df) # assert 405_241 == len(df), len(df) # 3 assert 388_147 == len(df), len(df) # 4 # assert 352_220 == len(df), len(df) # 6 # assert 287_331 == len(df), len(df) # 10 # assert 55_972 == len(df), len(df) # 50 df = df[df.lat > 57.55] assert 25_431 == len(df), len(df) cls.profile(df, Path(f'{base}.html')) pq.write_table(pa.Table.from_pandas(df), cache) return pq.read_table(cache).to_pandas() # Elapsed time is less than two seconds. @staticmethod def filter_short_segments(df: pd.DataFrame, k=4): """Demands that a given osm_id shall have at least K segments. So e.g. singleton "roads", containing just a single point, are discarded. 
""" counts = Counter(df.osm_id) small_roads = set(osm_id for osm_id, count in counts.items() if count < k) return df[~df.osm_id.isin(small_roads)] @staticmethod def profile(df: pd.DataFrame, out: Path): if not out.exists(): ProfileReport(df).to_file(out) ``` #### File: percolate/bin/subproc_streaming.py ```python from pathlib import Path from select import PIPE_BUF, select from subprocess import PIPE, Popen import datetime as dt import io import os import typer def streaming_subproc(cmd): timeout = dt.timedelta(seconds=5) with Popen(cmd, stdout=PIPE) as proc: stdout = io.TextIOWrapper(proc.stdout) fd = stdout.fileno() os.set_blocking(fd, False) buf = '' temp = 'sentinel' while proc.poll() is None and temp: select([fd], [], [], timeout.total_seconds()) temp = stdout.read(PIPE_BUF) buf += temp start, end = 0, 0 while start > -1: end = buf.index('\n', start) + 1 if end: yield buf[start:end] start = end buf = buf[start:] if buf: yield buf # Now drain the last few lines from the wrapper, until EOF. for line in stdout: yield line proc.terminate() proc.wait() def parent(): repo_top = Path(__file__ + '/../../..').resolve() os.chdir(repo_top) cmd = 'bash percolate/bin/subproc_slow_output.sh 3 1'.split() for line in streaming_subproc(cmd): line = line.rstrip('\n') print(f']] {line} [[') if __name__ == '__main__': typer.run(parent) ``` #### File: vision/find_shape/web_image.py ```python from hashlib import sha3_224 from pathlib import Path import requests class WebImage: """Offers cached access to images from the web.""" def __init__(self, url, fname='shapes.jpg', temp=Path('/tmp')): self.url = url pfx = 'img' + sha3_224(url.encode()).hexdigest()[:4] self.fspec = temp / f'{pfx}_{fname}' def image(self): if not self.fspec.exists(): resp = requests.get(self.url) resp.raise_for_status() with open(self.fspec, 'wb') as fout: fout.write(resp.content) return f'{self.fspec}' ``` #### File: loadtxt.d/rust_lib/load_2.py ```python from pathlib import Path from rust_fast import load_txt from tqdm import tqdm import numpy as np import typer def main(in_folder: Path = '/tmp/loadtxt.d'): for in_file in tqdm(sorted(in_folder.glob('*.txt')), smoothing=0.002): vals = load_txt(in_file.as_posix()) a = np.array(vals) assert (12_500,) == a.shape assert np.float64 == a.dtype if vals[0] == -0.0015479121: assert vals[1] == -0.0008777569 if __name__ == '__main__': typer.run(main) ``` #### File: web/wiki/history.py ```python from pathlib import Path import datetime as dt import os import re import subprocess from glom import glom import click import requests utc = dt.timezone.utc class HistoryScraper: def __init__(self, title: str): assert '/' not in title, title assert re.search(r'^[\w()-]+$', title), title self.page_url_prefix = f'https://en.wikipedia.org/w/rest.php/v1/page/{title}' self.title = title @staticmethod def get(url: str): headers = { 'User-Agent': 'jhanley634-history-scraper-v1' } resp = requests.get(url, headers=headers) resp.raise_for_status() return resp @staticmethod def _minor(is_minor: bool): return 'm' if is_minor else '.' author_re = re.compile(r'([\w .-]+)') @classmethod def _get_author(cls, rev: dict): name = glom(rev, 'user.name') name = cls.author_re.search(name).group(1).strip() # Sanitize. assert name, rev # Wikipedia requires a non-empty author name. pseudo_email = f"<{name.replace(' ', '_')}@wiki>" return f'{name} {pseudo_email}' # Git requires Ident + email addr, so dup it. 
older_re = re.compile(r'/history\?older_than=\d+$') def _get_reverse_history_ids(self): # now = int(dt.datetime.now(tz=dt.timezone.utc).timestamp()) older = f'{self.page_url_prefix}/history' while older: d = self.get(older).json() for rev in d['revisions']: minor = 'm' if rev['minor'] else ' ' yield (rev['id'], dt.datetime.fromisoformat(rev['timestamp'].removesuffix('Z')).replace(tzinfo=utc), self._get_author(rev), f"{minor} {rev['comment']}".strip() or '.') older = d.get('older') if older: assert self.older_re.search(older), older def _get_all_history_ids(self): return sorted(self._get_reverse_history_ids()) @staticmethod def _short_lines(txt: str) -> str: txt = txt.replace('\n', '\n\n======\n') # This improves `diff` output a bit. txt = txt.replace('<', '\n<') # Line break, for diff, on a deterministic subset of words. (Nothing special about "vowels".) txt = re.sub(r'\b([aeiou])', r'\n\1', txt, re.IGNORECASE) lines = [line.strip() # so `git log -p` won't append EOL red "trailing blank" notations for line in txt.split('\n')] return '\n'.join(lines) def write_versions(self, out_dir=Path('/tmp/wiki_history')): out_dir.mkdir(exist_ok=True) git_dir = out_dir / '.git' if not git_dir.exists(): cmd = f'cd {out_dir} && git init' subprocess.check_call(cmd, shell=True) assert git_dir.exists() comment_re = re.compile(r'^([()[\]\w  !#$%&*+,./:;<=>?@^~{|}–-]*)') single_quote = "'" xlate_tbl = str.maketrans(f'{single_quote}"\t\n', '.. ') for id_, stamp, author, comment in self._get_all_history_ids(): sec = int(stamp.timestamp()) stamp = stamp.strftime('%Y-%m-%dT%H:%M:%S') comment = comment.translate(xlate_tbl) out_file = out_dir / self.title out_file_id = Path(f'{out_file}-{id_}') if not out_file_id.exists(): resp = self.get(f'https://en.wikipedia.org/w/index.php?title={self.title}&oldid={id_}') with open(out_file, 'w') as fout: fout.write(self._short_lines(resp.text)) fout.write('\n') with open(out_file_id, 'w') as fout: fout.write(resp.text) fout.write('\n') os.utime(out_file_id, (sec, sec)) m = comment_re.search(comment) if comment != m.group(1): print(comment) print(m.group(1)) print('') cmd = f''' cd {out_dir} && git add {self.title} && git commit --date={stamp} --author "{author}" -m "{id_} {comment}"''' subprocess.check_call(cmd, shell=True) @click.command() @click.option('--article-url', default='Nathan_Safir') def main(article_url): prefix = 'https://en.wikipedia.org/wiki/' # We strip the prefix if present, for copy-n-paste convenience HistoryScraper(article_url.replace(prefix, '')).write_versions() if __name__ == '__main__': main() ```
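The last file above, `web/wiki/history.py`, is also usable outside its click entry point. The sketch below replays one article's revision history into a local git repository; the article title and output folder are only examples, and network access plus a working `git` are assumed.

```python
# Command-line use:
#   python history.py --article-url Python_(programming_language)
#
# The same thing programmatically:
from pathlib import Path

HistoryScraper('Python_(programming_language)').write_versions(Path('/tmp/wiki_history'))
```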
{ "source": "jhanley634/pysqlgrid", "score": 2 }
#### File: pysqlgrid/pysqlgrid/pysqlgrid.py ```python import csv import datetime as dt import html import io import re import urllib.parse import flask import sqlalchemy.exc as exc import sqlalchemy.orm as orm class PySqlGrid: """Turns a SQL query into an interactive sortable data grid. All results are computed in the backend DB + web servers. """ def __init__(self, engine, query: str, sort_cols: list): self.engine = engine self.query_sorted = '{}\n order by {}'.format( query.rstrip(), ', '.join(sort_cols)) self.result = self._execute(self.query_sorted + ' limit 1000') self.col_names = self._get_metadata() def _execute(self, query): result = None sess = orm.sessionmaker(self.engine)() try: result = sess.execute(query) except exc.StatementError: # Can't reconnect until invalid transaction is rolled back # (even though `query` really did start with 'select ...') sess.rollback() result = sess.execute(query) return result def _get_metadata(self): return [col.name for col in self.result.cursor.description] def _render_html_head(self, table_name, top_text=''): dl_link = href(add_query_arg(flask.request.url, 'csv'), 'csv') return f"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd"> <html> <head> <title>SQL datagrid - {table_name}</title> <link rel="stylesheet" type="text/css" href="/static/style.css"> </head> <body> {top_text} <p class="csv_download_link">{dl_link}</p> <pre class="query_small">{self.query_sorted}</pre> """ def render_table(self): if 'csv' in flask.request.args: return self._render_as_csv() rows = [self._render_column_heading()] for i, row in enumerate(self.result.fetchall()): row_class = 'mod%d' % (i % 3) html = ['<td class="col_{}">{}</td>'.format( self.col_names[j], self._render_element(val)) for j, val in enumerate(row)] rows.append('<tr class="{}">{}</tr>'.format( row_class, ' '.join(html))) rows.append('</table>') return self._render_html_head(self._get_tbl_name()) + '\n'.join(rows) def _render_element(self, elt): if isinstance(elt, dt.datetime): day = elt.strftime('%Y-%m-%d ') hms = elt.strftime('<span class="small_hms">%H:%M:%S</span>') return day + hms return html.escape(str(elt)) def _render_column_heading(self): cols = ['<th><span class="label">{}</span>{}</th>'.format( col_name.replace('_', ' '), self._sort_up_down(col_name)) for col_name in self.col_names] return '\n<table summary=""><tr>{}</tr>\n'.format(''.join(cols)) def _sort_up_down(self, col_name): return ' &nbsp; {}{}{}'.format( href('?sort=' + col_name + '+ASC', '&uarr;'), href('?', '&#x2612;'), href('?sort=' + col_name + '+DESC', '&darr;'), ) def _get_tbl_name(self): """Returns name of first table SELECTed from, if available.""" # We discard schema, so FROM scm.tbl becomes tbl. m = re.search(r'\bfrom\s+(\w+\.)?(\w+)', self.query_sorted, re.IGNORECASE) return m.group(2) if m else 'table' def _render_as_csv(self): fout = io.StringIO() sheet = csv.writer(fout) sheet.writerow(self.col_names) for row in self.result.fetchall(): sheet.writerow(row) fout.seek(0) # Equivalently we could return a triple: text, 200, dict return flask.Response(fout.read(), mimetype='text/plain') def href(url, txt): return "<a href='{}'>{}</a>".format(url, txt) def add_query_arg(url, query_arg): parts = list(urllib.parse.urlparse(url)) parts[4] += '&' + query_arg return urllib.parse.urlunparse(parts) ```
{ "source": "jhanley634/testing-tools", "score": 2 }
#### File: testing-tools/file_source/store_working_file_names.py ```python import argparse import os import re import sqlite3 import subprocess def store(conn, repo): ins = "insert into file_source (pathname, src, pkg) values (?,'git',?)" curs = conn.cursor() if repo == '[clean]': curs.execute("delete from file_source where src = 'git'") return os.chdir(os.path.join(repo, '.git')) # We verify it exists. os.chdir(repo) for path in backed_up_files(repo, paths_not_in_repo(repo)): curs.execute(ins, (path, repo)) def backed_up_files(repo, unsafe): for root, dirs, files in os.walk(repo): for file in files: path = os.path.join(root, file) if path not in unsafe: yield path def paths_not_in_repo(prefix_dir): '''Returns files not yet safely backed up.''' strip_slash_re = re.compile(r'/$') # e.g. 'bin/' stdout = subprocess.check_output('git status --porcelain'.split()) lines = [os.path.join(prefix_dir, strip_slash_re.sub('', line[3:])) for line in stdout.decode('utf8').split('\n') if len(line) > 0 and line[2] == ' '] return set(lines) def arg_parser(): p = argparse.ArgumentParser(description='Store backed-up filenames.') p.add_argument('db_directory', help='location of sqlite database') p.add_argument('repo_directory', nargs='?', default='[clean]', help='working dir of a git repository') return p if __name__ == '__main__': args = arg_parser().parse_args() db_file = os.path.join(args.db_directory, 'pkg_contents.sqlite') with sqlite3.connect(db_file) as conn: store(conn, args.repo_directory) ``` #### File: problem/bench/compress.py ```python import unittest def _get_chars_plus_sentinel(s): yield from s yield None # sentinel def compress_rle(s: str): """Compresses letters, e.g. AAA -> A3, leaving A alone (since A1 would be longer).""" # This is a primitive Run Length Encoding scheme. # Since digits are prohibited in the input, a compressed output token # like A24 is unambiguous, we know '4' was not in the input. ret = [] prev = None count = 0 for c in _get_chars_plus_sentinel(s): assert str(c).isalpha(), f'input must be alphabetic letters, but it contained {c}' if c != prev: if prev: ret.append(prev) if count > 1: ret.append(str(count)) count = 0 prev = c count += 1 return ''.join(ret) def _get_tokens(s): count = 0 ch = None for c in _get_chars_plus_sentinel(s): if str(c).isdigit(): # The text 'None" is not a digit. count *= 10 count += int(c) else: if ch: yield ch, count if count else 1 ch = c count = 0 def uncompress_rle(compressed: str): """Run length decoder, turns e.g. A3 into AAA""" return ''.join(ch * count for ch, count in _get_tokens(compressed)) class Compress(unittest.TestCase): def test_compress_run_length_encoder(self): msg = 'xyyyyzz' for i in range(9, 13): msg += 'j' + 'k' * i self.assertEqual('xy4z2jk9jk10jk11jk12', compress_rle(msg)) while msg: self.assertEqual(msg, uncompress_rle(compress_rle(msg))) msg = msg[1:] self.assertEqual('', uncompress_rle(compress_rle(''))) def test_compress_validates_the_input_message(self): msg = 'A24' # Digits may not appear in the input message -- this is enforced. 
with self.assertRaises(AssertionError): compress_rle(msg) ``` #### File: bench/db/id_gaps.py ```python from sqlalchemy import text from sqlalchemy.engine.base import Engine from uszipcode import SearchEngine class GapFinder: def __init__(self, engine: Engine): self.engine = engine def _get_ids(self): select = "SELECT zipcode FROM simple_zipcode ORDER BY 1" with self.engine.connect() as conn: for id_, in conn.execute(text(select)): yield int(id_) def _get_filtered_ids(self, k=100): """We discard most input rows, yielding every K-th ID.""" # The final ID typically is suppressed, for random number of input rows. countdown = 0 for id_ in self._get_ids(): if countdown == 0: yield id_ countdown = k countdown -= 1 def measure_gaps(self): prev = 0 for id_ in self._get_filtered_ids(): delta = id_ - prev print(delta, id_) prev = id_ if __name__ == '__main__': """example usage: $ ./id_gaps.py | cat -n | sort -nk2 """ GapFinder(SearchEngine().engine).measure_gaps() ``` #### File: problem/bench/test_bbox.py ```python import os import timeit import unittest import PIL.Image import PIL.ImageColor import PIL.ImageDraw2 import numpy as np def get_marked_image(mark_coord=None, radius=20, im_size=(200, 100)): if mark_coord is None: mark_coord = (20, 10) x0, y0 = mark_coord box = (x0, y0, x0 + radius, y0 + radius) pen = PIL.ImageDraw2.Pen('steelblue', width=9) im = PIL.Image.new('RGBA', im_size) draw = PIL.ImageDraw2.Draw(im) draw.ellipse(box, pen) return im class BboxFinder1: @staticmethod def _find_first_positive(vals, reverse=False): start = 0 sgn = 1 if reverse: start = len(vals) - 1 sgn = -1 vals = reversed(vals) for i, val in enumerate(vals): if val > 0: return start + sgn * i return start + sgn * i class BboxFinder2: @staticmethod def _find_first_positive(vals, reverse=False): nz = np.nonzero(vals)[0] if len(nz) == 0: if reverse: return len(vals) else: return 0 if reverse: return nz[-1] else: return nz[0] class BboxFinder(BboxFinder2): @classmethod def find_bbox(cls, im): pix = np.array(im.convert('L')) # one-byte greyscale col_sums = pix.sum(axis=0) row_sums = pix.sum(axis=1) assert len(col_sums) == im.width assert len(row_sums) == im.height x0 = cls._find_first_positive(col_sums) x1 = cls._find_first_positive(col_sums, reverse=True) y0 = cls._find_first_positive(row_sums) y1 = cls._find_first_positive(row_sums, reverse=True) return (x0, y0, x1, y1) class BboxFinderTest(unittest.TestCase): @staticmethod def _find1(): im = get_marked_image() return BboxFinder.find_bbox(im) def test_bbox_finder(self): im = get_marked_image() im.save(os.path.expanduser('~/Desktop/t.png')) bbox = BboxFinder.find_bbox(im) self.assertEqual((20, 10, 40, 30), bbox) # t = timeit.Timer(self._find1).autorange() elapsed = timeit.timeit(self._find1, number=1000) print(f'{elapsed:.3f}') self.assertLess(.15, elapsed) self.assertLess(elapsed, .99) ``` #### File: problem/breadcrumb/etl.py ```python import datetime import glob import json import re import sys import time from sqlalchemy import Table import sqlalchemy.ext.declarative # sys.path.append('.') import dbcred Base = sqlalchemy.ext.declarative.declarative_base() def etl_many(files): '''Extract, transform, & load a collection of JSON files.''' for file in files: m = re.search(r'(\d+)\.json$', file) assert m, file file_no = int(m.group(1)) with open(file) as fin: if file_no >= 19: etl1(file_no, json.loads(fin.read())) def etl1(file_no, d, user='2002'): '''Parse the input dictionary d.''' # print(json.dumps(d, sort_keys=True, indent=4)) assert(d['userId'] == user) d['tripId'] = 
re.sub('^' + user, '', d['tripId']) start, end, id = [int(d[k]) for k in 'tripStart tripEnd tripId'.split()] assert 0 <= start - id < 80, start - id # id is assigned within 80 msec six_sec = 6 * 1e3 assert 0 < d['tripPoints'][0]['timeStamp'] - start < six_sec if end != 0: assert 0 < end - d['tripPoints'][-1]['timeStamp'] < six_sec longest_journey = 6 * 3600 * 1000 # six hours assert end - start < longest_journey, end - start if end < start: assert end == 0, end # Four trips exhibit this corrupted end time. insert(file_no, start, d['tripPoints']) def insert(file_no, start, points): prev = '1970-01' # Time began in <NAME>, NJ in January 1970. trip_point = Table('trip_point', META, autoload=True, autoload_with=ENGINE) CONN.execute(trip_point.delete().where('file_no=%d' % file_no)) print('\n%d' % file_no) n = 1 t0 = time.time() expected = set('bearing edgeId lat lng' ' rpm speed timeStamp timeZone'.split()) for point in points: assert expected == point.keys() assert 0 == point['timeZone'] assert 0 <= point['bearing'] < 360, point['bearing'] assert 0 <= point['rpm'] < 4200 assert 0 <= point['speed'] < 600 stamp = iso8601(point['timeStamp']) assert prev <= stamp # Breadcrumb stamps are monotonic. if prev == stamp: # On trip 3 oversampling occurred just twice. # Trip 3's elapsed: 1786.941, 1787.892, 1789.683, 1789.918 # Trip 5 has eight oversample events, but it's frequent on 4 & 12. print('\nSuppressing closely spaced reading at', stamp) continue # Avoid trouble with unique index. prev = stamp print('.', end='') sys.stdout.flush() n += 1 CONN.execute(trip_point.insert().values( file_no=file_no, stamp=stamp, elapsed=(point['timeStamp'] - start) / 1e3, lng=point['lng'], lat=point['lat'], bearing=round10(point['bearing']), edge_id=point['edgeId'], rpm=point['rpm'], speed=point['speed'], )) CONN.execute('commit') tput = n / (time.time() - t0) print('%.1f rows/sec' % tput) def ms_to_date(msec): return datetime.datetime.utcfromtimestamp(msec / 1e3) def round10(n, k=10): return float(round(n * k)) / k def iso8601(msec): d = datetime.datetime.utcfromtimestamp(msec / 1e3) return d.strftime('%Y-%m-%d %H:%M:%S') if __name__ == '__main__': CONN, ENGINE, META = dbcred.get_cem('breadcrumb') etl_many(glob.glob('/tmp/2002/*.json')) ``` #### File: problem/charge_state/charge.py ```python from pathlib import Path import datetime as dt import re from pandas.plotting import register_matplotlib_converters import matplotlib matplotlib.use('Agg') # noqa E402 import matplotlib.pyplot as plt import pandas as pd import seaborn as sns def plot(df): register_matplotlib_converters() sns.set() fig, (ax1, ax2) = plt.subplots(2) ax1.scatter(df.stamp, df.range) ax2.scatter(df.stamp, df.odometer) fig.autofmt_xdate() # g = sns.relplot(x='stamp', y='range', kind='line', data=df) # g = sns.relplot(x='odometer', y='range', kind='line', data=df) # g.fig.autofmt_xdate() folder = Path('~/Desktop').expanduser() plt.savefig(folder / 'charge.png') def read_csv(): stamp_miles_re = re.compile( r'^<(\d{4}-\d+-\d+ \w{3} \d+:\d+)>\s*(\d+)\s+(\d+)') fspec = (Path(__file__) / '../charge.txt').resolve() rows = [] with open(fspec) as fin: for line in fin: m = stamp_miles_re.search(line) if m: stamp, odometer, range = m.groups() stamp = dt.datetime.strptime(stamp, '%Y-%m-%d %a %H:%M') rows.append(dict(stamp=stamp, odometer=int(odometer), range=int(range))) folder = Path('~/Desktop').expanduser() out_file = str(folder / 'charge.csv') columns = list(rows[0].keys()) df = pd.DataFrame(rows, columns=columns) df.to_csv(out_file, columns=columns, 
index=False) return df if __name__ == '__main__': plot(read_csv()) ``` #### File: problem/code_explode/import_to_pkg.py ```python from pathlib import Path import importlib import pprint import subprocess import sys import pkg_resources as pkg class ImportToPkg: def __init__(self): self.import_to_pkg = dict(self.get_import_to_pkg()) def get_pkg_names(self): for line in subprocess.check_output('conda list'.split()).decode().splitlines(): if not line.startswith('#'): # columns are: Name Version Build Channel pkg_name = line.split()[0] yield pkg_name def get_import_to_pkg(self): for name in self.get_pkg_names(): try: meta = pkg.get_distribution(name) except pkg.DistributionNotFound: continue # Skips binaries: arrow-cpp, boost-cpp, brotli, bzip2, c-ares. folder = Path(meta.egg_info) try: import_name = self._get_imports(folder / 'top_level.txt')[0].rstrip() except FileNotFoundError: continue # Skips the entrypoints-0.3 package try: importlib.import_module(import_name) sys.modules[import_name] # Verify that it actually _was_ imported. except ModuleNotFoundError: continue # Skips 'amd' from cvxopt. yield import_name, meta.project_name @classmethod def _get_imports(cls, fspec): with open(fspec) as fin: lines = fin.readlines() return sorted(lines, key=cls._underscores_to_the_end) @staticmethod def _underscores_to_the_end(s): # The '_' character is between 'Z' & 'a'. This helper moves it past 'z', # so names starting with a letter will sort earlier than underscore names. return s.replace('_', '~') if __name__ == '__main__': pprint.pprint(ImportToPkg().import_to_pkg) ``` #### File: problem/column_explorer/column_explorer.py ```python import click import sqlalchemy as sa import sqlalchemy.dialects.postgresql.base as pg_base import sqlalchemy.sql.sqltypes as sqltypes import uszipcode def get_zipcode_session(): return uszipcode.SearchEngine().ses def get_zipcode_cs(): """Returns a JDBC connect string for the zipcode database.""" # typical value: sqlite:////Users/foo/.uszipcode/simple_db.sqlite return get_zipcode_session().connection().engine.url class ColumnExplorer: def __init__(self, cs_or_engine): self.engine = sa.create_engine(cs_or_engine) def _get_table(self, table_name): kwargs = dict(autoload=True) table_short_name = table_name if '.' in table_name: schema, table_short_name = table_name.split('.') kwargs['schema'] = schema meta = sa.MetaData(bind=self.engine) return sa.Table(table_short_name, meta, **kwargs) def show_informative_columns(self, table_name): """A column is uninformative if it has a constant value, e.g. always NULL.""" tbl = self._get_table(table_name) cnt, = self.engine.execute(f'select count(*) from {table_name}').fetchone() if cnt <= 1: return # Nothing to see here, by definition there are no informative columns. 
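        # Count distinct values per column and print only those with more than
        # one distinct value, each followed by a trailing comma.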
for column, _ in self._get_cols(tbl): select = f'select count(distinct {column}) from {table_name}' cnt, = self.engine.execute(select).fetchone() # print(f'{cnt:8d} {column}') if cnt > 1: print(column + ',') def report(self, table_name, round_digits=3): tbl = self._get_table(table_name) stat = None non_numeric = set([ sqltypes.BLOB, sqltypes.BOOLEAN, sqltypes.CHAR, sqltypes.DATE, sqltypes.TEXT, sqltypes.VARCHAR, pg_base.CIDR, pg_base.ENUM, pg_base.INET, pg_base.INTERVAL, pg_base.TIME, pg_base.TIMESTAMP, ]) cnt, = self.engine.execute(f'select count(*) from {table_name}').fetchone() print(f'# {table_name}\n{cnt} rows, {len(tbl.c)} columns\n') for column, typ in self._get_cols(tbl): print('\n## ' + column) for agg in ['min(', 'avg(', 'max(', 'mode', 'mode count', 'nulls', 'count(distinct ']: params = {} select = f'select {agg}{column}) from {table_name}' if agg == 'avg(' and typ in non_numeric: continue if agg == 'mode': select = (f'select {column} from {table_name}' f' group by {column} order by count(*) desc limit 1') if cnt == 0: continue if agg == 'mode count': params = dict(val=stat) select = f'select count(*) from {table_name} where {column} = :val' if ((agg == 'nulls') or (agg == 'mode count' and stat is None)): select = f'select count(*) from {table_name} where {column} is null' stat, = self.engine.execute(sa.text(select), params).fetchone() if agg == 'avg(' and stat is not None: stat = round(stat, round_digits) if agg == 'nulls': pct = round(100 * stat / cnt, round_digits) stat = f'{stat} ({pct} %)' print('-', agg.replace('(', ' '), stat) print(f'\n{cnt} rows in {table_name}') def _get_cols(self, table): for col in table.columns: if type(col.type) != sqltypes.BOOLEAN: # Can't take max(B) of boolean B. yield str(col).split('.')[-1], type(col.type) @click.command() @click.option('--uri-getter', default='get_zipcode_cs') @click.option('--table', default='simple_zipcode') def main(uri_getter, table): callable = globals()[uri_getter] ce = ColumnExplorer(callable()) # ce.show_informative_columns(table) ce.report(table) if __name__ == '__main__': main() ``` #### File: problem/covid/sd_cases_deaths.py ```python import datetime as dt from altair import datum from covid.us_cases_deaths import delta, get_cases_and_deaths, get_chart, smooth import altair as alt import streamlit as st def _get_annotation(df): # https://en.wikipedia.org/wiki/Sturgis_Motorcycle_Rally rally = 1e3 * dt.datetime.strptime('2020-08-07', '%Y-%m-%d').timestamp() ten_days = 10 * 1e3 * 86400 annotation = alt.Chart(df).mark_text( align='left', baseline='middle', fontSize=20, dx=7 ).encode( x='date', y='val', text='label' ).transform_filter( (rally <= datum.date) & (datum.date < rally + ten_days) ) return annotation def main(): df = get_cases_and_deaths('us-states.csv', 'South Dakota') df['label'] = '.' 
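    # Chart cumulative counts on linear and log scales, each overlaid with the
    # Sturgis rally annotation, then chart the 7-day-smoothed daily deltas.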
st.altair_chart(get_chart(df) + _get_annotation(df)) st.altair_chart(get_chart(df, 'log') + _get_annotation(df)) delta(df) smooth(df, span=7) st.altair_chart(get_chart(df) + _get_annotation(df)) if __name__ == '__main__': main() ``` #### File: problem/incremental_row_copy/table_updater_test.py ```python from pathlib import Path import datetime as dt import os import unittest import sqlalchemy as sa import sqlalchemy.orm as orm from problem.incremental_row_copy.table_updater import TableUpdater from problem.incremental_row_copy.tbl_event_log import EventLog from problem.incremental_row_copy.tbl_event_log_copy import EventLogCopy def gen_events(engine: sa.engine.Engine, n): sess = orm.sessionmaker(bind=engine)() for i in range(int(n)): event = EventLog( stamp=dt.datetime.now(), id=i % 10, event='foo %7d' % i) sess.add(event) sess.commit() def num_rows(engine, table_name): query = f'select count(*) from {table_name}' return engine.execute(query).fetchone()[0] class TableUpdaterTest(unittest.TestCase): def setUp(self, unlink=True): db_file = Path(os.environ.get('EVENT_DB_FILE', '/tmp/event_log.db')) if unlink and db_file.exists(): db_file.unlink() # We start afresh each time. self.db_url = f'sqlite:///{db_file}' self.engine = sa.create_engine(self.db_url) tables = ( EventLog.__table__, EventLogCopy.__table__, ) meta = sa.MetaData(bind=self.engine) meta.create_all(tables=tables) def test_update(self): dest_name = EventLogCopy.__tablename__ src_name = EventLog.__tablename__ self.assertEqual(0, num_rows(self.engine, dest_name)) self.assertEqual(0, num_rows(self.engine, src_name)) upd = TableUpdater(self.engine, EventLog, EventLogCopy) gen_events(self.engine, 7) upd.update() self.assertEqual(7, num_rows(self.engine, src_name)) self.assertEqual(7, num_rows(self.engine, dest_name)) gen_events(self.engine, 2) upd.update() self.assertEqual(9, num_rows(self.engine, src_name)) self.assertEqual(9, num_rows(self.engine, dest_name)) # Now update a src event log message, and verify it appears in dest. msg = 'six' self.assertEqual(0, len(list(upd.sess.query(EventLog).filter( EventLog.event == 'msg')))) six = list(upd.sess.query(EventLog).filter(EventLog.id == 6))[0] six.stamp = dt.datetime.now() six.event = msg upd.sess.commit() upd.update() self.assertEqual(1, len(list(upd.sess.query(EventLog).filter( EventLog.event == 'msg')))) if __name__ == '__main__': unittest.main() ``` #### File: problem/memory_use/controller.py ```python import gc import math import os import pprint import subprocess from memory_use.allocator import ListAllocator def find_acceptable_memory_size(type_): meg = 2 ** 20 successful_size = size = bottom = meg # initial target allocation is one megabyte top = math.inf # top is always greater than acceptable size, will cause failure accuracy_threshold = 10 * meg while top - bottom > accuracy_threshold: cmd = 'memory_use/mem_hog.py --bytes={}'.format(size) p = subprocess.run(cmd, shell=True) if p.returncode: assert 137 == p.returncode, p.returncode # malloc fail top = size else: bottom = successful_size = size # Yay! We survived. if top == math.inf: size *= 2 # The sky's the limit! 
(so far) else: size = (top - bottom) // 2 + bottom # binary search return successful_size def _helper(size): a = ListAllocator() return a.allocate(size) def allocate_then_out_of_scope(size, k=12): for _ in range(k): print(size, _) assert _helper(size) >= size def allocate_then_del(size, k=12): a = ListAllocator() for _ in range(k): print(size, _) assert a.allocate(size) >= size del a.big_list def show(): """Shows GC stats, to help identify when garbage collection was invoked.""" print('GC counts:', gc.get_count()) pprint.pprint(gc.get_stats()) def main(type_='list', margin=.10): show() size = int((1 - margin) * find_acceptable_memory_size(type_)) # 90% of max mem size show() allocate_then_out_of_scope(size) show() allocate_then_del(size) show() # allocate_then_del(int(1.4 * size)) # This, predictably, will fail. if __name__ == '__main__': os.chdir('../problem') main() ``` #### File: problem/memory_use/mem_hog.py ```python import click from memory_use.allocator import DfAllocator, DictAllocator, ListAllocator @click.command() @click.option('--bytes', type=int) @click.option('--kind', default='df') def hog(bytes, kind): m = dict(zip('list dict df'.split(), [ListAllocator, DictAllocator, DfAllocator])) a = m[kind]() a.allocate(bytes) if __name__ == '__main__': hog() ``` #### File: nearby_zips/tsp/tsp.py ```python from operator import itemgetter import json import os from geopy.distance import distance from tspy import TSP from tspy.solvers import TwoOpt_solver import folium import numpy as np import tspy.lower_bounds.lp as lp from problem.nearby_zips.tsp.travel_map import parse_addresses, shorten def _dist(loc1, loc2): return round(distance(loc1, loc2).meters, 2) # cm resolution class PlaceGroup: """Models a collection of places (or, in TSP parlance, cities).""" def __init__(self): self.places_with_description = self._get_places() locs = [loc for loc, _ in self.places_with_description] bb_s, bb_w = self._find_origin(locs) self.locs = [(_dist((lat, lng), (bb_s, lng)), _dist((lat, lng), (lat, bb_w))) for lat, lng in locs] self.origin = bb_s, bb_w def _get_places(self, infile='/tmp/addrs.txt'): if not os.path.exists(self._json_filename(infile)): with open(infile) as fin: places = [(loc, addr) for loc, addr, details in parse_addresses(fin)] with open(self._json_filename(infile), 'w') as fout: json.dump(places, fout, indent=2) with open(self._json_filename(infile)) as fin: return json.load(fin) @staticmethod def _json_filename(txt_filename): return txt_filename.replace('.txt', '.json') @staticmethod def _find_origin(locs): bb_s = min(map(itemgetter(0), locs)) # lat bb_w = min(map(itemgetter(1), locs)) # lng return bb_s, bb_w def plot(tour, pg, outfile='~/Desktop/map.html'): map_ = folium.Map(location=pg.origin, tiles='Stamen Terrain', zoom_start=14) prev = None for idx in tour: loc, desc = pg.places_with_description[idx] print(idx, ' ', shorten(desc)) if prev: folium.PolyLine([prev, loc], color='purple').add_to(map_) prev = loc map_.save(os.path.expanduser(outfile)) def traveling_salesman(): pg = PlaceGroup() tsp = TSP() tsp.read_data(np.array(pg.locs)) two_opt = TwoOpt_solver(initial_tour='NN') tour = two_opt.solve(tsp) plot(tour, pg) assert len(pg.locs) + 1 == len(tour) tsp.get_approx_solution(two_opt) for BoundTechnique in [ lp.Simple_LP_bound, lp.Connected_LP_bound, lp.MinCut_LP_bound, # hk.Held_Karp, ]: print(f'\n{BoundTechnique.__name__}') tsp.get_lower_bound(BoundTechnique()) if __name__ == '__main__': traveling_salesman() ``` #### File: pop_map/grid/grid_map.py ```python from decimal 
import Decimal from geopy.distance import distance import geopy import pandas as pd import pydeck as pdk import streamlit as st import uszipcode def step_size(miles=64): # https://en.wikipedia.org/wiki/St._Louis_Lambert_International_Airport stl = geopy.Point(38.747222, -90.361389) # population midpoint one_grid = distance(miles=miles) north = one_grid.destination(stl, bearing=0) east = one_grid.destination(stl, bearing=90) lat_step = north.latitude - stl.latitude lng_step = east.longitude - stl.longitude return map(_round3, (Decimal(f'{lat_step}'), lng_step)) def _round3(n): """Rounds to nearest thousandth.""" return round(n, 3) def _get_select(bottom=None, top=None, pop_thresh=30_000): order_by = 'zipcode' in_continental_48 = "state NOT IN ('AK', 'HI', 'PR')" in_raster = "TRUE" if top: in_raster = f"{bottom} <= lat AND lat < {top}" order_by = 'lng' return f""" SELECT lat, lng AS lon, population, major_city FROM simple_zipcode WHERE lat > 0 AND zipcode_type = 'Standard' AND population >= {pop_thresh} AND {in_continental_48} AND {in_raster} ORDER BY {order_by} """ def _get_rows(): search = uszipcode.SearchEngine() return list(map(dict, search.ses.execute(_get_select()))) class GridCell: """Models 1 dimension of a geographic grid. Consider a chessboard. Then a grid cell is one of 64 squares. As we scan a raster of 8 cells, left to right, a grid cell models the left and right boundaries of a cell. We use repeated addition, rather than base + i * step. This motivates adding Decimals, scaled integers, to avoid annoying roundoff effects. """ @staticmethod def _dec(n): """Converts to scaled integer, to avoid IEEE-754 roundoff nonsense.""" # Repeatedly applying this function won't change n. return Decimal(str(n)) # We use str() because # python 3.1 and later will choose the shortest decimal # using David Gay's algorithm. # https://docs.python.org/3/tutorial/floatingpoint.html # https://docs.python.org/3/whatsnew/3.1.html#other-language-changes # https://web.archive.org/web/2006/http://ftp.ccs.neu.edu/pub/people/will/retrospective.pdf # round-trip: https://people.csail.mit.edu/jaffer/r5rs/Numerical-input-and-output.html def __init__(self, west, lng_step): self.west = self._dec(west) self.lng_step = self._dec(lng_step) def contains(self, lng): """Predicate.""" lng = self._dec(lng) assert self.west <= lng # Caller must play nice, e.g. Honolulu is west of WA. return lng < self.west + self.lng_step def advance_to(self, lng): """Moves western boundary by one or more grids.""" assert not self.contains(lng) while not self.contains(lng): self.west += self.lng_step assert self.contains(lng) class GridMap: def __init__(self, pop_thresh=10_000): self.pop_thresh = pop_thresh self.ses = uszipcode.SearchEngine().ses def get_grid_counts(self): rows = [] lat_step, lng_step = step_size() oak_island_mn = 49.3 # N lat key_west_fl = Decimal(f'{int(24.6)}') lat = key_west_fl while lat <= oak_island_mn: select = _get_select(lat, lat + lat_step) rows += self._get_raster_counts(select, lng_step, lat) lat += lat_step return rows def _get_raster_counts(self, select, lng_step, south=None): def _get_dict(): return dict(count=count, total_pop=total_pop, lat=b_lat, lon=b_lng) neah_bay_wa = -125 # degrees W lng, approximately grid = GridCell(neah_bay_wa, lng_step) count = total_pop = 0 b_lat = b_lng = b_pop = 0 # Biggest city within a grid. 
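        # Sweep this raster west to east (the SELECT orders rows by lng), emitting
        # one aggregate per occupied grid cell and tracking its most populous zip.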
for lat, lng, pop, city in self.ses.execute(select): if not grid.contains(lng): if count: yield _get_dict() if south: # if caller wants grid cells displayed yield from self._show_grid_cell_pattern( south, float(grid.west), lng_step, lng) grid.advance_to(lng) count = total_pop = 0 b_lat = b_lng = b_pop = 0 assert grid.contains(lng) count += 1 total_pop += pop if b_pop < pop: # new max? b_pop = pop b_lat = lat b_lng = lng if count: # finally, the most eastern grid in a raster yield _get_dict() @staticmethod def _show_grid_cell_pattern(south, west, lng_step, lng): while west + lng_step < lng: yield dict(count=1, total_pop=10_000, lat=float(south), lon=west) west += lng_step def column_layer(df): st.pydeck_chart(pdk.Deck( map_style='mapbox://styles/mapbox/light-v9', initial_view_state=pdk.ViewState( latitude=38, longitude=-97, zoom=4, pitch=50, ), layers=[ pdk.Layer( 'ColumnLayer', data=df, get_position='[lon, lat]', get_fill_color='total_pop == 10000' ' ? [0, 0, 0, 64] : [255, 0, 255]', get_elevation="total_pop / 1000", elevation_scale=100, radius=2_000, ), ], )) def main(): # scale = st.slider('scale', 0, 3) # df = pd.DataFrame(_get_rows()) df = pd.DataFrame(GridMap().get_grid_counts()) print(df) # st.map(df) column_layer(df) if __name__ == '__main__': main() ``` #### File: pop_map/hexagon/design_study.py ```python def _display_unicode_row(ch: str, n=4): line = f'{ch} ' * n print(f"{line}\n {line}\n{line}\n") hex_horiz = '\u2394' hex_pointy = '\u2b21' black_horiz = '\u2b23' black_pointy = '\u2b22' x_super = '\u2093' ne_arrow = '\N{Heavy North East Arrow}' def display_unicode_example(): for hex in [ hex_horiz, # hex_pointy, black_horiz, # black_pointy, ]: _display_unicode_row(hex) def slash(n=3, reps=4): div = '\N{division slash}' div = ne_arrow line = f'{div} ' * n for i in range(reps): if i % 2: print(' ', end='') print(line) def _sub(s: str, hx=hex_horiz): s = s.replace('a', '\N{Quadrant lower right}') s = s.replace('b', '\N{Quadrant lower left}') s = s.replace(':', '\u268F') # ⚏ diagram for greater yin s = s.replace('M', '\N{SNOW CAPPED MOUNTAIN}') s = s.replace('C', '\N{OFFICE BUILDING}') return s.replace('x', hx) def display_ascii_horiz_height2_example(n=3, reps=4): line1 = r'/ab\__' * n line2 = r'\__/ab' * n for i in range(reps): print('\n'.join(map(_sub, (line1, line2)))) def display_ascii_horiz_height3_example(n=3, reps=4): line1 = (r' / \ y ' * n).replace('y', x_super) line2 = r'( x )---' * n line3 = r' \___/ . ' * n for i in range(reps): print('\n'.join((line1, line2, line3))) def display_ascii_horiz_height4_example(n=3, reps=4): line1 = r' / \ M ' * n line2 = r'/ : \___' * n line3 = r'\ : : / ' * n line4 = r' \___/ M ' * n for i in range(reps): print('\n'.join(map(_sub, (line1, line2, line3, line4)))) if __name__ == '__main__': display_unicode_example() display_ascii_horiz_height2_example() display_ascii_horiz_height4_example() ``` #### File: problem/pop_map/test_grid_projection.py ```python import unittest from boltons.iterutils import frange from geopy.distance import geodesic import geopy def get_circle_of_weighted_points( center, distance_m=1000, weight=1, num_points=1000): dist = geopy.distance.GeodesicDistance(meters=distance_m) return [(weight, dist.destination(point=center, bearing=b)) for b in frange(0, 360, 360 / num_points)] def get_center(weighted_points): n = len(weighted_points) assert n >= 1 mid = int(n / 2) wt, center = sorted(weighted_points, key=compare_weighted_points)[mid] total_wt = 0 lat_disp = 0 # Total (signed) displacement from center in Y direction. 
lng_disp = 0 for wt, point in weighted_points: total_wt += wt lat_disp += wt * (point.latitude - center.latitude) lng_disp += wt * (point.longitude - center.longitude) return geopy.Point(center.latitude + lat_disp / total_wt, center.longitude + lng_disp / total_wt) def compare_weighted_points(wp): wt, pt = wp return (wt, pt.latitude, pt.longitude) # NB: we care more about latitude, since distance on a parallel shows # a sin(lat) scaling effect, while dist. on a meridian has constant scale. class GridProjectionTest(unittest.TestCase): def test_distance(self): newport_ri = (41.490080, -71.312796) cleveland_oh = (41.499498, -81.695391) meters = round(geodesic(newport_ri, cleveland_oh).meters, 1) self.assertEqual(866455.4, meters) def test_projection(self): center = st_louis_mo = geopy.Point(38.627222, -90.197778) weighted_points = get_circle_of_weighted_points(center, 20_000) error = st_louis_mo.latitude - get_center(weighted_points).latitude self.assertEqual(.000114, round(error, 6)) # Answer drifted south. if __name__ == '__main__': unittest.main() ``` #### File: problem/powerset/powerset.py ```python from time import time import itertools import unittest def permutations(vals): '''Pass in a list. All permutations of the list will be generated.''' # Itertools is roughly 10x faster. # This is the four-step algorithm described (in English) in # http://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order def largest_k(a): for k in range(len(a) - 2, -1, -1): if a[k] < a[k + 1]: return k return -1 def largest_m(k, a): for m in range(len(a) - 1, k, -1): if a[k] < a[m]: return m assert None, (k, m, a) # Can't happen. if len(vals) > 0: yield tuple(vals) k = largest_k(vals) while k > -1: m = largest_m(k, vals) vals[k], vals[m] = vals[m], vals[k] # Swap k, l. 
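        # Step 4: reverse the suffix after position k to obtain the successor
        # permutation in lexicographic order.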
vals = vals[:k + 1] + list(reversed(vals[k + 1:])) yield tuple(vals) k = largest_k(vals) def powerset_recursive(n): if n == 0: return [] return powerset(n - 1) + list(itertools.permutations(tuple(range(n)))) def powerset(n): ret = [] for i in range(n): ret.extend(itertools.permutations(tuple(range(i + 1)))) return ret class GenTest(unittest.TestCase): def assertGenEqual(self, expected_list1, gen2): self.assertEqual(expected_list1, list(gen2)) class PowersetTest(GenTest): def test_permutations(self): self.assertGenEqual([], permutations(list(range(0)))) self.assertGenEqual([(0,)], permutations(list(range(1)))) self.assertGenEqual([(0, 1), (1, 0)], permutations(list(range(2)))) self.assertGenEqual([(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0), ], permutations(list(range(3)))) self.assertGenEqual([(0, 1, 2, 3), (0, 1, 3, 2), (0, 2, 1, 3), (0, 2, 3, 1), (0, 3, 1, 2), (0, 3, 2, 1), (1, 0, 2, 3), (1, 0, 3, 2), (1, 2, 0, 3), (1, 2, 3, 0), (1, 3, 0, 2), (1, 3, 2, 0), (2, 0, 1, 3), (2, 0, 3, 1), (2, 1, 0, 3), (2, 1, 3, 0), (2, 3, 0, 1), (2, 3, 1, 0), (3, 0, 1, 2), (3, 0, 2, 1), (3, 1, 0, 2), (3, 1, 2, 0), (3, 2, 0, 1), (3, 2, 1, 0), ], permutations(list(range(4)))) def test_pset_correctness(self): self.assertEqual([], powerset(0)) self.assertEqual([(0,)], powerset(1)) self.assertEqual([(0,), (0, 1), (1, 0)], powerset(2)) self.assertEqual([(0,), (0, 1), (1, 0), (0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)], powerset(3)) self.assertEqual(9, len(powerset(3))) def test_timing(self, n=10): '''Evaluating at n=11 would give 43,954,713 elements in 26 sec.''' for pset in [powerset, powerset_recursive]: # 1.15s vs 1.31s t0 = time() self.assertEqual(4037913, len(pset(n))) self.assertLess(time() - t0, 1.8) if __name__ == '__main__': unittest.main() ``` #### File: problem/regress1/regress.py ```python from pathlib import Path from sklearn.feature_selection import SelectKBest, chi2 from sklearn.linear_model import LinearRegression import matplotlib.pyplot as plt import numpy import pandas import seaborn as sns def report(fspec='~/Desktop/data.csv', pair=False): df = (pandas.read_csv(Path(fspec).expanduser()) .drop('foo', axis=1)) print(df[0:3]) ncols = df.shape[1] - 1 array = df.values X = array[:, 0:ncols] Y = array[:, ncols] find_good_features(X, Y) regress(df.median_dom.values.reshape(-1, 1), df.q3_dom.values) if pair: sns.pairplot(df) def regress(X, y): lr = LinearRegression() lr.fit(X, y) fig, ax = plt.subplots() ax.scatter(y, lr.predict(X), edgecolors=(0, 0, 0)) ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4) ax.set_xlabel('Measured') ax.set_ylabel('Predicted') plt.show() # https://machinelearningmastery.com/feature-selection-machine-learning-python/ # Feature Extraction with Univariate Statistical Tests # (Chi-squared for classification) def find_good_features(X, Y, nfeat=2): test = SelectKBest(score_func=chi2, k=nfeat) fit = test.fit(X, Y) numpy.set_printoptions(precision=3) print(fit.scores_) features = fit.transform(X) print(features[0:nfeat, :]) if __name__ == '__main__': report() ``` #### File: problem/stores_and_cust/top_pop_cities.py ```python import sys import cartopy.crs as ccrs import cartopy.io.shapereader as shpreader import matplotlib matplotlib.use('Agg') # noqa E402 import matplotlib.pyplot as plt import uszipcode def get_populous_cities(): search = uszipcode.SearchEngine() for r in search.by_population(1e5): print(r.population, r.post_office_city) yield r.lat, r.lng def draw_map(): def colorize_state(geometry): return {'facecolor': (.94, 
.94, .86), 'edgecolor': (.55, .55, .55)} fig = plt.figure() ax = fig.add_axes([0, 0, 1, 1], projection=ccrs.PlateCarree()) ax.set_extent([-125, -66.5, 20, 50], ccrs.Geodetic()) shapename = 'admin_1_states_provinces_lakes_shp' states_shp = shpreader.natural_earth(resolution='110m', category='cultural', name=shapename) ax.add_geometries( shpreader.Reader(states_shp).geometries(), ccrs.PlateCarree(), styler=colorize_state) ax.stock_img() xs = [] ys = [] for lat, lng in get_populous_cities(): xs.append(lng) ys.append(lat) ax.plot(xs, ys, 'ok', transform=ccrs.PlateCarree(), markersize=8) plt.savefig('/tmp/states.png') # from https://stackoverflow.com/questions/8315389/print-fns-as-theyre-called def tracefunc(frame, event, _, indent=[0]): if event == "call": indent[0] += 2 file = frame.f_code.co_filename.split('/')[-1] print("-" * indent[0] + "> call function", frame.f_code.co_name, file) elif event == "return": print("<" + "-" * indent[0], "exit function", frame.f_code.co_name) indent[0] -= 2 return tracefunc if __name__ == '__main__': sys.setprofile(None) # (tracefunc) draw_map() ``` #### File: viz/market/covid19_stats.py ```python from pathlib import Path import pandas as pd """Reads morbidity stats from https://github.com/nytimes/covid-19-data.git""" class Extract: @staticmethod def _get_nyt_covid19_repo(): top = Path(f'{__file__}/../../../..') # git rev-parse --show-toplevel return (top / '../covid-19-data').resolve() def __init__(self): covid = self._get_nyt_covid19_repo() self.us_stat = pd.read_csv(covid / 'us.csv') self.state_stat = pd.read_csv(covid / 'us-states.csv') self.county_stat = pd.read_csv(covid / 'us-counties.csv') class Transform(Extract): def __init__(self): super().__init__() df = self.county_stat df = df[~df.fips.isna()].copy() df['fips'] = df.fips.astype('int32') self.county_stat = df for df in [self.us_stat, self.state_stat, self.county_stat]: df['date'] = pd.to_datetime(df.date) class Load: """Empty -- we don't upload to a server such as RDBMS or S3.""" ```
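A rough usage sketch for the `Transform` stage above (not part of the repository): the import path and the presence of a sibling `covid-19-data` checkout are assumptions taken from `_get_nyt_covid19_repo`.
```python
# Hypothetical usage; assumes the module is importable as viz.market.covid19_stats
# and that the NYT covid-19-data repo is cloned next to this project.
from viz.market.covid19_stats import Transform

t = Transform()
print(t.us_stat.dtypes)                      # 'date' is parsed to datetime64
print(t.us_stat.sort_values('date').tail())  # latest national cases/deaths rows
```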
{ "source": "jhanley-com/google-cloud-run-getting-started-python-flask", "score": 3 }
#### File: jhanley-com/google-cloud-run-getting-started-python-flask/app.py ```python import os import logging from flask import Flask # Change the format of messages logged to Stackdriver logging.basicConfig(format='%(message)s', level=logging.INFO) app = Flask(__name__) @app.route('/') def home(): html = """ <html> <head> <title> Google Cloud Run - Sample Python Flask Example </title> </head> <body> <p>Hello Google Cloud Run World!</p> <a href="https://cloud.google.com/run/" target="_blank">Google Cloud Run Website</a> </body> </html> """ return html if __name__ == '__main__': app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080))) ```
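A minimal local smoke test for the sample above, assuming the snippet is saved as `app.py` on the import path; Flask's built-in test client avoids binding a real port.
```python
# Hypothetical smoke test; assumes the code above lives in app.py.
from app import app


def test_home():
    client = app.test_client()
    resp = client.get('/')
    assert resp.status_code == 200
    assert b'Hello Google Cloud Run World!' in resp.data


if __name__ == '__main__':
    test_home()
    print('ok')
```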
{ "source": "jhannington/django-rt", "score": 2 }
#### File: django-rt/django_rt/runcourier.py ```python import os import sys import signal import argparse import re import stat import logging from django_rt import VERSION, VERSION_STATUS DEFAULT_ADDR = '0.0.0.0' DEFAULT_PORT = 8080 def main(): # Parse command line parser = argparse.ArgumentParser(description='Run a Django-RT courier server.') parser.add_argument('server_type', choices=['asyncio', 'gevent'], help='server type' ) parser.add_argument('addrport', nargs='?', default=str(DEFAULT_PORT), help='TCP port and/or address to listen on' ) parser.add_argument('--unix-socket', metavar='FILE', help='listen on a Unix domain socket instead of TCP; FILE must specify a valid file path' ) parser.add_argument('--django-url', metavar='FILE', help='URL to a running Django instance (overrides RT_DJANGO_URL setting); protocol may be "http" or "http+unix"' ) parser.add_argument('--debug', action='store_const', const=True, help='log debug messages' ) args = parser.parse_args() # Show version print('Django-RT version %s (%s)' % (VERSION, VERSION_STATUS)) # Show pre-alpha warning banner print( """ ******************************************************************************** * * * WARNING: Django-RT is currently in PRE-ALPHA stages of development, and is * * not ready for public consumption. Expect major API changes and, potentially, * * security risks in pre-alpha releases. Use in production at your own risk! * * * ******************************************************************************** """) # Enable logging if args.debug: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) # Parse addrport and unix_socket args if args.unix_socket: addr = None port = None unix_socket = args.unix_socket else: unix_socket = None # Split addrport if possible, otherwise assume just the port has been given try: addr, port = args.addrport.split(':') except ValueError: addr = DEFAULT_ADDR port = args.addrport # Default addr if addr is empty (i.e. user gave addrport in the form ':PORT') if not addr: addr = DEFAULT_ADDR # Check port is an integer try: port = int(port) except ValueError: print('addrport must be in either the form "IP:PORT" or "PORT"', file=sys.stderr) sys.exit(1) if unix_socket: # Attempt to remove socket file if it already exists and is a socket. try: res = os.stat(unix_socket) if stat.S_ISSOCK(res.st_mode): os.unlink(unix_socket) except FileNotFoundError: pass # Import appropriate server class and create instance if args.server_type == 'asyncio': from django_rt.couriers.asyncio_courier import AsyncioCourier server = AsyncioCourier() elif args.server_type == 'gevent': from django_rt.couriers.gevent_courier import GeventCourier server = GeventCourier() # Trap signals to shut down gracefully def quit_handler(signum, frame): signames = { signal.SIGTERM: 'SIGTERM', signal.SIGINT: 'SIGINT', signal.SIGQUIT: 'SIGQUIT', } signal_name = signames[signum] logging.info('Caught %s; shutting down...' 
% (signal_name,)) server.stop() signal.signal(signal.SIGTERM, quit_handler) signal.signal(signal.SIGINT, quit_handler) signal.signal(signal.SIGQUIT, quit_handler) # Run server server.run(addr, port, unix_socket, args.django_url) if __name__ == '__main__': main() ``` #### File: django-rt/django_rt/settings.py ```python from django.conf import settings as dj_settings DEFAULTS = { 'RT_CORS_ALLOW_ORIGIN': None, 'RT_CORS_ALLOW_CREDENTIALS': None, 'RT_PREFIX': 'rt', 'RT_SSE_HEARTBEAT': 30, # in seconds 'RT_REDIS_HOST': 'localhost', 'RT_REDIS_PORT': 6379, 'RT_REDIS_DB': 0, 'RT_REDIS_PASSWORD': None, 'RT_SSE_RETRY': 2*1000, # in milliseconds 'RT_COURIER_IPS': ['127.0.0.1'], } class RtSettings: def __getattr__(self, name): if name in DEFAULTS: return getattr(dj_settings, name, DEFAULTS[name]) else: return getattr(dj_settings, name) settings = RtSettings() ``` #### File: chat/chat/views.py ```python import json from django.http import JsonResponse, Http404 from django.views.generic import View from django.views.generic.base import TemplateView from django_rt.publish import publish from django_rt.views import RtResourceView ROOMS = ( 'main', 'room1', 'room2', ) class ChatRoomView(TemplateView): template_name = 'chat.html' def get(self, request, room=None): # Check user has requested a valid room if room not in ROOMS: raise Http404('Room not found') self.room = room return super().get(request, room) class ApiChatRoomMessagesView(RtResourceView): messages = { 'main': [ { 'user': '<system>', 'msg': 'Welcome to Django-RT chat!', }, ], 'room1': [ { 'user': '<system>', 'msg': 'Welcome to #room1', }, ], 'room2': [ { 'user': '<system>', 'msg': 'Welcome to #room2', }, ], } def get(self, request, room=None): # Check user has requested a valid room if room not in ROOMS: raise Http404('Room not found') # Return all messages sent to the room return JsonResponse({ 'messages': self.messages[room] }) def post(self, request, room=None): # Check user has requested a valid room if room not in ROOMS: raise Http404('Room not found') # Get JSON message object msgJson = request.body.decode('utf-8') msg = json.loads(msgJson) # Store message self.messages[room].append(msg) # Publish message to event queue publish(request.path, data=msg) # Return all messages sent to the room return JsonResponse({}) def rt_get_permission(self, action, request): return True ```
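A rough client-side sketch for exercising `ApiChatRoomMessagesView` above; the host and route are assumptions (the project's urls.py is not shown here), and a real setup would also need CSRF handling.
```python
# Hypothetical client for the chat API above; the URL is an assumption.
import json
import urllib.request

payload = json.dumps({'user': 'alice', 'msg': 'hello, #main'}).encode('utf-8')
req = urllib.request.Request(
    'http://localhost:8000/api/chat/main/messages/',  # assumed route for room "main"
    data=payload,
    headers={'Content-Type': 'application/json'},
    method='POST',
)
with urllib.request.urlopen(req) as resp:
    print(resp.status, resp.read().decode('utf-8'))
```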
{ "source": "jhanse9522/toolkitten", "score": 4 }
#### File: toolkitten/Python_Hard_Way_W2/functions.py
```python
def greet_user():
    """Display a simple greeting!"""
    print("Hello!")

greet_user()


# Redefining greet_user: this version replaces the no-argument one above.
def greet_user(username):
    """Greet the named user."""
    print("Hello, " + username + "!")

greet_user('Jamey')


def make_pizza(topping='bacon'):
    """Make a single-topping pizza."""
    print("Have a " + topping + " pizza!")

make_pizza()
make_pizza('pepperoni')

end1 = "c"
end2 = "e"
end3 = "d"
print(end1 + end2 + end3)
```
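As a small aside (not part of the original exercise), the final concatenation can also be written with an f-string:
```python
# Equivalent to the concatenation above, using an f-string (Python 3.6+).
end1, end2, end3 = "c", "e", "d"
print(f"{end1}{end2}{end3}")  # prints: ced
```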
{ "source": "jhaochenz/spectral_contrastive_learning", "score": 3 }
#### File: spectral_contrastive_learning/augmentations/__init__.py ```python from .simsiam_aug import SimSiamTransform, StandardTransform from .eval_aug import Transform_single def get_aug(name='simsiam', image_size=224, train=True, train_classifier=None): if train==True: if name == 'standard': augmentation = StandardTransform(image_size) elif name == 'spectral': augmentation = SimSiamTransform(image_size) else: raise NotImplementedError elif train==False: if train_classifier is None: raise Exception augmentation = Transform_single(image_size, train=train_classifier) else: raise Exception return augmentation ``` #### File: spectral_contrastive_learning/datasets/dataset_tinyimagenet.py ```python import os import torch import torchvision.transforms as transforms from torchvision.datasets import folder import torch.utils.data import torch.utils.data.distributed import torchvision from .loader import TwoCropsTransform, GaussianBlur def get_dataset_path(): if os.path.exists('PATH_TO_DATASET'): return 'PATH_TO_DATASET' elif os.path.exists('PATH_TO_DATASET'): return 'PATH_TO_DATASET' elif os.path.exists('PATH_TO_DATASET'): return 'PATH_TO_DATASET' data_path_dict = { 'imagenet': get_dataset_path(), 'tiny-imagenet': 'PATH_TO_DATASET', 'cifar10': 'PATH_TO_DATASET', 'cifar100': 'PATH_TO_DATASET' } crop_size_dict = { 'imagenet': 224, 'tiny-imagenet': 64, 'cifar10': 32, 'cifar100': 32 } resize_size_dict = { 'imagenet': 256, 'tiny-imagenet': 74, 'cifar10': 40, 'cifar100': 40 } num_classes_dict = { 'imagenet': 1000, 'tiny-imagenet': 200, 'cifar10': 10, 'cifar100': 100 } normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) def obtain_aug(dataset, data_aug, aug_plus): crop_size = crop_size_dict[dataset] if data_aug == 'pretrain': if aug_plus: # MoCo v2's aug: similar to SimCLR https://arxiv.org/abs/2002.05709 augmentation = [ transforms.RandomResizedCrop(crop_size, scale=(0.2, 1.)), transforms.RandomApply([ transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened ], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize ] else: # MoCo v1's aug: the same as InstDisc https://arxiv.org/abs/1805.01978 augmentation = [ transforms.RandomResizedCrop(crop_size, scale=(0.2, 1.)), transforms.RandomGrayscale(p=0.2), transforms.ColorJitter(0.4, 0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize ] train_transform = TwoCropsTransform(transforms.Compose(augmentation)) elif data_aug == 'standard': train_transform = transforms.Compose([ transforms.RandomResizedCrop(crop_size), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize ]) elif data_aug == 'mocov1': train_transform = transforms.Compose([ transforms.RandomResizedCrop(crop_size, scale=(0.2, 1.)), transforms.RandomGrayscale(p=0.2), transforms.ColorJitter(0.4, 0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize ]) elif data_aug == 'mocov2': train_transform = transforms.Compose([ transforms.RandomResizedCrop(crop_size, scale=(0.2, 1.)), transforms.RandomApply([ transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened ], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize ]) else: train_transform = transforms.Compose([ transforms.Resize(resize_size_dict[dataset]), transforms.CenterCrop(crop_size), transforms.ToTensor(), 
normalize ]) return train_transform def load_train_dataset(dataset, tranform): traindir = os.path.join(data_path_dict[dataset], 'train') return folder.ImageFolder(traindir, tranform) def load_train(dataset, num_per_class, distributed, batch_size, workers, aug_plus=False, orig_aug=None, data_aug='pretrain', mode='train', random_labels=None): ''' data_aug: if pretrain, apply contrastive learning data augmentation (returning 2 crops), if standard, simply choose a single random crop (for linear classification). if off, choose center crop (no data augmentation applied). ''' data_path = data_path_dict[dataset] assert mode in ['train', 'val'] if dataset == 'cifar10': train_transform = obtain_aug(dataset, data_aug, aug_plus) train_dataset = torchvision.datasets.CIFAR10(data_path_dict['cifar10'], train=True, transform=train_transform, download=False) elif dataset == 'cifar100': train_transform = obtain_aug(dataset, data_aug, aug_plus) train_dataset = torchvision.datasets.CIFAR100(data_path_dict['cifar100'], train=True, transform=train_transform, download=False) else: traindir = os.path.join(data_path, mode) train_transform = obtain_aug(dataset, data_aug, aug_plus) if orig_aug is not None: orig_aug = obtain_aug(dataset, orig_aug, False) train_dataset = SubsetImageFolder_NoAug( traindir, orig_transform=orig_aug, transform=train_transform, num_per_class=num_per_class, random_labels=random_labels) print('train dataset size is', len(train_dataset)) if distributed: train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) else: train_sampler = None train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=batch_size, shuffle=(not distributed), num_workers=workers, pin_memory=True, sampler=train_sampler, drop_last=data_aug == 'pretrain') return train_sampler, train_loader def load_val_dataset(dataset, tranform): valdir = os.path.join(data_path_dict[dataset], 'val') return folder.ImageFolder(valdir, tranform) def load_val_loader(dataset, batch_size, workers): val_transform = transforms.Compose([ transforms.Resize(resize_size_dict[dataset]), transforms.CenterCrop(crop_size_dict[dataset]), transforms.ToTensor(), normalize ]) if dataset == 'cifar10': val_dataset = torchvision.datasets.CIFAR10(data_path_dict['cifar10'], train=False, transform=val_transform, download=False) elif dataset == 'cifar100': val_dataset = torchvision.datasets.CIFAR100(data_path_dict['cifar100'], train=False, transform=val_transform, download=False) else: valdir = os.path.join(data_path_dict[dataset], 'val') val_dataset = folder.ImageFolder(valdir, val_transform) return torch.utils.data.DataLoader( val_dataset, batch_size=batch_size, shuffle=False, num_workers=workers, pin_memory=True) def get_loaders(dataset, num_per_class, distributed, batch_size, workers, aug_plus=False, data_aug='standard', train_mode='train', random_labels=None): _, train_loader = load_train(dataset, num_per_class, distributed, batch_size, workers, aug_plus=aug_plus, data_aug=data_aug, mode=train_mode, random_labels=random_labels) val_loader = load_val_loader(dataset, batch_size, workers) return train_loader, val_loader class SubsetImageFolder_NoAug(folder.DatasetFolder): """ Data loader that loads only a subset of the samples """ def __init__(self, root, orig_transform=None, transform=None, target_transform=None, num_per_class=None, loader=folder.default_loader, extensions=folder.IMG_EXTENSIONS, random_labels=None): super(folder.DatasetFolder, self).__init__(root, transform=transform, target_transform=target_transform) classes, 
class_to_idx = self._find_classes(self.root) samples = make_dataset(self.root, class_to_idx, extensions, num_per_class) if random_labels is not None: samples = [(inst[0], rl) for (inst, rl) in zip(samples, random_labels)] if len(samples) == 0: msg = "Found 0 files in subfolders of: {}\n".format(self.root) if extensions is not None: msg += "Supported extensions are: {}".format(",".join(extensions)) raise RuntimeError(msg) self.loader = loader self.extensions = extensions self.orig_transform = orig_transform self.use_random_labels = random_labels is not None self.classes = classes self.class_to_idx = class_to_idx self.imgs = self.samples = samples self.targets = [s[1] for s in samples] def __getitem__(self, index): if self.orig_transform is None: return super(SubsetImageFolder_NoAug, self).__getitem__(index) else: path, target = self.samples[index] orig_sample = self.loader(path) if self.target_transform is not None: target = self.target_transform(target) orig_sample = self.orig_transform(orig_sample) return orig_sample, target class SubsetImageFolder(folder.DatasetFolder): """ Data loader that loads only a subset of the samples """ def __init__(self, root, orig_transform=None, transform=None, target_transform=None, num_per_class=None, loader=folder.default_loader, extensions=folder.IMG_EXTENSIONS, random_labels=None): super(folder.DatasetFolder, self).__init__(root, transform=transform, target_transform=target_transform) classes, class_to_idx = self._find_classes(self.root) samples = make_dataset(self.root, class_to_idx, extensions, num_per_class) if random_labels is not None: samples = [(inst[0], rl) for (inst, rl) in zip(samples, random_labels)] if len(samples) == 0: msg = "Found 0 files in subfolders of: {}\n".format(self.root) if extensions is not None: msg += "Supported extensions are: {}".format(",".join(extensions)) raise RuntimeError(msg) self.loader = loader self.extensions = extensions self.orig_transform = orig_transform self.use_random_labels = random_labels is not None self.classes = classes self.class_to_idx = class_to_idx self.imgs = self.samples = samples self.targets = [s[1] for s in samples] def __getitem__(self, index): if self.orig_transform is None: return super(SubsetImageFolder, self).__getitem__(index) else: path, target = self.samples[index] orig_sample = self.loader(path) if self.transform is not None: sample = self.transform(orig_sample.copy()) else: sample = orig_sample.copy() if self.target_transform is not None: target = self.target_transform(target) orig_sample = self.orig_transform(orig_sample) return (sample, orig_sample), target def make_dataset(directory, class_to_idx, extensions, num_per_class): instances = [] directory = os.path.expanduser(directory) def is_valid_file(x): return folder.has_file_allowed_extension(x, extensions) for target_class in sorted(class_to_idx.keys()): class_index = class_to_idx[target_class] target_dir = os.path.join(directory, target_class) if not os.path.isdir(target_dir): continue num_added = 0 for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)): if num_added >= num_per_class: break for fname in sorted(fnames): path = os.path.join(root, fname) if is_valid_file(path): item = path, class_index instances.append(item) num_added += 1 if num_added >= num_per_class: break return instances ``` #### File: spectral_contrastive_learning/datasets/__init__.py ```python import torch import torchvision from .dataset_tinyimagenet import load_train_dataset, load_val_dataset import torch.utils.data as data import numpy as np 
from PIL import Image import os def get_dataset(dataset, data_dir, transform, train=True, download=False): if dataset == 'cifar10': dataset = torchvision.datasets.CIFAR10('PATH_TO_DATASET', train=train, transform=transform, download=download) elif dataset == 'cifar100': dataset = torchvision.datasets.CIFAR100('PATH_TO_DATASET', train=train, transform=transform, download=download) elif dataset == 'imagenet': dataset = load_train_dataset(dataset, transform) if train==True else load_val_dataset(dataset, transform) elif dataset == 'tiny-imagenet': dataset = load_train_dataset(dataset, transform) if train==True else load_val_dataset(dataset, transform) else: raise NotImplementedError return dataset ``` #### File: jhaochenz/spectral_contrastive_learning/pretrain.py ```python import time import os import torch import torch.nn as nn import torch.nn.functional as F import torchvision import numpy as np from arguments import get_args from augmentations import get_aug from models import get_model from datasets import get_dataset from optimizers import get_optimizer, LR_Scheduler from datetime import datetime from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True def main(log_writer, log_file, device, args): iter_count = 0 train_loader = torch.utils.data.DataLoader( dataset=get_dataset( transform=get_aug(train=True, **args.aug_kwargs), train=True, **args.dataset_kwargs), shuffle=True, batch_size=args.train.batch_size, pin_memory=True, drop_last=True, num_workers=args.workers ) test_loader = torch.utils.data.DataLoader( dataset=get_dataset( transform=get_aug(train=True, **args.aug_kwargs), train=False, **args.dataset_kwargs), shuffle=True, batch_size=args.test_bs, **args.dataloader_kwargs ) # define model model = get_model(args.model).to(device) model = torch.nn.DataParallel(model) # define optimizer optimizer = get_optimizer( args.train.optimizer.name, model, lr=args.train.base_lr*args.train.batch_size/256, momentum=args.train.optimizer.momentum, weight_decay=args.train.optimizer.weight_decay) lr_scheduler = LR_Scheduler( optimizer, args.train.warmup_epochs, args.train.warmup_lr*args.train.batch_size/256, args.train.num_epochs, args.train.base_lr*args.train.batch_size/256, args.train.final_lr*args.train.batch_size/256, len(train_loader), constant_predictor_lr=True # see the end of section 4.2 predictor ) ckpt_dir = os.path.join(args.log_dir, "checkpoints") if not os.path.exists(ckpt_dir): os.makedirs(ckpt_dir) for epoch in range(0, args.train.stop_at_epoch): model.train() loss_list = [] print("number of iters this epoch: {}".format(len(train_loader))) for idx, ((images1, images2), labels) in enumerate(train_loader): iter_count += 1 model.zero_grad() data_dict = model.forward(images1.to(device, non_blocking=True), images2.to(device, non_blocking=True)) loss = data_dict['loss'].mean() loss.backward() optimizer.step() lr_scheduler.step() data_dict.update({'lr':lr_scheduler.get_lr()}) loss_list.append(loss.item()) model.eval() test_loss_list = [] for idx, ((images1, images2), labels) in enumerate(test_loader): data_dict = model.forward(images1.to(device, non_blocking=True), images2.to(device, non_blocking=True)) test_loss = data_dict['loss'].mean() test_loss_list.append(test_loss.item()) write_dict = { 'epoch': epoch, 'loss': sum(loss_list) / len(loss_list), 'lr': lr_scheduler.get_lr(), 'test_loss': sum(test_loss_list) / len(test_loss_list), } log_writer.writerow(write_dict) log_file.flush() if (epoch+1) % args.log_freq == 0: model_path = os.path.join(ckpt_dir, f"{epoch + 1}.pth") torch.save({ 
'epoch': epoch + 1, 'state_dict': model.module.state_dict() }, model_path) print(f"Model saved to {model_path}") # Save checkpoint model_path = os.path.join(ckpt_dir, f"latest_{epoch+1}.pth") torch.save({ 'epoch': epoch+1, 'state_dict':model.module.state_dict() }, model_path) print(f"Model saved to {model_path}") with open(os.path.join(args.log_dir, "checkpoints", f"checkpoint_path.txt"), 'w+') as f: f.write(f'{model_path}') if __name__ == "__main__": args, log_file, log_writer = get_args() main(log_writer, log_file, device=args.device, args=args) completed_log_dir = args.log_dir.replace('in-progress', 'debug' if args.debug else 'completed') completed_log_dir = args.log_dir.replace('in-progress', 'debug' if args.debug else 'completed') os.rename(args.log_dir, completed_log_dir) print(f'Log file has been saved to {completed_log_dir}') ```
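A short sketch of how the augmentation selector above is typically wired up; the import path is an assumption based on the file layout.
```python
# Assumed import path; get_aug is the selector defined in augmentations/__init__.py above.
from augmentations import get_aug

pretrain_tf = get_aug(name='spectral', image_size=32, train=True)   # SimSiamTransform (two crops)
eval_tf = get_aug(name='spectral', image_size=32, train=False,
                  train_classifier=True)                            # Transform_single for evaluation
```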
{ "source": "jhaowunhuang/pixelwork", "score": 3 }
#### File: pixelwork/ImageProcess/image_creat.py ```python import cv2 import numpy as np from scipy.stats import multivariate_normal as mn from scipy.stats import skewnorm import datetime class PixelWork: def __init__(self): self.input_file_name = '' self.input_img_arr = None self.output_file_name = '' self.output_img_arr = None self.img_height = 0 #default height self.img_width = 0 #default width self.spot_size = 10 #equivalent to spot size self.edge_factor = 0 #near edge effect self.signals = 200 #number of signals in each pixel # get input image array def add_image(self): # allow the file name from arguments current_input_file_name = self.input_file_name if self.input_file_name else 'sample/input.jpg' print('current_input_file_name: ', current_input_file_name) self.input_img_arr = cv2.imread(current_input_file_name, cv2.IMREAD_GRAYSCALE) self.output_img_arr = np.zeros(self.input_img_arr.shape) self.img_width, self.img_height = self.input_img_arr.shape # draw the simulated image def draw(self): time1 = datetime.datetime.now() it = np.nditer(self.input_img_arr, flags=['multi_index']) for elem in it: if elem < 128: pos = mn.rvs(it.multi_index, [[self.spot_size, 0], [0, self.spot_size]], size=self.signals) extra = np.array([[-1, -1]]) for item in pos: if all([0 <= int(round(x, 0)) < 512 for x in item]) and self.input_img_arr[int(round(item[0], 0)), int(round(item[1], 0))] >= 128: for _ in range(3): extra = np.concatenate((extra, [it.multi_index])) pos = np.concatenate((pos, extra)) x_pos, y_pos = pos.T hist, xedges, yedges = np.histogram2d(x_pos, y_pos, bins=512, range=[[0, 512], [0, 512]]) self.output_img_arr += hist else: pos = mn.rvs(it.multi_index, [[self.spot_size, 0], [0, self.spot_size]], size=self.signals//2) for item in pos: if all([0 <= int(round(x, 0)) < 512 for x in item]) and self.input_img_arr[int(round(item[0], 0)), int(round(item[1], 0))] < 128: pos = pos x_pos, y_pos = pos.T hist, xedges, yedges = np.histogram2d(x_pos, y_pos, bins=512, range=[[0, 512], [0, 512]]) self.output_img_arr += hist # allow the file name from arguments current_output_file_name = self.output_file_name if self.output_file_name else 'sample/output.jpg' print('current_output_file_name: ', current_output_file_name) cv2.imwrite(current_output_file_name, self.output_img_arr) print(datetime.datetime.now() - time1) ``` #### File: jhaowunhuang/pixelwork/main.py ```python import argparse from ImageProcess.image_creat import PixelWork def main(): parser = argparse.ArgumentParser() parser.add_argument("input_file_name", help='input file name') parser.add_argument("output_file_name", help='output file name') args = parser.parse_args() print("Input file name: ", args.input_file_name, ", Output file name: ", args.output_file_name) new_pic = PixelWork() new_pic.output_file_name = args.output_file_name new_pic.input_file_name = args.input_file_name new_pic.add_image() new_pic.draw() main() ```
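The CLI in main.py above can be bypassed for scripted use; a minimal sketch, assuming the `ImageProcess` package is importable from the project root.
```python
# Direct (non-CLI) use of PixelWork, mirroring what main() does above.
from ImageProcess.image_creat import PixelWork

work = PixelWork()
work.input_file_name = 'sample/input.jpg'    # falls back to this default when empty
work.output_file_name = 'sample/output.jpg'
work.add_image()   # loads the greyscale input and allocates the output array
work.draw()        # scatters simulated signals per pixel and writes the image
```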
{ "source": "jhapran/OCR-Form-Tools", "score": 2 }
#### File: redact/io/blob_reader.py ```python from typing import List from pathlib import Path from azure.storage.blob import ContainerClient from redact.types.file_bundle import FileBundle class BlobReader(): def __init__(self, container_url: str, prefix: str): self.container_client = ContainerClient.from_container_url( container_url) self.prefix = prefix def download_bundles(self, to: str) -> List[FileBundle]: blobs = self.container_client.list_blobs(name_starts_with=self.prefix) all_file_name_list = [Path(blob.name).name for blob in blobs] file_bundles = FileBundle.from_names(all_file_name_list) for bundle in file_bundles: image_blob_path = self.prefix + bundle.image_file_name fott_blob_path = self.prefix + bundle.fott_file_name ocr_blob_path = self.prefix + bundle.ocr_file_name image_path = Path(to, bundle.image_file_name) fott_path = Path(to, bundle.fott_file_name) ocr_path = Path(to, bundle.ocr_file_name) with open(image_path, 'wb') as image_file, \ open(fott_path, 'wb') as fott_file, \ open(ocr_path, 'wb') as ocr_file: image_file.write( self.container_client. download_blob(image_blob_path).readall()) fott_file.write( self.container_client. download_blob(fott_blob_path).readall()) ocr_file.write( self.container_client. download_blob(ocr_blob_path).readall()) return file_bundles ``` #### File: redact/redaction/ocr_result_redaction.py ```python from typing import List, Set from jsonpointer import resolve_pointer, set_pointer from redact.types.annotation import Annotation from redact.utils.bounding_box_mapping import similar from redact.utils.redact_policy import first_char class OcrResultRedaction: LINE_OVERLAP_THRESHOLD = 0.1 WORD_OVERLAP_THRESHOLD = 0.98 def __init__(self, ocr_result: dict, annotations: List[Annotation]): self.ocr_result = ocr_result self.annotations = annotations def redact(self): refs = [] for annot in self.annotations: refs.extend(self.find_mapped_refs(annot)) self.redact_words(refs) self.redact_lines(refs) # Set is faster than List in this case. self.redact_page_results(set(refs)) def find_mapped_refs(self, annot: Annotation): refs = [] read_results = self.ocr_result["analyzeResult"]["readResults"] for read_id, read_result in enumerate(read_results): lines: List[dict] = read_result["lines"] for line_id, line in enumerate(lines): # Early rejection. if not similar(annot.bounding_box, line["boundingBox"], self.LINE_OVERLAP_THRESHOLD): continue words: List[dict] = line["words"] for word_id, word in enumerate(words): if similar(annot.bounding_box, word["boundingBox"], self.WORD_OVERLAP_THRESHOLD): refs.append(self.build_ref(read_id, line_id, word_id)) return refs def redact_words(self, refs: List[str]): def word_path(ref: str) -> str: # Remove leading '#'. return ref[1:] for ref in refs: r = word_path(ref) word = resolve_pointer(self.ocr_result, r) word["text"] = first_char(word["text"]) set_pointer(self.ocr_result, r, word) def redact_lines(self, refs: List[str]): def line_path(ref: str) -> str: end = ref.find("/word") # Remove leading '#' and trailing word path. 
return ref[1:end] for ref in refs: r = line_path(ref) line = resolve_pointer(self.ocr_result, r) tokens = line["text"].split(' ') word_id = int(ref.split('/')[-1]) tokens[word_id] = first_char(tokens[word_id]) line["text"] = ' '.join(tokens) set_pointer(self.ocr_result, r, line) def redact_page_results(self, refs: Set[str]): def add_analyze_layer(elem: str) -> str: return elem.replace('#/', '#/analyzeResult/') page_results = self.ocr_result["analyzeResult"]["pageResults"] for page_result in page_results: tables: List[dict] = page_result["tables"] for table in tables: cells: List[dict] = table["cells"] for cell in cells: elements: List[str] = cell["elements"] for elem_id, element in enumerate(elements): full_elem = add_analyze_layer(element) if full_elem in refs: tokens = cell["text"].split(' ') tokens[elem_id] = first_char(tokens[elem_id]) cell["text"] = ' '.join(tokens) @ staticmethod def build_ref(read_id: int, line_id: int, word_id: int) -> str: return f'#/analyzeResult/readResults/{read_id}/lines/{line_id}/words/{word_id}' ``` #### File: redact/types/fott_label.py ```python from dataclasses import dataclass from typing import List, Dict, Tuple from redact.types.annotation import Annotation @dataclass class Entity: page: int text: str # camelCase instead of snake_case for aligning with the JSON schema. boundingBoxes: List[List[float]] @dataclass class Label: label: str value: List[Entity] @dataclass class FottLabel: labels: List[Label] def to_annotations(self, page_size: Dict[int, Tuple[float, float]] = {1: (1.0, 1.0)}) -> List[Annotation]: def to_pixel(page: int, bounding_box: List[float]) -> List[float]: width = page_size[page][0] height = page_size[page][1] ret = [] for i, elem in enumerate(bounding_box): if i % 2 == 0: ret.append(elem * width) else: ret.append(elem * height) return ret annotations = [] for label in self.labels: for entity in label.value: for bounding_box in entity.boundingBoxes: annot = Annotation( bounding_box=to_pixel(entity.page, bounding_box), field=label.label, text=entity.text) annotations.append(annot) return annotations ``` #### File: redact/utils/bounding_box_mapping.py ```python from typing import List, Tuple from shapely.geometry import Polygon OVERLAP_THRESHOLD = 0.5 def similar(bounding_box_a: List[float], bounding_box_b: List[float], threshold=OVERLAP_THRESHOLD) -> bool: a = Polygon(pairwise(bounding_box_a)) b = Polygon(pairwise(bounding_box_b)) base_area = min(a.area, b.area) intersect_area = a.intersection(b).area return intersect_area / base_area > threshold def pairwise(elements: List[float]) -> List[Tuple[float, float]]: ret = [] for i in range(0, len(elements), 2): pair = tuple([elements[i], elements[i+1]]) ret.append(pair) return ret ``` #### File: tests/redaction/test_ocr_result_redaction.py ```python from redact.redaction.ocr_result_redaction import OcrResultRedaction from tests.factories.ocr_result_factory import OcrResultFactory from tests.factories.annotation_factory import AnnotationFactory class TestOcrResultRedaction: def test_ctor(self) -> None: ocr_result = OcrResultFactory.build() annotations = AnnotationFactory.build_annotations() ocr_result_redacton = OcrResultRedaction(ocr_result, annotations) assert ocr_result_redacton.ocr_result == ocr_result def test_redact(self) -> None: ocr_result = OcrResultFactory.build() expected = OcrResultFactory.build_redacted() annotations = AnnotationFactory.build_annotations() ocr_result_redacton = OcrResultRedaction(ocr_result, annotations) ocr_result_redacton.redact() actual = 
ocr_result_redacton.ocr_result assert actual == expected ``` #### File: tests/types/test_file_bundle.py ```python from redact.types.file_bundle import FileBundle class TestFileBundle: def test_from_names(self) -> None: names = [ "a.jpg", "a.jpg.labels.json", "dummy_file.jpg", "a.jpg.ocr.json"] expected = [FileBundle( image_file_name="a.jpg", fott_file_name="a.jpg.labels.json", ocr_file_name="a.jpg.ocr.json")] actual = FileBundle.from_names(names) assert actual == expected ``` #### File: tests/types/test_fott_label.py ```python from tests.factories.annotation_factory import AnnotationFactory from tests.factories.fott_label_factory import FottLabelFactory class TestFottLabel: def test_to_annotations(self) -> None: fott_label = FottLabelFactory.build() annotations = AnnotationFactory.build_annotations() actual = fott_label.to_annotations(page_size={1: (2481, 3509)}) assert actual == annotations # Modify the first label to be on a 10-times large page. def test_to_annotations_multi_page(self) -> None: fott_label = FottLabelFactory.build() fott_label.labels[0].value[0].page = 2 annotations = AnnotationFactory.build_annotations() bbox = annotations[0].bounding_box for i, element in enumerate(bbox): bbox[i] = element * 10 actual = fott_label.to_annotations( page_size={1: (2481, 3509), 2: (24810, 35090)}) assert actual == annotations ```
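The `similar()` helper above drives both the coarse line-level match (threshold 0.1) and the strict word-level match (threshold 0.98) used by `OcrResultRedaction`. The sketch below illustrates it on two made-up eight-value bounding boxes; the coordinates are invented for this example and are not taken from the repository's test data:

```python
# Illustrative use of the overlap helpers; the coordinates below are invented.
from redact.utils.bounding_box_mapping import similar, pairwise

word_box  = [100.0, 100.0, 200.0, 100.0, 200.0, 140.0, 100.0, 140.0]
label_box = [101.0, 101.0, 201.0, 101.0, 201.0, 141.0, 101.0, 141.0]  # shifted by 1 px

print(pairwise(word_box))              # [(100.0, 100.0), (200.0, 100.0), ...]
print(similar(word_box, label_box))                  # True  (default 0.5 threshold)
print(similar(word_box, label_box, threshold=0.98))  # False (strict word-level threshold)
```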
{ "source": "jhapreis/MuonDecay", "score": 3 }
#### File: modules/graph/graph_waveforms.py ```python import os import ROOT as root import matplotlib.pyplot as plt from array import array #==================================================================================================== def GraphWaveforms_Folder( folder_path: "str", numberADChannels: "int" = 2500, tree_name: "str" = "tree_waveforms", branch_name: "str" = "waveforms", output_file: "str" = "output.txt" ) -> "int": #---------------------------------------------------------------------------------------------------- root_files = [i for i in os.listdir(folder_path) if i.endswith(".root")] # Fill TChain #---------------------------------------------------------------------------------------------------- chain = root.TChain(tree_name) for i in range( len(root_files) ): file = folder_path+'/'+root_files[i] chain.Add(file) #---------------------------------------------------------------------------------------------------- waveform_in_units = array('i', [0]*numberADChannels) chain.SetBranchAddress(branch_name, waveform_in_units) entries = chain.GetEntries() # Draw waveforms #---------------------------------------------------------------------------------------------------- fig, ax = plt.subplots() ax.set_title(f'{entries} waveforms') ax.set_xlabel(f'time (units)') ax.set_ylabel(f'value') for i in range(entries): chain.GetEntry(i) ax.plot(waveform_in_units, color='black') fig.savefig(folder_path+'/waveforms.png') return 0 #==================================================================================================== def GraphWaveforms_File( file_name: "str", folder_path: "str"= "./", numberADChannels: "int"=2500, tree_name: "str"="tree_waveforms", branch_name: "str"="waveforms" ) -> "int": #---------------------------------------------------------------------------------------------------- file = root.TFile.Open(file_name) tree = file.Get(tree_name) #---------------------------------------------------------------------------------------------------- waveform_in_mv = array('f', [0]*numberADChannels) tree.SetBranchAddress(branch_name, waveform_in_mv) entries = tree.GetEntries() # Draw waveforms #---------------------------------------------------------------------------------------------------- fig, ax = plt.subplots() ax.set_title(f'{entries} waveforms') ax.set_xlabel(f'time (ADChannel)') ax.set_ylabel(f'value (mV)') for i in range(entries): tree.GetEntry(i) ax.plot(waveform_in_mv, color='black') fig.savefig(folder_path+'/waveforms.png') plt.clf() return 0 ``` #### File: modules/root_file/delete_files.py ```python import os from ROOT import TFile #==================================================================================================== def delete_root_files_in_folder(folder:"str", tree_name:"str"='tree_waveforms'): """ Given path to folder, remove the empty root files. 
Args: folder (string) : path to folder tree_name (string): name of the TTree """ root_files = [i for i in os.listdir(folder) if i.endswith(".root")] for i in range( len(root_files) ): file = folder+'/'+root_files[i] tree_exists = delete_blank_root_file(file, tree_name) if tree_exists == False: print(f' {file}...removed') #==================================================================================================== def delete_blank_root_file(path_to_root_file, tree_name): """ Delete file if the given TTree is not found on it """ tree_exists = True try: file = TFile(path_to_root_file, "read") except: tree_exists = False else: tree_exists = file.GetListOfKeys().Contains(tree_name) file.Close() if tree_exists == False: if os.path.isfile(path_to_root_file): os.remove(path_to_root_file) else: print(f"Error: {path_to_root_file} file not found") return tree_exists ```
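A small driver script shows how the two MuonDecay helpers above are meant to be combined on an acquisition folder. The folder path and tree name below are placeholders; PyROOT and matplotlib must be importable, and the import paths are assumed to follow the `modules/...` layout given in the file headers:

```python
# Hypothetical driver: prune empty ROOT files, then overlay all waveforms.
from modules.graph.graph_waveforms import GraphWaveforms_Folder
from modules.root_file.delete_files import delete_root_files_in_folder

data_folder = "data/run_001"      # placeholder folder holding the acquired .root files
tree_name = "tree_waveforms"      # TTree name written by the acquisition code

# Remove .root files that do not contain the waveform tree (empty or aborted runs)
delete_root_files_in_folder(data_folder, tree_name=tree_name)

# Overlay every remaining waveform into <data_folder>/waveforms.png
GraphWaveforms_Folder(data_folder, numberADChannels=2500,
                      tree_name=tree_name, branch_name="waveforms")
```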
{ "source": "jhardenberg/EnsClus", "score": 2 }
#### File: EnsClus/clus/ens_eof_kmeans.py ```python import numpy as np import sys import os from sklearn.cluster import KMeans import datetime import math import pandas as pd import collections from itertools import combinations from numpy import linalg as LA def clus_eval_indexes(elements, centroids, labels): """ Computes clustering evaluation indexes, as the Davies-Bouldin Index, the Dunn Index, the optimal variance ratio and the Silhouette value. Also computes cluster sigmas and distances. """ PCs = elements ### Computing clustering evaluation Indexes numclus = len(centroids) inertia_i = np.empty(numclus) for i in range(numclus): lab_clus = labels == i inertia_i[i] = np.sum([np.sum((pcok-centroids[i])**2) for pcok in PCs[lab_clus]]) clus_eval = dict() clus_eval['Indexes'] = dict() # Optimal ratio n_clus = np.empty(numclus) for i in range(numclus): n_clus[i] = np.sum(labels == i) mean_intra_clus_variance = np.sum(inertia_i)/len(labels) dist_couples = dict() coppie = list(combinations(range(numclus), 2)) for (i,j) in coppie: dist_couples[(i,j)] = LA.norm(centroids[i]-centroids[j]) mean_inter_clus_variance = np.sum(np.array(dist_couples.values())**2)/len(coppie) clus_eval['Indexes']['Inter-Intra Variance ratio'] = mean_inter_clus_variance/mean_intra_clus_variance sigma_clusters = np.sqrt(inertia_i/n_clus) clus_eval['Indexes']['Inter-Intra Distance ratio'] = np.mean(dist_couples.values())/np.mean(sigma_clusters) # Davies-Bouldin Index R_couples = dict() for (i,j) in coppie: R_couples[(i,j)] = (sigma_clusters[i]+sigma_clusters[j])/dist_couples[(i,j)] DBI = 0. for i in range(numclus): coppie_i = [coup for coup in coppie if i in coup] Di = np.max([R_couples[cop] for cop in coppie_i]) DBI += Di DBI /= numclus clus_eval['Indexes']['Davies-Bouldin'] = DBI # Dunn Index Delta_clus = np.empty(numclus) for i in range(numclus): lab_clus = labels == i distances = [LA.norm(pcok-centroids[i]) for pcok in PCs[lab_clus]] Delta_clus[i] = np.sum(distances)/n_clus[i] clus_eval['Indexes']['Dunn'] = np.min(dist_couples.values())/np.max(Delta_clus) clus_eval['Indexes']['Dunn 2'] = np.min(dist_couples.values())/np.max(sigma_clusters) # Silhouette sils = [] for ind, el, lab in zip(range(len(PCs)), PCs, labels): lab_clus = labels == lab lab_clus[ind] = False ok_Pcs = PCs[lab_clus] a = np.sum([LA.norm(okpc - el) for okpc in ok_Pcs])/n_clus[lab] bs = [] others = range(numclus) others.remove(lab) for lab_b in others: lab_clus = labels == lab_b ok_Pcs = PCs[lab_clus] b = np.sum([LA.norm(okpc - el) for okpc in ok_Pcs])/n_clus[lab_b] bs.append(b) b = np.min(bs) sils.append((b-a)/max([a,b])) sils = np.array(sils) sil_clus = [] for i in range(numclus): lab_clus = labels == i popo = np.sum(sils[lab_clus])/n_clus[i] sil_clus.append(popo) siltot = np.sum(sil_clus)/numclus clus_eval['Indexes']['Silhouette'] = siltot clus_eval['clus_silhouettes'] = sil_clus clus_eval['Indexes']['Dunn2/DB'] = clus_eval['Indexes']['Dunn 2']/clus_eval['Indexes']['Davies-Bouldin'] clus_eval['R couples'] = R_couples clus_eval['Inter cluster distances'] = dist_couples clus_eval['Sigma clusters'] = sigma_clusters return clus_eval def ens_eof_kmeans(inputs): ''' Find the most representative ensemble member for each cluster. 
METHODS: - Empirical Orthogonal Function (EOF) analysis of the input file - K-means cluster analysis applied to the retained Principal Components (PCs) TODO: - Order clusters per frequency - Give the anomalies in input (not from file) ''' # User-defined libraries from read_netcdf import read_N_2Dfields from eof_tool import eof_computation OUTPUTdir = inputs['OUTPUTdir'] numens = inputs['numens'] name_outputs = inputs['name_outputs'] filenames = inputs['filenames'] numpcs = inputs['numpcs'] perc = inputs['perc'] numclus = inputs['numclus'] # Either perc (cluster analysis is applied on a number of PCs such as they explain # 'perc' of total variance) or numpcs (number of PCs to retain) is set: if numpcs is not None: print('Number of principal components: {0}'.format(numpcs)) if perc is not None: print('Percentage of explained variance: {0}%'.format(int(perc))) if (perc is None and numpcs is None) or (perc is not None and numpcs is not None): raise ValueError('You have to specify either "perc" or "numpcs".') print('Number of clusters: {0}'.format(numclus)) #____________Reading the netCDF file of N 2Dfields of anomalies, saved by ens_anom.py ifile=os.path.join(OUTPUTdir,'ens_anomalies_{0}.nc'.format(name_outputs)) var, varunits, lat, lon = read_N_2Dfields(ifile) print('var dim: (numens x lat x lon)={0}'.format(var.shape)) #____________Compute EOFs (Empirical Orthogonal Functions) #____________and PCs (Principal Components) with respect to ensemble memeber print('____________________________________________________________________________________________________________________') print('EOF analysis') #---------------------------------------------------------------------------------------- solver, pcs_scal1, eofs_scal2, pcs_unscal0, eofs_unscal0, varfrac = eof_computation(var,varunits,lat,lon) acc=np.cumsum(varfrac*100) if perc is not None: # Find how many PCs explain a certain percentage of variance # (find the mode relative to the percentage closest to perc, but bigger than perc) numpcs=min(enumerate(acc), key=lambda x: x[1]<=perc)[0]+1 print('\nThe number of PCs that explain the percentage closest to {0}% of variance (but grater than {0}%) is {1}'.format(perc,numpcs)) exctperc=min(enumerate(acc), key=lambda x: x[1]<=perc)[1] if numpcs is not None: exctperc=acc[numpcs-1] if np.isnan(exctperc): print(acc) raise ValueError('NaN in evaluation of variance explained by first pcs') print('(the first {0} PCs explain exactly the {1}% of variance)'.format(numpcs,"%.2f" %exctperc)) #____________Compute k-means analysis using a subset of PCs print('__________________________________________________\n') print('k-means analysis using a subset of PCs') print('_____________________________________________\n') #---------------------------------------------------------------------------------------- PCs=pcs_unscal0[:,:numpcs] clus=KMeans(n_clusters=numclus, n_init=600, max_iter=1000) start = datetime.datetime.now() clus.fit(PCs) end = datetime.datetime.now() print('k-means algorithm took me %s seconds' %(end-start)) centroids=clus.cluster_centers_ # shape---> (numclus,numpcs) labels=clus.labels_ # shape---> (numens,) inertia = clus.inertia_ ## Ordering clusters for number of members centroids = np.array(centroids) labels = np.array(labels) num_mem = [] for i in range(numclus): num_mem.append(np.sum(labels == i)) num_mem = np.array(num_mem) new_ord = num_mem.argsort()[::-1] centroids = centroids[new_ord] labels_new = np.array(labels) for nu, i in zip(range(numclus), new_ord): labels_new[labels == i] = nu labels 
= labels_new ### clus_eval = clus_eval_indexes(PCs, centroids, labels) for nam in clus_eval['Indexes'].keys(): print(nam, clus_eval['Indexes'][nam]) print('\nClusters are identified for {0} PCs (explained variance {1}%)'.format(numpcs, "%.2f" %exctperc)) print('PCs dim: (number of ensemble members, number of PCs)={0}, EOF dim: (number of ensemble members, lat, lon)={1}'.format(pcs_unscal0[:,:numpcs].shape,eofs_unscal0[:numpcs].shape)) print('Centroid coordinates dim: (number of clusters, number of PCs)={0}, labels dim: (number of ensemble members,)={1}\n'.format(centroids.shape,labels.shape)) #____________Save labels namef=os.path.join(OUTPUTdir,'labels_{0}.txt'.format(name_outputs)) #np.savetxt(namef,labels,fmt='%d') filo = open(namef, 'w') stringo = '{:6s} {:20s} {:8s}\n'.format('#', 'filename', 'cluster') filo.write(stringo) filo.write(' \n') for filnam, ii, lab in zip(inputs['filenames'], range(numens), labels): indr = filnam.rindex('/') filnam = filnam[indr+1:] stringo = '{:6d} {:20s} {:8d}\n'.format(ii, filnam, lab) filo.write(stringo) filo.close() #____________Compute cluster frequencies L=[] for nclus in range(numclus): cl=list(np.where(labels==nclus)[0]) fr=len(cl)*100/len(labels) L.append([nclus,fr,cl]) print('Cluster labels:') print([L[ncl][0] for ncl in range(numclus)]) print('Cluster frequencies (%):') print([round(L[ncl][1],3) for ncl in range(numclus)]) print('Cluster members:') print([L[ncl][2] for ncl in range(numclus)]) #____________Find the most representative ensemble member for each cluster print('____________________________________________________________________________________________________________________') print('In order to find the most representative ensemble member for each cluster\n(which is the closest member to the cluster centroid)') print('the Euclidean distance between cluster centroids and each ensemble member is computed in the PC space') print('____________________________________________________________________________________________________________________') # 1) print('Check: cluster #1 centroid coordinates vector dim {0} should be the same as the member #1 PC vector dim {1}\n'.format(centroids[1,:].shape,PCs[1,:].shape)) #print('\nIn the PC space, the distance between:') norm=np.empty([numclus,numens]) finalOUTPUT=[] repres=[] ens_mindist = [] ens_maxdist = [] for nclus in range(numclus): for ens in range(numens): normens=centroids[nclus,:]-PCs[ens,:] norm[nclus,ens]=math.sqrt(sum(normens**2)) #print('The distance between centroid of cluster {0} and member {1} is {2}'.format(nclus,ens,round(norm[nclus,ens],3))) print('The distances between centroid of cluster {0} and member #0 to #{1} are:\n{2}'.format(nclus,numens-1,np.round(norm[nclus],3))) ens_mindist.append((np.argmin(norm[nclus,:]), norm[nclus].min())) print('MINIMUM DISTANCE FOR CLUSTER {0} IS {1} --> member #{2}'.format(nclus, round(ens_mindist[-1][1],3), ens_mindist[-1][0])) repres.append(np.where(norm[nclus] == norm[nclus].min())[0][0]) ens_maxdist.append((np.argmax(norm[nclus,:]), norm[nclus].max())) print('MAXIMUM DISTANCE FOR CLUSTER {0} IS {1} --> member #{2}'.format(nclus, round(ens_maxdist[-1][1],3), ens_maxdist[-1][0])) txt='Closest ensemble member/members to centroid of cluster {0} is/are {1}\n'.format(nclus,list(np.where(norm[nclus] == norm[nclus].min())[0])) finalOUTPUT.append(txt) with open(OUTPUTdir+'RepresentativeEnsembleMembers_{0}.txt'.format(name_outputs), "w") as text_file: text_file.write(''.join(str(e) for e in finalOUTPUT)) #____________Save the most representative 
ensemble members namef=os.path.join(OUTPUTdir,'repr_ens_{0}.txt'.format(name_outputs)) filo = open(namef, 'w') filo.write('List of cluster representatives\n') stringo = '{:10s} {:8s} -> {:20s}\n'.format('', '#', 'filename') filo.write(stringo) filo.write(' \n') for ii in range(numclus): okin = repres[ii] filnam = inputs['filenames'][okin] indr = filnam.rindex('/') filnam = filnam[indr+1:] stringo = 'Cluster {:2d}: {:8d} -> {:20s}\n'.format(ii, okin, filnam) filo.write(stringo) filo.close() #np.savetxt(namef,repres,fmt='%i') print('____________________________________________________________________________________________________________________') print('In order to study the spread of each cluster,') print('the standard deviation of the distances between each member in a cluster and the cluster centroid is computed in the PC space') print('____________________________________________________________________________________________________________________') print('\nIn the PC space:') statOUTPUT=[] for nclus in range(numclus): members=L[nclus][2] norm=np.empty([numclus,len(members)]) for mem in range(len(members)): #print('mem=',mem) ens=members[mem] #print('ens',ens) normens=centroids[nclus,:]-PCs[ens,:] norm[nclus,mem]=math.sqrt(sum(normens**2)) #print('norm=',norm[nclus],norm.dtype) print('the distances between centroid of cluster {0} and its belonging members {1} are:\n{2}'.format(nclus,members,np.round(norm[nclus],3))) print('MINIMUM DISTANCE WITHIN CLUSTER {0} IS {1} --> member #{2}'.format(nclus,round(norm[nclus].min(),3),members[np.where(norm[nclus] == norm[nclus].min())[0][0]])) print('MAXIMUM DISTANCE WITHIN CLUSTER {0} IS {1} --> member #{2}'.format(nclus,round(norm[nclus].max(),3),members[np.where(norm[nclus] == norm[nclus].max())[0][0]])) print('INTRA-CLUSTER STANDARD DEVIATION FOR CLUSTER {0} IS {1}\n'.format(nclus,norm[nclus].std())) d_stat=collections.OrderedDict() d_stat['cluster']=nclus d_stat['member']=members d_stat['d_to_centroid']=np.round(norm[nclus],3) d_stat['intra-clus_std']=norm[nclus].std() d_stat['d_min']=round(norm[nclus].min(),3) d_stat['d_max']=round(norm[nclus].max(),3) d_stat['freq(%)']=round(L[nclus][1],3) stat=pd.DataFrame(d_stat) statOUTPUT.append(stat) statOUTPUT = pd.concat(statOUTPUT, axis=0) #____________Save statistics of cluster analysis namef=os.path.join(OUTPUTdir,'statistics_clutering_{0}.txt'.format(name_outputs)) with open(namef, 'w') as text_file: text_file.write(statOUTPUT.__repr__()) return centroids, labels, ens_mindist, ens_maxdist, clus_eval #======================================================== # if __name__ == '__main__': # print('This program is being run by itself') # # print('**************************************************************') # print('Running {0}'.format(sys.argv[0])) # print('**************************************************************') # dir_OUTPUT = sys.argv[1] # OUTPUT DIRECTORY # name_outputs = sys.argv[2] # name of the outputs # numens = int(sys.argv[3]) # number of ensemble members # numpcs = sys.argv[4] # number of retained PCs # perc = sys.argv[5] # percentage of explained variance by PCs # numclus = int(sys.argv[6]) # number of clusters # # ens_eof_kmeans(dir_OUTPUT,name_outputs,numens,numpcs,perc,numclus) # # else: # print('ens_eof_kmeans is being imported from another module') ```
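Since `ens_eof_kmeans()` reads everything it needs from a single `inputs` dictionary, a minimal call looks like the sketch below. The member file names and output directory are placeholders, `ens_anom.py` must already have written `ens_anomalies_<name_outputs>.nc` into `OUTPUTdir`, exactly one of `numpcs`/`perc` may be set (the other left as `None`), and the import is written as if the calling script sits next to `ens_eof_kmeans.py`:

```python
# Hypothetical inputs dictionary for ens_eof_kmeans(); all paths are placeholders.
from ens_eof_kmeans import ens_eof_kmeans

members = ['data/tas_member_{:02d}.nc'.format(i) for i in range(30)]

inputs = {
    'OUTPUTdir': 'output/',
    'name_outputs': 'tas_DJF_EU',
    'numens': len(members),
    'filenames': members,          # used to label ensemble members in the reports
    'numpcs': None,                # let 'perc' choose how many PCs to retain
    'perc': 80,                    # keep PCs explaining at least 80% of variance
    'numclus': 4,
}

centroids, labels, ens_mindist, ens_maxdist, clus_eval = ens_eof_kmeans(inputs)
print(labels)                      # cluster index assigned to each ensemble member
```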
{ "source": "jhardy0/deer", "score": 4 }
#### File: deer/base_classes/policy.py ```python import numpy as np class Policy(object): """Abstract class for all policies. A policy takes observations as input, and outputs an action. Parameters ----------- learning_algo : object from class LearningALgo n_actions : int or list Definition of the action space provided by Environment.nActions() random_state : numpy random number generator """ def __init__(self, learning_algo, n_actions,random_state): self.learning_algo = learning_algo self.n_actions = n_actions self.random_state = random_state pass def bestAction(self, state, mode=None, *args, **kwargs): """ Returns the best Action for the given state. This is an additional encapsulation for q-network. """ action,V = self.learning_algo.chooseBestAction(state, mode, *args, **kwargs) return action, V def randomAction(self): """ Returns a random action """ if ( isinstance(self.n_actions,int)): # Discrete set of actions [0,nactions[ action = self.random_state.randint(0, self.n_actions) else: # Continuous set of actions action=[] for a in self.n_actions: action.append( self.random_state.uniform(a[0],a[1]) ) action=np.array(action) V = 0 return action, V def action(self, state): """Main method of the Policy class. It can be called by agent.py, given a state, and should return a valid action w.r.t. the environment given to the constructor. """ raise NotImplementedError() ``` #### File: deer/helper/tree.py ```python import numpy as np class Node: def __init__(self, position=-1, priority=0, end=-1): """ The information contained in each node is: - Children and parent - Position: indice of the transition in the replay memory, i.e. the circular buffer used for storing the experiences - Priority: sum of the priorities of the children. If leaf node, then it is the priority of the transition. - End: variable used for tree search based on Position """ self.left = None self.right = None self.parent = None self.position = position self.priority = priority self.end = end def hasChildren(self): if (self.right == None and self.left == None): return False return True class SumTree: def __init__(self, size): """ The tree does not implement any insert-related method because the idea is to initialize the tree to have the same number of leaves as the size of the replay memory. """ self._root = Node() size_left = int(size/2) # Initialization of the tree self._root.left = self._createSubtree(self._root, 0, size_left) # [a,b[ self._root.right = self._createSubtree(self._root, size_left, size) self._max_priority = 1 def _createSubtree(self, parent, begin, end): """ Build balanced subtrees. The leaf nodes have their "priority" initialized to 0 and "position" from 0 to n-1, with n being the size of the replay memory. The inner nodes are built while setting their "end" value that is used to position based search in the tree. Arguments: parent - parent node begin - lower bound of the range of positions end - upper bound (excluded) of the range of positions Return: node - root of the subtree """ n_elem = end - begin if (n_elem == 1): node = Node(position=begin) node.parent = parent node.end = end return node # At least 2 values (leaves) left mid = int((end + begin)/2) node = Node(end=end) node.parent = parent node.left = self._createSubtree(node, begin, mid) node.right = self._createSubtree(node, mid, end) return node def update(self, index, priority=-1): """ Update a leaf and the tree priorities. When the replay memory is updated with a new transition, it is also updated in the tree. 
The priority of the successive parent nodes are also modified. The function is also used to update the priority of an existing transtion after it has been replayed. Arguments: index - index of the leaf corresponding to the index of the new transition in the replay memory priority - the new priority of the leaf """ if (priority == -1): priority = self._max_priority elif (priority > self._max_priority): self._max_priority = priority # Search for index node = self.findIndex(index) # Replace with new priority diff = priority - node.priority node.priority = priority # Update value self._updateValue(node.parent, diff) def _updateValue(self, node, diff): node.priority += diff if (node.parent != None): self._updateValue(node.parent, diff) def findIndex(self, index): """ Find a leaf based on the index. Arguments: index - integer between 0 and n-1, n being the size of the replay memory Return: node - leaf with the index """ if(self._root != None): return self._findIndex(index, self._root) else: return None def _findIndex(self, index, node): if (node.position == index): return node if (index < node.left.end): return self._findIndex(index, node.left) else: return self._findIndex(index, node.right) def getBatch(self, n, rng, dataset): """ Generate the indices of a random batch of size n. The samples within the random batch are selected following the priorities (probabilities) of each transition in the replay memory. Argument: rng - number of elements in the random batch Return: indices - list with indices drawn w.r.t. the transition priorities. """ pmax = self._root.priority step = pmax / n indices = np.zeros(n, dtype='int32') for i in range(n): p = rng.uniform(i*step, (i+1)*step) node = self.find(p) index = self._checkTerminal(node.position, dataset) if (index >= 0): indices[i] = index else: return np.zeros(0) return indices def _checkTerminal(self, index, dataset): """ Avoid terminal states in the x samples preceding the chosen index. Argument: index - chosen index based on priority dataset - contains the circular buffers Return: index - checked or corrected value of the input index. """ history_size = dataset._max_history_size terminals = dataset._terminals n_elems = dataset.n_elems lower_bound = history_size - 1 # Check if the index is valid wrt terminals first_try = index start_wrapped = False while True: i = index - 1 processed = 0 for _ in range(history_size - 1): if (i < 0 or terminals[i]): break; i -= 1 processed += 1 if (processed < history_size - 1): # if we stopped prematurely, shift slice to the left and try again index = i if (index < lower_bound): start_wrapped = True index = n_elems - 1 if (start_wrapped and index <= first_try): return -1 else: # else index was ok according to terminals return index def find(self, priority): """ Find a leaf based on the priority. Arguments: priority - the target priority generated randomly Return: node - the closest leaf node with a greater priority """ if(self._root != None): return self._find(priority, self._root) else: return None def _find(self, priority, node): if (not node.hasChildren()): return node if(priority <= node.left.priority): return self._find(priority, node.left) else: return self._find(priority - node.left.priority, node.right) def printTree(self): # Classical printout method. Mostly for debugging purposes. 
if(self._root != None): self._printTree(self._root) print("===============") def _printTree(self, node): if(node != None): self._printTree(node.left) print(node.position, node.priority) self._printTree(node.right) if __name__ == "__main__": t = SumTree(10) t.update(1, 1) t.update(2, 0.2) t.update(3, 3.3) t.update(4, 2.5) t.update(6, 2) t.printTree() rng = np.random.RandomState() for _ in range(10): print(t.getBatch(10, rng)) ``` #### File: deer/policies/LongerExplorationPolicy.py ```python from ..base_classes import Policy import itertools import random import copy import numpy as np class LongerExplorationPolicy(Policy): """Simple alternative to :math:`\epsilon`-greedy that can explore more efficiently for a broad class of realistic problems. Parameters ----------- epsilon : float Proportion of random steps length : int Length of the exploration sequences that will be considered """ def __init__(self, learning_algo, n_actions, random_state, epsilon, length=10): Policy.__init__(self, learning_algo, n_actions, random_state) self._epsilon = epsilon self._l = length self._count_down = -1 self._action_sequence = [] def action(self, state, mode=None, *args, **kwargs): if self._count_down >= 0: # Take the next exploration action in the sequence V = 0 action = self._action_sequence[self._count_down] self._count_down -= 1 else: if self.random_state.rand() < self._epsilon/((1+(self._l-1)*(1-self._epsilon))): # Take a random action and build an exploration sequence for the next steps self._count_down = self._l - 1 self._action_sequence = self.sampleUniformActionSequence() action = self._action_sequence[self._count_down] V = 0 self._count_down -= 1 else: # Simply act greedily with respect to what is currently believed to be the best action action, V = self.bestAction(state, mode, args, kwargs) return np.array(action), V def setEpsilon(self, e): """ Set the epsilon """ self._epsilon = e def epsilon(self): """ Get the epsilon """ return self._epsilon def sampleUniformActionSequence(self): if ( isinstance(self.n_actions,int)): """ Sample an action sequence of length self._l, where the unordered sequences have uniform probabilities""" actions_list = range(self.n_actions) else: """For N exploration steps, the goal is to have actions such that their sum spans quite uniformly the whole range of possibilities. Among those possibilities, random choice/order of actions. """ possible_actions=[] # Add for all actions N random element between min and max N=3 for i,a in enumerate(self.n_actions): possible_actions.append([]) for j in range(N): possible_actions[i].append( self.random_state.uniform(self.n_actions[i][0],self.n_actions[i][1]) ) actions_list = list(itertools.product(*possible_actions)) sequences_with_replacement = list(itertools.combinations_with_replacement(actions_list, self._l)) index_pick = self.random_state.randint(0, len(sequences_with_replacement)) sequence = list(sequences_with_replacement[index_pick]) self.random_state.shuffle(sequence) return sequence ```
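`LongerExplorationPolicy` is one concrete subclass of the abstract `Policy` shown above; for comparison, a plain epsilon-greedy policy only needs to override `action()`. The sketch below is an illustration of the base-class API (`bestAction`/`randomAction`), not a class shipped with deer:

```python
# Minimal epsilon-greedy sketch against the Policy base class; illustration only.
from deer.base_classes import Policy


class SimpleEpsilonGreedyPolicy(Policy):
    def __init__(self, learning_algo, n_actions, random_state, epsilon=0.1):
        Policy.__init__(self, learning_algo, n_actions, random_state)
        self._epsilon = epsilon

    def action(self, state, mode=None, *args, **kwargs):
        if self.random_state.rand() < self._epsilon:
            return self.randomAction()                         # explore: (action, V=0)
        return self.bestAction(state, mode, *args, **kwargs)   # exploit: (action, V)
```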
{ "source": "jhare96/reinforcement-learning", "score": 2 }
#### File: rlib/A2C/A2C_lstm.py ```python import numpy as np import scipy import gym import os, time, datetime import threading from rlib.A2C.ActorCritic import ActorCritic_LSTM from rlib.networks.networks import* from rlib.utils.utils import fold_batch, stack_many, totorch, fastsample from rlib.utils.SyncMultiEnvTrainer import SyncMultiEnvTrainer from rlib.utils.VecEnv import* from rlib.utils.wrappers import* class A2CLSTM_Trainer(SyncMultiEnvTrainer): def __init__(self, envs, model, val_envs, train_mode='nstep', return_type='nstep', log_dir='logs/', model_dir='models/', total_steps=1000000, nsteps=20, validate_freq=1e6, save_freq=0, render_freq=0, num_val_episodes=50, max_val_steps=10000, log_scalars=True): super().__init__(envs, model, val_envs, log_dir=log_dir, model_dir=model_dir, train_mode=train_mode, return_type=return_type, total_steps=total_steps, nsteps=nsteps, validate_freq=validate_freq, save_freq=save_freq, render_freq=render_freq, update_target_freq=0, num_val_episodes=num_val_episodes, max_val_steps=max_val_steps, log_scalars=log_scalars) self.prev_hidden = self.model.get_initial_hidden(self.num_envs) hyper_params = {'learning_rate':model.lr, 'learning_rate_final':model.lr_final, 'lr_decay_steps':model.decay_steps , 'grad_clip':model.grad_clip, 'nsteps':self.nsteps, 'num_workers':self.num_envs, 'total_steps':self.total_steps, 'entropy_coefficient':model.entropy_coeff, 'value_coefficient':model.value_coeff, 'gamma':self.gamma, 'lambda':self.lambda_} if self.log_scalars: filename = log_dir + '/hyperparameters.txt' self.save_hyperparameters(filename, **hyper_params) def _train_nstep(self): batch_size = (self.num_envs * self.nsteps) start = time.time() num_updates = self.total_steps // batch_size s = 0 # main loop for t in range(1,num_updates+1): states, actions, rewards, first_hidden, dones, values, last_values = self.rollout() if self.return_type == 'nstep': R = self.nstep_return(rewards, last_values, dones, gamma=self.gamma) elif self.return_type == 'GAE': R = self.GAE(rewards, values, last_values, dones, gamma=self.gamma, lambda_=self.lambda_) + values elif self.return_type == 'lambda': R = self.lambda_return(rewards, values, last_values, dones, gamma=self.gamma, lambda_=self.lambda_) # stack all states, actions and Rs across all workers into a single batch actions, R = fold_batch(actions), fold_batch(R) l = self.model.backprop(states, R, actions, first_hidden, dones) if self.render_freq > 0 and t % ((self.validate_freq // batch_size) * self.render_freq) == 0: render = True else: render = False if self.validate_freq > 0 and t % (self.validate_freq //batch_size) == 0: self.validation_summary(t,l,start,render) start = time.time() if self.save_freq > 0 and t % (self.save_freq // batch_size) == 0: s += 1 self.saver.save(self.sess, str(self.model_dir + str(s) + ".ckpt") ) print('saved model') def _validate_async(self, env, num_ep, max_steps, render=False): for episode in range(num_ep): state = env.reset() episode_score = [] hidden = self.model.get_initial_hidden(1) for t in range(max_steps): policy, value, hidden = self.model.evaluate(state[None, None], hidden) #print('policy', policy, 'value', value) action = int(fastsample(policy)) next_state, reward, done, info = env.step(action) state = next_state episode_score.append(reward) if render: with self.lock: env.render() if done or t == max_steps -1: tot_reward = np.sum(episode_score) with self.lock: self.validate_rewards.append(tot_reward) break if render: with self.lock: env.close() def validate_sync(self, render): 
episode_scores = [] env = self.val_envs for episode in range(self.num_val_episodes//len(env)): states = env.reset() episode_score = [] prev_hidden = self.model.get_initial_hidden(len(self.val_envs)) for t in range(self.val_steps): policies, values, hidden = self.model.evaluate(states[None], prev_hidden) actions = fastsample(policies) next_states, rewards, dones, infos = env.step(actions) states = next_states episode_score.append(rewards*(1-dones)) if render: with self.lock: env.render() if dones.sum() == self.num_envs or t == self.val_steps -1: tot_reward = np.sum(np.stack(episode_score), axis=0) episode_scores.append(tot_reward) break return np.mean(episode_scores) def rollout(self,): rollout = [] first_hidden = self.prev_hidden for t in range(self.nsteps): policies, values, hidden = self.model.evaluate(self.states[None], self.prev_hidden) actions = fastsample(policies) next_states, rewards, dones, infos = self.env.step(actions) rollout.append((self.states, actions, rewards, values, dones)) self.states = next_states self.prev_hidden = self.model.mask_hidden(hidden, dones) # reset hidden state at end of episode states, actions, rewards, values, dones = stack_many(*zip(*rollout)) _, last_values, _ = self.model.evaluate(self.states[None], self.prev_hidden) return states, actions, rewards, first_hidden, dones, values, last_values def main(env_id): num_envs = 32 nsteps = 20 classic_list = ['MountainCar-v0', 'Acrobot-v1', 'LunarLander-v2', 'CartPole-v0', 'CartPole-v1'] if any(env_id in s for s in classic_list): print('Classic Control') val_envs = [gym.make(env_id) for i in range(10)] envs = BatchEnv(DummyEnv, env_id, num_envs, blocking=False) elif 'ApplePicker' in env_id: print('ApplePicker') make_args = {'num_objects':100, 'default_reward':-0.1} val_envs = [gym.make(env_id, **make_args) for i in range(10)] envs = DummyBatchEnv(apple_pickgame, env_id, num_envs, max_steps=5000, auto_reset=True, make_args=make_args) print(val_envs[0]) print(envs.envs[0]) else: print('Atari') env = gym.make(env_id) if env.unwrapped.get_action_meanings()[1] == 'FIRE': reset = True print('fire on reset') else: reset = False print('only stack frames') env.close() val_envs = [AtariEnv(gym.make(env_id), k=1, rescale=84, episodic=False, reset=reset, clip_reward=False) for i in range(16)] envs = BatchEnv(AtariEnv, env_id, num_envs, rescale=84, blocking=False , k=1, reset=reset, episodic=False, clip_reward=True) action_size = val_envs[0].action_space.n input_size = val_envs[0].reset().shape current_time = datetime.datetime.now().strftime('%y-%m-%d_%H-%M-%S') train_log_dir = 'logs/A2C_LSTM/' + env_id +'/' + current_time model_dir = "models/A2C_LSTM/" + env_id + '/' + current_time model = ActorCritic_LSTM(NatureCNN, input_size=input_size, action_size=action_size, cell_size=256, lr=1e-3, lr_final=1e-4, decay_steps=50e6//(num_envs*nsteps), grad_clip=0.5, optim=torch.optim.RMSprop, device='cuda') a2c_trainer = A2CLSTM_Trainer(envs=envs, model=model, model_dir=model_dir, log_dir=train_log_dir, val_envs=val_envs, train_mode='nstep', return_type='GAE', total_steps=50e6, nsteps=nsteps, validate_freq=1e6, save_freq=0, render_freq=0, num_val_episodes=25, log_scalars=False) print(env_id) a2c_trainer.train() del model if __name__ == "__main__": env_id_list = ['SpaceInvadersDeterministic-v4', 'FreewayDeterministic-v4', 'MontezumaRevengeDeterministic-v4', 'PongDeterministic-v4'] #env_id_list = ['MountainCar-v0', 'Acrobot-v1'] #env_id_list = ['SuperMarioBros-1-1-v0'] for env_id in env_id_list: main(env_id) ``` #### File: 
rlib/A2C/ActorCritic.py ```python import torch import torch.nn.functional as F import numpy as np from rlib.networks.networks import MaskedLSTMCell, MaskedRNN, MaskedLSTMBlock from rlib.utils.schedulers import polynomial_sheduler from rlib.utils.utils import totorch, tonumpy, totorch_many, tonumpy_many class ActorCritic(torch.nn.Module): def __init__(self, model, input_size, action_size, entropy_coeff=0.01, value_coeff=0.5, lr=1e-3, lr_final=1e-6, decay_steps=6e5, grad_clip=0.5, build_optimiser=True, optim=torch.optim.RMSprop, optim_args={}, device='cuda', **model_args): super(ActorCritic, self).__init__() self.lr = lr self.lr_final = lr_final self.entropy_coeff = entropy_coeff self.value_coeff = value_coeff self.decay_steps = decay_steps self.grad_clip = grad_clip self.action_size = action_size self.device = device self.model = model(input_size, **model_args).to(self.device) self.dense_size = self.model.dense_size self.policy_distrib = torch.nn.Linear(self.dense_size, action_size).to(self.device) # Actor self.V = torch.nn.Linear(self.dense_size, 1).to(self.device) # Critic if build_optimiser: self.optimiser = optim(self.parameters(), lr, **optim_args) self.scheduler = polynomial_sheduler(self.optimiser, lr_final, decay_steps, power=1) def loss(self, policy, R, V, actions_onehot): Advantage = R - V value_loss = 0.5 * torch.mean(torch.square(Advantage)) log_policy = torch.log(torch.clip(policy, 1e-6, 0.999999)) log_policy_actions = torch.sum(log_policy * actions_onehot, dim=1) policy_loss = torch.mean(-log_policy_actions * Advantage.detach()) entropy = torch.mean(torch.sum(policy * -log_policy, dim=1)) loss = policy_loss + self.value_coeff * value_loss - self.entropy_coeff * entropy return loss def forward(self, state): enc_state = self.model(state) policy = F.softmax(self.policy_distrib(enc_state), dim=-1) value = self.V(enc_state).view(-1) return policy, value def evaluate(self, state:np.ndarray): state = totorch(state, self.device) with torch.no_grad(): policy, value = self.forward(state) return tonumpy(policy), tonumpy(value) def backprop(self, state, R, action): state, R, action = totorch_many(state, R, action, device=self.device) action_onehot = F.one_hot(action.long(), num_classes=self.action_size) policy, value = self.forward(state) loss = self.loss(policy, R, value, action_onehot) loss.backward() if self.grad_clip is not None: torch.nn.utils.clip_grad_norm_(self.parameters(), self.grad_clip) self.optimiser.step() self.optimiser.zero_grad() self.scheduler.step() return loss.detach().cpu().numpy() class ActorCritic_LSTM(torch.nn.Module): def __init__(self, model, input_size, action_size, cell_size, entropy_coeff=0.01, value_coeff=0.5, lr=1e-3, lr_final=1e-6, decay_steps=6e5, grad_clip=0.5, build_optimiser=True, optim=torch.optim.RMSprop, optim_args={}, device='cuda', **model_args): super(ActorCritic_LSTM, self).__init__() self.lr = lr self.lr_final = lr_final self.input_size = input_size self.entropy_coeff = entropy_coeff self.value_coeff = value_coeff self.decay_steps = decay_steps self.grad_clip = grad_clip self.cell_size = cell_size self.action_size = action_size self.device = device self.model = model(input_size, **model_args).to(self.device) self.dense_size = self.model.dense_size #self.lstm = MaskedRNN(MaskedLSTMCell(cell_size, self.dense_size), time_major=True) self.lstm = MaskedLSTMBlock(self.dense_size, cell_size, time_major=True).to(self.device) self.policy_distrib = torch.nn.Linear(cell_size, action_size, device=self.device) # Actor self.V = torch.nn.Linear(cell_size, 1, 
device=self.device) # Critic if build_optimiser: self.optimiser = optim(self.parameters(), lr, **optim_args) self.scheduler = polynomial_sheduler(self.optimiser, lr_final, decay_steps, power=1) def loss(self, policy, R, V, actions_onehot): Advantage = R - V value_loss = 0.5 * torch.mean(torch.square(Advantage)) log_policy = torch.log(torch.clip(policy, 1e-6, 0.999999)) log_policy_actions = torch.sum(log_policy * actions_onehot, dim=1) policy_loss = torch.mean(-log_policy_actions * Advantage.detach()) entropy = torch.mean(torch.sum(policy * -log_policy, dim=1)) loss = policy_loss + self.value_coeff * value_loss - self.entropy_coeff * entropy return loss def forward(self, state, hidden=None, done=None): T, num_envs = state.shape[:2] folded_state = state.view(-1, *self.input_size) enc_state = self.model(folded_state) folded_enc_state = enc_state.view(T, num_envs, self.dense_size) lstm_outputs, hidden = self.lstm(folded_enc_state, hidden, done) policy = F.softmax(self.policy_distrib(lstm_outputs), dim=-1).view(-1, self.action_size) value = self.V(lstm_outputs).view(-1) return policy, value, hidden def evaluate(self, state:np.ndarray, hidden:np.ndarray=None, done=None): state = totorch(state, self.device) hidden = totorch_many(*hidden, device=self.device) if hidden is not None else None with torch.no_grad(): policy, value, hidden = self.forward(state, hidden, done) return tonumpy(policy), tonumpy(value), tonumpy_many(*hidden) def backprop(self, state, R, action, hidden, done): state, R, action, done = totorch_many(state, R, action, done, device=self.device) hidden = totorch_many(*hidden, device=self.device) action_onehot = F.one_hot(action.long(), num_classes=self.action_size) policy, value, hidden = self.forward(state, hidden, done) loss = self.loss(policy, R, value, action_onehot) loss.backward() if self.grad_clip is not None: torch.nn.utils.clip_grad_norm_(self.parameters(), self.grad_clip) self.optimiser.step() self.optimiser.zero_grad() self.scheduler.step() return loss.detach().cpu().numpy() def get_initial_hidden(self, batch_size): return np.zeros((1, batch_size, self.cell_size)), np.zeros((1, batch_size, self.cell_size)) def mask_hidden(self, hidden, dones): mask = (1-dones).reshape(-1, 1) return (hidden[0]*mask, hidden[1]*mask) ``` #### File: rlib/A3C/A3C.py ```python import gym import torch import torch.multiprocessing as mp import torch.nn.functional as F import numpy as np import math import time from rlib.A2C.ActorCritic import ActorCritic from rlib.networks.networks import NatureCNN from rlib.utils.wrappers import AtariEnv from rlib.utils.utils import stack_many, tonumpy, totorch, lambda_return def train(global_model, model, env, nsteps, num_episodes, ID): opt = torch.optim.RMSprop(global_model.parameters(), lr=1e-3) episode = 0 episode_steps = 0 episode_score = 0 T = 0 state = env.reset() start = time.time() while episode < num_episodes: rollout = [] for t in range(nsteps): with torch.no_grad(): policy, value = model(totorch(state[None], device='cpu')) policy, value = tonumpy(policy), tonumpy(value) action = np.random.choice(policy.shape[1], p=policy[0]) next_state, reward, done, info = env.step(action) episode_score += reward rollout.append((state, action, reward, value, done)) state = next_state T += 1 episode_steps += 1 if done or t == nsteps-1: states, actions, rewards, values, dones = stack_many(*zip(*rollout)) with torch.no_grad(): _, last_values = model.forward(totorch(next_state[None], device='cpu')) last_values = last_values.cpu().numpy() R = lambda_return(rewards, 
values, last_values, dones, gamma=0.9, lambda_=0.95, clip=False) loss = update_params(model, global_model, opt, states, actions, R) #self.T += t if done: episode += 1 state = env.reset() if episode % 1 == 0: time_taken = time.time() - start print(f'worker {ID}, total worker steps {T:,} local episode {episode}, episode score {episode_score} episode steps {episode_steps}, time taken {time_taken:,.1f}s, fps {episode_steps/time_taken:.2f}') episode_steps = 0 episode_score = 0 start = time.time() break def update_params(lm, gm, gopt, states, actions, R): states, R, actions = totorch(states, 'cpu'), totorch(R, 'cpu'), totorch(actions, 'cpu') actions_onehot = F.one_hot(actions.long(), num_classes=lm.action_size) policies, values = lm.forward(states) loss = lm.loss(policies, R, values, actions_onehot) loss.backward() if lm.grad_clip is not None: torch.nn.utils.clip_grad_norm_(lm.parameters(), lm.grad_clip) for local_param, global_param in zip(lm.parameters(), gm.parameters()): global_param._grad = local_param.grad gopt.step() gopt.zero_grad() #self.scheduler.step() lm.load_state_dict(gm.state_dict()) return loss.detach().cpu().numpy() # class SharedAdam(torch.optim.Adam): # def __init__(self, params, lr=1e-3, betas=(0.9, 0.99), eps=1e-8, # weight_decay=0): # super(SharedAdam, self).__init__(params, lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) # # State initialization # for group in self.param_groups: # for p in group['params']: # state = self.state[p] # state['step'] = 0 # state['exp_avg'] = torch.zeros_like(p.data) # state['exp_avg_sq'] = torch.zeros_like(p.data) # # share in memory # state['exp_avg'].share_memory_() # state['exp_avg_sq'].share_memory_() class SharedAdam(torch.optim.Adam): """Implements Adam algorithm with shared states. """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): super(SharedAdam, self).__init__(params, lr, betas, eps, weight_decay) for group in self.param_groups: for p in group['params']: state = self.state[p] state['step'] = torch.zeros(1) state['exp_avg'] = p.data.new().resize_as_(p.data).zero_() state['exp_avg_sq'] = p.data.new().resize_as_(p.data).zero_() def share_memory(self): for group in self.param_groups: for p in group['params']: state = self.state[p] state['step'].share_memory_() state['exp_avg'].share_memory_() state['exp_avg_sq'].share_memory_() def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data state = self.state[p] exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 if group['weight_decay'] != 0: grad = grad.add(group['weight_decay'], p.data) # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(1 - beta1, grad) exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) denom = exp_avg_sq.sqrt().add_(group['eps']) bias_correction1 = 1 - beta1 ** state['step'].item() bias_correction2 = 1 - beta2 ** state['step'].item() step_size = group['lr'] * math.sqrt( bias_correction2) / bias_correction1 p.data.addcdiv_(-step_size, exp_avg, denom) return loss if __name__ == '__main__': env_id = 'SpaceInvadersDeterministic-v4' env = AtariEnv(gym.make(env_id), reset=True) input_size = env.reset().shape action_size = env.action_space.n print('action_size', action_size) global_model = ActorCritic(NatureCNN, input_size, action_size, build_optimiser=False) global_model.share_memory() #opt = SharedAdam(global_model.parameters(), lr=1e-3) #opt.share_memory() #actor = ActorCritic(NatureCNN, input_size, action_size) env_args = dict(k=4, rescale=84, episodic=True, reset=True, clip_reward=True, Noop=True, time_limit=None, channels_first=True) model_args = dict(model=NatureCNN, input_size=input_size, action_size=action_size, build_optimiser=False) processes = [] for rank in range(8): p = mp.Process(target=train, args=(global_model, ActorCritic(**model_args), AtariEnv(gym.make(env_id), **env_args), 20, 1000, rank)) p.start() processes.append(p) time.sleep(0.5) for p in processes: p.join() ``` #### File: rlib/Curiosity/CuriosityA2C.py ```python import torch import torch.nn.functional as F import numpy as np import scipy import gym import os, time import threading from rlib.A2C.A2C import ActorCritic from rlib.networks.networks import* from rlib.utils.SyncMultiEnvTrainer import SyncMultiEnvTrainer from rlib.utils.VecEnv import* from rlib.utils.wrappers import* from rlib.utils.utils import fastsample, fold_batch, one_hot, RunningMeanStd, normalise, stack_many, totorch_many from rlib.utils.schedulers import polynomial_sheduler class RollingObs(object): def __init__(self, mean=0): self.rolling = RunningMeanStd() def update(self, x): if len(x.shape) == 4: # assume image obs return self.rolling.update(np.mean(x, axis=1, keepdims=True)) #[time*batch,height,width,stack] -> [height, width] else: return self.rolling.update(x) #[time*batch,*shape] -> [*shape] class ICM(torch.nn.Module): def __init__(self, model_head, input_size, action_size, forward_coeff, device='cuda', **model_head_args): super(ICM, self).__init__() self.action_size = action_size self.forward_coeff = forward_coeff self.phi = model_head(input_size, **model_head_args) dense_size = self.phi.dense_size self.device = device # forward model self.forward1 = torch.nn.Sequential(torch.nn.Linear(dense_size + action_size, dense_size), torch.nn.ReLU()).to(device) self.pred_state = torch.nn.Linear(dense_size, dense_size).to(device) # inverse model self.inverse1 = torch.nn.Sequential(torch.nn.Linear(dense_size*2, dense_size), torch.nn.ReLU()).to(device) self.pred_action = torch.nn.Sequential(torch.nn.Linear(dense_size*2, dense_size), torch.nn.ReLU()).to(device) def intr_reward(self, phi, action_onehot, phi_next): f1 = self.forward1(torch.cat([phi, action_onehot], dim=1)) phi_pred = self.pred_state(f1) intr_reward = 
0.5 * torch.sum(torch.square(phi_pred - phi_next), dim=1) # l2 distance metric ‖ˆφ(st+1)−φ(st+1)‖22 return intr_reward def predict_action(self, phi1, phi2): phi_cat = torch.cat([phi1, phi2], dim=1) pred_action = self.pred_action(phi_cat) return pred_action def get_intr_reward(self, state, action, next_state): state, next_state, action = totorch_many(state, next_state, action, device=self.device) action = action.long() phi1 = self.phi(state) phi2 = self.phi(next_state) action_onehot = F.one_hot(action, self.action_size) with torch.no_grad(): intr_reward = self.intr_reward(phi1, action_onehot, phi2) return intr_reward.cpu().numpy() def get_pred_action(self, state, next_state): state, next_state = totorch_many(state, next_state, device=self.device) return self.pred_action(state, next_state) def loss(self, state, action, next_state): action = action.long() phi1 = self.phi(state) phi2 = self.phi(next_state) action_onehot = F.one_hot(action, self.action_size) forward_loss = torch.mean(self.intr_reward(phi1, action_onehot, phi2)) inverse_loss = F.cross_entropy(self.predict_action(phi1, phi2), action) return (1-self.forward_coeff) * inverse_loss + self.forward_coeff * forward_loss class Curiosity(torch.nn.Module): def __init__(self, policy_model, ICM_model, input_size, action_size, forward_coeff, policy_importance, reward_scale, entropy_coeff, value_coeff=0.5, lr=1e-3, lr_final=1e-3, decay_steps=6e5, grad_clip=0.5, policy_args={}, ICM_args={}, device='cuda'): super(Curiosity, self).__init__() self.reward_scale, self.forward_coeff, self.policy_importance, self.entropy_coeff = reward_scale, forward_coeff, policy_importance, entropy_coeff self.lr, self.lr_final, self.decay_steps = lr, lr_final, decay_steps self.grad_clip = grad_clip self.action_size = action_size self.device = device try: iterator = iter(input_size) except TypeError: input_size = (input_size,) self.ICM = ICM(ICM_model, input_size, action_size, forward_coeff, device=device, **ICM_args) self.AC = ActorCritic(policy_model, input_size, action_size, entropy_coeff, value_coeff, lr, lr_final, decay_steps, grad_clip, build_optimiser=False, device=device, **policy_args) self.optimiser = torch.optim.RMSprop(self.parameters(), lr=lr) self.scheduler = polynomial_sheduler(self.optimiser, lr_final, decay_steps, power=1) def forward(self, state): return self.AC.forward(state) def evaluate(self, state): return self.AC.evaluate(state) def intrinsic_reward(self, state, action, next_state): return self.ICM.get_intr_reward(state, action, next_state) def backprop(self, state, next_state, R, Adv, action, state_mean, state_std): state, next_state, R, Adv, action, state_mean, state_std = totorch_many(state, next_state, R, Adv, action, state_mean, state_std, device=self.device) policy, value = self.AC.forward(state) action_onehot = F.one_hot(action.long(), self.action_size) policy_loss = self.AC.loss(policy, R, value, action_onehot) ICM_loss = self.ICM.loss((state-state_mean)/state_std, action, (next_state-state_mean)/state_std) loss = self.policy_importance * policy_loss + self.reward_scale * ICM_loss loss.backward() if self.grad_clip is not None: torch.nn.utils.clip_grad_norm_(self.parameters(), self.grad_clip) self.optimiser.step() self.optimiser.zero_grad() self.scheduler.step() return loss.detach().cpu().numpy() class Curiosity_Trainer(SyncMultiEnvTrainer): def __init__(self, envs, model, val_envs, train_mode='nstep', log_dir='logs/', total_steps=1000000, nsteps=5, validate_freq=1000000, save_freq=0, render_freq=0, num_val_episodes=50, 
max_val_steps=10000, log_scalars=True): super().__init__(envs, model, val_envs, train_mode=train_mode, return_type='nstep', log_dir=log_dir, total_steps=total_steps, nsteps=nsteps, validate_freq=validate_freq, save_freq=save_freq, render_freq=render_freq, update_target_freq=0, num_val_episodes=num_val_episodes, max_val_steps=max_val_steps, log_scalars=log_scalars) self.state_obs = RollingObs() self.state_mean = None self.state_std = None hyper_paras = {'learning_rate':model.lr, 'learning_rate_final':model.lr_final, 'lr_decay_steps':model.decay_steps, 'grad_clip':model.grad_clip, 'nsteps':self.nsteps, 'num_workers':self.num_envs, 'total_steps':self.total_steps, 'entropy_coefficient':0.01, 'value_coefficient':0.5, 'reward_scale':model.reward_scale, 'forward_model_scale':model.forward_coeff, 'policy_importance':model.policy_importance, 'gamma':self.gamma, 'lambda':self.lambda_} if self.log_scalars: filename = log_dir + '/hyperparameters.txt' self.save_hyperparameters(filename, **hyper_paras) self.lambda_ = 0.95 def init_state_obs(self, num_steps): states = 0 for i in range(num_steps): rand_actions = np.random.randint(0, self.model.action_size, size=self.num_envs) next_states, rewards, dones, infos = self.env.step(rand_actions) states += next_states return states / num_steps def _train_nstep(self): num_updates = self.total_steps // (self.num_envs * self.nsteps) s = 0 self.state_mean, self.state_std = self.state_obs.update(self.init_state_obs(10000//self.num_envs)) self.states = self.env.reset() print(self.state_mean.shape, self.state_std.shape) start = time.time() # main loop batch_size = self.num_envs * self.nsteps for t in range(1,num_updates+1): states, next_states, actions, rewards, dones, values = self.rollout() _, last_values = self.model.evaluate(next_states[-1]) R = self.nstep_return(rewards, last_values, dones) Adv = R - values #delta = rewards + self.gamma * values[:-1] - values[1:] #Adv = self.multistep_target(delta, values[-1], dones, gamma=self.gamma*self.lambda_) # stack all states, next_states, actions and Rs across all workers into a single batch states, next_states, actions, R, Adv = fold_batch(states), fold_batch(next_states), fold_batch(actions), fold_batch(R), fold_batch(Adv) mean, std = self.state_mean, self.state_std l = self.model.backprop(states, next_states, R, Adv, actions, mean, std) # self.state_mean, self.state_std = self.state_obs.update(states) if self.render_freq > 0 and t % (self.validate_freq * self.render_freq) == 0: render = True else: render = False if self.validate_freq > 0 and t % (self.validate_freq // batch_size) == 0: self.validation_summary(t,l,start,render) start = time.time() if self.save_freq > 0 and t % (self.save_freq // batch_size) == 0: s += 1 self.saver.save(self.sess, str(self.model_dir + self.current_time + '/' + str(s) + ".ckpt") ) print('saved model') def get_action(self, state): policy, value = self.model.evaluate(state) action = int(np.random.choice(policy.shape[1], p=policy[0])) return action def rollout(self,): rollout = [] for t in range(self.nsteps): start = time.time() policies, values = self.model.evaluate(self.states) actions = fastsample(policies) next_states, extr_rewards, dones, infos = self.env.step(actions) mean, std = self.state_mean[None], self.state_std[None] intr_rewards = self.model.intrinsic_reward((self.states-mean)/std, actions, (next_states-mean)/std) rewards = extr_rewards + intr_rewards rollout.append((self.states, next_states, actions, rewards, values, dones)) self.states = next_states states, next_states, 
actions, rewards, values, dones = stack_many(*zip(*rollout)) return states, next_states, actions, rewards, dones, values def main(env_id): num_envs = 32 nsteps = 20 classic_list = ['MountainCar-v0', 'Acrobot-v1', 'LunarLander-v2', 'CartPole-v0', 'CartPole-v1'] if any(env_id in s for s in classic_list): print('Classic Control') val_envs = [gym.make(env_id) for i in range(1)] envs = BatchEnv(DummyEnv, env_id, num_envs, blocking=False) else: env = gym.make(env_id) print('Atari') if env.unwrapped.get_action_meanings()[1] == 'FIRE': reset = True print('fire on reset') else: reset = False print('only stack frames') val_envs = [AtariEnv(gym.make(env_id), k=4, rescale=84, episodic=False, reset=reset, clip_reward=False) for i in range(1)] envs = BatchEnv(AtariEnv, env_id, num_envs, blocking=False, rescale=84, k=4, reset=reset, episodic=False, clip_reward=True, time_limit=4500) env.close() action_size = val_envs[0].action_space.n input_size = val_envs[0].reset().shape train_log_dir = 'logs/Curiosity/' + env_id + '/hyper_unclipped/' model = Curiosity(NatureCNN, NatureCNN, input_size=input_size, action_size=action_size, forward_coeff=0.2, policy_importance=1, reward_scale=1.0, entropy_coeff=0.01, #intr_coeff=1, lr=1e-3, lr_final=0, decay_steps=50e6//(num_envs*nsteps), grad_clip=0.5, policy_args={}, ICM_args={'scale':False}).cuda() curiosity = Curiosity_Trainer(envs=envs, model=model, val_envs=val_envs, train_mode='nstep', total_steps=5e6, nsteps=nsteps, validate_freq=1e5, save_freq=0, render_freq=0, num_val_episodes=1, log_dir=train_log_dir, log_scalars=False) print(env_id) curiosity.train() del curiosity if __name__ == "__main__": env_id_list = ['SpaceInvadersDeterministic-v4', 'FreewayDeterministic-v4', 'MontezumaRevengeDeterministic-v4', 'PongDeterministic-v4'] #env_id_list = ['MountainCar-v0', 'Acrobot-v1', 'CartPole-v1', ] #for i in range(5): for env_id in env_id_list: main(env_id) ``` #### File: rlib/Unreal/UnrealA2C2.py ```python from numpy.core.fromnumeric import size import torch import torch.nn.functional as F import numpy as np import gym import os, time, datetime from rlib.utils.utils import fastsample, fold_batch, one_hot, RunningMeanStd, stack_many, totorch, totorch_many, tonumpy, GAE from rlib.utils.schedulers import polynomial_sheduler from collections import deque from rlib.networks.networks import* from rlib.utils.SyncMultiEnvTrainer import SyncMultiEnvTrainer from rlib.utils.VecEnv import* from rlib.utils.wrappers import* from rlib.A2C.ActorCritic import ActorCritic # A2C-CNN version of Unsupervised Reinforcement Learning with Auxiliary Tasks (UNREAL) https://arxiv.org/abs/1611.05397 # Modifications: # no action-reward fed into policy # Use greyscaled images # deconvolute to pixel grid that overlaps FULL image # Generalised Advantage Estimation # Assumes input image size is 84x84 #torch.backends.cudnn.benchmark=True def sign(x): if x < 0: return 2 elif x == 0: return 0 elif x > 0: return 1 else: raise ValueError class UnrealA2C2(torch.nn.Module): def __init__(self, policy_model, input_shape, action_size, pixel_control=True, RP=1.0, PC=1.0, VR=1.0, entropy_coeff=0.001, value_coeff=0.5, lr=1e-3, lr_final=1e-4, decay_steps=50e6, grad_clip=0.5, policy_args={}, optim=torch.optim.RMSprop, device='cuda', optim_args={}): super(UnrealA2C2, self).__init__() self.RP, self.PC, self.VR = RP, PC, VR self.lr = lr self.entropy_coeff, self.value_coeff = entropy_coeff, value_coeff self.pixel_control = pixel_control self.grad_clip = grad_clip self.action_size = action_size self.device = device try: 
iterator = iter(input_shape) except TypeError: input_size = (input_shape,) self.policy = ActorCritic(policy_model, input_shape, action_size, entropy_coeff=entropy_coeff, value_coeff=value_coeff, build_optimiser=False, device=device, **policy_args) if pixel_control: self.feat_map = torch.nn.Sequential(torch.nn.Linear(self.policy.dense_size, 32*8*8), torch.nn.ReLU()).to(device) self.deconv1 = torch.nn.Sequential(torch.nn.ConvTranspose2d(32, 32, kernel_size=[3,3], stride=[1,1]), torch.nn.ReLU()).to(device) self.deconv_advantage = torch.nn.ConvTranspose2d(32, action_size, kernel_size=[3,3], stride=[2,2]).to(device) self.deconv_value = torch.nn.ConvTranspose2d(32, 1, kernel_size=[3,3], stride=[2,2]).to(device) # reward model self.r1 = torch.nn.Sequential(torch.nn.Linear(self.policy.dense_size, 128), torch.nn.ReLU()).to(device) self.r2 = torch.nn.Linear(128, 3).to(device) self.optimiser = optim(self.parameters(), lr, **optim_args) self.scheduler = polynomial_sheduler(self.optimiser, lr_final, decay_steps, power=1) def forward(self, state): return self.policy.forward(state) def evaluate(self, state): return self.policy.evaluate(state) def Qaux(self, enc_state): # Auxillary Q value calculated via dueling network # <NAME>, <NAME>, and <NAME>. Dueling Network Architectures for Deep ReinforcementLearning. https://arxiv.org/pdf/1511.06581.pdf batch_size = enc_state.shape[0] feat_map = self.feat_map(enc_state).view([batch_size,32,8,8]) deconv1 = self.deconv1(feat_map) deconv_adv = self.deconv_advantage(deconv1) deconv_value = self.deconv_value(deconv1) qaux = deconv_value + deconv_adv - torch.mean(deconv_adv, dim=1, keepdim=True) return qaux def get_pixel_control(self, state:np.ndarray): with torch.no_grad(): enc_state = self.policy.model(totorch(state, self.device)) Qaux = self.Qaux(enc_state) return tonumpy(Qaux) def pixel_loss(self, Qaux, Qaux_actions, Qaux_target): # Qaux_target temporal difference target for Q_aux #print('max qaux actions', Qaux_actions) #print('action_size', self.action_size) one_hot_actions = F.one_hot(Qaux_actions.long(), self.action_size) pixel_action = one_hot_actions.view([-1,self.action_size,1,1]) Q_aux_action = torch.sum(Qaux * pixel_action, dim=1) pixel_loss = 0.5 * torch.mean(torch.square(Qaux_target - Q_aux_action)) # l2 loss for Q_aux over all pixels and batch return pixel_loss def reward_loss(self, reward_states, reward_target): r1 = self.r1(self.policy.model(reward_states)) pred_reward = self.r2(r1) reward_loss = torch.mean(F.cross_entropy(pred_reward, reward_target.long())) # cross entropy over caterogical reward return reward_loss def replay_loss(self, R, V): return torch.mean(torch.square(R - V)) def forward_loss(self, states, R, actions): states, R, actions = totorch_many(states, R, actions, device=self.device) actions_onehot = F.one_hot(actions.long(), num_classes=self.action_size) policies, values = self.forward(states) forward_loss = self.policy.loss(policies, R, values, actions_onehot) return forward_loss def auxiliary_loss(self, reward_states, rewards, Qaux_target, Qaux_actions, replay_states, replay_R): reward_states, rewards, Qaux_target, Qaux_actions, replay_states, replay_R = totorch_many(reward_states, rewards, Qaux_target, Qaux_actions, replay_states, replay_R, device=self.device) policy_enc = self.policy.model(replay_states) replay_values = self.policy.V(policy_enc) reward_loss = self.reward_loss(reward_states, rewards) replay_loss = self.replay_loss(replay_R, replay_values) aux_loss = self.RP * reward_loss + self.VR * replay_loss Qaux_actions = 
Qaux_actions.long() if self.pixel_control: Qaux = self.Qaux(policy_enc) pixel_loss = self.pixel_loss(Qaux, Qaux_actions, Qaux_target) aux_loss += self.PC * pixel_loss return aux_loss def backprop(self, states, R, actions, reward_states, rewards, Qaux_target, Qaux_actions, replay_states, replay_R): forward_loss = self.forward_loss(states, R, actions) aux_losses = self.auxiliary_loss(reward_states, rewards, Qaux_target, Qaux_actions, replay_states, replay_R) loss = forward_loss + aux_losses loss.backward() if self.grad_clip is not None: torch.nn.utils.clip_grad_norm_(self.parameters(), self.grad_clip) self.optimiser.step() self.optimiser.zero_grad() self.scheduler.step() return loss.detach().cpu().numpy() class UnrealTrainer(SyncMultiEnvTrainer): def __init__(self, envs, model, val_envs, train_mode='nstep', log_dir='logs/UnrealA2C2', model_dir='models/UnrealA2C2', total_steps=1000000, nsteps=5, normalise_obs=True, validate_freq=1000000, save_freq=0, render_freq=0, num_val_episodes=50, replay_length=2000, max_val_steps=10000, log_scalars=True): super().__init__(envs, model, val_envs, train_mode=train_mode, log_dir=log_dir, model_dir=model_dir, total_steps=total_steps, nsteps=nsteps, validate_freq=validate_freq, save_freq=save_freq, render_freq=render_freq, update_target_freq=0, num_val_episodes=num_val_episodes, max_val_steps=max_val_steps, log_scalars=log_scalars) self.replay = deque([], maxlen=replay_length) #replay length per actor self.action_size = self.model.action_size hyper_paras = {'learning_rate':model.lr, 'grad_clip':model.grad_clip, 'nsteps':nsteps, 'num_workers':self.num_envs, 'total_steps':self.total_steps, 'entropy_coefficient':model.entropy_coeff, 'value_coefficient':model.value_coeff, 'gamma':self.gamma, 'lambda':self.lambda_} if log_scalars: filename = log_dir + '/hyperparameters.txt' self.save_hyperparameters(filename, **hyper_paras) self.normalise_obs = normalise_obs if self.normalise_obs: self.obs_running = RunningMeanStd() self.state_mean = np.zeros_like(self.states) self.state_std = np.ones_like(self.states) self.aux_reward_rolling = RunningMeanStd() def populate_memory(self): for t in range(2000//self.nsteps): states, *_ = self.rollout() #self.state_mean, self.state_std = self.obs_running.update(fold_batch(states)[...,-1:]) self.update_minmax(states) def update_minmax(self, obs): minima = obs.min() maxima = obs.max() if minima < self.state_min: self.state_min = minima if maxima > self.state_max: self.state_max = maxima def norm_obs(self, obs): ''' normalise pixel intensity changes by recording min and max pixel observations not using per pixel normalisation because expected image is singular greyscale frame ''' return (obs - self.state_min) * (1/(self.state_max - self.state_min)) def auxiliary_target(self, pixel_rewards, last_values, dones): T = len(pixel_rewards) R = np.zeros((T,*last_values.shape)) dones = dones[:,:,np.newaxis,np.newaxis] R[-1] = last_values * (1-dones[-1]) for i in reversed(range(T-1)): # restart score if done as BatchEnv automatically resets after end of episode R[i] = pixel_rewards[i] + 0.99 * R[i+1] * (1-dones[-1]) return R def pixel_rewards(self, prev_state, states): # states of rank [T, B, channels, 84, 84] T = len(states) # time length B = states.shape[1] # batch size pixel_rewards = np.zeros((T,B,21,21)) states = states[:,:,-1,:,:] prev_state = prev_state[:,-1,:,:] if self.normalise_obs: states = self.norm_obs(states) #print('states, max', states.max(), 'min', states.min(), 'mean', states.mean()) prev_state = self.norm_obs(prev_state) 
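        # the 84x84 frame difference below is pooled down to a 21x21 reward grid (84 = 4 * 21) by
        # averaging groups of 16 pixel intensities, matching the 21x21 output of the deconvolved Q_aux head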
pixel_rewards[0] = np.abs(states[0] - prev_state).reshape(-1,4,4,21,21).mean(axis=(1,2)) for i in range(1,T): pixel_rewards[i] = np.abs(states[i] - states[i-1]).reshape(-1,4,4,21,21).mean(axis=(1,2)) #print('pixel reward',pixel_rewards.shape, 'max', pixel_rewards.max(), 'mean', pixel_rewards.mean()) return pixel_rewards def sample_replay(self): workers = np.random.choice(self.num_envs, replace=False, size=2) # randomly sample from one of n workers sample_start = np.random.randint(1, len(self.replay) - self.nsteps -2) replay_sample = [] for i in range(sample_start, sample_start+self.nsteps): replay_sample.append(self.replay[i]) replay_states = np.stack([replay_sample[i][0][workers] for i in range(len(replay_sample))]) replay_actions = np.stack([replay_sample[i][1][workers] for i in range(len(replay_sample))]) replay_rewards = np.stack([replay_sample[i][2][workers] for i in range(len(replay_sample))]) replay_values = np.stack([replay_sample[i][3][workers] for i in range(len(replay_sample))]) replay_dones = np.stack([replay_sample[i][4][workers] for i in range(len(replay_sample))]) #print('replay dones shape', replay_dones.shape) #print('replay_values shape', replay_values.shape) next_state = self.replay[sample_start+self.nsteps][0][workers] # get state _, replay_last_values = self.model.evaluate(next_state) replay_R = GAE(replay_rewards, replay_values, replay_last_values, replay_dones, gamma=0.99, lambda_=0.95) + replay_values if self.model.pixel_control: prev_states = self.replay[sample_start-1][0][workers] Qaux_value = self.model.get_pixel_control(next_state) pixel_rewards = self.pixel_rewards(prev_states, replay_states) Qaux_target = self.auxiliary_target(pixel_rewards, np.max(Qaux_value, axis=1), replay_dones) else: Qaux_target = np.zeros((len(replay_states),1,1,1)) # produce fake Qaux to save writing unecessary code return fold_batch(replay_states), fold_batch(replay_actions), fold_batch(replay_R), fold_batch(Qaux_target), fold_batch(replay_dones) #return replay_states, replay_actions, replay_R, Qaux_target, replay_dones def sample_reward(self): # worker = np.random.randint(0,self.num_envs) # randomly sample from one of n workers replay_rewards = np.array([self.replay[i][2] for i in range(len(self.replay))]) worker = np.argmax(np.sum(replay_rewards, axis=0)) # sample experience from best worker nonzero_idxs = np.where(np.abs(replay_rewards) > 0)[0] # idxs where |reward| > 0 zero_idxs = np.where(replay_rewards == 0)[0] # idxs where reward == 0 if len(nonzero_idxs) ==0 or len(zero_idxs) == 0: # if nonzero or zero idxs do not exist i.e. 
all rewards same sign idx = np.random.randint(len(replay_rewards)) elif np.random.uniform() > 0.5: # sample from zero and nonzero rewards equally #print('nonzero') idx = np.random.choice(nonzero_idxs) else: idx = np.random.choice(zero_idxs) reward_states = self.replay[idx][0][worker] reward = np.array([sign(replay_rewards[idx,worker])]) # source of error return reward_states[None], reward def _train_nstep(self): batch_size = self.num_envs * self.nsteps num_updates = self.total_steps // batch_size s = 0 self.state_min = 0 self.state_max = 0 self.populate_memory() # main loop start = time.time() for t in range(1,num_updates+1): states, actions, rewards, values, dones, last_values = self.rollout() # R = self.nstep_return(rewards, last_values, dones, clip=False) R = GAE(rewards, values, last_values, dones, gamma=0.99, lambda_=0.95) + values # stack all states, actions and Rs across all workers into a single batch states, actions, rewards, R = fold_batch(states), fold_batch(actions), fold_batch(rewards), fold_batch(R) #self.state_mean, self.state_std = self.obs_running.update(states[...,-1:]) # update state normalisation statistics self.update_minmax(states) reward_states, sample_rewards = self.sample_reward() replay_states, replay_actions, replay_R, Qaux_target, replay_dones = self.sample_replay() l = self.model.backprop(states, R, actions, reward_states, sample_rewards, Qaux_target, replay_actions, replay_states, replay_R) if self.render_freq > 0 and t % ((self.validate_freq // batch_size) * self.render_freq) == 0: render = True else: render = False if self.validate_freq > 0 and t % (self.validate_freq // batch_size) == 0: self.validation_summary(t,l,start,render) start = time.time() if self.save_freq > 0 and t % (self.save_freq // batch_size) == 0: s += 1 self.save(self.s) print('saved model') def rollout(self,): rollout = [] for t in range(self.nsteps): policies, values = self.model.evaluate(self.states) # Qaux = self.model.get_pixel_control(self.states, self.prev_hidden, self.prev_actions_rewards[np.newaxis]) actions = fastsample(policies) next_states, rewards, dones, infos = self.env.step(actions) rollout.append((self.states, actions, rewards, values, dones)) self.replay.append((self.states, actions, rewards, values, dones)) # add to replay memory self.states = next_states states, actions, rewards, values, dones = stack_many(*zip(*rollout)) _, last_values = self.model.evaluate(next_states) return states, actions, rewards, values, dones, last_values def get_action(self, state): policy, value = self.model.evaluate(state) action = int(np.random.choice(policy.shape[1], p=policy[0])) return action def main(env_id): num_envs = 32 nsteps = 20 classic_list = ['MountainCar-v0', 'Acrobot-v1', 'LunarLander-v2', 'CartPole-v0', 'CartPole-v1'] if any(env_id in s for s in classic_list): print('Classic Control') val_envs = [gym.make(env_id) for i in range(16)] envs = BatchEnv(DummyEnv, env_id, num_envs, blocking=False) elif 'ApplePicker' in env_id: print('ApplePicker') make_args = {'num_objects':300, 'default_reward':0} val_envs = [apple_pickgame(gym.make(env_id, **make_args), max_steps=5000, auto_reset=False, grey_scale=False, k=1) for i in range(15)] envs = DummyBatchEnv(apple_pickgame, env_id, num_envs, max_steps=5000, auto_reset=True, grey_scale=False, k=1, make_args=make_args) print(val_envs[0]) print(envs.envs[0]) else: print('Atari') env = gym.make(env_id) if env.unwrapped.get_action_meanings()[1] == 'FIRE': reset = True print('fire on reset') else: reset = False print('only stack frames') 
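        # note: the validation envs below use clip_reward=False so reported scores are true game scores,
        # while the training BatchEnv clips rewards to keep the learning signal bounded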
env.close() val_envs = [AtariEnv(gym.make(env_id), k=4, episodic=False, reset=reset, clip_reward=False) for i in range(15)] envs = BatchEnv(AtariEnv, env_id, num_envs, blocking=False, k=4, reset=reset, episodic=False, clip_reward=True, time_limit=4500) action_size = val_envs[0].action_space.n input_size = val_envs[0].reset().shape current_time = datetime.datetime.now().strftime('%y-%m-%d_%H-%M-%S') train_log_dir = 'logs/UnrealA2C2/' + env_id + '/' + current_time model_dir = "models/UnrealA2C2/" + env_id + '/' + current_time model = UnrealA2C2(UniverseCNN, input_shape=input_size, action_size=action_size, PC=1, entropy_coeff=0.01, lr=1e-3, lr_final=1e-6, decay_steps=50e6//(num_envs*nsteps), pixel_control=True, grad_clip=0.5, policy_args=dict(), ).cuda() auxiliary = UnrealTrainer(envs=envs, model=model, model_dir=model_dir, log_dir=train_log_dir, val_envs=val_envs, train_mode='nstep', total_steps=50e6, nsteps=nsteps, normalise_obs=True, validate_freq=5e5, save_freq=0, render_freq=0, num_val_episodes=15, log_scalars=True) auxiliary.train() del auxiliary if __name__ == "__main__": import apple_picker env_id_list = ['SpaceInvadersDeterministic-v4', 'MontezumaRevengeDeterministic-v4' 'FreewayDeterministic-v4', 'PongDeterministic-v4' ] #env_id_list = ['MountainCar-v0','CartPole-v1', 'Acrobot-v1'] env_id_list = ['ApplePicker-v0'] for env_id in env_id_list: main(env_id) ``` #### File: rlib/utils/SyncMultiEnvTrainer.py ```python import time, datetime, os import threading import numpy as np import torch import copy import json from typing import Union from abc import ABC, abstractmethod from rlib.utils.utils import fold_batch from rlib.utils.VecEnv import BatchEnv, DummyBatchEnv import torch from torch.utils.tensorboard import SummaryWriter class SyncMultiEnvTrainer(object): def __init__(self, envs: Union[BatchEnv, DummyBatchEnv], model:torch.nn.Module, val_envs: Union[list, BatchEnv, DummyBatchEnv], train_mode='nstep', return_type='nstep', log_dir='logs/', model_dir='models/', total_steps=50e6, nsteps=5, gamma=0.99, lambda_=0.95, validate_freq=1e6, save_freq=0, render_freq=0, update_target_freq=0, num_val_episodes=50, max_val_steps=10000, log_scalars=True): ''' A synchronous multiple env training framework for pytorch Args: envs - BatchEnv | DummyBatchEnv: multiple synchronous training environments model - reinforcement learning model log_dir, log directory string for location of directory to log scalars log_dir='logs/', model_dir='models/', val_envs - use your own discretion to choose which validation mode you wan't, recommended BatchEnv or list for Atari and DummyBatchEnv for Classic Control like envs list: a list of envs for validation, uses threading to run environments asychronously BatchEnv: uses multiprocessing to run validation envs sychronously in parallel DummyBatchEnv: allows for sychronous env stepping without the overhead of multiprocessing, good for computationally cheap environments train_mode - 'nstep' or 'onestep' species whether training is done using multiple step TD learning or single step return_type - string to determine whether 'nstep', 'lambda' or 'GAE' returns are to be used total_steps - number of Total training steps across all environements nsteps - number of steps TD error is caluclated over validate_freq - number of steps across all environements before performing validating, 0 for no validation save_freq - number of steps across all environements before saving model, 0 for no saving render_freq - multiple of validate_freq before rendering (i.e. 
render every X validations), 0 for no rendering update_target_freq - number of steps across all environements before updating target model, 0 for no updating num_val_episodes - number of episodes to average over when validating max_val_steps - maximum number of steps for each validation episode (prevents infinite loops) log_scalars - boolean flag whether to log tensorboard scalars to log_dir ''' self.env = envs if isinstance(envs, list): self.validate_func = self.validate_async else: self.validate_func = self.validate_sync if train_mode not in ['nstep', 'onestep']: raise ValueError('train_mode %s is not a valid argument. Valid arguments are ... %s, %s' %(train_mode,'nstep','onestep')) assert num_val_episodes >= len(val_envs), 'number of validation epsiodes {} must be greater than or equal to the number of validation envs {}'.format(num_val_episodes, len(val_envs)) if return_type not in ['nstep', 'lambda', 'GAE']: raise ValueError('return_type %s is not a valid argument. Valid arguments are ... %s, %s, %s' %(return_type, 'nstep', 'lambda', 'GAE')) self.train_mode = train_mode self.num_envs = len(envs) self.env_id = envs.spec.id self.val_envs = val_envs self.validate_rewards = [] self.model = model self.total_steps = int(total_steps) self.nsteps = nsteps self.return_type = return_type self.gamma = gamma self.lambda_ = lambda_ self.validate_freq = int(validate_freq) self.num_val_episodes = num_val_episodes self.val_steps = max_val_steps self.lock = threading.Lock() self.save_freq = int(save_freq) self.render_freq = render_freq self.target_freq = int(update_target_freq) self.s = 0 # number of saves made self.t = 1 # number of updates done self.log_scalars = log_scalars self.log_dir = log_dir self.model_dir = model_dir self.states = self.env.reset() if log_scalars: # Tensorboard Variables self.train_log_dir = self.log_dir + '/train' self.train_writer = SummaryWriter(self.train_log_dir) if not os.path.exists(self.model_dir) and save_freq > 0: os.makedirs(self.model_dir) def __del__(self): self.env.close() def train(self): if self.train_mode == 'nstep': self._train_nstep() elif self.train_mode == 'onestep': self._train_onestep() else: raise ValueError('%s is not a valid training mode'%(self.train_mode)) @abstractmethod def _train_nstep(self): ''' template for multi-step training loop for synchronous training over multiple environments ''' start = time.time() batch_size = self.num_envs * self.nsteps num_updates = self.total_steps // batch_size # main loop for t in range(self.t,num_updates+1): states, actions, rewards, dones, values, last_values = self.rollout() if self.return_type == 'nstep': R = self.nstep_return(rewards, last_values, dones, gamma=self.gamma) elif self.return_type == 'GAE': R = self.GAE(rewards, values, last_values, dones, gamma=self.gamma, lambda_=self.lambda_) + values elif self.return_type == 'lambda': R = self.lambda_return(rewards, values, last_values, dones, gamma=self.gamma, lambda_=self.lambda_, clip=False) # stack all states, actions and Rs from all workers into a single batch states, actions, R = fold_batch(states), fold_batch(actions), fold_batch(R) l = self.model.backprop(states, R, actions) if self.render_freq > 0 and t % ((self.validate_freq // batch_size) * self.render_freq) == 0: render = True else: render = False if self.validate_freq > 0 and t % (self.validate_freq // batch_size) == 0: self.validation_summary(t,l,start,render) start = time.time() if self.save_freq > 0 and t % (self.save_freq // batch_size) == 0: self.s += 1 self.save(self.s) print('saved model') 
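            # update_target() is an optional hook for value-based learners (e.g. DQN target networks);
            # with update_target_freq=0 this branch is skipped entirely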
if self.target_freq > 0 and t % (self.target_freq // batch_size) == 0: # update target network (for value based learning e.g. DQN) self.update_target() self.t +=1 @abstractmethod def rollout(self): raise NotImplementedError(self, 'No rollout method found') def nstep_return(self, rewards, last_values, dones, gamma=0.99, clip=False): if clip: rewards = np.clip(rewards, -1, 1) T = len(rewards) # Calculate R for advantage A = R - V R = np.zeros_like(rewards) R[-1] = last_values * (1-dones[-1]) for i in reversed(range(T-1)): # restart score if done as BatchEnv automatically resets after end of episode R[i] = rewards[i] + gamma * R[i+1] * (1-dones[i]) return R def lambda_return(self, rewards, values, last_values, dones, gamma=0.99, lambda_=0.8, clip=False): if clip: rewards = np.clip(rewards, -1, 1) T = len(rewards) # Calculate eligibility trace R^lambda R = np.zeros_like(rewards) R[-1] = last_values * (1-dones[-1]) for t in reversed(range(T-1)): # restart score if done as BatchEnv automatically resets after end of episode R[t] = rewards[t] + gamma * (lambda_* R[t+1] + (1.0-lambda_) * values[t+1]) * (1-dones[t]) return R def GAE(self, rewards, values, last_values, dones, gamma=0.99, lambda_=0.95, clip=False): if clip: rewards = np.clip(rewards, -1, 1) # Generalised Advantage Estimation Adv = np.zeros_like(rewards) Adv[-1] = rewards[-1] + gamma * last_values * (1-dones[-1]) - values[-1] T = len(rewards) for t in reversed(range(T-1)): delta = rewards[t] + gamma * values[t+1] * (1-dones[t]) - values[t] Adv[t] = delta + gamma * lambda_ * Adv[t+1] * (1-dones[t]) return Adv def validation_summary(self, t, loss, start, render): batch_size = self.num_envs * self.nsteps tot_steps = t * batch_size time_taken = time.time() - start frames_per_update = (self.validate_freq // batch_size) * batch_size fps = frames_per_update / time_taken score = self.validate_func(render) print("update %i, validation score %f, total steps %i, loss %f, time taken for %i frames:%fs, fps %f \t\t\t" %(t,score,tot_steps,loss,frames_per_update,time_taken,fps)) if self.log_scalars: self.train_writer.add_scalar('validation/score', score, tot_steps) self.train_writer.add_scalar('train/loss', loss, tot_steps) def save_model(self, s): model_loc = f'{self.model_dir}/{s}.pt' # default saving method is to save session torch.save(self.model.state_dict(), model_loc) def load_model(self, modelname, model_dir="models/"): filename = model_dir + modelname + '.pt' if os.path.exists(filename): self.model.load_state_dict(torch.load(filename)) print("loaded:", filename) else: print(filename, " does not exist") def base_attr(self): attributes = {'train_mode':self.train_mode, 'total_steps':self.total_steps, 'nsteps':self.nsteps, 'return_type':self.return_type, 'gamma':self.gamma, 'lambda_':self.lambda_, 'validate_freq':self.validate_freq, 'num_val_episodes':self.num_val_episodes, 'save_freq':self.save_freq, 'render_freq':self.render_freq, 'model_dir':self.model_dir, 'train_log_dir':self.train_log_dir, 's':self.s, 't':self.t} return attributes def local_attr(self, attr): # attr[variable] = z return attr def save(self, s): model_loc = str(self.model_dir + '/' + str(s) + '.trainer') file = open(model_loc, 'w+') attributes = self.base_attr() # add local variables to dict attributes = self.local_attr(attributes) json.dump(attributes, file) # save model self.save_model(s) file.close() def load(self, Class, model, model_checkpoint, envs, val_envs, filename, log_scalars=True, allow_gpu_growth=True, continue_train=True): with open(filename, 'r') as file: attrs 
= json.loads(file.read()) s = attrs.pop('s') t = attrs.pop('t') time = attrs.pop('current_time') print(attrs) trainer = Class(envs=envs, model=model, val_envs=val_envs, log_scalars=log_scalars, gpu_growth=allow_gpu_growth, **attrs) if continue_train: trainer.s = s trainer.t = t self.load_model(model_checkpoint, trainer.model_dir) return trainer @abstractmethod def update_target(self): pass @abstractmethod def _train_onestep(self): ''' more efficient implementation of train_nstep when nsteps=1 ''' raise NotImplementedError(self, 'does not have an one-step training implementation') def save_hyperparameters(self, filename, **kwargs): handle = open(filename, "w") for key, value in kwargs.items(): handle.write("{} = {}\n" .format(key, value)) handle.close() def validate_async(self, render=False): num_val_envs = len(self.val_envs) num_val_eps = [self.num_val_episodes//num_val_envs for i in range(num_val_envs)] num_val_eps[-1] = num_val_eps[-1] + self.num_val_episodes % self.num_val_episodes//(num_val_envs) render_array = np.zeros((len(self.val_envs))) render_array[0] = render threads = [threading.Thread(daemon=True, target=self._validate_async, args=(self.val_envs[i], num_val_eps[i], self.val_steps, render_array[i])) for i in range(num_val_envs)] try: for thread in threads: thread.start() for thread in threads: thread.join() except KeyboardInterrupt: for thread in threads: thread.join() score = np.mean(self.validate_rewards) self.validate_rewards = [] return score def _validate_async(self, env, num_ep, max_steps, render=False): 'single env validation' for episode in range(num_ep): state = env.reset() episode_score = [] for t in range(max_steps): action = self.get_action(state[np.newaxis]) next_state, reward, done, info = env.step(action) state = next_state #print('state', state, 'action', action, 'reward', reward) episode_score.append(reward) if render: with self.lock: env.render() if done or t == max_steps -1: tot_reward = np.sum(episode_score) with self.lock: self.validate_rewards.append(tot_reward) break if render: with self.lock: env.close() def validate_sync(self, render=False): 'batch env validation' episode_scores = [] env = self.val_envs for episode in range(self.num_val_episodes//len(env)): states = env.reset() episode_score = [] for t in range(self.val_steps): actions = self.get_action(states) next_states, rewards, dones, infos = env.step(actions) states = next_states #print('state', state, 'action', action, 'reward', reward) episode_score.append(rewards*(1-dones)) if render: with self.lock: env.render() if dones.sum() == self.num_envs or t == self.val_steps -1: tot_reward = np.sum(np.stack(episode_score), axis=0) episode_scores.append(tot_reward) break return np.mean(episode_scores) def get_action(self, state): # include small fn in order to reuse validate raise NotImplementedError('get_action method is required when using the default validation functions, check that this is implemented properly') def fold_batch(self, x): rows, cols = x.shape[0], x.shape[1] y = x.reshape(rows*cols,*x.shape[2:]) return y # class Runner(ABC): # def __init__(self,model,env,num_steps): # self.model = model # self.env = env # self.num_steps = num_steps # self.states = self.env.reset() # @abstractmethod # def run(self): # pass ``` #### File: rlib/VIN/VIN.py ```python import torch import torch.nn.functional as F import numpy as np from torch.utils.tensorboard import SummaryWriter import datetime import threading import time from rlib.utils.VecEnv import* from rlib.utils.wrappers import* from rlib.utils.utils 
import fold_batch, stack_many, one_hot, totorch, totorch_many, tonumpy class VINCNN(torch.nn.Module): def __init__(self, input_size, action_size, k=10, lr=1e-3, device='cuda'): super(VINCNN, self).__init__() channels, height, width = input_size self.action_size = action_size self.conv_enc = torch.nn.Conv2d(channels, 150, kernel_size=[3,3], stride=[1,1], padding=1).to(device) # φ(s) self.R_bar = torch.nn.Conv2d(150, 1, kernel_size=[1,1], stride=[1,1], padding=0, bias=False).to(device) self.Q_bar = torch.nn.Conv2d(1, action_size, kernel_size=[3,3], stride=[1,1], padding=1, bias=False).to(device) self.w = torch.nn.Parameter(torch.zeros(action_size, 1, 3, 3), requires_grad=True).to(device) self.Q = torch.nn.Linear(action_size, action_size).to(device) self.k = k # nsteps to plan with VIN self.optim = torch.optim.RMSprop(params=self.parameters(), lr=lr) self.device = device def forward(self, img, x, y): hidden = self.conv_enc(img) R_bar = self.R_bar(hidden) Q_bar = self.Q_bar(R_bar) V_bar, _ = torch.max(Q_bar, dim=1, keepdim=True) batch_size = img.shape[0] psi = self._plan_ahead(R_bar, V_bar)[torch.arange(batch_size), :, x.long(), y.long()].view(batch_size, self.action_size) # ψ(s) Qsa = self.Q(psi) return Qsa def backprop(self, states, locs, R, actions): x, y = zip(*locs) Qsa = self.forward(totorch(states, self.device), torch.tensor(x).to(self.device), torch.tensor(y)).to(self.device) actions_onehot = totorch(one_hot(actions, self.action_size), self.device) Qvalue = torch.sum(Qsa * actions_onehot, axis=1) loss = torch.mean(torch.square(totorch(R).float().cuda() - Qvalue)) loss.backward() self.optim.step() self.optim.zero_grad() return loss.detach().cpu().numpy() def value_iteration(self, r, V): return F.conv2d( # Stack reward with most recent value torch.cat([r, V], 1), # Convolve r->q weights to r, and v->q weights for v. 
These represent transition probabilities torch.cat([self.Q_bar.weight, self.w], 1), stride=1, padding=1) def _plan_ahead(self, r, V): for i in range(self.k): Q = self.value_iteration(r, V) V, _ = torch.max(Q, dim=1, keepdim=True) Q = self.value_iteration(r, V) return Q class VINTrainer(object): def __init__(self, model, envs, val_envs, epsilon=0.1, epsilon_final=0.1, epsilon_steps=1000000, epsilon_test=0.1, return_type='nstep', log_dir='logs/', model_dir='models/', total_steps=50000000, nsteps=20, gamma=0.99, lambda_=0.95, validate_freq=1e6, save_freq=0, render_freq=0, update_target_freq=0, num_val_episodes=50, log_scalars=True): self.model = model self.env = envs self.num_envs = len(envs) self.val_envs = val_envs self.total_steps = total_steps self.action_size = self.model.action_size self.epsilon = epsilon self.epsilon_test = epsilon_test self.states = self.env.reset() self.loc = self.get_locs() print('locs', self.loc) self.total_steps = int(total_steps) self.nsteps = nsteps self.return_type = return_type self.gamma = gamma self.lambda_ = lambda_ self.validate_freq = int(validate_freq) self.num_val_episodes = num_val_episodes self.save_freq = int(save_freq) self.render_freq = render_freq self.target_freq = int(update_target_freq) self.t=1 self.validate_rewards = [] self.lock = threading.Lock() self.scheduler = self.linear_schedule(epsilon, epsilon_final, epsilon_steps) self.log_scalars = log_scalars self.log_dir = log_dir if log_scalars: # Tensorboard Variables train_log_dir = self.log_dir + '/train' self.train_writer = SummaryWriter(train_log_dir) def nstep_return(self, rewards, last_values, dones, gamma=0.99, clip=False): if clip: rewards = np.clip(rewards, -1, 1) T = len(rewards) # Calculate R for advantage A = R - V R = np.zeros_like(rewards) R[-1] = last_values * (1-dones[-1]) for i in reversed(range(T-1)): # restart score if done as BatchEnv automatically resets after end of episode R[i] = rewards[i] + gamma * R[i+1] * (1-dones[i]) return R def lambda_return(self, rewards, values, last_values, dones, gamma=0.99, lambda_=0.8, clip=False): if clip: rewards = np.clip(rewards, -1, 1) T = len(rewards) # Calculate eligibility trace R^lambda R = np.zeros_like(rewards) R[-1] = last_values * (1-dones[-1]) for t in reversed(range(T-1)): # restart score if done as BatchEnv automatically resets after end of episode R[t] = rewards[t] + gamma * (lambda_* R[t+1] + (1.0-lambda_) * values[t+1]) * (1-dones[t]) return R def GAE(self, rewards, values, last_values, dones, gamma=0.99, lambda_=0.95, clip=False): if clip: rewards = np.clip(rewards, -1, 1) # Generalised Advantage Estimation Adv = np.zeros_like(rewards) Adv[-1] = rewards[-1] + gamma * last_values * (1-dones[-1]) - values[-1] T = len(rewards) for t in reversed(range(T-1)): delta = rewards[t] + gamma * values[t+1] * (1-dones[t]) - values[t] Adv[t] = delta + gamma * lambda_ * Adv[t+1] * (1-dones[t]) return Adv def get_locs(self): locs = [] for env in self.env.envs: locs.append(env.agent_loc) return locs def train(self): self.train_nstep() def train_nstep(self): batch_size = self.num_envs * self.nsteps num_updates = self.total_steps // batch_size # main loop start = time.time() for t in range(self.t,num_updates+1): states, locs, actions, rewards, dones, infos, values, last_values = self.rollout() if self.return_type == 'nstep': R = self.nstep_return(rewards, last_values, dones, gamma=self.gamma) elif self.return_type == 'GAE': R = self.GAE(rewards, values, last_values, dones, gamma=self.gamma, lambda_=self.lambda_) + values elif 
self.return_type == 'lambda': R = self.lambda_return(rewards, values, last_values, dones, gamma=self.gamma, lambda_=self.lambda_, clip=False) # stack all states, actions and Rs from all workers into a single batch states, locs, actions, R = fold_batch(states), fold_batch(locs), fold_batch(actions), fold_batch(R) #print('locs', locs.shape) l = self.model.backprop(states, locs, R, actions) if self.validate_freq > 0 and t % (self.validate_freq // batch_size) == 0: self.validation_summary(t,l,start,False) start = time.time() if self.save_freq > 0 and t % (self.save_freq // batch_size) == 0: self.s += 1 self.save(self.s) print('saved model') if self.target_freq > 0 and t % (self.target_freq // batch_size) == 0: # update target network (for value based learning e.g. DQN) self.update_target() self.t +=1 def eval_state(self, state, loc): with torch.no_grad(): x, y = zip(*loc) x, y = torch.tensor(x).to(self.device), torch.tensor(y).to(self.device) state_torch = totorch(state, self.device) Qsa = self.model(state_torch, x, y) return tonumpy(Qsa) def rollout(self): rollout = [] for t in range(self.nsteps): Qsa = self.eval_state(self.states, self.loc) actions = np.argmax(Qsa, axis=1) random = np.random.uniform(size=(self.num_envs)) random_actions = np.random.randint(self.action_size, size=(self.num_envs)) actions = np.where(random < self.epsilon, random_actions, actions) next_states, rewards, dones, infos = self.env.step(actions) values = np.sum(Qsa * one_hot(actions, self.action_size), axis=-1) rollout.append((self.states, self.loc, actions, rewards, dones, infos, values)) self.states = next_states self.epsilon = self.scheduler.step() self.loc = self.get_locs() states, locs, actions, rewards, dones, infos, values = stack_many(*zip(*rollout)) last_Qsa = self.eval_state(next_states, self.loc) # Q(s,a|theta) last_actions = np.argmax(last_Qsa, axis=1) last_values = np.sum(last_Qsa * one_hot(last_actions, self.action_size), axis=-1) return states, locs, actions, rewards, dones, infos, values, last_values def get_action(self, state, loc): Qsa = self.eval_state(state, loc) if np.random.uniform() < self.epsilon_test: action = np.random.choice(self.action_size) else: action = np.argmax(Qsa, axis=1) return action def validation_summary(self,t,loss,start,render): batch_size = self.num_envs * self.nsteps tot_steps = t * batch_size time_taken = time.time() - start frames_per_update = (self.validate_freq // batch_size) * batch_size fps = frames_per_update /time_taken num_val_envs = len(self.val_envs) num_val_eps = [self.num_val_episodes//num_val_envs for i in range(num_val_envs)] num_val_eps[-1] = num_val_eps[-1] + self.num_val_episodes % self.num_val_episodes//(num_val_envs) render_array = np.zeros((len(self.val_envs))) render_array[0] = render threads = [threading.Thread(daemon=True, target=self.validate, args=(self.val_envs[i], num_val_eps[i], 10000, render_array[i])) for i in range(num_val_envs)] try: for thread in threads: thread.start() for thread in threads: thread.join() except KeyboardInterrupt: for thread in threads: thread.join() score = np.mean(self.validate_rewards) self.validate_rewards = [] print("update %i, validation score %f, total steps %i, loss %f, time taken for %i frames:%fs, fps %f" %(t,score,tot_steps,loss,frames_per_update,time_taken,fps)) if self.log_scalars: self.train_writer.add_scalar('Validation/Score', score) self.train_writer.add_scalar('Training/Loss', loss) def validate(self,env,num_ep,max_steps,render=False): episode_scores = [] for episode in range(num_ep): state = env.reset() 
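            # the VIN head indexes Q-values at the agent's grid coordinates, so the agent location is
            # tracked alongside the raw observation and passed to get_action at every step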
loc = env.agent_loc episode_score = [] for t in range(max_steps): action = self.get_action(state[np.newaxis], [loc]) next_state, reward, done, info = env.step(action) state = next_state loc = env.agent_loc episode_score.append(reward) if render: with self.lock: env.render() if done or t == max_steps -1: tot_reward = np.sum(episode_score) with self.lock: self.validate_rewards.append(tot_reward) break if render: with self.lock: env.close() class linear_schedule(object): def __init__(self, epsilon, epsilon_final, num_steps=1000000): self._counter = 0 self._epsilon = epsilon self._epsilon_final = epsilon_final self._step = (epsilon - epsilon_final) / num_steps self._num_steps = num_steps def step(self,): if self._counter < self._num_steps : self._epsilon -= self._step self._counter += 1 else: self._epsilon = self._epsilon_final return self._epsilon def main(env_id): num_envs = 32 nsteps = 1 current_time = datetime.datetime.now().strftime('%y-%m-%d_%H-%M-%S') train_log_dir = 'logs/VIN/' + env_id +'/n_step/' + current_time model_dir = "models/VIN/" + env_id + '/n_step/' + current_time if 'ApplePicker' in env_id: print('ApplePicker') make_args = {'num_objects':300, 'default_reward':-0.01} val_envs = [apple_pickgame(gym.make('ApplePicker-v0', **make_args)) for i in range(10)] envs = DummyBatchEnv(apple_pickgame, 'ApplePicker-v0', num_envs, max_steps=1000, auto_reset=True, make_args=make_args) print(val_envs[0]) print(envs.envs[0]) else: print('Atari') env = gym.make(env_id) if env.unwrapped.get_action_meanings()[1] == 'FIRE': reset = True print('fire on reset') else: reset = False print('only stack frames') env.close() val_envs = [AtariEnv(gym.make(env_id), k=4, episodic=False, reset=reset, clip_reward=False) for i in range(5)] envs = BatchEnv(AtariEnv, env_id, num_envs, blocking=False, k=4, reset=reset, episodic=False, clip_reward=True) action_size = val_envs[0].action_space.n input_size = val_envs[0].reset().shape print('input shape', input_size) print('action space', action_size) vin = VINCNN(input_size, action_size, k=50, lr=1e-3).cuda() trainer = VINTrainer(envs=envs, model=vin, log_dir=train_log_dir, val_envs=val_envs, return_type='nstep', total_steps=10e6, nsteps=nsteps, validate_freq=1e5, save_freq=0, render_freq=0, num_val_episodes=10, log_scalars=False) trainer.train() if __name__ == "__main__": import apple_picker #env_id_list = ['SpaceInvadersDeterministic-v4', 'FreewayDeterministic-v4', 'MontezumaRevengeDeterministic-v4', 'PongDeterministic-v4'] #env_id_list = ['MontezumaRevengeDeterministic-v4'] env_id_list = ['ApplePicker-v0'] for env_id in env_id_list: main(env_id) ```
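The convolutional value-iteration step inside `VINCNN` (`value_iteration` / `_plan_ahead`) is easy to lose in the surrounding trainer code, so the self-contained sketch below reproduces the same idea on dummy tensors. The standalone `plan` helper, the batch/action/grid sizes and the random weights are illustrative assumptions, not part of the repository.

```python
import torch
import torch.nn.functional as F

def plan(r, V, r_to_q, v_to_q, k=10):
    """k rounds of value iteration expressed as a single grid convolution per step.

    r: (B, 1, H, W) predicted reward map, V: (B, 1, H, W) value map,
    r_to_q / v_to_q: (A, 1, 3, 3) weights mapping reward / value to Q per action.
    """
    for _ in range(k):
        # stack the reward map with the current value map and convolve with the
        # stacked kernels: one Bellman backup over every grid cell at once
        Q = F.conv2d(torch.cat([r, V], dim=1), torch.cat([r_to_q, v_to_q], dim=1), stride=1, padding=1)
        V, _ = torch.max(Q, dim=1, keepdim=True)   # V(s) = max_a Q(s, a)
    return F.conv2d(torch.cat([r, V], dim=1), torch.cat([r_to_q, v_to_q], dim=1), stride=1, padding=1)

if __name__ == '__main__':
    B, A, H, W = 2, 8, 21, 21                      # batch, actions, grid size (illustrative)
    r = torch.randn(B, 1, H, W)
    V = torch.zeros(B, 1, H, W)
    r_to_q = torch.randn(A, 1, 3, 3) * 0.1
    v_to_q = torch.randn(A, 1, 3, 3) * 0.1
    Q = plan(r, V, r_to_q, v_to_q, k=10)
    print(Q.shape)                                 # torch.Size([2, 8, 21, 21])
```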
{ "source": "jhargis/hello-brawndo", "score": 3 }
#### File: jhargis/hello-brawndo/tests.py
```python
import pytest


def func(x):
    return x + 1


def test_answer_true():
    assert func(4) == 5

# def test_answer_false():
#     assert func(3) == 5
```
#### File: jhargis/hello-brawndo/web.py
```python
import os
from flask import Flask

app = Flask(__name__)


@app.route('/')
def index():
    PIPELINE_LOCATION = os.environ.get('PIPELINE_LOCATION', '')
    TESTVAR = os.environ.get('TESTVAR', '')
    return '..hello from %s...10 .... TESTVAR=%s TESTVAR_TYPE=%s' % (PIPELINE_LOCATION, TESTVAR, type(TESTVAR))
```
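One detail of `web.py` worth keeping in mind: `os.environ.get` always returns a string (or the supplied default), so the `TESTVAR_TYPE` rendered by the index route is `<class 'str'>` whenever the variable is set. A minimal check outside Flask (the variable name and value are illustrative):

```python
import os

os.environ["TESTVAR"] = "42"            # environment variables are stored as text
value = os.environ.get("TESTVAR", "")
print(value, type(value))               # 42 <class 'str'>
print(int(value) + 1)                   # cast explicitly if a number is needed
```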
{ "source": "jharilal/calculator_assignment", "score": 3 }
#### File: calculator/csv_operations/csv_read.py ```python import pandas as pd from calculator.csv_operations.path_finder import abs_path_to_csv class CsvRead: @staticmethod def csv_to_df(csv_to_convert): pathway = abs_path_to_csv(csv_to_convert) return pd.read_csv(pathway) ``` #### File: calculator/operations/calculation.py ```python class Calculation: """Creates the Calculation parent class for the arithmetic subclasses""" # pylint: disable=bad-option-value, too-few-public-methods def __init__(self, values: tuple): """Constructor Method""" self.values = Calculation.convert_to_float(values) @classmethod def create(cls, values: tuple): """Creates an object""" return cls(values) @staticmethod def convert_to_float(values): """Converts the values passed to function into float values in a list""" list_of_floats = [] for item in values: list_of_floats.append(float(item)) return tuple(list_of_floats) ``` #### File: calculator/tests/calculator_test.py ```python import pytest from calculator.main import Calculator from calculator.history.calculation_history import History from calculator.operations.addition import Addition from calculator.operations.division import Division from calculator.operations.multiplication import Multiplication from calculator.csv_operations.csv_read import CsvRead @pytest.fixture def clear_history(): """Clears the history of the calculator for each test""" History.clear_history() def test_adding(clear_history): """Tests the adding function of the calculator class""" # pylint: disable=redefined-outer-name, unused-argument tup = (3, 4, 5) # Arrange result = Calculator.adding(tup) # Act assert result == 12 # Assert def test_subtracting(clear_history): """Tests the subtracting function of the calculator class""" # pylint: disable=redefined-outer-name, unused-argument df = CsvRead.csv_to_df('calculator/tests/csvs_for_operations/subtracting_short.csv') for index, row in df.iterrows(): tup = (row['value_a'], row['value_b'], row['value_c']) assert Calculator.subtracting(tup) == row['result'] def test_multiplying(clear_history): """Tests the multiplying function of the calculator class""" # pylint: disable=redefined-outer-name, unused-argument tup = (0, 3, 2) tup_two = (4, 3, 2) Calculator.multiplying(tup) result = History.last_result() Calculator.multiplying(tup_two) result_two = History.last_result() assert result == 0 assert result_two == 24 def test_dividing(clear_history): """Tests the dividing function of the calculator class""" # pylint: disable=redefined-outer-name, unused-argument tup_one = (24, 2, 3) tup_two = (0, 2, 3) tup_three = (2, 0, 3) result_one = Calculator.dividing(tup_one) result_two = Calculator.dividing(tup_two) result_three = Calculator.dividing(tup_three) assert result_one == 4 assert result_two == ZeroDivisionError assert result_three == ZeroDivisionError def test_last_result(clear_history): """Tests the last result function for the calculator class""" # pylint: disable=redefined-outer-name, unused-argument tup = (12, 4) assert Calculator.dividing(tup) == 3 assert Calculator.multiplying(tup) == 48 assert Calculator.adding(tup) == 16 assert History.last_result() == 16 def test_first_result(clear_history): """Tests the first result function for the calculator class""" # pylint: disable=redefined-outer-name, unused-argument tup = (12, 4) assert Calculator.dividing(tup) == 3 assert Calculator.multiplying(tup) == 48 assert Calculator.adding(tup) == 16 assert History.first_result() == 3 def test_last_object(clear_history): """Tests the last object function for 
the calculator class""" # pylint: disable=redefined-outer-name, unused-argument tup = (12, 4) assert Calculator.dividing(tup) == 3 assert Calculator.multiplying(tup) == 48 assert Calculator.adding(tup) == 16 assert isinstance(History.get_last_object(), Addition) is True def test_first_object(clear_history): """Tests the first object function for the calculator class""" # pylint: disable=redefined-outer-name, unused-argument tup = (12, 4) assert Calculator.dividing(tup) == 3 assert Calculator.multiplying(tup) == 48 assert Calculator.adding(tup) == 16 assert isinstance(History.get_first_object(), Division) is True def test_count_history(clear_history): """Tests the history count function for the calculator class""" # pylint: disable=redefined-outer-name, unused-argument assert History.history_count() == 0 tup = (12, 4) assert Calculator.dividing(tup) == 3 assert Calculator.multiplying(tup) == 48 assert Calculator.adding(tup) == 16 assert History.history_count() == 3 def test_clear_history(clear_history): """Tests the clear history function for the calculator class""" # pylint: disable=redefined-outer-name, unused-argument tup = (12, 4) assert Calculator.dividing(tup) == 3 assert Calculator.multiplying(tup) == 48 assert Calculator.adding(tup) == 16 assert History.clear_history() is True assert History.history_count() == 0 def test_get_history(clear_history): """Tests the get history function for the calculator class""" # pylint: disable=redefined-outer-name, unused-argument tup = (12, 4) assert Calculator.dividing(tup) == 3 assert Calculator.multiplying(tup) == 48 assert Calculator.adding(tup) == 16 assert isinstance(History.get_history()[0], Division) is True assert isinstance(History.get_history()[1], Multiplication) is True assert isinstance(History.get_history()[2], Addition) is True ```
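The tests above effectively document the calculator's public API. The sketch below condenses that usage into one place; it assumes the same import paths as the test module and mirrors the asserted values.

```python
from calculator.main import Calculator
from calculator.history.calculation_history import History

History.clear_history()                 # start from an empty history

values = (12, 4)
assert Calculator.dividing(values) == 3
assert Calculator.multiplying(values) == 48
assert Calculator.adding(values) == 16

assert History.history_count() == 3     # one Calculation object stored per operation
assert History.first_result() == 3      # division ran first
assert History.last_result() == 16      # addition ran last
```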
{ "source": "jharkawat/Amazon_ml_challenge", "score": 3 }
#### File: Amazon_ml_challenge/src/splitter.py ```python import csv from sklearn.model_selection import train_test_split import pandas as pd import os from sklearn.model_selection import train_test_split import re, string, unicodedata import nltk import inflect from nltk import word_tokenize, sent_tokenize from nltk.corpus import stopwords from nltk.stem import LancasterStemmer, WordNetLemmatizer from collections import Counter import datetime from nltk.stem import SnowballStemmer from pandarallel import pandarallel pandarallel.initialize() def get_data_explained_percentage(df, tags_df, K): tags = list(tags_df['class'])[:K] new_df = df[df['BROWSE_NODE_ID'].isin(tags)] print("Percentage Explained by top ", K, " tags is :", len(new_df)/len(df)*100) return new_df """Text Cleaning""" stop_words = set(stopwords.words('english')) stemmer = SnowballStemmer(language='english') total_processed = 0 def process_questions(row): global total_processed article = str(row['BULLET_POINTS']) title = str(row['TITLE']) total_processed += 1 if(total_processed%10000 == 0): print("processed : ", total_processed) if '[' in article: article=article.lstrip('[').rstrip(']') question=3*(str(title)+" ")+str(article) question=re.sub(r'[^A-Za-z,.]+',' ',question) words_in_questions=word_tokenize(str(question.lower())) #Removing stopwords, 1 letter words except 'c' question_cleaned=" ".join(stemmer.stem(word) for word in words_in_questions if not word in stop_words) return question_cleaned if __name__ == "__main__": df = pd.read_csv("data/dataset/train.csv", escapechar = "\\", quoting = csv.QUOTE_NONE) df = df[["TITLE", "BULLET_POINTS", "BROWSE_NODE_ID"]] freq_dict = Counter(df['BROWSE_NODE_ID']) tags_df=pd.DataFrame(list(freq_dict.items()),columns=['class','Frequency']) tags_df.head() tags_df=tags_df.sort_values(ascending=False,by='Frequency') #sorted by no. of occurances topK = 2000 new_df = get_data_explained_percentage(df, tags_df, topK) processed_data=pd.DataFrame() processed_data['desc']=new_df.parallel_apply(process_questions, axis=1) print("Total no. 
of descriptions processed from each title and bullet: ",len(processed_data['desc'])) processed_data['BROWSE_NODE_ID']=new_df['BROWSE_NODE_ID'] df = processed_data train, test = train_test_split(df, test_size=0.2) train , val = train_test_split(train, test_size=0.25) os.makedirs("custum-data", exist_ok=True) train.to_csv("custum-data/train.csv", index=False) test.to_csv("custum-data/test.csv", index=False) val.to_csv("custum-data/val.csv", index=False) print("done") ``` #### File: Amazon_ml_challenge/src/train.py ```python import numpy as np import pandas as pd import csv import os import glob import pandas as pd from torch.utils.data import TensorDataset from transformers import AutoTokenizer, AutoModelForSequenceClassification from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from transformers import AdamW, get_linear_schedule_with_warmup import argparse from sklearn.model_selection import train_test_split import torch import torch.nn as nn from tqdm import tqdm import json from util import f1_score_func, accuracy_per_class import util import logging parser = argparse.ArgumentParser() parser.add_argument('--model', default='roberta-base', help="model name from huggingface") parser.add_argument('--experiment_name', default='sample-roberta-training-properly', help="model name from huggingface") #sample-try-with-bert-base parser.add_argument('--used_tokenized_data', type=bool, default=False, help="saved pickled tokenizer faster laoding in future") parser.add_argument("--epochs", type=int, default=5) parser.add_argument("--batch_size_train", type=int, default=32) parser.add_argument("--batch_size_val", type=int, default=32) parser.add_argument("--dummy", type=bool, default=False) parser.add_argument("--full_finetuning", type=bool, default=True) parser.add_argument("--loading_from_prev_pretrain", type=bool, default=False) parser.add_argument("--trained_model", default="sample-distilbert-run2/finetuned_BERT_epoch_1.model") def evaluate(dataloader_val, model): model.eval() loss_val_total = 0 predictions, true_vals = [], [] for batch in dataloader_val: batch = tuple(b.to(0) for b in batch) inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[2], } with torch.no_grad(): outputs = model(**inputs) loss = outputs[0] loss = loss.mean() logits = outputs[1] loss_val_total += loss.item() logits = logits.detach().cpu().numpy() label_ids = inputs['labels'].cpu().numpy() predictions.append(logits) true_vals.append(label_ids) loss_val_avg = loss_val_total/len(dataloader_val) predictions = np.concatenate(predictions, axis=0) true_vals = np.concatenate(true_vals, axis=0) return loss_val_avg, predictions, true_vals if (__name__ == "__main__"): args = parser.parse_args() exp_name = args.experiment_name # logging args parsers fileds #assert Path("./"+exp_name).exists() os.mkdir(exp_name) util.set_logger(os.path.join(exp_name, 'train.log')) logging.info("Training Arguments: {}" .format(args)) try: os.system("nvidia-smi") except: print("Something went wrong with nvidia-smi command") logging.info("loading all the files of data") # logging tokeinzer used tokenizer = AutoTokenizer.from_pretrained(args.model) logging.info("Used tokenizer: {}".format(tokenizer)) # directly loading tokenized data from from pickle if args.used_tokenized_data: logging.info("loading tokenized data") encoded_data_train = torch.load("data/tokenized/encoded_data_train.pt") encoded_data_val = torch.load("data/tokenized/encoded_data_val.pt") else: #filenames = [name for name in 
glob.glob('./data/dataset/train.csv')] filenames = ["custum-data/val.csv", "custum-data/train.csv"] df = pd.concat( [ pd.read_csv(f, low_memory=False) for f in filenames ] ) dict = {'desc': 'BULLET_POINTS', 'BROWSE_NODE_ID': 'BROWSE_NODE_ID'} # call rename () method df.rename(columns=dict, inplace=True) #df = pd.read_csv("./data/dataset/train.csv", escapechar = "\\", quoting = csv.QUOTE_NONE) if (args.dummy): # logging logging.info("dummy data") df = df[:400] #df = df[["BULLET_POINTS", "BROWSE_NODE_ID"]] #df = pd.concat( [ pd.read_csv(f, sep='\t', names=['id', 'BROWSE_NODE_ID','BULLET_POINTS']) for f in filenames ] ) logging.info("Loaded sucessfull") possible_labels = df.BROWSE_NODE_ID.unique() label_dict = {} for index, possible_label in enumerate(possible_labels): label_dict[possible_label] = index #os.path.join(exp_name, 'params.json') with open(os.path.join(exp_name, 'params.json'), 'w') as fp: label_dict = {int(k):int(v) for k,v in label_dict.items() } json.dump(label_dict, fp) # logging the location of dump dict logging.info("Dump label_dict location: {}".format(os.path.join(exp_name, 'params.json'))) df['label'] = df.BROWSE_NODE_ID.replace(label_dict) df = df.dropna() # drop row if no of label in BROWSE_NODE_ID is less than 2 X_train, X_val, y_train, y_val = train_test_split(df.index.values, df.label.values, test_size=0.35, random_state=44) df['data_type'] = ['not_set']*df.shape[0] df.loc[X_train, 'data_type'] = 'train' df.loc[X_val, 'data_type'] = 'val' encoded_data_train = tokenizer.batch_encode_plus( df[df.data_type=='train'].BULLET_POINTS.values.tolist(), add_special_tokens=True, return_attention_mask=True, pad_to_max_length=True, max_length=512, return_tensors='pt' ) # logging of encoded data logging.info("Encoded train data: encoded_data_train") encoded_data_val = tokenizer.batch_encode_plus( df[df.data_type=='val'].BULLET_POINTS.values.tolist(), add_special_tokens=True, return_attention_mask=True, pad_to_max_length=True, max_length=512, return_tensors='pt' ) logging.info("Encoded val data: encoded_data_val") #torch.save(encoded_data_train, exp_name+"/encoded_data_train.pt") #torch.save(encoded_data_val, exp_name+"/encoded_data_val.pt") logging.info("Dumped encoded_data_train.pt and encoded_data_val.pt") input_ids_train = encoded_data_train['input_ids'] attention_masks_train = encoded_data_train['attention_mask'] labels_train = torch.tensor(df[df.data_type=='train'].label.values) input_ids_val = encoded_data_val['input_ids'] attention_masks_val = encoded_data_val['attention_mask'] labels_val = torch.tensor(df[df.data_type=='val'].label.values) dataset_train = TensorDataset(input_ids_train, attention_masks_train, labels_train) dataset_val = TensorDataset(input_ids_val, attention_masks_val, labels_val) ## BERT MODEL logging.info("Loading AutoModel model") model = AutoModelForSequenceClassification.from_pretrained(args.model, num_labels=len(label_dict), output_attentions=False, output_hidden_states=False) if args.loading_from_prev_pretrain: logging.info("Loading pretrained model") model.load_state_dict(torch.load(args.trained_model)) model = nn.DataParallel(model) logging.info("using Multi GPU data parallel") ## DataLoader batch_size_train = args.batch_size_train batch_size_val = args.batch_size_val logging.info("batch size train: {}" .format(batch_size_train)) logging.info("batch size val: {}" .format(batch_size_val)) dataloader_train = DataLoader(dataset_train, sampler=RandomSampler(dataset_train), batch_size=batch_size_train) dataloader_validation = DataLoader(dataset_val, 
                                       sampler=SequentialSampler(dataset_val),
                                       batch_size=batch_size_val)

    #optimizer = AdamW(model.parameters(),
    #                  lr=1e-5,
    #                  eps=1e-8)

    epochs = args.epochs
    weight_decay = 0.01
    logging.info("epochs: {}".format(epochs))

    if args.full_finetuning:
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
             'weight_decay': weight_decay},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
             'weight_decay': 0.0}
        ]
        # Pipeline: embedding (tokenizer initialised from the pretrained model) -->
        # architecture (transformer stacks) --> classifier (linear classifier / SVM).
        # With full fine-tuning the whole stack is trainable end to end.
    else:
        # only finetune the head classifier
        param_optimizer = list(model.classifier.named_parameters())
        optimizer_grouped_parameters = [{'params': [p for n, p in param_optimizer]}]

    optimizer = AdamW(optimizer_grouped_parameters, lr=1e-5, correct_bias=False)

    scheduler = get_linear_schedule_with_warmup(optimizer,
                                                num_warmup_steps=0,
                                                num_training_steps=len(dataloader_train)*epochs)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    #model.to(device)
    model = model.to(0)
    logging.info("Device: {}".format(device))
    #print(device)

    best_acc = 0.0
    patience_counter = 0

    # logging all the parameters of args
    #logging.info("Training Arguments: {}" .format(args))

    for epoch in tqdm(range(1, epochs+1)):
        if args.full_finetuning:
            model.train()
        else:
            model.classifier.train()

        loss_train_total = 0

        progress_bar = tqdm(dataloader_train, desc='Epoch {:1d}'.format(epoch), leave=False, disable=False)
        for batch in progress_bar:
            model.zero_grad()
            batch = tuple(b.to(0) for b in batch)
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'labels': batch[2],
                      }
            outputs = model(**inputs)
            loss = outputs[0]
            loss = loss.mean()
            loss_train_total += loss.item()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            scheduler.step()
            progress_bar.set_postfix({'training_loss': '{:.3f}'.format(loss.item()/len(batch))})

        torch.save(model.state_dict(), f'{exp_name}/finetuned_BERT_epoch_{epoch}.model')
        tqdm.write(f'\nEpoch {epoch}')
        loss_train_avg = loss_train_total/len(dataloader_train)
        tqdm.write(f'Training loss: {loss_train_avg}')

        val_loss, predictions, true_vals = evaluate(dataloader_validation, model)
        val_f1, acc = f1_score_func(predictions, true_vals)
        tqdm.write(f'Validation loss: {val_loss}')
        tqdm.write(f'F1 Score (Weighted): {val_f1}')
        logging.info(f'F1 Score (Weighted): {val_f1}')
        logging.info(f'Accuracy: {acc}')

        improve_acc = acc - best_acc
        patience = 0.02
        patience_num = 10
        min_epoch_num = 5
        if improve_acc > 1e-5:
            logging.info("- Found new best accuracy")
            best_acc = acc
            torch.save(model.state_dict(), f'{exp_name}/finetuned_BERT_best.model')
            if improve_acc < patience:
                patience_counter += 1
            else:
                patience_counter = 0
        else:
            patience_counter += 1

        # Early stopping and logging the best validation accuracy
        if (patience_counter >= patience_num and epoch > min_epoch_num) or epoch == epochs:
            logging.info("Best val accuracy: {:05.2f}".format(best_acc))
            break
```
#### File: Amazon_ml_challenge/src/util.py
```python
import os
import json
import logging

import numpy as np
from sklearn.metrics import f1_score, accuracy_score


def f1_score_func(preds, labels):
    preds_flat = np.argmax(preds, axis=1).flatten()
    labels_flat = labels.flatten()
    return f1_score(labels_flat, preds_flat, average='weighted'), accuracy_score(labels_flat, preds_flat)


def accuracy_per_class(preds, labels, label_dict):
    label_dict_inverse = {v: k for k, v in label_dict.items()}

    preds_flat = np.argmax(preds, axis=1).flatten()
    labels_flat = labels.flatten()

    for label in np.unique(labels_flat):
        y_preds = preds_flat[labels_flat==label]
        y_true = labels_flat[labels_flat==label]
        print(f'Class: {label_dict_inverse[label]}')
        print(f'Accuracy: {len(y_preds[y_preds==label])}/{len(y_true)}\n')


def set_logger(log_path):
    """Set the logger to log info in terminal and file `log_path`.

    In general, it is useful to have a logger so that every output to the terminal is saved
    in a permanent file. Here we save it to `model_dir/train.log`.

    Example:
    ```
    logging.info("Starting training...")
    ```

    Args:
        log_path: (string) where to log
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    if not logger.handlers:
        # Logging to a file
        file_handler = logging.FileHandler(log_path)
        file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        logger.addHandler(file_handler)

        # Logging to console
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(logging.Formatter('%(message)s'))
        logger.addHandler(stream_handler)


class Params():
    """Class that loads hyperparameters from a json file.

    Example:
    ```
    params = Params(json_path)
    print(params.learning_rate)
    params.learning_rate = 0.5  # change the value of learning_rate in params
    ```
    """

    def __init__(self, json_path):
        with open(json_path) as f:
            params = json.load(f)
            self.__dict__.update(params)

    def save(self, json_path):
        with open(json_path, 'w') as f:
            json.dump(self.__dict__, f, indent=4)

    def update(self, json_path):
        """Loads parameters from json file"""
        with open(json_path) as f:
            params = json.load(f)
            self.__dict__.update(params)

    @property
    def dict(self):
        """Gives dict-like access to Params instance by `params.dict['learning_rate']`"""
        return self.__dict__
```
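The helpers above are small enough to exercise without a trained model. The following is a minimal, hypothetical sketch, not part of the original repository: it assumes `f1_score_func` and `Params` are importable from this `util.py` and that numpy and scikit-learn are installed, and it uses made-up logits and a temporary JSON file.

```python
import json
import tempfile

import numpy as np

# Fake model outputs (logits) for 4 samples over 3 classes, plus gold labels.
preds = np.array([[2.0, 0.1, 0.3],
                  [0.2, 1.5, 0.1],
                  [0.1, 0.2, 3.0],
                  [1.2, 0.9, 0.4]])
labels = np.array([0, 1, 2, 1])

f1, acc = f1_score_func(preds, labels)   # weighted F1 and plain accuracy
print(f"weighted F1={f1:.3f}, accuracy={acc:.3f}")

# Round-trip a hyperparameter dict through Params via a temporary JSON file.
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump({"learning_rate": 1e-5, "batch_size": 16}, f)
    json_path = f.name

params = Params(json_path)
print(params.learning_rate, params.dict["batch_size"])
```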
{ "source": "jharkawat/meddoprof_shared_task", "score": 3 }
#### File: conllandstandoff_convertor/convert_standoff_conll_ner/anntoconll_wlp.py ```python from __future__ import print_function import os from glob import glob import re import sys from collections import namedtuple from io import StringIO from os import path from sentencesplit import sentencebreaks_to_newlines # assume script in brat tools/ directory, extend path to find sentencesplit.py sys.path.append(os.path.join(os.path.dirname(__file__), '../server/src')) sys.path.append('.') options = None EMPTY_LINE_RE = re.compile(r'^\s*$') CONLL_LINE_RE = re.compile(r'^\S+\t\d+\t\d+.') from nltk.tokenize import word_tokenize from map_text_to_char import map_text_to_char #JT: Dec 6 class FormatError(Exception): pass def argparser(): import argparse ap = argparse.ArgumentParser(description='Convert text and standoff ' + 'annotations into CoNLL format.') ap.add_argument('-a', '--annsuffix', default="ann", help='Standoff annotation file suffix (default "ann")') ap.add_argument('-c', '--singleclass', default=None, help='Use given single class for annotations') ap.add_argument('-n', '--nosplit', default=True, action='store_true', help='No sentence splitting') ap.add_argument('-o', '--outsuffix', default="conll", help='Suffix to add to output files (default "conll")') ap.add_argument('-v', '--verbose', default=False, action='store_true', help='Verbose output') # ap.add_argument('text', metavar='TEXT', nargs='+', # help='Text files ("-" for STDIN)') return ap def read_sentence(f): """Return lines for one sentence from the CoNLL-formatted file. Sentences are delimited by empty lines. """ lines = [] for l in f: lines.append(l) if EMPTY_LINE_RE.match(l): break if not CONLL_LINE_RE.search(l): raise FormatError( 'Line not in CoNLL format: "%s"' % l.rstrip('\n')) return lines def strip_labels(lines): """Given CoNLL-format lines, strip the label (first TAB-separated field) from each non-empty line. Return list of labels and list of lines without labels. Returned list of labels contains None for each empty line in the input. """ labels, stripped = [], [] labels = [] for l in lines: if EMPTY_LINE_RE.match(l): labels.append(None) stripped.append(l) else: fields = l.split('\t') labels.append(fields[0]) stripped.append('\t'.join(fields[1:])) return labels, stripped def attach_labels(labels, lines): """Given a list of labels and CoNLL-format lines, affix TAB-separated label to each non-empty line. Returns list of lines with attached labels. """ assert len(labels) == len( lines), "Number of labels (%d) does not match number of lines (%d)" % (len(labels), len(lines)) attached = [] for label, line in zip(labels, lines): empty = EMPTY_LINE_RE.match(line) assert (label is None and empty) or (label is not None and not empty) if empty: attached.append(line) else: attached.append('%s\t%s' % (label, line)) return attached # NERsuite tokenization: any alnum sequence is preserved as a single # token, while any non-alnum character is separated into a # single-character token. TODO: non-ASCII alnum. 
TOKENIZATION_REGEX = re.compile(r'([0-9a-zA-Z]+|[^0-9a-zA-Z])') NEWLINE_TERM_REGEX = re.compile(r'(.*?\n)') def handle_non_standard_char(s): s = s.replace('\xa0', ' ') s = s.replace('\x2f', "/") s = s.replace('\x27', "'") s = s.replace('\x28', "(") s = s.replace('\x29', ")") s = s.replace('\x2e', ".") s = s.replace('\x5b', "[") s = s.replace('\x5c', "\\") s = s.replace('\x5d', "]") s = s.replace('\x7b', "{") s = s.replace('\x7b', "|") s = s.replace('\x7d', "}") s = s.replace('\t', ' ') s = re.sub(r'[^\x00-\x7F]+',' ', s) return s def text_to_conll(f): """Convert plain text into CoNLL format.""" global options # print(f) if options.nosplit: sentences = f.readlines() # print("sentences: ",sentences) else: sentences = [] for l in f: l = sentencebreaks_to_newlines(l) sentences.extend([s for s in NEWLINE_TERM_REGEX.split(l) if s]) lines = [] offset = 0 # print(sentences) for s in sentences: nonspace_token_seen = False s = handle_non_standard_char(s) tokens = word_tokenize(s) token_w_pos = map_text_to_char(s, tokens, offset) # print("token_w_pos: ",token_w_pos) if 'TetKanCam' in s: print(token_w_pos) for(t, pos) in token_w_pos: t=t.strip() if t=='': continue if not t.isspace(): l1=['O', pos, pos + len(t), t] lines.append(l1) # print(l1) lines.append([]) offset+=len(s) # tokens = [t for t in TOKENIZATION_REGEX.split(s) if t] # JT : Dec 6 # for t in tokens: # if not t.isspace(): # lines.append(['O', offset, offset + len(t), t]) # nonspace_token_seen = True # offset += len(t) # # sentences delimited by empty lines # if nonspace_token_seen: # lines.append([]) # add labels (other than 'O') from standoff annotation if specified if options.annsuffix: textbounds, dict_of_entity, list_of_relns=get_annotations(f.name) lines = relabel(lines, textbounds , dict_of_entity, list_of_relns, f) # print(lines) # lines = [[l[0], str(l[1]), str(l[2]), l[3]] if l else l for l in lines] #JT: Dec 6 # print(lines) lines = [[l[3],l[0]] if l else l for l in lines] #JT: Dec 6 # lines = [[l[3],l[0],l[4],l[5],l[6]] if l else l for l in lines] #JT: Dec 6 return StringIO('\n'.join(('\t'.join(l) for l in lines))) def Find_All_Reln(id_, list_of_relns): type_id_counter = {} list_of_relns_w_id =[] for l in list_of_relns: type_ = l["type"] arg1= l["arg1"] arg2 = l["arg2"] # print(type_, arg1, arg2) if type_ not in type_id_counter: type_id_counter[(type_, arg1, arg2)]=0 type_id_counter[(type_, arg1, arg2)]+=1 type_w_id = type_+ str(type_id_counter[(type_, arg1, arg2)]) reln = {} reln["arg1"]= l["arg1"] reln["arg2"] = l["arg2"] reln["type"]=type_w_id list_of_relns_w_id.append(reln) all_relations_w_id = [] for reln in list_of_relns_w_id: type_ = reln["type"] arg1 = reln["arg1"] arg2 = reln["arg2"] # print(type_, arg1, arg2) if id_==arg1: l = (type_, 1) all_relations_w_id.append(l) if id_==arg2: l = (type_, 2) all_relations_w_id.append(l) # print(all_relations_w_id) return all_relations_w_id def relabel(lines, annotations, dict_of_entity, list_of_relns, file_name): # print("lines: ",lines) # print("annotations", annotations) global options # print(dict_of_entity) # TODO: this could be done more neatly/efficiently offset_label = {} for tb in annotations: # print(tb) for i in range(tb.start, tb.end): if i in offset_label: print("Warning: overlapping annotations in ", file=sys.stderr) offset_label[i] = tb # print(offset_label) prev_label = None for i, l in enumerate(lines): if not l: prev_label = None continue tag, start, end, token = l # print(l) # TODO: warn for multiple, detailed info for non-initial label = None id_=None if (start, 
end) in dict_of_entity: tb, id_ = dict_of_entity[(start, end)] # print(id_) all_relations_w_id = [] if id_: all_relations_w_id = Find_All_Reln(id_, list_of_relns) # print(all_relations_w_id) for o in range(start, end): if o in offset_label: if o != start: pass # print('Warning: annotation-token boundary mismatch: "%s" --- "%s"' % ( # token, offset_label[o].text), file=sys.stderr) label = offset_label[o].type break tag_prefix = "" if label is not None: if label == prev_label: tag = 'I-' + label tag_prefix= "I-" else: tag = 'B-' + label tag_prefix= "B-" prev_label = label relation = "[]" arg1 = "[]" arg2 = "[]" if len(all_relations_w_id)>0: # print(all_relations_ws_id) relation = "[ " arg1 = "[ " arg2 = "[ " for rel_info in all_relations_w_id: (type_, arg_num)= rel_info arg_num= int(arg_num) type_w_tag_prefix = tag_prefix+type_ # print(type_w_tag_prefix, arg_num) relation += type_w_tag_prefix + "," if arg_num==1: arg2 += "_ " + "," arg1 += tag_prefix+"Arg1" + "," if arg_num==2: arg1 += "_" + "," arg2 += tag_prefix+"Arg2" + "," relation=relation[:-1] arg1=arg1[:-1] arg2=arg2[:-1] relation += " ]" arg1 += " ]" arg2 += " ]" if token!='': lines[i] = [tag, start, end, token.strip(), relation, arg1, arg2] # print(lines[i]) # optional single-classing if options.singleclass: for l in lines: if l and l[0] != 'O': l[0] = l[0][:2] + options.singleclass return lines def process(f): return text_to_conll(f) def process_files_v1(files): global options nersuite_proc = [] try: for fn in files: print("now_processing", fn) try: if fn == '-': lines = process(sys.stdin) else: with open(fn, 'rU') as f: lines = process(f) # TODO: better error handling if lines is None: raise FormatError if fn == '-' or not options.outsuffix: sys.stdout.write(''.join(lines)) else: ofn = path.splitext(fn)[0] + options.outsuffix with open(ofn, 'wt') as of: of.write(''.join(lines)) except BaseException: # TODO: error processing raise except Exception as e: for p in nersuite_proc: p.kill() if not isinstance(e, FormatError): raise def process_files(files, output_directory_main, phase_name): global options # print("phase_name: ",phase_name) output_directory = output_directory_main+"/"+phase_name+"/" try: os.mkdir(output_directory) except Exception as e: pass nersuite_proc = [] for fn in files: # print("now_processing: ",fn) with open(fn, 'rU') as f: lines = text_to_conll(f) # print(lines) # TODO: better error handling if lines is None: print("Line is None") continue file_name=fn.split("/")[-1][0:-4] ofn = output_directory+file_name+"_" +options.outsuffix.replace(".","")+".txt" with open(ofn, 'wt') as of: of.write(''.join(lines)) of.write("\n\n\n") # start standoff processing TEXTBOUND_LINE_RE1 = re.compile(r'^T\d+\t') TEXTBOUND_LINE_RE2 = re.compile(r'^R\d+\t') TEXTBOUND_LINE_RE3 = re.compile(r'^E\d+\t') Textbound = namedtuple('Textbound', 'start end type text') def parse_textbounds(f): """Parse textbound annotations in input, returning a list of Textbound.""" textbounds = [] dict_of_entity = {} list_of_relns = [] for l in f: l = l.rstrip('\n') if TEXTBOUND_LINE_RE3.search(l): try: # print(l.strip().split('\t')) id_, act_ = l.strip().split('\t') # print("----------------",act_.split()) type_ = "Action" arg_1_id, arg_2_id = act_.split() arg_1_id = arg_1_id.replace("Action:","") arg_2_id = arg_2_id.replace("Acts-on:","") # print(type_, arg_1_id, arg_2_id) reln_dict = {} reln_dict["type"]=type_ reln_dict["arg1"]=arg_1_id reln_dict["arg2"]=arg_2_id list_of_relns.append(reln_dict) # start, end = int(start), int(end) # print(id_, type_offsets, 
text) # # textbounds.append(Textbound(start, end, type_, text)) except Exception as e: continue if TEXTBOUND_LINE_RE2.search(l): try: # print(l.strip().split('\t')) id_, rel_ = l.strip().split('\t') type_, arg_1_id, arg_2_id = rel_.split() arg_1_id = arg_1_id.replace("Arg1:","") arg_2_id = arg_2_id.replace("Arg2:","") reln_dict = {} reln_dict["type"]=type_ reln_dict["arg1"]=arg_1_id reln_dict["arg2"]=arg_2_id list_of_relns.append(reln_dict) # print(type_, arg_1_id, arg_2_id) # start, end = int(start), int(end) # print(id_, type_offsets, text) # # textbounds.append(Textbound(start, end, type_, text)) except Exception as e: # print("*********************************",l) print(f) print(e) if TEXTBOUND_LINE_RE1.search(l): try: line_values = l.strip().split('\t') id_ = line_values[0] type_offsets = line_values[1] text = " ".join(l[1:]) # id_, type_offsets, text = l.split('\t') type_, start, end = type_offsets.split() start, end = int(start), int(end) # print(id_, type_offsets, text) dict_of_entity[(start, end)]= (Textbound(start, end, type_, text),id_) textbounds.append(Textbound(start, end, type_, text)) except Exception as e: # print("*********************************",l) print(f, e, l) # print(dict_of_entity) # print(list_of_relns) return textbounds, dict_of_entity, list_of_relns def eliminate_overlaps(textbounds, fn): eliminate = {} # TODO: avoid O(n^2) overlap check for t1 in textbounds: for t2 in textbounds: if t1 is t2: continue if t2.start >= t1.end or t2.end <= t1.start: continue # eliminate shorter if t1.end - t1.start > t2.end - t2.start: print("Eliminate %s due to overlap with %s" % ( t2, t1), file=sys.stderr) print(fn) eliminate[t2] = True else: print("Eliminate %s due to overlap with %s" % ( t1, t2), file=sys.stderr) eliminate[t1] = True print(fn) return [t for t in textbounds if t not in eliminate] def get_annotations(fn): global options annfn = path.splitext(fn)[0] + options.annsuffix with open(annfn, 'rU') as f: textbounds, dict_of_entity, list_of_relns = parse_textbounds(f) textbounds = eliminate_overlaps(textbounds, fn) return textbounds, dict_of_entity, list_of_relns # end standoff processing def Read_Main_Input_Folder(input_folder, phase_name=""): start_dir = input_folder # start_dir = "/Users/jeniya/Desktop/NER_RECOG_SW/brat-v1.3_Crunchy_Frog/data/so_annotated_data/selected/phase_01_01" pattern = "*.txt" file_location_list=[] for dir,_,_ in os.walk(start_dir): file_location_list.extend(glob(os.path.join(dir,pattern))) # print(file_location_list) # for txt_file_loc in file_location_list: # print("txt_file_loc: ",txt_file_loc) # #print("phase_name: ", phase_name) # try: # ann_file_loc=txt_file_loc[:-4]+".ann" # except: # continue return file_location_list def covert_standoff_to_conll(input_folder_main= "all_data/train_data/train/", output_folder = 'all_data/train_data/Conll_Format/'): # print(input_folder_main, output_folder) global options options = argparser().parse_args("") # make sure we have a dot in the suffixes, if any if options.outsuffix and options.outsuffix[0] != '.': options.outsuffix = '.' + options.outsuffix if options.annsuffix and options.annsuffix[0] != '.': options.annsuffix = '.' 
+ options.annsuffix list_of_folders = [] for dir,_,_ in sorted(os.walk(input_folder_main)): if dir == input_folder_main: continue try: folder_name = dir.replace(input_folder_main,"") list_of_folders.append(folder_name) # print(dir, input_folder_main, folder_name) # list_of_phases.append(phase_num) except: continue if len(list_of_folders)==0: list_of_folders=[input_folder_main] # print(list_of_folders) for folder in list_of_folders: phase_name= "" input_folder=input_folder_main+phase_name # print("input_folder", input_folder) list_of_files=Read_Main_Input_Folder(input_folder, phase_name.replace("/","")) # print(list_of_files) # list_of_files=['checked_annotations_training/phase_01/protocol_0.txt'] process_files(list_of_files, output_folder,phase_name) def convert_standoff_conll_single_file(input_standoff_folder, output_conll_folder, output_conll_file): covert_standoff_to_conll(input_folder_main= input_standoff_folder, output_folder = output_conll_folder) list_of_files = sorted(Read_Main_Input_Folder(output_conll_folder)) fout = open(output_conll_file,'w') for file_path in list_of_files: for line in open(file_path): fout.write(line) fout.write("\n") fout.flush() # print(list_of_files) if __name__ == "__main__": covert_standoff_to_conll(input_folder_main= "all_data/train_data/train/", output_folder = 'all_data/train_data/Conll_Format/') covert_standoff_to_conll(input_folder_main= "all_data/test_data/test/", output_folder = 'all_data/test_data/Conll_Format/') covert_standoff_to_conll(input_folder_main= "all_data/dev_data/dev/", output_folder = 'all_data/dev_data/Conll_Format/') # covert_standoff_to_conll(input_folder_main= "all_data/sample_test/train/", output_folder = 'all_data/sample_test/Conll_Format/') # covert_standoff_to_conll(input_folder_main= "wo_labels_surprise/", output_folder = 'wo_labels_surprise_conll/') # covert_standoff_to_conll(input_folder_main= "wo_labels/general/", output_folder = 'wo_labels/general/') ```
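As a compact illustration of what `parse_textbounds` plus `relabel` accomplish, here is a hedged, self-contained sketch of the core character-offset-to-BIO mapping. The toy sentence, span types, and the `to_bio` helper are invented for this example; the real script additionally handles brat relation lines, NLTK tokenization, and file I/O.

```python
from collections import namedtuple

Textbound = namedtuple("Textbound", "start end type text")

def to_bio(tokens_with_offsets, textbounds):
    """Assign B-/I-/O tags to (token, start, end) triples from entity spans."""
    labels = []
    prev_type = None
    for token, start, end in tokens_with_offsets:
        hit = next((tb for tb in textbounds if start < tb.end and end > tb.start), None)
        if hit is None:
            labels.append("O")
            prev_type = None
        else:
            # Same convention as relabel(): continue with I- when the previous
            # token carried the same label, otherwise start a new B- span.
            prefix = "I-" if hit.type == prev_type else "B-"
            labels.append(prefix + hit.type)
            prev_type = hit.type
    return labels

tokens = [("Add", 0, 3), ("5", 4, 5), ("ml", 6, 8), ("of", 9, 11), ("buffer", 12, 18)]
spans = [Textbound(4, 8, "Amount", "5 ml"), Textbound(12, 18, "Reagent", "buffer")]
print(list(zip([t[0] for t in tokens], to_bio(tokens, spans))))
# [('Add', 'O'), ('5', 'B-Amount'), ('ml', 'I-Amount'), ('of', 'O'), ('buffer', 'B-Reagent')]
```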
{ "source": "jharke/covid19-prosociality", "score": 2 }
#### File: otree/payment_info/pages.py
```python
from ._builtin import Page, WaitPage
from otree.api import Currency as c, currency_range

from .models import Constants


class PaymentInfo(Page):
    form_model = 'player'
    form_fields = ['survey_comments']

    def vars_for_template(self):
        global_donation = self.participant.vars['global_donation']
        local_donation = (self.participant.vars['donation']
                          - self.participant.vars['global_donation'])
        participation_fee = self.session.config['participation_fee']
        payoff = self.participant.payoff

        if payoff == self.session.config['endowment']:
            non_donor = True
            local_donation = str(100 - int(global_donation)) + '%'
            global_donation = str(int(global_donation)) + '%'
        else:
            non_donor = False

        if self.participant.vars['timeout']:
            payoff = c(0)

        return dict(
            participation_fee=participation_fee,
            payoff=payoff,
            global_donation=global_donation,
            local_donation=local_donation,
            non_donor=non_donor,
        )


class RedirectProlific(Page):
    pass


page_sequence = [PaymentInfo, RedirectProlific]
```
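The branching in `vars_for_template` is easier to see in isolation. Below is a hypothetical, framework-free restatement of the same display logic; the field names follow the oTree code above, while the function name and the example numbers are made up for illustration only.

```python
def donation_display(payoff, endowment, donation, global_donation, timed_out):
    """Mirror of the template logic: percentage split for non-donors, amounts otherwise."""
    local_donation = donation - global_donation
    if payoff == endowment:
        # Participant kept the full endowment, so the split is shown as percentages.
        local_donation = f"{100 - int(global_donation)}%"
        global_donation = f"{int(global_donation)}%"
    if timed_out:
        payoff = 0
    return dict(payoff=payoff, global_donation=global_donation, local_donation=local_donation)

print(donation_display(payoff=5.0, endowment=5.0, donation=0.0, global_donation=40, timed_out=False))
# {'payoff': 5.0, 'global_donation': '40%', 'local_donation': '60%'}
```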
{ "source": "jharley/packer-example-appserver", "score": 2 }
#### File: packer-example-appserver/tests/test_default.py
```python
def test_ami_launch(host):
    assert host.system_info.type == 'linux'


def test_appserver(host):
    assert host.socket('tcp://80').is_listening

    cmd = host.run("curl -s http://localhost | grep 'Hello, DevOps TO'")
    assert cmd.rc == 0
```
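If more coverage were wanted, checks in the same style are straightforward to add. The sketch below is hypothetical: it assumes the Testinfra `host` fixture used by the tests above, and the `nginx` service name is a guess rather than something taken from the image.

```python
def test_web_service_enabled(host):
    # Assumption: the image ships nginx; swap in the real service name if different.
    svc = host.service("nginx")
    assert svc.is_running
    assert svc.is_enabled


def test_index_page_status(host):
    cmd = host.run("curl -s -o /dev/null -w '%{http_code}' http://localhost")
    assert cmd.stdout.strip() == "200"
```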
{ "source": "jharman25/pytc", "score": 3 }
#### File: pytc/experiments/base.py ```python __description__ = \ """ experiments.py Classes for loading experimental ITC data and associating those data with a model. Units: Volumes are in microliters Temperatures are in Kelvin Concentrations are in molar Energy is `units`, where `units` is specified when instantiating the ITCExperiment class. It must be a in the AVAIL_UNITS dictionary. """ __author__ = "<NAME>" __date__ = "2016-06-22" import random, string, os import numpy as np class BaseITCExperiment: """ Class that holds an experimental ITC measurement and a model that describes it. """ AVAIL_UNITS = {"cal/mol":1.9872036, "kcal/mol":0.0019872036, "J/mol":8.3144598, "kJ/mol":0.0083144598} def __init__(self,dh_file,model,shot_start=1,units="cal/mol", uncertainty=0.1,**model_kwargs): """ Parameters ---------- dh_file: string integrated heats file written out by origin software. model: ITCModel subclass instance ITCModel subclass to use for modeling shot_start: int what shot to use as the first real point. Shots start at 0, so default=1 discards first point. units : string file units ("cal/mol","kcal/mol","J/mol","kJ/mol") uncertainty : float > 0.0 uncertainty in integrated heats (set to same for all shots, unless specified in something like NITPIC output file). **model_kwargs: any keyword arguments to pass to the model. Any keywords passed here will override whatever is stored in the dh_file. """ self.dh_file = dh_file self._shot_start = shot_start # Deal with units self._units = units try: self._R = self.AVAIL_UNITS[self._units] except KeyError: err = "units must be one of:\n" for k in self.AVAIL_UNITS.keys(): err += " {}\n".format(k) err += "\n" raise ValueError(err) # For numerical reasons, there should always be *some* uncertainty self._uncertainty = uncertainty if self._uncertainty == 0.0: self._uncertainty = 1e-12 # Load in heats extension = self.dh_file.split(".")[-1] self._read_heats_file() # Initialize model using information read from heats file self._model = model(S_cell=self.stationary_cell_conc, T_syringe=self.titrant_syringe_conc, cell_volume=self.cell_volume, shot_volumes=self._shots,**model_kwargs) r = "".join([random.choice(string.ascii_letters) for i in range(20)]) self._experiment_id = "{}_{}".format(self.dh_file,r) def _read_heats_file(self): """ Dummy heat reading file. """ pass @property def dQ(self): """ Return heats calculated by the model with parameters defined in params dictionary. """ if len(self._model.dQ) == 0: return np.array(()) return self._model.dQ[self._shot_start:] @property def dilution_heats(self): """ Return dilution heats calculated by the model with parameters defined in params dictionary. """ if len(self._model.dilution_heats) == 0: return np.array(()) return self._model.dilution_heats[self._shot_start:] @property def param_values(self): """ Values of fit parameters. """ return self._model.param_values @property def param_stdevs(self): """ Standard deviations on fit parameters. """ return self._model.param_stdevs @property def param_ninetyfives(self): """ 95% confidence intervals on fit parmeters. """ return self._model.param_ninetyfives @property def model(self): """ Fitting model. """ return self._model @property def shot_start(self): """ Starting shot to use. """ return self._shot_start @shot_start.setter def shot_start(self,value): """ Change starting shot. """ self._shot_start = value @property def heats(self): """ Return experimental heats. """ return self._heats[self._shot_start:] @heats.setter def heats(self,heats): """ Set the heats. 
""" self._heats[self._shot_start:] = heats[:] @property def heats_stdev(self): """ Standard deviation on the uncertainty of the heat. """ return self._heats_stdev[self._shot_start:] @heats_stdev.setter def heats_stdev(self,heats_stdev): """ Set the standard deviation on the uncertainty of the heat. """ self._heats_stdev[self._shot_start:] = heats_stdev[:] @property def mol_injected(self): """ Return the mols injected over shots. """ # uL * mol/L * L/1e6 uL -> mol return self._shots[self._shot_start:]*self.titrant_syringe_conc*1e-6 @property def mole_ratio(self): """ Return the mole ratio of titrant to stationary. """ return self._model.mole_ratio[self._shot_start:] @property def experiment_id(self): """ Return a unique experimental id. """ return self._experiment_id @property def units(self): """ Units for file. """ return self._units @units.setter def units(self,units): """ Change the units. """ # Deal with units self._units = units try: self._R = self.AVAIL_UNITS[self._units] except KeyError: err = "units must be one of:\n" for k in self.AVAIL_UNITS.keys(): err += " {}\n".format(k) err += "\n" raise ValueError(err) @property def R(self): """ Experiment gas constant. """ return self._R ``` #### File: pytc/fitters/bayesian.py ```python __description__ = \ """ Fitter subclass for performing bayesian (MCMC) fits. """ __author__ = "<NAME>" __date__ = "2017-05-10" from .base import Fitter import emcee, corner import numpy as np import scipy.optimize as optimize import multiprocessing class BayesianFitter(Fitter): """ """ def __init__(self,num_walkers=100,initial_walker_spread=1e-4,ml_guess=True, num_steps=100,burn_in=0.1,num_threads=1): """ Initialize the bayesian fitter Parameters ---------- num_walkers : int > 0 how many markov chains to have in the analysis initial_walker_spread : float each walker is initialized with parameters sampled from normal distributions with mean equal to the initial guess and a standard deviation of guess*initial_walker_spread ml_guess : bool if true, do an ML optimization to get the initial guess num_steps: number of steps to run the markov chains burn_in : float between 0 and 1 fraction of samples to discard from the start of the run num_threads : int or `"max"` number of threads to use. if `"max"`, use the total number of cpus. [NOT YET IMPLEMENTED] """ Fitter.__init__(self) self._num_walkers = num_walkers self._initial_walker_spread = initial_walker_spread self._ml_guess = ml_guess self._num_steps = num_steps self._burn_in = burn_in self._num_threads = num_threads if self._num_threads == "max": self._num_threads = multiprocessing.cpu_count() if not type(self._num_threads) == int and self._num_threads > 0: err = "num_threads must be 'max' or a positive integer\n" raise ValueError(err) if self._num_threads != 1: err = "multithreading has not yet been (fully) implemented.\n" raise NotImplementedError(err) self._success = None self.fit_type = "bayesian" def ln_prior(self,param): """ Log prior of fit parameters. Priors are uniform between bounds and set to -np.inf outside of bounds. Parameters ---------- param : array of floats parameters to fit Returns ------- float value for log of priors. """ # If a paramter falls outside of the bounds, make the prior -infinity if np.sum(param < self._bounds[0,:]) > 0 or np.sum(param > self._bounds[1,:]) > 0: return -np.inf # otherwise, uniform return 0.0 def ln_prob(self,param): """ Posterior probability of model parameters. 
Parameters ---------- param : array of floats parameters to fit Returns ------- float value for log posterior proability """ # Calcualte prior. If not finite, this solution has an -infinity log # likelihood ln_prior = self.ln_prior(param) if not np.isfinite(ln_prior): return -np.inf # Calcualte likelihood. If not finite, this solution has an -infinity # log likelihood ln_like = self.ln_like(param) if not np.isfinite(ln_like): return -np.inf # log posterior is log prior plus log likelihood return ln_prior + ln_like def fit(self,model,parameters,bounds,y_obs,y_err=None,param_names=None): """ Fit the parameters. Parameters ---------- model : callable model to fit. model should take "parameters" as its only argument. this should (usually) be GlobalFit._y_calc parameters : array of floats parameters to be optimized. usually constructed by GlobalFit._prep_fit bounds : list list of two lists containing lower and upper bounds y_obs : array of floats observations in an concatenated array y_err : array of floats or None standard deviation of each observation. if None, each observation is assigned an error of 1/num_obs param_names : array of str names of parameters. If None, parameters assigned names p0,p1,..pN """ self._model = model self._y_obs = y_obs # Convert the bounds (list of lower and upper lists) into a 2d numpy array self._bounds = np.array(bounds) # If no error is specified, assign the error as 1/N, identical for all # points self._y_err = y_err if y_err is None: self._y_err = np.array([1/len(self._y_obs) for i in range(len(self._y_obs))]) if param_names is None: self._param_names = ["p{}".format(i) for i in range(len(parameters))] else: self._param_names = param_names[:] # Make initial guess (ML or just whatever the paramters sent in were) if self._ml_guess: fn = lambda *args: -self.weighted_residuals(*args) ml_fit = optimize.least_squares(fn,x0=parameters,bounds=self._bounds) self._initial_guess = np.copy(ml_fit.x) else: self._initial_guess = np.copy(parameters) # Create walker positions # Size of perturbation in parameter depends on the scale of the parameter perturb_size = self._initial_guess*self._initial_walker_spread ndim = len(parameters) pos = [self._initial_guess + np.random.randn(ndim)*perturb_size for i in range(self._num_walkers)] # Sample using walkers self._fit_result = emcee.EnsembleSampler(self._num_walkers, ndim, self.ln_prob, threads=self._num_threads) self._fit_result.run_mcmc(pos, self._num_steps) # Create list of samples to_discard = int(round(self._burn_in*self._num_steps,0)) self._samples = self._fit_result.chain[:,to_discard:,:].reshape((-1,ndim)) self._lnprob = self._fit_result.lnprobability[:,:].reshape(-1) # Get mean and standard deviation self._estimate = np.mean(self._samples,axis=0) self._stdev = np.std(self._samples,axis=0) # Calculate 95% confidence intervals self._ninetyfive = [] lower = int(round(0.025*self._samples.shape[0],0)) upper = int(round(0.975*self._samples.shape[0],0)) for i in range(self._samples.shape[1]): nf = np.sort(self._samples[:,i]) self._ninetyfive.append([nf[lower],nf[upper]]) self._ninetyfive = np.array(self._ninetyfive) self._success = True @property def fit_info(self): """ Information about the Bayesian run. 
""" output = {} output["Num walkers"] = self._num_walkers output["Initial walker spread"] = self._initial_walker_spread output["Use ML guess"] = self._ml_guess output["Num steps"] = self._num_steps output["Burn in"] = self._burn_in output["Final sample number"] = len(self._samples[:,0]) output["Num threads"] = self._num_threads return output @property def samples(self): """ Bayesian samples. """ return self._samples ``` #### File: pytc/indiv_models/single_site_competitor.py ```python __description__ = \ """ single_site.py Model describing competition between two ligands for a single site. """ __author__ = "<NAME>" __date__ = "2016-06-22" import numpy as np from .base import ITCModel class SingleSiteCompetitor(ITCModel): """ Competition between two ligands for the same site. Model taken from: <NAME> (2000) Analytical Biochemistry 277(2):260-266 doi:10.1006/abio.1999.4402 http://www.sciencedirect.com/science/article/pii/S0003269799944020 """ def param_definition(K=1e6,Kcompetitor=1e6, dH=-4000,dHcompetitor=-4000, fx_competent=1.0): pass def __init__(self, S_cell=100e-6,S_syringe=0.0, T_cell=0.0, T_syringe=1000e-6, C_cell=200e-6,C_syringe=0.0, cell_volume=300.0, shot_volumes=[2.5 for i in range(30)]): """ S_cell: stationary concentration in cell in M S_syringe: stationary concentration in syringe in M T_cell: titrant concentration cell in M T_syringe: titrant concentration syringe in M C_cell: competitor concentration cell in M C_syringe: competitor concentration syringe in M cell_volume: cell volume, in uL shot_volumes: list of shot volumes, in uL. """ # Run standard __init__ function to create titrations, initialize params # etc. super().__init__(S_cell,S_syringe,T_cell,T_syringe,cell_volume,shot_volumes) # Titrate the competitor self._C_cell = C_cell self._C_syringe = C_syringe self._C_conc = self._titrate_species(self._C_cell,self._C_syringe) @property def dQ(self): """ Calculate the heats that would be observed across shots for a given set of enthalpies and binding constants for each reaction. """ # ----- Determine mole fractions ----- S_conc_corr = self._S_conc*self.param_values["fx_competent"] c_a = self.param_values["K"]*S_conc_corr c_b = self.param_values["Kcompetitor"]*S_conc_corr r_a = self._T_conc/S_conc_corr r_b = self._C_conc/S_conc_corr alpha = 1/c_a + 1/c_b + r_a + r_b - 1 beta = (r_a - 1)/c_b + (r_b - 1)/c_a + 1/(c_a*c_b) gamma = -1/(c_a*c_b) theta = np.arccos((-2*alpha**3 + 9*alpha*beta - 27*gamma)/(2*np.sqrt((alpha**2 - 3*beta)**3))) mol_fx_s = (2*np.sqrt(alpha**2 - 3*beta) * np.cos(theta/3) - alpha)/3 mol_fx_st = r_a*mol_fx_s/(1/c_a + mol_fx_s) mol_fx_sc = r_b*mol_fx_s/(1/c_b + mol_fx_s) # ---- Relate mole fractions to heat ----- X = self.param_values["dH"]*(mol_fx_st[1:] - mol_fx_st[:-1]) Y = self.param_values["dHcompetitor"]*(mol_fx_sc[1:] - mol_fx_sc[:-1]) to_return = self._cell_volume*S_conc_corr[1:]*(X + Y) + self.dilution_heats return to_return ``` #### File: pytc/indiv_models/single_site.py ```python __description__ = \ """ single_site.py Model describing binding of a ligand to a single site. """ __author__ = "<NAME>" __date__ = "2016-06-22" import numpy as np from .base import ITCModel class SingleSite(ITCModel): """ Binding at a single site. """ def param_definition(K=1e6,dH=-4000.0,fx_competent=1.0): pass @property def dQ(self): """ Calculate the heats that would be observed across shots for a given set of enthalpies and binding constants for each reaction. 
""" # ----- Determine mole fractions ----- S_conc_corr = self._S_conc*self.param_values["fx_competent"] b = S_conc_corr + self._T_conc + 1/self.param_values["K"] ST = (b - np.sqrt((b)**2 - 4*S_conc_corr*self._T_conc))/2 mol_fx_st = ST/S_conc_corr # ---- Relate mole fractions to heat ----- X = self.param_values["dH"]*(mol_fx_st[1:] - mol_fx_st[:-1]) to_return = self._cell_volume*S_conc_corr[1:]*X + self.dilution_heats return to_return ```
{ "source": "jharmer95/ECE-531-Throttle-Body-Project", "score": 3 }
#### File: ECE-531-Throttle-Body-Project/controller/i2c_comms.py ```python __author__ = "<NAME>" __copyright__ = "Copyright (c) 2020 <NAME>. All rights reserved." __license__ = "MIT" __version__ = "0.1" from enum import IntEnum, unique from typing import Any, Dict, List, Tuple from simple_i2c import read_bytes, write_bytes import struct, sys # Globals ADDR = 0x08 # bus address @unique class Function(IntEnum): FUNC_GET_SERVO = 1 FUNC_SET_SERVO = 2 @unique class ErrorCode(IntEnum): ERROR_NONE = 0 ERROR_GENERIC = -1 class Buffer: cmd: int params: Dict[str, List] union_type: str def __init__(self, cmd): self.cmd = cmd self.params = { "ints": [0, 0, 0, 0, 0, 0, 0], "floats": [0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], "string": "", } self.union_type = "int" def pack(self) -> bytes: if self.union_type == "void": data = struct.pack("<b", self.cmd) while len(data) < 28: data += bytes([0]) return data elif self.union_type == "int": return struct.pack("<biiiiiii", self.cmd, *self.params["ints"][0:7]) elif self.union_type == "uint": return struct.pack("<bIIIIIII", self.cmd, *self.params["ints"][0:7]) elif self.union_type == "float": return struct.pack("<bfffffff", self.cmd, *self.params["floats"][0:7]) elif self.union_type == "string": data = struct.pack( "<bs", self.cmd, self.params["string"].encode("utf-8")[0:27] ) while len(data) < 28: data += bytes([0]) return data else: return bytes() @classmethod def unpack(cls, data: bytes, u_type: str): if u_type == "void": [cmd] = struct.unpack("<b", data[0:1]) inst = cls(cmd) inst.union_type = "void" return inst elif u_type == "int": [cmd, i1, i2, i3, i4, i5, i6, i7] = struct.unpack("<biiiiiii", data) inst = cls(cmd) inst.params["ints"] = [i1, i2, i3, i4, i5, i6, i7] inst.union_type = "int" return inst elif u_type == "uint": [cmd, i1, i2, i3, i4, i5, i6, i7] = struct.unpack("<bIIIIIII", data) inst = cls(cmd) inst.params["ints"] = [i1, i2, i3, i4, i5, i6, i7] inst.union_type = "uint" return inst elif u_type == "float": [cmd, i1, i2, i3, i4, i5, i6, i7] = struct.unpack("<bfffffff", data) inst = cls(cmd) inst.params["floats"] = [i1, i2, i3, i4, i5, i6, i7] inst.union_type = "float" return inst elif u_type == "string": [cmd] = struct.unpack("<b", data[0:1]) s = data[1:28].decode("utf-8") inst = cls(cmd) inst.params["string"] = s inst.union_type = "string" return inst def call_function(function: Function, *args) -> Tuple[bool, Any]: global ADDR mesg_buf = Buffer(function) return_type: str = "int" if function == Function.FUNC_GET_SERVO: pass elif function == Function.FUNC_SET_SERVO: mesg_buf.params["ints"][0] = args[0] mesg_buf.union_type = "int" return_type = "void" write_bytes(ADDR, mesg_buf.pack()) response = read_bytes(ADDR, 29) mesg_buf = Buffer.unpack(response, return_type) if mesg_buf.cmd != ErrorCode.ERROR_NONE: mesg_buf = Buffer.unpack(response, "string") print(f"Error {mesg_buf.cmd}: {mesg_buf.params['string']}") return (False, mesg_buf.cmd) if return_type == "void": return (True, None) elif return_type == "int" or return_type == "uint": return (True, mesg_buf.params["ints"][0]) elif return_type == "float": return (True, mesg_buf.params["floats"][0]) ```
{ "source": "jharmison-redhat/oc-mirror-e2e", "score": 2 }
#### File: plugins/action/catalog_merge.py
```python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import AnsibleActionFail
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase


class ActionModule(ActionBase):
    '''Merge OLM Operator Catalog entries, setting facts'''

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        facts = dict()

        cacheable = boolean(self._task.args.pop('cacheable', False))
        operator_catalogs = list(self._task.args.pop('operator_catalogs', []))
        catalog = str(self._task.args.pop('catalog', 'registry.redhat.io/redhat/redhat-operator-index:v4.9'))
        full = boolean(self._task.args.pop('full', True))
        packages = list(self._task.args.pop('packages', []))

        if self._task.args:
            raise AnsibleActionFail(f'Unknown args provided for catalog merging: {self._task.args}')

        added = False
        for operator_catalog in operator_catalogs:
            if operator_catalog.get('catalog') == catalog:
                if boolean(operator_catalog.get('full')) != full:
                    raise AnsibleActionFail('Unable to merge two catalog specs with different full specifications')
                catalog_packages = operator_catalog.get('packages')
                for package in packages:
                    if package not in catalog_packages:
                        catalog_packages.append(package)
                added = True
                break

        if not added:
            operator_catalogs.append({
                'catalog': catalog,
                'full': full,
                'packages': packages,
            })

        facts['operator_catalogs'] = operator_catalogs

        if facts:
            result['ansible_facts'] = facts
            result['_ansible_facts_cacheable'] = cacheable

        return result
```
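Outside of Ansible, the merge rule this action plugin implements is small enough to state directly. The sketch below restates the same semantics as a plain function; it has no Ansible imports, and the function name, example catalog, and package names are chosen for this illustration only.

```python
def merge_catalog(operator_catalogs, catalog, packages, full=True):
    """Append packages to an existing catalog entry, or add a new entry."""
    for entry in operator_catalogs:
        if entry.get("catalog") == catalog:
            if bool(entry.get("full")) != full:
                raise ValueError("conflicting 'full' settings for the same catalog")
            for pkg in packages:
                if pkg not in entry["packages"]:
                    entry["packages"].append(pkg)
            return operator_catalogs
    operator_catalogs.append({"catalog": catalog, "full": full, "packages": list(packages)})
    return operator_catalogs

cats = []
merge_catalog(cats, "registry.redhat.io/redhat/redhat-operator-index:v4.9", ["odf-operator"])
merge_catalog(cats, "registry.redhat.io/redhat/redhat-operator-index:v4.9", ["ocs-operator", "odf-operator"])
print(cats)
# one entry, with 'odf-operator' and 'ocs-operator' listed once each
```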